/*
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/path.h"
#include "qemu/memfd.h"
#include "qemu/queue.h"
#include <sys/mount.h>
#include <sys/fsuid.h>
#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <linux/capability.h>
#include <sys/timex.h>
#include <sys/socket.h>
#include <linux/sockios.h>
#include <sys/times.h>
#include <sys/statfs.h>
#include <sys/sysinfo.h>
#include <sys/signalfd.h>
//#include <sys/user.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/if_tun.h>
#include <linux/in6.h>
#include <linux/errqueue.h>
#include <linux/random.h>
#include <sys/timerfd.h>
#include <sys/eventfd.h>
#include <sys/epoll.h>
#include "qemu/xattr.h"
#ifdef CONFIG_SENDFILE
#include <sys/sendfile.h>
#endif
#ifdef HAVE_SYS_KCOV_H
#include <sys/kcov.h>
#endif
#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/mtio.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include <linux/fb.h>
#if defined(CONFIG_USBFS)
#include <linux/usbdevice_fs.h>
#include <linux/usb/ch9.h>
#endif
#include <linux/vt.h>
#include <linux/dm-ioctl.h>
#include <linux/reboot.h>
#include <linux/route.h>
#include <linux/filter.h>
#include <linux/blkpg.h>
#include <netpacket/packet.h>
#include <linux/netlink.h>
#include <linux/if_alg.h>
#include <linux/rtc.h>
#include <sound/asound.h>
#ifdef CONFIG_BTRFS
#include <linux/btrfs.h>
#endif
#ifdef HAVE_DRM_H
#include <libdrm/drm.h>
#include <libdrm/i915_drm.h>
#endif
#include "linux_loop.h"
#include "user-internals.h"
#include "signal-common.h"
#include "user-mmap.h"
#include "user/safe-syscall.h"
#include "qemu/guest-random.h"
#include "qemu/selfmap.h"
#include "user/syscall-trace.h"
#include "special-errno.h"
#include "qapi/error.h"
#include "fd-trans.h"
#ifndef CLONE_IO
#define CLONE_IO                0x80000000      /* Clone io context */
#endif
/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */
/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                     \
    (CLONE_DETACHED | CLONE_IO)

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS               \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS                             \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

#define CLONE_INVALID_FORK_FLAGS                                        \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

#define CLONE_INVALID_THREAD_FLAGS                                      \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
       CLONE_IGNORED_FLAGS))

/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */
/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH \
    _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
#define VFAT_IOCTL_READDIR_SHORT \
    _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
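/*
 * These _IOC() values reproduce the VFAT readdir ioctls that the
 * commented-out <linux/msdos_fs.h> would otherwise provide; the size field
 * is two directory entries, each a struct linux_dirent plus a 256-byte name.
 */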
#define _syscall0(type,name)            \
static type name (void)                 \
{                                       \
    return syscall(__NR_##name);        \
}

#define _syscall1(type,name,type1,arg1)         \
static type name (type1 arg1)                   \
{                                               \
    return syscall(__NR_##name, arg1);          \
}

#define _syscall2(type,name,type1,arg1,type2,arg2)      \
static type name (type1 arg1,type2 arg2)                \
{                                                       \
    return syscall(__NR_##name, arg1, arg2);            \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)   \
static type name (type1 arg1,type2 arg2,type3 arg3)             \
{                                                               \
    return syscall(__NR_##name, arg1, arg2, arg3);              \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)    \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)              \
{                                                                           \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4);                    \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5)                                             \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{                                                                         \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);            \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6)                                  \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6)                                             \
{                                                                         \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);      \
}
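/*
 * Illustration of how these wrappers are used: a line such as
 *     _syscall2(int, capset, struct __user_cap_header_struct *, header,
 *               struct __user_cap_data_struct *, data);
 * expands to a static function "capset(header, data)" whose body is just
 * "return syscall(__NR_capset, header, data);", so the rest of this file
 * can invoke host syscalls that libc does not wrap.
 */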
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
#define __NR_sys_syslog __NR_syslog
#if defined(__NR_futex)
# define __NR_sys_futex __NR_futex
#endif
#if defined(__NR_futex_time64)
# define __NR_sys_futex_time64 __NR_futex_time64
#endif
#define __NR_sys_statx __NR_statx

#if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif

/* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
#ifndef TARGET_O_NONBLOCK_MASK
#define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
#endif

#define __NR_sys_gettid __NR_gettid
_syscall0(int, sys_gettid)
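/* glibc has historically lacked a gettid() wrapper, hence the direct syscall above. */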
/* For the 64-bit guest on 32-bit host case we must emulate
 * getdents using getdents64, because otherwise the host
 * might hand us back more dirent records than we can fit
 * into the guest buffer after structure format conversion.
 * Otherwise we emulate getdents with getdents if the host has it.
 */
#if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
#define EMULATE_GETDENTS_WITH_GETDENTS
#endif

#if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
#if (defined(TARGET_NR_getdents) && \
      !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
_syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
          siginfo_t *, uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
_syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
/* sched_attr is not defined in glibc */
struct sched_attr {
    uint32_t size;
    uint32_t sched_policy;
    uint64_t sched_flags;
    int32_t sched_nice;
    uint32_t sched_priority;
    uint64_t sched_runtime;
    uint64_t sched_deadline;
    uint64_t sched_period;
    uint32_t sched_util_min;
    uint32_t sched_util_max;
};
#define __NR_sys_sched_getattr __NR_sched_getattr
_syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
          unsigned int, size, unsigned int, flags);
#define __NR_sys_sched_setattr __NR_sched_setattr
_syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
          unsigned int, flags);
#define __NR_sys_sched_getscheduler __NR_sched_getscheduler
_syscall1(int, sys_sched_getscheduler, pid_t, pid);
#define __NR_sys_sched_setscheduler __NR_sched_setscheduler
_syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
          const struct sched_param *, param);
#define __NR_sys_sched_getparam __NR_sched_getparam
_syscall2(int, sys_sched_getparam, pid_t, pid,
          struct sched_param *, param);
#define __NR_sys_sched_setparam __NR_sched_setparam
_syscall2(int, sys_sched_setparam, pid_t, pid,
          const struct sched_param *, param);
#define __NR_sys_getcpu __NR_getcpu
_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
_syscall2(int, ioprio_get, int, which, int, who)
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
#endif
#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
_syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
          unsigned long, idx1, unsigned long, idx2)
#endif
/*
 * It is assumed that struct statx is architecture independent.
 */
#if defined(TARGET_NR_statx) && defined(__NR_statx)
_syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
          unsigned int, mask, struct target_statx *, statxbuf)
#endif
#if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
_syscall2(int, membarrier, int, cmd, int, flags)
#endif
static const bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest. */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
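/*
 * Each row is (target_mask, target_bits, host_mask, host_bits): the bitmask
 * translation helpers in this file walk the table and, wherever
 * (value & target_mask) == target_bits, OR in host_bits (and vice versa for
 * the reverse direction). The O_ACCMODE rows share a mask because the access
 * mode is a small enumeration within that field, not independent bits.
 */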
_syscall2(int, sys_getcwd1, char *, buf, size_t, size)
#if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

static inline int next_free_host_timer(void)
{
    int k;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
static inline int host_to_target_errno(int host_errno)
{
    switch (host_errno) {
#define E(X)  case X: return TARGET_##X;
#include "errnos.c.inc"
#undef E
    default:
        return host_errno;
    }
}

static inline int target_to_host_errno(int target_errno)
{
    switch (target_errno) {
#define E(X)  case TARGET_##X: return X;
#include "errnos.c.inc"
#undef E
    default:
        return target_errno;
    }
}
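/*
 * "errnos.c.inc" contains one E(X) line per errno known to QEMU, so each
 * inclusion above stamps out the full case list, with the direction of the
 * mapping chosen by the local definition of E().
 */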
abi_long get_errno(abi_long ret)
{
    if (ret == -1) {
        return -host_to_target_errno(errno);
    }
    return ret;
}
const char *target_strerror(int err)
{
    if (err == QEMU_ERESTARTSYS) {
        return "To be restarted";
    }
    if (err == QEMU_ESIGRETURN) {
        return "Successful exit from sigreturn";
    }

    return strerror(target_to_host_errno(err));
}
static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
{
    int i;
    uint8_t b;

    if (usize <= ksize) {
        return 1;
    }
    for (i = ksize; i < usize; i++) {
        if (get_user_u8(b, addr + i)) {
            return -TARGET_EFAULT;
        }
        if (b) {
            return 0;
        }
    }
    return 1;
}
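/*
 * The safe_syscallN() wrappers below route through safe_syscall() (see
 * user/safe-syscall.h, included above): roughly, a guest signal that
 * arrives before the host syscall instruction makes the wrapper fail with
 * QEMU_ERESTARTSYS instead of the signal being lost, so the main loop can
 * deliver the signal and then restart the syscall.
 */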
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
              int, flags, mode_t, mode)
#if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
              struct rusage *, rusage)
#endif
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
              int, options, struct rusage *, rusage)
safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
#endif
#if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
              size_t, sigsetsize)
#endif
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
              size_t, sigsetsize)
#if defined(__NR_futex)
safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
              socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
#if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
#endif
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
#if defined(TARGET_NR_nanosleep)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#endif
#if defined(TARGET_NR_clock_nanosleep) || \
    defined(TARGET_NR_clock_nanosleep_time64)
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
#ifdef __NR_ipc
#if defined(__s390x__)
safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr)
#else
safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
#endif
#endif
#ifdef __NR_msgsnd
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
#endif
#ifdef __NR_msgrcv
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
#endif
#ifdef __NR_semtimedop
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedsend_time64)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedreceive) || \
    defined(TARGET_NR_mq_timedreceive_time64)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
              int, outfd, loff_t *, poutoff, size_t, length,
              unsigned int, flags)
#endif
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
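/* Illustrative call shape only: a caller querying a lock therefore writes
 *     struct flock64 fl64 = { .l_type = F_RDLCK, .l_whence = SEEK_SET };
 *     ret = get_errno(safe_fcntl(fd, F_GETLK64, &fl64));
 * and gets 64-bit file offsets on both 32-bit and 64-bit hosts.
 */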
static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}
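/*
 * Only the low SOCK_TYPE_MASK bits plus the CLOEXEC/NONBLOCK flag bits need
 * translating here; on most targets the values coincide with the host's,
 * but MIPS, for example, numbers SOCK_DGRAM/SOCK_STREAM differently, so the
 * switch above cannot simply return host_type unchanged.
 */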
static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}
//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)

/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    /* brk pointers are always untagged */

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done...  */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h_untagged(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    } else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}
static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
#endif
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
#ifdef TARGET_NR_setrlimit
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}
#endif
#if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
#endif
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
#ifdef RLIMIT_RTTIME
    case TARGET_RLIMIT_RTTIME:
        return RLIMIT_RTTIME;
#endif
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
static inline abi_long copy_from_user_timeval64(struct timeval *tv,
                                                abi_ulong target_tv_addr)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
#endif
static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
                                              const struct timeval *tv)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
#if defined(TARGET_NR_futex) || \
    defined(TARGET_NR_rt_sigtimedwait) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) || \
    defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
    defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
    defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
    defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
#if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
    defined(TARGET_NR_timer_settime64) || \
    defined(TARGET_NR_mq_timedsend_time64) || \
    defined(TARGET_NR_mq_timedreceive_time64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_clock_nanosleep_time64) || \
    defined(TARGET_NR_rt_sigtimedwait_time64) || \
    defined(TARGET_NR_utimensat) || \
    defined(TARGET_NR_utimensat_time64) || \
    defined(TARGET_NR_semtimedop_time64) || \
    defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
                                                 abi_ulong target_addr)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    /* in 32bit mode, this drops the padding */
    host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
                                                 struct timespec *host_ts)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
#if defined(TARGET_NR_gettimeofday)
static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
                                             struct timezone *tz)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_settimeofday)
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}
static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
#endif
#if defined(TARGET_WANT_OLD_SYS_SELECT)
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
#if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
                            abi_long arg4, abi_long arg5, abi_long arg6,
                            bool time64)
{
    abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /*
     * The 6th arg is actually two args smashed together,
     * so we cannot use the C library.
     */
    struct {
        sigset_t *set;
        size_t size;
    } sig, *sig_ptr;

    abi_ulong arg_sigset, arg_sigsize, *arg7;

    n = arg1;
    rfd_addr = arg2;
    wfd_addr = arg3;
    efd_addr = arg4;
    ts_addr = arg5;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /*
     * This takes a timespec, and not a timeval, so we cannot
     * use the do_select() helper ...
     */
    if (ts_addr) {
        if (time64) {
            if (target_to_host_timespec64(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        }
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    /* Extract the two packed args for the sigset */
    sig_ptr = NULL;
    if (arg6) {
        arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
        if (!arg7) {
            return -TARGET_EFAULT;
        }
        arg_sigset = tswapal(arg7[0]);
        arg_sigsize = tswapal(arg7[1]);
        unlock_user(arg7, arg6, 0);

        if (arg_sigset) {
            ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
            if (ret != 0) {
                return ret;
            }
            sig_ptr = &sig;
            sig.size = SIGSET_T_SIZE;
        }
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, sig_ptr));

    if (sig_ptr) {
        finish_sigsuspend_mask(ret);
    }

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
            return -TARGET_EFAULT;
        }
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
            return -TARGET_EFAULT;
        }
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
            return -TARGET_EFAULT;
        }
        if (time64) {
            if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        }
    }
    return ret;
}
#endif
#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
    defined(TARGET_NR_ppoll_time64)
static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
                         abi_long arg4, abi_long arg5, bool ppoll, bool time64)
{
    struct target_pollfd *target_pfd;
    unsigned int nfds = arg2;
    struct pollfd *pfd;
    unsigned int i;
    abi_long ret;

    pfd = NULL;
    target_pfd = NULL;
    if (nfds) {
        if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
            return -TARGET_EINVAL;
        }
        target_pfd = lock_user(VERIFY_WRITE, arg1,
                               sizeof(struct target_pollfd) * nfds, 1);
        if (!target_pfd) {
            return -TARGET_EFAULT;
        }

        pfd = alloca(sizeof(struct pollfd) * nfds);
        for (i = 0; i < nfds; i++) {
            pfd[i].fd = tswap32(target_pfd[i].fd);
            pfd[i].events = tswap16(target_pfd[i].events);
        }
    }
    if (ppoll) {
        struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
        sigset_t *set = NULL;

        if (arg3) {
            if (time64) {
                if (target_to_host_timespec64(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                if (target_to_host_timespec(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            }
        } else {
            timeout_ts = NULL;
        }

        if (arg4) {
            ret = process_sigsuspend_mask(&set, arg4, arg5);
            if (ret != 0) {
                unlock_user(target_pfd, arg1, 0);
                return ret;
            }
        }

        ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
                                   set, SIGSET_T_SIZE));

        if (set) {
            finish_sigsuspend_mask(ret);
        }
        if (!is_error(ret) && arg3) {
            if (time64) {
                if (host_to_target_timespec64(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                if (host_to_target_timespec(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            }
        }
    } else {
        struct timespec ts, *pts;

        if (arg3 >= 0) {
            /* Convert ms to secs, ns */
            ts.tv_sec = arg3 / 1000;
            ts.tv_nsec = (arg3 % 1000) * 1000000LL;
            pts = &ts;
        } else {
            /* -ve poll() timeout means "infinite" */
            pts = NULL;
        }
        ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
    }

    if (!is_error(ret)) {
        for (i = 0; i < nfds; i++) {
            target_pfd[i].revents = tswap16(pfd[i].revents);
        }
    }
    unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
    return ret;
}
#endif
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}
static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;

    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        cpu_env->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        cpu_env->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        cpu_env->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        cpu_env->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK &&
        len >= sizeof(struct target_sockaddr_nl)) {
        struct target_sockaddr_nl *target_nl =
               (struct target_sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
               (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            qemu_log_mask(LOG_UNIMP,
                          ("Unsupported ancillary data %d/%d: "
                           "unhandled msg size\n"),
                          tswap32(target_cmsg->cmsg_level),
                          tswap32(target_cmsg->cmsg_type));
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
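/*
 * The mirror-image conversion: guest and host control-message layouts can
 * differ in field width and alignment (struct target_cmsghdr uses abi_long
 * lengths), so headers are rebuilt and each payload type we understand
 * (SCM_RIGHTS fds, credentials, timestamps, extended errors) is converted
 * field by field rather than copied wholesale.
 */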
1833 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1834 struct msghdr
*msgh
)
1836 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1837 abi_long msg_controllen
;
1838 abi_ulong target_cmsg_addr
;
1839 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1840 socklen_t space
= 0;
1842 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1843 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1845 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1846 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1847 target_cmsg_start
= target_cmsg
;
1849 return -TARGET_EFAULT
;
1851 while (cmsg
&& target_cmsg
) {
1852 void *data
= CMSG_DATA(cmsg
);
1853 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1855 int len
= cmsg
->cmsg_len
- sizeof(struct cmsghdr
);
1856 int tgt_len
, tgt_space
;
1858 /* We never copy a half-header but may copy half-data;
1859 * this is Linux's behaviour in put_cmsg(). Note that
1860 * truncation here is a guest problem (which we report
1861 * to the guest via the CTRUNC bit), unlike truncation
1862 * in target_to_host_cmsg, which is a QEMU bug.
1864 if (msg_controllen
< sizeof(struct target_cmsghdr
)) {
1865 target_msgh
->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        tgt_len = len;
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IP:
            switch (cmsg->cmsg_type) {
            case IP_TTL:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IP_RECVERR:
            {
                struct errhdr_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in offender;
                };
                struct errhdr_t *errh = (struct errhdr_t *)data;
                struct errhdr_t *target_errh =
                    (struct errhdr_t *)target_data;

                if (len != sizeof(struct errhdr_t) ||
                    tgt_len != sizeof(struct errhdr_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                                        (void *) &errh->offender,
                                        sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IPV6:
            switch (cmsg->cmsg_type) {
            case IPV6_HOPLIMIT:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IPV6_RECVERR:
            {
                struct errhdr6_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in6 offender;
                };
                struct errhdr6_t *errh = (struct errhdr6_t *)data;
                struct errhdr6_t *target_errh =
                    (struct errhdr6_t *)target_data;

                if (len != sizeof(struct errhdr6_t) ||
                    tgt_len != sizeof(struct errhdr6_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                                        (void *) &errh->offender,
                                        sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
        tgt_space = TARGET_CMSG_SPACE(tgt_len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
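/*
 * Illustration of the copy-and-convert step above, assuming a 32-bit guest
 * on a 64-bit host: a host SO_TIMESTAMP payload is a 16-byte struct timeval
 * (two 8-byte fields) while the guest expects two 4-byte fields, so tgt_len
 * is first adjusted to sizeof(struct target_timeval) and each field is then
 * narrowed individually with __put_user(), which also byteswaps when host
 * and target endianness differ.
 */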
/* do_setsockopt() Must return target values and target errnos. */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
    case SOL_UDP:
        /* TCP and UDP options all take an 'int' value.  */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTTL:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!ip_mreq_source) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
        case IPV6_ADDRFORM:
        case IPV6_2292PKTINFO:
        case IPV6_RECVTCLASS:
        case IPV6_RECVRTHDR:
        case IPV6_2292RTHDR:
        case IPV6_RECVHOPOPTS:
        case IPV6_2292HOPOPTS:
        case IPV6_RECVDSTOPTS:
        case IPV6_2292DSTOPTS:
        case IPV6_TCLASS:
        case IPV6_ADDR_PREFERENCES:
#ifdef IPV6_RECVPATHMTU
        case IPV6_RECVPATHMTU:
#endif
#ifdef IPV6_TRANSPARENT
        case IPV6_TRANSPARENT:
#endif
#ifdef IPV6_FREEBIND
        case IPV6_FREEBIND:
#endif
#ifdef IPV6_RECVORIGDSTADDR
        case IPV6_RECVORIGDSTADDR:
#endif
            val = 0;
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        case IPV6_PKTINFO:
        {
            struct in6_pktinfo pki;

            if (optlen < sizeof(pki)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
                return -TARGET_EFAULT;
            }

            pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &pki, sizeof(pki)));
            break;
        }
        case IPV6_ADD_MEMBERSHIP:
        case IPV6_DROP_MEMBERSHIP:
        {
            struct ipv6_mreq ipv6mreq;

            if (optlen < sizeof(ipv6mreq)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
                return -TARGET_EFAULT;
            }

            ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &ipv6mreq, sizeof(ipv6mreq)));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_ICMPV6:
        switch (optname) {
        case ICMPV6_FILTER:
        {
            struct icmp6_filter icmp6f;

            if (optlen > sizeof(icmp6f)) {
                optlen = sizeof(icmp6f);
            }

            if (copy_from_user(&icmp6f, optval_addr, optlen)) {
                return -TARGET_EFAULT;
            }

            for (val = 0; val < 8; val++) {
                icmp6f.data[val] = tswap32(icmp6f.data[val]);
            }

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &icmp6f, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_RAW:
        switch (optname) {
        case ICMP_FILTER:
        case IPV6_CHECKSUM:
            /* those take a u32 value */
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }

            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;

        default:
            goto unimplemented;
        }
        break;
#if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
    case SOL_ALG:
        switch (optname) {
        case ALG_SET_KEY:
        {
            char *alg_key = g_malloc(optlen);

            if (!alg_key) {
                return -TARGET_ENOMEM;
            }
            if (copy_from_user(alg_key, optval_addr, optlen)) {
                g_free(alg_key);
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       alg_key, optlen));
            g_free(alg_key);
            break;
        }
        case ALG_SET_AEAD_AUTHSIZE:
        {
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       NULL, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
#endif
    case TARGET_SOL_SOCKET:
        switch (optname) {
        case TARGET_SO_RCVTIMEO:
        {
            struct timeval tv;

            optname = SO_RCVTIMEO;

set_timeout:
            if (optlen != sizeof(struct target_timeval)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user_timeval(&tv, optval_addr)) {
                return -TARGET_EFAULT;
            }

            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                       &tv, sizeof(tv)));
            return ret;
        }
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            goto set_timeout;
        case TARGET_SO_ATTACH_FILTER:
        {
            struct target_sock_fprog *tfprog;
            struct target_sock_filter *tfilter;
            struct sock_fprog fprog;
            struct sock_filter *filter;
            int i;

            if (optlen != sizeof(*tfprog)) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            if (!lock_user_struct(VERIFY_READ, tfilter,
                                  tswapal(tfprog->filter), 0)) {
                unlock_user_struct(tfprog, optval_addr, 1);
                return -TARGET_EFAULT;
            }

            fprog.len = tswap16(tfprog->len);
            filter = g_try_new(struct sock_filter, fprog.len);
            if (filter == NULL) {
                unlock_user_struct(tfilter, tfprog->filter, 1);
                unlock_user_struct(tfprog, optval_addr, 1);
                return -TARGET_ENOMEM;
            }
            for (i = 0; i < fprog.len; i++) {
                filter[i].code = tswap16(tfilter[i].code);
                filter[i].jt = tfilter[i].jt;
                filter[i].jf = tfilter[i].jf;
                filter[i].k = tswap32(tfilter[i].k);
            }
            fprog.filter = filter;

            ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
                                       SO_ATTACH_FILTER, &fprog,
                                       sizeof(fprog)));
            g_free(filter);

            unlock_user_struct(tfilter, tfprog->filter, 1);
            unlock_user_struct(tfprog, optval_addr, 1);
            return ret;
        }
        case TARGET_SO_BINDTODEVICE:
        {
            char *dev_ifname, *addr_ifname;

            if (optlen > IFNAMSIZ - 1) {
                optlen = IFNAMSIZ - 1;
            }
            dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!dev_ifname) {
                return -TARGET_EFAULT;
            }
            optname = SO_BINDTODEVICE;
            addr_ifname = alloca(IFNAMSIZ);
            memcpy(addr_ifname, dev_ifname, optlen);
            addr_ifname[optlen] = 0;
            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                       addr_ifname, optlen));
            unlock_user (dev_ifname, optval_addr, 0);
            return ret;
        }
        case TARGET_SO_LINGER:
        {
            struct linger lg;
            struct target_linger *tlg;

            if (optlen != sizeof(struct target_linger)) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
                return -TARGET_EFAULT;
            }
            __get_user(lg.l_onoff, &tlg->l_onoff);
            __get_user(lg.l_linger, &tlg->l_linger);
            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
                                       &lg, sizeof(lg)));
            unlock_user_struct(tlg, optval_addr, 0);
            return ret;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            break;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            break;
        case TARGET_SO_REUSEPORT:
            optname = SO_REUSEPORT;
            break;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            break;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            break;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            break;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            break;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            break;
        case TARGET_SO_SNDBUFFORCE:
            optname = SO_SNDBUFFORCE;
            break;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            break;
        case TARGET_SO_RCVBUFFORCE:
            optname = SO_RCVBUFFORCE;
            break;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            break;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            break;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            break;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            break;
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            break;
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            break;
        case TARGET_SO_PASSSEC:
            optname = SO_PASSSEC;
            break;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            break;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            break;
        default:
            goto unimplemented;
        }
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                   &val, sizeof(val)));
        break;
#ifdef SOL_NETLINK
    case SOL_NETLINK:
        switch (optname) {
        case NETLINK_PKTINFO:
        case NETLINK_ADD_MEMBERSHIP:
        case NETLINK_DROP_MEMBERSHIP:
        case NETLINK_BROADCAST_ERROR:
        case NETLINK_NO_ENOBUFS:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LISTEN_ALL_NSID:
        case NETLINK_CAP_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
        case NETLINK_EXT_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
        case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
            break;
        default:
            goto unimplemented;
        }
        val = 0;
        if (optlen < sizeof(uint32_t)) {
            return -TARGET_EINVAL;
        }
        if (get_user_u32(val, optval_addr)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
                                   sizeof(val)));
        break;
#endif /* SOL_NETLINK */
    default:
    unimplemented:
        qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
                      level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}
/* do_getsockopt() Must return target values and target errnos. */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_RCVTIMEO: {
            struct timeval tv;
            socklen_t tvlen;

            optname = SO_RCVTIMEO;

get_timeout:
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            tvlen = sizeof(tv);
            ret = get_errno(getsockopt(sockfd, level, optname,
                                       &tv, &tvlen));
            if (ret < 0) {
                return ret;
            }
            if (len > sizeof(struct target_timeval)) {
                len = sizeof(struct target_timeval);
            }
            if (copy_to_user_timeval(optval_addr, &tv)) {
                return -TARGET_EFAULT;
            }
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            goto get_timeout;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case TARGET_SO_PEERSEC: {
            char *name;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }
            name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
            if (!name) {
                return -TARGET_EFAULT;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
                                       name, &lv));
            if (put_user_u32(lv, optlen)) {
                ret = -TARGET_EFAULT;
            }
            unlock_user(name, optval_addr, lv);
            break;
        }
        case TARGET_SO_LINGER:
        {
            struct linger lg;
            socklen_t lglen;
            struct target_linger *tlg;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            lglen = sizeof(lg);
            ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
                                       &lg, &lglen));
            if (ret < 0) {
                return ret;
            }
            if (len > lglen) {
                len = lglen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(lg.l_onoff, &tlg->l_onoff);
            __put_user(lg.l_linger, &tlg->l_linger);
            unlock_user_struct(tlg, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_REUSEPORT:
            optname = SO_REUSEPORT;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        case TARGET_SO_PROTOCOL:
            optname = SO_PROTOCOL;
            goto int_case;
        case TARGET_SO_DOMAIN:
            optname = SO_DOMAIN;
            goto int_case;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
    case SOL_UDP:
        /* TCP and UDP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            val = host_to_target_sock_type(val);
        }
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
        case IPV6_ADDRFORM:
        case IPV6_2292PKTINFO:
        case IPV6_RECVTCLASS:
        case IPV6_RECVRTHDR:
        case IPV6_2292RTHDR:
        case IPV6_RECVHOPOPTS:
        case IPV6_2292HOPOPTS:
        case IPV6_RECVDSTOPTS:
        case IPV6_2292DSTOPTS:
        case IPV6_TCLASS:
        case IPV6_ADDR_PREFERENCES:
#ifdef IPV6_RECVPATHMTU
        case IPV6_RECVPATHMTU:
#endif
#ifdef IPV6_TRANSPARENT
        case IPV6_TRANSPARENT:
#endif
#ifdef IPV6_FREEBIND
        case IPV6_FREEBIND:
#endif
#ifdef IPV6_RECVORIGDSTADDR
        case IPV6_RECVORIGDSTADDR:
#endif
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
#ifdef SOL_NETLINK
    case SOL_NETLINK:
        switch (optname) {
        case NETLINK_PKTINFO:
        case NETLINK_BROADCAST_ERROR:
        case NETLINK_NO_ENOBUFS:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LISTEN_ALL_NSID:
        case NETLINK_CAP_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
        case NETLINK_EXT_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
        case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len != sizeof(val)) {
                return -TARGET_EINVAL;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0) {
                return ret;
            }
            if (put_user_u32(lv, optlen)
                || put_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            break;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LIST_MEMBERSHIPS:
        {
            uint32_t *results;
            int i;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }
            results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
            if (!results && len > 0) {
                return -TARGET_EFAULT;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
            if (ret < 0) {
                unlock_user(results, optval_addr, 0);
                return ret;
            }
            /* swap host endianness to target endianness. */
            for (i = 0; i < (len / sizeof(uint32_t)); i++) {
                results[i] = tswap32(results[i]);
            }
            if (put_user_u32(lv, optlen)) {
                return -TARGET_EFAULT;
            }
            unlock_user(results, optval_addr, 0);
            break;
        }
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
        default:
            goto unimplemented;
        }
        break;
#endif /* SOL_NETLINK */
    default:
    unimplemented:
        qemu_log_mask(LOG_UNIMP,
                      "getsockopt level=%d optname=%d not yet supported\n",
                      level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
/* Convert target low/high pair representing file offset into the host
 * low/high pair. This function doesn't handle offsets bigger than 64 bits
 * as the kernel doesn't handle them either.
 */
static void target_to_host_low_high(abi_ulong tlow,
                                    abi_ulong thigh,
                                    unsigned long *hlow,
                                    unsigned long *hhigh)
{
    uint64_t off = tlow |
        ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
        TARGET_LONG_BITS / 2;

    *hlow = off;
    *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
}
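/*
 * Worked example, assuming a 32-bit target on a 64-bit host:
 * tlow = 0x89abcdef and thigh = 0x01234567 combine to
 * off = 0x0123456789abcdef, giving *hlow = 0x0123456789abcdef and
 * *hhigh = 0. The shifts are split in two so that a shift by the full
 * 64-bit width of the type (undefined behaviour in C) is never
 * attempted when TARGET_LONG_BITS or HOST_LONG_BITS is 64.
 */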
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
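/*
 * Example of the partial-write behaviour above: for a three-element vector
 * whose second buffer is unmapped, element 0 stays locked as usual while
 * the remaining elements are recorded with zero length (and a null base
 * for the faulting one), so a subsequent writev() performs a short write
 * instead of failing outright, matching kernel behaviour for a partially
 * faulting iovec.
 */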
static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                         abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec) {
        for (i = 0; i < count; i++) {
            abi_ulong base = tswapal(target_vec[i].iov_base);
            abi_long len = tswapal(target_vec[i].iov_len);
            if (len < 0) {
                break;
            }
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
        unlock_user(target_vec, target_addr, 0);
    }

    g_free(vec);
}
static inline int target_to_host_sock_type(int *type)
{
    int host_type = 0;
    int target_type = *type;

    switch (target_type & TARGET_SOCK_TYPE_MASK) {
    case TARGET_SOCK_DGRAM:
        host_type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        host_type = SOCK_STREAM;
        break;
    default:
        host_type = target_type & TARGET_SOCK_TYPE_MASK;
        break;
    }
    if (target_type & TARGET_SOCK_CLOEXEC) {
#if defined(SOCK_CLOEXEC)
        host_type |= SOCK_CLOEXEC;
#else
        return -TARGET_EINVAL;
#endif
    }
    if (target_type & TARGET_SOCK_NONBLOCK) {
#if defined(SOCK_NONBLOCK)
        host_type |= SOCK_NONBLOCK;
#elif !defined(O_NONBLOCK)
        return -TARGET_EINVAL;
#endif
    }
    *type = host_type;
    return 0;
}
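/*
 * Example: a guest calling
 * socket(AF_INET, SOCK_STREAM | TARGET_SOCK_NONBLOCK, 0) reaches this
 * function with the target's flag encoding; the switch maps the base type
 * onto the host's SOCK_STREAM value and ORs in the host SOCK_NONBLOCK
 * where one exists, otherwise the O_NONBLOCK fallback in
 * sock_flags_fixup() below takes over after the socket has been created.
 */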
/* Try to emulate socket type flags after socket creation.  */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
/* do_socket() Must return target values and target errnos. */
static abi_long do_socket(int domain, int type, int protocol)
{
    int target_type = type;
    int ret;

    ret = target_to_host_sock_type(&type);
    if (ret) {
        return ret;
    }

    if (domain == PF_NETLINK && !(
#ifdef CONFIG_RTNETLINK
         protocol == NETLINK_ROUTE ||
#endif
         protocol == NETLINK_KOBJECT_UEVENT ||
         protocol == NETLINK_AUDIT)) {
        return -TARGET_EPROTONOSUPPORT;
    }

    if (domain == AF_PACKET ||
        (domain == AF_INET && type == SOCK_PACKET)) {
        protocol = tswap16(protocol);
    }

    ret = get_errno(socket(domain, type, protocol));
    if (ret >= 0) {
        ret = sock_flags_fixup(ret, target_type);
        if (type == SOCK_PACKET) {
            /* Manage an obsolete case :
             * if socket type is SOCK_PACKET, bind by name
             */
            fd_trans_register(ret, &target_packet_trans);
        } else if (domain == PF_NETLINK) {
            switch (protocol) {
#ifdef CONFIG_RTNETLINK
            case NETLINK_ROUTE:
                fd_trans_register(ret, &target_netlink_route_trans);
                break;
#endif
            case NETLINK_KOBJECT_UEVENT:
                /* nothing to do: messages are strings */
                break;
            case NETLINK_AUDIT:
                fd_trans_register(ret, &target_netlink_audit_trans);
                break;
            default:
                g_assert_not_reached();
            }
        }
    }
    return ret;
}
/* do_bind() Must return target values and target errnos. */
static abi_long do_bind(int sockfd, abi_ulong target_addr,
                        socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(bind(sockfd, addr, addrlen));
}
/* do_connect() Must return target values and target errnos. */
static abi_long do_connect(int sockfd, abi_ulong target_addr,
                           socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(safe_connect(sockfd, addr, addrlen));
}
/* do_sendrecvmsg_locked() Must return target values and target errnos. */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    abi_ulong count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
             */
            msg.msg_name = (void *)-1;
        } else if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    memset(msg.msg_control, 0, msg.msg_controllen);

    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);

    if (count > IOV_MAX) {
        /* sendrcvmsg returns a different errno for this condition than
         * readv/writev, so we must catch it here before lock_iovec() does.
         */
        ret = -TARGET_EMSGSIZE;
        goto out2;
    }

    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            void *host_msg;

            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                                       MIN(msg.msg_iov->iov_len, len));
            } else {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                msgp->msg_flags = tswap32(msg.msg_flags);
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                                  msg.msg_name,
                                                  msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }
                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}
static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
                               int flags, int send)
{
    abi_long ret;
    struct target_msghdr *msgp;

    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
                          msgp,
                          target_msg,
                          send ? 1 : 0)) {
        return -TARGET_EFAULT;
    }
    ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
    unlock_user_struct(msgp, target_msg, send ? 0 : 1);
    return ret;
}
/* We don't rely on the C library to have sendmmsg/recvmmsg support,
 * so it might not have this *mmsg-specific flag either.
 */
#ifndef MSG_WAITFORONE
#define MSG_WAITFORONE 0x10000
#endif

static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
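/*
 * Example of the MSG_WAITFORONE handling above: for a guest
 * recvmmsg(fd, vec, 4, MSG_WAITFORONE, NULL), the first iteration may
 * block until one datagram arrives; MSG_DONTWAIT is then ORed into flags,
 * so the remaining three slots are filled only from datagrams already
 * queued and the loop returns a short count rather than blocking again.
 */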
/* do_accept4() Must return target values and target errnos. */
static abi_long do_accept4(int fd, abi_ulong target_addr,
                           abi_ulong target_addrlen_addr, int flags)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;
    int host_flags;

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    if (target_addr == 0) {
        return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
    }

    /* linux returns EFAULT if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
        return -TARGET_EFAULT;
    }

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}
/* do_getpeername() Must return target values and target errnos. */
static abi_long do_getpeername(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
        return -TARGET_EFAULT;
    }

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(getpeername(fd, addr, &ret_addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}
/* do_getsockname() Must return target values and target errnos. */
static abi_long do_getsockname(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
        return -TARGET_EFAULT;
    }

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(getsockname(fd, addr, &ret_addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}
/* do_socketpair() Must return target values and target errnos. */
static abi_long do_socketpair(int domain, int type, int protocol,
                              abi_ulong target_tab_addr)
{
    int tab[2];
    abi_long ret;

    target_to_host_sock_type(&type);

    ret = get_errno(socketpair(domain, type, protocol, tab));
    if (!is_error(ret)) {
        if (put_user_s32(tab[0], target_tab_addr)
            || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
/* do_sendto() Must return target values and target errnos. */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    void *copy_msg = NULL;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (fd_trans_target_to_host_data(fd)) {
        copy_msg = host_msg;
        host_msg = g_malloc(len);
        memcpy(host_msg, copy_msg, len);
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            goto fail;
        }
    }
    if (target_addr) {
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            goto fail;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
fail:
    if (copy_msg) {
        g_free(host_msg);
        host_msg = copy_msg;
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
/* do_recvfrom() Must return target values and target errnos. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    if (!msg) {
        host_msg = NULL;
    } else {
        host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
        if (!host_msg) {
            return -TARGET_EFAULT;
        }
    }
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret_addrlen = addrlen;
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &ret_addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet.  */
        addrlen = 0; /* To keep compiler quiet.  */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        if (fd_trans_host_to_target_data(fd)) {
            abi_long trans;
            trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
            if (is_error(trans)) {
                ret = trans;
                goto fail;
            }
        }
        if (target_addr) {
            host_to_target_sockaddr(target_addr, addr,
                                    MIN(addrlen, ret_addrlen));
            if (put_user_u32(ret_addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        unlock_user(host_msg, msg, len);
    } else {
fail:
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now that we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
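/*
 * Example: on targets that funnel everything through socketcall, the guest
 * libc implements connect(fd, addr, len) roughly as
 *     socketcall(TARGET_SYS_CONNECT, args)
 * with args pointing at the three abi_longs {fd, addr, len} in guest
 * memory; the loop above then fetches nargs[TARGET_SYS_CONNECT] = 3 words
 * from vptr and dispatches to do_connect().
 */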
#define N_SHM_REGIONS 32

static struct shm_region {
    abi_ulong start;
    abi_ulong size;
    bool in_use;
} shm_regions[N_SHM_REGIONS];
#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct */
struct target_semid64_ds
{
  struct target_ipc_perm sem_perm;
  abi_ulong sem_otime;
#if TARGET_ABI_BITS == 32
  abi_ulong __unused1;
#endif
  abi_ulong sem_ctime;
#if TARGET_ABI_BITS == 32
  abi_ulong __unused2;
#endif
  abi_ulong sem_nsems;
  abi_ulong __unused3;
  abi_ulong __unused4;
};
#endif
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->sem_perm), target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
    host_sd->sem_otime = tswapal(target_sd->sem_otime);
    host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
    target_sd->sem_otime = tswapal(host_sd->sem_otime);
    target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};

static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}
union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};

union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};
static inline abi_long target_to_host_semarray(int semid,
                                               unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = g_try_new(unsigned short, nsems);
    if (!*host_array) {
        return -TARGET_ENOMEM;
    }
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
    if (!array) {
        g_free(*host_array);
        return -TARGET_EFAULT;
    }

    for(i=0; i<nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}
static inline abi_long host_to_target_semarray(int semid,
                                               abi_ulong target_addr,
                                               unsigned short **host_array)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    array = lock_user(VERIFY_WRITE, target_addr,
                      nsems*sizeof(unsigned short), 0);
    if (!array)
        return -TARGET_EFAULT;

    for(i=0; i<nsems; i++) {
        __put_user((*host_array)[i], &array[i]);
    }
    g_free(*host_array);
    unlock_user(array, target_addr, 1);

    return 0;
}
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;

    cmd &= 0xff;

    switch( cmd ) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element.  To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
         * a swap of the 4 byte val field. In other cases, the data is
         * already in proper host byte order. */
        if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
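/*
 * Worked example of the SETVAL fixup in do_semctl(): with a 64-bit
 * big-endian guest on a little-endian host, the guest's 4-byte val
 * occupies the opposite half of the 8-byte union from where the host
 * would read it. Byteswapping the whole 8-byte buf member moves val
 * into the host-visible half, and the subsequent tswap32() brings the
 * value itself into host byte order.
 */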
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};

static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,
                                             unsigned nsops)
{
    struct target_sembuf *target_sembuf;
    int i;

    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops*sizeof(struct target_sembuf), 1);
    if (!target_sembuf)
        return -TARGET_EFAULT;

    for(i=0; i<nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
    }

    unlock_user(target_sembuf, target_addr, 0);

    return 0;
}
#if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
    defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)

/*
 * This macro is required to handle the s390 variants, which pass the
 * arguments in a different order than default.
 */
#ifdef __s390x__
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), (__timeout), (__sops)
#else
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), 0, (__sops), (__timeout)
#endif

static inline abi_long do_semtimedop(int semid,
                                     abi_long ptr,
                                     unsigned nsops,
                                     abi_long timeout, bool time64)
{
    struct sembuf *sops;
    struct timespec ts, *pts = NULL;
    abi_long ret;

    if (timeout) {
        pts = &ts;
        if (time64) {
            if (target_to_host_timespec64(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        }
    }

    if (nsops > TARGET_SEMOPM) {
        return -TARGET_E2BIG;
    }

    sops = g_new(struct sembuf, nsops);

    if (target_to_host_sembuf(sops, ptr, nsops)) {
        g_free(sops);
        return -TARGET_EFAULT;
    }

    ret = -TARGET_ENOSYS;
#ifdef __NR_semtimedop
    ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
                                 SEMTIMEDOP_IPC_ARGS(nsops, sops,
                                                     (long)pts)));
    }
#endif
    g_free(sops);
    return ret;
}
#endif
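/*
 * For illustration, the sys_ipc fallback above expands on the default
 * variant to
 *     safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, timeout)
 * whereas on s390x it expands to
 *     safe_ipc(IPCOP_semtimedop, semid, nsops, timeout, sops)
 * matching that kernel's five-argument sys_ipc entry point.
 */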
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qnum;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
};
static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_md->msg_perm), target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapal(target_md->msg_stime);
    host_md->msg_rtime = tswapal(target_md->msg_rtime);
    host_md->msg_ctime = tswapal(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapal(target_md->msg_qnum);
    host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
    host_md->msg_lspid = tswapal(target_md->msg_lspid);
    host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);
    return 0;
}
static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapal(host_md->msg_stime);
    target_md->msg_rtime = tswapal(host_md->msg_rtime);
    target_md->msg_ctime = tswapal(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapal(host_md->msg_qnum);
    target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
    target_md->msg_lspid = tswapal(host_md->msg_lspid);
    target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
    return 0;
}
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};

static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg, ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};

static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgsnd
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
#ifdef __s390x__
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb));
#else
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb, 0));
#endif
    }
#endif
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
#ifdef __sparc__
/* On SPARC, msgrcv does not use the kludge on the final 2 arguments. */
#define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
#elif defined(__s390x__)
/* The s390 sys_ipc variant has only five parameters. */
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp})
#else
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp}), 0
#endif

static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgrcv
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
                                 msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
    }
#endif

    if (ret > 0) {
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};

static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
{
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
    return 0;
}
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};

static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
{
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
    return 0;
}
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the larger size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return TARGET_PAGE_SIZE;
}
#endif
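/*
 * Worked example (added for illustration): with shmlba == 0x1000, a
 * requested shmaddr of 0x12345 is not shmlba-aligned.  With SHM_RND
 * set, do_shmat() below rounds it down: 0x12345 & ~0xfff == 0x12000.
 * Without SHM_RND the attach fails with -TARGET_EINVAL.
 */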
static inline abi_ulong do_shmat(CPUArchState *cpu_env,
                                 int shmid, abi_ulong shmaddr, int shmflg)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i, ret;
    abi_ulong shmlba;

    /* shmat pointers are always untagged */

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }
    if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();

    /*
     * We're mapping shared memory, so ensure we generate code for parallel
     * execution and flush old translations.  This will work up to the level
     * supported by the host -- anything that requires EXCP_ATOMIC will not
     * be atomic with respect to an external process.
     */
    if (!(cpu->tcg_cflags & CF_PARALLEL)) {
        cpu->tcg_cflags |= CF_PARALLEL;
        tb_flush(cpu);
    }

    if (shmaddr) {
        host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
    } else {
        abi_ulong mmap_start;

        /* In order to use the host shmat, we need to honor host SHMLBA.  */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else {
            host_raddr = shmat(shmid, g2h_untagged(mmap_start),
                               shmflg | SHM_REMAP);
        }
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr = h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_RESET | PAGE_READ |
                   (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));

    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;
}
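/*
 * Note (added comment, not original code): do_shmat() works in both
 * address spaces at once.  g2h_untagged() turns the guest address into
 * the host pointer handed to the real shmat(), and h2g() maps the host
 * result back into a guest address, i.e.:
 *
 *   void *hp = shmat(shmid, g2h_untagged(shmaddr), shmflg);
 *   abi_ulong guest_addr = h2g((unsigned long)hp);
 */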
static inline abi_long do_shmdt(abi_ulong shmaddr)
{
    int i;
    abi_long rv;

    /* shmdt pointers are always untagged */

    mmap_lock();

    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
            shm_regions[i].in_use = false;
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
            break;
        }
    }
    rv = get_errno(shmdt(g2h_untagged(shmaddr)));

    mmap_unlock();

    return rv;
}
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos.  */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semtimedop(first, ptr, second, 0, false);
        break;
    case IPCOP_semtimedop:
    /*
     * The s390 sys_ipc variant has only five parameters instead of six
     * (as for default variant) and the only difference is the handling of
     * SEMTIMEDOP where on s390 the third parameter is used as a pointer
     * to a struct timespec where the generic variant uses fifth parameter.
     */
#if defined(TARGET_S390X)
        ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
#else
        ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
#endif
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                                tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
                      call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
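/*
 * Decoding example for the multiplexed ipc() argument (added comment):
 * the low 16 bits select the operation and the high 16 bits carry the
 * version, so a hypothetical call value of 0x00010012 decodes as
 * version 1 of operation 0x12 via
 *
 *   version = call >> 16;    // 1
 *   call &= 0xffff;          // 0x12
 */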
/* kernel structure types definitions */
enum {
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL
};

#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

#define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
                            / sizeof(struct fiemap_extent))
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof(*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
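/*
 * Sizing example for the variable-length fiemap payload (illustrative
 * comment only): with a hypothetical fm_extent_count of 4 the buffer
 * needed is sizeof(struct fiemap) + 4 * sizeof(struct fiemap_extent);
 * only when that exceeds MAX_STRUCT_SIZE (4096) does the code above
 * fall back to a heap allocation.
 */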
static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
                                int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    void *argptr;
    int ret;
    struct ifconf *host_ifconf;
    uint32_t outbufsz;
    const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
    const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
    int target_ifreq_size;
    int nb_ifreq;
    int free_buf = 0;
    int i;
    int target_ifc_len;
    abi_long target_ifc_buf;
    int host_ifc_len;
    char *host_ifc_buf;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr)
        return -TARGET_EFAULT;
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
    target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
    target_ifreq_size = thunk_type_size(ifreq_max_type, 0);

    if (target_ifc_buf != 0) {
        target_ifc_len = host_ifconf->ifc_len;
        nb_ifreq = target_ifc_len / target_ifreq_size;
        host_ifc_len = nb_ifreq * sizeof(struct ifreq);

        outbufsz = sizeof(*host_ifconf) + host_ifc_len;
        if (outbufsz > MAX_STRUCT_SIZE) {
            /*
             * We can't fit all the extents into the fixed size buffer.
             * Allocate one that is large enough and use it instead.
             */
            host_ifconf = g_try_malloc(outbufsz);
            if (!host_ifconf) {
                return -TARGET_ENOMEM;
            }
            memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
            free_buf = 1;
        }
        host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);

        host_ifconf->ifc_len = host_ifc_len;
    } else {
        host_ifc_buf = NULL;
    }
    host_ifconf->ifc_buf = host_ifc_buf;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
    if (!is_error(ret)) {
        /* convert host ifc_len to target ifc_len */

        nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
        target_ifc_len = nb_ifreq * target_ifreq_size;
        host_ifconf->ifc_len = target_ifc_len;

        /* restore target ifc_buf */

        host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;

        /* copy struct ifconf to target user */

        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr)
            return -TARGET_EFAULT;
        thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);

        if (target_ifc_buf != 0) {
            /* copy ifreq[] to target user */
            argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
            for (i = 0; i < nb_ifreq; i++) {
                thunk_convert(argptr + i * target_ifreq_size,
                              host_ifc_buf + i * sizeof(struct ifreq),
                              ifreq_arg_type, THUNK_TARGET);
            }
            unlock_user(argptr, target_ifc_buf, target_ifc_len);
        }
    }

    if (free_buf) {
        g_free(host_ifconf);
    }

    return ret;
}
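/*
 * Illustration (added comment): guest and host may disagree on
 * sizeof(struct ifreq), so the entry count is derived from the guest
 * length and re-multiplied for the host.  For example, a hypothetical
 * 320-byte guest buffer with a 32-byte target ifreq yields
 * nb_ifreq == 10, and the host buffer is sized as
 * 10 * sizeof(struct ifreq).
 */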
#if defined(CONFIG_USBFS)
#if HOST_LONG_BITS > 64
#error USBDEVFS thunks do not support >64 bit hosts yet.
#endif
struct live_urb {
    uint64_t target_urb_adr;
    uint64_t target_buf_adr;
    char *target_buf_ptr;
    struct usbdevfs_urb host_urb;
};
static GHashTable *usbdevfs_urb_hashtable(void)
{
    static GHashTable *urb_hashtable;

    if (!urb_hashtable) {
        urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
    }
    return urb_hashtable;
}

static void urb_hashtable_insert(struct live_urb *urb)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    g_hash_table_insert(urb_hashtable, urb, urb);
}

static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
}

static void urb_hashtable_remove(struct live_urb *urb)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    g_hash_table_remove(urb_hashtable, urb);
}
static abi_long
do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
                          int fd, int cmd, abi_long arg)
{
    const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
    const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
    struct live_urb *lurb;
    void *argptr;
    uint64_t hurb;
    int target_size;
    uintptr_t target_urb_adr;
    abi_long ret;

    target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);

    memset(buf_temp, 0, sizeof(uint64_t));
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (is_error(ret)) {
        return ret;
    }

    memcpy(&hurb, buf_temp, sizeof(uint64_t));
    lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
    if (!lurb->target_urb_adr) {
        return -TARGET_EFAULT;
    }
    urb_hashtable_remove(lurb);
    unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
                lurb->host_urb.buffer_length);
    lurb->target_buf_ptr = NULL;

    /* restore the guest buffer pointer */
    lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;

    /* update the guest urb struct */
    argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
    unlock_user(argptr, lurb->target_urb_adr, target_size);

    target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
    /* write back the urb handle */
    argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
    target_urb_adr = lurb->target_urb_adr;
    thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
    unlock_user(argptr, arg, target_size);

    g_free(lurb);
    return ret;
}
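/*
 * Note on the lookup trick above (added for clarity): the kernel hands
 * back the host urb pointer, and because host_urb is embedded inside
 * struct live_urb, the metadata is recovered with the classic
 * container-of computation:
 *
 *   lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
 *
 * This is why each submitted URB must live in its own heap allocation.
 */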
static abi_long
do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
                             uint8_t *buf_temp __attribute__((unused)),
                             int fd, int cmd, abi_long arg)
{
    struct live_urb *lurb;

    /* map target address back to host URB with metadata. */
    lurb = urb_hashtable_lookup(arg);
    if (!lurb) {
        return -TARGET_EFAULT;
    }
    return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
}
static abi_long
do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    abi_long ret;
    void *argptr;
    int rw_dir;
    struct live_urb *lurb;

    /*
     * each submitted URB needs to map to a unique ID for the
     * kernel, and that unique ID needs to be a pointer to
     * host memory.  hence, we need to malloc for each URB.
     * isochronous transfers have a variable length struct.
     */
    arg_type++;
    target_size = thunk_type_size(arg_type, THUNK_TARGET);

    /* construct host copy of urb and metadata */
    lurb = g_try_new0(struct live_urb, 1);
    if (!lurb) {
        return -TARGET_ENOMEM;
    }

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    lurb->target_urb_adr = arg;
    lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;

    /* buffer space used depends on endpoint type so lock the entire buffer */
    /* control type urbs should check the buffer contents for true direction */
    rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
    lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
                                     lurb->host_urb.buffer_length, 1);
    if (lurb->target_buf_ptr == NULL) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* update buffer pointer in host copy */
    lurb->host_urb.buffer = lurb->target_buf_ptr;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
    if (is_error(ret)) {
        unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
        g_free(lurb);
    } else {
        urb_hashtable_insert(lurb);
    }

    return ret;
}
#endif /* CONFIG_USBFS */
static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                            int cmd, abi_long arg)
{
    void *argptr;
    struct dm_ioctl *host_dm;
    abi_long guest_data;
    uint32_t guest_data_size;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    abi_long ret;
    void *big_buf = NULL;
    char *host_data;

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* buf_temp is too small, so fetch things into a bigger buffer */
    big_buf = g_malloc0(((struct dm_ioctl *)buf_temp)->data_size * 2);
    memcpy(big_buf, buf_temp, target_size);
    buf_temp = big_buf;
    host_dm = big_buf;

    guest_data = arg + host_dm->data_start;
    if ((guest_data - arg) < 0) {
        ret = -TARGET_EINVAL;
        goto out;
    }
    guest_data_size = host_dm->data_size - host_dm->data_start;
    host_data = (char *)host_dm + host_dm->data_start;

    argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }

    switch (ie->host_cmd) {
    case DM_REMOVE_ALL:
    case DM_LIST_DEVICES:
    case DM_DEV_CREATE:
    case DM_DEV_REMOVE:
    case DM_DEV_SUSPEND:
    case DM_DEV_STATUS:
    case DM_DEV_WAIT:
    case DM_TABLE_STATUS:
    case DM_TABLE_CLEAR:
    case DM_TABLE_DEPS:
    case DM_LIST_VERSIONS:
        /* no input data */
        break;
    case DM_DEV_RENAME:
    case DM_DEV_SET_GEOMETRY:
        /* data contains only strings */
        memcpy(host_data, argptr, guest_data_size);
        break;
    case DM_TARGET_MSG:
        memcpy(host_data, argptr, guest_data_size);
        *(uint64_t *)host_data = tswap64(*(uint64_t *)argptr);
        break;
    case DM_TABLE_LOAD:
    {
        void *gspec = argptr;
        void *cur_data = host_data;
        const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
        int spec_size = thunk_type_size(arg_type, 0);
        int i;

        for (i = 0; i < host_dm->target_count; i++) {
            struct dm_target_spec *spec = cur_data;
            uint32_t next;
            int slen;

            thunk_convert(spec, gspec, arg_type, THUNK_HOST);
            slen = strlen((char *)gspec + spec_size) + 1;
            next = spec->next;
            spec->next = sizeof(*spec) + slen;
            strcpy((char *)&spec[1], gspec + spec_size);
            gspec += next;
            cur_data += spec->next;
        }
        break;
    }
    default:
        ret = -TARGET_EINVAL;
        unlock_user(argptr, guest_data, 0);
        goto out;
    }
    unlock_user(argptr, guest_data, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (!is_error(ret)) {
        guest_data = arg + host_dm->data_start;
        guest_data_size = host_dm->data_size - host_dm->data_start;
        argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        switch (ie->host_cmd) {
        case DM_REMOVE_ALL:
        case DM_DEV_CREATE:
        case DM_DEV_REMOVE:
        case DM_DEV_RENAME:
        case DM_DEV_SUSPEND:
        case DM_DEV_STATUS:
        case DM_TABLE_LOAD:
        case DM_TABLE_CLEAR:
        case DM_TARGET_MSG:
        case DM_DEV_SET_GEOMETRY:
            /* no return data */
            break;
        case DM_LIST_DEVICES:
        {
            struct dm_name_list *nl = (void *)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
            int nl_size = 12; /* can't use thunk_size due to alignment */

            while (1) {
                uint32_t next = nl->next;
                if (next) {
                    nl->next = nl_size + (strlen(nl->name) + 1);
                }
                if (remaining_data < nl->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
                strcpy(cur_data + nl_size, nl->name);
                cur_data += nl->next;
                remaining_data -= nl->next;
                if (!next) {
                    break;
                }
                nl = (void *)nl + next;
            }
            break;
        }
        case DM_DEV_WAIT:
        case DM_TABLE_STATUS:
        {
            struct dm_target_spec *spec = (void *)host_dm + host_dm->data_start;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
            int spec_size = thunk_type_size(arg_type, 0);
            int i;

            for (i = 0; i < host_dm->target_count; i++) {
                uint32_t next = spec->next;
                int slen = strlen((char *)&spec[1]) + 1;
                spec->next = (cur_data - argptr) + spec_size + slen;
                if (guest_data_size < spec->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
                strcpy(cur_data + spec_size, (char *)&spec[1]);
                cur_data = argptr + spec->next;
                spec = (void *)host_dm + host_dm->data_start + next;
            }
            break;
        }
        case DM_TABLE_DEPS:
        {
            void *hdata = (void *)host_dm + host_dm->data_start;
            int count = *(uint32_t *)hdata;
            uint64_t *hdev = hdata + 8;
            uint64_t *gdev = argptr + 8;
            int i;

            *(uint32_t *)argptr = tswap32(count);
            for (i = 0; i < count; i++) {
                *gdev = tswap64(*hdev);
                gdev++;
                hdev++;
            }
            break;
        }
        case DM_LIST_VERSIONS:
        {
            struct dm_target_versions *vers =
                (void *)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
            int vers_size = thunk_type_size(arg_type, 0);

            while (1) {
                uint32_t next = vers->next;
                if (next) {
                    vers->next = vers_size + (strlen(vers->name) + 1);
                }
                if (remaining_data < vers->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
                strcpy(cur_data + vers_size, vers->name);
                cur_data += vers->next;
                remaining_data -= vers->next;
                if (!next) {
                    break;
                }
                vers = (void *)vers + next;
            }
            break;
        }
        default:
            unlock_user(argptr, guest_data, 0);
            ret = -TARGET_EINVAL;
            goto out;
        }
        unlock_user(argptr, guest_data, guest_data_size);

        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
            goto out;
        }
        thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);
    }
out:
    g_free(big_buf);
    return ret;
}
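/*
 * Layout reminder for the dm_ioctl buffer handling above (illustrative
 * comment): the fixed header and its variable payload travel in one
 * allocation, with data_start giving the payload offset:
 *
 *   [struct dm_ioctl ......][payload .........................]
 *   ^host_dm                ^(char *)host_dm + host_dm->data_start
 *
 * data_size covers the whole buffer, so the payload length is
 * data_size - data_start, which is what guest_data_size computes.
 */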
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    void *argptr;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;

    struct blkpg_ioctl_arg *host_blkpg = (void *)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert blkpg */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert blkpg->data */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));

out:
    return ret;
}
static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    const StructEntry *se;
    const argtype *field_types;
    const int *dst_offsets, *src_offsets;
    int target_size;
    void *argptr;
    abi_ulong *target_rt_dev_ptr = NULL;
    unsigned long *host_rt_dev_ptr = NULL;
    abi_long ret;
    int i;

    assert(ie->access == IOC_W);
    assert(*arg_type == TYPE_PTR);
    arg_type++;
    assert(*arg_type == TYPE_STRUCT);
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    arg_type++;
    assert(*arg_type == (int)STRUCT_rtentry);
    se = struct_entries + *arg_type++;
    assert(se->convert[0] == NULL);
    /* convert struct here to be able to catch rt_dev string */
    field_types = se->field_types;
    dst_offsets = se->field_offsets[THUNK_HOST];
    src_offsets = se->field_offsets[THUNK_TARGET];
    for (i = 0; i < se->nb_fields; i++) {
        if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
            assert(*field_types == TYPE_PTRVOID);
            target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
            host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
            if (*target_rt_dev_ptr != 0) {
                *host_rt_dev_ptr = (unsigned long)lock_user_string(
                                                  tswapal(*target_rt_dev_ptr));
                if (!*host_rt_dev_ptr) {
                    unlock_user(argptr, arg, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                *host_rt_dev_ptr = 0;
            }
            field_types++;
            continue;
        }
        field_types = thunk_convert(buf_temp + dst_offsets[i],
                                    argptr + src_offsets[i],
                                    field_types, THUNK_HOST);
    }
    unlock_user(argptr, arg, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));

    assert(host_rt_dev_ptr != NULL);
    assert(target_rt_dev_ptr != NULL);
    if (*host_rt_dev_ptr != 0) {
        unlock_user((void *)*host_rt_dev_ptr,
                    *target_rt_dev_ptr, 0);
    }
    return ret;
}
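/*
 * Why the field-by-field conversion above (added note): struct rtentry
 * is converted manually rather than with a single thunk_convert() call
 * because rt_dev is a pointer to a guest string that must be pulled
 * into host memory with lock_user_string() before the host ioctl sees
 * it, and unlocked again once the call returns.
 */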
static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int sig = target_to_host_signal(arg);
    return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
}
static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
                                    int fd, int cmd, abi_long arg)
{
    struct timeval tv;
    abi_long ret;

    ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
    if (is_error(ret)) {
        return ret;
    }

    if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
        if (copy_to_user_timeval(arg, &tv)) {
            return -TARGET_EFAULT;
        }
    } else {
        if (copy_to_user_timeval64(arg, &tv)) {
            return -TARGET_EFAULT;
        }
    }

    return ret;
}
static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
                                      int fd, int cmd, abi_long arg)
{
    struct timespec ts;
    abi_long ret;

    ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
    if (is_error(ret)) {
        return ret;
    }

    if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
        if (host_to_target_timespec(arg, &ts)) {
            return -TARGET_EFAULT;
        }
    } else {
        if (host_to_target_timespec64(arg, &ts)) {
            return -TARGET_EFAULT;
        }
    }

    return ret;
}
#ifdef TIOCGPTPEER
static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
    return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
}
#endif

#ifdef HAVE_DRM_H
static void unlock_drm_version(struct drm_version *host_ver,
                               struct target_drm_version *target_ver,
                               bool copy)
{
    unlock_user(host_ver->name, target_ver->name,
                copy ? host_ver->name_len : 0);
    unlock_user(host_ver->date, target_ver->date,
                copy ? host_ver->date_len : 0);
    unlock_user(host_ver->desc, target_ver->desc,
                copy ? host_ver->desc_len : 0);
}
static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
                                          struct target_drm_version *target_ver)
{
    memset(host_ver, 0, sizeof(*host_ver));

    __get_user(host_ver->name_len, &target_ver->name_len);
    if (host_ver->name_len) {
        host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
                                   target_ver->name_len, 0);
        if (!host_ver->name) {
            goto err;
        }
    }

    __get_user(host_ver->date_len, &target_ver->date_len);
    if (host_ver->date_len) {
        host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
                                   target_ver->date_len, 0);
        if (!host_ver->date) {
            goto err;
        }
    }

    __get_user(host_ver->desc_len, &target_ver->desc_len);
    if (host_ver->desc_len) {
        host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
                                   target_ver->desc_len, 0);
        if (!host_ver->desc) {
            goto err;
        }
    }

    return 0;
err:
    unlock_drm_version(host_ver, target_ver, false);
    return -TARGET_EFAULT;
}
static inline void host_to_target_drmversion(
                                          struct target_drm_version *target_ver,
                                          struct drm_version *host_ver)
{
    __put_user(host_ver->version_major, &target_ver->version_major);
    __put_user(host_ver->version_minor, &target_ver->version_minor);
    __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
    __put_user(host_ver->name_len, &target_ver->name_len);
    __put_user(host_ver->date_len, &target_ver->date_len);
    __put_user(host_ver->desc_len, &target_ver->desc_len);
    unlock_drm_version(host_ver, target_ver, true);
}
static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg)
{
    struct drm_version *ver;
    struct target_drm_version *target_ver;
    abi_long ret;

    switch (ie->host_cmd) {
    case DRM_IOCTL_VERSION:
        if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
            return -TARGET_EFAULT;
        }
        ver = (struct drm_version *)buf_temp;
        ret = target_to_host_drmversion(ver, target_ver);
        if (!is_error(ret)) {
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
            if (is_error(ret)) {
                unlock_drm_version(ver, target_ver, false);
            } else {
                host_to_target_drmversion(target_ver, ver);
            }
        }
        unlock_user_struct(target_ver, arg, 0);
        return ret;
    }
    return -TARGET_ENOSYS;
}
static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
                                           struct drm_i915_getparam *gparam,
                                           int fd, abi_long arg)
{
    abi_long ret;
    int value;
    struct target_drm_i915_getparam *target_gparam;

    if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
        return -TARGET_EFAULT;
    }

    __get_user(gparam->param, &target_gparam->param);
    gparam->value = &value;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
    put_user_s32(value, target_gparam->value);

    unlock_user_struct(target_gparam, arg, 0);
    return ret;
}
static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
                                  int fd, int cmd, abi_long arg)
{
    switch (ie->host_cmd) {
    case DRM_IOCTL_I915_GETPARAM:
        return do_ioctl_drm_i915_getparam(ie,
                                          (struct drm_i915_getparam *)buf_temp,
                                          fd, arg);
    default:
        return -TARGET_ENOSYS;
    }
}
#endif
static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
                                        int fd, int cmd, abi_long arg)
{
    struct tun_filter *filter = (struct tun_filter *)buf_temp;
    struct tun_filter *target_filter;
    char *target_addr;

    assert(ie->access == IOC_W);

    target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
    if (!target_filter) {
        return -TARGET_EFAULT;
    }
    filter->flags = tswap16(target_filter->flags);
    filter->count = tswap16(target_filter->count);
    unlock_user(target_filter, arg, 0);

    if (filter->count) {
        if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
            MAX_STRUCT_SIZE) {
            return -TARGET_EFAULT;
        }

        target_addr = lock_user(VERIFY_READ,
                                arg + offsetof(struct tun_filter, addr),
                                filter->count * ETH_ALEN, 1);
        if (!target_addr) {
            return -TARGET_EFAULT;
        }
        memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
        unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
    }

    return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
}
IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
#define IOCTL_IGNORE(cmd) \
    { TARGET_ ## cmd, 0, #cmd },
#include "ioctls.h"
    { 0, 0, },
};
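/*
 * Expansion example (illustrative comment): an IOCTL() line in the
 * included list, such as
 *
 *   IOCTL(BLKGETSIZE, IOC_R, MK_PTR(TYPE_ULONG))
 *
 * expands to an IOCTLEntry of
 *
 *   { TARGET_BLKGETSIZE, BLKGETSIZE, "BLKGETSIZE", IOC_R, 0,
 *     { MK_PTR(TYPE_ULONG) } },
 *
 * i.e. target command, host command, name, access mode, no special
 * handler, and the argument type description used by the thunk code.
 */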
/* ??? Implement proper locking for ioctls.  */
/* do_ioctl() must return target values and target errnos. */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    ie = ioctl_entries;
    for (;;) {
        if (ie->target_cmd == 0) {
            qemu_log_mask(
                LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
    if (ie->do_ioctl) {
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    } else if (!ie->host_cmd) {
        /* Some architectures define BSD ioctls in their headers
           that are not implemented in Linux. */
        return -TARGET_ENOSYS;
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
    case TYPE_LONG:
    case TYPE_ULONG:
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                      (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
static const bitmask_transtbl iflag_tbl[] = {
    { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
    { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
    { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
    { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
    { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
    { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
    { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
    { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
    { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
    { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
    { TARGET_IXON, TARGET_IXON, IXON, IXON },
    { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
    { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
    { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
    { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8 },
    { 0, 0, 0, 0 }
};
static const bitmask_transtbl oflag_tbl[] = {
    { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
    { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
    { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
    { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
    { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
    { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
    { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
    { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
    { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
    { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
    { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
    { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
    { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
    { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
    { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
    { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
    { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
    { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
    { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
    { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
    { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
    { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
    { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
    { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
    { 0, 0, 0, 0 }
};
static const bitmask_transtbl cflag_tbl[] = {
    { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
    { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
    { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
    { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
    { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
    { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
    { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
    { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
    { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
    { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
    { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
    { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
    { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
    { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
    { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
    { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
    { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
    { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
    { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
    { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
    { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
    { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
    { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
    { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
    { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
    { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
    { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
    { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
    { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
    { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
    { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
    { 0, 0, 0, 0 }
};
static const bitmask_transtbl lflag_tbl[] = {
    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
    { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC },
    { 0, 0, 0, 0 }
};
static void target_to_host_termios(void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}
static void host_to_target_termios(void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios),
               __alignof__(struct host_termios) },
    .print = print_termios,
};
static const bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
      MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
      MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
      MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
      MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
      MAP_NORESERVE, MAP_NORESERVE },
    { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
    /* MAP_STACK had been ignored by the kernel for quite some time.
       Recognize it for the target insofar as we do not want to pass
       it through to the host.  */
    { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
    { 0, 0, 0, 0 }
};
/*
 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
 *       TARGET_I386 is defined if TARGET_X86_64 is defined
 */
#if defined(TARGET_I386)

/* NOTE: there is really one LDT for all the threads */
static uint8_t *ldt_table;

static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
{
    int size;
    void *p;

    if (!ldt_table)
        return 0;
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
        size = bytecount;
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
    if (!p)
        return -TARGET_EFAULT;
    /* ??? Should this be byteswapped?  */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);
    return size;
}
/* XXX: add locking support */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h_untagged(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h_untagged(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0)) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
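/*
 * Descriptor encoding example (added comment): entry_1/entry_2 follow
 * the x86 segment descriptor layout.  For a hypothetical
 * base_addr == 0x12345678 and limit == 0xfffff:
 *
 *   entry_1 = (0x5678 << 16) | 0xffff   -> base 15:0 and limit 15:0
 *   entry_2 = 0x12000000 | 0x34 | 0xf0000 | flag bits
 *
 * i.e. base bits 31:24 and 23:16 land in entry_2 along with limit bits
 * 19:16 and the type/present/granularity bits assembled above.
 */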
/* specific and weird i386 syscalls */
static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
                              unsigned long bytecount)
{
    abi_long ret;

    switch (func) {
    case 0:
        ret = read_ldt(ptr, bytecount);
        break;
    case 1:
        ret = write_ldt(env, ptr, bytecount, 1);
        break;
    case 0x11:
        ret = write_ldt(env, ptr, bytecount, 0);
        break;
    default:
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#if defined(TARGET_ABI32)
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        for (i = TARGET_GDT_ENTRY_TLS_MIN; i <= TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0)) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    return -TARGET_ENOSYS;
}
#else
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    abi_long ret = 0;
    abi_ulong val;
    int idx;

    switch (code) {
    case TARGET_ARCH_SET_GS:
    case TARGET_ARCH_SET_FS:
        if (code == TARGET_ARCH_SET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        cpu_x86_load_seg(env, idx, 0);
        env->segs[idx].base = addr;
        break;
    case TARGET_ARCH_GET_GS:
    case TARGET_ARCH_GET_FS:
        if (code == TARGET_ARCH_GET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        val = env->segs[idx].base;
        if (put_user(val, addr, abi_ulong))
            ret = -TARGET_EFAULT;
        break;
    default:
        ret = -TARGET_EINVAL;
        break;
    }
    return ret;
}
#endif /* defined(TARGET_ABI32) */
#endif /* defined(TARGET_I386) */
/*
 * These constants are generic.  Supply any that are missing from the host.
 */
#ifndef PR_SET_NAME
# define PR_SET_NAME    15
# define PR_GET_NAME    16
#endif
#ifndef PR_SET_FP_MODE
# define PR_SET_FP_MODE 45
# define PR_GET_FP_MODE 46
# define PR_FP_MODE_FR   (1 << 0)
# define PR_FP_MODE_FRE  (1 << 1)
#endif
#ifndef PR_SVE_SET_VL
# define PR_SVE_SET_VL  50
# define PR_SVE_GET_VL  51
# define PR_SVE_VL_LEN_MASK  0xffff
# define PR_SVE_VL_INHERIT   (1 << 17)
#endif
#ifndef PR_PAC_RESET_KEYS
# define PR_PAC_RESET_KEYS  54
# define PR_PAC_APIAKEY   (1 << 0)
# define PR_PAC_APIBKEY   (1 << 1)
# define PR_PAC_APDAKEY   (1 << 2)
# define PR_PAC_APDBKEY   (1 << 3)
# define PR_PAC_APGAKEY   (1 << 4)
#endif
#ifndef PR_SET_TAGGED_ADDR_CTRL
# define PR_SET_TAGGED_ADDR_CTRL 55
# define PR_GET_TAGGED_ADDR_CTRL 56
# define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
#endif
#ifndef PR_MTE_TCF_SHIFT
# define PR_MTE_TCF_SHIFT       1
# define PR_MTE_TCF_NONE        (0UL << PR_MTE_TCF_SHIFT)
# define PR_MTE_TCF_SYNC        (1UL << PR_MTE_TCF_SHIFT)
# define PR_MTE_TCF_ASYNC       (2UL << PR_MTE_TCF_SHIFT)
# define PR_MTE_TCF_MASK        (3UL << PR_MTE_TCF_SHIFT)
# define PR_MTE_TAG_SHIFT       3
# define PR_MTE_TAG_MASK        (0xffffUL << PR_MTE_TAG_SHIFT)
#endif
#ifndef PR_SET_IO_FLUSHER
# define PR_SET_IO_FLUSHER 57
# define PR_GET_IO_FLUSHER 58
#endif
#ifndef PR_SET_SYSCALL_USER_DISPATCH
# define PR_SET_SYSCALL_USER_DISPATCH 59
#endif
#ifndef PR_SME_SET_VL
# define PR_SME_SET_VL  63
# define PR_SME_GET_VL  64
# define PR_SME_VL_LEN_MASK  0xffff
# define PR_SME_VL_INHERIT   (1 << 17)
#endif

#include "target_prctl.h"
static abi_long do_prctl_inval0(CPUArchState *env)
{
    return -TARGET_EINVAL;
}

static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
{
    return -TARGET_EINVAL;
}

#ifndef do_prctl_get_fp_mode
#define do_prctl_get_fp_mode do_prctl_inval0
#endif
#ifndef do_prctl_set_fp_mode
#define do_prctl_set_fp_mode do_prctl_inval1
#endif
#ifndef do_prctl_sve_get_vl
#define do_prctl_sve_get_vl do_prctl_inval0
#endif
#ifndef do_prctl_sve_set_vl
#define do_prctl_sve_set_vl do_prctl_inval1
#endif
#ifndef do_prctl_reset_keys
#define do_prctl_reset_keys do_prctl_inval1
#endif
#ifndef do_prctl_set_tagged_addr_ctrl
#define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
#endif
#ifndef do_prctl_get_tagged_addr_ctrl
#define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
#endif
#ifndef do_prctl_get_unalign
#define do_prctl_get_unalign do_prctl_inval1
#endif
#ifndef do_prctl_set_unalign
#define do_prctl_set_unalign do_prctl_inval1
#endif
#ifndef do_prctl_sme_get_vl
#define do_prctl_sme_get_vl do_prctl_inval0
#endif
#ifndef do_prctl_sme_set_vl
#define do_prctl_sme_set_vl do_prctl_inval1
#endif
static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
                         abi_long arg3, abi_long arg4, abi_long arg5)
{
    abi_long ret;

    switch (option) {
    case PR_GET_PDEATHSIG:
        {
            int deathsig;
            ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
                                  arg3, arg4, arg5));
            if (!is_error(ret) &&
                put_user_s32(host_to_target_signal(deathsig), arg2)) {
                return -TARGET_EFAULT;
            }
            return ret;
        }
    case PR_SET_PDEATHSIG:
        return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
                               arg3, arg4, arg5));
    case PR_GET_NAME:
        {
            void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 16);
            return ret;
        }
    case PR_SET_NAME:
        {
            void *name = lock_user(VERIFY_READ, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 0);
            return ret;
        }
    case PR_GET_FP_MODE:
        return do_prctl_get_fp_mode(env);
    case PR_SET_FP_MODE:
        return do_prctl_set_fp_mode(env, arg2);
    case PR_SVE_GET_VL:
        return do_prctl_sve_get_vl(env);
    case PR_SVE_SET_VL:
        return do_prctl_sve_set_vl(env, arg2);
    case PR_SME_GET_VL:
        return do_prctl_sme_get_vl(env);
    case PR_SME_SET_VL:
        return do_prctl_sme_set_vl(env, arg2);
    case PR_PAC_RESET_KEYS:
        if (arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_reset_keys(env, arg2);
    case PR_SET_TAGGED_ADDR_CTRL:
        if (arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_set_tagged_addr_ctrl(env, arg2);
    case PR_GET_TAGGED_ADDR_CTRL:
        if (arg2 || arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_get_tagged_addr_ctrl(env);

    case PR_GET_UNALIGN:
        return do_prctl_get_unalign(env, arg2);
    case PR_SET_UNALIGN:
        return do_prctl_set_unalign(env, arg2);

    case PR_CAP_AMBIENT:
    case PR_CAPBSET_READ:
    case PR_CAPBSET_DROP:
    case PR_GET_DUMPABLE:
    case PR_SET_DUMPABLE:
    case PR_GET_KEEPCAPS:
    case PR_SET_KEEPCAPS:
    case PR_GET_SECUREBITS:
    case PR_SET_SECUREBITS:
    case PR_GET_TIMING:
    case PR_SET_TIMING:
    case PR_GET_TIMERSLACK:
    case PR_SET_TIMERSLACK:
    case PR_MCE_KILL:
    case PR_MCE_KILL_GET:
    case PR_GET_NO_NEW_PRIVS:
    case PR_SET_NO_NEW_PRIVS:
    case PR_GET_IO_FLUSHER:
    case PR_SET_IO_FLUSHER:
        /* Some prctl options have no pointer arguments and we can pass on. */
        return get_errno(prctl(option, arg2, arg3, arg4, arg5));

    case PR_GET_CHILD_SUBREAPER:
    case PR_SET_CHILD_SUBREAPER:
    case PR_GET_SPECULATION_CTRL:
    case PR_SET_SPECULATION_CTRL:
    case PR_GET_TID_ADDRESS:
        /* TODO */
        return -TARGET_EINVAL;

    case PR_GET_FPEXC:
    case PR_SET_FPEXC:
        /* Was used for SPE on PowerPC. */
        return -TARGET_EINVAL;

    case PR_GET_ENDIAN:
    case PR_SET_ENDIAN:
    case PR_GET_FPEMU:
    case PR_SET_FPEMU:
    case PR_SET_MM:
    case PR_GET_SECCOMP:
    case PR_SET_SECCOMP:
    case PR_SET_SYSCALL_USER_DISPATCH:
    case PR_GET_THP_DISABLE:
    case PR_SET_THP_DISABLE:
    case PR_GET_TSC:
    case PR_SET_TSC:
        /* Disable to prevent the target disabling stuff we need. */
        return -TARGET_EINVAL;

    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
                      option);
        return -TARGET_EINVAL;
    }
}
#define NEW_STACK_SIZE 0x40000

static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
typedef struct {
    CPUArchState *env;
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    pthread_t thread;
    uint32_t tid;
    abi_ulong child_tidptr;
    abi_ulong parent_tidptr;
    sigset_t sigmask;
} new_thread_info;

static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = env_cpu(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = sys_gettid();
    task_settid(ts);
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = env_cpu(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    sigset_t sigmask;

    flags &= ~CLONE_IGNORED_FLAGS;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;
        }

        ts = g_new0(TaskState, 1);
        init_task_state(ts);

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        /*
         * If this is our first additional thread, we need to ensure we
         * generate code for parallel execution and flush old translations.
         * Do this now so that the copy gets CF_PARALLEL too.
         */
        if (!(cpu->tcg_cflags & CF_PARALLEL)) {
            cpu->tcg_cflags |= CF_PARALLEL;
            tb_flush(cpu);
        }

        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs_child(new_env, newsp, flags);
        cpu_clone_regs_parent(env, flags);
        new_cpu = env_cpu(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;

        if (flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (flags & CLONE_SETTLS) {
            cpu_set_tls (new_env, newtls);
        }

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (flags & CLONE_CHILD_SETTID) {
            info.child_tidptr = child_tidptr;
        }
        if (flags & CLONE_PARENT_SETTID) {
            info.parent_tidptr = parent_tidptr;
        }

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
        cpu->random_seed = qemu_guest_random_seed_thread_part1();

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;
        }

        /* We can't support custom termination signals */
        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            cpu_clone_regs_child(env, newsp, flags);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(sys_gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(sys_gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            cpu_clone_regs_parent(env, flags);
            fork_end(0);
        }
    }
    return ret;
}
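
/*
 * Illustrative sketch (not built): the flag sets do_fork() is written to
 * accept.  A typical guest libc pthread_create() requests a thread-like
 * combination that satisfies CLONE_THREAD_FLAGS, while a plain fork()
 * passes only a termination signal in CSIGNAL.  The mask names mirror
 * the ones used above; this is an assumption-free restatement of the
 * checks, not additional behaviour.
 */
#if 0
static void clone_flags_example(void)
{
    /* Roughly what glibc's pthread_create() asks for: */
    unsigned int thread_flags = CLONE_VM | CLONE_FS | CLONE_FILES |
                                CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM |
                                CLONE_SETTLS | CLONE_PARENT_SETTID |
                                CLONE_CHILD_CLEARTID;
    assert(thread_flags & CLONE_VM);
    assert((thread_flags & CLONE_THREAD_FLAGS) == CLONE_THREAD_FLAGS);

    /* Roughly what a guest fork() asks for: only the child signal. */
    unsigned int fork_flags = SIGCHLD;
    assert(!(fork_flags & CLONE_VM));
}
#endif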
/* warning : doesn't handle linux specific flags... */
static int target_to_host_fcntl_cmd(int cmd)
{
    int ret;

    switch(cmd) {
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
    case TARGET_F_OFD_GETLK:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = cmd;
        break;
    case TARGET_F_GETLK:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW:
        ret = F_SETLKW64;
        break;
    case TARGET_F_GETOWN:
        ret = F_GETOWN;
        break;
    case TARGET_F_SETOWN:
        ret = F_SETOWN;
        break;
    case TARGET_F_GETSIG:
        ret = F_GETSIG;
        break;
    case TARGET_F_SETSIG:
        ret = F_SETSIG;
        break;
#if TARGET_ABI_BITS == 32
    case TARGET_F_GETLK64:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK64:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW64:
        ret = F_SETLKW64;
        break;
#endif
    case TARGET_F_SETLEASE:
        ret = F_SETLEASE;
        break;
    case TARGET_F_GETLEASE:
        ret = F_GETLEASE;
        break;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        ret = F_DUPFD_CLOEXEC;
        break;
#endif
    case TARGET_F_NOTIFY:
        ret = F_NOTIFY;
        break;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = F_GETOWN_EX;
        break;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        ret = F_SETOWN_EX;
        break;
#endif
#ifdef F_SETPIPE_SZ
    case TARGET_F_SETPIPE_SZ:
        ret = F_SETPIPE_SZ;
        break;
    case TARGET_F_GETPIPE_SZ:
        ret = F_GETPIPE_SZ;
        break;
#endif
#ifdef F_ADD_SEALS
    case TARGET_F_ADD_SEALS:
        ret = F_ADD_SEALS;
        break;
    case TARGET_F_GET_SEALS:
        ret = F_GET_SEALS;
        break;
#endif
    default:
        ret = -TARGET_EINVAL;
        break;
    }

#if defined(__powerpc64__)
    /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
     * is not supported by kernel. The glibc fcntl call actually adjusts
     * them to 5, 6 and 7 before making the syscall(). Since we make the
     * syscall directly, adjust to what is supported by the kernel.
     */
    if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
        ret -= F_GETLK64 - 5;
    }
#endif

    return ret;
}
#define FLOCK_TRANSTBL \
    switch (type) { \
    TRANSTBL_CONVERT(F_RDLCK); \
    TRANSTBL_CONVERT(F_WRLCK); \
    TRANSTBL_CONVERT(F_UNLCK); \
    }

static int target_to_host_flock(int type)
{
#define TRANSTBL_CONVERT(a) case TARGET_##a: return a
    FLOCK_TRANSTBL
#undef TRANSTBL_CONVERT
    return -TARGET_EINVAL;
}

static int host_to_target_flock(int type)
{
#define TRANSTBL_CONVERT(a) case a: return TARGET_##a
    FLOCK_TRANSTBL
#undef TRANSTBL_CONVERT
    /* if we don't know how to convert the value coming
     * from the host we copy to the target field as-is
     */
    return type;
}

static inline abi_long copy_from_user_flock(struct flock64 *fl,
                                            abi_ulong target_flock_addr)
{
    struct target_flock *target_fl;
    int l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
                                          const struct flock64 *fl)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}

typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);

#if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
struct target_oabi_flock64 {
    abi_short l_type;
    abi_short l_whence;
    abi_llong l_start;
    abi_llong l_len;
    abi_int   l_pid;
} QEMU_PACKED;

static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
                                                   abi_ulong target_flock_addr)
{
    struct target_oabi_flock64 *target_fl;
    int l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_oabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
#endif

static inline abi_long copy_from_user_flock64(struct flock64 *fl,
                                              abi_ulong target_flock_addr)
{
    struct target_flock64 *target_fl;
    int l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
                                            const struct flock64 *fl)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
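
/*
 * Illustrative sketch (not built): how the copy helpers above pair up
 * around a host fcntl() call, in the same guest -> host -> guest pattern
 * that do_fcntl() below uses for F_GETLK.
 */
#if 0
static abi_long flock_roundtrip_example(int fd, int host_cmd, abi_ulong arg)
{
    struct flock64 fl64;
    abi_long ret = copy_from_user_flock(&fl64, arg);    /* guest -> host */
    if (ret) {
        return ret;
    }
    ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));   /* host syscall */
    if (ret == 0) {
        ret = copy_to_user_flock(arg, &fl64);           /* host -> guest */
    }
    return ret;
}
#endif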
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock64 fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock(arg, &fl64);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETLK64:
    case TARGET_F_OFD_GETLK:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl64);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    case TARGET_F_SETSIG:
        ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
        break;

    case TARGET_F_GETSIG:
        ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
        break;

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
    case TARGET_F_ADD_SEALS:
    case TARGET_F_GET_SEALS:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
#ifdef USE_UID16

static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}

static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}

static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}

static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}

static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */

/* We must do direct syscalls for setting UID/GID, because we want to
 * implement the Linux system call semantics of "change only for this thread",
 * not the libc/POSIX semantics of "change for all threads in process".
 * (See http://ewontfix.com/17/ for more details.)
 * We use the 32-bit version of the syscalls if present; if it is not
 * then either the host architecture supports 32-bit UIDs natively with
 * the standard syscall, or the 16-bit UID is the best we can do.
 */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif

_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
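
/*
 * Illustrative sketch (not built): because the wrappers above invoke the
 * raw syscall, a UID change applies only to the calling thread, which is
 * the kernel semantics the guest expects; glibc's setuid() would instead
 * broadcast the change to every thread in the process.
 */
#if 0
static void per_thread_setuid_example(uid_t uid)
{
    sys_setuid(uid);    /* this thread only (raw kernel semantics) */
    /* setuid(uid);        glibc: signals all threads to change too */
}
#endif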
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;

    thunk_init(STRUCT_MAX);

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
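
/*
 * Illustrative sketch (not built): the bit surgery the size-patching loop
 * above performs on one ioctl number.  An entry whose size field is
 * all-ones gets the real thunk size spliced into the TARGET_IOC size bits.
 */
#if 0
static unsigned patch_ioctl_size_example(unsigned target_cmd, unsigned size)
{
    /* Clear the placeholder size field... */
    target_cmd &= ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT);
    /* ...and insert the size computed by thunk_type_size(). */
    target_cmd |= size << TARGET_IOC_SIZESHIFT;
    return target_cmd;
}
#endif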
#ifdef TARGET_NR_truncate64
static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif

#ifdef TARGET_NR_ftruncate64
static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#if defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
                                                 abi_ulong target_addr)
{
    if (target_to_host_timespec(&host_its->it_interval, target_addr +
                                offsetof(struct target_itimerspec,
                                         it_interval)) ||
        target_to_host_timespec(&host_its->it_value, target_addr +
                                offsetof(struct target_itimerspec,
                                         it_value))) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif

#if defined(TARGET_NR_timer_settime64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
                                                   abi_ulong target_addr)
{
    if (target_to_host_timespec64(&host_its->it_interval, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_interval)) ||
        target_to_host_timespec64(&host_its->it_value, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_value))) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif

#if ((defined(TARGET_NR_timerfd_gettime) || \
      defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
      defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
                                                 struct itimerspec *host_its)
{
    if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
                                                       it_interval),
                                &host_its->it_interval) ||
        host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
                                                       it_value),
                                &host_its->it_value)) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif

#if ((defined(TARGET_NR_timerfd_gettime64) || \
      defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
      defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
                                                   struct itimerspec *host_its)
{
    if (host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_interval),
                                  &host_its->it_interval) ||
        host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_value),
                                  &host_its->it_value)) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif
#if defined(TARGET_NR_adjtimex) || \
    (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
static inline abi_long target_to_host_timex(struct timex *host_tx,
                                            abi_long target_addr)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_timex(abi_long target_addr,
                                            struct timex *host_tx)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
#endif
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
static inline abi_long target_to_host_timex64(struct timex *host_tx,
                                              abi_long target_addr)
{
    struct target__kernel_timex *target_tx;

    if (copy_from_user_timeval64(&host_tx->time, target_addr +
                                 offsetof(struct target__kernel_timex,
                                          time))) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_timex64(abi_long target_addr,
                                              struct timex *host_tx)
{
    struct target__kernel_timex *target_tx;

    if (copy_to_user_timeval64(target_addr +
                               offsetof(struct target__kernel_timex, time),
                               &host_tx->time)) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
#endif
#ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
#define sigev_notify_thread_id _sigev_un._tid
#endif

static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
                                               abi_ulong target_addr)
{
    struct target_sigevent *target_sevp;

    if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    /* This union is awkward on 64 bit systems because it has a 32 bit
     * integer and a pointer in it; we follow the conversion approach
     * used for handling sigval types in signal.c so the guest should get
     * the correct value back even if we did a 64 bit byteswap and it's
     * using the 32 bit integer.
     */
    host_sevp->sigev_value.sival_ptr =
        (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
    host_sevp->sigev_signo =
        target_to_host_signal(tswap32(target_sevp->sigev_signo));
    host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
    host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);

    unlock_user_struct(target_sevp, target_addr, 1);
    return 0;
}
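
/*
 * Illustrative sketch (not built): why sigev_value is swapped as one
 * full-width abi_long above.  A single whole-word swap keeps the pointer
 * view and the 32-bit integer view of the union consistent, so a guest
 * that stored sival_int reads the same value back after the round trip.
 */
#if 0
static void sigev_value_example(abi_ulong guest_sival)
{
    /* One full-width swap; no separate 32-bit path is needed. */
    void *host_ptr = (void *)(uintptr_t)tswapal(guest_sival);
    (void)host_ptr;
}
#endif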
#if defined(TARGET_NR_mlockall)
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }
#ifdef MCL_ONFAULT
    if (arg & TARGET_MCL_ONFAULT) {
        result |= MCL_ONFAULT;
    }
#endif

    return result;
}
#endif
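
/*
 * Illustrative sketch (not built): the converter above in use.  Target and
 * host MCL_* values need not coincide numerically, hence the bit-by-bit
 * translation rather than a passthrough.
 */
#if 0
static int mlockall_example(int target_arg)
{
    return mlockall(target_to_host_mlockall_arg(target_arg));
}
#endif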
#if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
     defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
     defined(TARGET_NR_newfstatat))
static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (cpu_env->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif
#if defined(TARGET_NR_statx) && defined(__NR_statx)
static inline abi_long host_to_target_statx(struct target_statx *host_stx,
                                            abi_ulong target_addr)
{
    struct target_statx *target_stx;

    if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    memset(target_stx, 0, sizeof(*target_stx));

    __put_user(host_stx->stx_mask, &target_stx->stx_mask);
    __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
    __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
    __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
    __put_user(host_stx->stx_uid, &target_stx->stx_uid);
    __put_user(host_stx->stx_gid, &target_stx->stx_gid);
    __put_user(host_stx->stx_mode, &target_stx->stx_mode);
    __put_user(host_stx->stx_ino, &target_stx->stx_ino);
    __put_user(host_stx->stx_size, &target_stx->stx_size);
    __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
    __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
    __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
    __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
    __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
    __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
    __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
    __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
    __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
    __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
    __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
    __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
    __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
    __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);

    unlock_user_struct(target_stx, target_addr, 1);

    return 0;
}
#endif
static int do_sys_futex(int *uaddr, int op, int val,
                        const struct timespec *timeout, int *uaddr2,
                        int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    g_assert_not_reached();
}

static int do_safe_futex(int *uaddr, int op, int val,
                         const struct timespec *timeout, int *uaddr2,
                         int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
                                           val3));
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#endif /* HOST_LONG_BITS == 64 */
    return -TARGET_ENOSYS;
}
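
/*
 * Illustrative sketch (not built): how the exit path in do_syscall1() below
 * uses do_sys_futex() to wake a waiter on a CLONE_CHILD_CLEARTID address
 * when a thread terminates; this mirrors the TARGET_NR_exit handling.
 */
#if 0
static void child_cleartid_example(CPUState *cpu, TaskState *ts)
{
    if (ts->child_tidptr) {
        put_user_u32(0, ts->child_tidptr);          /* clear the tid word */
        do_sys_futex(g2h(cpu, ts->child_tidptr),    /* wake any joiners */
                     FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
    }
}
#endif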
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  However they're probably useless because guest atomic
   operations won't work either.  */
#if defined(TARGET_NR_futex)
static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val,
                    target_ulong timeout, target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            target_to_host_timespec(pts, timeout);
        } else {
            pts = NULL;
        }
        return do_safe_futex(g2h(cpu, uaddr),
                             op, tswap32(val), pts, NULL, val3);
    case FUTEX_WAKE:
        return do_safe_futex(g2h(cpu, uaddr),
                             op, val, NULL, NULL, 0);
    case FUTEX_FD:
        return do_safe_futex(g2h(cpu, uaddr),
                             op, val, NULL, NULL, 0);
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
                             (base_op == FUTEX_CMP_REQUEUE
                              ? tswap32(val3) : val3));
    default:
        return -TARGET_ENOSYS;
    }
}
#endif

#if defined(TARGET_NR_futex_time64)
static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op,
                           int val, target_ulong timeout,
                           target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            if (target_to_host_timespec64(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            pts = NULL;
        }
        return do_safe_futex(g2h(cpu, uaddr), op,
                             tswap32(val), pts, NULL, val3);
    case FUTEX_WAKE:
        return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
    case FUTEX_FD:
        return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
                             (base_op == FUTEX_CMP_REQUEUE
                              ? tswap32(val3) : val3));
    default:
        return -TARGET_ENOSYS;
    }
}
#endif
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif

#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
#if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)

static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
#endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
static int open_self_cmdline(CPUArchState *cpu_env, int fd)
{
    CPUState *cpu = env_cpu(cpu_env);
    struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
    int i;

    for (i = 0; i < bprm->argc; i++) {
        size_t len = strlen(bprm->argv[i]) + 1;

        if (write(fd, bprm->argv[i], len) != len) {
            return -1;
        }
    }

    return 0;
}
static int open_self_maps(CPUArchState *cpu_env, int fd)
{
    CPUState *cpu = env_cpu(cpu_env);
    TaskState *ts = cpu->opaque;
    GSList *map_info = read_self_maps();
    GSList *s;
    int count;

    for (s = map_info; s; s = g_slist_next(s)) {
        MapInfo *e = (MapInfo *) s->data;

        if (h2g_valid(e->start)) {
            unsigned long min = e->start;
            unsigned long max = e->end;
            int flags = page_get_flags(h2g(min));
            const char *path;

            max = h2g_valid(max - 1) ?
                max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;

            if (page_check_range(h2g(min), max - min, flags) == -1) {
                continue;
            }

            if (h2g(min) == ts->info->stack_limit) {
                path = "[stack]";
            } else {
                path = e->path;
            }

            count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
                            " %c%c%c%c %08" PRIx64 " %s %"PRId64,
                            h2g(min), h2g(max - 1) + 1,
                            (flags & PAGE_READ) ? 'r' : '-',
                            (flags & PAGE_WRITE_ORG) ? 'w' : '-',
                            (flags & PAGE_EXEC) ? 'x' : '-',
                            e->is_priv ? 'p' : 's',
                            (uint64_t) e->offset, e->dev, e->inode);
            if (path) {
                dprintf(fd, "%*s%s\n", 73 - count, "", path);
            } else {
                dprintf(fd, "\n");
            }
        }
    }

    free_self_maps(map_info);

#ifdef TARGET_VSYSCALL_PAGE
    /*
     * We only support execution from the vsyscall page.
     * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
     */
    count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
                    " --xp 00000000 00:00 0",
                    TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
    dprintf(fd, "%*s%s\n", 73 - count, "", "[vsyscall]");
#endif

    return 0;
}
*cpu_env
, int fd
)
8060 CPUState
*cpu
= env_cpu(cpu_env
);
8061 TaskState
*ts
= cpu
->opaque
;
8062 g_autoptr(GString
) buf
= g_string_new(NULL
);
8065 for (i
= 0; i
< 44; i
++) {
8068 g_string_printf(buf
, FMT_pid
" ", getpid());
8069 } else if (i
== 1) {
8071 gchar
*bin
= g_strrstr(ts
->bprm
->argv
[0], "/");
8072 bin
= bin
? bin
+ 1 : ts
->bprm
->argv
[0];
8073 g_string_printf(buf
, "(%.15s) ", bin
);
8074 } else if (i
== 3) {
8076 g_string_printf(buf
, FMT_pid
" ", getppid());
8077 } else if (i
== 21) {
8079 g_string_printf(buf
, "%" PRIu64
" ", ts
->start_boottime
);
8080 } else if (i
== 27) {
8082 g_string_printf(buf
, TARGET_ABI_FMT_ld
" ", ts
->info
->start_stack
);
8084 /* for the rest, there is MasterCard */
8085 g_string_printf(buf
, "0%c", i
== 43 ? '\n' : ' ');
8088 if (write(fd
, buf
->str
, buf
->len
) != buf
->len
) {
static int open_self_auxv(CPUArchState *cpu_env, int fd)
{
    CPUState *cpu = env_cpu(cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}

#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
    defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}
#endif
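
/*
 * Illustrative sketch (not built): paths that is_proc_myself() accepts for
 * the entry "maps", assuming the emulated process's pid is 42.
 */
#if 0
static void is_proc_myself_example(void)
{
    assert(is_proc_myself("/proc/self/maps", "maps"));
    assert(is_proc_myself("/proc/42/maps", "maps"));  /* only if 42 == getpid() */
    assert(!is_proc_myself("/proc/41/maps", "maps"));
    assert(!is_proc_myself("/proc/self/smaps", "maps"));
}
#endif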
#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
static int open_net_route(CPUArchState *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -1;
    }

    /* read header */

    read = getline(&line, &len, fp);
    dprintf(fd, "%s", line);

    /* read routes */

    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        int fields;

        fields = sscanf(line,
                        "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                        iface, &dest, &gw, &flags, &refcnt, &use, &metric,
                        &mask, &mtu, &window, &irtt);
        if (fields != 11) {
            continue;
        }
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
#endif
#if defined(TARGET_SPARC)
static int open_cpuinfo(CPUArchState *cpu_env, int fd)
{
    dprintf(fd, "type\t\t: sun4u\n");
    return 0;
}
#endif

#if defined(TARGET_HPPA)
static int open_cpuinfo(CPUArchState *cpu_env, int fd)
{
    dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
    dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
    dprintf(fd, "capabilities\t: os32\n");
    dprintf(fd, "model\t\t: 9000/778/B160L\n");
    dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
    return 0;
}
#endif

#if defined(TARGET_M68K)
static int open_hardware(CPUArchState *cpu_env, int fd)
{
    dprintf(fd, "Model:\t\tqemu-m68k\n");
    return 0;
}
#endif
static int do_openat(CPUArchState *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;
        int (*fill)(CPUArchState *cpu_env, int fd);
        int (*cmp)(const char *s1, const char *s2);
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
        { "/proc/net/route", open_net_route, is_proc },
#endif
#if defined(TARGET_SPARC) || defined(TARGET_HPPA)
        { "/proc/cpuinfo", open_cpuinfo, is_proc },
#endif
#if defined(TARGET_M68K)
        { "/proc/hardware", open_hardware, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    if (is_proc_myself(pathname, "exe")) {
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
#define TIMER_MAGIC 0x0caf0000
#define TIMER_MAGIC_MASK 0xffff0000

/* Convert QEMU provided timer ID back to internal 16bit index format */
static target_timer_t get_timer_id(abi_long arg)
{
    target_timer_t timerid = arg;

    if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
        return -TARGET_EINVAL;
    }

    timerid &= 0xffff;

    if (timerid >= ARRAY_SIZE(g_posix_timers)) {
        return -TARGET_EINVAL;
    }

    return timerid;
}
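
/*
 * Illustrative sketch (not built): the timer ID handed back to the guest is
 * the small table index tagged with TIMER_MAGIC in the upper 16 bits, which
 * get_timer_id() above strips and validates.
 */
#if 0
static abi_long make_guest_timer_id_example(int index)
{
    return TIMER_MAGIC | index;    /* e.g. index 3 -> 0x0caf0003 */
}
#endif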
static int target_to_host_cpu_mask(unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }
    memset(host_mask, 0, host_size);

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val;

        __get_user(val, &target_mask[i]);
        for (j = 0; j < target_bits; j++, bit++) {
            if (val & (1UL << j)) {
                host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
            }
        }
    }

    unlock_user(target_mask, target_addr, 0);
    return 0;
}

static int host_to_target_cpu_mask(const unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val = 0;

        for (j = 0; j < target_bits; j++, bit++) {
            if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
                val |= 1UL << j;
            }
        }
        __put_user(val, &target_mask[i]);
    }

    unlock_user(target_mask, target_addr, target_size);
    return 0;
}
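
/*
 * Illustrative sketch (not built): how a sched_getaffinity handler can use
 * the converter above.  The host mask buffer is sized to cover the target
 * mask, satisfying the assert in host_to_target_cpu_mask(); the syscall
 * wrapper name is an assumption based on the direct-syscall wrappers used
 * elsewhere in this file.
 */
#if 0
static abi_long getaffinity_example(pid_t pid, abi_ulong target_addr,
                                    size_t target_size)
{
    unsigned long host_mask[64];   /* assumed >= target_size bytes */
    abi_long ret = get_errno(sys_sched_getaffinity(pid, sizeof(host_mask),
                                                   host_mask));
    if (!is_error(ret)) {
        ret = host_to_target_cpu_mask(host_mask, sizeof(host_mask),
                                      target_addr, target_size);
    }
    return ret;
}
#endif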
#ifdef TARGET_NR_getdents
static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
{
    g_autofree void *hdirp = NULL;
    void *tdirp;
    int hlen, hoff, toff;
    int hreclen, treclen;
    off64_t prev_diroff = 0;

    hdirp = g_try_malloc(count);
    if (!hdirp) {
        return -TARGET_ENOMEM;
    }

#ifdef EMULATE_GETDENTS_WITH_GETDENTS
    hlen = sys_getdents(dirfd, hdirp, count);
#else
    hlen = sys_getdents64(dirfd, hdirp, count);
#endif

    hlen = get_errno(hlen);
    if (is_error(hlen)) {
        return hlen;
    }

    tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
    if (!tdirp) {
        return -TARGET_EFAULT;
    }

    for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
        struct linux_dirent *hde = hdirp + hoff;
#else
        struct linux_dirent64 *hde = hdirp + hoff;
#endif
        struct target_dirent *tde = tdirp + toff;
        int namelen;
        uint8_t type;

        namelen = strlen(hde->d_name);
        hreclen = hde->d_reclen;
        treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
        treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));

        if (toff + treclen > count) {
            /*
             * If the host struct is smaller than the target struct, or
             * requires less alignment and thus packs into less space,
             * then the host can return more entries than we can pass
             * on to the guest.
             */
            if (toff == 0) {
                toff = -TARGET_EINVAL; /* result buffer is too small */
                break;
            }
            /*
             * Return what we have, resetting the file pointer to the
             * location of the first record not returned.
             */
            lseek64(dirfd, prev_diroff, SEEK_SET);
            break;
        }

        prev_diroff = hde->d_off;
        tde->d_ino = tswapal(hde->d_ino);
        tde->d_off = tswapal(hde->d_off);
        tde->d_reclen = tswap16(treclen);
        memcpy(tde->d_name, hde->d_name, namelen + 1);

        /*
         * The getdents type is in what was formerly a padding byte at the
         * end of the structure.
         */
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
        type = *((uint8_t *)hde + hreclen - 1);
#else
        type = hde->d_type;
#endif
        *((uint8_t *)tde + treclen - 1) = type;
    }

    unlock_user(tdirp, arg2, toff);
    return toff;
}
#endif /* TARGET_NR_getdents */

#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
{
    g_autofree void *hdirp = NULL;
    void *tdirp;
    int hlen, hoff, toff;
    int hreclen, treclen;
    off64_t prev_diroff = 0;

    hdirp = g_try_malloc(count);
    if (!hdirp) {
        return -TARGET_ENOMEM;
    }

    hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
    if (is_error(hlen)) {
        return hlen;
    }

    tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
    if (!tdirp) {
        return -TARGET_EFAULT;
    }

    for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
        struct linux_dirent64 *hde = hdirp + hoff;
        struct target_dirent64 *tde = tdirp + toff;
        int namelen;

        namelen = strlen(hde->d_name) + 1;
        hreclen = hde->d_reclen;
        treclen = offsetof(struct target_dirent64, d_name) + namelen;
        treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));

        if (toff + treclen > count) {
            /*
             * If the host struct is smaller than the target struct, or
             * requires less alignment and thus packs into less space,
             * then the host can return more entries than we can pass
             * on to the guest.
             */
            if (toff == 0) {
                toff = -TARGET_EINVAL; /* result buffer is too small */
                break;
            }
            /*
             * Return what we have, resetting the file pointer to the
             * location of the first record not returned.
             */
            lseek64(dirfd, prev_diroff, SEEK_SET);
            break;
        }

        prev_diroff = hde->d_off;
        tde->d_ino = tswap64(hde->d_ino);
        tde->d_off = tswap64(hde->d_off);
        tde->d_reclen = tswap16(treclen);
        tde->d_type = hde->d_type;
        memcpy(tde->d_name, hde->d_name, namelen);
    }

    unlock_user(tdirp, arg2, toff);
    return toff;
}
#endif /* TARGET_NR_getdents64 */
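
/*
 * Illustrative sketch (not built): the target record length computed in the
 * loops above; header plus NUL-terminated name, rounded up to the target
 * dirent alignment so consecutive records stay properly aligned.
 */
#if 0
static int target_dirent64_reclen_example(const char *name)
{
    int treclen = offsetof(struct target_dirent64, d_name) + strlen(name) + 1;
    return QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
}
#endif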
#if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
_syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
#endif
8542 /* This is an internal helper for do_syscall so that it is easier
8543 * to have a single return point, so that actions, such as logging
8544 * of syscall results, can be performed.
8545 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8547 static abi_long
do_syscall1(CPUArchState
*cpu_env
, int num
, abi_long arg1
,
8548 abi_long arg2
, abi_long arg3
, abi_long arg4
,
8549 abi_long arg5
, abi_long arg6
, abi_long arg7
,
8552 CPUState
*cpu
= env_cpu(cpu_env
);
8554 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8555 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8556 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8557 || defined(TARGET_NR_statx)
8560 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8561 || defined(TARGET_NR_fstatfs)
8567 case TARGET_NR_exit
:
8568 /* In old applications this may be used to implement _exit(2).
8569 However in threaded applications it is used for thread termination,
8570 and _exit_group is used for application termination.
8571 Do thread termination if we have more then one thread. */
8573 if (block_signals()) {
8574 return -QEMU_ERESTARTSYS
;
8577 pthread_mutex_lock(&clone_lock
);
8579 if (CPU_NEXT(first_cpu
)) {
8580 TaskState
*ts
= cpu
->opaque
;
8582 object_property_set_bool(OBJECT(cpu
), "realized", false, NULL
);
8583 object_unref(OBJECT(cpu
));
8585 * At this point the CPU should be unrealized and removed
8586 * from cpu lists. We can clean-up the rest of the thread
8587 * data without the lock held.
8590 pthread_mutex_unlock(&clone_lock
);
8592 if (ts
->child_tidptr
) {
8593 put_user_u32(0, ts
->child_tidptr
);
8594 do_sys_futex(g2h(cpu
, ts
->child_tidptr
),
8595 FUTEX_WAKE
, INT_MAX
, NULL
, NULL
, 0);
8599 rcu_unregister_thread();
8603 pthread_mutex_unlock(&clone_lock
);
8604 preexit_cleanup(cpu_env
, arg1
);
8606 return 0; /* avoid warning */
8607 case TARGET_NR_read
:
8608 if (arg2
== 0 && arg3
== 0) {
8609 return get_errno(safe_read(arg1
, 0, 0));
8611 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
8612 return -TARGET_EFAULT
;
8613 ret
= get_errno(safe_read(arg1
, p
, arg3
));
8615 fd_trans_host_to_target_data(arg1
)) {
8616 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
8618 unlock_user(p
, arg2
, ret
);
8621 case TARGET_NR_write
:
8622 if (arg2
== 0 && arg3
== 0) {
8623 return get_errno(safe_write(arg1
, 0, 0));
8625 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
8626 return -TARGET_EFAULT
;
8627 if (fd_trans_target_to_host_data(arg1
)) {
8628 void *copy
= g_malloc(arg3
);
8629 memcpy(copy
, p
, arg3
);
8630 ret
= fd_trans_target_to_host_data(arg1
)(copy
, arg3
);
8632 ret
= get_errno(safe_write(arg1
, copy
, ret
));
8636 ret
= get_errno(safe_write(arg1
, p
, arg3
));
8638 unlock_user(p
, arg2
, 0);
8641 #ifdef TARGET_NR_open
8642 case TARGET_NR_open
:
8643 if (!(p
= lock_user_string(arg1
)))
8644 return -TARGET_EFAULT
;
8645 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
8646 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
8648 fd_trans_unregister(ret
);
8649 unlock_user(p
, arg1
, 0);
8652 case TARGET_NR_openat
:
8653 if (!(p
= lock_user_string(arg2
)))
8654 return -TARGET_EFAULT
;
8655 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
8656 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
8658 fd_trans_unregister(ret
);
8659 unlock_user(p
, arg2
, 0);
8661 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8662 case TARGET_NR_name_to_handle_at
:
8663 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
8666 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8667 case TARGET_NR_open_by_handle_at
:
8668 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
8669 fd_trans_unregister(ret
);
8672 case TARGET_NR_close
:
8673 fd_trans_unregister(arg1
);
8674 return get_errno(close(arg1
));
8677 return do_brk(arg1
);
8678 #ifdef TARGET_NR_fork
8679 case TARGET_NR_fork
:
8680 return get_errno(do_fork(cpu_env
, TARGET_SIGCHLD
, 0, 0, 0, 0));
8682 #ifdef TARGET_NR_waitpid
8683 case TARGET_NR_waitpid
:
8686 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
8687 if (!is_error(ret
) && arg2
&& ret
8688 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
8689 return -TARGET_EFAULT
;
8693 #ifdef TARGET_NR_waitid
8694 case TARGET_NR_waitid
:
8698 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
8699 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
8700 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
8701 return -TARGET_EFAULT
;
8702 host_to_target_siginfo(p
, &info
);
8703 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
#ifdef TARGET_NR_creat /* not on alpha */
    case TARGET_NR_creat:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(creat(p, arg2));
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_link
    case TARGET_NR_link:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(link(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_linkat)
    case TARGET_NR_linkat:
        {
            void *p2 = NULL;
            if (!arg2 || !arg4)
                return -TARGET_EFAULT;
            p = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
            unlock_user(p, arg2, 0);
            unlock_user(p2, arg4, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_unlink
    case TARGET_NR_unlink:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(unlink(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_unlinkat)
    case TARGET_NR_unlinkat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(unlinkat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_execve:
        {
            char **argp, **envp;
            int argc, envc;
            abi_ulong gp;
            abi_ulong guest_argp;
            abi_ulong guest_envp;
            abi_ulong addr;
            char **q;

            argc = 0;
            guest_argp = arg2;
            for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
                if (get_user_ual(addr, gp))
                    return -TARGET_EFAULT;
                if (!addr)
                    break;
                argc++;
            }
            envc = 0;
            guest_envp = arg3;
            for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
                if (get_user_ual(addr, gp))
                    return -TARGET_EFAULT;
                if (!addr)
                    break;
                envc++;
            }

            argp = g_new0(char *, argc + 1);
            envp = g_new0(char *, envc + 1);

            for (gp = guest_argp, q = argp; gp;
                 gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp))
                    goto execve_efault;
                if (!addr)
                    break;
                if (!(*q = lock_user_string(addr)))
                    goto execve_efault;
            }
            *q = NULL;

            for (gp = guest_envp, q = envp; gp;
                 gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp))
                    goto execve_efault;
                if (!addr)
                    break;
                if (!(*q = lock_user_string(addr)))
                    goto execve_efault;
            }
            *q = NULL;

            if (!(p = lock_user_string(arg1)))
                goto execve_efault;
            /* Although execve() is not an interruptible syscall it is
             * a special case where we must use the safe_syscall wrapper:
             * if we allow a signal to happen before we make the host
             * syscall then we will 'lose' it, because at the point of
             * execve the process leaves QEMU's control. So we use the
             * safe syscall wrapper to ensure that we either take the
             * signal as a guest signal, or else it does not happen
             * before the execve completes and makes it the other
             * program's problem.
             */
            ret = get_errno(safe_execve(p, argp, envp));
            unlock_user(p, arg1, 0);

            goto execve_end;

        execve_efault:
            ret = -TARGET_EFAULT;

        execve_end:
            for (gp = guest_argp, q = argp; *q;
                 gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp)
                    || !addr)
                    break;
                unlock_user(*q, addr, 0);
            }
            for (gp = guest_envp, q = envp; *q;
                 gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp)
                    || !addr)
                    break;
                unlock_user(*q, addr, 0);
            }

            g_free(argp);
            g_free(envp);
        }
        return ret;
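    /*
     * The execve path above makes two passes over the guest argv/envp
     * tables: one to count entries so argp/envp can be sized, and one to
     * lock each guest string in place for the host call. The cleanup loops
     * re-read the guest pointers rather than caching them, since
     * lock_user_string() only pinned the individual strings, not the
     * pointer arrays themselves.
     */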
    case TARGET_NR_chdir:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chdir(p));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_time
    case TARGET_NR_time:
        {
            time_t host_time;
            ret = get_errno(time(&host_time));
            if (!is_error(ret)
                && arg1
                && put_user_sal(host_time, arg1))
                return -TARGET_EFAULT;
        }
        return ret;
#endif
#ifdef TARGET_NR_mknod
    case TARGET_NR_mknod:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(mknod(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_mknodat)
    case TARGET_NR_mknodat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(mknodat(arg1, p, arg3, arg4));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_chmod
    case TARGET_NR_chmod:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chmod(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_lseek
    case TARGET_NR_lseek:
        return get_errno(lseek(arg1, arg2, arg3));
#endif
#if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxpid:
        cpu_env->ir[IR_A4] = getppid();
        return get_errno(getpid());
#endif
#ifdef TARGET_NR_getpid
    case TARGET_NR_getpid:
        return get_errno(getpid());
#endif
    case TARGET_NR_mount:
        {
            /* need to look at the data field */
            void *p2, *p3;

            if (arg1) {
                p = lock_user_string(arg1);
                if (!p) {
                    return -TARGET_EFAULT;
                }
            } else {
                p = NULL;
            }

            p2 = lock_user_string(arg2);
            if (!p2) {
                if (arg1) {
                    unlock_user(p, arg1, 0);
                }
                return -TARGET_EFAULT;
            }

            if (arg3) {
                p3 = lock_user_string(arg3);
                if (!p3) {
                    if (arg1) {
                        unlock_user(p, arg1, 0);
                    }
                    unlock_user(p2, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                p3 = NULL;
            }

            /* FIXME - arg5 should be locked, but it isn't clear how to
             * do that since it's not guaranteed to be a NULL-terminated
             * string.
             */
            if (!arg5) {
                ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
            } else {
                ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
            }
            ret = get_errno(ret);

            if (arg1) {
                unlock_user(p, arg1, 0);
            }
            unlock_user(p2, arg2, 0);
            if (arg3) {
                unlock_user(p3, arg3, 0);
            }
        }
        return ret;
#if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
#if defined(TARGET_NR_umount)
    case TARGET_NR_umount:
#endif
#if defined(TARGET_NR_oldumount)
    case TARGET_NR_oldumount:
#endif
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(umount(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
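    /*
     * In the mount path above, the source (arg1) and fs-specific data
     * (arg5) arguments may legitimately be absent, which is why only the
     * string arguments are individually locked and arg5 is translated with
     * a bare g2h(): per the FIXME, its length cannot be known here because
     * the data blob's format depends on the filesystem.
     */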
#ifdef TARGET_NR_stime /* not on alpha */
    case TARGET_NR_stime:
        {
            struct timespec ts;
            ts.tv_nsec = 0;
            if (get_user_sal(ts.tv_sec, arg1)) {
                return -TARGET_EFAULT;
            }
            return get_errno(clock_settime(CLOCK_REALTIME, &ts));
        }
#endif
#ifdef TARGET_NR_alarm /* not on alpha */
    case TARGET_NR_alarm:
        return alarm(arg1);
#endif
#ifdef TARGET_NR_pause /* not on alpha */
    case TARGET_NR_pause:
        if (!block_signals()) {
            sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
        }
        return -TARGET_EINTR;
#endif
#ifdef TARGET_NR_utime
    case TARGET_NR_utime:
        {
            struct utimbuf tbuf, *host_tbuf;
            struct target_utimbuf *target_tbuf;
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
                    return -TARGET_EFAULT;
                tbuf.actime = tswapal(target_tbuf->actime);
                tbuf.modtime = tswapal(target_tbuf->modtime);
                unlock_user_struct(target_tbuf, arg2, 0);
                host_tbuf = &tbuf;
            } else {
                host_tbuf = NULL;
            }
            if (!(p = lock_user_string(arg1)))
                return -TARGET_EFAULT;
            ret = get_errno(utime(p, host_tbuf));
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_utimes
    case TARGET_NR_utimes:
        {
            struct timeval *tvp, tv[2];
            if (arg2) {
                if (copy_from_user_timeval(&tv[0], arg2)
                    || copy_from_user_timeval(&tv[1],
                                              arg2 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
                tvp = tv;
            } else {
                tvp = NULL;
            }
            if (!(p = lock_user_string(arg1)))
                return -TARGET_EFAULT;
            ret = get_errno(utimes(p, tvp));
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_futimesat)
    case TARGET_NR_futimesat:
        {
            struct timeval *tvp, tv[2];
            if (arg3) {
                if (copy_from_user_timeval(&tv[0], arg3)
                    || copy_from_user_timeval(&tv[1],
                                              arg3 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
                tvp = tv;
            } else {
                tvp = NULL;
            }
            if (!(p = lock_user_string(arg2))) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(futimesat(arg1, path(p), tvp));
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_access
    case TARGET_NR_access:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(access(path(p), arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
    case TARGET_NR_faccessat:
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(faccessat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_nice /* not on alpha */
    case TARGET_NR_nice:
        return get_errno(nice(arg1));
#endif
    case TARGET_NR_sync:
        sync();
        return 0;
#if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
    case TARGET_NR_syncfs:
        return get_errno(syncfs(arg1));
#endif
    case TARGET_NR_kill:
        return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
#ifdef TARGET_NR_rename
    case TARGET_NR_rename:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(rename(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_renameat)
    case TARGET_NR_renameat:
        {
            void *p2;
            p = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(renameat(arg1, p, arg3, p2));
            unlock_user(p2, arg4, 0);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_renameat2)
    case TARGET_NR_renameat2:
        {
            void *p2;
            p = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
            }
            unlock_user(p2, arg4, 0);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_mkdir
    case TARGET_NR_mkdir:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(mkdir(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_mkdirat)
    case TARGET_NR_mkdirat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(mkdirat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_rmdir
    case TARGET_NR_rmdir:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(rmdir(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_dup:
        ret = get_errno(dup(arg1));
        if (ret >= 0) {
            fd_trans_dup(arg1, ret);
        }
        return ret;
#ifdef TARGET_NR_pipe
    case TARGET_NR_pipe:
        return do_pipe(cpu_env, arg1, 0, 0);
#endif
#ifdef TARGET_NR_pipe2
    case TARGET_NR_pipe2:
        return do_pipe(cpu_env, arg1,
                       target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
#endif
    case TARGET_NR_times:
        {
            struct target_tms *tmsp;
            struct tms tms;
            ret = get_errno(times(&tms));
            if (arg1) {
                tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
                if (!tmsp)
                    return -TARGET_EFAULT;
                tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
                tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
                tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
                tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
            }
            if (!is_error(ret))
                ret = host_to_target_clock_t(ret);
        }
        return ret;
    case TARGET_NR_acct:
        if (arg1 == 0) {
            ret = get_errno(acct(NULL));
        } else {
            if (!(p = lock_user_string(arg1))) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(acct(path(p)));
            unlock_user(p, arg1, 0);
        }
        return ret;
#ifdef TARGET_NR_umount2
    case TARGET_NR_umount2:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(umount2(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_ioctl:
        return do_ioctl(arg1, arg2, arg3);
#ifdef TARGET_NR_fcntl
    case TARGET_NR_fcntl:
        return do_fcntl(arg1, arg2, arg3);
#endif
    case TARGET_NR_setpgid:
        return get_errno(setpgid(arg1, arg2));
    case TARGET_NR_umask:
        return get_errno(umask(arg1));
    case TARGET_NR_chroot:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chroot(p));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_dup2
    case TARGET_NR_dup2:
        ret = get_errno(dup2(arg1, arg2));
        if (ret >= 0) {
            fd_trans_dup(arg1, arg2);
        }
        return ret;
#endif
#if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
    case TARGET_NR_dup3:
    {
        int host_flags;

        if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
            return -EINVAL;
        }
        host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
        ret = get_errno(dup3(arg1, arg2, host_flags));
        if (ret >= 0) {
            fd_trans_dup(arg1, arg2);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_getppid /* not on alpha */
    case TARGET_NR_getppid:
        return get_errno(getppid());
#endif
#ifdef TARGET_NR_getpgrp
    case TARGET_NR_getpgrp:
        return get_errno(getpgrp());
#endif
    case TARGET_NR_setsid:
        return get_errno(setsid());
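    /*
     * dup, dup2 and dup3 above all call fd_trans_dup() on success so that
     * any host/target data translator attached to the old fd follows the
     * duplicate; compare fd_trans_unregister() in the open/close paths
     * earlier, which drops translators when an fd number is recycled.
     */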
#ifdef TARGET_NR_sigaction
    case TARGET_NR_sigaction:
        {
#if defined(TARGET_MIPS)
            struct target_sigaction act, oact, *pact, *old_act;

            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
                act.sa_flags = old_act->sa_flags;
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            } else {
                pact = NULL;
            }

            ret = get_errno(do_sigaction(arg1, pact, &oact, 0));

            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_flags = oact.sa_flags;
                old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
                old_act->sa_mask.sig[1] = 0;
                old_act->sa_mask.sig[2] = 0;
                old_act->sa_mask.sig[3] = 0;
                unlock_user_struct(old_act, arg3, 1);
            }
#else
            struct target_old_sigaction *old_act;
            struct target_sigaction act, oact, *pact;
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask);
                act.sa_flags = old_act->sa_flags;
#ifdef TARGET_ARCH_HAS_SA_RESTORER
                act.sa_restorer = old_act->sa_restorer;
#endif
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            } else {
                pact = NULL;
            }
            ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_mask = oact.sa_mask.sig[0];
                old_act->sa_flags = oact.sa_flags;
#ifdef TARGET_ARCH_HAS_SA_RESTORER
                old_act->sa_restorer = oact.sa_restorer;
#endif
                unlock_user_struct(old_act, arg3, 1);
            }
#endif
        }
        return ret;
#endif
    case TARGET_NR_rt_sigaction:
        {
            /*
             * For Alpha and SPARC this is a 5 argument syscall, with
             * a 'restorer' parameter which must be copied into the
             * sa_restorer field of the sigaction struct.
             * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
             * and arg5 is the sigsetsize.
             */
#if defined(TARGET_ALPHA)
            target_ulong sigsetsize = arg4;
            target_ulong restorer = arg5;
#elif defined(TARGET_SPARC)
            target_ulong restorer = arg4;
            target_ulong sigsetsize = arg5;
#else
            target_ulong sigsetsize = arg4;
            target_ulong restorer = 0;
#endif
            struct target_sigaction *act = NULL;
            struct target_sigaction *oact = NULL;

            if (sigsetsize != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
                return -TARGET_EFAULT;
            }
            if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(do_sigaction(arg1, act, oact, restorer));
                if (oact) {
                    unlock_user_struct(oact, arg3, 1);
                }
            }
            if (act) {
                unlock_user_struct(act, arg2, 0);
            }
        }
        return ret;
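    /*
     * The sigsetsize check above mirrors what the kernel does: a caller
     * built against a different sigset_t size is rejected with EINVAL
     * rather than having its mask silently truncated or over-read.
     */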
#ifdef TARGET_NR_sgetmask /* not on alpha */
    case TARGET_NR_sgetmask:
        {
            sigset_t cur_set;
            abi_ulong target_set;
            ret = do_sigprocmask(0, NULL, &cur_set);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &cur_set);
                ret = target_set;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_ssetmask /* not on alpha */
    case TARGET_NR_ssetmask:
        {
            sigset_t set, oset;
            abi_ulong target_set = arg1;
            target_to_host_old_sigset(&set, &target_set);
            ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &oset);
                ret = target_set;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_sigprocmask
    case TARGET_NR_sigprocmask:
        {
#if defined(TARGET_ALPHA)
            sigset_t set, oldset;
            abi_ulong mask;
            int how;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);

            ret = do_sigprocmask(how, &set, &oldset);
            if (!is_error(ret)) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
                cpu_env->ir[IR_V0] = 0; /* force no error */
            }
#else
            sigset_t set, oldset, *set_ptr;
            int how;

            if (arg2) {
                p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                target_to_host_old_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
                switch (arg1) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
            } else {
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
#endif
        }
        return ret;
#endif
    case TARGET_NR_rt_sigprocmask:
        {
            int how = 0;
            sigset_t set, oldset, *set_ptr;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            if (arg2) {
                p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                target_to_host_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
                switch (arg1) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
            } else {
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
        }
        return ret;
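    /*
     * sigprocmask and rt_sigprocmask differ mainly in the mask conversion:
     * the old call uses the single-word *_old_sigset helpers while the rt
     * call uses the full target_sigset_t converters, matching the kernel's
     * old/rt signal ABI split.
     */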
#ifdef TARGET_NR_sigpending
    case TARGET_NR_sigpending:
        {
            sigset_t set;
            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigpending:
        {
            sigset_t set;

            /* Yes, this check is >, not != like most. We follow the kernel's
             * logic and it does it like this because it implements
             * NR_sigpending through the same code path, and in that case
             * the old_sigset_t is smaller in size.
             */
            if (arg2 > sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigsuspend
    case TARGET_NR_sigsuspend:
        {
            sigset_t *set;

#if defined(TARGET_ALPHA)
            TaskState *ts = cpu->opaque;
            /* target_to_host_old_sigset will bswap back */
            abi_ulong mask = tswapal(arg1);
            set = &ts->sigsuspend_mask;
            target_to_host_old_sigset(set, &mask);
#else
            ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
            if (ret != 0) {
                break;
            }
#endif
            ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
            finish_sigsuspend_mask(ret);
        }
        return ret;
#endif
    case TARGET_NR_rt_sigsuspend:
        {
            sigset_t *set;

            ret = process_sigsuspend_mask(&set, arg1, arg2);
            if (ret != 0) {
                break;
            }
            ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
            finish_sigsuspend_mask(ret);
        }
        return ret;
#ifdef TARGET_NR_rt_sigtimedwait
    case TARGET_NR_rt_sigtimedwait:
        {
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            if (arg3) {
                puts = &uts;
                if (target_to_host_timespec(puts, arg3)) {
                    return -TARGET_EFAULT;
                }
            } else {
                puts = NULL;
            }
            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
                                  0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_rt_sigtimedwait_time64
    case TARGET_NR_rt_sigtimedwait_time64:
        {
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            if (arg3) {
                puts = &uts;
                if (target_to_host_timespec64(puts, arg3)) {
                    return -TARGET_EFAULT;
                }
            } else {
                puts = NULL;
            }
            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2,
                                  sizeof(target_siginfo_t), 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
#endif
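    /*
     * The _time64 variant above repeats the rt_sigtimedwait logic with only
     * the timespec conversion helper changed (target_to_host_timespec64);
     * it serves 32-bit guests whose libc passes a 64-bit time_t layout.
     * Several other *_time64 syscalls in this file follow the same pattern.
     */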
    case TARGET_NR_rt_sigqueueinfo:
        {
            siginfo_t uinfo;

            p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg3, 0);
            ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
        }
        return ret;
    case TARGET_NR_rt_tgsigqueueinfo:
        {
            siginfo_t uinfo;

            p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg4, 0);
            ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
        }
        return ret;
#ifdef TARGET_NR_sigreturn
    case TARGET_NR_sigreturn:
        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        }
        return do_sigreturn(cpu_env);
#endif
    case TARGET_NR_rt_sigreturn:
        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        }
        return do_rt_sigreturn(cpu_env);
    case TARGET_NR_sethostname:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(sethostname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_setrlimit
    case TARGET_NR_setrlimit:
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;
            if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
                return -TARGET_EFAULT;
            rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
            rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
            unlock_user_struct(target_rlim, arg2, 0);
            /*
             * If we just passed through resource limit settings for memory then
             * they would also apply to QEMU's own allocations, and QEMU will
             * crash or hang or die if its allocations fail. Ideally we would
             * track the guest allocations in QEMU and apply the limits ourselves.
             * For now, just tell the guest the call succeeded but don't actually
             * limit anything.
             */
            if (resource != RLIMIT_AS &&
                resource != RLIMIT_DATA &&
                resource != RLIMIT_STACK) {
                return get_errno(setrlimit(resource, &rlim));
            } else {
                return 0;
            }
        }
#endif
#ifdef TARGET_NR_getrlimit
    case TARGET_NR_getrlimit:
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;

            ret = get_errno(getrlimit(resource, &rlim));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                    return -TARGET_EFAULT;
                target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
                target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
                unlock_user_struct(target_rlim, arg2, 1);
            }
        }
        return ret;
#endif
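    /*
     * The rlim values go through target_to_host_rlim()/host_to_target_rlim()
     * rather than plain byte swaps, presumably because RLIM_INFINITY is
     * encoded differently on 32-bit targets than on a 64-bit host and has
     * to be mapped rather than copied verbatim.
     */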
    case TARGET_NR_getrusage:
        {
            struct rusage rusage;
            ret = get_errno(getrusage(arg1, &rusage));
            if (!is_error(ret)) {
                ret = host_to_target_rusage(arg2, &rusage);
            }
        }
        return ret;
#if defined(TARGET_NR_gettimeofday)
    case TARGET_NR_gettimeofday:
        {
            struct timeval tv;
            struct timezone tz;

            ret = get_errno(gettimeofday(&tv, &tz));
            if (!is_error(ret)) {
                if (arg1 && copy_to_user_timeval(arg1, &tv)) {
                    return -TARGET_EFAULT;
                }
                if (arg2 && copy_to_user_timezone(arg2, &tz)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_settimeofday)
    case TARGET_NR_settimeofday:
        {
            struct timeval tv, *ptv = NULL;
            struct timezone tz, *ptz = NULL;

            if (arg1) {
                if (copy_from_user_timeval(&tv, arg1)) {
                    return -TARGET_EFAULT;
                }
                ptv = &tv;
            }

            if (arg2) {
                if (copy_from_user_timezone(&tz, arg2)) {
                    return -TARGET_EFAULT;
                }
                ptz = &tz;
            }

            return get_errno(settimeofday(ptv, ptz));
        }
#endif
#if defined(TARGET_NR_select)
    case TARGET_NR_select:
#if defined(TARGET_WANT_NI_OLD_SELECT)
        /* some architectures used to have old_select here
         * but now ENOSYS it.
         */
        ret = -TARGET_ENOSYS;
#elif defined(TARGET_WANT_OLD_SYS_SELECT)
        ret = do_old_select(arg1);
#else
        ret = do_select(arg1, arg2, arg3, arg4, arg5);
#endif
        return ret;
#endif
#ifdef TARGET_NR_pselect6
    case TARGET_NR_pselect6:
        return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
#endif
#ifdef TARGET_NR_pselect6_time64
    case TARGET_NR_pselect6_time64:
        return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
#endif
#ifdef TARGET_NR_symlink
    case TARGET_NR_symlink:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlink(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_symlinkat)
    case TARGET_NR_symlinkat:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg3);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlinkat(p, arg2, p2));
            unlock_user(p2, arg3, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_readlink
    case TARGET_NR_readlink:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (!arg3) {
                /* Short circuit this for the magic exe check. */
                ret = -TARGET_EINVAL;
            } else if (is_proc_myself((const char *)p, "exe")) {
                char real[PATH_MAX], *temp;
                temp = realpath(exec_path, real);
                /* Return value is # of bytes that we wrote to the buffer. */
                if (temp == NULL) {
                    ret = get_errno(-1);
                } else {
                    /* Don't worry about sign mismatch as earlier mapping
                     * logic would have thrown a bad address error. */
                    ret = MIN(strlen(real), arg3);
                    /* We cannot NUL terminate the string. */
                    memcpy(p2, real, ret);
                }
            } else {
                ret = get_errno(readlink(path(p), p2, arg3));
            }
            unlock_user(p2, arg2, ret);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_readlinkat)
    case TARGET_NR_readlinkat:
        {
            void *p2;
            p = lock_user_string(arg2);
            p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (is_proc_myself((const char *)p, "exe")) {
                char real[PATH_MAX], *temp;
                temp = realpath(exec_path, real);
                ret = temp == NULL ? get_errno(-1) : strlen(real);
                snprintf((char *)p2, arg4, "%s", real);
            } else {
                ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
            }
            unlock_user(p2, arg3, ret);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
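    /*
     * is_proc_myself(p, "exe") special-cases readlink/readlinkat on
     * /proc/self/exe (and the guest's own /proc/<pid>/exe): the host link
     * would name the QEMU binary itself, so the emulator answers with
     * exec_path, the guest image it is actually running.
     */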
#ifdef TARGET_NR_swapon
    case TARGET_NR_swapon:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapon(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_reboot:
        if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
            /* arg4 must be ignored in all other cases */
            p = lock_user_string(arg4);
            if (!p) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(reboot(arg1, arg2, arg3, p));
            unlock_user(p, arg4, 0);
        } else {
            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
        }
        return ret;
#ifdef TARGET_NR_mmap
    case TARGET_NR_mmap:
#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
    defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
    || defined(TARGET_S390X)
        {
            abi_ulong *v;
            abi_ulong v1, v2, v3, v4, v5, v6;
            if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
                return -TARGET_EFAULT;
            v1 = tswapal(v[0]);
            v2 = tswapal(v[1]);
            v3 = tswapal(v[2]);
            v4 = tswapal(v[3]);
            v5 = tswapal(v[4]);
            v6 = tswapal(v[5]);
            unlock_user(v, arg1, 0);
            ret = get_errno(target_mmap(v1, v2, v3,
                                        target_to_host_bitmask(v4, mmap_flags_tbl),
                                        v5, v6));
        }
#else
        /* mmap pointers are always untagged */
        ret = get_errno(target_mmap(arg1, arg2, arg3,
                                    target_to_host_bitmask(arg4, mmap_flags_tbl),
                                    arg5,
                                    arg6));
#endif
        return ret;
#endif
#ifdef TARGET_NR_mmap2
    case TARGET_NR_mmap2:
#ifndef MMAP_SHIFT
#define MMAP_SHIFT 12
#endif
        ret = target_mmap(arg1, arg2, arg3,
                          target_to_host_bitmask(arg4, mmap_flags_tbl),
                          arg5, arg6 << MMAP_SHIFT);
        return get_errno(ret);
#endif
    case TARGET_NR_munmap:
        arg1 = cpu_untagged_addr(cpu, arg1);
        return get_errno(target_munmap(arg1, arg2));
    case TARGET_NR_mprotect:
        arg1 = cpu_untagged_addr(cpu, arg1);
        {
            TaskState *ts = cpu->opaque;
            /* Special hack to detect libc making the stack executable. */
            if ((arg3 & PROT_GROWSDOWN)
                && arg1 >= ts->info->stack_limit
                && arg1 <= ts->info->start_stack) {
                arg3 &= ~PROT_GROWSDOWN;
                arg2 = arg2 + arg1 - ts->info->stack_limit;
                arg1 = ts->info->stack_limit;
            }
        }
        return get_errno(target_mprotect(arg1, arg2, arg3));
#ifdef TARGET_NR_mremap
    case TARGET_NR_mremap:
        arg1 = cpu_untagged_addr(cpu, arg1);
        /* mremap new_addr (arg5) is always untagged */
        return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
#endif
        /* ??? msync/mlock/munlock are broken for softmmu. */
#ifdef TARGET_NR_msync
    case TARGET_NR_msync:
        return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
#endif
#ifdef TARGET_NR_mlock
    case TARGET_NR_mlock:
        return get_errno(mlock(g2h(cpu, arg1), arg2));
#endif
#ifdef TARGET_NR_munlock
    case TARGET_NR_munlock:
        return get_errno(munlock(g2h(cpu, arg1), arg2));
#endif
#ifdef TARGET_NR_mlockall
    case TARGET_NR_mlockall:
        return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
#endif
#ifdef TARGET_NR_munlockall
    case TARGET_NR_munlockall:
        return get_errno(munlockall());
#endif
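    /*
     * Two mmap quirks handled above: on some 32-bit targets the old-style
     * mmap takes its six arguments in a guest-memory block, which is why
     * they are loaded and byte-swapped individually; and mmap2 passes the
     * file offset in page units, so with MMAP_SHIFT 12 a guest arg6 of 3
     * means byte offset 3 << 12 = 12288.
     */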
#ifdef TARGET_NR_truncate
    case TARGET_NR_truncate:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(truncate(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate
    case TARGET_NR_ftruncate:
        return get_errno(ftruncate(arg1, arg2));
#endif
    case TARGET_NR_fchmod:
        return get_errno(fchmod(arg1, arg2));
#if defined(TARGET_NR_fchmodat)
    case TARGET_NR_fchmodat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(fchmodat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_getpriority:
        /* Note that negative values are valid for getpriority, so we must
           differentiate based on errno settings. */
        errno = 0;
        ret = getpriority(arg1, arg2);
        if (ret == -1 && errno != 0) {
            return -host_to_target_errno(errno);
        }
#ifdef TARGET_ALPHA
        /* Return value is the unbiased priority.  Signal no error. */
        cpu_env->ir[IR_V0] = 0;
#else
        /* Return value is a biased priority to avoid negative numbers. */
        ret = 20 - ret;
#endif
        return ret;
    case TARGET_NR_setpriority:
        return get_errno(setpriority(arg1, arg2, arg3));
#ifdef TARGET_NR_statfs
    case TARGET_NR_statfs:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs:
        if (!is_error(ret)) {
            struct target_statfs *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg2, 1);
        }
        return ret;
#endif
#ifdef TARGET_NR_fstatfs
    case TARGET_NR_fstatfs:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs;
#endif
#ifdef TARGET_NR_statfs64
    case TARGET_NR_statfs64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs64:
        if (!is_error(ret)) {
            struct target_statfs64 *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg3, 1);
        }
        return ret;
    case TARGET_NR_fstatfs64:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs64;
#endif
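    /*
     * statfs and fstatfs (and their 64-bit forms) share the host-to-target
     * field copying through the convert_statfs/convert_statfs64 labels
     * above, so the conversion exists exactly once per struct layout and
     * the fd-based variants simply jump into it.
     */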
#ifdef TARGET_NR_socketcall
    case TARGET_NR_socketcall:
        return do_socketcall(arg1, arg2);
#endif
#ifdef TARGET_NR_accept
    case TARGET_NR_accept:
        return do_accept4(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_accept4
    case TARGET_NR_accept4:
        return do_accept4(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_bind
    case TARGET_NR_bind:
        return do_bind(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_connect
    case TARGET_NR_connect:
        return do_connect(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getpeername
    case TARGET_NR_getpeername:
        return do_getpeername(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockname
    case TARGET_NR_getsockname:
        return do_getsockname(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockopt
    case TARGET_NR_getsockopt:
        return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_listen
    case TARGET_NR_listen:
        return get_errno(listen(arg1, arg2));
#endif
#ifdef TARGET_NR_recv
    case TARGET_NR_recv:
        return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_recvfrom
    case TARGET_NR_recvfrom:
        return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_recvmsg
    case TARGET_NR_recvmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_send
    case TARGET_NR_send:
        return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_sendmsg
    case TARGET_NR_sendmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 1);
#endif
#ifdef TARGET_NR_sendmmsg
    case TARGET_NR_sendmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
#endif
#ifdef TARGET_NR_recvmmsg
    case TARGET_NR_recvmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
#endif
#ifdef TARGET_NR_sendto
    case TARGET_NR_sendto:
        return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_shutdown
    case TARGET_NR_shutdown:
        return get_errno(shutdown(arg1, arg2));
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
    case TARGET_NR_getrandom:
        p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(getrandom(p, arg2, arg3));
        unlock_user(p, arg1, ret);
        return ret;
#endif
#ifdef TARGET_NR_socket
    case TARGET_NR_socket:
        return do_socket(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_socketpair
    case TARGET_NR_socketpair:
        return do_socketpair(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_setsockopt
    case TARGET_NR_setsockopt:
        return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
#endif
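    /*
     * Guests see whichever socket ABI their architecture defines: older
     * 32-bit ABIs multiplex everything through socketcall (unpacked in
     * do_socketcall), while newer ones use the direct per-call numbers
     * handled above. Both paths funnel into the same do_* helpers, so the
     * sockaddr and msghdr conversions are written only once.
     */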
#if defined(TARGET_NR_syslog)
    case TARGET_NR_syslog:
        {
            int len = arg3;

            switch (arg1) {
            case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
            case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
            case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
            case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
            case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
            case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
                return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
            case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
            case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
            case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
                {
                    if (len < 0) {
                        return -TARGET_EINVAL;
                    }
                    if (len == 0) {
                        return 0;
                    }
                    p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
                    unlock_user(p, arg2, arg3);
                }
                return ret;
            default:
                return -TARGET_EINVAL;
            }
        }
        break;
#endif
    case TARGET_NR_setitimer:
        {
            struct itimerval value, ovalue, *pvalue;

            if (arg2) {
                pvalue = &value;
                if (copy_from_user_timeval(&pvalue->it_interval, arg2)
                    || copy_from_user_timeval(&pvalue->it_value,
                                              arg2 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
            } else {
                pvalue = NULL;
            }
            ret = get_errno(setitimer(arg1, pvalue, &ovalue));
            if (!is_error(ret) && arg3) {
                if (copy_to_user_timeval(arg3,
                                         &ovalue.it_interval)
                    || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
                                            &ovalue.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_getitimer:
        {
            struct itimerval value;

            ret = get_errno(getitimer(arg1, &value));
            if (!is_error(ret) && arg2) {
                if (copy_to_user_timeval(arg2,
                                         &value.it_interval)
                    || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
                                            &value.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#ifdef TARGET_NR_stat
    case TARGET_NR_stat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_lstat
    case TARGET_NR_lstat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_fstat
    case TARGET_NR_fstat:
        {
            ret = get_errno(fstat(arg1, &st));
#if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
        do_stat:
#endif
            if (!is_error(ret)) {
                struct target_stat *target_st;

                if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
                    return -TARGET_EFAULT;
                memset(target_st, 0, sizeof(*target_st));
                __put_user(st.st_dev, &target_st->st_dev);
                __put_user(st.st_ino, &target_st->st_ino);
                __put_user(st.st_mode, &target_st->st_mode);
                __put_user(st.st_uid, &target_st->st_uid);
                __put_user(st.st_gid, &target_st->st_gid);
                __put_user(st.st_nlink, &target_st->st_nlink);
                __put_user(st.st_rdev, &target_st->st_rdev);
                __put_user(st.st_size, &target_st->st_size);
                __put_user(st.st_blksize, &target_st->st_blksize);
                __put_user(st.st_blocks, &target_st->st_blocks);
                __put_user(st.st_atime, &target_st->target_st_atime);
                __put_user(st.st_mtime, &target_st->target_st_mtime);
                __put_user(st.st_ctime, &target_st->target_st_ctime);
#if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
                __put_user(st.st_atim.tv_nsec,
                           &target_st->target_st_atime_nsec);
                __put_user(st.st_mtim.tv_nsec,
                           &target_st->target_st_mtime_nsec);
                __put_user(st.st_ctim.tv_nsec,
                           &target_st->target_st_ctime_nsec);
#endif
                unlock_user_struct(target_st, arg2, 1);
            }
        }
        return ret;
#endif
    case TARGET_NR_vhangup:
        return get_errno(vhangup());
#ifdef TARGET_NR_syscall
    case TARGET_NR_syscall:
        return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
                          arg6, arg7, arg8, 0);
#endif
#if defined(TARGET_NR_wait4)
    case TARGET_NR_wait4:
        {
            int status;
            abi_long status_ptr = arg2;
            struct rusage rusage, *rusage_ptr;
            abi_ulong target_rusage = arg4;
            abi_long rusage_err;
            if (target_rusage) {
                rusage_ptr = &rusage;
            } else {
                rusage_ptr = NULL;
            }
            ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
            if (!is_error(ret)) {
                if (status_ptr && ret) {
                    status = host_to_target_waitstatus(status);
                    if (put_user_s32(status, status_ptr))
                        return -TARGET_EFAULT;
                }
                if (target_rusage) {
                    rusage_err = host_to_target_rusage(target_rusage, &rusage);
                    if (rusage_err) {
                        ret = rusage_err;
                    }
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_swapoff
    case TARGET_NR_swapoff:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapoff(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_sysinfo:
        {
            struct target_sysinfo *target_value;
            struct sysinfo value;
            ret = get_errno(sysinfo(&value));
            if (!is_error(ret) && arg1)
            {
                if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
                    return -TARGET_EFAULT;
                __put_user(value.uptime, &target_value->uptime);
                __put_user(value.loads[0], &target_value->loads[0]);
                __put_user(value.loads[1], &target_value->loads[1]);
                __put_user(value.loads[2], &target_value->loads[2]);
                __put_user(value.totalram, &target_value->totalram);
                __put_user(value.freeram, &target_value->freeram);
                __put_user(value.sharedram, &target_value->sharedram);
                __put_user(value.bufferram, &target_value->bufferram);
                __put_user(value.totalswap, &target_value->totalswap);
                __put_user(value.freeswap, &target_value->freeswap);
                __put_user(value.procs, &target_value->procs);
                __put_user(value.totalhigh, &target_value->totalhigh);
                __put_user(value.freehigh, &target_value->freehigh);
                __put_user(value.mem_unit, &target_value->mem_unit);
                unlock_user_struct(target_value, arg1, 1);
            }
        }
        return ret;
#ifdef TARGET_NR_ipc
    case TARGET_NR_ipc:
        return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_semget
    case TARGET_NR_semget:
        return get_errno(semget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_semop
    case TARGET_NR_semop:
        return do_semtimedop(arg1, arg2, arg3, 0, false);
#endif
#ifdef TARGET_NR_semtimedop
    case TARGET_NR_semtimedop:
        return do_semtimedop(arg1, arg2, arg3, arg4, false);
#endif
#ifdef TARGET_NR_semtimedop_time64
    case TARGET_NR_semtimedop_time64:
        return do_semtimedop(arg1, arg2, arg3, arg4, true);
#endif
#ifdef TARGET_NR_semctl
    case TARGET_NR_semctl:
        return do_semctl(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_msgctl
    case TARGET_NR_msgctl:
        return do_msgctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_msgget
    case TARGET_NR_msgget:
        return get_errno(msgget(arg1, arg2));
#endif
#ifdef TARGET_NR_msgrcv
    case TARGET_NR_msgrcv:
        return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_msgsnd
    case TARGET_NR_msgsnd:
        return do_msgsnd(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_shmget
    case TARGET_NR_shmget:
        return get_errno(shmget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_shmctl
    case TARGET_NR_shmctl:
        return do_shmctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmat
    case TARGET_NR_shmat:
        return do_shmat(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmdt
    case TARGET_NR_shmdt:
        return do_shmdt(arg1);
#endif
    case TARGET_NR_fsync:
        return get_errno(fsync(arg1));
    case TARGET_NR_clone:
        /* Linux manages to have three different orderings for its
         * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
         * match the kernel's CONFIG_CLONE_* settings.
         * Microblaze is further special in that it uses a sixth
         * implicit argument to clone for the TLS pointer.
         */
#if defined(TARGET_MICROBLAZE)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
#elif defined(TARGET_CLONE_BACKWARDS)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
#elif defined(TARGET_CLONE_BACKWARDS2)
        ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
#else
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
#endif
        return ret;
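    /*
     * A sketch of the argument orders reconciled above, given that do_fork
     * expects (flags, newsp, parent_tidptr, newtls, child_tidptr):
     *   default:          arg1=flags arg2=newsp arg3=ptid arg4=ctid arg5=tls
     *   CLONE_BACKWARDS:  arg1=flags arg2=newsp arg3=ptid arg4=tls  arg5=ctid
     *   CLONE_BACKWARDS2: arg1=newsp arg2=flags arg3=ptid arg4=ctid arg5=tls
     * and Microblaze passes the TLS pointer as the implicit sixth argument.
     */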
#ifdef __NR_exit_group
        /* new thread calls */
    case TARGET_NR_exit_group:
        preexit_cleanup(cpu_env, arg1);
        return get_errno(exit_group(arg1));
#endif
    case TARGET_NR_setdomainname:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(setdomainname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
    case TARGET_NR_uname:
        /* no need to transcode because we use the linux syscall */
        {
            struct new_utsname * buf;

            if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
                return -TARGET_EFAULT;
            ret = get_errno(sys_uname(buf));
            if (!is_error(ret)) {
                /* Overwrite the native machine name with whatever is being
                   emulated. */
                g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
                          sizeof(buf->machine));
                /* Allow the user to override the reported release. */
                if (qemu_uname_release && *qemu_uname_release) {
                    g_strlcpy(buf->release, qemu_uname_release,
                              sizeof(buf->release));
                }
            }
            unlock_user_struct(buf, arg1, 1);
        }
        return ret;
#ifdef TARGET_I386
    case TARGET_NR_modify_ldt:
        return do_modify_ldt(cpu_env, arg1, arg2, arg3);
#if !defined(TARGET_X86_64)
    case TARGET_NR_vm86:
        return do_vm86(cpu_env, arg1, arg2);
#endif
#endif
#if defined(TARGET_NR_adjtimex)
    case TARGET_NR_adjtimex:
        {
            struct timex host_buf;

            if (target_to_host_timex(&host_buf, arg1) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(adjtimex(&host_buf));
            if (!is_error(ret)) {
                if (host_to_target_timex(arg1, &host_buf) != 0) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime:
        {
            struct timex htx, *phtx = &htx;

            if (target_to_host_timex(phtx, arg2) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(clock_adjtime(arg1, phtx));
            if (!is_error(ret) && phtx) {
                if (host_to_target_timex(arg2, phtx) != 0) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime64:
        {
            struct timex htx;

            if (target_to_host_timex64(&htx, arg2) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(clock_adjtime(arg1, &htx));
            if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
    case TARGET_NR_getpgid:
        return get_errno(getpgid(arg1));
    case TARGET_NR_fchdir:
        return get_errno(fchdir(arg1));
    case TARGET_NR_personality:
        return get_errno(personality(arg1));
#ifdef TARGET_NR__llseek /* Not on alpha */
    case TARGET_NR__llseek:
        {
            int64_t res;
#if !defined(__NR_llseek)
            res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
            if (res == -1) {
                ret = get_errno(res);
            } else {
                ret = 0;
            }
#else
            ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
#endif
            if ((ret == 0) && put_user_s64(res, arg4)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_getdents
    case TARGET_NR_getdents:
        return do_getdents(arg1, arg2, arg3);
#endif /* TARGET_NR_getdents */
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
    case TARGET_NR_getdents64:
        return do_getdents64(arg1, arg2, arg3);
#endif /* TARGET_NR_getdents64 */
#if defined(TARGET_NR__newselect)
    case TARGET_NR__newselect:
        return do_select(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_poll
    case TARGET_NR_poll:
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
#endif
#ifdef TARGET_NR_ppoll
    case TARGET_NR_ppoll:
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
#endif
#ifdef TARGET_NR_ppoll_time64
    case TARGET_NR_ppoll_time64:
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
#endif
    case TARGET_NR_flock:
        /* NOTE: the flock constant seems to be the same for every
           Linux platform */
        return get_errno(safe_flock(arg1, arg2));
    case TARGET_NR_readv:
        {
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                ret = get_errno(safe_readv(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
    case TARGET_NR_writev:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(safe_writev(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#if defined(TARGET_NR_preadv)
    case TARGET_NR_preadv:
        {
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                unsigned long low, high;

                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_pwritev)
    case TARGET_NR_pwritev:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                unsigned long low, high;

                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
    case TARGET_NR_getsid:
        return get_errno(getsid(arg1));
#if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
    case TARGET_NR_fdatasync:
        return get_errno(fdatasync(arg1));
#endif
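    /*
     * target_to_host_low_high() above reassembles the 64-bit file offset
     * that the guest passes (as a single register or a register pair,
     * depending on the target's word size) into the low/high halves that
     * safe_preadv()/safe_pwritev() expect from the host kernel.
     */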
    case TARGET_NR_sched_getaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_getaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                return -TARGET_EINVAL;
            }
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);

            mask = alloca(mask_size);
            memset(mask, 0, mask_size);
            ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));

            if (!is_error(ret)) {
                if (ret > arg2) {
                    /* More data returned than the caller's buffer will fit.
                     * This only happens if sizeof(abi_long) < sizeof(long)
                     * and the caller passed us a buffer holding an odd number
                     * of abi_longs. If the host kernel is actually using the
                     * extra 4 bytes then fail EINVAL; otherwise we can just
                     * ignore them and only copy the interesting part.
                     */
                    int numcpus = sysconf(_SC_NPROCESSORS_CONF);
                    if (numcpus > arg2 * 8) {
                        return -TARGET_EINVAL;
                    }
                    ret = arg2;
                }

                if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    case TARGET_NR_sched_setaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_setaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                return -TARGET_EINVAL;
            }
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
            mask = alloca(mask_size);

            ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
            if (ret) {
                return ret;
            }

            return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
        }
    case TARGET_NR_getcpu:
        {
            unsigned cpu, node;
            ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
                                       arg2 ? &node : NULL,
                                       NULL));
            if (is_error(ret)) {
                return ret;
            }
            if (arg1 && put_user_u32(cpu, arg1)) {
                return -TARGET_EFAULT;
            }
            if (arg2 && put_user_u32(node, arg2)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
;
10792 case TARGET_NR_sched_setparam
:
10794 struct target_sched_param
*target_schp
;
10795 struct sched_param schp
;
10798 return -TARGET_EINVAL
;
10800 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1)) {
10801 return -TARGET_EFAULT
;
10803 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10804 unlock_user_struct(target_schp
, arg2
, 0);
10805 return get_errno(sys_sched_setparam(arg1
, &schp
));
10807 case TARGET_NR_sched_getparam
:
10809 struct target_sched_param
*target_schp
;
10810 struct sched_param schp
;
10813 return -TARGET_EINVAL
;
10815 ret
= get_errno(sys_sched_getparam(arg1
, &schp
));
10816 if (!is_error(ret
)) {
10817 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0)) {
10818 return -TARGET_EFAULT
;
10820 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
10821 unlock_user_struct(target_schp
, arg2
, 1);
10825 case TARGET_NR_sched_setscheduler
:
10827 struct target_sched_param
*target_schp
;
10828 struct sched_param schp
;
10830 return -TARGET_EINVAL
;
10832 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1)) {
10833 return -TARGET_EFAULT
;
10835 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10836 unlock_user_struct(target_schp
, arg3
, 0);
10837 return get_errno(sys_sched_setscheduler(arg1
, arg2
, &schp
));
10839 case TARGET_NR_sched_getscheduler
:
10840 return get_errno(sys_sched_getscheduler(arg1
));
    case TARGET_NR_sched_getattr:
        {
            struct target_sched_attr *target_scha;
            struct sched_attr scha;
            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            if (arg3 > sizeof(scha)) {
                arg3 = sizeof(scha);
            }
            ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
            if (!is_error(ret)) {
                target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
                if (!target_scha) {
                    return -TARGET_EFAULT;
                }
                target_scha->size = tswap32(scha.size);
                target_scha->sched_policy = tswap32(scha.sched_policy);
                target_scha->sched_flags = tswap64(scha.sched_flags);
                target_scha->sched_nice = tswap32(scha.sched_nice);
                target_scha->sched_priority = tswap32(scha.sched_priority);
                target_scha->sched_runtime = tswap64(scha.sched_runtime);
                target_scha->sched_deadline = tswap64(scha.sched_deadline);
                target_scha->sched_period = tswap64(scha.sched_period);
                if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
                    target_scha->sched_util_min = tswap32(scha.sched_util_min);
                    target_scha->sched_util_max = tswap32(scha.sched_util_max);
                }
                unlock_user(target_scha, arg2, arg3);
            }
            return ret;
        }
    case TARGET_NR_sched_setattr:
        {
            struct target_sched_attr *target_scha;
            struct sched_attr scha;
            uint32_t size;
            int zeroed;
            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(size, arg2)) {
                return -TARGET_EFAULT;
            }
            if (!size) {
                size = offsetof(struct target_sched_attr, sched_util_min);
            }
            if (size < offsetof(struct target_sched_attr, sched_util_min)) {
                if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
                    return -TARGET_EFAULT;
                }
                return -TARGET_E2BIG;
            }

            zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
            if (zeroed < 0) {
                return zeroed;
            } else if (zeroed == 0) {
                if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
                    return -TARGET_EFAULT;
                }
                return -TARGET_E2BIG;
            }
            if (size > sizeof(struct target_sched_attr)) {
                size = sizeof(struct target_sched_attr);
            }

            target_scha = lock_user(VERIFY_READ, arg2, size, 1);
            if (!target_scha) {
                return -TARGET_EFAULT;
            }
            scha.size = size;
            scha.sched_policy = tswap32(target_scha->sched_policy);
            scha.sched_flags = tswap64(target_scha->sched_flags);
            scha.sched_nice = tswap32(target_scha->sched_nice);
            scha.sched_priority = tswap32(target_scha->sched_priority);
            scha.sched_runtime = tswap64(target_scha->sched_runtime);
            scha.sched_deadline = tswap64(target_scha->sched_deadline);
            scha.sched_period = tswap64(target_scha->sched_period);
            if (size > offsetof(struct target_sched_attr, sched_util_min)) {
                scha.sched_util_min = tswap32(target_scha->sched_util_min);
                scha.sched_util_max = tswap32(target_scha->sched_util_max);
            }
            unlock_user(target_scha, arg2, 0);
            return get_errno(sys_sched_setattr(arg1, &scha, arg3));
        }
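    /*
     * The size dance above follows the kernel's extensible-struct protocol
     * for sched_attr: a struct that is too small, or that claims a larger
     * size with non-zero tail bytes, gets E2BIG with the supported size
     * written back through arg2; check_zeroed_user() is what verifies that
     * any bytes beyond the fields QEMU understands are zero.
     */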
    case TARGET_NR_sched_yield:
        return get_errno(sched_yield());
    case TARGET_NR_sched_get_priority_max:
        return get_errno(sched_get_priority_max(arg1));
    case TARGET_NR_sched_get_priority_min:
        return get_errno(sched_get_priority_min(arg1));
#ifdef TARGET_NR_sched_rr_get_interval
    case TARGET_NR_sched_rr_get_interval:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                ret = host_to_target_timespec(arg2, &ts);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_sched_rr_get_interval_time64
    case TARGET_NR_sched_rr_get_interval_time64:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                ret = host_to_target_timespec64(arg2, &ts);
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_nanosleep)
    case TARGET_NR_nanosleep:
        {
            struct timespec req, rem;
            target_to_host_timespec(&req, arg1);
            ret = get_errno(safe_nanosleep(&req, &rem));
            if (is_error(ret) && arg2) {
                host_to_target_timespec(arg2, &rem);
            }
        }
        return ret;
#endif
    case TARGET_NR_prctl:
        return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
10970 #ifdef TARGET_NR_arch_prctl
10971 case TARGET_NR_arch_prctl
:
10972 return do_arch_prctl(cpu_env
, arg1
, arg2
);
#ifdef TARGET_NR_pread64
    case TARGET_NR_pread64:
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, ret);
        return ret;
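    /*
     * regpairs_aligned() reports whether this target ABI passes 64-bit
     * values in an even/odd register pair; when it does, the 64-bit
     * offset starts one argument slot later, hence the argument
     * reshuffling at the top of these cases.
     */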
    case TARGET_NR_pwrite64:
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_getcwd:
        if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
            return -TARGET_EFAULT;
        ret = get_errno(sys_getcwd1(p, arg2));
        unlock_user(p, arg1, ret);
        return ret;
    case TARGET_NR_capget:
    case TARGET_NR_capset:
    {
        struct target_user_cap_header *target_header;
        struct target_user_cap_data *target_data = NULL;
        struct __user_cap_header_struct header;
        struct __user_cap_data_struct data[2];
        struct __user_cap_data_struct *dataptr = NULL;
        int i, target_datalen;
        int data_items = 1;

        if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
            return -TARGET_EFAULT;
        }
        header.version = tswap32(target_header->version);
        header.pid = tswap32(target_header->pid);

        if (header.version != _LINUX_CAPABILITY_VERSION) {
            /* Version 2 and up takes pointer to two user_data structs */
            data_items = 2;
        }

        target_datalen = sizeof(*target_data) * data_items;

        if (arg2) {
            if (num == TARGET_NR_capget) {
                target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
            } else {
                target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
            }
            if (!target_data) {
                unlock_user_struct(target_header, arg1, 0);
                return -TARGET_EFAULT;
            }

            if (num == TARGET_NR_capset) {
                for (i = 0; i < data_items; i++) {
                    data[i].effective = tswap32(target_data[i].effective);
                    data[i].permitted = tswap32(target_data[i].permitted);
                    data[i].inheritable = tswap32(target_data[i].inheritable);
                }
            }

            dataptr = data;
        }

        if (num == TARGET_NR_capget) {
            ret = get_errno(capget(&header, dataptr));
        } else {
            ret = get_errno(capset(&header, dataptr));
        }

        /* The kernel always updates version for both capget and capset */
        target_header->version = tswap32(header.version);
        unlock_user_struct(target_header, arg1, 1);

        if (arg2) {
            if (num == TARGET_NR_capget) {
                for (i = 0; i < data_items; i++) {
                    target_data[i].effective = tswap32(data[i].effective);
                    target_data[i].permitted = tswap32(data[i].permitted);
                    target_data[i].inheritable = tswap32(data[i].inheritable);
                }
                unlock_user(target_data, arg2, target_datalen);
            } else {
                unlock_user(target_data, arg2, 0);
            }
        }
        return ret;
    }
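    /*
     * Capability sets grew from 32 to 64 bits after the v1 ABI: a v1
     * header is paired with a single __user_cap_data_struct, while later
     * versions pass an array of two, which is why data_items above is
     * either 1 or 2 depending on header.version.
     */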
    case TARGET_NR_sigaltstack:
        return do_sigaltstack(arg1, arg2, cpu_env);

#ifdef CONFIG_SENDFILE
#ifdef TARGET_NR_sendfile
    case TARGET_NR_sendfile:
    {
        off_t *offp = NULL;
        off_t off;
        if (arg3) {
            ret = get_user_sal(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            abi_long ret2 = put_user_sal(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_sendfile64
    case TARGET_NR_sendfile64:
    {
        off_t *offp = NULL;
        off_t off;
        if (arg3) {
            ret = get_user_s64(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            abi_long ret2 = put_user_s64(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#endif
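    /*
     * sendfile and sendfile64 differ only in the width of the offset the
     * guest passes (native abi_long vs. explicit 64-bit); both map onto
     * the host sendfile(), whose off_t is expected to be 64-bit in this
     * build.
     */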
#ifdef TARGET_NR_vfork
    case TARGET_NR_vfork:
        return get_errno(do_fork(cpu_env,
                         CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
                         0, 0, 0, 0));
#endif
#ifdef TARGET_NR_ugetrlimit
    case TARGET_NR_ugetrlimit:
    {
        struct rlimit rlim;
        int resource = target_to_host_resource(arg1);
        ret = get_errno(getrlimit(resource, &rlim));
        if (!is_error(ret)) {
            struct target_rlimit *target_rlim;
            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                return -TARGET_EFAULT;
            target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
            target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_truncate64
    case TARGET_NR_truncate64:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate64
    case TARGET_NR_ftruncate64:
        return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_stat64
    case TARGET_NR_stat64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_lstat64
    case TARGET_NR_lstat64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_fstat64
    case TARGET_NR_fstat64:
        ret = get_errno(fstat(arg1, &st));
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
#ifdef TARGET_NR_fstatat64
    case TARGET_NR_fstatat64:
#endif
#ifdef TARGET_NR_newfstatat
    case TARGET_NR_newfstatat:
#endif
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(fstatat(arg1, path(p), &st, arg4));
        unlock_user(p, arg2, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg3, &st);
        return ret;
#endif
#if defined(TARGET_NR_statx)
    case TARGET_NR_statx:
        {
            struct target_statx *target_stx;
            int dirfd = arg1;
            int flags = arg3;

            p = lock_user_string(arg2);
            if (p == NULL) {
                return -TARGET_EFAULT;
            }
#if defined(__NR_statx)
            {
                /*
                 * It is assumed that struct statx is architecture independent.
                 */
                struct target_statx host_stx;
                int mask = arg4;

                ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
                if (!is_error(ret)) {
                    if (host_to_target_statx(&host_stx, arg5) != 0) {
                        unlock_user(p, arg2, 0);
                        return -TARGET_EFAULT;
                    }
                }

                if (ret != -TARGET_ENOSYS) {
                    unlock_user(p, arg2, 0);
                    return ret;
                }
            }
#endif
            ret = get_errno(fstatat(dirfd, path(p), &st, flags));
            unlock_user(p, arg2, 0);

            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
                    return -TARGET_EFAULT;
                }
                memset(target_stx, 0, sizeof(*target_stx));
                __put_user(major(st.st_dev), &target_stx->stx_dev_major);
                __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
                __put_user(st.st_ino, &target_stx->stx_ino);
                __put_user(st.st_mode, &target_stx->stx_mode);
                __put_user(st.st_uid, &target_stx->stx_uid);
                __put_user(st.st_gid, &target_stx->stx_gid);
                __put_user(st.st_nlink, &target_stx->stx_nlink);
                __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
                __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
                __put_user(st.st_size, &target_stx->stx_size);
                __put_user(st.st_blksize, &target_stx->stx_blksize);
                __put_user(st.st_blocks, &target_stx->stx_blocks);
                __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
                __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
                __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
                unlock_user_struct(target_stx, arg5, 1);
            }
        }
        return ret;
#endif
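    /*
     * When the host has no statx() syscall, the fallback above
     * synthesizes the result from fstatat(): any field fstatat cannot
     * provide (e.g. stx_btime, the nanosecond parts of the timestamps)
     * is simply left zero from the memset.
     */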
#ifdef TARGET_NR_lchown
    case TARGET_NR_lchown:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid
    case TARGET_NR_getuid:
        return get_errno(high2lowuid(getuid()));
#endif
#ifdef TARGET_NR_getgid
    case TARGET_NR_getgid:
        return get_errno(high2lowgid(getgid()));
#endif
#ifdef TARGET_NR_geteuid
    case TARGET_NR_geteuid:
        return get_errno(high2lowuid(geteuid()));
#endif
#ifdef TARGET_NR_getegid
    case TARGET_NR_getegid:
        return get_errno(high2lowgid(getegid()));
#endif
    case TARGET_NR_setreuid:
        return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
    case TARGET_NR_setregid:
        return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
    case TARGET_NR_getgroups:
        {
            int gidsetsize = arg1;
            target_id *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0)
                return ret;
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2,
                                             gidsetsize * sizeof(target_id), 0);
                if (!target_grouplist)
                    return -TARGET_EFAULT;
                for (i = 0; i < ret; i++)
                    target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
                unlock_user(target_grouplist, arg2,
                            gidsetsize * sizeof(target_id));
            }
        }
        return ret;
    case TARGET_NR_setgroups:
        {
            int gidsetsize = arg1;
            target_id *target_grouplist;
            gid_t *grouplist = NULL;
            int i;
            if (gidsetsize) {
                grouplist = alloca(gidsetsize * sizeof(gid_t));
                target_grouplist = lock_user(VERIFY_READ, arg2,
                                             gidsetsize * sizeof(target_id), 1);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < gidsetsize; i++) {
                    grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
                }
                unlock_user(target_grouplist, arg2, 0);
            }
            return get_errno(setgroups(gidsetsize, grouplist));
        }
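    /*
     * The syscalls above are the legacy narrow uid/gid variants:
     * target_id is the target's short ID type, and the low2high/high2low
     * helpers convert between 16-bit and host-width IDs (taking care of
     * the -1 "no change" value). The *32 variants further down pass IDs
     * through unconverted.
     */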
    case TARGET_NR_fchown:
        return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
#if defined(TARGET_NR_fchownat)
    case TARGET_NR_fchownat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
                                 low2highgid(arg4), arg5));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_setresuid
    case TARGET_NR_setresuid:
        return get_errno(sys_setresuid(low2highuid(arg1),
                                       low2highuid(arg2),
                                       low2highuid(arg3)));
#endif
#ifdef TARGET_NR_getresuid
    case TARGET_NR_getresuid:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_id(high2lowuid(ruid), arg1)
                    || put_user_id(high2lowuid(euid), arg2)
                    || put_user_id(high2lowuid(suid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setresgid
    case TARGET_NR_setresgid:
        return get_errno(sys_setresgid(low2highgid(arg1),
                                       low2highgid(arg2),
                                       low2highgid(arg3)));
#endif
#ifdef TARGET_NR_getresgid
    case TARGET_NR_getresgid:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_id(high2lowgid(rgid), arg1)
                    || put_user_id(high2lowgid(egid), arg2)
                    || put_user_id(high2lowgid(sgid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown
    case TARGET_NR_chown:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_setuid:
        return get_errno(sys_setuid(low2highuid(arg1)));
    case TARGET_NR_setgid:
        return get_errno(sys_setgid(low2highgid(arg1)));
    case TARGET_NR_setfsuid:
        return get_errno(setfsuid(arg1));
    case TARGET_NR_setfsgid:
        return get_errno(setfsgid(arg1));
#ifdef TARGET_NR_lchown32
    case TARGET_NR_lchown32:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid32
    case TARGET_NR_getuid32:
        return get_errno(getuid());
#endif
#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxuid:
        {
            uid_t euid;
            euid = geteuid();
            cpu_env->ir[IR_A4] = euid;
        }
        return get_errno(getuid());
#endif
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxgid:
        {
            uid_t egid;
            egid = getegid();
            cpu_env->ir[IR_A4] = egid;
        }
        return get_errno(getgid());
#endif
#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_getsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_GSI_IEEE_FP_CONTROL:
            {
                uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
                uint64_t swcr = cpu_env->swcr;

                swcr &= ~SWCR_STATUS_MASK;
                swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;

                if (put_user_u64(swcr, arg2)) {
                    return -TARGET_EFAULT;
                }
                ret = 0;
            }
            break;

          /* case GSI_IEEE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel.
             case GSI_UACPROC:
             -- Retrieves current unaligned access state; not much used.
             case GSI_PROC_TYPE:
             -- Retrieves implver information; surely not used.
             case GSI_GET_HWRPB:
             -- Grabs a copy of the HWRPB; surely not used.
          */
        }
        return ret;
#endif
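    /*
     * On Alpha, the IEEE software-completion control word (SWCR) is split
     * between QEMU state (trap-enable and mapping bits kept in env->swcr)
     * and status bits derived from the emulated FPCR; GSI_IEEE_FP_CONTROL
     * above reassembles the user-visible value from both sources, and
     * SSI_IEEE_FP_CONTROL below performs the inverse split.
     */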
#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_setsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_SSI_IEEE_FP_CONTROL:
            {
                uint64_t swcr, fpcr;

                if (get_user_u64(swcr, arg2)) {
                    return -TARGET_EFAULT;
                }

                /*
                 * The kernel calls swcr_update_status to update the
                 * status bits from the fpcr at every point that it
                 * could be queried.  Therefore, we store the status
                 * bits only in FPCR.
                 */
                cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);

                fpcr = cpu_alpha_load_fpcr(cpu_env);
                fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
                fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
                cpu_alpha_store_fpcr(cpu_env, fpcr);
                ret = 0;
            }
            break;

          case TARGET_SSI_IEEE_RAISE_EXCEPTION:
            {
                uint64_t exc, fpcr, fex;

                if (get_user_u64(exc, arg2)) {
                    return -TARGET_EFAULT;
                }
                exc &= SWCR_STATUS_MASK;
                fpcr = cpu_alpha_load_fpcr(cpu_env);

                /* Old exceptions are not signaled.  */
                fex = alpha_ieee_fpcr_to_swcr(fpcr);
                fex = exc & ~fex;
                fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
                fex &= (cpu_env)->swcr;

                /* Update the hardware fpcr.  */
                fpcr |= alpha_ieee_swcr_to_fpcr(exc);
                cpu_alpha_store_fpcr(cpu_env, fpcr);

                if (fex) {
                    int si_code = TARGET_FPE_FLTUNK;
                    target_siginfo_t info;

                    if (fex & SWCR_TRAP_ENABLE_DNO) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INE) {
                        si_code = TARGET_FPE_FLTRES;
                    }
                    if (fex & SWCR_TRAP_ENABLE_UNF) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_OVF) {
                        si_code = TARGET_FPE_FLTOVF;
                    }
                    if (fex & SWCR_TRAP_ENABLE_DZE) {
                        si_code = TARGET_FPE_FLTDIV;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INV) {
                        si_code = TARGET_FPE_FLTINV;
                    }

                    info.si_signo = SIGFPE;
                    info.si_errno = 0;
                    info.si_code = si_code;
                    info._sifields._sigfault._addr = (cpu_env)->pc;
                    queue_signal(cpu_env, info.si_signo,
                                 QEMU_SI_FAULT, &info);
                }
                ret = 0;
            }
            break;

          /* case SSI_NVPAIRS:
             -- Used with SSIN_UACPROC to enable unaligned accesses.
             case SSI_IEEE_STATE_AT_SIGNAL:
             case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel
          */
        }
        return ret;
#endif
#ifdef TARGET_NR_osf_sigprocmask
    /* Alpha specific.  */
    case TARGET_NR_osf_sigprocmask:
        {
            abi_ulong mask;
            int how;
            sigset_t set, oldset;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);
            ret = do_sigprocmask(how, &set, &oldset);
            if (!ret) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_getgid32
    case TARGET_NR_getgid32:
        return get_errno(getgid());
#endif
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        return get_errno(geteuid());
#endif
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        return get_errno(getegid());
#endif
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        return get_errno(setreuid(arg1, arg2));
#endif
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        return get_errno(setregid(arg1, arg2));
#endif
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        {
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0)
                return ret;
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2,
                                             gidsetsize * 4, 0);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < ret; i++)
                    target_grouplist[i] = tswap32(grouplist[i]);
                unlock_user(target_grouplist, arg2, gidsetsize * 4);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
        {
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            target_grouplist = lock_user(VERIFY_READ, arg2,
                                         gidsetsize * 4, 1);
            if (!target_grouplist) {
                return -TARGET_EFAULT;
            }
            for (i = 0; i < gidsetsize; i++)
                grouplist[i] = tswap32(target_grouplist[i]);
            unlock_user(target_grouplist, arg2, 0);
            return get_errno(setgroups(gidsetsize, grouplist));
        }
#endif
#ifdef TARGET_NR_fchown32
    case TARGET_NR_fchown32:
        return get_errno(fchown(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_setresuid32
    case TARGET_NR_setresuid32:
        return get_errno(sys_setresuid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresuid32
    case TARGET_NR_getresuid32:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_u32(ruid, arg1)
                    || put_user_u32(euid, arg2)
                    || put_user_u32(suid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setresgid32
    case TARGET_NR_setresgid32:
        return get_errno(sys_setresgid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresgid32
    case TARGET_NR_getresgid32:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_u32(rgid, arg1)
                    || put_user_u32(egid, arg2)
                    || put_user_u32(sgid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        return get_errno(sys_setuid(arg1));
#endif
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        return get_errno(sys_setgid(arg1));
#endif
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        return get_errno(setfsuid(arg1));
#endif
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        return get_errno(setfsgid(arg1));
#endif
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
            if (!a) {
                return -TARGET_ENOMEM;
            }
            p = lock_user_string(arg3);
            if (!p) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(mincore(a, arg2, p));
                unlock_user(p, arg3, ret);
            }
            unlock_user(a, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        /* arm_fadvise64_64 looks like fadvise64_64 but
         * with different argument order: fd, advice, offset, len
         * rather than the usual fd, offset, len, advice.
         * Note that offset and len are both 64-bit so appear as
         * pairs of 32-bit registers.
         */
        ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
                            target_offset64(arg5, arg6), arg2);
        return -host_to_target_errno(ret);
#endif

#if TARGET_ABI_BITS == 32

#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#if defined(TARGET_PPC) || defined(TARGET_XTENSA)
        /* 6 args: fd, advice, offset (high, low), len (high, low) */
        ret = arg2;
        arg2 = arg3;
        arg3 = arg4;
        arg4 = arg5;
        arg5 = arg6;
        arg6 = ret;
#else
        /* 6 args: fd, offset (high, low), len (high, low), advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in (5,6) and advice in 7 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
            arg6 = arg7;
        }
#endif
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
                            target_offset64(arg4, arg5), arg6);
        return -host_to_target_errno(ret);
#endif

#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
        /* 5 args: fd, offset (high, low), len, advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in 5 and advice in 6 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
        }
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
        return -host_to_target_errno(ret);
#endif

#else /* not a 32-bit ABI */
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
#endif
#ifdef TARGET_S390X
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        }
#endif
        return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
#endif
#endif /* end of 64-bit ABI fadvise handling */
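    /*
     * Unlike most host calls in this file, posix_fadvise() returns the
     * error number directly instead of setting errno, which is why the
     * fadvise cases return -host_to_target_errno(ret) rather than going
     * through get_errno().
     */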
#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        return target_madvise(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_fcntl64
    case TARGET_NR_fcntl64:
    {
        int cmd;
        struct flock64 fl;
        from_flock64_fn *copyfrom = copy_from_user_flock64;
        to_flock64_fn *copyto = copy_to_user_flock64;

#ifdef TARGET_ARM
        if (!cpu_env->eabi) {
            copyfrom = copy_from_user_oabi_flock64;
            copyto = copy_to_user_oabi_flock64;
        }
#endif

        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL) {
            return cmd;
        }

        switch (arg2) {
        case TARGET_F_GETLK64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            if (ret == 0) {
                ret = copyto(arg3, &fl);
            }
            break;

        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            break;
        default:
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        return 0;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        return TARGET_PAGE_SIZE;
#endif
    case TARGET_NR_gettid:
        return get_errno(sys_gettid());
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32
        if (regpairs_aligned(cpu_env, num)) {
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
        ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        return ret;
#endif
#ifdef CONFIG_ATTR
#ifdef TARGET_NR_setxattr
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    {
        void *p, *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        p = lock_user_string(arg1);
        if (p) {
            if (num == TARGET_NR_listxattr) {
                ret = get_errno(listxattr(p, b, arg3));
            } else {
                ret = get_errno(llistxattr(p, b, arg3));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_flistxattr:
    {
        void *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(flistxattr(arg1, b, arg3));
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_setxattr) {
                    ret = get_errno(setxattr(p, n, v, arg4, arg5));
                } else {
                    ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_fsetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_getxattr) {
                    ret = get_errno(getxattr(p, n, v, arg4));
                } else {
                    ret = get_errno(lgetxattr(p, n, v, arg4));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_fgetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fgetxattr(arg1, n, v, arg4));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
        {
            void *p, *n;
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_removexattr) {
                    ret = get_errno(removexattr(p, n));
                } else {
                    ret = get_errno(lremovexattr(p, n));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
        }
        return ret;
    case TARGET_NR_fremovexattr:
        {
            void *n;
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fremovexattr(arg1, n));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
        }
        return ret;
#endif
#endif /* CONFIG_ATTR */
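    /*
     * Note the *xattr API convention: a NULL buffer with size 0 is a
     * valid probe for the required buffer length, which is why the
     * lock_user() calls above are skipped when the guest passes a null
     * value/list pointer.
     */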
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
        cpu_env->active_tc.CP0_UserLocal = arg1;
        return 0;
#elif defined(TARGET_CRIS)
        if (arg1 & 0xff) {
            ret = -TARGET_EINVAL;
        } else {
            cpu_env->pregs[PR_PID] = arg1;
            ret = 0;
        }
        return ret;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_set_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            ts->tp_value = arg1;
            return 0;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_get_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            return ts->tp_value;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        return -TARGET_ENOSYS;
#endif
#ifdef TARGET_NR_clock_settime
    case TARGET_NR_clock_settime:
    {
        struct timespec ts;

        ret = target_to_host_timespec(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_settime64
    case TARGET_NR_clock_settime64:
    {
        struct timespec ts;

        ret = target_to_host_timespec64(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime64
    case TARGET_NR_clock_gettime64:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec64(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres_time64
    case TARGET_NR_clock_getres_time64:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec64(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;
        if (target_to_host_timespec(&ts, arg3)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));
        /*
         * if the call is interrupted by a signal handler, it fails
         * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
         * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
         */
        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec(arg4, &ts)) {
            return -TARGET_EFAULT;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep_time64
    case TARGET_NR_clock_nanosleep_time64:
    {
        struct timespec ts;

        if (target_to_host_timespec64(&ts, arg3)) {
            return -TARGET_EFAULT;
        }

        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));

        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec64(arg4, &ts)) {
            return -TARGET_EFAULT;
        }
        return ret;
    }
#endif
#if defined(TARGET_NR_set_tid_address)
    case TARGET_NR_set_tid_address:
    {
        TaskState *ts = cpu->opaque;
        ts->child_tidptr = arg1;
        /* do not call host set_tid_address() syscall, instead return tid() */
        return get_errno(sys_gettid());
    }
#endif

    case TARGET_NR_tkill:
        return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));

    case TARGET_NR_tgkill:
        return get_errno(safe_tgkill((int)arg1, (int)arg2,
                         target_to_host_signal(arg3)));

#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
    case TARGET_NR_get_robust_list:
        /* The ABI for supporting robust futexes has userspace pass
         * the kernel a pointer to a linked list which is updated by
         * userspace after the syscall; the list is walked by the kernel
         * when the thread exits. Since the linked list in QEMU guest
         * memory isn't a valid linked list for the host and we have
         * no way to reliably intercept the thread-death event, we can't
         * support these. Silently return ENOSYS so that guest userspace
         * falls back to a non-robust futex implementation (which should
         * be OK except in the corner case of the guest crashing while
         * holding a mutex that is shared with another process via
         * shared memory).
         */
        return -TARGET_ENOSYS;
#endif
#if defined(TARGET_NR_utimensat)
    case TARGET_NR_utimensat:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec(ts + 1, arg3 +
                                            sizeof(struct target_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2) {
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            } else {
                if (!(p = lock_user_string(arg2))) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_utimensat_time64
    case TARGET_NR_utimensat_time64:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec64(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec64(ts + 1, arg3 +
                                     sizeof(struct target__kernel_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2) {
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            } else {
                p = lock_user_string(arg2);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
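    /*
     * Per utimensat(2), a NULL times pointer means "set both timestamps
     * to the current time"; that is why tsp is forwarded as NULL rather
     * than being rejected when the guest passes no timespec array.
     */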
#ifdef TARGET_NR_futex
    case TARGET_NR_futex:
        return do_futex(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_futex_time64
    case TARGET_NR_futex_time64:
        return do_futex_time64(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef CONFIG_INOTIFY
#if defined(TARGET_NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(inotify_init());
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
    case TARGET_NR_inotify_init1:
        ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
                                          fcntl_flags_tbl)));
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#if defined(TARGET_NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        return get_errno(inotify_rm_watch(arg1, arg2));
#endif
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            struct mq_attr posix_mq_attr;
            struct mq_attr *pposix_mq_attr;
            int host_flags;

            host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
            pposix_mq_attr = NULL;
            if (arg4) {
                if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
                    return -TARGET_EFAULT;
                }
                pposix_mq_attr = &posix_mq_attr;
            }
            p = lock_user_string(arg1 - 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
            unlock_user(p, arg1, 0);
        }
        return ret;

    case TARGET_NR_mq_unlink:
        p = lock_user_string(arg1 - 1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(mq_unlink(p));
        unlock_user(p, arg1, 0);
        return ret;

#ifdef TARGET_NR_mq_timedsend
    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user(p, arg2, arg3);
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedsend_time64
    case TARGET_NR_mq_timedsend_time64:
        {
            struct timespec ts;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user(p, arg2, arg3);
        }
        return ret;
#endif

#ifdef TARGET_NR_mq_timedreceive
    case TARGET_NR_mq_timedreceive:
        {
            struct timespec ts;
            unsigned int prio;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user(p, arg2, arg3);
            if (arg4 != 0) {
                put_user_u32(prio, arg4);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedreceive_time64
    case TARGET_NR_mq_timedreceive_time64:
        {
            struct timespec ts;
            unsigned int prio;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user(p, arg2, arg3);
            if (arg4 != 0) {
                put_user_u32(prio, arg4);
            }
        }
        return ret;
#endif

    /* Not implemented for now... */
    /* case TARGET_NR_mq_notify: */

    case TARGET_NR_mq_getsetattr:
        {
            struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
            ret = 0;
            if (arg2 != 0) {
                copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
                ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
                                           &posix_mq_attr_out));
            } else if (arg3 != 0) {
                ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
            }
            if (ret == 0 && arg3 != 0) {
                copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
            }
        }
        return ret;
#endif
#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            ret = get_errno(tee(arg1, arg2, arg3, arg4));
        }
        return ret;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                if (get_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
                ploff_in = &loff_in;
            }
            if (arg4) {
                if (get_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
            if (arg2) {
                if (put_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
            }
            if (arg4) {
                if (put_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
    {
        int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
        if (arg2 & TARGET_O_NONBLOCK) {
            host_flags |= O_NONBLOCK;
        }
        if (arg2 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;
        }
        ret = get_errno(eventfd(arg1, host_flags));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
    }
#endif
#endif /* CONFIG_EVENTFD */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(TARGET_NR_sync_file_range2) || \
    defined(TARGET_NR_arm_sync_file_range)
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
#endif
#if defined(TARGET_NR_arm_sync_file_range)
    case TARGET_NR_arm_sync_file_range:
#endif
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        return ret;
#endif
#endif /* CONFIG_SYNC_FILE_RANGE */
#if defined(TARGET_NR_signalfd4)
    case TARGET_NR_signalfd4:
        return do_signalfd4(arg1, arg2, arg4);
#endif
#if defined(TARGET_NR_signalfd)
    case TARGET_NR_signalfd:
        return do_signalfd4(arg1, arg2, 0);
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        return get_errno(epoll_create(arg1));
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            if (arg2 != EPOLL_CTL_DEL) {
                struct target_epoll_event *target_ep;
                if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                    return -TARGET_EFAULT;
                }
                ep.events = tswap32(target_ep->events);
                /*
                 * The epoll_data_t union is just opaque data to the kernel,
                 * so we transfer all 64 bits across and need not worry what
                 * actual data type it is.
                 */
                ep.data.u64 = tswap64(target_ep->data.u64);
                unlock_user_struct(target_ep, arg4, 0);
            }
            /*
             * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
             * non-null pointer, even though this argument is ignored.
             */
            epp = &ep;
        }
        return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
    }
#endif

#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(TARGET_NR_epoll_pwait)
    case TARGET_NR_epoll_pwait:
#endif
    {
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
            return -TARGET_EINVAL;
        }

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            return -TARGET_EFAULT;
        }

        ep = g_try_new(struct epoll_event, maxevents);
        if (!ep) {
            unlock_user(target_ep, arg2, 0);
            return -TARGET_ENOMEM;
        }

        switch (num) {
#if defined(TARGET_NR_epoll_pwait)
        case TARGET_NR_epoll_pwait:
        {
            sigset_t *set = NULL;

            if (arg5) {
                ret = process_sigsuspend_mask(&set, arg5, arg6);
                if (ret != 0) {
                    break;
                }
            }

            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             set, SIGSET_T_SIZE));

            if (set) {
                finish_sigsuspend_mask(ret);
            }
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             NULL, 0));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
            unlock_user(target_ep, arg2,
                        ret * sizeof(struct target_epoll_event));
        } else {
            unlock_user(target_ep, arg2, 0);
        }
        g_free(ep);
        return ret;
    }
#endif
#endif /* CONFIG_EPOLL */
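    /*
     * TARGET_EP_MAX_EVENTS bounds the temporary host event buffer above;
     * it plays the same role as the kernel's own sanity limit on the
     * maxevents argument to epoll_wait().
     */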
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        int resource = target_to_host_resource(arg2);

        if (arg3 && (resource != RLIMIT_AS &&
                     resource != RLIMIT_DATA &&
                     resource != RLIMIT_STACK)) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                return -TARGET_EFAULT;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                return -TARGET_EFAULT;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (name) {
            ret = get_errno(gethostname(name, arg2));
            unlock_user(name, arg1, arg2);
        } else {
            ret = -TARGET_EFAULT;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* should use start_exclusive from main.c */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
            ret = 0xdeadbeef;
        }
        if (mem_value == arg2)
            put_user_u32(arg1, arg6);
        return mem_value;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
        /* Like the kernel implementation and the
           qemu arm barrier, no-op this? */
        return 0;
#endif

#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers + timer_index;

            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    return ret;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                phtimer = NULL;
            } else {
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    }
#endif
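    /*
     * Guest timer ids are an index into g_posix_timers tagged with
     * TIMER_MAGIC in the upper bits; get_timer_id() (used by the cases
     * below) validates the tag and recovers the index, so stale or
     * corrupted ids fail cleanly instead of dereferencing a bogus host
     * timer.
     */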
#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_settime64
    case TARGET_NR_timer_settime64:
    {
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec64(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_gettime64
    case TARGET_NR_timer_gettime64:
    {
        /* args: timer_t timerid, struct itimerspec64 *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec64(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_delete(htimer));
            g_posix_timers[timerid] = 0;
        }
        return ret;
    }
#endif
#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        return get_errno(timerfd_create(arg1,
                         target_to_host_bitmask(arg2, fcntl_flags_tbl)));
#endif

#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime64:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
        {
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime64:
        {
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec64(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    case TARGET_NR_ioprio_get:
        return get_errno(ioprio_get(arg1, arg2));
#endif

#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    case TARGET_NR_ioprio_set:
        return get_errno(ioprio_set(arg1, arg2, arg3));
#endif

#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        return get_errno(setns(arg1, arg2));
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        return get_errno(unshare(arg1));
#endif
#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
    case TARGET_NR_kcmp:
        return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
#endif
#ifdef TARGET_NR_swapcontext
    case TARGET_NR_swapcontext:
        /* PowerPC specific.  */
        return do_swapcontext(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_memfd_create
    case TARGET_NR_memfd_create:
        p = lock_user_string(arg1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(memfd_create(p, arg2));
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined TARGET_NR_membarrier && defined __NR_membarrier
    case TARGET_NR_membarrier:
        return get_errno(membarrier(arg1, arg2));
#endif

#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
    case TARGET_NR_copy_file_range:
        {
            loff_t inoff, outoff;
            loff_t *pinoff = NULL, *poutoff = NULL;

            if (arg2) {
                if (get_user_u64(inoff, arg2)) {
                    return -TARGET_EFAULT;
                }
                pinoff = &inoff;
            }
            if (arg4) {
                if (get_user_u64(outoff, arg4)) {
                    return -TARGET_EFAULT;
                }
                poutoff = &outoff;
            }
            /* Do not sign-extend the count parameter. */
            ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
                                                 (abi_ulong)arg5, arg6));
            if (!is_error(ret) && ret > 0) {
                if (arg2) {
                    if (put_user_u64(inoff, arg2)) {
                        return -TARGET_EFAULT;
                    }
                }
                if (arg4) {
                    if (put_user_u64(outoff, arg4)) {
                        return -TARGET_EFAULT;
                    }
                }
            }
        }
        return ret;
#endif
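    /*
     * The (abi_ulong) cast above keeps a 32-bit guest's length argument
     * from being sign-extended into a huge 64-bit count when its top bit
     * is set; the host syscall then sees the same unsigned value the
     * guest passed.
     */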
#if defined(TARGET_NR_pivot_root)
    case TARGET_NR_pivot_root:
        {
            void *p2;
            p = lock_user_string(arg1); /* new_root */
            p2 = lock_user_string(arg2); /* put_old */
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(pivot_root(p, p2));
            }
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif

    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
        return -TARGET_ENOSYS;
    }
    return ret;
}
abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
                    abi_long arg2, abi_long arg3, abi_long arg4,
                    abi_long arg5, abi_long arg6, abi_long arg7,
                    abi_long arg8)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long ret;

#ifdef DEBUG_ERESTARTSYS
    /* Debug-only code for exercising the syscall-restart code paths
     * in the per-architecture cpu main loops: restart every syscall
     * the guest makes once before letting it through.
     */
    {
        static bool flag;
        flag = !flag;
        if (flag) {
            return -QEMU_ERESTARTSYS;
        }
    }
#endif

    record_syscall_start(cpu, num, arg1,
                         arg2, arg3, arg4, arg5, arg6, arg7, arg8);

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
    }

    ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
                      arg5, arg6, arg7, arg8);

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_syscall_ret(cpu_env, num, ret, arg1, arg2,
                          arg3, arg4, arg5, arg6);
    }

    record_syscall_return(cpu, num, ret);
    return ret;
}