4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
31 #include <sys/mount.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
38 #include <linux/capability.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
46 #include <sys/times.h>
49 #include <sys/statfs.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include <linux/icmpv6.h>
59 #include <linux/if_tun.h>
60 #include <linux/errqueue.h>
61 #include <linux/random.h>
63 #include <sys/timerfd.h>
66 #include <sys/eventfd.h>
69 #include <sys/epoll.h>
72 #include "qemu/xattr.h"
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
77 #ifdef HAVE_SYS_KCOV_H
81 #define termios host_termios
82 #define winsize host_winsize
83 #define termio host_termio
84 #define sgttyb host_sgttyb /* same as target */
85 #define tchars host_tchars /* same as target */
86 #define ltchars host_ltchars /* same as target */
88 #include <linux/termios.h>
89 #include <linux/unistd.h>
90 #include <linux/cdrom.h>
91 #include <linux/hdreg.h>
92 #include <linux/soundcard.h>
94 #include <linux/mtio.h>
97 #if defined(CONFIG_FIEMAP)
98 #include <linux/fiemap.h>
100 #include <linux/fb.h>
101 #if defined(CONFIG_USBFS)
102 #include <linux/usbdevice_fs.h>
103 #include <linux/usb/ch9.h>
105 #include <linux/vt.h>
106 #include <linux/dm-ioctl.h>
107 #include <linux/reboot.h>
108 #include <linux/route.h>
109 #include <linux/filter.h>
110 #include <linux/blkpg.h>
111 #include <netpacket/packet.h>
112 #include <linux/netlink.h>
113 #include <linux/if_alg.h>
114 #include <linux/rtc.h>
115 #include <sound/asound.h>
117 #include <linux/btrfs.h>
120 #include <libdrm/drm.h>
121 #include <libdrm/i915_drm.h>
123 #include "linux_loop.h"
127 #include "qemu/guest-random.h"
128 #include "qemu/selfmap.h"
129 #include "user/syscall-trace.h"
130 #include "qapi/error.h"
131 #include "fd-trans.h"
135 #define CLONE_IO 0x80000000 /* Clone io context */
138 /* We can't directly call the host clone syscall, because this will
139 * badly confuse libc (breaking mutexes, for example). So we must
140 * divide clone flags into:
141 * * flag combinations that look like pthread_create()
142 * * flag combinations that look like fork()
143 * * flags we can implement within QEMU itself
144 * * flags we can't support and will return an error for
146 /* For thread creation, all these flags must be present; for
147 * fork, none must be present.
149 #define CLONE_THREAD_FLAGS \
150 (CLONE_VM | CLONE_FS | CLONE_FILES | \
151 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
153 /* These flags are ignored:
154 * CLONE_DETACHED is now ignored by the kernel;
155 * CLONE_IO is just an optimisation hint to the I/O scheduler
157 #define CLONE_IGNORED_FLAGS \
158 (CLONE_DETACHED | CLONE_IO)
160 /* Flags for fork which we can implement within QEMU itself */
161 #define CLONE_OPTIONAL_FORK_FLAGS \
162 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
163 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
165 /* Flags for thread creation which we can implement within QEMU itself */
166 #define CLONE_OPTIONAL_THREAD_FLAGS \
167 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
168 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
170 #define CLONE_INVALID_FORK_FLAGS \
171 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
173 #define CLONE_INVALID_THREAD_FLAGS \
174 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
175 CLONE_IGNORED_FLAGS))
177 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
178 * have almost all been allocated. We cannot support any of
179 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
180 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
181 * The checks against the invalid thread masks above will catch these.
182 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
185 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
186 * once. This exercises the codepaths for restart.
188 //#define DEBUG_ERESTARTSYS
190 //#include <linux/msdos_fs.h>
191 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
192 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
/*
 * _syscallN(type, name, type1, arg1, ...): define a static wrapper
 * function "name" taking N arguments that traps directly to the host
 * syscall __NR_<name> via syscall(2), bypassing any libc wrapper.
 * Used for host syscalls glibc does not expose (or wraps unsuitably).
 */
#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
249 #define __NR_sys_uname __NR_uname
250 #define __NR_sys_getcwd1 __NR_getcwd
251 #define __NR_sys_getdents __NR_getdents
252 #define __NR_sys_getdents64 __NR_getdents64
253 #define __NR_sys_getpriority __NR_getpriority
254 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
255 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
256 #define __NR_sys_syslog __NR_syslog
257 #if defined(__NR_futex)
258 # define __NR_sys_futex __NR_futex
260 #if defined(__NR_futex_time64)
261 # define __NR_sys_futex_time64 __NR_futex_time64
263 #define __NR_sys_inotify_init __NR_inotify_init
264 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
265 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
266 #define __NR_sys_statx __NR_statx
268 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
269 #define __NR__llseek __NR_lseek
272 /* Newer kernel ports have llseek() instead of _llseek() */
273 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
274 #define TARGET_NR__llseek TARGET_NR_llseek
277 #define __NR_sys_gettid __NR_gettid
278 _syscall0(int, sys_gettid
)
280 /* For the 64-bit guest on 32-bit host case we must emulate
281 * getdents using getdents64, because otherwise the host
282 * might hand us back more dirent records than we can fit
283 * into the guest buffer after structure format conversion.
284 * Otherwise we emulate getdents with getdents if the host has it.
286 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
287 #define EMULATE_GETDENTS_WITH_GETDENTS
290 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
291 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
293 #if (defined(TARGET_NR_getdents) && \
294 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
295 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
296 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
298 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
299 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
300 loff_t
*, res
, uint
, wh
);
302 _syscall3(int, sys_rt_sigqueueinfo
, pid_t
, pid
, int, sig
, siginfo_t
*, uinfo
)
303 _syscall4(int, sys_rt_tgsigqueueinfo
, pid_t
, pid
, pid_t
, tid
, int, sig
,
305 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
306 #ifdef __NR_exit_group
307 _syscall1(int,exit_group
,int,error_code
)
309 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
310 _syscall1(int,set_tid_address
,int *,tidptr
)
312 #if defined(__NR_futex)
313 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
314 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
316 #if defined(__NR_futex_time64)
317 _syscall6(int,sys_futex_time64
,int *,uaddr
,int,op
,int,val
,
318 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
320 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
321 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
322 unsigned long *, user_mask_ptr
);
323 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
324 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
325 unsigned long *, user_mask_ptr
);
326 #define __NR_sys_getcpu __NR_getcpu
327 _syscall3(int, sys_getcpu
, unsigned *, cpu
, unsigned *, node
, void *, tcache
);
328 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
330 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
331 struct __user_cap_data_struct
*, data
);
332 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
333 struct __user_cap_data_struct
*, data
);
334 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
335 _syscall2(int, ioprio_get
, int, which
, int, who
)
337 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
338 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
340 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
341 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
344 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
345 _syscall5(int, kcmp
, pid_t
, pid1
, pid_t
, pid2
, int, type
,
346 unsigned long, idx1
, unsigned long, idx2
)
350 * It is assumed that struct statx is architecture independent.
352 #if defined(TARGET_NR_statx) && defined(__NR_statx)
353 _syscall5(int, sys_statx
, int, dirfd
, const char *, pathname
, int, flags
,
354 unsigned int, mask
, struct target_statx
*, statxbuf
)
356 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
357 _syscall2(int, membarrier
, int, cmd
, int, flags
)
360 static bitmask_transtbl fcntl_flags_tbl
[] = {
361 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
362 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
363 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
364 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
365 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
366 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
367 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
368 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
369 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
370 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
371 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
372 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
373 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
374 #if defined(O_DIRECT)
375 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
377 #if defined(O_NOATIME)
378 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
380 #if defined(O_CLOEXEC)
381 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
384 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
386 #if defined(O_TMPFILE)
387 { TARGET_O_TMPFILE
, TARGET_O_TMPFILE
, O_TMPFILE
, O_TMPFILE
},
389 /* Don't terminate the list prematurely on 64-bit host+guest. */
390 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
391 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
396 _syscall2(int, sys_getcwd1
, char *, buf
, size_t, size
)
398 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
399 #if defined(__NR_utimensat)
400 #define __NR_sys_utimensat __NR_utimensat
401 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
402 const struct timespec
*,tsp
,int,flags
)
/*
 * Fallback stub used when the host has no utimensat syscall: always
 * fail with ENOSYS so the caller reports the feature as unavailable.
 * (Reconstructed body — only the signature survived extraction;
 * matches the conventional ENOSYS-stub pattern used by this file.)
 */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
411 #endif /* TARGET_NR_utimensat */
413 #ifdef TARGET_NR_renameat2
414 #if defined(__NR_renameat2)
415 #define __NR_sys_renameat2 __NR_renameat2
416 _syscall5(int, sys_renameat2
, int, oldfd
, const char *, old
, int, newfd
,
417 const char *, new, unsigned int, flags
)
419 static int sys_renameat2(int oldfd
, const char *old
,
420 int newfd
, const char *new, int flags
)
423 return renameat(oldfd
, old
, newfd
, new);
429 #endif /* TARGET_NR_renameat2 */
431 #ifdef CONFIG_INOTIFY
432 #include <sys/inotify.h>
434 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin wrapper over the libc inotify_init(2) entry point. */
static int sys_inotify_init(void)
{
  return (inotify_init());
}
440 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Thin wrapper over libc inotify_add_watch(2); returns the watch
 * descriptor or -1 on error. */
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
446 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Thin wrapper over libc inotify_rm_watch(2); 0 on success, -1 on error. */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
452 #ifdef CONFIG_INOTIFY1
453 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Thin wrapper over libc inotify_init1(2) (inotify_init with flags). */
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
461 /* Userspace can usually survive runtime without inotify */
462 #undef TARGET_NR_inotify_init
463 #undef TARGET_NR_inotify_init1
464 #undef TARGET_NR_inotify_add_watch
465 #undef TARGET_NR_inotify_rm_watch
466 #endif /* CONFIG_INOTIFY */
468 #if defined(TARGET_NR_prlimit64)
469 #ifndef __NR_prlimit64
470 # define __NR_prlimit64 -1
472 #define __NR_sys_prlimit64 __NR_prlimit64
473 /* The glibc rlimit structure may not be that used by the underlying syscall */
474 struct host_rlimit64
{
478 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
479 const struct host_rlimit64
*, new_limit
,
480 struct host_rlimit64
*, old_limit
)
484 #if defined(TARGET_NR_timer_create)
485 /* Maximum of 32 active POSIX timers allowed at any one time. */
486 static timer_t g_posix_timers
[32] = { 0, } ;
488 static inline int next_free_host_timer(void)
491 /* FIXME: Does finding the next free slot require a lock? */
492 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
493 if (g_posix_timers
[k
] == 0) {
494 g_posix_timers
[k
] = (timer_t
) 1;
502 #define ERRNO_TABLE_SIZE 1200
504 /* target_to_host_errno_table[] is initialized from
505 * host_to_target_errno_table[] in syscall_init(). */
506 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
510 * This list is the union of errno values overridden in asm-<arch>/errno.h
511 * minus the errnos that are not actually generic to all archs.
513 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
514 [EAGAIN
] = TARGET_EAGAIN
,
515 [EIDRM
] = TARGET_EIDRM
,
516 [ECHRNG
] = TARGET_ECHRNG
,
517 [EL2NSYNC
] = TARGET_EL2NSYNC
,
518 [EL3HLT
] = TARGET_EL3HLT
,
519 [EL3RST
] = TARGET_EL3RST
,
520 [ELNRNG
] = TARGET_ELNRNG
,
521 [EUNATCH
] = TARGET_EUNATCH
,
522 [ENOCSI
] = TARGET_ENOCSI
,
523 [EL2HLT
] = TARGET_EL2HLT
,
524 [EDEADLK
] = TARGET_EDEADLK
,
525 [ENOLCK
] = TARGET_ENOLCK
,
526 [EBADE
] = TARGET_EBADE
,
527 [EBADR
] = TARGET_EBADR
,
528 [EXFULL
] = TARGET_EXFULL
,
529 [ENOANO
] = TARGET_ENOANO
,
530 [EBADRQC
] = TARGET_EBADRQC
,
531 [EBADSLT
] = TARGET_EBADSLT
,
532 [EBFONT
] = TARGET_EBFONT
,
533 [ENOSTR
] = TARGET_ENOSTR
,
534 [ENODATA
] = TARGET_ENODATA
,
535 [ETIME
] = TARGET_ETIME
,
536 [ENOSR
] = TARGET_ENOSR
,
537 [ENONET
] = TARGET_ENONET
,
538 [ENOPKG
] = TARGET_ENOPKG
,
539 [EREMOTE
] = TARGET_EREMOTE
,
540 [ENOLINK
] = TARGET_ENOLINK
,
541 [EADV
] = TARGET_EADV
,
542 [ESRMNT
] = TARGET_ESRMNT
,
543 [ECOMM
] = TARGET_ECOMM
,
544 [EPROTO
] = TARGET_EPROTO
,
545 [EDOTDOT
] = TARGET_EDOTDOT
,
546 [EMULTIHOP
] = TARGET_EMULTIHOP
,
547 [EBADMSG
] = TARGET_EBADMSG
,
548 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
549 [EOVERFLOW
] = TARGET_EOVERFLOW
,
550 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
551 [EBADFD
] = TARGET_EBADFD
,
552 [EREMCHG
] = TARGET_EREMCHG
,
553 [ELIBACC
] = TARGET_ELIBACC
,
554 [ELIBBAD
] = TARGET_ELIBBAD
,
555 [ELIBSCN
] = TARGET_ELIBSCN
,
556 [ELIBMAX
] = TARGET_ELIBMAX
,
557 [ELIBEXEC
] = TARGET_ELIBEXEC
,
558 [EILSEQ
] = TARGET_EILSEQ
,
559 [ENOSYS
] = TARGET_ENOSYS
,
560 [ELOOP
] = TARGET_ELOOP
,
561 [ERESTART
] = TARGET_ERESTART
,
562 [ESTRPIPE
] = TARGET_ESTRPIPE
,
563 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
564 [EUSERS
] = TARGET_EUSERS
,
565 [ENOTSOCK
] = TARGET_ENOTSOCK
,
566 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
567 [EMSGSIZE
] = TARGET_EMSGSIZE
,
568 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
569 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
570 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
571 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
572 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
573 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
574 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
575 [EADDRINUSE
] = TARGET_EADDRINUSE
,
576 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
577 [ENETDOWN
] = TARGET_ENETDOWN
,
578 [ENETUNREACH
] = TARGET_ENETUNREACH
,
579 [ENETRESET
] = TARGET_ENETRESET
,
580 [ECONNABORTED
] = TARGET_ECONNABORTED
,
581 [ECONNRESET
] = TARGET_ECONNRESET
,
582 [ENOBUFS
] = TARGET_ENOBUFS
,
583 [EISCONN
] = TARGET_EISCONN
,
584 [ENOTCONN
] = TARGET_ENOTCONN
,
585 [EUCLEAN
] = TARGET_EUCLEAN
,
586 [ENOTNAM
] = TARGET_ENOTNAM
,
587 [ENAVAIL
] = TARGET_ENAVAIL
,
588 [EISNAM
] = TARGET_EISNAM
,
589 [EREMOTEIO
] = TARGET_EREMOTEIO
,
590 [EDQUOT
] = TARGET_EDQUOT
,
591 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
592 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
593 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
594 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
595 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
596 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
597 [EALREADY
] = TARGET_EALREADY
,
598 [EINPROGRESS
] = TARGET_EINPROGRESS
,
599 [ESTALE
] = TARGET_ESTALE
,
600 [ECANCELED
] = TARGET_ECANCELED
,
601 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
602 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
604 [ENOKEY
] = TARGET_ENOKEY
,
607 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
610 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
613 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
616 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
618 #ifdef ENOTRECOVERABLE
619 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
622 [ENOMSG
] = TARGET_ENOMSG
,
625 [ERFKILL
] = TARGET_ERFKILL
,
628 [EHWPOISON
] = TARGET_EHWPOISON
,
632 static inline int host_to_target_errno(int err
)
634 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
635 host_to_target_errno_table
[err
]) {
636 return host_to_target_errno_table
[err
];
641 static inline int target_to_host_errno(int err
)
643 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
644 target_to_host_errno_table
[err
]) {
645 return target_to_host_errno_table
[err
];
650 static inline abi_long
get_errno(abi_long ret
)
653 return -host_to_target_errno(errno
);
658 const char *target_strerror(int err
)
660 if (err
== TARGET_ERESTARTSYS
) {
661 return "To be restarted";
663 if (err
== TARGET_QEMU_ESIGRETURN
) {
664 return "Successful exit from sigreturn";
667 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
670 return strerror(target_to_host_errno(err
));
/*
 * safe_syscallN(type, name, ...): define a static wrapper safe_<name>
 * that issues the host syscall through safe_syscall() (defined
 * elsewhere) instead of raw syscall(2) — per the surrounding
 * DEBUG_ERESTARTSYS notes this is the restart-aware entry point.
 */
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
720 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
721 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
722 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
723 int, flags
, mode_t
, mode
)
724 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
725 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
726 struct rusage
*, rusage
)
728 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
729 int, options
, struct rusage
*, rusage
)
730 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
731 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
732 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
733 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
734 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
736 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
737 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
738 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
741 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
742 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
744 #if defined(__NR_futex)
745 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
746 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
748 #if defined(__NR_futex_time64)
749 safe_syscall6(int,futex_time64
,int *,uaddr
,int,op
,int,val
, \
750 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
752 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
753 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
754 safe_syscall2(int, tkill
, int, tid
, int, sig
)
755 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
756 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
757 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
758 safe_syscall5(ssize_t
, preadv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
759 unsigned long, pos_l
, unsigned long, pos_h
)
760 safe_syscall5(ssize_t
, pwritev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
761 unsigned long, pos_l
, unsigned long, pos_h
)
762 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
764 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
765 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
766 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
767 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
768 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
769 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
770 safe_syscall2(int, flock
, int, fd
, int, operation
)
771 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
772 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
773 const struct timespec
*, uts
, size_t, sigsetsize
)
775 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
777 #if defined(TARGET_NR_nanosleep)
778 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
779 struct timespec
*, rem
)
781 #if defined(TARGET_NR_clock_nanosleep) || \
782 defined(TARGET_NR_clock_nanosleep_time64)
783 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
784 const struct timespec
*, req
, struct timespec
*, rem
)
788 safe_syscall5(int, ipc
, int, call
, long, first
, long, second
, long, third
,
791 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
792 void *, ptr
, long, fifth
)
796 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
800 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
801 long, msgtype
, int, flags
)
803 #ifdef __NR_semtimedop
804 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
805 unsigned, nsops
, const struct timespec
*, timeout
)
807 #if defined(TARGET_NR_mq_timedsend) || \
808 defined(TARGET_NR_mq_timedsend_time64)
809 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
810 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
812 #if defined(TARGET_NR_mq_timedreceive) || \
813 defined(TARGET_NR_mq_timedreceive_time64)
814 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
815 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
817 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
818 safe_syscall6(ssize_t
, copy_file_range
, int, infd
, loff_t
*, pinoff
,
819 int, outfd
, loff_t
*, poutoff
, size_t, length
,
823 /* We do ioctl like this rather than via safe_syscall3 to preserve the
824 * "third argument might be integer or pointer or not present" behaviour of
827 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
828 /* Similarly for fcntl. Note that callers must always:
829 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
830 * use the flock64 struct rather than unsuffixed flock
831 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
834 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
836 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
839 static inline int host_to_target_sock_type(int host_type
)
843 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
845 target_type
= TARGET_SOCK_DGRAM
;
848 target_type
= TARGET_SOCK_STREAM
;
851 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
855 #if defined(SOCK_CLOEXEC)
856 if (host_type
& SOCK_CLOEXEC
) {
857 target_type
|= TARGET_SOCK_CLOEXEC
;
861 #if defined(SOCK_NONBLOCK)
862 if (host_type
& SOCK_NONBLOCK
) {
863 target_type
|= TARGET_SOCK_NONBLOCK
;
870 static abi_ulong target_brk
;
871 static abi_ulong target_original_brk
;
872 static abi_ulong brk_page
;
874 void target_set_brk(abi_ulong new_brk
)
876 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
877 brk_page
= HOST_PAGE_ALIGN(target_brk
);
880 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
881 #define DEBUGF_BRK(message, args...)
883 /* do_brk() must return target values and target errnos. */
884 abi_long
do_brk(abi_ulong new_brk
)
886 abi_long mapped_addr
;
887 abi_ulong new_alloc_size
;
889 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
892 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
895 if (new_brk
< target_original_brk
) {
896 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
901 /* If the new brk is less than the highest page reserved to the
902 * target heap allocation, set it and we're almost done... */
903 if (new_brk
<= brk_page
) {
904 /* Heap contents are initialized to zero, as for anonymous
906 if (new_brk
> target_brk
) {
907 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
909 target_brk
= new_brk
;
910 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
914 /* We need to allocate more memory after the brk... Note that
915 * we don't use MAP_FIXED because that will map over the top of
916 * any existing mapping (like the one with the host libc or qemu
917 * itself); instead we treat "mapped but at wrong address" as
918 * a failure and unmap again.
920 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
921 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
922 PROT_READ
|PROT_WRITE
,
923 MAP_ANON
|MAP_PRIVATE
, 0, 0));
925 if (mapped_addr
== brk_page
) {
926 /* Heap contents are initialized to zero, as for anonymous
927 * mapped pages. Technically the new pages are already
928 * initialized to zero since they *are* anonymous mapped
929 * pages, however we have to take care with the contents that
930 * come from the remaining part of the previous page: it may
931 * contains garbage data due to a previous heap usage (grown
933 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
935 target_brk
= new_brk
;
936 brk_page
= HOST_PAGE_ALIGN(target_brk
);
937 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
940 } else if (mapped_addr
!= -1) {
941 /* Mapped but at wrong address, meaning there wasn't actually
942 * enough space for this brk.
944 target_munmap(mapped_addr
, new_alloc_size
);
946 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
949 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
952 #if defined(TARGET_ALPHA)
953 /* We (partially) emulate OSF/1 on Alpha, which requires we
954 return a proper errno, not an unchanged brk value. */
955 return -TARGET_ENOMEM
;
957 /* For everything else, return the previous break. */
961 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
962 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
963 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
964 abi_ulong target_fds_addr
,
968 abi_ulong b
, *target_fds
;
970 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
971 if (!(target_fds
= lock_user(VERIFY_READ
,
973 sizeof(abi_ulong
) * nw
,
975 return -TARGET_EFAULT
;
979 for (i
= 0; i
< nw
; i
++) {
980 /* grab the abi_ulong */
981 __get_user(b
, &target_fds
[i
]);
982 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
983 /* check the bit inside the abi_ulong */
990 unlock_user(target_fds
, target_fds_addr
, 0);
995 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
996 abi_ulong target_fds_addr
,
999 if (target_fds_addr
) {
1000 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
1001 return -TARGET_EFAULT
;
1009 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
1015 abi_ulong
*target_fds
;
1017 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1018 if (!(target_fds
= lock_user(VERIFY_WRITE
,
1020 sizeof(abi_ulong
) * nw
,
1022 return -TARGET_EFAULT
;
1025 for (i
= 0; i
< nw
; i
++) {
1027 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1028 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
1031 __put_user(v
, &target_fds
[i
]);
1034 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
1040 #if defined(__alpha__)
1041 #define HOST_HZ 1024
1046 static inline abi_long
host_to_target_clock_t(long ticks
)
1048 #if HOST_HZ == TARGET_HZ
1051 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
1055 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
1056 const struct rusage
*rusage
)
1058 struct target_rusage
*target_rusage
;
1060 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
1061 return -TARGET_EFAULT
;
1062 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
1063 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
1064 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
1065 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
1066 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
1067 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
1068 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
1069 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
1070 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
1071 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
1072 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1073 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1074 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1075 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1076 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1077 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1078 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1079 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1080 unlock_user_struct(target_rusage
, target_addr
, 1);
1085 #ifdef TARGET_NR_setrlimit
1086 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1088 abi_ulong target_rlim_swap
;
1091 target_rlim_swap
= tswapal(target_rlim
);
1092 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1093 return RLIM_INFINITY
;
1095 result
= target_rlim_swap
;
1096 if (target_rlim_swap
!= (rlim_t
)result
)
1097 return RLIM_INFINITY
;
1103 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1104 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1106 abi_ulong target_rlim_swap
;
1109 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1110 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1112 target_rlim_swap
= rlim
;
1113 result
= tswapal(target_rlim_swap
);
1119 static inline int target_to_host_resource(int code
)
1122 case TARGET_RLIMIT_AS
:
1124 case TARGET_RLIMIT_CORE
:
1126 case TARGET_RLIMIT_CPU
:
1128 case TARGET_RLIMIT_DATA
:
1130 case TARGET_RLIMIT_FSIZE
:
1131 return RLIMIT_FSIZE
;
1132 case TARGET_RLIMIT_LOCKS
:
1133 return RLIMIT_LOCKS
;
1134 case TARGET_RLIMIT_MEMLOCK
:
1135 return RLIMIT_MEMLOCK
;
1136 case TARGET_RLIMIT_MSGQUEUE
:
1137 return RLIMIT_MSGQUEUE
;
1138 case TARGET_RLIMIT_NICE
:
1140 case TARGET_RLIMIT_NOFILE
:
1141 return RLIMIT_NOFILE
;
1142 case TARGET_RLIMIT_NPROC
:
1143 return RLIMIT_NPROC
;
1144 case TARGET_RLIMIT_RSS
:
1146 case TARGET_RLIMIT_RTPRIO
:
1147 return RLIMIT_RTPRIO
;
1148 case TARGET_RLIMIT_SIGPENDING
:
1149 return RLIMIT_SIGPENDING
;
1150 case TARGET_RLIMIT_STACK
:
1151 return RLIMIT_STACK
;
1157 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1158 abi_ulong target_tv_addr
)
1160 struct target_timeval
*target_tv
;
1162 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1)) {
1163 return -TARGET_EFAULT
;
1166 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1167 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1169 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1174 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1175 const struct timeval
*tv
)
1177 struct target_timeval
*target_tv
;
1179 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1180 return -TARGET_EFAULT
;
1183 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1184 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1186 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1191 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1192 static inline abi_long
copy_from_user_timeval64(struct timeval
*tv
,
1193 abi_ulong target_tv_addr
)
1195 struct target__kernel_sock_timeval
*target_tv
;
1197 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1)) {
1198 return -TARGET_EFAULT
;
1201 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1202 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1204 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1210 static inline abi_long
copy_to_user_timeval64(abi_ulong target_tv_addr
,
1211 const struct timeval
*tv
)
1213 struct target__kernel_sock_timeval
*target_tv
;
1215 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1216 return -TARGET_EFAULT
;
1219 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1220 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1222 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1227 #if defined(TARGET_NR_futex) || \
1228 defined(TARGET_NR_rt_sigtimedwait) || \
1229 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1230 defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1231 defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1232 defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1233 defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1234 defined(TARGET_NR_timer_settime) || \
1235 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1236 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
1237 abi_ulong target_addr
)
1239 struct target_timespec
*target_ts
;
1241 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1)) {
1242 return -TARGET_EFAULT
;
1244 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1245 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1246 unlock_user_struct(target_ts
, target_addr
, 0);
1251 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1252 defined(TARGET_NR_timer_settime64) || \
1253 defined(TARGET_NR_mq_timedsend_time64) || \
1254 defined(TARGET_NR_mq_timedreceive_time64) || \
1255 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1256 defined(TARGET_NR_clock_nanosleep_time64) || \
1257 defined(TARGET_NR_rt_sigtimedwait_time64) || \
1258 defined(TARGET_NR_utimensat) || \
1259 defined(TARGET_NR_utimensat_time64) || \
1260 defined(TARGET_NR_semtimedop_time64) || \
1261 defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1262 static inline abi_long
target_to_host_timespec64(struct timespec
*host_ts
,
1263 abi_ulong target_addr
)
1265 struct target__kernel_timespec
*target_ts
;
1267 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1)) {
1268 return -TARGET_EFAULT
;
1270 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1271 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1272 /* in 32bit mode, this drops the padding */
1273 host_ts
->tv_nsec
= (long)(abi_long
)host_ts
->tv_nsec
;
1274 unlock_user_struct(target_ts
, target_addr
, 0);
1279 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
1280 struct timespec
*host_ts
)
1282 struct target_timespec
*target_ts
;
1284 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1285 return -TARGET_EFAULT
;
1287 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1288 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1289 unlock_user_struct(target_ts
, target_addr
, 1);
1293 static inline abi_long
host_to_target_timespec64(abi_ulong target_addr
,
1294 struct timespec
*host_ts
)
1296 struct target__kernel_timespec
*target_ts
;
1298 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1299 return -TARGET_EFAULT
;
1301 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1302 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1303 unlock_user_struct(target_ts
, target_addr
, 1);
1307 #if defined(TARGET_NR_gettimeofday)
1308 static inline abi_long
copy_to_user_timezone(abi_ulong target_tz_addr
,
1309 struct timezone
*tz
)
1311 struct target_timezone
*target_tz
;
1313 if (!lock_user_struct(VERIFY_WRITE
, target_tz
, target_tz_addr
, 1)) {
1314 return -TARGET_EFAULT
;
1317 __put_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1318 __put_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1320 unlock_user_struct(target_tz
, target_tz_addr
, 1);
1326 #if defined(TARGET_NR_settimeofday)
1327 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1328 abi_ulong target_tz_addr
)
1330 struct target_timezone
*target_tz
;
1332 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1333 return -TARGET_EFAULT
;
1336 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1337 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1339 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1345 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1348 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1349 abi_ulong target_mq_attr_addr
)
1351 struct target_mq_attr
*target_mq_attr
;
1353 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1354 target_mq_attr_addr
, 1))
1355 return -TARGET_EFAULT
;
1357 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1358 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1359 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1360 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1362 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1367 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1368 const struct mq_attr
*attr
)
1370 struct target_mq_attr
*target_mq_attr
;
1372 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1373 target_mq_attr_addr
, 0))
1374 return -TARGET_EFAULT
;
1376 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1377 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1378 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1379 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1381 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1387 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1388 /* do_select() must return target values and target errnos. */
1389 static abi_long
do_select(int n
,
1390 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1391 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1393 fd_set rfds
, wfds
, efds
;
1394 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1396 struct timespec ts
, *ts_ptr
;
1399 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1403 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1407 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1412 if (target_tv_addr
) {
1413 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1414 return -TARGET_EFAULT
;
1415 ts
.tv_sec
= tv
.tv_sec
;
1416 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1422 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1425 if (!is_error(ret
)) {
1426 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1427 return -TARGET_EFAULT
;
1428 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1429 return -TARGET_EFAULT
;
1430 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1431 return -TARGET_EFAULT
;
1433 if (target_tv_addr
) {
1434 tv
.tv_sec
= ts
.tv_sec
;
1435 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1436 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1437 return -TARGET_EFAULT
;
1445 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1446 static abi_long
do_old_select(abi_ulong arg1
)
1448 struct target_sel_arg_struct
*sel
;
1449 abi_ulong inp
, outp
, exp
, tvp
;
1452 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1)) {
1453 return -TARGET_EFAULT
;
1456 nsel
= tswapal(sel
->n
);
1457 inp
= tswapal(sel
->inp
);
1458 outp
= tswapal(sel
->outp
);
1459 exp
= tswapal(sel
->exp
);
1460 tvp
= tswapal(sel
->tvp
);
1462 unlock_user_struct(sel
, arg1
, 0);
1464 return do_select(nsel
, inp
, outp
, exp
, tvp
);
1469 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1470 static abi_long
do_pselect6(abi_long arg1
, abi_long arg2
, abi_long arg3
,
1471 abi_long arg4
, abi_long arg5
, abi_long arg6
,
1474 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
1475 fd_set rfds
, wfds
, efds
;
1476 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1477 struct timespec ts
, *ts_ptr
;
1481 * The 6th arg is actually two args smashed together,
1482 * so we cannot use the C library.
1490 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
1491 target_sigset_t
*target_sigset
;
1499 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1503 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1507 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1513 * This takes a timespec, and not a timeval, so we cannot
1514 * use the do_select() helper ...
1518 if (target_to_host_timespec64(&ts
, ts_addr
)) {
1519 return -TARGET_EFAULT
;
1522 if (target_to_host_timespec(&ts
, ts_addr
)) {
1523 return -TARGET_EFAULT
;
1531 /* Extract the two packed args for the sigset */
1534 sig
.size
= SIGSET_T_SIZE
;
1536 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
1538 return -TARGET_EFAULT
;
1540 arg_sigset
= tswapal(arg7
[0]);
1541 arg_sigsize
= tswapal(arg7
[1]);
1542 unlock_user(arg7
, arg6
, 0);
1546 if (arg_sigsize
!= sizeof(*target_sigset
)) {
1547 /* Like the kernel, we enforce correct size sigsets */
1548 return -TARGET_EINVAL
;
1550 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
1551 sizeof(*target_sigset
), 1);
1552 if (!target_sigset
) {
1553 return -TARGET_EFAULT
;
1555 target_to_host_sigset(&set
, target_sigset
);
1556 unlock_user(target_sigset
, arg_sigset
, 0);
1564 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1567 if (!is_error(ret
)) {
1568 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
)) {
1569 return -TARGET_EFAULT
;
1571 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
)) {
1572 return -TARGET_EFAULT
;
1574 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
)) {
1575 return -TARGET_EFAULT
;
1578 if (ts_addr
&& host_to_target_timespec64(ts_addr
, &ts
)) {
1579 return -TARGET_EFAULT
;
1582 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
)) {
1583 return -TARGET_EFAULT
;
1591 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1592 defined(TARGET_NR_ppoll_time64)
1593 static abi_long
do_ppoll(abi_long arg1
, abi_long arg2
, abi_long arg3
,
1594 abi_long arg4
, abi_long arg5
, bool ppoll
, bool time64
)
1596 struct target_pollfd
*target_pfd
;
1597 unsigned int nfds
= arg2
;
1605 if (nfds
> (INT_MAX
/ sizeof(struct target_pollfd
))) {
1606 return -TARGET_EINVAL
;
1608 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
1609 sizeof(struct target_pollfd
) * nfds
, 1);
1611 return -TARGET_EFAULT
;
1614 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
1615 for (i
= 0; i
< nfds
; i
++) {
1616 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
1617 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
1621 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
1622 target_sigset_t
*target_set
;
1623 sigset_t _set
, *set
= &_set
;
1627 if (target_to_host_timespec64(timeout_ts
, arg3
)) {
1628 unlock_user(target_pfd
, arg1
, 0);
1629 return -TARGET_EFAULT
;
1632 if (target_to_host_timespec(timeout_ts
, arg3
)) {
1633 unlock_user(target_pfd
, arg1
, 0);
1634 return -TARGET_EFAULT
;
1642 if (arg5
!= sizeof(target_sigset_t
)) {
1643 unlock_user(target_pfd
, arg1
, 0);
1644 return -TARGET_EINVAL
;
1647 target_set
= lock_user(VERIFY_READ
, arg4
,
1648 sizeof(target_sigset_t
), 1);
1650 unlock_user(target_pfd
, arg1
, 0);
1651 return -TARGET_EFAULT
;
1653 target_to_host_sigset(set
, target_set
);
1658 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
1659 set
, SIGSET_T_SIZE
));
1661 if (!is_error(ret
) && arg3
) {
1663 if (host_to_target_timespec64(arg3
, timeout_ts
)) {
1664 return -TARGET_EFAULT
;
1667 if (host_to_target_timespec(arg3
, timeout_ts
)) {
1668 return -TARGET_EFAULT
;
1673 unlock_user(target_set
, arg4
, 0);
1676 struct timespec ts
, *pts
;
1679 /* Convert ms to secs, ns */
1680 ts
.tv_sec
= arg3
/ 1000;
1681 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
1684 /* -ve poll() timeout means "infinite" */
1687 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
1690 if (!is_error(ret
)) {
1691 for (i
= 0; i
< nfds
; i
++) {
1692 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
1695 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
1700 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1703 return pipe2(host_pipe
, flags
);
1709 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1710 int flags
, int is_pipe2
)
1714 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1717 return get_errno(ret
);
1719 /* Several targets have special calling conventions for the original
1720 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1722 #if defined(TARGET_ALPHA)
1723 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1724 return host_pipe
[0];
1725 #elif defined(TARGET_MIPS)
1726 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1727 return host_pipe
[0];
1728 #elif defined(TARGET_SH4)
1729 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1730 return host_pipe
[0];
1731 #elif defined(TARGET_SPARC)
1732 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1733 return host_pipe
[0];
1737 if (put_user_s32(host_pipe
[0], pipedes
)
1738 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1739 return -TARGET_EFAULT
;
1740 return get_errno(ret
);
1743 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1744 abi_ulong target_addr
,
1747 struct target_ip_mreqn
*target_smreqn
;
1749 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1751 return -TARGET_EFAULT
;
1752 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1753 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1754 if (len
== sizeof(struct target_ip_mreqn
))
1755 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1756 unlock_user(target_smreqn
, target_addr
, 0);
1761 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1762 abi_ulong target_addr
,
1765 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1766 sa_family_t sa_family
;
1767 struct target_sockaddr
*target_saddr
;
1769 if (fd_trans_target_to_host_addr(fd
)) {
1770 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1773 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1775 return -TARGET_EFAULT
;
1777 sa_family
= tswap16(target_saddr
->sa_family
);
1779 /* Oops. The caller might send a incomplete sun_path; sun_path
1780 * must be terminated by \0 (see the manual page), but
1781 * unfortunately it is quite common to specify sockaddr_un
1782 * length as "strlen(x->sun_path)" while it should be
1783 * "strlen(...) + 1". We'll fix that here if needed.
1784 * Linux kernel has a similar feature.
1787 if (sa_family
== AF_UNIX
) {
1788 if (len
< unix_maxlen
&& len
> 0) {
1789 char *cp
= (char*)target_saddr
;
1791 if ( cp
[len
-1] && !cp
[len
] )
1794 if (len
> unix_maxlen
)
1798 memcpy(addr
, target_saddr
, len
);
1799 addr
->sa_family
= sa_family
;
1800 if (sa_family
== AF_NETLINK
) {
1801 struct sockaddr_nl
*nladdr
;
1803 nladdr
= (struct sockaddr_nl
*)addr
;
1804 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1805 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1806 } else if (sa_family
== AF_PACKET
) {
1807 struct target_sockaddr_ll
*lladdr
;
1809 lladdr
= (struct target_sockaddr_ll
*)addr
;
1810 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1811 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1813 unlock_user(target_saddr
, target_addr
, 0);
1818 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1819 struct sockaddr
*addr
,
1822 struct target_sockaddr
*target_saddr
;
1829 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1831 return -TARGET_EFAULT
;
1832 memcpy(target_saddr
, addr
, len
);
1833 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1834 sizeof(target_saddr
->sa_family
)) {
1835 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1837 if (addr
->sa_family
== AF_NETLINK
&&
1838 len
>= sizeof(struct target_sockaddr_nl
)) {
1839 struct target_sockaddr_nl
*target_nl
=
1840 (struct target_sockaddr_nl
*)target_saddr
;
1841 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1842 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1843 } else if (addr
->sa_family
== AF_PACKET
) {
1844 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1845 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1846 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1847 } else if (addr
->sa_family
== AF_INET6
&&
1848 len
>= sizeof(struct target_sockaddr_in6
)) {
1849 struct target_sockaddr_in6
*target_in6
=
1850 (struct target_sockaddr_in6
*)target_saddr
;
1851 target_in6
->sin6_scope_id
= tswap16(target_in6
->sin6_scope_id
);
1853 unlock_user(target_saddr
, target_addr
, len
);
1858 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1859 struct target_msghdr
*target_msgh
)
1861 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1862 abi_long msg_controllen
;
1863 abi_ulong target_cmsg_addr
;
1864 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1865 socklen_t space
= 0;
1867 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1868 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1870 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1871 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1872 target_cmsg_start
= target_cmsg
;
1874 return -TARGET_EFAULT
;
1876 while (cmsg
&& target_cmsg
) {
1877 void *data
= CMSG_DATA(cmsg
);
1878 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1880 int len
= tswapal(target_cmsg
->cmsg_len
)
1881 - sizeof(struct target_cmsghdr
);
1883 space
+= CMSG_SPACE(len
);
1884 if (space
> msgh
->msg_controllen
) {
1885 space
-= CMSG_SPACE(len
);
1886 /* This is a QEMU bug, since we allocated the payload
1887 * area ourselves (unlike overflow in host-to-target
1888 * conversion, which is just the guest giving us a buffer
1889 * that's too small). It can't happen for the payload types
1890 * we currently support; if it becomes an issue in future
1891 * we would need to improve our allocation strategy to
1892 * something more intelligent than "twice the size of the
1893 * target buffer we're reading from".
1895 qemu_log_mask(LOG_UNIMP
,
1896 ("Unsupported ancillary data %d/%d: "
1897 "unhandled msg size\n"),
1898 tswap32(target_cmsg
->cmsg_level
),
1899 tswap32(target_cmsg
->cmsg_type
));
1903 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1904 cmsg
->cmsg_level
= SOL_SOCKET
;
1906 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1908 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1909 cmsg
->cmsg_len
= CMSG_LEN(len
);
1911 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1912 int *fd
= (int *)data
;
1913 int *target_fd
= (int *)target_data
;
1914 int i
, numfds
= len
/ sizeof(int);
1916 for (i
= 0; i
< numfds
; i
++) {
1917 __get_user(fd
[i
], target_fd
+ i
);
1919 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1920 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1921 struct ucred
*cred
= (struct ucred
*)data
;
1922 struct target_ucred
*target_cred
=
1923 (struct target_ucred
*)target_data
;
1925 __get_user(cred
->pid
, &target_cred
->pid
);
1926 __get_user(cred
->uid
, &target_cred
->uid
);
1927 __get_user(cred
->gid
, &target_cred
->gid
);
1929 qemu_log_mask(LOG_UNIMP
, "Unsupported ancillary data: %d/%d\n",
1930 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1931 memcpy(data
, target_data
, len
);
1934 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1935 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1938 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1940 msgh
->msg_controllen
= space
;
1944 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1945 struct msghdr
*msgh
)
1947 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1948 abi_long msg_controllen
;
1949 abi_ulong target_cmsg_addr
;
1950 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1951 socklen_t space
= 0;
1953 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1954 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1956 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1957 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1958 target_cmsg_start
= target_cmsg
;
1960 return -TARGET_EFAULT
;
1962 while (cmsg
&& target_cmsg
) {
1963 void *data
= CMSG_DATA(cmsg
);
1964 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1966 int len
= cmsg
->cmsg_len
- sizeof(struct cmsghdr
);
1967 int tgt_len
, tgt_space
;
1969 /* We never copy a half-header but may copy half-data;
1970 * this is Linux's behaviour in put_cmsg(). Note that
1971 * truncation here is a guest problem (which we report
1972 * to the guest via the CTRUNC bit), unlike truncation
1973 * in target_to_host_cmsg, which is a QEMU bug.
1975 if (msg_controllen
< sizeof(struct target_cmsghdr
)) {
1976 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1980 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1981 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1983 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1985 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1987 /* Payload types which need a different size of payload on
1988 * the target must adjust tgt_len here.
1991 switch (cmsg
->cmsg_level
) {
1993 switch (cmsg
->cmsg_type
) {
1995 tgt_len
= sizeof(struct target_timeval
);
2005 if (msg_controllen
< TARGET_CMSG_LEN(tgt_len
)) {
2006 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
2007 tgt_len
= msg_controllen
- sizeof(struct target_cmsghdr
);
2010 /* We must now copy-and-convert len bytes of payload
2011 * into tgt_len bytes of destination space. Bear in mind
2012 * that in both source and destination we may be dealing
2013 * with a truncated value!
2015 switch (cmsg
->cmsg_level
) {
2017 switch (cmsg
->cmsg_type
) {
2020 int *fd
= (int *)data
;
2021 int *target_fd
= (int *)target_data
;
2022 int i
, numfds
= tgt_len
/ sizeof(int);
2024 for (i
= 0; i
< numfds
; i
++) {
2025 __put_user(fd
[i
], target_fd
+ i
);
2031 struct timeval
*tv
= (struct timeval
*)data
;
2032 struct target_timeval
*target_tv
=
2033 (struct target_timeval
*)target_data
;
2035 if (len
!= sizeof(struct timeval
) ||
2036 tgt_len
!= sizeof(struct target_timeval
)) {
2040 /* copy struct timeval to target */
2041 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
2042 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
2045 case SCM_CREDENTIALS
:
2047 struct ucred
*cred
= (struct ucred
*)data
;
2048 struct target_ucred
*target_cred
=
2049 (struct target_ucred
*)target_data
;
2051 __put_user(cred
->pid
, &target_cred
->pid
);
2052 __put_user(cred
->uid
, &target_cred
->uid
);
2053 __put_user(cred
->gid
, &target_cred
->gid
);
2062 switch (cmsg
->cmsg_type
) {
2065 uint32_t *v
= (uint32_t *)data
;
2066 uint32_t *t_int
= (uint32_t *)target_data
;
2068 if (len
!= sizeof(uint32_t) ||
2069 tgt_len
!= sizeof(uint32_t)) {
2072 __put_user(*v
, t_int
);
2078 struct sock_extended_err ee
;
2079 struct sockaddr_in offender
;
2081 struct errhdr_t
*errh
= (struct errhdr_t
*)data
;
2082 struct errhdr_t
*target_errh
=
2083 (struct errhdr_t
*)target_data
;
2085 if (len
!= sizeof(struct errhdr_t
) ||
2086 tgt_len
!= sizeof(struct errhdr_t
)) {
2089 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
2090 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
2091 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
2092 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
2093 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
2094 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
2095 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
2096 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
2097 (void *) &errh
->offender
, sizeof(errh
->offender
));
2106 switch (cmsg
->cmsg_type
) {
2109 uint32_t *v
= (uint32_t *)data
;
2110 uint32_t *t_int
= (uint32_t *)target_data
;
2112 if (len
!= sizeof(uint32_t) ||
2113 tgt_len
!= sizeof(uint32_t)) {
2116 __put_user(*v
, t_int
);
2122 struct sock_extended_err ee
;
2123 struct sockaddr_in6 offender
;
2125 struct errhdr6_t
*errh
= (struct errhdr6_t
*)data
;
2126 struct errhdr6_t
*target_errh
=
2127 (struct errhdr6_t
*)target_data
;
2129 if (len
!= sizeof(struct errhdr6_t
) ||
2130 tgt_len
!= sizeof(struct errhdr6_t
)) {
2133 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
2134 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
2135 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
2136 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
2137 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
2138 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
2139 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
2140 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
2141 (void *) &errh
->offender
, sizeof(errh
->offender
));
2151 qemu_log_mask(LOG_UNIMP
, "Unsupported ancillary data: %d/%d\n",
2152 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
2153 memcpy(target_data
, data
, MIN(len
, tgt_len
));
2154 if (tgt_len
> len
) {
2155 memset(target_data
+ len
, 0, tgt_len
- len
);
2159 target_cmsg
->cmsg_len
= tswapal(TARGET_CMSG_LEN(tgt_len
));
2160 tgt_space
= TARGET_CMSG_SPACE(tgt_len
);
2161 if (msg_controllen
< tgt_space
) {
2162 tgt_space
= msg_controllen
;
2164 msg_controllen
-= tgt_space
;
2166 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
2167 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
2170 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
2172 target_msgh
->msg_controllen
= tswapal(space
);
2176 /* do_setsockopt() Must return target values and target errnos. */
2177 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
2178 abi_ulong optval_addr
, socklen_t optlen
)
2182 struct ip_mreqn
*ip_mreq
;
2183 struct ip_mreq_source
*ip_mreq_source
;
2187 /* TCP options all take an 'int' value. */
2188 if (optlen
< sizeof(uint32_t))
2189 return -TARGET_EINVAL
;
2191 if (get_user_u32(val
, optval_addr
))
2192 return -TARGET_EFAULT
;
2193 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2200 case IP_ROUTER_ALERT
:
2204 case IP_MTU_DISCOVER
:
2211 case IP_MULTICAST_TTL
:
2212 case IP_MULTICAST_LOOP
:
2214 if (optlen
>= sizeof(uint32_t)) {
2215 if (get_user_u32(val
, optval_addr
))
2216 return -TARGET_EFAULT
;
2217 } else if (optlen
>= 1) {
2218 if (get_user_u8(val
, optval_addr
))
2219 return -TARGET_EFAULT
;
2221 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2223 case IP_ADD_MEMBERSHIP
:
2224 case IP_DROP_MEMBERSHIP
:
2225 if (optlen
< sizeof (struct target_ip_mreq
) ||
2226 optlen
> sizeof (struct target_ip_mreqn
))
2227 return -TARGET_EINVAL
;
2229 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
2230 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
2231 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
2234 case IP_BLOCK_SOURCE
:
2235 case IP_UNBLOCK_SOURCE
:
2236 case IP_ADD_SOURCE_MEMBERSHIP
:
2237 case IP_DROP_SOURCE_MEMBERSHIP
:
2238 if (optlen
!= sizeof (struct target_ip_mreq_source
))
2239 return -TARGET_EINVAL
;
2241 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2242 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
2243 unlock_user (ip_mreq_source
, optval_addr
, 0);
2252 case IPV6_MTU_DISCOVER
:
2255 case IPV6_RECVPKTINFO
:
2256 case IPV6_UNICAST_HOPS
:
2257 case IPV6_MULTICAST_HOPS
:
2258 case IPV6_MULTICAST_LOOP
:
2260 case IPV6_RECVHOPLIMIT
:
2261 case IPV6_2292HOPLIMIT
:
2264 case IPV6_2292PKTINFO
:
2265 case IPV6_RECVTCLASS
:
2266 case IPV6_RECVRTHDR
:
2267 case IPV6_2292RTHDR
:
2268 case IPV6_RECVHOPOPTS
:
2269 case IPV6_2292HOPOPTS
:
2270 case IPV6_RECVDSTOPTS
:
2271 case IPV6_2292DSTOPTS
:
2273 #ifdef IPV6_RECVPATHMTU
2274 case IPV6_RECVPATHMTU
:
2276 #ifdef IPV6_TRANSPARENT
2277 case IPV6_TRANSPARENT
:
2279 #ifdef IPV6_FREEBIND
2282 #ifdef IPV6_RECVORIGDSTADDR
2283 case IPV6_RECVORIGDSTADDR
:
2286 if (optlen
< sizeof(uint32_t)) {
2287 return -TARGET_EINVAL
;
2289 if (get_user_u32(val
, optval_addr
)) {
2290 return -TARGET_EFAULT
;
2292 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2293 &val
, sizeof(val
)));
2297 struct in6_pktinfo pki
;
2299 if (optlen
< sizeof(pki
)) {
2300 return -TARGET_EINVAL
;
2303 if (copy_from_user(&pki
, optval_addr
, sizeof(pki
))) {
2304 return -TARGET_EFAULT
;
2307 pki
.ipi6_ifindex
= tswap32(pki
.ipi6_ifindex
);
2309 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2310 &pki
, sizeof(pki
)));
2313 case IPV6_ADD_MEMBERSHIP
:
2314 case IPV6_DROP_MEMBERSHIP
:
2316 struct ipv6_mreq ipv6mreq
;
2318 if (optlen
< sizeof(ipv6mreq
)) {
2319 return -TARGET_EINVAL
;
2322 if (copy_from_user(&ipv6mreq
, optval_addr
, sizeof(ipv6mreq
))) {
2323 return -TARGET_EFAULT
;
2326 ipv6mreq
.ipv6mr_interface
= tswap32(ipv6mreq
.ipv6mr_interface
);
2328 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2329 &ipv6mreq
, sizeof(ipv6mreq
)));
2340 struct icmp6_filter icmp6f
;
2342 if (optlen
> sizeof(icmp6f
)) {
2343 optlen
= sizeof(icmp6f
);
2346 if (copy_from_user(&icmp6f
, optval_addr
, optlen
)) {
2347 return -TARGET_EFAULT
;
2350 for (val
= 0; val
< 8; val
++) {
2351 icmp6f
.data
[val
] = tswap32(icmp6f
.data
[val
]);
2354 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2366 /* those take an u32 value */
2367 if (optlen
< sizeof(uint32_t)) {
2368 return -TARGET_EINVAL
;
2371 if (get_user_u32(val
, optval_addr
)) {
2372 return -TARGET_EFAULT
;
2374 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2375 &val
, sizeof(val
)));
2382 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2387 char *alg_key
= g_malloc(optlen
);
2390 return -TARGET_ENOMEM
;
2392 if (copy_from_user(alg_key
, optval_addr
, optlen
)) {
2394 return -TARGET_EFAULT
;
2396 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2401 case ALG_SET_AEAD_AUTHSIZE
:
2403 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2412 case TARGET_SOL_SOCKET
:
2414 case TARGET_SO_RCVTIMEO
:
2418 optname
= SO_RCVTIMEO
;
2421 if (optlen
!= sizeof(struct target_timeval
)) {
2422 return -TARGET_EINVAL
;
2425 if (copy_from_user_timeval(&tv
, optval_addr
)) {
2426 return -TARGET_EFAULT
;
2429 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2433 case TARGET_SO_SNDTIMEO
:
2434 optname
= SO_SNDTIMEO
;
2436 case TARGET_SO_ATTACH_FILTER
:
2438 struct target_sock_fprog
*tfprog
;
2439 struct target_sock_filter
*tfilter
;
2440 struct sock_fprog fprog
;
2441 struct sock_filter
*filter
;
2444 if (optlen
!= sizeof(*tfprog
)) {
2445 return -TARGET_EINVAL
;
2447 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
2448 return -TARGET_EFAULT
;
2450 if (!lock_user_struct(VERIFY_READ
, tfilter
,
2451 tswapal(tfprog
->filter
), 0)) {
2452 unlock_user_struct(tfprog
, optval_addr
, 1);
2453 return -TARGET_EFAULT
;
2456 fprog
.len
= tswap16(tfprog
->len
);
2457 filter
= g_try_new(struct sock_filter
, fprog
.len
);
2458 if (filter
== NULL
) {
2459 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2460 unlock_user_struct(tfprog
, optval_addr
, 1);
2461 return -TARGET_ENOMEM
;
2463 for (i
= 0; i
< fprog
.len
; i
++) {
2464 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2465 filter
[i
].jt
= tfilter
[i
].jt
;
2466 filter
[i
].jf
= tfilter
[i
].jf
;
2467 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2469 fprog
.filter
= filter
;
2471 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2472 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2475 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2476 unlock_user_struct(tfprog
, optval_addr
, 1);
2479 case TARGET_SO_BINDTODEVICE
:
2481 char *dev_ifname
, *addr_ifname
;
2483 if (optlen
> IFNAMSIZ
- 1) {
2484 optlen
= IFNAMSIZ
- 1;
2486 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2488 return -TARGET_EFAULT
;
2490 optname
= SO_BINDTODEVICE
;
2491 addr_ifname
= alloca(IFNAMSIZ
);
2492 memcpy(addr_ifname
, dev_ifname
, optlen
);
2493 addr_ifname
[optlen
] = 0;
2494 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2495 addr_ifname
, optlen
));
2496 unlock_user (dev_ifname
, optval_addr
, 0);
2499 case TARGET_SO_LINGER
:
2502 struct target_linger
*tlg
;
2504 if (optlen
!= sizeof(struct target_linger
)) {
2505 return -TARGET_EINVAL
;
2507 if (!lock_user_struct(VERIFY_READ
, tlg
, optval_addr
, 1)) {
2508 return -TARGET_EFAULT
;
2510 __get_user(lg
.l_onoff
, &tlg
->l_onoff
);
2511 __get_user(lg
.l_linger
, &tlg
->l_linger
);
2512 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, SO_LINGER
,
2514 unlock_user_struct(tlg
, optval_addr
, 0);
2517 /* Options with 'int' argument. */
2518 case TARGET_SO_DEBUG
:
2521 case TARGET_SO_REUSEADDR
:
2522 optname
= SO_REUSEADDR
;
2525 case TARGET_SO_REUSEPORT
:
2526 optname
= SO_REUSEPORT
;
2529 case TARGET_SO_TYPE
:
2532 case TARGET_SO_ERROR
:
2535 case TARGET_SO_DONTROUTE
:
2536 optname
= SO_DONTROUTE
;
2538 case TARGET_SO_BROADCAST
:
2539 optname
= SO_BROADCAST
;
2541 case TARGET_SO_SNDBUF
:
2542 optname
= SO_SNDBUF
;
2544 case TARGET_SO_SNDBUFFORCE
:
2545 optname
= SO_SNDBUFFORCE
;
2547 case TARGET_SO_RCVBUF
:
2548 optname
= SO_RCVBUF
;
2550 case TARGET_SO_RCVBUFFORCE
:
2551 optname
= SO_RCVBUFFORCE
;
2553 case TARGET_SO_KEEPALIVE
:
2554 optname
= SO_KEEPALIVE
;
2556 case TARGET_SO_OOBINLINE
:
2557 optname
= SO_OOBINLINE
;
2559 case TARGET_SO_NO_CHECK
:
2560 optname
= SO_NO_CHECK
;
2562 case TARGET_SO_PRIORITY
:
2563 optname
= SO_PRIORITY
;
2566 case TARGET_SO_BSDCOMPAT
:
2567 optname
= SO_BSDCOMPAT
;
2570 case TARGET_SO_PASSCRED
:
2571 optname
= SO_PASSCRED
;
2573 case TARGET_SO_PASSSEC
:
2574 optname
= SO_PASSSEC
;
2576 case TARGET_SO_TIMESTAMP
:
2577 optname
= SO_TIMESTAMP
;
2579 case TARGET_SO_RCVLOWAT
:
2580 optname
= SO_RCVLOWAT
;
2585 if (optlen
< sizeof(uint32_t))
2586 return -TARGET_EINVAL
;
2588 if (get_user_u32(val
, optval_addr
))
2589 return -TARGET_EFAULT
;
2590 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
2595 case NETLINK_PKTINFO
:
2596 case NETLINK_ADD_MEMBERSHIP
:
2597 case NETLINK_DROP_MEMBERSHIP
:
2598 case NETLINK_BROADCAST_ERROR
:
2599 case NETLINK_NO_ENOBUFS
:
2600 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2601 case NETLINK_LISTEN_ALL_NSID
:
2602 case NETLINK_CAP_ACK
:
2603 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2604 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2605 case NETLINK_EXT_ACK
:
2606 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2607 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2608 case NETLINK_GET_STRICT_CHK
:
2609 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2615 if (optlen
< sizeof(uint32_t)) {
2616 return -TARGET_EINVAL
;
2618 if (get_user_u32(val
, optval_addr
)) {
2619 return -TARGET_EFAULT
;
2621 ret
= get_errno(setsockopt(sockfd
, SOL_NETLINK
, optname
, &val
,
2624 #endif /* SOL_NETLINK */
2627 qemu_log_mask(LOG_UNIMP
, "Unsupported setsockopt level=%d optname=%d\n",
2629 ret
= -TARGET_ENOPROTOOPT
;
2634 /* do_getsockopt() Must return target values and target errnos. */
2635 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
2636 abi_ulong optval_addr
, abi_ulong optlen
)
2643 case TARGET_SOL_SOCKET
:
2646 /* These don't just return a single integer */
2647 case TARGET_SO_PEERNAME
:
2649 case TARGET_SO_RCVTIMEO
: {
2653 optname
= SO_RCVTIMEO
;
2656 if (get_user_u32(len
, optlen
)) {
2657 return -TARGET_EFAULT
;
2660 return -TARGET_EINVAL
;
2664 ret
= get_errno(getsockopt(sockfd
, level
, optname
,
2669 if (len
> sizeof(struct target_timeval
)) {
2670 len
= sizeof(struct target_timeval
);
2672 if (copy_to_user_timeval(optval_addr
, &tv
)) {
2673 return -TARGET_EFAULT
;
2675 if (put_user_u32(len
, optlen
)) {
2676 return -TARGET_EFAULT
;
2680 case TARGET_SO_SNDTIMEO
:
2681 optname
= SO_SNDTIMEO
;
2683 case TARGET_SO_PEERCRED
: {
2686 struct target_ucred
*tcr
;
2688 if (get_user_u32(len
, optlen
)) {
2689 return -TARGET_EFAULT
;
2692 return -TARGET_EINVAL
;
2696 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
2704 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
2705 return -TARGET_EFAULT
;
2707 __put_user(cr
.pid
, &tcr
->pid
);
2708 __put_user(cr
.uid
, &tcr
->uid
);
2709 __put_user(cr
.gid
, &tcr
->gid
);
2710 unlock_user_struct(tcr
, optval_addr
, 1);
2711 if (put_user_u32(len
, optlen
)) {
2712 return -TARGET_EFAULT
;
2716 case TARGET_SO_PEERSEC
: {
2719 if (get_user_u32(len
, optlen
)) {
2720 return -TARGET_EFAULT
;
2723 return -TARGET_EINVAL
;
2725 name
= lock_user(VERIFY_WRITE
, optval_addr
, len
, 0);
2727 return -TARGET_EFAULT
;
2730 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERSEC
,
2732 if (put_user_u32(lv
, optlen
)) {
2733 ret
= -TARGET_EFAULT
;
2735 unlock_user(name
, optval_addr
, lv
);
2738 case TARGET_SO_LINGER
:
2742 struct target_linger
*tlg
;
2744 if (get_user_u32(len
, optlen
)) {
2745 return -TARGET_EFAULT
;
2748 return -TARGET_EINVAL
;
2752 ret
= get_errno(getsockopt(sockfd
, level
, SO_LINGER
,
2760 if (!lock_user_struct(VERIFY_WRITE
, tlg
, optval_addr
, 0)) {
2761 return -TARGET_EFAULT
;
2763 __put_user(lg
.l_onoff
, &tlg
->l_onoff
);
2764 __put_user(lg
.l_linger
, &tlg
->l_linger
);
2765 unlock_user_struct(tlg
, optval_addr
, 1);
2766 if (put_user_u32(len
, optlen
)) {
2767 return -TARGET_EFAULT
;
2771 /* Options with 'int' argument. */
2772 case TARGET_SO_DEBUG
:
2775 case TARGET_SO_REUSEADDR
:
2776 optname
= SO_REUSEADDR
;
2779 case TARGET_SO_REUSEPORT
:
2780 optname
= SO_REUSEPORT
;
2783 case TARGET_SO_TYPE
:
2786 case TARGET_SO_ERROR
:
2789 case TARGET_SO_DONTROUTE
:
2790 optname
= SO_DONTROUTE
;
2792 case TARGET_SO_BROADCAST
:
2793 optname
= SO_BROADCAST
;
2795 case TARGET_SO_SNDBUF
:
2796 optname
= SO_SNDBUF
;
2798 case TARGET_SO_RCVBUF
:
2799 optname
= SO_RCVBUF
;
2801 case TARGET_SO_KEEPALIVE
:
2802 optname
= SO_KEEPALIVE
;
2804 case TARGET_SO_OOBINLINE
:
2805 optname
= SO_OOBINLINE
;
2807 case TARGET_SO_NO_CHECK
:
2808 optname
= SO_NO_CHECK
;
2810 case TARGET_SO_PRIORITY
:
2811 optname
= SO_PRIORITY
;
2814 case TARGET_SO_BSDCOMPAT
:
2815 optname
= SO_BSDCOMPAT
;
2818 case TARGET_SO_PASSCRED
:
2819 optname
= SO_PASSCRED
;
2821 case TARGET_SO_TIMESTAMP
:
2822 optname
= SO_TIMESTAMP
;
2824 case TARGET_SO_RCVLOWAT
:
2825 optname
= SO_RCVLOWAT
;
2827 case TARGET_SO_ACCEPTCONN
:
2828 optname
= SO_ACCEPTCONN
;
2835 /* TCP options all take an 'int' value. */
2837 if (get_user_u32(len
, optlen
))
2838 return -TARGET_EFAULT
;
2840 return -TARGET_EINVAL
;
2842 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2845 if (optname
== SO_TYPE
) {
2846 val
= host_to_target_sock_type(val
);
2851 if (put_user_u32(val
, optval_addr
))
2852 return -TARGET_EFAULT
;
2854 if (put_user_u8(val
, optval_addr
))
2855 return -TARGET_EFAULT
;
2857 if (put_user_u32(len
, optlen
))
2858 return -TARGET_EFAULT
;
2865 case IP_ROUTER_ALERT
:
2869 case IP_MTU_DISCOVER
:
2875 case IP_MULTICAST_TTL
:
2876 case IP_MULTICAST_LOOP
:
2877 if (get_user_u32(len
, optlen
))
2878 return -TARGET_EFAULT
;
2880 return -TARGET_EINVAL
;
2882 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2885 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2887 if (put_user_u32(len
, optlen
)
2888 || put_user_u8(val
, optval_addr
))
2889 return -TARGET_EFAULT
;
2891 if (len
> sizeof(int))
2893 if (put_user_u32(len
, optlen
)
2894 || put_user_u32(val
, optval_addr
))
2895 return -TARGET_EFAULT
;
2899 ret
= -TARGET_ENOPROTOOPT
;
2905 case IPV6_MTU_DISCOVER
:
2908 case IPV6_RECVPKTINFO
:
2909 case IPV6_UNICAST_HOPS
:
2910 case IPV6_MULTICAST_HOPS
:
2911 case IPV6_MULTICAST_LOOP
:
2913 case IPV6_RECVHOPLIMIT
:
2914 case IPV6_2292HOPLIMIT
:
2917 case IPV6_2292PKTINFO
:
2918 case IPV6_RECVTCLASS
:
2919 case IPV6_RECVRTHDR
:
2920 case IPV6_2292RTHDR
:
2921 case IPV6_RECVHOPOPTS
:
2922 case IPV6_2292HOPOPTS
:
2923 case IPV6_RECVDSTOPTS
:
2924 case IPV6_2292DSTOPTS
:
2926 #ifdef IPV6_RECVPATHMTU
2927 case IPV6_RECVPATHMTU
:
2929 #ifdef IPV6_TRANSPARENT
2930 case IPV6_TRANSPARENT
:
2932 #ifdef IPV6_FREEBIND
2935 #ifdef IPV6_RECVORIGDSTADDR
2936 case IPV6_RECVORIGDSTADDR
:
2938 if (get_user_u32(len
, optlen
))
2939 return -TARGET_EFAULT
;
2941 return -TARGET_EINVAL
;
2943 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2946 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2948 if (put_user_u32(len
, optlen
)
2949 || put_user_u8(val
, optval_addr
))
2950 return -TARGET_EFAULT
;
2952 if (len
> sizeof(int))
2954 if (put_user_u32(len
, optlen
)
2955 || put_user_u32(val
, optval_addr
))
2956 return -TARGET_EFAULT
;
2960 ret
= -TARGET_ENOPROTOOPT
;
2967 case NETLINK_PKTINFO
:
2968 case NETLINK_BROADCAST_ERROR
:
2969 case NETLINK_NO_ENOBUFS
:
2970 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2971 case NETLINK_LISTEN_ALL_NSID
:
2972 case NETLINK_CAP_ACK
:
2973 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2974 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2975 case NETLINK_EXT_ACK
:
2976 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2977 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2978 case NETLINK_GET_STRICT_CHK
:
2979 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2980 if (get_user_u32(len
, optlen
)) {
2981 return -TARGET_EFAULT
;
2983 if (len
!= sizeof(val
)) {
2984 return -TARGET_EINVAL
;
2987 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2991 if (put_user_u32(lv
, optlen
)
2992 || put_user_u32(val
, optval_addr
)) {
2993 return -TARGET_EFAULT
;
2996 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2997 case NETLINK_LIST_MEMBERSHIPS
:
3001 if (get_user_u32(len
, optlen
)) {
3002 return -TARGET_EFAULT
;
3005 return -TARGET_EINVAL
;
3007 results
= lock_user(VERIFY_WRITE
, optval_addr
, len
, 1);
3009 return -TARGET_EFAULT
;
3012 ret
= get_errno(getsockopt(sockfd
, level
, optname
, results
, &lv
));
3014 unlock_user(results
, optval_addr
, 0);
3017 /* swap host endianess to target endianess. */
3018 for (i
= 0; i
< (len
/ sizeof(uint32_t)); i
++) {
3019 results
[i
] = tswap32(results
[i
]);
3021 if (put_user_u32(lv
, optlen
)) {
3022 return -TARGET_EFAULT
;
3024 unlock_user(results
, optval_addr
, 0);
3027 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
3032 #endif /* SOL_NETLINK */
3035 qemu_log_mask(LOG_UNIMP
,
3036 "getsockopt level=%d optname=%d not yet supported\n",
3038 ret
= -TARGET_EOPNOTSUPP
;
3044 /* Convert target low/high pair representing file offset into the host
3045 * low/high pair. This function doesn't handle offsets bigger than 64 bits
3046 * as the kernel doesn't handle them either.
3048 static void target_to_host_low_high(abi_ulong tlow
,
3050 unsigned long *hlow
,
3051 unsigned long *hhigh
)
3053 uint64_t off
= tlow
|
3054 ((unsigned long long)thigh
<< TARGET_LONG_BITS
/ 2) <<
3055 TARGET_LONG_BITS
/ 2;
3058 *hhigh
= (off
>> HOST_LONG_BITS
/ 2) >> HOST_LONG_BITS
/ 2;
3061 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
3062 abi_ulong count
, int copy
)
3064 struct target_iovec
*target_vec
;
3066 abi_ulong total_len
, max_len
;
3069 bool bad_address
= false;
3075 if (count
> IOV_MAX
) {
3080 vec
= g_try_new0(struct iovec
, count
);
3086 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3087 count
* sizeof(struct target_iovec
), 1);
3088 if (target_vec
== NULL
) {
3093 /* ??? If host page size > target page size, this will result in a
3094 value larger than what we can actually support. */
3095 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
3098 for (i
= 0; i
< count
; i
++) {
3099 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3100 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3105 } else if (len
== 0) {
3106 /* Zero length pointer is ignored. */
3107 vec
[i
].iov_base
= 0;
3109 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
3110 /* If the first buffer pointer is bad, this is a fault. But
3111 * subsequent bad buffers will result in a partial write; this
3112 * is realized by filling the vector with null pointers and
3114 if (!vec
[i
].iov_base
) {
3125 if (len
> max_len
- total_len
) {
3126 len
= max_len
- total_len
;
3129 vec
[i
].iov_len
= len
;
3133 unlock_user(target_vec
, target_addr
, 0);
3138 if (tswapal(target_vec
[i
].iov_len
) > 0) {
3139 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
3142 unlock_user(target_vec
, target_addr
, 0);
3149 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
3150 abi_ulong count
, int copy
)
3152 struct target_iovec
*target_vec
;
3155 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3156 count
* sizeof(struct target_iovec
), 1);
3158 for (i
= 0; i
< count
; i
++) {
3159 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3160 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3164 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
3166 unlock_user(target_vec
, target_addr
, 0);
3172 static inline int target_to_host_sock_type(int *type
)
3175 int target_type
= *type
;
3177 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
3178 case TARGET_SOCK_DGRAM
:
3179 host_type
= SOCK_DGRAM
;
3181 case TARGET_SOCK_STREAM
:
3182 host_type
= SOCK_STREAM
;
3185 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
3188 if (target_type
& TARGET_SOCK_CLOEXEC
) {
3189 #if defined(SOCK_CLOEXEC)
3190 host_type
|= SOCK_CLOEXEC
;
3192 return -TARGET_EINVAL
;
3195 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3196 #if defined(SOCK_NONBLOCK)
3197 host_type
|= SOCK_NONBLOCK
;
3198 #elif !defined(O_NONBLOCK)
3199 return -TARGET_EINVAL
;
3206 /* Try to emulate socket type flags after socket creation. */
3207 static int sock_flags_fixup(int fd
, int target_type
)
3209 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3210 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3211 int flags
= fcntl(fd
, F_GETFL
);
3212 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
3214 return -TARGET_EINVAL
;
3221 /* do_socket() Must return target values and target errnos. */
3222 static abi_long
do_socket(int domain
, int type
, int protocol
)
3224 int target_type
= type
;
3227 ret
= target_to_host_sock_type(&type
);
3232 if (domain
== PF_NETLINK
&& !(
3233 #ifdef CONFIG_RTNETLINK
3234 protocol
== NETLINK_ROUTE
||
3236 protocol
== NETLINK_KOBJECT_UEVENT
||
3237 protocol
== NETLINK_AUDIT
)) {
3238 return -TARGET_EPROTONOSUPPORT
;
3241 if (domain
== AF_PACKET
||
3242 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
3243 protocol
= tswap16(protocol
);
3246 ret
= get_errno(socket(domain
, type
, protocol
));
3248 ret
= sock_flags_fixup(ret
, target_type
);
3249 if (type
== SOCK_PACKET
) {
3250 /* Manage an obsolete case :
3251 * if socket type is SOCK_PACKET, bind by name
3253 fd_trans_register(ret
, &target_packet_trans
);
3254 } else if (domain
== PF_NETLINK
) {
3256 #ifdef CONFIG_RTNETLINK
3258 fd_trans_register(ret
, &target_netlink_route_trans
);
3261 case NETLINK_KOBJECT_UEVENT
:
3262 /* nothing to do: messages are strings */
3265 fd_trans_register(ret
, &target_netlink_audit_trans
);
3268 g_assert_not_reached();
3275 /* do_bind() Must return target values and target errnos. */
3276 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
3282 if ((int)addrlen
< 0) {
3283 return -TARGET_EINVAL
;
3286 addr
= alloca(addrlen
+1);
3288 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3292 return get_errno(bind(sockfd
, addr
, addrlen
));
3295 /* do_connect() Must return target values and target errnos. */
3296 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
3302 if ((int)addrlen
< 0) {
3303 return -TARGET_EINVAL
;
3306 addr
= alloca(addrlen
+1);
3308 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3312 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
3315 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3316 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
3317 int flags
, int send
)
3323 abi_ulong target_vec
;
3325 if (msgp
->msg_name
) {
3326 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
3327 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
3328 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
3329 tswapal(msgp
->msg_name
),
3331 if (ret
== -TARGET_EFAULT
) {
3332 /* For connected sockets msg_name and msg_namelen must
3333 * be ignored, so returning EFAULT immediately is wrong.
3334 * Instead, pass a bad msg_name to the host kernel, and
3335 * let it decide whether to return EFAULT or not.
3337 msg
.msg_name
= (void *)-1;
3342 msg
.msg_name
= NULL
;
3343 msg
.msg_namelen
= 0;
3345 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
3346 msg
.msg_control
= alloca(msg
.msg_controllen
);
3347 memset(msg
.msg_control
, 0, msg
.msg_controllen
);
3349 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
3351 count
= tswapal(msgp
->msg_iovlen
);
3352 target_vec
= tswapal(msgp
->msg_iov
);
3354 if (count
> IOV_MAX
) {
3355 /* sendrcvmsg returns a different errno for this condition than
3356 * readv/writev, so we must catch it here before lock_iovec() does.
3358 ret
= -TARGET_EMSGSIZE
;
3362 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
3363 target_vec
, count
, send
);
3365 ret
= -host_to_target_errno(errno
);
3368 msg
.msg_iovlen
= count
;
3372 if (fd_trans_target_to_host_data(fd
)) {
3375 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
3376 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
3377 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
3378 msg
.msg_iov
->iov_len
);
3380 msg
.msg_iov
->iov_base
= host_msg
;
3381 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3385 ret
= target_to_host_cmsg(&msg
, msgp
);
3387 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3391 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
3392 if (!is_error(ret
)) {
3394 if (fd_trans_host_to_target_data(fd
)) {
3395 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
3396 MIN(msg
.msg_iov
->iov_len
, len
));
3398 ret
= host_to_target_cmsg(msgp
, &msg
);
3400 if (!is_error(ret
)) {
3401 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
3402 msgp
->msg_flags
= tswap32(msg
.msg_flags
);
3403 if (msg
.msg_name
!= NULL
&& msg
.msg_name
!= (void *)-1) {
3404 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
3405 msg
.msg_name
, msg
.msg_namelen
);
3417 unlock_iovec(vec
, target_vec
, count
, !send
);
3422 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
3423 int flags
, int send
)
3426 struct target_msghdr
*msgp
;
3428 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
3432 return -TARGET_EFAULT
;
3434 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
3435 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
3439 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3440 * so it might not have this *mmsg-specific flag either.
3442 #ifndef MSG_WAITFORONE
3443 #define MSG_WAITFORONE 0x10000
3446 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
3447 unsigned int vlen
, unsigned int flags
,
3450 struct target_mmsghdr
*mmsgp
;
3454 if (vlen
> UIO_MAXIOV
) {
3458 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
3460 return -TARGET_EFAULT
;
3463 for (i
= 0; i
< vlen
; i
++) {
3464 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
3465 if (is_error(ret
)) {
3468 mmsgp
[i
].msg_len
= tswap32(ret
);
3469 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3470 if (flags
& MSG_WAITFORONE
) {
3471 flags
|= MSG_DONTWAIT
;
3475 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3477 /* Return number of datagrams sent if we sent any at all;
3478 * otherwise return the error.
3486 /* do_accept4() Must return target values and target errnos. */
3487 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3488 abi_ulong target_addrlen_addr
, int flags
)
3490 socklen_t addrlen
, ret_addrlen
;
3495 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3497 if (target_addr
== 0) {
3498 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
3501 /* linux returns EFAULT if addrlen pointer is invalid */
3502 if (get_user_u32(addrlen
, target_addrlen_addr
))
3503 return -TARGET_EFAULT
;
3505 if ((int)addrlen
< 0) {
3506 return -TARGET_EINVAL
;
3509 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3510 return -TARGET_EFAULT
;
3512 addr
= alloca(addrlen
);
3514 ret_addrlen
= addrlen
;
3515 ret
= get_errno(safe_accept4(fd
, addr
, &ret_addrlen
, host_flags
));
3516 if (!is_error(ret
)) {
3517 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3518 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3519 ret
= -TARGET_EFAULT
;
3525 /* do_getpeername() Must return target values and target errnos. */
3526 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3527 abi_ulong target_addrlen_addr
)
3529 socklen_t addrlen
, ret_addrlen
;
3533 if (get_user_u32(addrlen
, target_addrlen_addr
))
3534 return -TARGET_EFAULT
;
3536 if ((int)addrlen
< 0) {
3537 return -TARGET_EINVAL
;
3540 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3541 return -TARGET_EFAULT
;
3543 addr
= alloca(addrlen
);
3545 ret_addrlen
= addrlen
;
3546 ret
= get_errno(getpeername(fd
, addr
, &ret_addrlen
));
3547 if (!is_error(ret
)) {
3548 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3549 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3550 ret
= -TARGET_EFAULT
;
3556 /* do_getsockname() Must return target values and target errnos. */
3557 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3558 abi_ulong target_addrlen_addr
)
3560 socklen_t addrlen
, ret_addrlen
;
3564 if (get_user_u32(addrlen
, target_addrlen_addr
))
3565 return -TARGET_EFAULT
;
3567 if ((int)addrlen
< 0) {
3568 return -TARGET_EINVAL
;
3571 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3572 return -TARGET_EFAULT
;
3574 addr
= alloca(addrlen
);
3576 ret_addrlen
= addrlen
;
3577 ret
= get_errno(getsockname(fd
, addr
, &ret_addrlen
));
3578 if (!is_error(ret
)) {
3579 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3580 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3581 ret
= -TARGET_EFAULT
;
3587 /* do_socketpair() Must return target values and target errnos. */
3588 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3589 abi_ulong target_tab_addr
)
3594 target_to_host_sock_type(&type
);
3596 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3597 if (!is_error(ret
)) {
3598 if (put_user_s32(tab
[0], target_tab_addr
)
3599 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3600 ret
= -TARGET_EFAULT
;
3605 /* do_sendto() Must return target values and target errnos. */
3606 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3607 abi_ulong target_addr
, socklen_t addrlen
)
3611 void *copy_msg
= NULL
;
3614 if ((int)addrlen
< 0) {
3615 return -TARGET_EINVAL
;
3618 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3620 return -TARGET_EFAULT
;
3621 if (fd_trans_target_to_host_data(fd
)) {
3622 copy_msg
= host_msg
;
3623 host_msg
= g_malloc(len
);
3624 memcpy(host_msg
, copy_msg
, len
);
3625 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
3631 addr
= alloca(addrlen
+1);
3632 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
3636 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
3638 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
3643 host_msg
= copy_msg
;
3645 unlock_user(host_msg
, msg
, 0);
3649 /* do_recvfrom() Must return target values and target errnos. */
3650 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
3651 abi_ulong target_addr
,
3652 abi_ulong target_addrlen
)
3654 socklen_t addrlen
, ret_addrlen
;
3659 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
3661 return -TARGET_EFAULT
;
3663 if (get_user_u32(addrlen
, target_addrlen
)) {
3664 ret
= -TARGET_EFAULT
;
3667 if ((int)addrlen
< 0) {
3668 ret
= -TARGET_EINVAL
;
3671 addr
= alloca(addrlen
);
3672 ret_addrlen
= addrlen
;
3673 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
3674 addr
, &ret_addrlen
));
3676 addr
= NULL
; /* To keep compiler quiet. */
3677 addrlen
= 0; /* To keep compiler quiet. */
3678 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
3680 if (!is_error(ret
)) {
3681 if (fd_trans_host_to_target_data(fd
)) {
3683 trans
= fd_trans_host_to_target_data(fd
)(host_msg
, MIN(ret
, len
));
3684 if (is_error(trans
)) {
3690 host_to_target_sockaddr(target_addr
, addr
,
3691 MIN(addrlen
, ret_addrlen
));
3692 if (put_user_u32(ret_addrlen
, target_addrlen
)) {
3693 ret
= -TARGET_EFAULT
;
3697 unlock_user(host_msg
, msg
, len
);
3700 unlock_user(host_msg
, msg
, 0);
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
3798 #define N_SHM_REGIONS 32
3800 static struct shm_region
{
3804 } shm_regions
[N_SHM_REGIONS
];
3806 #ifndef TARGET_SEMID64_DS
3807 /* asm-generic version of this struct */
3808 struct target_semid64_ds
3810 struct target_ipc_perm sem_perm
;
3811 abi_ulong sem_otime
;
3812 #if TARGET_ABI_BITS == 32
3813 abi_ulong __unused1
;
3815 abi_ulong sem_ctime
;
3816 #if TARGET_ABI_BITS == 32
3817 abi_ulong __unused2
;
3819 abi_ulong sem_nsems
;
3820 abi_ulong __unused3
;
3821 abi_ulong __unused4
;
3825 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
3826 abi_ulong target_addr
)
3828 struct target_ipc_perm
*target_ip
;
3829 struct target_semid64_ds
*target_sd
;
3831 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3832 return -TARGET_EFAULT
;
3833 target_ip
= &(target_sd
->sem_perm
);
3834 host_ip
->__key
= tswap32(target_ip
->__key
);
3835 host_ip
->uid
= tswap32(target_ip
->uid
);
3836 host_ip
->gid
= tswap32(target_ip
->gid
);
3837 host_ip
->cuid
= tswap32(target_ip
->cuid
);
3838 host_ip
->cgid
= tswap32(target_ip
->cgid
);
3839 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3840 host_ip
->mode
= tswap32(target_ip
->mode
);
3842 host_ip
->mode
= tswap16(target_ip
->mode
);
3844 #if defined(TARGET_PPC)
3845 host_ip
->__seq
= tswap32(target_ip
->__seq
);
3847 host_ip
->__seq
= tswap16(target_ip
->__seq
);
3849 unlock_user_struct(target_sd
, target_addr
, 0);
3853 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
3854 struct ipc_perm
*host_ip
)
3856 struct target_ipc_perm
*target_ip
;
3857 struct target_semid64_ds
*target_sd
;
3859 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3860 return -TARGET_EFAULT
;
3861 target_ip
= &(target_sd
->sem_perm
);
3862 target_ip
->__key
= tswap32(host_ip
->__key
);
3863 target_ip
->uid
= tswap32(host_ip
->uid
);
3864 target_ip
->gid
= tswap32(host_ip
->gid
);
3865 target_ip
->cuid
= tswap32(host_ip
->cuid
);
3866 target_ip
->cgid
= tswap32(host_ip
->cgid
);
3867 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3868 target_ip
->mode
= tswap32(host_ip
->mode
);
3870 target_ip
->mode
= tswap16(host_ip
->mode
);
3872 #if defined(TARGET_PPC)
3873 target_ip
->__seq
= tswap32(host_ip
->__seq
);
3875 target_ip
->__seq
= tswap16(host_ip
->__seq
);
3877 unlock_user_struct(target_sd
, target_addr
, 1);
3881 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
3882 abi_ulong target_addr
)
3884 struct target_semid64_ds
*target_sd
;
3886 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3887 return -TARGET_EFAULT
;
3888 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
3889 return -TARGET_EFAULT
;
3890 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
3891 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
3892 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
3893 unlock_user_struct(target_sd
, target_addr
, 0);
3897 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
3898 struct semid_ds
*host_sd
)
3900 struct target_semid64_ds
*target_sd
;
3902 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3903 return -TARGET_EFAULT
;
3904 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
3905 return -TARGET_EFAULT
;
3906 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
3907 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
3908 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
3909 unlock_user_struct(target_sd
, target_addr
, 1);
3913 struct target_seminfo
{
3926 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
3927 struct seminfo
*host_seminfo
)
3929 struct target_seminfo
*target_seminfo
;
3930 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
3931 return -TARGET_EFAULT
;
3932 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
3933 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
3934 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
3935 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
3936 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
3937 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
3938 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
3939 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
3940 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
3941 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
3942 unlock_user_struct(target_seminfo
, target_addr
, 1);
3948 struct semid_ds
*buf
;
3949 unsigned short *array
;
3950 struct seminfo
*__buf
;
3953 union target_semun
{
3960 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
3961 abi_ulong target_addr
)
3964 unsigned short *array
;
3966 struct semid_ds semid_ds
;
3969 semun
.buf
= &semid_ds
;
3971 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3973 return get_errno(ret
);
3975 nsems
= semid_ds
.sem_nsems
;
3977 *host_array
= g_try_new(unsigned short, nsems
);
3979 return -TARGET_ENOMEM
;
3981 array
= lock_user(VERIFY_READ
, target_addr
,
3982 nsems
*sizeof(unsigned short), 1);
3984 g_free(*host_array
);
3985 return -TARGET_EFAULT
;
3988 for(i
=0; i
<nsems
; i
++) {
3989 __get_user((*host_array
)[i
], &array
[i
]);
3991 unlock_user(array
, target_addr
, 0);
3996 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
3997 unsigned short **host_array
)
4000 unsigned short *array
;
4002 struct semid_ds semid_ds
;
4005 semun
.buf
= &semid_ds
;
4007 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4009 return get_errno(ret
);
4011 nsems
= semid_ds
.sem_nsems
;
4013 array
= lock_user(VERIFY_WRITE
, target_addr
,
4014 nsems
*sizeof(unsigned short), 0);
4016 return -TARGET_EFAULT
;
4018 for(i
=0; i
<nsems
; i
++) {
4019 __put_user((*host_array
)[i
], &array
[i
]);
4021 g_free(*host_array
);
4022 unlock_user(array
, target_addr
, 1);
4027 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
4028 abi_ulong target_arg
)
4030 union target_semun target_su
= { .buf
= target_arg
};
4032 struct semid_ds dsarg
;
4033 unsigned short *array
= NULL
;
4034 struct seminfo seminfo
;
4035 abi_long ret
= -TARGET_EINVAL
;
4042 /* In 64 bit cross-endian situations, we will erroneously pick up
4043 * the wrong half of the union for the "val" element. To rectify
4044 * this, the entire 8-byte structure is byteswapped, followed by
4045 * a swap of the 4 byte val field. In other cases, the data is
4046 * already in proper host byte order. */
4047 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
4048 target_su
.buf
= tswapal(target_su
.buf
);
4049 arg
.val
= tswap32(target_su
.val
);
4051 arg
.val
= target_su
.val
;
4053 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4057 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
4061 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4062 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
4069 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
4073 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4074 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
4080 arg
.__buf
= &seminfo
;
4081 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4082 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
4090 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
4097 struct target_sembuf
{
4098 unsigned short sem_num
;
4103 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
4104 abi_ulong target_addr
,
4107 struct target_sembuf
*target_sembuf
;
4110 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
4111 nsops
*sizeof(struct target_sembuf
), 1);
4113 return -TARGET_EFAULT
;
4115 for(i
=0; i
<nsops
; i
++) {
4116 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
4117 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
4118 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
4121 unlock_user(target_sembuf
, target_addr
, 0);
4126 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4127 defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4130 * This macro is required to handle the s390 variants, which passes the
4131 * arguments in a different order than default.
4134 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4135 (__nsops), (__timeout), (__sops)
4137 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4138 (__nsops), 0, (__sops), (__timeout)
4141 static inline abi_long
do_semtimedop(int semid
,
4144 abi_long timeout
, bool time64
)
4146 struct sembuf
*sops
;
4147 struct timespec ts
, *pts
= NULL
;
4153 if (target_to_host_timespec64(pts
, timeout
)) {
4154 return -TARGET_EFAULT
;
4157 if (target_to_host_timespec(pts
, timeout
)) {
4158 return -TARGET_EFAULT
;
4163 if (nsops
> TARGET_SEMOPM
) {
4164 return -TARGET_E2BIG
;
4167 sops
= g_new(struct sembuf
, nsops
);
4169 if (target_to_host_sembuf(sops
, ptr
, nsops
)) {
4171 return -TARGET_EFAULT
;
4174 ret
= -TARGET_ENOSYS
;
4175 #ifdef __NR_semtimedop
4176 ret
= get_errno(safe_semtimedop(semid
, sops
, nsops
, pts
));
4179 if (ret
== -TARGET_ENOSYS
) {
4180 ret
= get_errno(safe_ipc(IPCOP_semtimedop
, semid
,
4181 SEMTIMEDOP_IPC_ARGS(nsops
, sops
, (long)pts
)));
4189 struct target_msqid_ds
4191 struct target_ipc_perm msg_perm
;
4192 abi_ulong msg_stime
;
4193 #if TARGET_ABI_BITS == 32
4194 abi_ulong __unused1
;
4196 abi_ulong msg_rtime
;
4197 #if TARGET_ABI_BITS == 32
4198 abi_ulong __unused2
;
4200 abi_ulong msg_ctime
;
4201 #if TARGET_ABI_BITS == 32
4202 abi_ulong __unused3
;
4204 abi_ulong __msg_cbytes
;
4206 abi_ulong msg_qbytes
;
4207 abi_ulong msg_lspid
;
4208 abi_ulong msg_lrpid
;
4209 abi_ulong __unused4
;
4210 abi_ulong __unused5
;
4213 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
4214 abi_ulong target_addr
)
4216 struct target_msqid_ds
*target_md
;
4218 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
4219 return -TARGET_EFAULT
;
4220 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
4221 return -TARGET_EFAULT
;
4222 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
4223 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
4224 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
4225 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
4226 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
4227 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
4228 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
4229 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
4230 unlock_user_struct(target_md
, target_addr
, 0);
4234 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
4235 struct msqid_ds
*host_md
)
4237 struct target_msqid_ds
*target_md
;
4239 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
4240 return -TARGET_EFAULT
;
4241 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
4242 return -TARGET_EFAULT
;
4243 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
4244 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
4245 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
4246 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
4247 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
4248 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
4249 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
4250 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
4251 unlock_user_struct(target_md
, target_addr
, 1);
4255 struct target_msginfo
{
4263 unsigned short int msgseg
;
4266 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
4267 struct msginfo
*host_msginfo
)
4269 struct target_msginfo
*target_msginfo
;
4270 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
4271 return -TARGET_EFAULT
;
4272 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
4273 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
4274 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
4275 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
4276 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
4277 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
4278 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
4279 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
4280 unlock_user_struct(target_msginfo
, target_addr
, 1);
4284 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
4286 struct msqid_ds dsarg
;
4287 struct msginfo msginfo
;
4288 abi_long ret
= -TARGET_EINVAL
;
4296 if (target_to_host_msqid_ds(&dsarg
,ptr
))
4297 return -TARGET_EFAULT
;
4298 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
4299 if (host_to_target_msqid_ds(ptr
,&dsarg
))
4300 return -TARGET_EFAULT
;
4303 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
4307 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
4308 if (host_to_target_msginfo(ptr
, &msginfo
))
4309 return -TARGET_EFAULT
;
4316 struct target_msgbuf
{
4321 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
4322 ssize_t msgsz
, int msgflg
)
4324 struct target_msgbuf
*target_mb
;
4325 struct msgbuf
*host_mb
;
4329 return -TARGET_EINVAL
;
4332 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
4333 return -TARGET_EFAULT
;
4334 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4336 unlock_user_struct(target_mb
, msgp
, 0);
4337 return -TARGET_ENOMEM
;
4339 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
4340 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
4341 ret
= -TARGET_ENOSYS
;
4343 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
4346 if (ret
== -TARGET_ENOSYS
) {
4348 ret
= get_errno(safe_ipc(IPCOP_msgsnd
, msqid
, msgsz
, msgflg
,
4351 ret
= get_errno(safe_ipc(IPCOP_msgsnd
, msqid
, msgsz
, msgflg
,
4357 unlock_user_struct(target_mb
, msgp
, 0);
4363 #if defined(__sparc__)
4364 /* SPARC for msgrcv it does not use the kludge on final 2 arguments. */
4365 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4366 #elif defined(__s390x__)
4367 /* The s390 sys_ipc variant has only five parameters. */
4368 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4369 ((long int[]){(long int)__msgp, __msgtyp})
4371 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4372 ((long int[]){(long int)__msgp, __msgtyp}), 0
4376 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
4377 ssize_t msgsz
, abi_long msgtyp
,
4380 struct target_msgbuf
*target_mb
;
4382 struct msgbuf
*host_mb
;
4386 return -TARGET_EINVAL
;
4389 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
4390 return -TARGET_EFAULT
;
4392 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4394 ret
= -TARGET_ENOMEM
;
4397 ret
= -TARGET_ENOSYS
;
4399 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
4402 if (ret
== -TARGET_ENOSYS
) {
4403 ret
= get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv
), msqid
, msgsz
,
4404 msgflg
, MSGRCV_ARGS(host_mb
, msgtyp
)));
4409 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
4410 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
4411 if (!target_mtext
) {
4412 ret
= -TARGET_EFAULT
;
4415 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
4416 unlock_user(target_mtext
, target_mtext_addr
, ret
);
4419 target_mb
->mtype
= tswapal(host_mb
->mtype
);
4423 unlock_user_struct(target_mb
, msgp
, 1);
4428 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
4429 abi_ulong target_addr
)
4431 struct target_shmid_ds
*target_sd
;
4433 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4434 return -TARGET_EFAULT
;
4435 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
4436 return -TARGET_EFAULT
;
4437 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4438 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4439 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4440 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4441 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4442 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4443 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4444 unlock_user_struct(target_sd
, target_addr
, 0);
4448 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
4449 struct shmid_ds
*host_sd
)
4451 struct target_shmid_ds
*target_sd
;
4453 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4454 return -TARGET_EFAULT
;
4455 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
4456 return -TARGET_EFAULT
;
4457 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4458 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4459 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4460 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4461 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4462 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4463 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4464 unlock_user_struct(target_sd
, target_addr
, 1);
4468 struct target_shminfo
{
4476 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
4477 struct shminfo
*host_shminfo
)
4479 struct target_shminfo
*target_shminfo
;
4480 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
4481 return -TARGET_EFAULT
;
4482 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
4483 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
4484 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
4485 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
4486 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
4487 unlock_user_struct(target_shminfo
, target_addr
, 1);
4491 struct target_shm_info
{
4496 abi_ulong swap_attempts
;
4497 abi_ulong swap_successes
;
4500 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
4501 struct shm_info
*host_shm_info
)
4503 struct target_shm_info
*target_shm_info
;
4504 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
4505 return -TARGET_EFAULT
;
4506 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
4507 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
4508 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
4509 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
4510 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
4511 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
4512 unlock_user_struct(target_shm_info
, target_addr
, 1);
4516 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
4518 struct shmid_ds dsarg
;
4519 struct shminfo shminfo
;
4520 struct shm_info shm_info
;
4521 abi_long ret
= -TARGET_EINVAL
;
4529 if (target_to_host_shmid_ds(&dsarg
, buf
))
4530 return -TARGET_EFAULT
;
4531 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
4532 if (host_to_target_shmid_ds(buf
, &dsarg
))
4533 return -TARGET_EFAULT
;
4536 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
4537 if (host_to_target_shminfo(buf
, &shminfo
))
4538 return -TARGET_EFAULT
;
4541 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
4542 if (host_to_target_shm_info(buf
, &shm_info
))
4543 return -TARGET_EFAULT
;
4548 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
4555 #ifndef TARGET_FORCE_SHMLBA
4556 /* For most architectures, SHMLBA is the same as the page size;
4557 * some architectures have larger values, in which case they should
4558 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4559 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4560 * and defining its own value for SHMLBA.
4562 * The kernel also permits SHMLBA to be set by the architecture to a
4563 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4564 * this means that addresses are rounded to the large size if
4565 * SHM_RND is set but addresses not aligned to that size are not rejected
4566 * as long as they are at least page-aligned. Since the only architecture
4567 * which uses this is ia64 this code doesn't provide for that oddity.
4569 static inline abi_ulong
target_shmlba(CPUArchState
*cpu_env
)
4571 return TARGET_PAGE_SIZE
;
4575 static inline abi_ulong
do_shmat(CPUArchState
*cpu_env
,
4576 int shmid
, abi_ulong shmaddr
, int shmflg
)
4580 struct shmid_ds shm_info
;
4584 /* find out the length of the shared memory segment */
4585 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
4586 if (is_error(ret
)) {
4587 /* can't get length, bail out */
4591 shmlba
= target_shmlba(cpu_env
);
4593 if (shmaddr
& (shmlba
- 1)) {
4594 if (shmflg
& SHM_RND
) {
4595 shmaddr
&= ~(shmlba
- 1);
4597 return -TARGET_EINVAL
;
4600 if (!guest_range_valid(shmaddr
, shm_info
.shm_segsz
)) {
4601 return -TARGET_EINVAL
;
4607 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
4609 abi_ulong mmap_start
;
4611 /* In order to use the host shmat, we need to honor host SHMLBA. */
4612 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
, MAX(SHMLBA
, shmlba
));
4614 if (mmap_start
== -1) {
4616 host_raddr
= (void *)-1;
4618 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
4621 if (host_raddr
== (void *)-1) {
4623 return get_errno((long)host_raddr
);
4625 raddr
=h2g((unsigned long)host_raddr
);
4627 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
4628 PAGE_VALID
| PAGE_READ
|
4629 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
4631 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
4632 if (!shm_regions
[i
].in_use
) {
4633 shm_regions
[i
].in_use
= true;
4634 shm_regions
[i
].start
= raddr
;
4635 shm_regions
[i
].size
= shm_info
.shm_segsz
;
4645 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
4652 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
4653 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
4654 shm_regions
[i
].in_use
= false;
4655 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
4659 rv
= get_errno(shmdt(g2h(shmaddr
)));
4666 #ifdef TARGET_NR_ipc
4667 /* ??? This only works with linear mappings. */
4668 /* do_ipc() must return target values and target errnos. */
4669 static abi_long
do_ipc(CPUArchState
*cpu_env
,
4670 unsigned int call
, abi_long first
,
4671 abi_long second
, abi_long third
,
4672 abi_long ptr
, abi_long fifth
)
4677 version
= call
>> 16;
4682 ret
= do_semtimedop(first
, ptr
, second
, 0, false);
4684 case IPCOP_semtimedop
:
4686 * The s390 sys_ipc variant has only five parameters instead of six
4687 * (as for default variant) and the only difference is the handling of
4688 * SEMTIMEDOP where on s390 the third parameter is used as a pointer
4689 * to a struct timespec where the generic variant uses fifth parameter.
4691 #if defined(TARGET_S390X)
4692 ret
= do_semtimedop(first
, ptr
, second
, third
, TARGET_ABI_BITS
== 64);
4694 ret
= do_semtimedop(first
, ptr
, second
, fifth
, TARGET_ABI_BITS
== 64);
4699 ret
= get_errno(semget(first
, second
, third
));
4702 case IPCOP_semctl
: {
4703 /* The semun argument to semctl is passed by value, so dereference the
4706 get_user_ual(atptr
, ptr
);
4707 ret
= do_semctl(first
, second
, third
, atptr
);
4712 ret
= get_errno(msgget(first
, second
));
4716 ret
= do_msgsnd(first
, ptr
, second
, third
);
4720 ret
= do_msgctl(first
, second
, ptr
);
4727 struct target_ipc_kludge
{
4732 if (!lock_user_struct(VERIFY_READ
, tmp
, ptr
, 1)) {
4733 ret
= -TARGET_EFAULT
;
4737 ret
= do_msgrcv(first
, tswapal(tmp
->msgp
), second
, tswapal(tmp
->msgtyp
), third
);
4739 unlock_user_struct(tmp
, ptr
, 0);
4743 ret
= do_msgrcv(first
, ptr
, second
, fifth
, third
);
4752 raddr
= do_shmat(cpu_env
, first
, ptr
, second
);
4753 if (is_error(raddr
))
4754 return get_errno(raddr
);
4755 if (put_user_ual(raddr
, third
))
4756 return -TARGET_EFAULT
;
4760 ret
= -TARGET_EINVAL
;
4765 ret
= do_shmdt(ptr
);
4769 /* IPC_* flag values are the same on all linux platforms */
4770 ret
= get_errno(shmget(first
, second
, third
));
4773 /* IPC_* and SHM_* command values are the same on all linux platforms */
4775 ret
= do_shmctl(first
, second
, ptr
);
4778 qemu_log_mask(LOG_UNIMP
, "Unsupported ipc call: %d (version %d)\n",
4780 ret
= -TARGET_ENOSYS
;
4787 /* kernel structure types definitions */
4789 #define STRUCT(name, ...) STRUCT_ ## name,
4790 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4792 #include "syscall_types.h"
4796 #undef STRUCT_SPECIAL
4798 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4799 #define STRUCT_SPECIAL(name)
4800 #include "syscall_types.h"
4802 #undef STRUCT_SPECIAL
4804 #define MAX_STRUCT_SIZE 4096
4806 #ifdef CONFIG_FIEMAP
4807 /* So fiemap access checks don't overflow on 32 bit systems.
4808 * This is very slightly smaller than the limit imposed by
4809 * the underlying kernel.
4811 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4812 / sizeof(struct fiemap_extent))
4814 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4815 int fd
, int cmd
, abi_long arg
)
4817 /* The parameter for this ioctl is a struct fiemap followed
4818 * by an array of struct fiemap_extent whose size is set
4819 * in fiemap->fm_extent_count. The array is filled in by the
4822 int target_size_in
, target_size_out
;
4824 const argtype
*arg_type
= ie
->arg_type
;
4825 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
4828 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
4832 assert(arg_type
[0] == TYPE_PTR
);
4833 assert(ie
->access
== IOC_RW
);
4835 target_size_in
= thunk_type_size(arg_type
, 0);
4836 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
4838 return -TARGET_EFAULT
;
4840 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4841 unlock_user(argptr
, arg
, 0);
4842 fm
= (struct fiemap
*)buf_temp
;
4843 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
4844 return -TARGET_EINVAL
;
4847 outbufsz
= sizeof (*fm
) +
4848 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
4850 if (outbufsz
> MAX_STRUCT_SIZE
) {
4851 /* We can't fit all the extents into the fixed size buffer.
4852 * Allocate one that is large enough and use it instead.
4854 fm
= g_try_malloc(outbufsz
);
4856 return -TARGET_ENOMEM
;
4858 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
4861 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, fm
));
4862 if (!is_error(ret
)) {
4863 target_size_out
= target_size_in
;
4864 /* An extent_count of 0 means we were only counting the extents
4865 * so there are no structs to copy
4867 if (fm
->fm_extent_count
!= 0) {
4868 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
4870 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
4872 ret
= -TARGET_EFAULT
;
4874 /* Convert the struct fiemap */
4875 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
4876 if (fm
->fm_extent_count
!= 0) {
4877 p
= argptr
+ target_size_in
;
4878 /* ...and then all the struct fiemap_extents */
4879 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
4880 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
4885 unlock_user(argptr
, arg
, target_size_out
);
4895 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4896 int fd
, int cmd
, abi_long arg
)
4898 const argtype
*arg_type
= ie
->arg_type
;
4902 struct ifconf
*host_ifconf
;
4904 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
4905 int target_ifreq_size
;
4910 abi_long target_ifc_buf
;
4914 assert(arg_type
[0] == TYPE_PTR
);
4915 assert(ie
->access
== IOC_RW
);
4918 target_size
= thunk_type_size(arg_type
, 0);
4920 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4922 return -TARGET_EFAULT
;
4923 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4924 unlock_user(argptr
, arg
, 0);
4926 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
4927 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
4928 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
4930 if (target_ifc_buf
!= 0) {
4931 target_ifc_len
= host_ifconf
->ifc_len
;
4932 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
4933 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
4935 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
4936 if (outbufsz
> MAX_STRUCT_SIZE
) {
4938 * We can't fit all the extents into the fixed size buffer.
4939 * Allocate one that is large enough and use it instead.
4941 host_ifconf
= malloc(outbufsz
);
4943 return -TARGET_ENOMEM
;
4945 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
4948 host_ifc_buf
= (char *)host_ifconf
+ sizeof(*host_ifconf
);
4950 host_ifconf
->ifc_len
= host_ifc_len
;
4952 host_ifc_buf
= NULL
;
4954 host_ifconf
->ifc_buf
= host_ifc_buf
;
4956 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
4957 if (!is_error(ret
)) {
4958 /* convert host ifc_len to target ifc_len */
4960 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
4961 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
4962 host_ifconf
->ifc_len
= target_ifc_len
;
4964 /* restore target ifc_buf */
4966 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
4968 /* copy struct ifconf to target user */
4970 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4972 return -TARGET_EFAULT
;
4973 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
4974 unlock_user(argptr
, arg
, target_size
);
4976 if (target_ifc_buf
!= 0) {
4977 /* copy ifreq[] to target user */
4978 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
4979 for (i
= 0; i
< nb_ifreq
; i
++) {
4980 thunk_convert(argptr
+ i
* target_ifreq_size
,
4981 host_ifc_buf
+ i
* sizeof(struct ifreq
),
4982 ifreq_arg_type
, THUNK_TARGET
);
4984 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
4995 #if defined(CONFIG_USBFS)
4996 #if HOST_LONG_BITS > 64
4997 #error USBDEVFS thunks do not support >64 bit hosts yet.
5000 uint64_t target_urb_adr
;
5001 uint64_t target_buf_adr
;
5002 char *target_buf_ptr
;
5003 struct usbdevfs_urb host_urb
;
5006 static GHashTable
*usbdevfs_urb_hashtable(void)
5008 static GHashTable
*urb_hashtable
;
5010 if (!urb_hashtable
) {
5011 urb_hashtable
= g_hash_table_new(g_int64_hash
, g_int64_equal
);
5013 return urb_hashtable
;
5016 static void urb_hashtable_insert(struct live_urb
*urb
)
5018 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
5019 g_hash_table_insert(urb_hashtable
, urb
, urb
);
5022 static struct live_urb
*urb_hashtable_lookup(uint64_t target_urb_adr
)
5024 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
5025 return g_hash_table_lookup(urb_hashtable
, &target_urb_adr
);
5028 static void urb_hashtable_remove(struct live_urb
*urb
)
5030 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
5031 g_hash_table_remove(urb_hashtable
, urb
);
5035 do_ioctl_usbdevfs_reapurb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5036 int fd
, int cmd
, abi_long arg
)
5038 const argtype usbfsurb_arg_type
[] = { MK_STRUCT(STRUCT_usbdevfs_urb
) };
5039 const argtype ptrvoid_arg_type
[] = { TYPE_PTRVOID
, 0, 0 };
5040 struct live_urb
*lurb
;
5044 uintptr_t target_urb_adr
;
5047 target_size
= thunk_type_size(usbfsurb_arg_type
, THUNK_TARGET
);
5049 memset(buf_temp
, 0, sizeof(uint64_t));
5050 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5051 if (is_error(ret
)) {
5055 memcpy(&hurb
, buf_temp
, sizeof(uint64_t));
5056 lurb
= (void *)((uintptr_t)hurb
- offsetof(struct live_urb
, host_urb
));
5057 if (!lurb
->target_urb_adr
) {
5058 return -TARGET_EFAULT
;
5060 urb_hashtable_remove(lurb
);
5061 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
,
5062 lurb
->host_urb
.buffer_length
);
5063 lurb
->target_buf_ptr
= NULL
;
5065 /* restore the guest buffer pointer */
5066 lurb
->host_urb
.buffer
= (void *)(uintptr_t)lurb
->target_buf_adr
;
5068 /* update the guest urb struct */
5069 argptr
= lock_user(VERIFY_WRITE
, lurb
->target_urb_adr
, target_size
, 0);
5072 return -TARGET_EFAULT
;
5074 thunk_convert(argptr
, &lurb
->host_urb
, usbfsurb_arg_type
, THUNK_TARGET
);
5075 unlock_user(argptr
, lurb
->target_urb_adr
, target_size
);
5077 target_size
= thunk_type_size(ptrvoid_arg_type
, THUNK_TARGET
);
5078 /* write back the urb handle */
5079 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5082 return -TARGET_EFAULT
;
5085 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5086 target_urb_adr
= lurb
->target_urb_adr
;
5087 thunk_convert(argptr
, &target_urb_adr
, ptrvoid_arg_type
, THUNK_TARGET
);
5088 unlock_user(argptr
, arg
, target_size
);
5095 do_ioctl_usbdevfs_discardurb(const IOCTLEntry
*ie
,
5096 uint8_t *buf_temp
__attribute__((unused
)),
5097 int fd
, int cmd
, abi_long arg
)
5099 struct live_urb
*lurb
;
5101 /* map target address back to host URB with metadata. */
5102 lurb
= urb_hashtable_lookup(arg
);
5104 return -TARGET_EFAULT
;
5106 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
5110 do_ioctl_usbdevfs_submiturb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5111 int fd
, int cmd
, abi_long arg
)
5113 const argtype
*arg_type
= ie
->arg_type
;
5118 struct live_urb
*lurb
;
5121 * each submitted URB needs to map to a unique ID for the
5122 * kernel, and that unique ID needs to be a pointer to
5123 * host memory. hence, we need to malloc for each URB.
5124 * isochronous transfers have a variable length struct.
5127 target_size
= thunk_type_size(arg_type
, THUNK_TARGET
);
5129 /* construct host copy of urb and metadata */
5130 lurb
= g_try_malloc0(sizeof(struct live_urb
));
5132 return -TARGET_ENOMEM
;
5135 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5138 return -TARGET_EFAULT
;
5140 thunk_convert(&lurb
->host_urb
, argptr
, arg_type
, THUNK_HOST
);
5141 unlock_user(argptr
, arg
, 0);
5143 lurb
->target_urb_adr
= arg
;
5144 lurb
->target_buf_adr
= (uintptr_t)lurb
->host_urb
.buffer
;
5146 /* buffer space used depends on endpoint type so lock the entire buffer */
5147 /* control type urbs should check the buffer contents for true direction */
5148 rw_dir
= lurb
->host_urb
.endpoint
& USB_DIR_IN
? VERIFY_WRITE
: VERIFY_READ
;
5149 lurb
->target_buf_ptr
= lock_user(rw_dir
, lurb
->target_buf_adr
,
5150 lurb
->host_urb
.buffer_length
, 1);
5151 if (lurb
->target_buf_ptr
== NULL
) {
5153 return -TARGET_EFAULT
;
5156 /* update buffer pointer in host copy */
5157 lurb
->host_urb
.buffer
= lurb
->target_buf_ptr
;
5159 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
5160 if (is_error(ret
)) {
5161 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
, 0);
5164 urb_hashtable_insert(lurb
);
5169 #endif /* CONFIG_USBFS */
5171 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5172 int cmd
, abi_long arg
)
5175 struct dm_ioctl
*host_dm
;
5176 abi_long guest_data
;
5177 uint32_t guest_data_size
;
5179 const argtype
*arg_type
= ie
->arg_type
;
5181 void *big_buf
= NULL
;
5185 target_size
= thunk_type_size(arg_type
, 0);
5186 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5188 ret
= -TARGET_EFAULT
;
5191 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5192 unlock_user(argptr
, arg
, 0);
5194 /* buf_temp is too small, so fetch things into a bigger buffer */
5195 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
5196 memcpy(big_buf
, buf_temp
, target_size
);
5200 guest_data
= arg
+ host_dm
->data_start
;
5201 if ((guest_data
- arg
) < 0) {
5202 ret
= -TARGET_EINVAL
;
5205 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5206 host_data
= (char*)host_dm
+ host_dm
->data_start
;
5208 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
5210 ret
= -TARGET_EFAULT
;
5214 switch (ie
->host_cmd
) {
5216 case DM_LIST_DEVICES
:
5219 case DM_DEV_SUSPEND
:
5222 case DM_TABLE_STATUS
:
5223 case DM_TABLE_CLEAR
:
5225 case DM_LIST_VERSIONS
:
5229 case DM_DEV_SET_GEOMETRY
:
5230 /* data contains only strings */
5231 memcpy(host_data
, argptr
, guest_data_size
);
5234 memcpy(host_data
, argptr
, guest_data_size
);
5235 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
5239 void *gspec
= argptr
;
5240 void *cur_data
= host_data
;
5241 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5242 int spec_size
= thunk_type_size(arg_type
, 0);
5245 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5246 struct dm_target_spec
*spec
= cur_data
;
5250 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
5251 slen
= strlen((char*)gspec
+ spec_size
) + 1;
5253 spec
->next
= sizeof(*spec
) + slen
;
5254 strcpy((char*)&spec
[1], gspec
+ spec_size
);
5256 cur_data
+= spec
->next
;
5261 ret
= -TARGET_EINVAL
;
5262 unlock_user(argptr
, guest_data
, 0);
5265 unlock_user(argptr
, guest_data
, 0);
5267 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5268 if (!is_error(ret
)) {
5269 guest_data
= arg
+ host_dm
->data_start
;
5270 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5271 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
5272 switch (ie
->host_cmd
) {
5277 case DM_DEV_SUSPEND
:
5280 case DM_TABLE_CLEAR
:
5282 case DM_DEV_SET_GEOMETRY
:
5283 /* no return data */
5285 case DM_LIST_DEVICES
:
5287 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
5288 uint32_t remaining_data
= guest_data_size
;
5289 void *cur_data
= argptr
;
5290 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
5291 int nl_size
= 12; /* can't use thunk_size due to alignment */
5294 uint32_t next
= nl
->next
;
5296 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
5298 if (remaining_data
< nl
->next
) {
5299 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5302 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
5303 strcpy(cur_data
+ nl_size
, nl
->name
);
5304 cur_data
+= nl
->next
;
5305 remaining_data
-= nl
->next
;
5309 nl
= (void*)nl
+ next
;
5314 case DM_TABLE_STATUS
:
5316 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
5317 void *cur_data
= argptr
;
5318 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5319 int spec_size
= thunk_type_size(arg_type
, 0);
5322 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5323 uint32_t next
= spec
->next
;
5324 int slen
= strlen((char*)&spec
[1]) + 1;
5325 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
5326 if (guest_data_size
< spec
->next
) {
5327 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5330 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
5331 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
5332 cur_data
= argptr
+ spec
->next
;
5333 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
5339 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
5340 int count
= *(uint32_t*)hdata
;
5341 uint64_t *hdev
= hdata
+ 8;
5342 uint64_t *gdev
= argptr
+ 8;
5345 *(uint32_t*)argptr
= tswap32(count
);
5346 for (i
= 0; i
< count
; i
++) {
5347 *gdev
= tswap64(*hdev
);
5353 case DM_LIST_VERSIONS
:
5355 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
5356 uint32_t remaining_data
= guest_data_size
;
5357 void *cur_data
= argptr
;
5358 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
5359 int vers_size
= thunk_type_size(arg_type
, 0);
5362 uint32_t next
= vers
->next
;
5364 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
5366 if (remaining_data
< vers
->next
) {
5367 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5370 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
5371 strcpy(cur_data
+ vers_size
, vers
->name
);
5372 cur_data
+= vers
->next
;
5373 remaining_data
-= vers
->next
;
5377 vers
= (void*)vers
+ next
;
5382 unlock_user(argptr
, guest_data
, 0);
5383 ret
= -TARGET_EINVAL
;
5386 unlock_user(argptr
, guest_data
, guest_data_size
);
5388 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5390 ret
= -TARGET_EFAULT
;
5393 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5394 unlock_user(argptr
, arg
, target_size
);
5401 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5402 int cmd
, abi_long arg
)
5406 const argtype
*arg_type
= ie
->arg_type
;
5407 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
5410 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
5411 struct blkpg_partition host_part
;
5413 /* Read and convert blkpg */
5415 target_size
= thunk_type_size(arg_type
, 0);
5416 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5418 ret
= -TARGET_EFAULT
;
5421 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5422 unlock_user(argptr
, arg
, 0);
5424 switch (host_blkpg
->op
) {
5425 case BLKPG_ADD_PARTITION
:
5426 case BLKPG_DEL_PARTITION
:
5427 /* payload is struct blkpg_partition */
5430 /* Unknown opcode */
5431 ret
= -TARGET_EINVAL
;
5435 /* Read and convert blkpg->data */
5436 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
5437 target_size
= thunk_type_size(part_arg_type
, 0);
5438 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5440 ret
= -TARGET_EFAULT
;
5443 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
5444 unlock_user(argptr
, arg
, 0);
5446 /* Swizzle the data pointer to our local copy and call! */
5447 host_blkpg
->data
= &host_part
;
5448 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
5454 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5455 int fd
, int cmd
, abi_long arg
)
5457 const argtype
*arg_type
= ie
->arg_type
;
5458 const StructEntry
*se
;
5459 const argtype
*field_types
;
5460 const int *dst_offsets
, *src_offsets
;
5463 abi_ulong
*target_rt_dev_ptr
= NULL
;
5464 unsigned long *host_rt_dev_ptr
= NULL
;
5468 assert(ie
->access
== IOC_W
);
5469 assert(*arg_type
== TYPE_PTR
);
5471 assert(*arg_type
== TYPE_STRUCT
);
5472 target_size
= thunk_type_size(arg_type
, 0);
5473 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5475 return -TARGET_EFAULT
;
5478 assert(*arg_type
== (int)STRUCT_rtentry
);
5479 se
= struct_entries
+ *arg_type
++;
5480 assert(se
->convert
[0] == NULL
);
5481 /* convert struct here to be able to catch rt_dev string */
5482 field_types
= se
->field_types
;
5483 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
5484 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
5485 for (i
= 0; i
< se
->nb_fields
; i
++) {
5486 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
5487 assert(*field_types
== TYPE_PTRVOID
);
5488 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
5489 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
5490 if (*target_rt_dev_ptr
!= 0) {
5491 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
5492 tswapal(*target_rt_dev_ptr
));
5493 if (!*host_rt_dev_ptr
) {
5494 unlock_user(argptr
, arg
, 0);
5495 return -TARGET_EFAULT
;
5498 *host_rt_dev_ptr
= 0;
5503 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
5504 argptr
+ src_offsets
[i
],
5505 field_types
, THUNK_HOST
);
5507 unlock_user(argptr
, arg
, 0);
5509 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5511 assert(host_rt_dev_ptr
!= NULL
);
5512 assert(target_rt_dev_ptr
!= NULL
);
5513 if (*host_rt_dev_ptr
!= 0) {
5514 unlock_user((void *)*host_rt_dev_ptr
,
5515 *target_rt_dev_ptr
, 0);
5520 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5521 int fd
, int cmd
, abi_long arg
)
5523 int sig
= target_to_host_signal(arg
);
5524 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
5527 static abi_long
do_ioctl_SIOCGSTAMP(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5528 int fd
, int cmd
, abi_long arg
)
5533 ret
= get_errno(safe_ioctl(fd
, SIOCGSTAMP
, &tv
));
5534 if (is_error(ret
)) {
5538 if (cmd
== (int)TARGET_SIOCGSTAMP_OLD
) {
5539 if (copy_to_user_timeval(arg
, &tv
)) {
5540 return -TARGET_EFAULT
;
5543 if (copy_to_user_timeval64(arg
, &tv
)) {
5544 return -TARGET_EFAULT
;
5551 static abi_long
do_ioctl_SIOCGSTAMPNS(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5552 int fd
, int cmd
, abi_long arg
)
5557 ret
= get_errno(safe_ioctl(fd
, SIOCGSTAMPNS
, &ts
));
5558 if (is_error(ret
)) {
5562 if (cmd
== (int)TARGET_SIOCGSTAMPNS_OLD
) {
5563 if (host_to_target_timespec(arg
, &ts
)) {
5564 return -TARGET_EFAULT
;
5567 if (host_to_target_timespec64(arg
, &ts
)) {
5568 return -TARGET_EFAULT
;
5576 static abi_long
do_ioctl_tiocgptpeer(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5577 int fd
, int cmd
, abi_long arg
)
5579 int flags
= target_to_host_bitmask(arg
, fcntl_flags_tbl
);
5580 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, flags
));
5586 static void unlock_drm_version(struct drm_version
*host_ver
,
5587 struct target_drm_version
*target_ver
,
5590 unlock_user(host_ver
->name
, target_ver
->name
,
5591 copy
? host_ver
->name_len
: 0);
5592 unlock_user(host_ver
->date
, target_ver
->date
,
5593 copy
? host_ver
->date_len
: 0);
5594 unlock_user(host_ver
->desc
, target_ver
->desc
,
5595 copy
? host_ver
->desc_len
: 0);
5598 static inline abi_long
target_to_host_drmversion(struct drm_version
*host_ver
,
5599 struct target_drm_version
*target_ver
)
5601 memset(host_ver
, 0, sizeof(*host_ver
));
5603 __get_user(host_ver
->name_len
, &target_ver
->name_len
);
5604 if (host_ver
->name_len
) {
5605 host_ver
->name
= lock_user(VERIFY_WRITE
, target_ver
->name
,
5606 target_ver
->name_len
, 0);
5607 if (!host_ver
->name
) {
5612 __get_user(host_ver
->date_len
, &target_ver
->date_len
);
5613 if (host_ver
->date_len
) {
5614 host_ver
->date
= lock_user(VERIFY_WRITE
, target_ver
->date
,
5615 target_ver
->date_len
, 0);
5616 if (!host_ver
->date
) {
5621 __get_user(host_ver
->desc_len
, &target_ver
->desc_len
);
5622 if (host_ver
->desc_len
) {
5623 host_ver
->desc
= lock_user(VERIFY_WRITE
, target_ver
->desc
,
5624 target_ver
->desc_len
, 0);
5625 if (!host_ver
->desc
) {
5632 unlock_drm_version(host_ver
, target_ver
, false);
5636 static inline void host_to_target_drmversion(
5637 struct target_drm_version
*target_ver
,
5638 struct drm_version
*host_ver
)
5640 __put_user(host_ver
->version_major
, &target_ver
->version_major
);
5641 __put_user(host_ver
->version_minor
, &target_ver
->version_minor
);
5642 __put_user(host_ver
->version_patchlevel
, &target_ver
->version_patchlevel
);
5643 __put_user(host_ver
->name_len
, &target_ver
->name_len
);
5644 __put_user(host_ver
->date_len
, &target_ver
->date_len
);
5645 __put_user(host_ver
->desc_len
, &target_ver
->desc_len
);
5646 unlock_drm_version(host_ver
, target_ver
, true);
5649 static abi_long
do_ioctl_drm(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5650 int fd
, int cmd
, abi_long arg
)
5652 struct drm_version
*ver
;
5653 struct target_drm_version
*target_ver
;
5656 switch (ie
->host_cmd
) {
5657 case DRM_IOCTL_VERSION
:
5658 if (!lock_user_struct(VERIFY_WRITE
, target_ver
, arg
, 0)) {
5659 return -TARGET_EFAULT
;
5661 ver
= (struct drm_version
*)buf_temp
;
5662 ret
= target_to_host_drmversion(ver
, target_ver
);
5663 if (!is_error(ret
)) {
5664 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, ver
));
5665 if (is_error(ret
)) {
5666 unlock_drm_version(ver
, target_ver
, false);
5668 host_to_target_drmversion(target_ver
, ver
);
5671 unlock_user_struct(target_ver
, arg
, 0);
5674 return -TARGET_ENOSYS
;
5677 static abi_long
do_ioctl_drm_i915_getparam(const IOCTLEntry
*ie
,
5678 struct drm_i915_getparam
*gparam
,
5679 int fd
, abi_long arg
)
5683 struct target_drm_i915_getparam
*target_gparam
;
5685 if (!lock_user_struct(VERIFY_READ
, target_gparam
, arg
, 0)) {
5686 return -TARGET_EFAULT
;
5689 __get_user(gparam
->param
, &target_gparam
->param
);
5690 gparam
->value
= &value
;
5691 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, gparam
));
5692 put_user_s32(value
, target_gparam
->value
);
5694 unlock_user_struct(target_gparam
, arg
, 0);
5698 static abi_long
do_ioctl_drm_i915(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5699 int fd
, int cmd
, abi_long arg
)
5701 switch (ie
->host_cmd
) {
5702 case DRM_IOCTL_I915_GETPARAM
:
5703 return do_ioctl_drm_i915_getparam(ie
,
5704 (struct drm_i915_getparam
*)buf_temp
,
5707 return -TARGET_ENOSYS
;
5713 static abi_long
do_ioctl_TUNSETTXFILTER(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5714 int fd
, int cmd
, abi_long arg
)
5716 struct tun_filter
*filter
= (struct tun_filter
*)buf_temp
;
5717 struct tun_filter
*target_filter
;
5720 assert(ie
->access
== IOC_W
);
5722 target_filter
= lock_user(VERIFY_READ
, arg
, sizeof(*target_filter
), 1);
5723 if (!target_filter
) {
5724 return -TARGET_EFAULT
;
5726 filter
->flags
= tswap16(target_filter
->flags
);
5727 filter
->count
= tswap16(target_filter
->count
);
5728 unlock_user(target_filter
, arg
, 0);
5730 if (filter
->count
) {
5731 if (offsetof(struct tun_filter
, addr
) + filter
->count
* ETH_ALEN
>
5733 return -TARGET_EFAULT
;
5736 target_addr
= lock_user(VERIFY_READ
,
5737 arg
+ offsetof(struct tun_filter
, addr
),
5738 filter
->count
* ETH_ALEN
, 1);
5740 return -TARGET_EFAULT
;
5742 memcpy(filter
->addr
, target_addr
, filter
->count
* ETH_ALEN
);
5743 unlock_user(target_addr
, arg
+ offsetof(struct tun_filter
, addr
), 0);
5746 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, filter
));
5749 IOCTLEntry ioctl_entries
[] = {
5750 #define IOCTL(cmd, access, ...) \
5751 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5752 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5753 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5754 #define IOCTL_IGNORE(cmd) \
5755 { TARGET_ ## cmd, 0, #cmd },
5760 /* ??? Implement proper locking for ioctls. */
5761 /* do_ioctl() Must return target values and target errnos. */
5762 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
5764 const IOCTLEntry
*ie
;
5765 const argtype
*arg_type
;
5767 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
5773 if (ie
->target_cmd
== 0) {
5775 LOG_UNIMP
, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
5776 return -TARGET_ENOSYS
;
5778 if (ie
->target_cmd
== cmd
)
5782 arg_type
= ie
->arg_type
;
5784 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
5785 } else if (!ie
->host_cmd
) {
5786 /* Some architectures define BSD ioctls in their headers
5787 that are not implemented in Linux. */
5788 return -TARGET_ENOSYS
;
5791 switch(arg_type
[0]) {
5794 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
5800 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
5804 target_size
= thunk_type_size(arg_type
, 0);
5805 switch(ie
->access
) {
5807 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5808 if (!is_error(ret
)) {
5809 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5811 return -TARGET_EFAULT
;
5812 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5813 unlock_user(argptr
, arg
, target_size
);
5817 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5819 return -TARGET_EFAULT
;
5820 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5821 unlock_user(argptr
, arg
, 0);
5822 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5826 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5828 return -TARGET_EFAULT
;
5829 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5830 unlock_user(argptr
, arg
, 0);
5831 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5832 if (!is_error(ret
)) {
5833 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5835 return -TARGET_EFAULT
;
5836 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5837 unlock_user(argptr
, arg
, target_size
);
5843 qemu_log_mask(LOG_UNIMP
,
5844 "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5845 (long)cmd
, arg_type
[0]);
5846 ret
= -TARGET_ENOSYS
;
5852 static const bitmask_transtbl iflag_tbl
[] = {
5853 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
5854 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
5855 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
5856 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
5857 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
5858 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
5859 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
5860 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
5861 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
5862 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
5863 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
5864 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
5865 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
5866 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
5867 { TARGET_IUTF8
, TARGET_IUTF8
, IUTF8
, IUTF8
},
5871 static const bitmask_transtbl oflag_tbl
[] = {
5872 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
5873 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
5874 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
5875 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
5876 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
5877 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
5878 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
5879 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
5880 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
5881 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
5882 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
5883 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
5884 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
5885 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
5886 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
5887 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
5888 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
5889 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
5890 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
5891 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
5892 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
5893 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
5894 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
5895 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
5899 static const bitmask_transtbl cflag_tbl
[] = {
5900 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
5901 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
5902 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
5903 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
5904 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
5905 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
5906 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
5907 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
5908 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
5909 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
5910 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
5911 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
5912 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
5913 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
5914 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
5915 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
5916 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
5917 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
5918 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
5919 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
5920 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
5921 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
5922 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
5923 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
5924 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
5925 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
5926 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
5927 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
5928 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
5929 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
5930 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
5934 static const bitmask_transtbl lflag_tbl
[] = {
5935 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
5936 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
5937 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
5938 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
5939 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
5940 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
5941 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
5942 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
5943 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
5944 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
5945 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
5946 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
5947 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
5948 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
5949 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
5950 { TARGET_EXTPROC
, TARGET_EXTPROC
, EXTPROC
, EXTPROC
},
5954 static void target_to_host_termios (void *dst
, const void *src
)
5956 struct host_termios
*host
= dst
;
5957 const struct target_termios
*target
= src
;
5960 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
5962 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
5964 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
5966 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
5967 host
->c_line
= target
->c_line
;
5969 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
5970 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
5971 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
5972 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
5973 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
5974 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
5975 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
5976 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
5977 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
5978 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
5979 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
5980 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
5981 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
5982 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
5983 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
5984 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
5985 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
5986 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
5989 static void host_to_target_termios (void *dst
, const void *src
)
5991 struct target_termios
*target
= dst
;
5992 const struct host_termios
*host
= src
;
5995 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
5997 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
5999 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
6001 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
6002 target
->c_line
= host
->c_line
;
6004 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
6005 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
6006 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
6007 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
6008 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
6009 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
6010 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
6011 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
6012 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
6013 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
6014 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
6015 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
6016 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
6017 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
6018 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
6019 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
6020 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
6021 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
6024 static const StructEntry struct_termios_def
= {
6025 .convert
= { host_to_target_termios
, target_to_host_termios
},
6026 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
6027 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
6028 .print
= print_termios
,
6031 static bitmask_transtbl mmap_flags_tbl
[] = {
6032 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
6033 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
6034 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
6035 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
,
6036 MAP_ANONYMOUS
, MAP_ANONYMOUS
},
6037 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
,
6038 MAP_GROWSDOWN
, MAP_GROWSDOWN
},
6039 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
,
6040 MAP_DENYWRITE
, MAP_DENYWRITE
},
6041 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
,
6042 MAP_EXECUTABLE
, MAP_EXECUTABLE
},
6043 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
6044 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
,
6045 MAP_NORESERVE
, MAP_NORESERVE
},
6046 { TARGET_MAP_HUGETLB
, TARGET_MAP_HUGETLB
, MAP_HUGETLB
, MAP_HUGETLB
},
6047 /* MAP_STACK had been ignored by the kernel for quite some time.
6048 Recognize it for the target insofar as we do not want to pass
6049 it through to the host. */
6050 { TARGET_MAP_STACK
, TARGET_MAP_STACK
, 0, 0 },
6055 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
6056 * TARGET_I386 is defined if TARGET_X86_64 is defined
6058 #if defined(TARGET_I386)
6060 /* NOTE: there is really one LDT for all the threads */
static uint8_t *ldt_table;
6063 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
6070 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
6071 if (size
> bytecount
)
6073 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
6075 return -TARGET_EFAULT
;
6076 /* ??? Should this by byteswapped? */
6077 memcpy(p
, ldt_table
, size
);
6078 unlock_user(p
, ptr
, size
);
/* XXX: add locking support */
/*
 * write_ldt: install one segment descriptor into the emulated LDT,
 * implementing the write half of modify_ldt().  'oldmode' selects the
 * legacy descriptor encoding.  Allocates the LDT backing store on first
 * use.  Returns 0 on success or a negative target errno.
 * NOTE(review): several guard lines (oldmode checks, the clear-entry
 * fast path body, closing braces, the lm/seg_32bit descriptor bits and
 * the final return) are missing from this extraction -- confirm
 * against upstream syscall.c.
 */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    /* The guest must pass exactly one descriptor record. */
    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    /* Byte-swap the guest descriptor into host order. */
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the packed 'flags' word (Linux user_desc layout). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
    lm = (ldt_info.flags >> 7) & 1;
    /* contents == 3 is not a valid segment type for an LDT entry. */
    if (contents == 3) {
        return -TARGET_EINVAL;
    if (seg_not_present == 0)
        return -TARGET_EINVAL;

    /* allocate the LDT */
    env->ldt.base = target_mmap(0,
                                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                PROT_READ|PROT_WRITE,
                                MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
    if (env->ldt.base == -1)
        return -TARGET_ENOMEM;
    memset(g2h(env->ldt.base), 0,
           TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
    env->ldt.limit = 0xffff;
    ldt_table = g2h(env->ldt.base);

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        read_exec_only == 1 &&
        limit_in_pages == 0 &&
        seg_not_present == 1 &&
    /* Pack the two 32-bit halves of the hardware descriptor. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        ((seg_not_present ^ 1) << 15) |
        (limit_in_pages << 23) |
    /* The AVL ("useable") bit is only honoured in the non-oldmode path. */
    entry_2 |= (useable << 20);

    /* Install the new entry ... */
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
/* specific and weird i386 syscalls */
/*
 * do_modify_ldt: dispatch the i386 modify_ldt() syscall to the
 * read_ldt()/write_ldt() helpers above.
 * NOTE(review): the switch(func) skeleton and case labels are missing
 * from this extraction; only the per-case bodies are visible.
 */
static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
                              unsigned long bytecount)
        /* read the emulated LDT into the guest buffer */
        ret = read_ldt(ptr, bytecount);
        /* write with the legacy descriptor format (oldmode = 1) */
        ret = write_ldt(env, ptr, bytecount, 1);
        /* write with the current descriptor format (oldmode = 0) */
        ret = write_ldt(env, ptr, bytecount, 0);
        /* unknown sub-function */
        ret = -TARGET_ENOSYS;
6197 #if defined(TARGET_ABI32)
/*
 * do_set_thread_area: implement set_thread_area(2) for 32-bit x86 by
 * installing a TLS descriptor into the guest GDT.  An entry_number of
 * -1 asks us to pick a free TLS slot and report it back to the guest.
 * NOTE(review): this extraction is missing interior lines (locals such
 * as 'i', loop-exit breaks, closing braces, the oldmode-style clear
 * path, the lm/seg_32bit descriptor bits and the final return) --
 * confirm against upstream syscall.c.
 */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
    /* Host view of the guest GDT, one 64-bit descriptor per entry. */
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    /* VERIFY_WRITE because we may write the chosen slot index back. */
    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    /* -1 means "allocate any free TLS slot for me". */
    if (ldt_info.entry_number == -1) {
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                /* Report the chosen slot back to the guest. */
                target_ldt_info->entry_number = tswap32(i);
    unlock_user_struct(target_ldt_info, ptr, 1);

    /* Only the TLS range of the GDT may be touched this way. */
    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    /* Unpack the packed 'flags' word (Linux user_desc layout). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
    lm = (ldt_info.flags >> 7) & 1;
    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0 &&
             read_exec_only == 1 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
    /* Pack the two 32-bit halves of the hardware descriptor. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        ((seg_not_present ^ 1) << 15) |
        (limit_in_pages << 23) |

    /* Install the new entry ... */
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
/*
 * do_get_thread_area: implement get_thread_area(2) for 32-bit x86.
 * Reads the guest GDT descriptor selected by entry_number and unpacks
 * it back into the user_desc layout expected by the guest.
 * NOTE(review): a few lines (the 'i'/loop declarations, closing braces
 * and the final return) are missing from this extraction.
 */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
    struct target_modify_ldt_ldt_s *target_ldt_info;
    /* Host view of the guest GDT. */
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    /* Only the TLS range of the GDT may be read this way. */
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Decode the hardware descriptor bits (inverse of the set path). */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
    lm = (entry_2 >> 21) & 1;
    /* Re-pack into the user_desc 'flags' word. */
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
/*
 * do_arch_prctl stub for targets where arch_prctl is not supported
 * (the 32-bit ABI build): always fail with ENOSYS.
 */
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
    return -TARGET_ENOSYS;
/*
 * do_arch_prctl: x86-64 arch_prctl() emulation for the FS/GS base
 * sub-commands.  SET_* loads the segment and records the new base in
 * env; GET_* copies the current base out to guest memory at 'addr'.
 * NOTE(review): the switch(code) skeleton, the idx selection
 * assignments (R_GS vs R_FS), locals and the final return are missing
 * from this extraction -- confirm against upstream.
 */
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
    case TARGET_ARCH_SET_GS:
    case TARGET_ARCH_SET_FS:
        /* choose the segment register to operate on */
        if (code == TARGET_ARCH_SET_GS)
        cpu_x86_load_seg(env, idx, 0);
        env->segs[idx].base = addr;
    case TARGET_ARCH_GET_GS:
    case TARGET_ARCH_GET_FS:
        if (code == TARGET_ARCH_GET_GS)
        val = env->segs[idx].base;
        /* write the base back to the guest pointer */
        if (put_user(val, addr, abi_ulong))
            ret = -TARGET_EFAULT;
        /* unknown sub-command */
        ret = -TARGET_EINVAL;
6367 #endif /* defined(TARGET_ABI32 */
6369 #endif /* defined(TARGET_I386) */
/* Stack size for threads created via clone(CLONE_VM). */
#define NEW_STACK_SIZE 0x40000

/* Serializes thread creation so setup appears atomic to the child. */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
    /* Fields of the parent<->child handshake record (new_thread_info).
     * NOTE(review): the struct header and remaining members are missing
     * from this extraction. */
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    abi_ulong child_tidptr;
    abi_ulong parent_tidptr;
/*
 * clone_func: entry point of a new guest thread created by do_fork()
 * with CLONE_VM.  Registers the thread with RCU/TCG, publishes its tid
 * to the requested guest locations, signals the parent it is ready,
 * then waits for the parent to finish TLS setup before running.
 * NOTE(review): locals (env/cpu), thread_cpu assignment and the final
 * cpu_loop() call are missing from this extraction.
 */
static void *clone_func(void *arg)
    new_thread_info *info = arg;

    rcu_register_thread();
    tcg_register_thread();
    ts = (TaskState *)cpu->opaque;
    info->tid = sys_gettid();
    /* Publish the tid wherever the clone() caller asked for it. */
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);
    /* Enable signals. */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready. */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state. */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
/*
 * do_fork: emulate clone()/fork()/vfork().  CLONE_VM requests become a
 * host pthread sharing this process; anything else becomes a host
 * fork().  vfork semantics are deliberately degraded to fork.
 * NOTE(review): this extraction is missing interior lines (locals such
 * as ts/new_cpu/ret/sigmask/new_thread, the fork() call itself, the
 * child/parent branch structure, closing braces and returns) --
 * confirm against upstream syscall.c before relying on details.
 */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
    CPUState *cpu = env_cpu(env);
    CPUArchState *new_env;

    /* Drop flags the emulation deliberately ignores. */
    flags &= ~CLONE_IGNORED_FLAGS;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        /* Thread creation path: spawn a host pthread. */
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        /* Thread-style clones must carry the full canonical flag set. */
        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;
        ts = g_new0(TaskState, 1);
        init_task_state(ts);

        /* Grab a mutex so that thread setup appears atomic. */
        pthread_mutex_lock(&clone_lock);

        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent. */
        cpu_clone_regs_child(new_env, newsp, flags);
        cpu_clone_regs_parent(env, flags);
        new_cpu = env_cpu(new_env);
        new_cpu->opaque = ts;
        /* The new thread shares the parent's binary image and signals. */
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;

        if (flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;

        if (flags & CLONE_SETTLS) {
            cpu_set_tls (new_env, newtls);

        /* Set up the parent<->child ready handshake. */
        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);

        if (flags & CLONE_CHILD_SETTID) {
            info.child_tidptr = child_tidptr;
        if (flags & CLONE_PARENT_SETTID) {
            info.parent_tidptr = parent_tidptr;

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals. */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
        cpu->random_seed = qemu_guest_random_seed_thread_part1();

        /* If this is our first additional thread, we need to ensure we
         * generate code for parallel execution and flush old translations.
        if (!parallel_cpus) {
            parallel_cpus = true;

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed. */

        /* Restore our signal mask now the child owns the blocked set. */
        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
            /* Wait for the child to initialize. */
            pthread_cond_wait(&info.cond, &info.mutex);
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
        /* if no CLONE_VM, we consider it is a fork */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;

        /* We can't support custom termination signals */
        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
            return -TARGET_EINVAL;

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;

            /* Child Process. */
            cpu_clone_regs_child(env, newsp, flags);
            /* There is a race condition here. The parent process could
               theoretically read the TID in the child process before the child
               tid is set. This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping. We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock. */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(sys_gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(sys_gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
            cpu_clone_regs_parent(env, flags);
/* warning : doesn't handle linux specific flags... */
/*
 * target_to_host_fcntl_cmd: translate a guest fcntl() command number to
 * the host's, returning -TARGET_EINVAL for commands we do not support.
 * NOTE(review): the switch skeleton and most 'ret = F_...' assignments
 * are missing from this extraction; only the case labels survive.
 */
static int target_to_host_fcntl_cmd(int cmd)
    /* Commands whose numeric value matches the host directly. */
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
    case TARGET_F_OFD_GETLK:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
    case TARGET_F_GETLK:
    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
    case TARGET_F_GETOWN:
    case TARGET_F_SETOWN:
    case TARGET_F_GETSIG:
    case TARGET_F_SETSIG:
#if TARGET_ABI_BITS == 32
    /* 64-bit lock variants only exist as separate commands on 32-bit ABIs. */
    case TARGET_F_GETLK64:
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        ret = F_DUPFD_CLOEXEC;
    case TARGET_F_NOTIFY:
    case TARGET_F_GETOWN_EX:
    case TARGET_F_SETOWN_EX:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
        /* anything else is unsupported */
        ret = -TARGET_EINVAL;

#if defined(__powerpc64__)
    /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
     * is not supported by kernel. The glibc fcntl call actually adjusts
     * them to 5, 6 and 7 before making the syscall(). Since we make the
     * syscall directly, adjust to what is supported by the kernel.
     */
    if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
        ret -= F_GETLK64 - 5;
/* Table of flock l_type values converted by the two helpers below;
 * expands TRANSTBL_CONVERT once per lock type. */
#define FLOCK_TRANSTBL \
    TRANSTBL_CONVERT(F_RDLCK); \
    TRANSTBL_CONVERT(F_WRLCK); \
    TRANSTBL_CONVERT(F_UNLCK); \
    TRANSTBL_CONVERT(F_EXLCK); \
    TRANSTBL_CONVERT(F_SHLCK); \
/*
 * target_to_host_flock: map a guest flock l_type to the host value, or
 * -TARGET_EINVAL for unknown types.
 * NOTE(review): the switch skeleton and FLOCK_TRANSTBL expansion line
 * are missing from this extraction.
 */
static int target_to_host_flock(int type)
#define TRANSTBL_CONVERT(a) case TARGET_##a: return a
#undef TRANSTBL_CONVERT
    return -TARGET_EINVAL;
/*
 * host_to_target_flock: inverse of target_to_host_flock.
 * NOTE(review): the switch skeleton, FLOCK_TRANSTBL expansion and the
 * fall-through return are missing from this extraction.
 */
static int host_to_target_flock(int type)
#define TRANSTBL_CONVERT(a) case a: return TARGET_##a
#undef TRANSTBL_CONVERT
    /* if we don't know how to convert the value coming
     * from the host we copy to the target field as-is
     */
/*
 * copy_from_user_flock: unmarshal a guest 'struct target_flock' into a
 * host struct flock64, translating l_type.  Returns -TARGET_EFAULT if
 * the guest struct cannot be read.
 * NOTE(review): the l_type local, the l_type < 0 error check and the
 * final return are missing from this extraction.
 */
static inline abi_long copy_from_user_flock(struct flock64 *fl,
                                            abi_ulong target_flock_addr)
    struct target_flock *target_fl;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
/*
 * copy_to_user_flock: marshal a host struct flock64 out to a guest
 * 'struct target_flock', translating l_type back to the guest value.
 * NOTE(review): the l_type local and the final return are missing from
 * this extraction.
 */
static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
                                          const struct flock64 *fl)
    struct target_flock *target_fl;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
/* Function-pointer types so 64-bit fcntl paths can select the ABI- or
 * OABI-specific flock64 marshalling helpers uniformly. */
typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6737 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
/*
 * copy_from_user_oabi_flock64: as copy_from_user_flock64 but for the
 * old ARM OABI layout of flock64 (32-bit ARM only).
 * NOTE(review): the l_type local, the l_type < 0 error check and the
 * final return are missing from this extraction.
 */
static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
                                                   abi_ulong target_flock_addr)
    struct target_oabi_flock64 *target_fl;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
/*
 * copy_to_user_oabi_flock64: as copy_to_user_flock64 but for the old
 * ARM OABI layout of flock64 (32-bit ARM only).
 * NOTE(review): the l_type local and the final return are missing from
 * this extraction.
 */
static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
    struct target_oabi_flock64 *target_fl;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
/*
 * copy_from_user_flock64: unmarshal a guest 'struct target_flock64'
 * into a host struct flock64, translating l_type.
 * NOTE(review): the l_type local, the l_type < 0 error check and the
 * final return are missing from this extraction.
 */
static inline abi_long copy_from_user_flock64(struct flock64 *fl,
                                              abi_ulong target_flock_addr)
    struct target_flock64 *target_fl;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
/*
 * copy_to_user_flock64: marshal a host struct flock64 out to a guest
 * 'struct target_flock64', translating l_type back to the guest value.
 * NOTE(review): the l_type local and the final return are missing from
 * this extraction.
 */
static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
                                            const struct flock64 *fl)
    struct target_flock64 *target_fl;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
/*
 * do_fcntl: emulate fcntl(2).  Translates the command, marshals lock /
 * owner structures between guest and host layouts, and forwards to the
 * host via safe_fcntl().  Returns a target errno on failure.
 * NOTE(review): the switch(cmd) skeleton, 'ret' declaration, the early
 * return for untranslatable commands, per-case error checks, break
 * statements and the final return are missing from this extraction.
 */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
    struct flock64 fl64;
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    /* Unsupported guest command: propagate the error. */
    if (host_cmd == -TARGET_EINVAL)

    case TARGET_F_GETLK:
        ret = copy_from_user_flock(&fl64, arg);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        /* copy the (possibly updated) lock description back out */
        ret = copy_to_user_flock(arg, &fl64);

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl64, arg);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));

    case TARGET_F_GETLK64:
    case TARGET_F_OFD_GETLK:
        ret = copy_from_user_flock64(&fl64, arg);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        ret = copy_to_user_flock64(arg, &fl64);
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = copy_from_user_flock64(&fl64, arg);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));

    case TARGET_F_GETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        /* translate host O_* status flags back to guest encoding */
        ret = host_to_target_bitmask(ret, fcntl_flags_tbl);

    case TARGET_F_SETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,

    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
            return -TARGET_EFAULT;
        target_fox->type = tswap32(fox.type);
        target_fox->pid = tswap32(fox.pid);
        unlock_user_struct(target_fox, arg, 1);

    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));

    case TARGET_F_SETSIG:
        /* signal numbers differ between guest and host */
        ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));

    case TARGET_F_GETSIG:
        ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));

    /* Plain integer argument: pass through untranslated. */
    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));

        /* default: forward the raw guest command */
        ret = get_errno(safe_fcntl(fd, cmd, arg));
/*
 * 16-bit vs 32-bit uid/gid conversion helpers.  The USE_UID16 variants
 * clamp/widen between the guest's 16-bit ids and host 32-bit ids; the
 * !USE_UID16 variants are identity functions.
 * NOTE(review): function bodies, the tswap16/identity returns and the
 * surrounding #ifdef USE_UID16 are missing from this extraction.
 */
static inline int high2lowuid(int uid)
static inline int high2lowgid(int gid)
static inline int low2highuid(int uid)
    /* (uint16_t)-1 is the "unchanged" sentinel; keep it as -1. */
    if ((int16_t)uid == -1)
static inline int low2highgid(int gid)
    if ((int16_t)gid == -1)
static inline int tswapid(int id)

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */
static inline int high2lowuid(int uid)
static inline int high2lowgid(int gid)
static inline int low2highuid(int uid)
static inline int low2highgid(int gid)
static inline int tswapid(int id)

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
7008 #endif /* USE_UID16 */
/* We must do direct syscalls for setting UID/GID, because we want to
 * implement the Linux system call semantics of "change only for this thread",
 * not the libc/POSIX semantics of "change for all threads in process".
 * (See http://ewontfix.com/17/ for more details.)
 * We use the 32-bit version of the syscalls if present; if it is not
 * then either the host architecture supports 32-bit UIDs natively with
 * the standard syscall, or the 16-bit UID is the best we can do.
 */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#define __NR_sys_setuid __NR_setuid
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#define __NR_sys_setgid __NR_setgid
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid

/* Raw per-thread syscall wrappers generated by the _syscallN macros. */
_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
/*
 * syscall_init: one-time setup for syscall emulation.  Registers thunk
 * struct layouts, builds the reverse errno table, and patches generic
 * ioctl numbers whose size field depends on the target ABI.
 * NOTE(review): locals (i, size, ie), the #undef STRUCT, the ie
 * initialization/advance and exit() calls are missing from this
 * extraction -- confirm against upstream.
 */
void syscall_init(void)
    const argtype *arg_type;

    thunk_init(STRUCT_MAX);

/* Register every struct layout listed in syscall_types.h. */
#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            /* Size patching only makes sense for pointer arguments. */
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
            /* Splice the target-specific struct size into the cmd. */
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
7096 #ifdef TARGET_NR_truncate64
/*
 * target_truncate64: implement truncate64(), reassembling the 64-bit
 * length from the two 32-bit registers arg2/arg3.  On ABIs that align
 * 64-bit syscall arguments to even register pairs, the pair is shifted
 * first.  NOTE(review): the signature tail (arg2/arg3/arg4), the
 * register-shuffle body and closing brace are missing from this
 * extraction.
 */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7110 #ifdef TARGET_NR_ftruncate64
/*
 * target_ftruncate64: as target_truncate64 but for ftruncate64() on an
 * already-open fd (arg1).  NOTE(review): the signature tail, the
 * register-pair shuffle body and closing brace are missing from this
 * extraction.
 */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7124 #if defined(TARGET_NR_timer_settime) || \
7125 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
/*
 * target_to_host_itimerspec: convert a guest target_itimerspec (two
 * 32/64-bit timespecs) into a host struct itimerspec.  Returns
 * -TARGET_EFAULT if either timespec copy fails.
 * NOTE(review): the offsetof member-name lines (it_interval/it_value)
 * and the success return are missing from this extraction.
 */
static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
                                                 abi_ulong target_addr)
    if (target_to_host_timespec(&host_its->it_interval, target_addr +
                                offsetof(struct target_itimerspec,
        target_to_host_timespec(&host_its->it_value, target_addr +
                                offsetof(struct target_itimerspec,
        return -TARGET_EFAULT;
7142 #if defined(TARGET_NR_timer_settime64) || \
7143 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
/*
 * target_to_host_itimerspec64: as above but for the 64-bit time_t
 * layout (target__kernel_itimerspec) used by the *_time64 syscalls.
 * NOTE(review): the offsetof member-name lines and the success return
 * are missing from this extraction.
 */
static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
                                                   abi_ulong target_addr)
    if (target_to_host_timespec64(&host_its->it_interval, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
        target_to_host_timespec64(&host_its->it_value, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
        return -TARGET_EFAULT;
7160 #if ((defined(TARGET_NR_timerfd_gettime) || \
7161 defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7162 defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
/*
 * host_to_target_itimerspec: write a host struct itimerspec back out to
 * guest memory in target_itimerspec layout.  Returns -TARGET_EFAULT if
 * either timespec copy fails.
 * NOTE(review): the offsetof member-name lines and the success return
 * are missing from this extraction.
 */
static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
                                                 struct itimerspec *host_its)
    if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
                                &host_its->it_interval) ||
        host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
                                &host_its->it_value)) {
        return -TARGET_EFAULT;
7178 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7179 defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7180 defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
/*
 * host_to_target_itimerspec64: as above but for the 64-bit time_t
 * guest layout (target__kernel_itimerspec).
 * NOTE(review): the offsetof member-name lines and the success return
 * are missing from this extraction.
 */
static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
                                                   struct itimerspec *host_its)
    if (host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                  &host_its->it_interval) ||
        host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                  &host_its->it_value)) {
        return -TARGET_EFAULT;
7198 #if defined(TARGET_NR_adjtimex) || \
7199 (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
/*
 * target_to_host_timex: unmarshal a guest 'struct target_timex' into a
 * host struct timex for adjtimex()/clock_adjtime().  Field-by-field
 * __get_user copies with implicit byte-swapping.  Returns
 * -TARGET_EFAULT if the guest struct cannot be read.
 * NOTE(review): the closing brace of the lock check and the success
 * return are missing from this extraction.
 */
static inline abi_long target_to_host_timex(struct timex *host_tx,
                                            abi_long target_addr)
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
/*
 * host_to_target_timex: inverse of target_to_host_timex -- copy the
 * host struct timex (as updated by the kernel) back out to the guest.
 * Returns -TARGET_EFAULT if the guest struct cannot be written.
 * NOTE(review): the closing brace of the lock check and the success
 * return are missing from this extraction.
 */
static inline abi_long host_to_target_timex(abi_long target_addr,
                                            struct timex *host_tx)
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
7272 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
/*
 * target_to_host_timex64: unmarshal a guest 'struct
 * target__kernel_timex' (64-bit time_t layout) into a host struct
 * timex.  The embedded timeval is converted separately up front via
 * copy_from_user_timeval64.  Returns -TARGET_EFAULT on any fault.
 * NOTE(review): closing braces and the success return are missing from
 * this extraction.
 */
static inline abi_long target_to_host_timex64(struct timex *host_tx,
                                              abi_long target_addr)
    struct target__kernel_timex *target_tx;

    /* The 'time' member needs its own 64-bit timeval conversion. */
    if (copy_from_user_timeval64(&host_tx->time, target_addr +
                                 offsetof(struct target__kernel_timex,
        return -TARGET_EFAULT;

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
/*
 * host_to_target_timex64: inverse of target_to_host_timex64 -- copy a
 * host struct timex back out to the guest's 64-bit time_t layout.  The
 * embedded timeval is converted separately via copy_to_user_timeval64.
 * NOTE(review): closing braces and the success return are missing from
 * this extraction.
 */
static inline abi_long host_to_target_timex64(abi_long target_addr,
                                              struct timex *host_tx)
    struct target__kernel_timex *target_tx;

    if (copy_to_user_timeval64(target_addr +
                               offsetof(struct target__kernel_timex, time),
        return -TARGET_EFAULT;

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
7352 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
7353 abi_ulong target_addr
)
7355 struct target_sigevent
*target_sevp
;
7357 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
7358 return -TARGET_EFAULT
;
7361 /* This union is awkward on 64 bit systems because it has a 32 bit
7362 * integer and a pointer in it; we follow the conversion approach
7363 * used for handling sigval types in signal.c so the guest should get
7364 * the correct value back even if we did a 64 bit byteswap and it's
7365 * using the 32 bit integer.
7367 host_sevp
->sigev_value
.sival_ptr
=
7368 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
7369 host_sevp
->sigev_signo
=
7370 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
7371 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
7372 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
7374 unlock_user_struct(target_sevp
, target_addr
, 1);
#if defined(TARGET_NR_mlockall)
/*
 * Translate guest mlockall() flag bits (TARGET_MCL_*) into the host's
 * MCL_* bits. MCL_ONFAULT is only handled when the host headers
 * provide it.
 */
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }
#ifdef MCL_ONFAULT
    if (arg & TARGET_MCL_ONFAULT) {
        result |= MCL_ONFAULT;
    }
#endif

    return result;
}
#endif
#if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
     defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
     defined(TARGET_NR_newfstatat))
/*
 * Copy a host struct stat out to the guest's 64-bit stat structure.
 * On 32-bit ARM the EABI variant of the structure is used when the
 * emulated CPU runs in EABI mode; otherwise the generic
 * target_stat64/target_stat layout applies. Returns 0 on success,
 * -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
        __put_user(host_st->st_atim.tv_nsec,
                   &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec,
                   &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec,
                   &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
        __put_user(host_st->st_atim.tv_nsec,
                   &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec,
                   &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec,
                   &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif
#if defined(TARGET_NR_statx) && defined(__NR_statx)
/*
 * Copy an already host-format struct target_statx out to guest memory,
 * byte-swapping every member. Returns 0 on success, -TARGET_EFAULT on
 * a bad guest address.
 */
static inline abi_long host_to_target_statx(struct target_statx *host_stx,
                                            abi_ulong target_addr)
{
    struct target_statx *target_stx;

    if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    memset(target_stx, 0, sizeof(*target_stx));

    __put_user(host_stx->stx_mask, &target_stx->stx_mask);
    __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
    __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
    __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
    __put_user(host_stx->stx_uid, &target_stx->stx_uid);
    __put_user(host_stx->stx_gid, &target_stx->stx_gid);
    __put_user(host_stx->stx_mode, &target_stx->stx_mode);
    __put_user(host_stx->stx_ino, &target_stx->stx_ino);
    __put_user(host_stx->stx_size, &target_stx->stx_size);
    __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
    __put_user(host_stx->stx_attributes_mask,
               &target_stx->stx_attributes_mask);
    __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
    __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
    __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
    __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
    __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
    __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
    __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
    __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
    __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
    __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
    __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
    __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);

    unlock_user_struct(target_stx, target_addr, 1);
    return 0;
}
#endif
/*
 * Raw futex dispatcher. On 64-bit hosts __NR_futex already uses a
 * 64-bit time_t, so it is called directly. On 32-bit hosts, prefer
 * __NR_futex_time64 when the host timespec is 64-bit, falling back to
 * the legacy __NR_futex otherwise.
 */
static int do_sys_futex(int *uaddr, int op, int val,
                        const struct timespec *timeout, int *uaddr2,
                        int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    g_assert_not_reached();
}
7542 static int do_safe_futex(int *uaddr
, int op
, int val
,
7543 const struct timespec
*timeout
, int *uaddr2
,
7546 #if HOST_LONG_BITS == 64
7547 #if defined(__NR_futex)
7548 /* always a 64-bit time_t, it doesn't define _time64 version */
7549 return get_errno(safe_futex(uaddr
, op
, val
, timeout
, uaddr2
, val3
));
7551 #else /* HOST_LONG_BITS == 64 */
7552 #if defined(__NR_futex_time64)
7553 if (sizeof(timeout
->tv_sec
) == 8) {
7554 /* _time64 function on 32bit arch */
7555 return get_errno(safe_futex_time64(uaddr
, op
, val
, timeout
, uaddr2
,
7559 #if defined(__NR_futex)
7560 /* old function on 32bit arch */
7561 return get_errno(safe_futex(uaddr
, op
, val
, timeout
, uaddr2
, val3
));
7563 #endif /* HOST_LONG_BITS == 64 */
7564 return -TARGET_ENOSYS
;
7567 /* ??? Using host futex calls even when target atomic operations
7568 are not really atomic probably breaks things. However implementing
7569 futexes locally would make futexes shared between multiple processes
7570 tricky. However they're probably useless because guest atomic
7571 operations won't work either. */
#if defined(TARGET_NR_futex)
/*
 * Implement the guest futex() syscall with a 32-bit guest timespec.
 * Timeouts are converted with target_to_host_timespec(); for the
 * requeue-family ops the TIMEOUT argument is really a uint32_t count
 * and is passed through unconverted.
 */
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            target_to_host_timespec(pts, timeout);
        } else {
            pts = NULL;
        }
        return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
    case FUTEX_WAKE:
        return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
    case FUTEX_FD:
        return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
                             (base_op == FUTEX_CMP_REQUEUE
                              ? tswap32(val3)
                              : val3));
    default:
        return -TARGET_ENOSYS;
    }
}
#endif
#if defined(TARGET_NR_futex_time64)
/*
 * Implement the guest futex_time64() syscall (64-bit guest timespec).
 * Same shape as do_futex() but the timeout is converted with
 * target_to_host_timespec64(), whose failure is reported as
 * -TARGET_EFAULT.
 */
static int do_futex_time64(target_ulong uaddr, int op, int val,
                           target_ulong timeout, target_ulong uaddr2,
                           int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            if (target_to_host_timespec64(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            pts = NULL;
        }
        return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
    case FUTEX_WAKE:
        return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
    case FUTEX_FD:
        return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
                             (base_op == FUTEX_CMP_REQUEUE
                              ? tswap32(val3)
                              : val3));
    default:
        return -TARGET_ENOSYS;
    }
}
#endif
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Implement name_to_handle_at(2) for the guest: the guest-supplied
 * file_handle buffer's handle_bytes is read first to size the
 * allocation, the host call fills a scratch handle, and the result is
 * copied back with the two 32-bit header fields byte-swapped (the rest
 * of the handle is opaque). Returns the host result or -TARGET_EFAULT.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Implement open_by_handle_at(2) for the guest: duplicate the
 * guest-supplied file_handle, fix up its handle_bytes/handle_type
 * endianness, and forward to the host with translated open flags.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     int flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
#if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
/*
 * Implement signalfd()/signalfd4() for the guest. Only
 * TARGET_O_NONBLOCK and TARGET_O_CLOEXEC are accepted in flags; the
 * guest sigset is converted to a host sigset, and the new fd is
 * registered for siginfo translation on read.
 */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    abi_long ret;
    target_sigset_t *target_mask;
    sigset_t host_mask;

    if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
#endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
7801 static int open_self_cmdline(void *cpu_env
, int fd
)
7803 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7804 struct linux_binprm
*bprm
= ((TaskState
*)cpu
->opaque
)->bprm
;
7807 for (i
= 0; i
< bprm
->argc
; i
++) {
7808 size_t len
= strlen(bprm
->argv
[i
]) + 1;
7810 if (write(fd
, bprm
->argv
[i
], len
) != len
) {
7818 static int open_self_maps(void *cpu_env
, int fd
)
7820 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7821 TaskState
*ts
= cpu
->opaque
;
7822 GSList
*map_info
= read_self_maps();
7826 for (s
= map_info
; s
; s
= g_slist_next(s
)) {
7827 MapInfo
*e
= (MapInfo
*) s
->data
;
7829 if (h2g_valid(e
->start
)) {
7830 unsigned long min
= e
->start
;
7831 unsigned long max
= e
->end
;
7832 int flags
= page_get_flags(h2g(min
));
7835 max
= h2g_valid(max
- 1) ?
7836 max
: (uintptr_t) g2h(GUEST_ADDR_MAX
) + 1;
7838 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
7842 if (h2g(min
) == ts
->info
->stack_limit
) {
7848 count
= dprintf(fd
, TARGET_ABI_FMT_ptr
"-" TARGET_ABI_FMT_ptr
7849 " %c%c%c%c %08" PRIx64
" %s %"PRId64
,
7850 h2g(min
), h2g(max
- 1) + 1,
7851 e
->is_read
? 'r' : '-',
7852 e
->is_write
? 'w' : '-',
7853 e
->is_exec
? 'x' : '-',
7854 e
->is_priv
? 'p' : '-',
7855 (uint64_t) e
->offset
, e
->dev
, e
->inode
);
7857 dprintf(fd
, "%*s%s\n", 73 - count
, "", path
);
7864 free_self_maps(map_info
);
7866 #ifdef TARGET_VSYSCALL_PAGE
7868 * We only support execution from the vsyscall page.
7869 * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
7871 count
= dprintf(fd
, TARGET_FMT_lx
"-" TARGET_FMT_lx
7872 " --xp 00000000 00:00 0",
7873 TARGET_VSYSCALL_PAGE
, TARGET_VSYSCALL_PAGE
+ TARGET_PAGE_SIZE
);
7874 dprintf(fd
, "%*s%s\n", 73 - count
, "", "[vsyscall]");
7880 static int open_self_stat(void *cpu_env
, int fd
)
7882 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7883 TaskState
*ts
= cpu
->opaque
;
7884 g_autoptr(GString
) buf
= g_string_new(NULL
);
7887 for (i
= 0; i
< 44; i
++) {
7890 g_string_printf(buf
, FMT_pid
" ", getpid());
7891 } else if (i
== 1) {
7893 gchar
*bin
= g_strrstr(ts
->bprm
->argv
[0], "/");
7894 bin
= bin
? bin
+ 1 : ts
->bprm
->argv
[0];
7895 g_string_printf(buf
, "(%.15s) ", bin
);
7896 } else if (i
== 27) {
7898 g_string_printf(buf
, TARGET_ABI_FMT_ld
" ", ts
->info
->start_stack
);
7900 /* for the rest, there is MasterCard */
7901 g_string_printf(buf
, "0%c", i
== 43 ? '\n' : ' ');
7904 if (write(fd
, buf
->str
, buf
->len
) != buf
->len
) {
7912 static int open_self_auxv(void *cpu_env
, int fd
)
7914 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7915 TaskState
*ts
= cpu
->opaque
;
7916 abi_ulong auxv
= ts
->info
->saved_auxv
;
7917 abi_ulong len
= ts
->info
->auxv_len
;
7921 * Auxiliary vector is stored in target process stack.
7922 * read in whole auxv vector and copy it to file
7924 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
7928 r
= write(fd
, ptr
, len
);
7935 lseek(fd
, 0, SEEK_SET
);
7936 unlock_user(ptr
, auxv
, len
);
/*
 * Return 1 when `filename` names the given entry of THIS process's
 * /proc directory — either "/proc/self/<entry>" or
 * "/proc/<own-pid>/<entry>" — and 0 otherwise.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
    defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
/* Exact-match comparator for absolute /proc paths in the fakes table. */
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}
#endif
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/*
 * Emulate /proc/net/route for cross-endian guests: pass the header
 * line through unchanged, then byte-swap the destination, gateway and
 * netmask fields of every route entry.
 */
static int open_net_route(void *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -1;
    }

    /* read header */

    read = getline(&line, &len, fp);
    dprintf(fd, "%s", line);

    /* read routes */

    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        int fields;

        fields = sscanf(line,
                        "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                        iface, &dest, &gw, &flags, &refcnt, &use, &metric,
                        &mask, &mtu, &window, &irtt);
        if (fields != 11) {
            continue;
        }
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
#endif
#if defined(TARGET_SPARC)
/* Emulate /proc/cpuinfo for SPARC guests with a minimal sun4u entry. */
static int open_cpuinfo(void *cpu_env, int fd)
{
    dprintf(fd, "type\t\t: sun4u\n");
    return 0;
}
#endif
#if defined(TARGET_HPPA)
/* Emulate /proc/cpuinfo for HPPA guests (fixed PA7300LC description). */
static int open_cpuinfo(void *cpu_env, int fd)
{
    dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
    dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
    dprintf(fd, "capabilities\t: os32\n");
    dprintf(fd, "model\t\t: 9000/778/B160L\n");
    dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
    return 0;
}
#endif
#if defined(TARGET_M68K)
/* Emulate /proc/hardware for m68k guests. */
static int open_hardware(void *cpu_env, int fd)
{
    dprintf(fd, "Model:\t\tqemu-m68k\n");
    return 0;
}
#endif
8047 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
8050 const char *filename
;
8051 int (*fill
)(void *cpu_env
, int fd
);
8052 int (*cmp
)(const char *s1
, const char *s2
);
8054 const struct fake_open
*fake_open
;
8055 static const struct fake_open fakes
[] = {
8056 { "maps", open_self_maps
, is_proc_myself
},
8057 { "stat", open_self_stat
, is_proc_myself
},
8058 { "auxv", open_self_auxv
, is_proc_myself
},
8059 { "cmdline", open_self_cmdline
, is_proc_myself
},
8060 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
8061 { "/proc/net/route", open_net_route
, is_proc
},
8063 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8064 { "/proc/cpuinfo", open_cpuinfo
, is_proc
},
8066 #if defined(TARGET_M68K)
8067 { "/proc/hardware", open_hardware
, is_proc
},
8069 { NULL
, NULL
, NULL
}
8072 if (is_proc_myself(pathname
, "exe")) {
8073 int execfd
= qemu_getauxval(AT_EXECFD
);
8074 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
8077 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
8078 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
8083 if (fake_open
->filename
) {
8085 char filename
[PATH_MAX
];
8088 /* create temporary file to map stat to */
8089 tmpdir
= getenv("TMPDIR");
8092 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
8093 fd
= mkstemp(filename
);
8099 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
8105 lseek(fd
, 0, SEEK_SET
);
8110 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
8113 #define TIMER_MAGIC 0x0caf0000
8114 #define TIMER_MAGIC_MASK 0xffff0000
8116 /* Convert QEMU provided timer ID back to internal 16bit index format */
8117 static target_timer_t
get_timer_id(abi_long arg
)
8119 target_timer_t timerid
= arg
;
8121 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
8122 return -TARGET_EINVAL
;
8127 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
8128 return -TARGET_EINVAL
;
8134 static int target_to_host_cpu_mask(unsigned long *host_mask
,
8136 abi_ulong target_addr
,
8139 unsigned target_bits
= sizeof(abi_ulong
) * 8;
8140 unsigned host_bits
= sizeof(*host_mask
) * 8;
8141 abi_ulong
*target_mask
;
8144 assert(host_size
>= target_size
);
8146 target_mask
= lock_user(VERIFY_READ
, target_addr
, target_size
, 1);
8148 return -TARGET_EFAULT
;
8150 memset(host_mask
, 0, host_size
);
8152 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
8153 unsigned bit
= i
* target_bits
;
8156 __get_user(val
, &target_mask
[i
]);
8157 for (j
= 0; j
< target_bits
; j
++, bit
++) {
8158 if (val
& (1UL << j
)) {
8159 host_mask
[bit
/ host_bits
] |= 1UL << (bit
% host_bits
);
8164 unlock_user(target_mask
, target_addr
, 0);
8168 static int host_to_target_cpu_mask(const unsigned long *host_mask
,
8170 abi_ulong target_addr
,
8173 unsigned target_bits
= sizeof(abi_ulong
) * 8;
8174 unsigned host_bits
= sizeof(*host_mask
) * 8;
8175 abi_ulong
*target_mask
;
8178 assert(host_size
>= target_size
);
8180 target_mask
= lock_user(VERIFY_WRITE
, target_addr
, target_size
, 0);
8182 return -TARGET_EFAULT
;
8185 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
8186 unsigned bit
= i
* target_bits
;
8189 for (j
= 0; j
< target_bits
; j
++, bit
++) {
8190 if (host_mask
[bit
/ host_bits
] & (1UL << (bit
% host_bits
))) {
8194 __put_user(val
, &target_mask
[i
]);
8197 unlock_user(target_mask
, target_addr
, target_size
);
8201 /* This is an internal helper for do_syscall so that it is easier
8202 * to have a single return point, so that actions, such as logging
8203 * of syscall results, can be performed.
8204 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8206 static abi_long
do_syscall1(void *cpu_env
, int num
, abi_long arg1
,
8207 abi_long arg2
, abi_long arg3
, abi_long arg4
,
8208 abi_long arg5
, abi_long arg6
, abi_long arg7
,
8211 CPUState
*cpu
= env_cpu(cpu_env
);
8213 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8214 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8215 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8216 || defined(TARGET_NR_statx)
8219 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8220 || defined(TARGET_NR_fstatfs)
8226 case TARGET_NR_exit
:
8227 /* In old applications this may be used to implement _exit(2).
8228 However in threaded applications it is used for thread termination,
8229 and _exit_group is used for application termination.
8230 Do thread termination if we have more then one thread. */
8232 if (block_signals()) {
8233 return -TARGET_ERESTARTSYS
;
8236 pthread_mutex_lock(&clone_lock
);
8238 if (CPU_NEXT(first_cpu
)) {
8239 TaskState
*ts
= cpu
->opaque
;
8241 object_property_set_bool(OBJECT(cpu
), "realized", false, NULL
);
8242 object_unref(OBJECT(cpu
));
8244 * At this point the CPU should be unrealized and removed
8245 * from cpu lists. We can clean-up the rest of the thread
8246 * data without the lock held.
8249 pthread_mutex_unlock(&clone_lock
);
8251 if (ts
->child_tidptr
) {
8252 put_user_u32(0, ts
->child_tidptr
);
8253 do_sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
8258 rcu_unregister_thread();
8262 pthread_mutex_unlock(&clone_lock
);
8263 preexit_cleanup(cpu_env
, arg1
);
8265 return 0; /* avoid warning */
8266 case TARGET_NR_read
:
8267 if (arg2
== 0 && arg3
== 0) {
8268 return get_errno(safe_read(arg1
, 0, 0));
8270 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
8271 return -TARGET_EFAULT
;
8272 ret
= get_errno(safe_read(arg1
, p
, arg3
));
8274 fd_trans_host_to_target_data(arg1
)) {
8275 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
8277 unlock_user(p
, arg2
, ret
);
8280 case TARGET_NR_write
:
8281 if (arg2
== 0 && arg3
== 0) {
8282 return get_errno(safe_write(arg1
, 0, 0));
8284 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
8285 return -TARGET_EFAULT
;
8286 if (fd_trans_target_to_host_data(arg1
)) {
8287 void *copy
= g_malloc(arg3
);
8288 memcpy(copy
, p
, arg3
);
8289 ret
= fd_trans_target_to_host_data(arg1
)(copy
, arg3
);
8291 ret
= get_errno(safe_write(arg1
, copy
, ret
));
8295 ret
= get_errno(safe_write(arg1
, p
, arg3
));
8297 unlock_user(p
, arg2
, 0);
8300 #ifdef TARGET_NR_open
8301 case TARGET_NR_open
:
8302 if (!(p
= lock_user_string(arg1
)))
8303 return -TARGET_EFAULT
;
8304 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
8305 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
8307 fd_trans_unregister(ret
);
8308 unlock_user(p
, arg1
, 0);
8311 case TARGET_NR_openat
:
8312 if (!(p
= lock_user_string(arg2
)))
8313 return -TARGET_EFAULT
;
8314 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
8315 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
8317 fd_trans_unregister(ret
);
8318 unlock_user(p
, arg2
, 0);
8320 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8321 case TARGET_NR_name_to_handle_at
:
8322 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
8325 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8326 case TARGET_NR_open_by_handle_at
:
8327 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
8328 fd_trans_unregister(ret
);
8331 case TARGET_NR_close
:
8332 fd_trans_unregister(arg1
);
8333 return get_errno(close(arg1
));
8336 return do_brk(arg1
);
8337 #ifdef TARGET_NR_fork
8338 case TARGET_NR_fork
:
8339 return get_errno(do_fork(cpu_env
, TARGET_SIGCHLD
, 0, 0, 0, 0));
8341 #ifdef TARGET_NR_waitpid
8342 case TARGET_NR_waitpid
:
8345 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
8346 if (!is_error(ret
) && arg2
&& ret
8347 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
8348 return -TARGET_EFAULT
;
8352 #ifdef TARGET_NR_waitid
8353 case TARGET_NR_waitid
:
8357 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
8358 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
8359 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
8360 return -TARGET_EFAULT
;
8361 host_to_target_siginfo(p
, &info
);
8362 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
8367 #ifdef TARGET_NR_creat /* not on alpha */
8368 case TARGET_NR_creat
:
8369 if (!(p
= lock_user_string(arg1
)))
8370 return -TARGET_EFAULT
;
8371 ret
= get_errno(creat(p
, arg2
));
8372 fd_trans_unregister(ret
);
8373 unlock_user(p
, arg1
, 0);
8376 #ifdef TARGET_NR_link
8377 case TARGET_NR_link
:
8380 p
= lock_user_string(arg1
);
8381 p2
= lock_user_string(arg2
);
8383 ret
= -TARGET_EFAULT
;
8385 ret
= get_errno(link(p
, p2
));
8386 unlock_user(p2
, arg2
, 0);
8387 unlock_user(p
, arg1
, 0);
8391 #if defined(TARGET_NR_linkat)
8392 case TARGET_NR_linkat
:
8396 return -TARGET_EFAULT
;
8397 p
= lock_user_string(arg2
);
8398 p2
= lock_user_string(arg4
);
8400 ret
= -TARGET_EFAULT
;
8402 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
8403 unlock_user(p
, arg2
, 0);
8404 unlock_user(p2
, arg4
, 0);
8408 #ifdef TARGET_NR_unlink
8409 case TARGET_NR_unlink
:
8410 if (!(p
= lock_user_string(arg1
)))
8411 return -TARGET_EFAULT
;
8412 ret
= get_errno(unlink(p
));
8413 unlock_user(p
, arg1
, 0);
8416 #if defined(TARGET_NR_unlinkat)
8417 case TARGET_NR_unlinkat
:
8418 if (!(p
= lock_user_string(arg2
)))
8419 return -TARGET_EFAULT
;
8420 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
8421 unlock_user(p
, arg2
, 0);
8424 case TARGET_NR_execve
:
8426 char **argp
, **envp
;
8429 abi_ulong guest_argp
;
8430 abi_ulong guest_envp
;
8437 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
8438 if (get_user_ual(addr
, gp
))
8439 return -TARGET_EFAULT
;
8446 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
8447 if (get_user_ual(addr
, gp
))
8448 return -TARGET_EFAULT
;
8454 argp
= g_new0(char *, argc
+ 1);
8455 envp
= g_new0(char *, envc
+ 1);
8457 for (gp
= guest_argp
, q
= argp
; gp
;
8458 gp
+= sizeof(abi_ulong
), q
++) {
8459 if (get_user_ual(addr
, gp
))
8463 if (!(*q
= lock_user_string(addr
)))
8465 total_size
+= strlen(*q
) + 1;
8469 for (gp
= guest_envp
, q
= envp
; gp
;
8470 gp
+= sizeof(abi_ulong
), q
++) {
8471 if (get_user_ual(addr
, gp
))
8475 if (!(*q
= lock_user_string(addr
)))
8477 total_size
+= strlen(*q
) + 1;
8481 if (!(p
= lock_user_string(arg1
)))
8483 /* Although execve() is not an interruptible syscall it is
8484 * a special case where we must use the safe_syscall wrapper:
8485 * if we allow a signal to happen before we make the host
8486 * syscall then we will 'lose' it, because at the point of
8487 * execve the process leaves QEMU's control. So we use the
8488 * safe syscall wrapper to ensure that we either take the
8489 * signal as a guest signal, or else it does not happen
8490 * before the execve completes and makes it the other
8491 * program's problem.
8493 ret
= get_errno(safe_execve(p
, argp
, envp
));
8494 unlock_user(p
, arg1
, 0);
8499 ret
= -TARGET_EFAULT
;
8502 for (gp
= guest_argp
, q
= argp
; *q
;
8503 gp
+= sizeof(abi_ulong
), q
++) {
8504 if (get_user_ual(addr
, gp
)
8507 unlock_user(*q
, addr
, 0);
8509 for (gp
= guest_envp
, q
= envp
; *q
;
8510 gp
+= sizeof(abi_ulong
), q
++) {
8511 if (get_user_ual(addr
, gp
)
8514 unlock_user(*q
, addr
, 0);
8521 case TARGET_NR_chdir
:
8522 if (!(p
= lock_user_string(arg1
)))
8523 return -TARGET_EFAULT
;
8524 ret
= get_errno(chdir(p
));
8525 unlock_user(p
, arg1
, 0);
8527 #ifdef TARGET_NR_time
8528 case TARGET_NR_time
:
8531 ret
= get_errno(time(&host_time
));
8534 && put_user_sal(host_time
, arg1
))
8535 return -TARGET_EFAULT
;
8539 #ifdef TARGET_NR_mknod
8540 case TARGET_NR_mknod
:
8541 if (!(p
= lock_user_string(arg1
)))
8542 return -TARGET_EFAULT
;
8543 ret
= get_errno(mknod(p
, arg2
, arg3
));
8544 unlock_user(p
, arg1
, 0);
8547 #if defined(TARGET_NR_mknodat)
8548 case TARGET_NR_mknodat
:
8549 if (!(p
= lock_user_string(arg2
)))
8550 return -TARGET_EFAULT
;
8551 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
8552 unlock_user(p
, arg2
, 0);
8555 #ifdef TARGET_NR_chmod
8556 case TARGET_NR_chmod
:
8557 if (!(p
= lock_user_string(arg1
)))
8558 return -TARGET_EFAULT
;
8559 ret
= get_errno(chmod(p
, arg2
));
8560 unlock_user(p
, arg1
, 0);
8563 #ifdef TARGET_NR_lseek
8564 case TARGET_NR_lseek
:
8565 return get_errno(lseek(arg1
, arg2
, arg3
));
8567 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8568 /* Alpha specific */
8569 case TARGET_NR_getxpid
:
8570 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
8571 return get_errno(getpid());
8573 #ifdef TARGET_NR_getpid
8574 case TARGET_NR_getpid
:
8575 return get_errno(getpid());
8577 case TARGET_NR_mount
:
8579 /* need to look at the data field */
8583 p
= lock_user_string(arg1
);
8585 return -TARGET_EFAULT
;
8591 p2
= lock_user_string(arg2
);
8594 unlock_user(p
, arg1
, 0);
8596 return -TARGET_EFAULT
;
8600 p3
= lock_user_string(arg3
);
8603 unlock_user(p
, arg1
, 0);
8605 unlock_user(p2
, arg2
, 0);
8606 return -TARGET_EFAULT
;
8612 /* FIXME - arg5 should be locked, but it isn't clear how to
8613 * do that since it's not guaranteed to be a NULL-terminated
8617 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
8619 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
8621 ret
= get_errno(ret
);
8624 unlock_user(p
, arg1
, 0);
8626 unlock_user(p2
, arg2
, 0);
8628 unlock_user(p3
, arg3
, 0);
8632 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8633 #if defined(TARGET_NR_umount)
8634 case TARGET_NR_umount
:
8636 #if defined(TARGET_NR_oldumount)
8637 case TARGET_NR_oldumount
:
8639 if (!(p
= lock_user_string(arg1
)))
8640 return -TARGET_EFAULT
;
8641 ret
= get_errno(umount(p
));
8642 unlock_user(p
, arg1
, 0);
8645 #ifdef TARGET_NR_stime /* not on alpha */
8646 case TARGET_NR_stime
:
8650 if (get_user_sal(ts
.tv_sec
, arg1
)) {
8651 return -TARGET_EFAULT
;
8653 return get_errno(clock_settime(CLOCK_REALTIME
, &ts
));
8656 #ifdef TARGET_NR_alarm /* not on alpha */
8657 case TARGET_NR_alarm
:
8660 #ifdef TARGET_NR_pause /* not on alpha */
8661 case TARGET_NR_pause
:
8662 if (!block_signals()) {
8663 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
8665 return -TARGET_EINTR
;
8667 #ifdef TARGET_NR_utime
8668 case TARGET_NR_utime
:
8670 struct utimbuf tbuf
, *host_tbuf
;
8671 struct target_utimbuf
*target_tbuf
;
8673 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
8674 return -TARGET_EFAULT
;
8675 tbuf
.actime
= tswapal(target_tbuf
->actime
);
8676 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
8677 unlock_user_struct(target_tbuf
, arg2
, 0);
8682 if (!(p
= lock_user_string(arg1
)))
8683 return -TARGET_EFAULT
;
8684 ret
= get_errno(utime(p
, host_tbuf
));
8685 unlock_user(p
, arg1
, 0);
8689 #ifdef TARGET_NR_utimes
8690 case TARGET_NR_utimes
:
8692 struct timeval
*tvp
, tv
[2];
8694 if (copy_from_user_timeval(&tv
[0], arg2
)
8695 || copy_from_user_timeval(&tv
[1],
8696 arg2
+ sizeof(struct target_timeval
)))
8697 return -TARGET_EFAULT
;
8702 if (!(p
= lock_user_string(arg1
)))
8703 return -TARGET_EFAULT
;
8704 ret
= get_errno(utimes(p
, tvp
));
8705 unlock_user(p
, arg1
, 0);
8709 #if defined(TARGET_NR_futimesat)
8710 case TARGET_NR_futimesat
:
8712 struct timeval
*tvp
, tv
[2];
8714 if (copy_from_user_timeval(&tv
[0], arg3
)
8715 || copy_from_user_timeval(&tv
[1],
8716 arg3
+ sizeof(struct target_timeval
)))
8717 return -TARGET_EFAULT
;
8722 if (!(p
= lock_user_string(arg2
))) {
8723 return -TARGET_EFAULT
;
8725 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
8726 unlock_user(p
, arg2
, 0);
8730 #ifdef TARGET_NR_access
8731 case TARGET_NR_access
:
8732 if (!(p
= lock_user_string(arg1
))) {
8733 return -TARGET_EFAULT
;
8735 ret
= get_errno(access(path(p
), arg2
));
8736 unlock_user(p
, arg1
, 0);
8739 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8740 case TARGET_NR_faccessat
:
8741 if (!(p
= lock_user_string(arg2
))) {
8742 return -TARGET_EFAULT
;
8744 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
8745 unlock_user(p
, arg2
, 0);
8748 #ifdef TARGET_NR_nice /* not on alpha */
8749 case TARGET_NR_nice
:
8750 return get_errno(nice(arg1
));
8752 case TARGET_NR_sync
:
8755 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8756 case TARGET_NR_syncfs
:
8757 return get_errno(syncfs(arg1
));
8759 case TARGET_NR_kill
:
8760 return get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
8761 #ifdef TARGET_NR_rename
8762 case TARGET_NR_rename
:
8765 p
= lock_user_string(arg1
);
8766 p2
= lock_user_string(arg2
);
8768 ret
= -TARGET_EFAULT
;
8770 ret
= get_errno(rename(p
, p2
));
8771 unlock_user(p2
, arg2
, 0);
8772 unlock_user(p
, arg1
, 0);
8776 #if defined(TARGET_NR_renameat)
8777 case TARGET_NR_renameat
:
8780 p
= lock_user_string(arg2
);
8781 p2
= lock_user_string(arg4
);
8783 ret
= -TARGET_EFAULT
;
8785 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
8786 unlock_user(p2
, arg4
, 0);
8787 unlock_user(p
, arg2
, 0);
8791 #if defined(TARGET_NR_renameat2)
8792 case TARGET_NR_renameat2
:
8795 p
= lock_user_string(arg2
);
8796 p2
= lock_user_string(arg4
);
8798 ret
= -TARGET_EFAULT
;
8800 ret
= get_errno(sys_renameat2(arg1
, p
, arg3
, p2
, arg5
));
8802 unlock_user(p2
, arg4
, 0);
8803 unlock_user(p
, arg2
, 0);
8807 #ifdef TARGET_NR_mkdir
8808 case TARGET_NR_mkdir
:
8809 if (!(p
= lock_user_string(arg1
)))
8810 return -TARGET_EFAULT
;
8811 ret
= get_errno(mkdir(p
, arg2
));
8812 unlock_user(p
, arg1
, 0);
8815 #if defined(TARGET_NR_mkdirat)
8816 case TARGET_NR_mkdirat
:
8817 if (!(p
= lock_user_string(arg2
)))
8818 return -TARGET_EFAULT
;
8819 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
8820 unlock_user(p
, arg2
, 0);
8823 #ifdef TARGET_NR_rmdir
8824 case TARGET_NR_rmdir
:
8825 if (!(p
= lock_user_string(arg1
)))
8826 return -TARGET_EFAULT
;
8827 ret
= get_errno(rmdir(p
));
8828 unlock_user(p
, arg1
, 0);
8832 ret
= get_errno(dup(arg1
));
8834 fd_trans_dup(arg1
, ret
);
8837 #ifdef TARGET_NR_pipe
8838 case TARGET_NR_pipe
:
8839 return do_pipe(cpu_env
, arg1
, 0, 0);
8841 #ifdef TARGET_NR_pipe2
8842 case TARGET_NR_pipe2
:
8843 return do_pipe(cpu_env
, arg1
,
8844 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
8846 case TARGET_NR_times
:
8848 struct target_tms
*tmsp
;
8850 ret
= get_errno(times(&tms
));
8852 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
8854 return -TARGET_EFAULT
;
8855 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
8856 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
8857 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
8858 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
8861 ret
= host_to_target_clock_t(ret
);
8864 case TARGET_NR_acct
:
8866 ret
= get_errno(acct(NULL
));
8868 if (!(p
= lock_user_string(arg1
))) {
8869 return -TARGET_EFAULT
;
8871 ret
= get_errno(acct(path(p
)));
8872 unlock_user(p
, arg1
, 0);
8875 #ifdef TARGET_NR_umount2
8876 case TARGET_NR_umount2
:
8877 if (!(p
= lock_user_string(arg1
)))
8878 return -TARGET_EFAULT
;
8879 ret
= get_errno(umount2(p
, arg2
));
8880 unlock_user(p
, arg1
, 0);
8883 case TARGET_NR_ioctl
:
8884 return do_ioctl(arg1
, arg2
, arg3
);
8885 #ifdef TARGET_NR_fcntl
8886 case TARGET_NR_fcntl
:
8887 return do_fcntl(arg1
, arg2
, arg3
);
8889 case TARGET_NR_setpgid
:
8890 return get_errno(setpgid(arg1
, arg2
));
8891 case TARGET_NR_umask
:
8892 return get_errno(umask(arg1
));
8893 case TARGET_NR_chroot
:
8894 if (!(p
= lock_user_string(arg1
)))
8895 return -TARGET_EFAULT
;
8896 ret
= get_errno(chroot(p
));
8897 unlock_user(p
, arg1
, 0);
8899 #ifdef TARGET_NR_dup2
8900 case TARGET_NR_dup2
:
8901 ret
= get_errno(dup2(arg1
, arg2
));
8903 fd_trans_dup(arg1
, arg2
);
8907 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8908 case TARGET_NR_dup3
:
8912 if ((arg3
& ~TARGET_O_CLOEXEC
) != 0) {
8915 host_flags
= target_to_host_bitmask(arg3
, fcntl_flags_tbl
);
8916 ret
= get_errno(dup3(arg1
, arg2
, host_flags
));
8918 fd_trans_dup(arg1
, arg2
);
8923 #ifdef TARGET_NR_getppid /* not on alpha */
8924 case TARGET_NR_getppid
:
8925 return get_errno(getppid());
8927 #ifdef TARGET_NR_getpgrp
8928 case TARGET_NR_getpgrp
:
8929 return get_errno(getpgrp());
8931 case TARGET_NR_setsid
:
8932 return get_errno(setsid());
8933 #ifdef TARGET_NR_sigaction
8934 case TARGET_NR_sigaction
:
8936 #if defined(TARGET_ALPHA)
8937 struct target_sigaction act
, oact
, *pact
= 0;
8938 struct target_old_sigaction
*old_act
;
8940 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8941 return -TARGET_EFAULT
;
8942 act
._sa_handler
= old_act
->_sa_handler
;
8943 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8944 act
.sa_flags
= old_act
->sa_flags
;
8945 act
.sa_restorer
= 0;
8946 unlock_user_struct(old_act
, arg2
, 0);
8949 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8950 if (!is_error(ret
) && arg3
) {
8951 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8952 return -TARGET_EFAULT
;
8953 old_act
->_sa_handler
= oact
._sa_handler
;
8954 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8955 old_act
->sa_flags
= oact
.sa_flags
;
8956 unlock_user_struct(old_act
, arg3
, 1);
8958 #elif defined(TARGET_MIPS)
8959 struct target_sigaction act
, oact
, *pact
, *old_act
;
8962 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8963 return -TARGET_EFAULT
;
8964 act
._sa_handler
= old_act
->_sa_handler
;
8965 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
8966 act
.sa_flags
= old_act
->sa_flags
;
8967 unlock_user_struct(old_act
, arg2
, 0);
8973 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8975 if (!is_error(ret
) && arg3
) {
8976 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8977 return -TARGET_EFAULT
;
8978 old_act
->_sa_handler
= oact
._sa_handler
;
8979 old_act
->sa_flags
= oact
.sa_flags
;
8980 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
8981 old_act
->sa_mask
.sig
[1] = 0;
8982 old_act
->sa_mask
.sig
[2] = 0;
8983 old_act
->sa_mask
.sig
[3] = 0;
8984 unlock_user_struct(old_act
, arg3
, 1);
8987 struct target_old_sigaction
*old_act
;
8988 struct target_sigaction act
, oact
, *pact
;
8990 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8991 return -TARGET_EFAULT
;
8992 act
._sa_handler
= old_act
->_sa_handler
;
8993 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8994 act
.sa_flags
= old_act
->sa_flags
;
8995 act
.sa_restorer
= old_act
->sa_restorer
;
8996 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8997 act
.ka_restorer
= 0;
8999 unlock_user_struct(old_act
, arg2
, 0);
9004 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
9005 if (!is_error(ret
) && arg3
) {
9006 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
9007 return -TARGET_EFAULT
;
9008 old_act
->_sa_handler
= oact
._sa_handler
;
9009 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
9010 old_act
->sa_flags
= oact
.sa_flags
;
9011 old_act
->sa_restorer
= oact
.sa_restorer
;
9012 unlock_user_struct(old_act
, arg3
, 1);
9018 case TARGET_NR_rt_sigaction
:
9020 #if defined(TARGET_ALPHA)
9021 /* For Alpha and SPARC this is a 5 argument syscall, with
9022 * a 'restorer' parameter which must be copied into the
9023 * sa_restorer field of the sigaction struct.
9024 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9025 * and arg5 is the sigsetsize.
9026 * Alpha also has a separate rt_sigaction struct that it uses
9027 * here; SPARC uses the usual sigaction struct.
9029 struct target_rt_sigaction
*rt_act
;
9030 struct target_sigaction act
, oact
, *pact
= 0;
9032 if (arg4
!= sizeof(target_sigset_t
)) {
9033 return -TARGET_EINVAL
;
9036 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
9037 return -TARGET_EFAULT
;
9038 act
._sa_handler
= rt_act
->_sa_handler
;
9039 act
.sa_mask
= rt_act
->sa_mask
;
9040 act
.sa_flags
= rt_act
->sa_flags
;
9041 act
.sa_restorer
= arg5
;
9042 unlock_user_struct(rt_act
, arg2
, 0);
9045 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
9046 if (!is_error(ret
) && arg3
) {
9047 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
9048 return -TARGET_EFAULT
;
9049 rt_act
->_sa_handler
= oact
._sa_handler
;
9050 rt_act
->sa_mask
= oact
.sa_mask
;
9051 rt_act
->sa_flags
= oact
.sa_flags
;
9052 unlock_user_struct(rt_act
, arg3
, 1);
9056 target_ulong restorer
= arg4
;
9057 target_ulong sigsetsize
= arg5
;
9059 target_ulong sigsetsize
= arg4
;
9061 struct target_sigaction
*act
;
9062 struct target_sigaction
*oact
;
9064 if (sigsetsize
!= sizeof(target_sigset_t
)) {
9065 return -TARGET_EINVAL
;
9068 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1)) {
9069 return -TARGET_EFAULT
;
9071 #ifdef TARGET_ARCH_HAS_KA_RESTORER
9072 act
->ka_restorer
= restorer
;
9078 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
9079 ret
= -TARGET_EFAULT
;
9080 goto rt_sigaction_fail
;
9084 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
9087 unlock_user_struct(act
, arg2
, 0);
9089 unlock_user_struct(oact
, arg3
, 1);
9093 #ifdef TARGET_NR_sgetmask /* not on alpha */
9094 case TARGET_NR_sgetmask
:
9097 abi_ulong target_set
;
9098 ret
= do_sigprocmask(0, NULL
, &cur_set
);
9100 host_to_target_old_sigset(&target_set
, &cur_set
);
9106 #ifdef TARGET_NR_ssetmask /* not on alpha */
9107 case TARGET_NR_ssetmask
:
9110 abi_ulong target_set
= arg1
;
9111 target_to_host_old_sigset(&set
, &target_set
);
9112 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
9114 host_to_target_old_sigset(&target_set
, &oset
);
9120 #ifdef TARGET_NR_sigprocmask
9121 case TARGET_NR_sigprocmask
:
9123 #if defined(TARGET_ALPHA)
9124 sigset_t set
, oldset
;
9129 case TARGET_SIG_BLOCK
:
9132 case TARGET_SIG_UNBLOCK
:
9135 case TARGET_SIG_SETMASK
:
9139 return -TARGET_EINVAL
;
9142 target_to_host_old_sigset(&set
, &mask
);
9144 ret
= do_sigprocmask(how
, &set
, &oldset
);
9145 if (!is_error(ret
)) {
9146 host_to_target_old_sigset(&mask
, &oldset
);
9148 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
9151 sigset_t set
, oldset
, *set_ptr
;
9156 case TARGET_SIG_BLOCK
:
9159 case TARGET_SIG_UNBLOCK
:
9162 case TARGET_SIG_SETMASK
:
9166 return -TARGET_EINVAL
;
9168 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
9169 return -TARGET_EFAULT
;
9170 target_to_host_old_sigset(&set
, p
);
9171 unlock_user(p
, arg2
, 0);
9177 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
9178 if (!is_error(ret
) && arg3
) {
9179 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
9180 return -TARGET_EFAULT
;
9181 host_to_target_old_sigset(p
, &oldset
);
9182 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
9188 case TARGET_NR_rt_sigprocmask
:
9191 sigset_t set
, oldset
, *set_ptr
;
9193 if (arg4
!= sizeof(target_sigset_t
)) {
9194 return -TARGET_EINVAL
;
9199 case TARGET_SIG_BLOCK
:
9202 case TARGET_SIG_UNBLOCK
:
9205 case TARGET_SIG_SETMASK
:
9209 return -TARGET_EINVAL
;
9211 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
9212 return -TARGET_EFAULT
;
9213 target_to_host_sigset(&set
, p
);
9214 unlock_user(p
, arg2
, 0);
9220 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
9221 if (!is_error(ret
) && arg3
) {
9222 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
9223 return -TARGET_EFAULT
;
9224 host_to_target_sigset(p
, &oldset
);
9225 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
9229 #ifdef TARGET_NR_sigpending
9230 case TARGET_NR_sigpending
:
9233 ret
= get_errno(sigpending(&set
));
9234 if (!is_error(ret
)) {
9235 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
9236 return -TARGET_EFAULT
;
9237 host_to_target_old_sigset(p
, &set
);
9238 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
9243 case TARGET_NR_rt_sigpending
:
9247 /* Yes, this check is >, not != like most. We follow the kernel's
9248 * logic and it does it like this because it implements
9249 * NR_sigpending through the same code path, and in that case
9250 * the old_sigset_t is smaller in size.
9252 if (arg2
> sizeof(target_sigset_t
)) {
9253 return -TARGET_EINVAL
;
9256 ret
= get_errno(sigpending(&set
));
9257 if (!is_error(ret
)) {
9258 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
9259 return -TARGET_EFAULT
;
9260 host_to_target_sigset(p
, &set
);
9261 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
9265 #ifdef TARGET_NR_sigsuspend
9266 case TARGET_NR_sigsuspend
:
9268 TaskState
*ts
= cpu
->opaque
;
9269 #if defined(TARGET_ALPHA)
9270 abi_ulong mask
= arg1
;
9271 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
9273 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
9274 return -TARGET_EFAULT
;
9275 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
9276 unlock_user(p
, arg1
, 0);
9278 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
9280 if (ret
!= -TARGET_ERESTARTSYS
) {
9281 ts
->in_sigsuspend
= 1;
9286 case TARGET_NR_rt_sigsuspend
:
9288 TaskState
*ts
= cpu
->opaque
;
9290 if (arg2
!= sizeof(target_sigset_t
)) {
9291 return -TARGET_EINVAL
;
9293 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
9294 return -TARGET_EFAULT
;
9295 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
9296 unlock_user(p
, arg1
, 0);
9297 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
9299 if (ret
!= -TARGET_ERESTARTSYS
) {
9300 ts
->in_sigsuspend
= 1;
9304 #ifdef TARGET_NR_rt_sigtimedwait
9305 case TARGET_NR_rt_sigtimedwait
:
9308 struct timespec uts
, *puts
;
9311 if (arg4
!= sizeof(target_sigset_t
)) {
9312 return -TARGET_EINVAL
;
9315 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
9316 return -TARGET_EFAULT
;
9317 target_to_host_sigset(&set
, p
);
9318 unlock_user(p
, arg1
, 0);
9321 if (target_to_host_timespec(puts
, arg3
)) {
9322 return -TARGET_EFAULT
;
9327 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
9329 if (!is_error(ret
)) {
9331 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
9334 return -TARGET_EFAULT
;
9336 host_to_target_siginfo(p
, &uinfo
);
9337 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
9339 ret
= host_to_target_signal(ret
);
9344 #ifdef TARGET_NR_rt_sigtimedwait_time64
9345 case TARGET_NR_rt_sigtimedwait_time64
:
9348 struct timespec uts
, *puts
;
9351 if (arg4
!= sizeof(target_sigset_t
)) {
9352 return -TARGET_EINVAL
;
9355 p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1);
9357 return -TARGET_EFAULT
;
9359 target_to_host_sigset(&set
, p
);
9360 unlock_user(p
, arg1
, 0);
9363 if (target_to_host_timespec64(puts
, arg3
)) {
9364 return -TARGET_EFAULT
;
9369 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
9371 if (!is_error(ret
)) {
9373 p
= lock_user(VERIFY_WRITE
, arg2
,
9374 sizeof(target_siginfo_t
), 0);
9376 return -TARGET_EFAULT
;
9378 host_to_target_siginfo(p
, &uinfo
);
9379 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
9381 ret
= host_to_target_signal(ret
);
9386 case TARGET_NR_rt_sigqueueinfo
:
9390 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
9392 return -TARGET_EFAULT
;
9394 target_to_host_siginfo(&uinfo
, p
);
9395 unlock_user(p
, arg3
, 0);
9396 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
9399 case TARGET_NR_rt_tgsigqueueinfo
:
9403 p
= lock_user(VERIFY_READ
, arg4
, sizeof(target_siginfo_t
), 1);
9405 return -TARGET_EFAULT
;
9407 target_to_host_siginfo(&uinfo
, p
);
9408 unlock_user(p
, arg4
, 0);
9409 ret
= get_errno(sys_rt_tgsigqueueinfo(arg1
, arg2
, arg3
, &uinfo
));
9412 #ifdef TARGET_NR_sigreturn
9413 case TARGET_NR_sigreturn
:
9414 if (block_signals()) {
9415 return -TARGET_ERESTARTSYS
;
9417 return do_sigreturn(cpu_env
);
9419 case TARGET_NR_rt_sigreturn
:
9420 if (block_signals()) {
9421 return -TARGET_ERESTARTSYS
;
9423 return do_rt_sigreturn(cpu_env
);
9424 case TARGET_NR_sethostname
:
9425 if (!(p
= lock_user_string(arg1
)))
9426 return -TARGET_EFAULT
;
9427 ret
= get_errno(sethostname(p
, arg2
));
9428 unlock_user(p
, arg1
, 0);
9430 #ifdef TARGET_NR_setrlimit
9431 case TARGET_NR_setrlimit
:
9433 int resource
= target_to_host_resource(arg1
);
9434 struct target_rlimit
*target_rlim
;
9436 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
9437 return -TARGET_EFAULT
;
9438 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
9439 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
9440 unlock_user_struct(target_rlim
, arg2
, 0);
9442 * If we just passed through resource limit settings for memory then
9443 * they would also apply to QEMU's own allocations, and QEMU will
9444 * crash or hang or die if its allocations fail. Ideally we would
9445 * track the guest allocations in QEMU and apply the limits ourselves.
9446 * For now, just tell the guest the call succeeded but don't actually
9449 if (resource
!= RLIMIT_AS
&&
9450 resource
!= RLIMIT_DATA
&&
9451 resource
!= RLIMIT_STACK
) {
9452 return get_errno(setrlimit(resource
, &rlim
));
9458 #ifdef TARGET_NR_getrlimit
9459 case TARGET_NR_getrlimit
:
9461 int resource
= target_to_host_resource(arg1
);
9462 struct target_rlimit
*target_rlim
;
9465 ret
= get_errno(getrlimit(resource
, &rlim
));
9466 if (!is_error(ret
)) {
9467 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
9468 return -TARGET_EFAULT
;
9469 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
9470 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
9471 unlock_user_struct(target_rlim
, arg2
, 1);
9476 case TARGET_NR_getrusage
:
9478 struct rusage rusage
;
9479 ret
= get_errno(getrusage(arg1
, &rusage
));
9480 if (!is_error(ret
)) {
9481 ret
= host_to_target_rusage(arg2
, &rusage
);
9485 #if defined(TARGET_NR_gettimeofday)
9486 case TARGET_NR_gettimeofday
:
9491 ret
= get_errno(gettimeofday(&tv
, &tz
));
9492 if (!is_error(ret
)) {
9493 if (arg1
&& copy_to_user_timeval(arg1
, &tv
)) {
9494 return -TARGET_EFAULT
;
9496 if (arg2
&& copy_to_user_timezone(arg2
, &tz
)) {
9497 return -TARGET_EFAULT
;
9503 #if defined(TARGET_NR_settimeofday)
9504 case TARGET_NR_settimeofday
:
9506 struct timeval tv
, *ptv
= NULL
;
9507 struct timezone tz
, *ptz
= NULL
;
9510 if (copy_from_user_timeval(&tv
, arg1
)) {
9511 return -TARGET_EFAULT
;
9517 if (copy_from_user_timezone(&tz
, arg2
)) {
9518 return -TARGET_EFAULT
;
9523 return get_errno(settimeofday(ptv
, ptz
));
9526 #if defined(TARGET_NR_select)
9527 case TARGET_NR_select
:
9528 #if defined(TARGET_WANT_NI_OLD_SELECT)
9529 /* some architectures used to have old_select here
9530 * but now ENOSYS it.
9532 ret
= -TARGET_ENOSYS
;
9533 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9534 ret
= do_old_select(arg1
);
9536 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9540 #ifdef TARGET_NR_pselect6
9541 case TARGET_NR_pselect6
:
9542 return do_pselect6(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, false);
9544 #ifdef TARGET_NR_pselect6_time64
9545 case TARGET_NR_pselect6_time64
:
9546 return do_pselect6(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, true);
9548 #ifdef TARGET_NR_symlink
9549 case TARGET_NR_symlink
:
9552 p
= lock_user_string(arg1
);
9553 p2
= lock_user_string(arg2
);
9555 ret
= -TARGET_EFAULT
;
9557 ret
= get_errno(symlink(p
, p2
));
9558 unlock_user(p2
, arg2
, 0);
9559 unlock_user(p
, arg1
, 0);
9563 #if defined(TARGET_NR_symlinkat)
9564 case TARGET_NR_symlinkat
:
9567 p
= lock_user_string(arg1
);
9568 p2
= lock_user_string(arg3
);
9570 ret
= -TARGET_EFAULT
;
9572 ret
= get_errno(symlinkat(p
, arg2
, p2
));
9573 unlock_user(p2
, arg3
, 0);
9574 unlock_user(p
, arg1
, 0);
9578 #ifdef TARGET_NR_readlink
9579 case TARGET_NR_readlink
:
9582 p
= lock_user_string(arg1
);
9583 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9585 ret
= -TARGET_EFAULT
;
9587 /* Short circuit this for the magic exe check. */
9588 ret
= -TARGET_EINVAL
;
9589 } else if (is_proc_myself((const char *)p
, "exe")) {
9590 char real
[PATH_MAX
], *temp
;
9591 temp
= realpath(exec_path
, real
);
9592 /* Return value is # of bytes that we wrote to the buffer. */
9594 ret
= get_errno(-1);
9596 /* Don't worry about sign mismatch as earlier mapping
9597 * logic would have thrown a bad address error. */
9598 ret
= MIN(strlen(real
), arg3
);
9599 /* We cannot NUL terminate the string. */
9600 memcpy(p2
, real
, ret
);
9603 ret
= get_errno(readlink(path(p
), p2
, arg3
));
9605 unlock_user(p2
, arg2
, ret
);
9606 unlock_user(p
, arg1
, 0);
9610 #if defined(TARGET_NR_readlinkat)
9611 case TARGET_NR_readlinkat
:
9614 p
= lock_user_string(arg2
);
9615 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
9617 ret
= -TARGET_EFAULT
;
9618 } else if (is_proc_myself((const char *)p
, "exe")) {
9619 char real
[PATH_MAX
], *temp
;
9620 temp
= realpath(exec_path
, real
);
9621 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
9622 snprintf((char *)p2
, arg4
, "%s", real
);
9624 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
9626 unlock_user(p2
, arg3
, ret
);
9627 unlock_user(p
, arg2
, 0);
9631 #ifdef TARGET_NR_swapon
9632 case TARGET_NR_swapon
:
9633 if (!(p
= lock_user_string(arg1
)))
9634 return -TARGET_EFAULT
;
9635 ret
= get_errno(swapon(p
, arg2
));
9636 unlock_user(p
, arg1
, 0);
9639 case TARGET_NR_reboot
:
9640 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
9641 /* arg4 must be ignored in all other cases */
9642 p
= lock_user_string(arg4
);
9644 return -TARGET_EFAULT
;
9646 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
9647 unlock_user(p
, arg4
, 0);
9649 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
9652 #ifdef TARGET_NR_mmap
9653 case TARGET_NR_mmap
:
9654 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9655 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9656 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9657 || defined(TARGET_S390X)
9660 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
9661 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
9662 return -TARGET_EFAULT
;
9669 unlock_user(v
, arg1
, 0);
9670 ret
= get_errno(target_mmap(v1
, v2
, v3
,
9671 target_to_host_bitmask(v4
, mmap_flags_tbl
),
9675 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
9676 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9682 #ifdef TARGET_NR_mmap2
9683 case TARGET_NR_mmap2
:
9685 #define MMAP_SHIFT 12
9687 ret
= target_mmap(arg1
, arg2
, arg3
,
9688 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9689 arg5
, arg6
<< MMAP_SHIFT
);
9690 return get_errno(ret
);
9692 case TARGET_NR_munmap
:
9693 return get_errno(target_munmap(arg1
, arg2
));
9694 case TARGET_NR_mprotect
:
9696 TaskState
*ts
= cpu
->opaque
;
9697 /* Special hack to detect libc making the stack executable. */
9698 if ((arg3
& PROT_GROWSDOWN
)
9699 && arg1
>= ts
->info
->stack_limit
9700 && arg1
<= ts
->info
->start_stack
) {
9701 arg3
&= ~PROT_GROWSDOWN
;
9702 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
9703 arg1
= ts
->info
->stack_limit
;
9706 return get_errno(target_mprotect(arg1
, arg2
, arg3
));
9707 #ifdef TARGET_NR_mremap
9708 case TARGET_NR_mremap
:
9709 return get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
9711 /* ??? msync/mlock/munlock are broken for softmmu. */
9712 #ifdef TARGET_NR_msync
9713 case TARGET_NR_msync
:
9714 return get_errno(msync(g2h(arg1
), arg2
, arg3
));
9716 #ifdef TARGET_NR_mlock
9717 case TARGET_NR_mlock
:
9718 return get_errno(mlock(g2h(arg1
), arg2
));
9720 #ifdef TARGET_NR_munlock
9721 case TARGET_NR_munlock
:
9722 return get_errno(munlock(g2h(arg1
), arg2
));
9724 #ifdef TARGET_NR_mlockall
9725 case TARGET_NR_mlockall
:
9726 return get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
9728 #ifdef TARGET_NR_munlockall
9729 case TARGET_NR_munlockall
:
9730 return get_errno(munlockall());
9732 #ifdef TARGET_NR_truncate
9733 case TARGET_NR_truncate
:
9734 if (!(p
= lock_user_string(arg1
)))
9735 return -TARGET_EFAULT
;
9736 ret
= get_errno(truncate(p
, arg2
));
9737 unlock_user(p
, arg1
, 0);
9740 #ifdef TARGET_NR_ftruncate
9741 case TARGET_NR_ftruncate
:
9742 return get_errno(ftruncate(arg1
, arg2
));
9744 case TARGET_NR_fchmod
:
9745 return get_errno(fchmod(arg1
, arg2
));
9746 #if defined(TARGET_NR_fchmodat)
9747 case TARGET_NR_fchmodat
:
9748 if (!(p
= lock_user_string(arg2
)))
9749 return -TARGET_EFAULT
;
9750 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
9751 unlock_user(p
, arg2
, 0);
9754 case TARGET_NR_getpriority
:
9755 /* Note that negative values are valid for getpriority, so we must
9756 differentiate based on errno settings. */
9758 ret
= getpriority(arg1
, arg2
);
9759 if (ret
== -1 && errno
!= 0) {
9760 return -host_to_target_errno(errno
);
9763 /* Return value is the unbiased priority. Signal no error. */
9764 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
9766 /* Return value is a biased priority to avoid negative numbers. */
9770 case TARGET_NR_setpriority
:
9771 return get_errno(setpriority(arg1
, arg2
, arg3
));
9772 #ifdef TARGET_NR_statfs
9773 case TARGET_NR_statfs
:
9774 if (!(p
= lock_user_string(arg1
))) {
9775 return -TARGET_EFAULT
;
9777 ret
= get_errno(statfs(path(p
), &stfs
));
9778 unlock_user(p
, arg1
, 0);
9780 if (!is_error(ret
)) {
9781 struct target_statfs
*target_stfs
;
9783 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
9784 return -TARGET_EFAULT
;
9785 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9786 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9787 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9788 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9789 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9790 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9791 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9792 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9793 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9794 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9795 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9796 #ifdef _STATFS_F_FLAGS
9797 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
9799 __put_user(0, &target_stfs
->f_flags
);
9801 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9802 unlock_user_struct(target_stfs
, arg2
, 1);
9806 #ifdef TARGET_NR_fstatfs
9807 case TARGET_NR_fstatfs
:
9808 ret
= get_errno(fstatfs(arg1
, &stfs
));
9809 goto convert_statfs
;
9811 #ifdef TARGET_NR_statfs64
9812 case TARGET_NR_statfs64
:
9813 if (!(p
= lock_user_string(arg1
))) {
9814 return -TARGET_EFAULT
;
9816 ret
= get_errno(statfs(path(p
), &stfs
));
9817 unlock_user(p
, arg1
, 0);
9819 if (!is_error(ret
)) {
9820 struct target_statfs64
*target_stfs
;
9822 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
9823 return -TARGET_EFAULT
;
9824 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9825 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9826 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9827 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9828 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9829 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9830 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9831 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9832 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9833 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9834 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9835 #ifdef _STATFS_F_FLAGS
9836 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
9838 __put_user(0, &target_stfs
->f_flags
);
9840 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9841 unlock_user_struct(target_stfs
, arg3
, 1);
9844 case TARGET_NR_fstatfs64
:
9845 ret
= get_errno(fstatfs(arg1
, &stfs
));
9846 goto convert_statfs64
;
9848 #ifdef TARGET_NR_socketcall
9849 case TARGET_NR_socketcall
:
9850 return do_socketcall(arg1
, arg2
);
9852 #ifdef TARGET_NR_accept
9853 case TARGET_NR_accept
:
9854 return do_accept4(arg1
, arg2
, arg3
, 0);
9856 #ifdef TARGET_NR_accept4
9857 case TARGET_NR_accept4
:
9858 return do_accept4(arg1
, arg2
, arg3
, arg4
);
9860 #ifdef TARGET_NR_bind
9861 case TARGET_NR_bind
:
9862 return do_bind(arg1
, arg2
, arg3
);
9864 #ifdef TARGET_NR_connect
9865 case TARGET_NR_connect
:
9866 return do_connect(arg1
, arg2
, arg3
);
9868 #ifdef TARGET_NR_getpeername
9869 case TARGET_NR_getpeername
:
9870 return do_getpeername(arg1
, arg2
, arg3
);
9872 #ifdef TARGET_NR_getsockname
9873 case TARGET_NR_getsockname
:
9874 return do_getsockname(arg1
, arg2
, arg3
);
9876 #ifdef TARGET_NR_getsockopt
9877 case TARGET_NR_getsockopt
:
9878 return do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
9880 #ifdef TARGET_NR_listen
9881 case TARGET_NR_listen
:
9882 return get_errno(listen(arg1
, arg2
));
9884 #ifdef TARGET_NR_recv
9885 case TARGET_NR_recv
:
9886 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
9888 #ifdef TARGET_NR_recvfrom
9889 case TARGET_NR_recvfrom
:
9890 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9892 #ifdef TARGET_NR_recvmsg
9893 case TARGET_NR_recvmsg
:
9894 return do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
9896 #ifdef TARGET_NR_send
9897 case TARGET_NR_send
:
9898 return do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
9900 #ifdef TARGET_NR_sendmsg
9901 case TARGET_NR_sendmsg
:
9902 return do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
9904 #ifdef TARGET_NR_sendmmsg
9905 case TARGET_NR_sendmmsg
:
9906 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
9908 #ifdef TARGET_NR_recvmmsg
9909 case TARGET_NR_recvmmsg
:
9910 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
9912 #ifdef TARGET_NR_sendto
9913 case TARGET_NR_sendto
:
9914 return do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9916 #ifdef TARGET_NR_shutdown
9917 case TARGET_NR_shutdown
:
9918 return get_errno(shutdown(arg1
, arg2
));
9920 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9921 case TARGET_NR_getrandom
:
9922 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
9924 return -TARGET_EFAULT
;
9926 ret
= get_errno(getrandom(p
, arg2
, arg3
));
9927 unlock_user(p
, arg1
, ret
);
9930 #ifdef TARGET_NR_socket
9931 case TARGET_NR_socket
:
9932 return do_socket(arg1
, arg2
, arg3
);
9934 #ifdef TARGET_NR_socketpair
9935 case TARGET_NR_socketpair
:
9936 return do_socketpair(arg1
, arg2
, arg3
, arg4
);
9938 #ifdef TARGET_NR_setsockopt
9939 case TARGET_NR_setsockopt
:
9940 return do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
9942 #if defined(TARGET_NR_syslog)
9943 case TARGET_NR_syslog
:
9948 case TARGET_SYSLOG_ACTION_CLOSE
: /* Close log */
9949 case TARGET_SYSLOG_ACTION_OPEN
: /* Open log */
9950 case TARGET_SYSLOG_ACTION_CLEAR
: /* Clear ring buffer */
9951 case TARGET_SYSLOG_ACTION_CONSOLE_OFF
: /* Disable logging */
9952 case TARGET_SYSLOG_ACTION_CONSOLE_ON
: /* Enable logging */
9953 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL
: /* Set messages level */
9954 case TARGET_SYSLOG_ACTION_SIZE_UNREAD
: /* Number of chars */
9955 case TARGET_SYSLOG_ACTION_SIZE_BUFFER
: /* Size of the buffer */
9956 return get_errno(sys_syslog((int)arg1
, NULL
, (int)arg3
));
9957 case TARGET_SYSLOG_ACTION_READ
: /* Read from log */
9958 case TARGET_SYSLOG_ACTION_READ_CLEAR
: /* Read/clear msgs */
9959 case TARGET_SYSLOG_ACTION_READ_ALL
: /* Read last messages */
9962 return -TARGET_EINVAL
;
9967 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9969 return -TARGET_EFAULT
;
9971 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
9972 unlock_user(p
, arg2
, arg3
);
9976 return -TARGET_EINVAL
;
9981 case TARGET_NR_setitimer
:
9983 struct itimerval value
, ovalue
, *pvalue
;
9987 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
9988 || copy_from_user_timeval(&pvalue
->it_value
,
9989 arg2
+ sizeof(struct target_timeval
)))
9990 return -TARGET_EFAULT
;
9994 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
9995 if (!is_error(ret
) && arg3
) {
9996 if (copy_to_user_timeval(arg3
,
9997 &ovalue
.it_interval
)
9998 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
10000 return -TARGET_EFAULT
;
10004 case TARGET_NR_getitimer
:
10006 struct itimerval value
;
10008 ret
= get_errno(getitimer(arg1
, &value
));
10009 if (!is_error(ret
) && arg2
) {
10010 if (copy_to_user_timeval(arg2
,
10011 &value
.it_interval
)
10012 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
10014 return -TARGET_EFAULT
;
10018 #ifdef TARGET_NR_stat
10019 case TARGET_NR_stat
:
10020 if (!(p
= lock_user_string(arg1
))) {
10021 return -TARGET_EFAULT
;
10023 ret
= get_errno(stat(path(p
), &st
));
10024 unlock_user(p
, arg1
, 0);
10027 #ifdef TARGET_NR_lstat
10028 case TARGET_NR_lstat
:
10029 if (!(p
= lock_user_string(arg1
))) {
10030 return -TARGET_EFAULT
;
10032 ret
= get_errno(lstat(path(p
), &st
));
10033 unlock_user(p
, arg1
, 0);
10036 #ifdef TARGET_NR_fstat
10037 case TARGET_NR_fstat
:
10039 ret
= get_errno(fstat(arg1
, &st
));
10040 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10043 if (!is_error(ret
)) {
10044 struct target_stat
*target_st
;
10046 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
10047 return -TARGET_EFAULT
;
10048 memset(target_st
, 0, sizeof(*target_st
));
10049 __put_user(st
.st_dev
, &target_st
->st_dev
);
10050 __put_user(st
.st_ino
, &target_st
->st_ino
);
10051 __put_user(st
.st_mode
, &target_st
->st_mode
);
10052 __put_user(st
.st_uid
, &target_st
->st_uid
);
10053 __put_user(st
.st_gid
, &target_st
->st_gid
);
10054 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
10055 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
10056 __put_user(st
.st_size
, &target_st
->st_size
);
10057 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
10058 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
10059 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
10060 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
10061 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
10062 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
10063 defined(TARGET_STAT_HAVE_NSEC)
10064 __put_user(st
.st_atim
.tv_nsec
,
10065 &target_st
->target_st_atime_nsec
);
10066 __put_user(st
.st_mtim
.tv_nsec
,
10067 &target_st
->target_st_mtime_nsec
);
10068 __put_user(st
.st_ctim
.tv_nsec
,
10069 &target_st
->target_st_ctime_nsec
);
10071 unlock_user_struct(target_st
, arg2
, 1);
10076 case TARGET_NR_vhangup
:
10077 return get_errno(vhangup());
10078 #ifdef TARGET_NR_syscall
10079 case TARGET_NR_syscall
:
10080 return do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
10081 arg6
, arg7
, arg8
, 0);
10083 #if defined(TARGET_NR_wait4)
10084 case TARGET_NR_wait4
:
10087 abi_long status_ptr
= arg2
;
10088 struct rusage rusage
, *rusage_ptr
;
10089 abi_ulong target_rusage
= arg4
;
10090 abi_long rusage_err
;
10092 rusage_ptr
= &rusage
;
10095 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
10096 if (!is_error(ret
)) {
10097 if (status_ptr
&& ret
) {
10098 status
= host_to_target_waitstatus(status
);
10099 if (put_user_s32(status
, status_ptr
))
10100 return -TARGET_EFAULT
;
10102 if (target_rusage
) {
10103 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
10112 #ifdef TARGET_NR_swapoff
10113 case TARGET_NR_swapoff
:
10114 if (!(p
= lock_user_string(arg1
)))
10115 return -TARGET_EFAULT
;
10116 ret
= get_errno(swapoff(p
));
10117 unlock_user(p
, arg1
, 0);
10120 case TARGET_NR_sysinfo
:
10122 struct target_sysinfo
*target_value
;
10123 struct sysinfo value
;
10124 ret
= get_errno(sysinfo(&value
));
10125 if (!is_error(ret
) && arg1
)
10127 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
10128 return -TARGET_EFAULT
;
10129 __put_user(value
.uptime
, &target_value
->uptime
);
10130 __put_user(value
.loads
[0], &target_value
->loads
[0]);
10131 __put_user(value
.loads
[1], &target_value
->loads
[1]);
10132 __put_user(value
.loads
[2], &target_value
->loads
[2]);
10133 __put_user(value
.totalram
, &target_value
->totalram
);
10134 __put_user(value
.freeram
, &target_value
->freeram
);
10135 __put_user(value
.sharedram
, &target_value
->sharedram
);
10136 __put_user(value
.bufferram
, &target_value
->bufferram
);
10137 __put_user(value
.totalswap
, &target_value
->totalswap
);
10138 __put_user(value
.freeswap
, &target_value
->freeswap
);
10139 __put_user(value
.procs
, &target_value
->procs
);
10140 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
10141 __put_user(value
.freehigh
, &target_value
->freehigh
);
10142 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
10143 unlock_user_struct(target_value
, arg1
, 1);
10147 #ifdef TARGET_NR_ipc
10148 case TARGET_NR_ipc
:
10149 return do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
10151 #ifdef TARGET_NR_semget
10152 case TARGET_NR_semget
:
10153 return get_errno(semget(arg1
, arg2
, arg3
));
10155 #ifdef TARGET_NR_semop
10156 case TARGET_NR_semop
:
10157 return do_semtimedop(arg1
, arg2
, arg3
, 0, false);
10159 #ifdef TARGET_NR_semtimedop
10160 case TARGET_NR_semtimedop
:
10161 return do_semtimedop(arg1
, arg2
, arg3
, arg4
, false);
10163 #ifdef TARGET_NR_semtimedop_time64
10164 case TARGET_NR_semtimedop_time64
:
10165 return do_semtimedop(arg1
, arg2
, arg3
, arg4
, true);
10167 #ifdef TARGET_NR_semctl
10168 case TARGET_NR_semctl
:
10169 return do_semctl(arg1
, arg2
, arg3
, arg4
);
10171 #ifdef TARGET_NR_msgctl
10172 case TARGET_NR_msgctl
:
10173 return do_msgctl(arg1
, arg2
, arg3
);
10175 #ifdef TARGET_NR_msgget
10176 case TARGET_NR_msgget
:
10177 return get_errno(msgget(arg1
, arg2
));
10179 #ifdef TARGET_NR_msgrcv
10180 case TARGET_NR_msgrcv
:
10181 return do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
10183 #ifdef TARGET_NR_msgsnd
10184 case TARGET_NR_msgsnd
:
10185 return do_msgsnd(arg1
, arg2
, arg3
, arg4
);
10187 #ifdef TARGET_NR_shmget
10188 case TARGET_NR_shmget
:
10189 return get_errno(shmget(arg1
, arg2
, arg3
));
10191 #ifdef TARGET_NR_shmctl
10192 case TARGET_NR_shmctl
:
10193 return do_shmctl(arg1
, arg2
, arg3
);
10195 #ifdef TARGET_NR_shmat
10196 case TARGET_NR_shmat
:
10197 return do_shmat(cpu_env
, arg1
, arg2
, arg3
);
10199 #ifdef TARGET_NR_shmdt
10200 case TARGET_NR_shmdt
:
10201 return do_shmdt(arg1
);
10203 case TARGET_NR_fsync
:
10204 return get_errno(fsync(arg1
));
10205 case TARGET_NR_clone
:
10206 /* Linux manages to have three different orderings for its
10207 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10208 * match the kernel's CONFIG_CLONE_* settings.
10209 * Microblaze is further special in that it uses a sixth
10210 * implicit argument to clone for the TLS pointer.
10212 #if defined(TARGET_MICROBLAZE)
10213 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
10214 #elif defined(TARGET_CLONE_BACKWARDS)
10215 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
10216 #elif defined(TARGET_CLONE_BACKWARDS2)
10217 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
10219 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
10222 #ifdef __NR_exit_group
10223 /* new thread calls */
10224 case TARGET_NR_exit_group
:
10225 preexit_cleanup(cpu_env
, arg1
);
10226 return get_errno(exit_group(arg1
));
10228 case TARGET_NR_setdomainname
:
10229 if (!(p
= lock_user_string(arg1
)))
10230 return -TARGET_EFAULT
;
10231 ret
= get_errno(setdomainname(p
, arg2
));
10232 unlock_user(p
, arg1
, 0);
10234 case TARGET_NR_uname
:
10235 /* no need to transcode because we use the linux syscall */
10237 struct new_utsname
* buf
;
10239 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
10240 return -TARGET_EFAULT
;
10241 ret
= get_errno(sys_uname(buf
));
10242 if (!is_error(ret
)) {
10243 /* Overwrite the native machine name with whatever is being
10245 g_strlcpy(buf
->machine
, cpu_to_uname_machine(cpu_env
),
10246 sizeof(buf
->machine
));
10247 /* Allow the user to override the reported release. */
10248 if (qemu_uname_release
&& *qemu_uname_release
) {
10249 g_strlcpy(buf
->release
, qemu_uname_release
,
10250 sizeof(buf
->release
));
10253 unlock_user_struct(buf
, arg1
, 1);
10257 case TARGET_NR_modify_ldt
:
10258 return do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
10259 #if !defined(TARGET_X86_64)
10260 case TARGET_NR_vm86
:
10261 return do_vm86(cpu_env
, arg1
, arg2
);
10264 #if defined(TARGET_NR_adjtimex)
10265 case TARGET_NR_adjtimex
:
10267 struct timex host_buf
;
10269 if (target_to_host_timex(&host_buf
, arg1
) != 0) {
10270 return -TARGET_EFAULT
;
10272 ret
= get_errno(adjtimex(&host_buf
));
10273 if (!is_error(ret
)) {
10274 if (host_to_target_timex(arg1
, &host_buf
) != 0) {
10275 return -TARGET_EFAULT
;
10281 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10282 case TARGET_NR_clock_adjtime
:
10284 struct timex htx
, *phtx
= &htx
;
10286 if (target_to_host_timex(phtx
, arg2
) != 0) {
10287 return -TARGET_EFAULT
;
10289 ret
= get_errno(clock_adjtime(arg1
, phtx
));
10290 if (!is_error(ret
) && phtx
) {
10291 if (host_to_target_timex(arg2
, phtx
) != 0) {
10292 return -TARGET_EFAULT
;
10298 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10299 case TARGET_NR_clock_adjtime64
:
10303 if (target_to_host_timex64(&htx
, arg2
) != 0) {
10304 return -TARGET_EFAULT
;
10306 ret
= get_errno(clock_adjtime(arg1
, &htx
));
10307 if (!is_error(ret
) && host_to_target_timex64(arg2
, &htx
)) {
10308 return -TARGET_EFAULT
;
10313 case TARGET_NR_getpgid
:
10314 return get_errno(getpgid(arg1
));
10315 case TARGET_NR_fchdir
:
10316 return get_errno(fchdir(arg1
));
10317 case TARGET_NR_personality
:
10318 return get_errno(personality(arg1
));
10319 #ifdef TARGET_NR__llseek /* Not on alpha */
10320 case TARGET_NR__llseek
:
10323 #if !defined(__NR_llseek)
10324 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
10326 ret
= get_errno(res
);
10331 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
10333 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
10334 return -TARGET_EFAULT
;
10339 #ifdef TARGET_NR_getdents
10340 case TARGET_NR_getdents
:
10341 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10342 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
10344 struct target_dirent
*target_dirp
;
10345 struct linux_dirent
*dirp
;
10346 abi_long count
= arg3
;
10348 dirp
= g_try_malloc(count
);
10350 return -TARGET_ENOMEM
;
10353 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
10354 if (!is_error(ret
)) {
10355 struct linux_dirent
*de
;
10356 struct target_dirent
*tde
;
10358 int reclen
, treclen
;
10359 int count1
, tnamelen
;
10363 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10364 return -TARGET_EFAULT
;
10367 reclen
= de
->d_reclen
;
10368 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
10369 assert(tnamelen
>= 0);
10370 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
10371 assert(count1
+ treclen
<= count
);
10372 tde
->d_reclen
= tswap16(treclen
);
10373 tde
->d_ino
= tswapal(de
->d_ino
);
10374 tde
->d_off
= tswapal(de
->d_off
);
10375 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
10376 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
10378 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
10382 unlock_user(target_dirp
, arg2
, ret
);
10388 struct linux_dirent
*dirp
;
10389 abi_long count
= arg3
;
10391 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10392 return -TARGET_EFAULT
;
10393 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
10394 if (!is_error(ret
)) {
10395 struct linux_dirent
*de
;
10400 reclen
= de
->d_reclen
;
10403 de
->d_reclen
= tswap16(reclen
);
10404 tswapls(&de
->d_ino
);
10405 tswapls(&de
->d_off
);
10406 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
10410 unlock_user(dirp
, arg2
, ret
);
10414 /* Implement getdents in terms of getdents64 */
10416 struct linux_dirent64
*dirp
;
10417 abi_long count
= arg3
;
10419 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
10421 return -TARGET_EFAULT
;
10423 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
10424 if (!is_error(ret
)) {
10425 /* Convert the dirent64 structs to target dirent. We do this
10426 * in-place, since we can guarantee that a target_dirent is no
10427 * larger than a dirent64; however this means we have to be
10428 * careful to read everything before writing in the new format.
10430 struct linux_dirent64
*de
;
10431 struct target_dirent
*tde
;
10436 tde
= (struct target_dirent
*)dirp
;
10438 int namelen
, treclen
;
10439 int reclen
= de
->d_reclen
;
10440 uint64_t ino
= de
->d_ino
;
10441 int64_t off
= de
->d_off
;
10442 uint8_t type
= de
->d_type
;
10444 namelen
= strlen(de
->d_name
);
10445 treclen
= offsetof(struct target_dirent
, d_name
)
10447 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
10449 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
10450 tde
->d_ino
= tswapal(ino
);
10451 tde
->d_off
= tswapal(off
);
10452 tde
->d_reclen
= tswap16(treclen
);
10453 /* The target_dirent type is in what was formerly a padding
10454 * byte at the end of the structure:
10456 *(((char *)tde
) + treclen
- 1) = type
;
10458 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
10459 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
10465 unlock_user(dirp
, arg2
, ret
);
10469 #endif /* TARGET_NR_getdents */
10470 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10471 case TARGET_NR_getdents64
:
10473 struct linux_dirent64
*dirp
;
10474 abi_long count
= arg3
;
10475 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10476 return -TARGET_EFAULT
;
10477 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
10478 if (!is_error(ret
)) {
10479 struct linux_dirent64
*de
;
10484 reclen
= de
->d_reclen
;
10487 de
->d_reclen
= tswap16(reclen
);
10488 tswap64s((uint64_t *)&de
->d_ino
);
10489 tswap64s((uint64_t *)&de
->d_off
);
10490 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
10494 unlock_user(dirp
, arg2
, ret
);
10497 #endif /* TARGET_NR_getdents64 */
10498 #if defined(TARGET_NR__newselect)
10499 case TARGET_NR__newselect
:
10500 return do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
10502 #ifdef TARGET_NR_poll
10503 case TARGET_NR_poll
:
10504 return do_ppoll(arg1
, arg2
, arg3
, arg4
, arg5
, false, false);
10506 #ifdef TARGET_NR_ppoll
10507 case TARGET_NR_ppoll
:
10508 return do_ppoll(arg1
, arg2
, arg3
, arg4
, arg5
, true, false);
10510 #ifdef TARGET_NR_ppoll_time64
10511 case TARGET_NR_ppoll_time64
:
10512 return do_ppoll(arg1
, arg2
, arg3
, arg4
, arg5
, true, true);
10514 case TARGET_NR_flock
:
10515 /* NOTE: the flock constant seems to be the same for every
10517 return get_errno(safe_flock(arg1
, arg2
));
10518 case TARGET_NR_readv
:
10520 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10522 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
10523 unlock_iovec(vec
, arg2
, arg3
, 1);
10525 ret
= -host_to_target_errno(errno
);
10529 case TARGET_NR_writev
:
10531 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10533 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
10534 unlock_iovec(vec
, arg2
, arg3
, 0);
10536 ret
= -host_to_target_errno(errno
);
10540 #if defined(TARGET_NR_preadv)
10541 case TARGET_NR_preadv
:
10543 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10545 unsigned long low
, high
;
10547 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
10548 ret
= get_errno(safe_preadv(arg1
, vec
, arg3
, low
, high
));
10549 unlock_iovec(vec
, arg2
, arg3
, 1);
10551 ret
= -host_to_target_errno(errno
);
10556 #if defined(TARGET_NR_pwritev)
10557 case TARGET_NR_pwritev
:
10559 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10561 unsigned long low
, high
;
10563 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
10564 ret
= get_errno(safe_pwritev(arg1
, vec
, arg3
, low
, high
));
10565 unlock_iovec(vec
, arg2
, arg3
, 0);
10567 ret
= -host_to_target_errno(errno
);
10572 case TARGET_NR_getsid
:
10573 return get_errno(getsid(arg1
));
10574 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10575 case TARGET_NR_fdatasync
:
10576 return get_errno(fdatasync(arg1
));
10578 case TARGET_NR_sched_getaffinity
:
10580 unsigned int mask_size
;
10581 unsigned long *mask
;
10584 * sched_getaffinity needs multiples of ulong, so need to take
10585 * care of mismatches between target ulong and host ulong sizes.
10587 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10588 return -TARGET_EINVAL
;
10590 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10592 mask
= alloca(mask_size
);
10593 memset(mask
, 0, mask_size
);
10594 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
10596 if (!is_error(ret
)) {
10598 /* More data returned than the caller's buffer will fit.
10599 * This only happens if sizeof(abi_long) < sizeof(long)
10600 * and the caller passed us a buffer holding an odd number
10601 * of abi_longs. If the host kernel is actually using the
10602 * extra 4 bytes then fail EINVAL; otherwise we can just
10603 * ignore them and only copy the interesting part.
10605 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
10606 if (numcpus
> arg2
* 8) {
10607 return -TARGET_EINVAL
;
10612 if (host_to_target_cpu_mask(mask
, mask_size
, arg3
, ret
)) {
10613 return -TARGET_EFAULT
;
10618 case TARGET_NR_sched_setaffinity
:
10620 unsigned int mask_size
;
10621 unsigned long *mask
;
10624 * sched_setaffinity needs multiples of ulong, so need to take
10625 * care of mismatches between target ulong and host ulong sizes.
10627 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10628 return -TARGET_EINVAL
;
10630 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10631 mask
= alloca(mask_size
);
10633 ret
= target_to_host_cpu_mask(mask
, mask_size
, arg3
, arg2
);
10638 return get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
10640 case TARGET_NR_getcpu
:
10642 unsigned cpu
, node
;
10643 ret
= get_errno(sys_getcpu(arg1
? &cpu
: NULL
,
10644 arg2
? &node
: NULL
,
10646 if (is_error(ret
)) {
10649 if (arg1
&& put_user_u32(cpu
, arg1
)) {
10650 return -TARGET_EFAULT
;
10652 if (arg2
&& put_user_u32(node
, arg2
)) {
10653 return -TARGET_EFAULT
;
10657 case TARGET_NR_sched_setparam
:
10659 struct sched_param
*target_schp
;
10660 struct sched_param schp
;
10663 return -TARGET_EINVAL
;
10665 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
10666 return -TARGET_EFAULT
;
10667 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10668 unlock_user_struct(target_schp
, arg2
, 0);
10669 return get_errno(sched_setparam(arg1
, &schp
));
10671 case TARGET_NR_sched_getparam
:
10673 struct sched_param
*target_schp
;
10674 struct sched_param schp
;
10677 return -TARGET_EINVAL
;
10679 ret
= get_errno(sched_getparam(arg1
, &schp
));
10680 if (!is_error(ret
)) {
10681 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
10682 return -TARGET_EFAULT
;
10683 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
10684 unlock_user_struct(target_schp
, arg2
, 1);
10688 case TARGET_NR_sched_setscheduler
:
10690 struct sched_param
*target_schp
;
10691 struct sched_param schp
;
10693 return -TARGET_EINVAL
;
10695 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
10696 return -TARGET_EFAULT
;
10697 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10698 unlock_user_struct(target_schp
, arg3
, 0);
10699 return get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
10701 case TARGET_NR_sched_getscheduler
:
10702 return get_errno(sched_getscheduler(arg1
));
10703 case TARGET_NR_sched_yield
:
10704 return get_errno(sched_yield());
10705 case TARGET_NR_sched_get_priority_max
:
10706 return get_errno(sched_get_priority_max(arg1
));
10707 case TARGET_NR_sched_get_priority_min
:
10708 return get_errno(sched_get_priority_min(arg1
));
10709 #ifdef TARGET_NR_sched_rr_get_interval
10710 case TARGET_NR_sched_rr_get_interval
:
10712 struct timespec ts
;
10713 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
10714 if (!is_error(ret
)) {
10715 ret
= host_to_target_timespec(arg2
, &ts
);
10720 #ifdef TARGET_NR_sched_rr_get_interval_time64
10721 case TARGET_NR_sched_rr_get_interval_time64
:
10723 struct timespec ts
;
10724 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
10725 if (!is_error(ret
)) {
10726 ret
= host_to_target_timespec64(arg2
, &ts
);
10731 #if defined(TARGET_NR_nanosleep)
10732 case TARGET_NR_nanosleep
:
10734 struct timespec req
, rem
;
10735 target_to_host_timespec(&req
, arg1
);
10736 ret
= get_errno(safe_nanosleep(&req
, &rem
));
10737 if (is_error(ret
) && arg2
) {
10738 host_to_target_timespec(arg2
, &rem
);
10743 case TARGET_NR_prctl
:
10745 case PR_GET_PDEATHSIG
:
10748 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
10749 if (!is_error(ret
) && arg2
10750 && put_user_s32(deathsig
, arg2
)) {
10751 return -TARGET_EFAULT
;
10758 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
10760 return -TARGET_EFAULT
;
10762 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10763 arg3
, arg4
, arg5
));
10764 unlock_user(name
, arg2
, 16);
10769 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
10771 return -TARGET_EFAULT
;
10773 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10774 arg3
, arg4
, arg5
));
10775 unlock_user(name
, arg2
, 0);
10780 case TARGET_PR_GET_FP_MODE
:
10782 CPUMIPSState
*env
= ((CPUMIPSState
*)cpu_env
);
10784 if (env
->CP0_Status
& (1 << CP0St_FR
)) {
10785 ret
|= TARGET_PR_FP_MODE_FR
;
10787 if (env
->CP0_Config5
& (1 << CP0C5_FRE
)) {
10788 ret
|= TARGET_PR_FP_MODE_FRE
;
10792 case TARGET_PR_SET_FP_MODE
:
10794 CPUMIPSState
*env
= ((CPUMIPSState
*)cpu_env
);
10795 bool old_fr
= env
->CP0_Status
& (1 << CP0St_FR
);
10796 bool old_fre
= env
->CP0_Config5
& (1 << CP0C5_FRE
);
10797 bool new_fr
= arg2
& TARGET_PR_FP_MODE_FR
;
10798 bool new_fre
= arg2
& TARGET_PR_FP_MODE_FRE
;
10800 const unsigned int known_bits
= TARGET_PR_FP_MODE_FR
|
10801 TARGET_PR_FP_MODE_FRE
;
10803 /* If nothing to change, return right away, successfully. */
10804 if (old_fr
== new_fr
&& old_fre
== new_fre
) {
10807 /* Check the value is valid */
10808 if (arg2
& ~known_bits
) {
10809 return -TARGET_EOPNOTSUPP
;
10811 /* Setting FRE without FR is not supported. */
10812 if (new_fre
&& !new_fr
) {
10813 return -TARGET_EOPNOTSUPP
;
10815 if (new_fr
&& !(env
->active_fpu
.fcr0
& (1 << FCR0_F64
))) {
10816 /* FR1 is not supported */
10817 return -TARGET_EOPNOTSUPP
;
10819 if (!new_fr
&& (env
->active_fpu
.fcr0
& (1 << FCR0_F64
))
10820 && !(env
->CP0_Status_rw_bitmask
& (1 << CP0St_FR
))) {
10821 /* cannot set FR=0 */
10822 return -TARGET_EOPNOTSUPP
;
10824 if (new_fre
&& !(env
->active_fpu
.fcr0
& (1 << FCR0_FREP
))) {
10825 /* Cannot set FRE=1 */
10826 return -TARGET_EOPNOTSUPP
;
10830 fpr_t
*fpr
= env
->active_fpu
.fpr
;
10831 for (i
= 0; i
< 32 ; i
+= 2) {
10832 if (!old_fr
&& new_fr
) {
10833 fpr
[i
].w
[!FP_ENDIAN_IDX
] = fpr
[i
+ 1].w
[FP_ENDIAN_IDX
];
10834 } else if (old_fr
&& !new_fr
) {
10835 fpr
[i
+ 1].w
[FP_ENDIAN_IDX
] = fpr
[i
].w
[!FP_ENDIAN_IDX
];
10840 env
->CP0_Status
|= (1 << CP0St_FR
);
10841 env
->hflags
|= MIPS_HFLAG_F64
;
10843 env
->CP0_Status
&= ~(1 << CP0St_FR
);
10844 env
->hflags
&= ~MIPS_HFLAG_F64
;
10847 env
->CP0_Config5
|= (1 << CP0C5_FRE
);
10848 if (env
->active_fpu
.fcr0
& (1 << FCR0_FREP
)) {
10849 env
->hflags
|= MIPS_HFLAG_FRE
;
10852 env
->CP0_Config5
&= ~(1 << CP0C5_FRE
);
10853 env
->hflags
&= ~MIPS_HFLAG_FRE
;
10859 #ifdef TARGET_AARCH64
10860 case TARGET_PR_SVE_SET_VL
:
10862 * We cannot support either PR_SVE_SET_VL_ONEXEC or
10863 * PR_SVE_VL_INHERIT. Note the kernel definition
10864 * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10865 * even though the current architectural maximum is VQ=16.
10867 ret
= -TARGET_EINVAL
;
10868 if (cpu_isar_feature(aa64_sve
, env_archcpu(cpu_env
))
10869 && arg2
>= 0 && arg2
<= 512 * 16 && !(arg2
& 15)) {
10870 CPUARMState
*env
= cpu_env
;
10871 ARMCPU
*cpu
= env_archcpu(env
);
10872 uint32_t vq
, old_vq
;
10874 old_vq
= (env
->vfp
.zcr_el
[1] & 0xf) + 1;
10875 vq
= MAX(arg2
/ 16, 1);
10876 vq
= MIN(vq
, cpu
->sve_max_vq
);
10879 aarch64_sve_narrow_vq(env
, vq
);
10881 env
->vfp
.zcr_el
[1] = vq
- 1;
10882 arm_rebuild_hflags(env
);
10886 case TARGET_PR_SVE_GET_VL
:
10887 ret
= -TARGET_EINVAL
;
10889 ARMCPU
*cpu
= env_archcpu(cpu_env
);
10890 if (cpu_isar_feature(aa64_sve
, cpu
)) {
10891 ret
= ((cpu
->env
.vfp
.zcr_el
[1] & 0xf) + 1) * 16;
10895 case TARGET_PR_PAC_RESET_KEYS
:
10897 CPUARMState
*env
= cpu_env
;
10898 ARMCPU
*cpu
= env_archcpu(env
);
10900 if (arg3
|| arg4
|| arg5
) {
10901 return -TARGET_EINVAL
;
10903 if (cpu_isar_feature(aa64_pauth
, cpu
)) {
10904 int all
= (TARGET_PR_PAC_APIAKEY
| TARGET_PR_PAC_APIBKEY
|
10905 TARGET_PR_PAC_APDAKEY
| TARGET_PR_PAC_APDBKEY
|
10906 TARGET_PR_PAC_APGAKEY
);
10912 } else if (arg2
& ~all
) {
10913 return -TARGET_EINVAL
;
10915 if (arg2
& TARGET_PR_PAC_APIAKEY
) {
10916 ret
|= qemu_guest_getrandom(&env
->keys
.apia
,
10917 sizeof(ARMPACKey
), &err
);
10919 if (arg2
& TARGET_PR_PAC_APIBKEY
) {
10920 ret
|= qemu_guest_getrandom(&env
->keys
.apib
,
10921 sizeof(ARMPACKey
), &err
);
10923 if (arg2
& TARGET_PR_PAC_APDAKEY
) {
10924 ret
|= qemu_guest_getrandom(&env
->keys
.apda
,
10925 sizeof(ARMPACKey
), &err
);
10927 if (arg2
& TARGET_PR_PAC_APDBKEY
) {
10928 ret
|= qemu_guest_getrandom(&env
->keys
.apdb
,
10929 sizeof(ARMPACKey
), &err
);
10931 if (arg2
& TARGET_PR_PAC_APGAKEY
) {
10932 ret
|= qemu_guest_getrandom(&env
->keys
.apga
,
10933 sizeof(ARMPACKey
), &err
);
10937 * Some unknown failure in the crypto. The best
10938 * we can do is log it and fail the syscall.
10939 * The real syscall cannot fail this way.
10941 qemu_log_mask(LOG_UNIMP
,
10942 "PR_PAC_RESET_KEYS: Crypto failure: %s",
10943 error_get_pretty(err
));
10945 return -TARGET_EIO
;
10950 return -TARGET_EINVAL
;
10951 #endif /* AARCH64 */
10952 case PR_GET_SECCOMP
:
10953 case PR_SET_SECCOMP
:
10954 /* Disable seccomp to prevent the target disabling syscalls we
10956 return -TARGET_EINVAL
;
10958 /* Most prctl options have no pointer arguments */
10959 return get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
10962 #ifdef TARGET_NR_arch_prctl
10963 case TARGET_NR_arch_prctl
:
10964 return do_arch_prctl(cpu_env
, arg1
, arg2
);
10966 #ifdef TARGET_NR_pread64
10967 case TARGET_NR_pread64
:
10968 if (regpairs_aligned(cpu_env
, num
)) {
10972 if (arg2
== 0 && arg3
== 0) {
10973 /* Special-case NULL buffer and zero length, which should succeed */
10976 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10978 return -TARGET_EFAULT
;
10981 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10982 unlock_user(p
, arg2
, ret
);
10984 case TARGET_NR_pwrite64
:
10985 if (regpairs_aligned(cpu_env
, num
)) {
10989 if (arg2
== 0 && arg3
== 0) {
10990 /* Special-case NULL buffer and zero length, which should succeed */
10993 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
10995 return -TARGET_EFAULT
;
10998 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10999 unlock_user(p
, arg2
, 0);
11002 case TARGET_NR_getcwd
:
11003 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
11004 return -TARGET_EFAULT
;
11005 ret
= get_errno(sys_getcwd1(p
, arg2
));
11006 unlock_user(p
, arg1
, ret
);
11008 case TARGET_NR_capget
:
11009 case TARGET_NR_capset
:
11011 struct target_user_cap_header
*target_header
;
11012 struct target_user_cap_data
*target_data
= NULL
;
11013 struct __user_cap_header_struct header
;
11014 struct __user_cap_data_struct data
[2];
11015 struct __user_cap_data_struct
*dataptr
= NULL
;
11016 int i
, target_datalen
;
11017 int data_items
= 1;
11019 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
11020 return -TARGET_EFAULT
;
11022 header
.version
= tswap32(target_header
->version
);
11023 header
.pid
= tswap32(target_header
->pid
);
11025 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
11026 /* Version 2 and up takes pointer to two user_data structs */
11030 target_datalen
= sizeof(*target_data
) * data_items
;
11033 if (num
== TARGET_NR_capget
) {
11034 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
11036 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
11038 if (!target_data
) {
11039 unlock_user_struct(target_header
, arg1
, 0);
11040 return -TARGET_EFAULT
;
11043 if (num
== TARGET_NR_capset
) {
11044 for (i
= 0; i
< data_items
; i
++) {
11045 data
[i
].effective
= tswap32(target_data
[i
].effective
);
11046 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
11047 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
11054 if (num
== TARGET_NR_capget
) {
11055 ret
= get_errno(capget(&header
, dataptr
));
11057 ret
= get_errno(capset(&header
, dataptr
));
11060 /* The kernel always updates version for both capget and capset */
11061 target_header
->version
= tswap32(header
.version
);
11062 unlock_user_struct(target_header
, arg1
, 1);
11065 if (num
== TARGET_NR_capget
) {
11066 for (i
= 0; i
< data_items
; i
++) {
11067 target_data
[i
].effective
= tswap32(data
[i
].effective
);
11068 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
11069 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
11071 unlock_user(target_data
, arg2
, target_datalen
);
11073 unlock_user(target_data
, arg2
, 0);
11078 case TARGET_NR_sigaltstack
:
11079 return do_sigaltstack(arg1
, arg2
,
11080 get_sp_from_cpustate((CPUArchState
*)cpu_env
));
11082 #ifdef CONFIG_SENDFILE
11083 #ifdef TARGET_NR_sendfile
11084 case TARGET_NR_sendfile
:
11086 off_t
*offp
= NULL
;
11089 ret
= get_user_sal(off
, arg3
);
11090 if (is_error(ret
)) {
11095 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
11096 if (!is_error(ret
) && arg3
) {
11097 abi_long ret2
= put_user_sal(off
, arg3
);
11098 if (is_error(ret2
)) {
11105 #ifdef TARGET_NR_sendfile64
11106 case TARGET_NR_sendfile64
:
11108 off_t
*offp
= NULL
;
11111 ret
= get_user_s64(off
, arg3
);
11112 if (is_error(ret
)) {
11117 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
11118 if (!is_error(ret
) && arg3
) {
11119 abi_long ret2
= put_user_s64(off
, arg3
);
11120 if (is_error(ret2
)) {
11128 #ifdef TARGET_NR_vfork
11129 case TARGET_NR_vfork
:
11130 return get_errno(do_fork(cpu_env
,
11131 CLONE_VFORK
| CLONE_VM
| TARGET_SIGCHLD
,
11134 #ifdef TARGET_NR_ugetrlimit
11135 case TARGET_NR_ugetrlimit
:
11137 struct rlimit rlim
;
11138 int resource
= target_to_host_resource(arg1
);
11139 ret
= get_errno(getrlimit(resource
, &rlim
));
11140 if (!is_error(ret
)) {
11141 struct target_rlimit
*target_rlim
;
11142 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
11143 return -TARGET_EFAULT
;
11144 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
11145 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
11146 unlock_user_struct(target_rlim
, arg2
, 1);
11151 #ifdef TARGET_NR_truncate64
11152 case TARGET_NR_truncate64
:
11153 if (!(p
= lock_user_string(arg1
)))
11154 return -TARGET_EFAULT
;
11155 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
11156 unlock_user(p
, arg1
, 0);
11159 #ifdef TARGET_NR_ftruncate64
11160 case TARGET_NR_ftruncate64
:
11161 return target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
11163 #ifdef TARGET_NR_stat64
11164 case TARGET_NR_stat64
:
11165 if (!(p
= lock_user_string(arg1
))) {
11166 return -TARGET_EFAULT
;
11168 ret
= get_errno(stat(path(p
), &st
));
11169 unlock_user(p
, arg1
, 0);
11170 if (!is_error(ret
))
11171 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11174 #ifdef TARGET_NR_lstat64
11175 case TARGET_NR_lstat64
:
11176 if (!(p
= lock_user_string(arg1
))) {
11177 return -TARGET_EFAULT
;
11179 ret
= get_errno(lstat(path(p
), &st
));
11180 unlock_user(p
, arg1
, 0);
11181 if (!is_error(ret
))
11182 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11185 #ifdef TARGET_NR_fstat64
11186 case TARGET_NR_fstat64
:
11187 ret
= get_errno(fstat(arg1
, &st
));
11188 if (!is_error(ret
))
11189 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11192 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11193 #ifdef TARGET_NR_fstatat64
11194 case TARGET_NR_fstatat64
:
11196 #ifdef TARGET_NR_newfstatat
11197 case TARGET_NR_newfstatat
:
11199 if (!(p
= lock_user_string(arg2
))) {
11200 return -TARGET_EFAULT
;
11202 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
11203 unlock_user(p
, arg2
, 0);
11204 if (!is_error(ret
))
11205 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
11208 #if defined(TARGET_NR_statx)
11209 case TARGET_NR_statx
:
11211 struct target_statx
*target_stx
;
11215 p
= lock_user_string(arg2
);
11217 return -TARGET_EFAULT
;
11219 #if defined(__NR_statx)
11222 * It is assumed that struct statx is architecture independent.
11224 struct target_statx host_stx
;
11227 ret
= get_errno(sys_statx(dirfd
, p
, flags
, mask
, &host_stx
));
11228 if (!is_error(ret
)) {
11229 if (host_to_target_statx(&host_stx
, arg5
) != 0) {
11230 unlock_user(p
, arg2
, 0);
11231 return -TARGET_EFAULT
;
11235 if (ret
!= -TARGET_ENOSYS
) {
11236 unlock_user(p
, arg2
, 0);
11241 ret
= get_errno(fstatat(dirfd
, path(p
), &st
, flags
));
11242 unlock_user(p
, arg2
, 0);
11244 if (!is_error(ret
)) {
11245 if (!lock_user_struct(VERIFY_WRITE
, target_stx
, arg5
, 0)) {
11246 return -TARGET_EFAULT
;
11248 memset(target_stx
, 0, sizeof(*target_stx
));
11249 __put_user(major(st
.st_dev
), &target_stx
->stx_dev_major
);
11250 __put_user(minor(st
.st_dev
), &target_stx
->stx_dev_minor
);
11251 __put_user(st
.st_ino
, &target_stx
->stx_ino
);
11252 __put_user(st
.st_mode
, &target_stx
->stx_mode
);
11253 __put_user(st
.st_uid
, &target_stx
->stx_uid
);
11254 __put_user(st
.st_gid
, &target_stx
->stx_gid
);
11255 __put_user(st
.st_nlink
, &target_stx
->stx_nlink
);
11256 __put_user(major(st
.st_rdev
), &target_stx
->stx_rdev_major
);
11257 __put_user(minor(st
.st_rdev
), &target_stx
->stx_rdev_minor
);
11258 __put_user(st
.st_size
, &target_stx
->stx_size
);
11259 __put_user(st
.st_blksize
, &target_stx
->stx_blksize
);
11260 __put_user(st
.st_blocks
, &target_stx
->stx_blocks
);
11261 __put_user(st
.st_atime
, &target_stx
->stx_atime
.tv_sec
);
11262 __put_user(st
.st_mtime
, &target_stx
->stx_mtime
.tv_sec
);
11263 __put_user(st
.st_ctime
, &target_stx
->stx_ctime
.tv_sec
);
11264 unlock_user_struct(target_stx
, arg5
, 1);
11269 #ifdef TARGET_NR_lchown
11270 case TARGET_NR_lchown
:
11271 if (!(p
= lock_user_string(arg1
)))
11272 return -TARGET_EFAULT
;
11273 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
11274 unlock_user(p
, arg1
, 0);
11277 #ifdef TARGET_NR_getuid
11278 case TARGET_NR_getuid
:
11279 return get_errno(high2lowuid(getuid()));
11281 #ifdef TARGET_NR_getgid
11282 case TARGET_NR_getgid
:
11283 return get_errno(high2lowgid(getgid()));
11285 #ifdef TARGET_NR_geteuid
11286 case TARGET_NR_geteuid
:
11287 return get_errno(high2lowuid(geteuid()));
11289 #ifdef TARGET_NR_getegid
11290 case TARGET_NR_getegid
:
11291 return get_errno(high2lowgid(getegid()));
11293 case TARGET_NR_setreuid
:
11294 return get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
11295 case TARGET_NR_setregid
:
11296 return get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
11297 case TARGET_NR_getgroups
:
11299 int gidsetsize
= arg1
;
11300 target_id
*target_grouplist
;
11304 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11305 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11306 if (gidsetsize
== 0)
11308 if (!is_error(ret
)) {
11309 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
11310 if (!target_grouplist
)
11311 return -TARGET_EFAULT
;
11312 for(i
= 0;i
< ret
; i
++)
11313 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
11314 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
11318 case TARGET_NR_setgroups
:
11320 int gidsetsize
= arg1
;
11321 target_id
*target_grouplist
;
11322 gid_t
*grouplist
= NULL
;
11325 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11326 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
11327 if (!target_grouplist
) {
11328 return -TARGET_EFAULT
;
11330 for (i
= 0; i
< gidsetsize
; i
++) {
11331 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
11333 unlock_user(target_grouplist
, arg2
, 0);
11335 return get_errno(setgroups(gidsetsize
, grouplist
));
11337 case TARGET_NR_fchown
:
11338 return get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
11339 #if defined(TARGET_NR_fchownat)
11340 case TARGET_NR_fchownat
:
11341 if (!(p
= lock_user_string(arg2
)))
11342 return -TARGET_EFAULT
;
11343 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
11344 low2highgid(arg4
), arg5
));
11345 unlock_user(p
, arg2
, 0);
11348 #ifdef TARGET_NR_setresuid
11349 case TARGET_NR_setresuid
:
11350 return get_errno(sys_setresuid(low2highuid(arg1
),
11352 low2highuid(arg3
)));
11354 #ifdef TARGET_NR_getresuid
11355 case TARGET_NR_getresuid
:
11357 uid_t ruid
, euid
, suid
;
11358 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11359 if (!is_error(ret
)) {
11360 if (put_user_id(high2lowuid(ruid
), arg1
)
11361 || put_user_id(high2lowuid(euid
), arg2
)
11362 || put_user_id(high2lowuid(suid
), arg3
))
11363 return -TARGET_EFAULT
;
11368 #ifdef TARGET_NR_getresgid
11369 case TARGET_NR_setresgid
:
11370 return get_errno(sys_setresgid(low2highgid(arg1
),
11372 low2highgid(arg3
)));
11374 #ifdef TARGET_NR_getresgid
11375 case TARGET_NR_getresgid
:
11377 gid_t rgid
, egid
, sgid
;
11378 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11379 if (!is_error(ret
)) {
11380 if (put_user_id(high2lowgid(rgid
), arg1
)
11381 || put_user_id(high2lowgid(egid
), arg2
)
11382 || put_user_id(high2lowgid(sgid
), arg3
))
11383 return -TARGET_EFAULT
;
11388 #ifdef TARGET_NR_chown
11389 case TARGET_NR_chown
:
11390 if (!(p
= lock_user_string(arg1
)))
11391 return -TARGET_EFAULT
;
11392 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
11393 unlock_user(p
, arg1
, 0);
11396 case TARGET_NR_setuid
:
11397 return get_errno(sys_setuid(low2highuid(arg1
)));
11398 case TARGET_NR_setgid
:
11399 return get_errno(sys_setgid(low2highgid(arg1
)));
11400 case TARGET_NR_setfsuid
:
11401 return get_errno(setfsuid(arg1
));
11402 case TARGET_NR_setfsgid
:
11403 return get_errno(setfsgid(arg1
));
11405 #ifdef TARGET_NR_lchown32
11406 case TARGET_NR_lchown32
:
11407 if (!(p
= lock_user_string(arg1
)))
11408 return -TARGET_EFAULT
;
11409 ret
= get_errno(lchown(p
, arg2
, arg3
));
11410 unlock_user(p
, arg1
, 0);
11413 #ifdef TARGET_NR_getuid32
11414 case TARGET_NR_getuid32
:
11415 return get_errno(getuid());
11418 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11419 /* Alpha specific */
11420 case TARGET_NR_getxuid
:
11424 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
11426 return get_errno(getuid());
11428 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11429 /* Alpha specific */
11430 case TARGET_NR_getxgid
:
11434 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
11436 return get_errno(getgid());
11438 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11439 /* Alpha specific */
11440 case TARGET_NR_osf_getsysinfo
:
11441 ret
= -TARGET_EOPNOTSUPP
;
11443 case TARGET_GSI_IEEE_FP_CONTROL
:
11445 uint64_t fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11446 uint64_t swcr
= ((CPUAlphaState
*)cpu_env
)->swcr
;
11448 swcr
&= ~SWCR_STATUS_MASK
;
11449 swcr
|= (fpcr
>> 35) & SWCR_STATUS_MASK
;
11451 if (put_user_u64 (swcr
, arg2
))
11452 return -TARGET_EFAULT
;
11457 /* case GSI_IEEE_STATE_AT_SIGNAL:
11458 -- Not implemented in linux kernel.
11460 -- Retrieves current unaligned access state; not much used.
11461 case GSI_PROC_TYPE:
11462 -- Retrieves implver information; surely not used.
11463 case GSI_GET_HWRPB:
11464 -- Grabs a copy of the HWRPB; surely not used.
11469 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11470 /* Alpha specific */
11471 case TARGET_NR_osf_setsysinfo
:
11472 ret
= -TARGET_EOPNOTSUPP
;
11474 case TARGET_SSI_IEEE_FP_CONTROL
:
11476 uint64_t swcr
, fpcr
;
11478 if (get_user_u64 (swcr
, arg2
)) {
11479 return -TARGET_EFAULT
;
11483 * The kernel calls swcr_update_status to update the
11484 * status bits from the fpcr at every point that it
11485 * could be queried. Therefore, we store the status
11486 * bits only in FPCR.
11488 ((CPUAlphaState
*)cpu_env
)->swcr
11489 = swcr
& (SWCR_TRAP_ENABLE_MASK
| SWCR_MAP_MASK
);
11491 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11492 fpcr
&= ((uint64_t)FPCR_DYN_MASK
<< 32);
11493 fpcr
|= alpha_ieee_swcr_to_fpcr(swcr
);
11494 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11499 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
11501 uint64_t exc
, fpcr
, fex
;
11503 if (get_user_u64(exc
, arg2
)) {
11504 return -TARGET_EFAULT
;
11506 exc
&= SWCR_STATUS_MASK
;
11507 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11509 /* Old exceptions are not signaled. */
11510 fex
= alpha_ieee_fpcr_to_swcr(fpcr
);
11512 fex
>>= SWCR_STATUS_TO_EXCSUM_SHIFT
;
11513 fex
&= ((CPUArchState
*)cpu_env
)->swcr
;
11515 /* Update the hardware fpcr. */
11516 fpcr
|= alpha_ieee_swcr_to_fpcr(exc
);
11517 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11520 int si_code
= TARGET_FPE_FLTUNK
;
11521 target_siginfo_t info
;
11523 if (fex
& SWCR_TRAP_ENABLE_DNO
) {
11524 si_code
= TARGET_FPE_FLTUND
;
11526 if (fex
& SWCR_TRAP_ENABLE_INE
) {
11527 si_code
= TARGET_FPE_FLTRES
;
11529 if (fex
& SWCR_TRAP_ENABLE_UNF
) {
11530 si_code
= TARGET_FPE_FLTUND
;
11532 if (fex
& SWCR_TRAP_ENABLE_OVF
) {
11533 si_code
= TARGET_FPE_FLTOVF
;
11535 if (fex
& SWCR_TRAP_ENABLE_DZE
) {
11536 si_code
= TARGET_FPE_FLTDIV
;
11538 if (fex
& SWCR_TRAP_ENABLE_INV
) {
11539 si_code
= TARGET_FPE_FLTINV
;
11542 info
.si_signo
= SIGFPE
;
11544 info
.si_code
= si_code
;
11545 info
._sifields
._sigfault
._addr
11546 = ((CPUArchState
*)cpu_env
)->pc
;
11547 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
11548 QEMU_SI_FAULT
, &info
);
11554 /* case SSI_NVPAIRS:
11555 -- Used with SSIN_UACPROC to enable unaligned accesses.
11556 case SSI_IEEE_STATE_AT_SIGNAL:
11557 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11558 -- Not implemented in linux kernel
11563 #ifdef TARGET_NR_osf_sigprocmask
11564 /* Alpha specific. */
11565 case TARGET_NR_osf_sigprocmask
:
11569 sigset_t set
, oldset
;
11572 case TARGET_SIG_BLOCK
:
11575 case TARGET_SIG_UNBLOCK
:
11578 case TARGET_SIG_SETMASK
:
11582 return -TARGET_EINVAL
;
11585 target_to_host_old_sigset(&set
, &mask
);
11586 ret
= do_sigprocmask(how
, &set
, &oldset
);
11588 host_to_target_old_sigset(&mask
, &oldset
);
11595 #ifdef TARGET_NR_getgid32
11596 case TARGET_NR_getgid32
:
11597 return get_errno(getgid());
11599 #ifdef TARGET_NR_geteuid32
11600 case TARGET_NR_geteuid32
:
11601 return get_errno(geteuid());
11603 #ifdef TARGET_NR_getegid32
11604 case TARGET_NR_getegid32
:
11605 return get_errno(getegid());
11607 #ifdef TARGET_NR_setreuid32
11608 case TARGET_NR_setreuid32
:
11609 return get_errno(setreuid(arg1
, arg2
));
11611 #ifdef TARGET_NR_setregid32
11612 case TARGET_NR_setregid32
:
11613 return get_errno(setregid(arg1
, arg2
));
11615 #ifdef TARGET_NR_getgroups32
11616 case TARGET_NR_getgroups32
:
11618 int gidsetsize
= arg1
;
11619 uint32_t *target_grouplist
;
11623 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11624 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11625 if (gidsetsize
== 0)
11627 if (!is_error(ret
)) {
11628 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
11629 if (!target_grouplist
) {
11630 return -TARGET_EFAULT
;
11632 for(i
= 0;i
< ret
; i
++)
11633 target_grouplist
[i
] = tswap32(grouplist
[i
]);
11634 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
11639 #ifdef TARGET_NR_setgroups32
11640 case TARGET_NR_setgroups32
:
11642 int gidsetsize
= arg1
;
11643 uint32_t *target_grouplist
;
11647 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11648 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
11649 if (!target_grouplist
) {
11650 return -TARGET_EFAULT
;
11652 for(i
= 0;i
< gidsetsize
; i
++)
11653 grouplist
[i
] = tswap32(target_grouplist
[i
]);
11654 unlock_user(target_grouplist
, arg2
, 0);
11655 return get_errno(setgroups(gidsetsize
, grouplist
));
11658 #ifdef TARGET_NR_fchown32
11659 case TARGET_NR_fchown32
:
11660 return get_errno(fchown(arg1
, arg2
, arg3
));
11662 #ifdef TARGET_NR_setresuid32
11663 case TARGET_NR_setresuid32
:
11664 return get_errno(sys_setresuid(arg1
, arg2
, arg3
));
11666 #ifdef TARGET_NR_getresuid32
11667 case TARGET_NR_getresuid32
:
11669 uid_t ruid
, euid
, suid
;
11670 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11671 if (!is_error(ret
)) {
11672 if (put_user_u32(ruid
, arg1
)
11673 || put_user_u32(euid
, arg2
)
11674 || put_user_u32(suid
, arg3
))
11675 return -TARGET_EFAULT
;
11680 #ifdef TARGET_NR_setresgid32
11681 case TARGET_NR_setresgid32
:
11682 return get_errno(sys_setresgid(arg1
, arg2
, arg3
));
11684 #ifdef TARGET_NR_getresgid32
11685 case TARGET_NR_getresgid32
:
11687 gid_t rgid
, egid
, sgid
;
11688 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11689 if (!is_error(ret
)) {
11690 if (put_user_u32(rgid
, arg1
)
11691 || put_user_u32(egid
, arg2
)
11692 || put_user_u32(sgid
, arg3
))
11693 return -TARGET_EFAULT
;
11698 #ifdef TARGET_NR_chown32
11699 case TARGET_NR_chown32
:
11700 if (!(p
= lock_user_string(arg1
)))
11701 return -TARGET_EFAULT
;
11702 ret
= get_errno(chown(p
, arg2
, arg3
));
11703 unlock_user(p
, arg1
, 0);
11706 #ifdef TARGET_NR_setuid32
11707 case TARGET_NR_setuid32
:
11708 return get_errno(sys_setuid(arg1
));
11710 #ifdef TARGET_NR_setgid32
11711 case TARGET_NR_setgid32
:
11712 return get_errno(sys_setgid(arg1
));
11714 #ifdef TARGET_NR_setfsuid32
11715 case TARGET_NR_setfsuid32
:
11716 return get_errno(setfsuid(arg1
));
11718 #ifdef TARGET_NR_setfsgid32
11719 case TARGET_NR_setfsgid32
:
11720 return get_errno(setfsgid(arg1
));
11722 #ifdef TARGET_NR_mincore
11723 case TARGET_NR_mincore
:
11725 void *a
= lock_user(VERIFY_READ
, arg1
, arg2
, 0);
11727 return -TARGET_ENOMEM
;
11729 p
= lock_user_string(arg3
);
11731 ret
= -TARGET_EFAULT
;
11733 ret
= get_errno(mincore(a
, arg2
, p
));
11734 unlock_user(p
, arg3
, ret
);
11736 unlock_user(a
, arg1
, 0);
11740 #ifdef TARGET_NR_arm_fadvise64_64
11741 case TARGET_NR_arm_fadvise64_64
:
11742 /* arm_fadvise64_64 looks like fadvise64_64 but
11743 * with different argument order: fd, advice, offset, len
11744 * rather than the usual fd, offset, len, advice.
11745 * Note that offset and len are both 64-bit so appear as
11746 * pairs of 32-bit registers.
11748 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
11749 target_offset64(arg5
, arg6
), arg2
);
11750 return -host_to_target_errno(ret
);
11753 #if TARGET_ABI_BITS == 32
11755 #ifdef TARGET_NR_fadvise64_64
11756 case TARGET_NR_fadvise64_64
:
11757 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11758 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11766 /* 6 args: fd, offset (high, low), len (high, low), advice */
11767 if (regpairs_aligned(cpu_env
, num
)) {
11768 /* offset is in (3,4), len in (5,6) and advice in 7 */
11776 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
),
11777 target_offset64(arg4
, arg5
), arg6
);
11778 return -host_to_target_errno(ret
);
11781 #ifdef TARGET_NR_fadvise64
11782 case TARGET_NR_fadvise64
:
11783 /* 5 args: fd, offset (high, low), len, advice */
11784 if (regpairs_aligned(cpu_env
, num
)) {
11785 /* offset is in (3,4), len in 5 and advice in 6 */
11791 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
), arg4
, arg5
);
11792 return -host_to_target_errno(ret
);
11795 #else /* not a 32-bit ABI */
11796 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11797 #ifdef TARGET_NR_fadvise64_64
11798 case TARGET_NR_fadvise64_64
:
11800 #ifdef TARGET_NR_fadvise64
11801 case TARGET_NR_fadvise64
:
11803 #ifdef TARGET_S390X
11805 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
11806 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
11807 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
11808 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
11812 return -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
11814 #endif /* end of 64-bit ABI fadvise handling */
11816 #ifdef TARGET_NR_madvise
11817 case TARGET_NR_madvise
:
11818 /* A straight passthrough may not be safe because qemu sometimes
11819 turns private file-backed mappings into anonymous mappings.
11820 This will break MADV_DONTNEED.
11821 This is a hint, so ignoring and returning success is ok. */
11824 #ifdef TARGET_NR_fcntl64
11825 case TARGET_NR_fcntl64
:
11829 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
11830 to_flock64_fn
*copyto
= copy_to_user_flock64
;
11833 if (!((CPUARMState
*)cpu_env
)->eabi
) {
11834 copyfrom
= copy_from_user_oabi_flock64
;
11835 copyto
= copy_to_user_oabi_flock64
;
11839 cmd
= target_to_host_fcntl_cmd(arg2
);
11840 if (cmd
== -TARGET_EINVAL
) {
11845 case TARGET_F_GETLK64
:
11846 ret
= copyfrom(&fl
, arg3
);
11850 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11852 ret
= copyto(arg3
, &fl
);
11856 case TARGET_F_SETLK64
:
11857 case TARGET_F_SETLKW64
:
11858 ret
= copyfrom(&fl
, arg3
);
11862 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11865 ret
= do_fcntl(arg1
, arg2
, arg3
);
11871 #ifdef TARGET_NR_cacheflush
11872 case TARGET_NR_cacheflush
:
11873 /* self-modifying code is handled automatically, so nothing needed */
11876 #ifdef TARGET_NR_getpagesize
11877 case TARGET_NR_getpagesize
:
11878 return TARGET_PAGE_SIZE
;
11880 case TARGET_NR_gettid
:
11881 return get_errno(sys_gettid());
11882 #ifdef TARGET_NR_readahead
11883 case TARGET_NR_readahead
:
11884 #if TARGET_ABI_BITS == 32
11885 if (regpairs_aligned(cpu_env
, num
)) {
11890 ret
= get_errno(readahead(arg1
, target_offset64(arg2
, arg3
) , arg4
));
11892 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
11897 #ifdef TARGET_NR_setxattr
11898 case TARGET_NR_listxattr
:
11899 case TARGET_NR_llistxattr
:
11903 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11905 return -TARGET_EFAULT
;
11908 p
= lock_user_string(arg1
);
11910 if (num
== TARGET_NR_listxattr
) {
11911 ret
= get_errno(listxattr(p
, b
, arg3
));
11913 ret
= get_errno(llistxattr(p
, b
, arg3
));
11916 ret
= -TARGET_EFAULT
;
11918 unlock_user(p
, arg1
, 0);
11919 unlock_user(b
, arg2
, arg3
);
11922 case TARGET_NR_flistxattr
:
11926 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11928 return -TARGET_EFAULT
;
11931 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
11932 unlock_user(b
, arg2
, arg3
);
11935 case TARGET_NR_setxattr
:
11936 case TARGET_NR_lsetxattr
:
11938 void *p
, *n
, *v
= 0;
11940 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11942 return -TARGET_EFAULT
;
11945 p
= lock_user_string(arg1
);
11946 n
= lock_user_string(arg2
);
11948 if (num
== TARGET_NR_setxattr
) {
11949 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
11951 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
11954 ret
= -TARGET_EFAULT
;
11956 unlock_user(p
, arg1
, 0);
11957 unlock_user(n
, arg2
, 0);
11958 unlock_user(v
, arg3
, 0);
11961 case TARGET_NR_fsetxattr
:
11965 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11967 return -TARGET_EFAULT
;
11970 n
= lock_user_string(arg2
);
11972 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
11974 ret
= -TARGET_EFAULT
;
11976 unlock_user(n
, arg2
, 0);
11977 unlock_user(v
, arg3
, 0);
11980 case TARGET_NR_getxattr
:
11981 case TARGET_NR_lgetxattr
:
11983 void *p
, *n
, *v
= 0;
11985 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11987 return -TARGET_EFAULT
;
11990 p
= lock_user_string(arg1
);
11991 n
= lock_user_string(arg2
);
11993 if (num
== TARGET_NR_getxattr
) {
11994 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
11996 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
11999 ret
= -TARGET_EFAULT
;
12001 unlock_user(p
, arg1
, 0);
12002 unlock_user(n
, arg2
, 0);
12003 unlock_user(v
, arg3
, arg4
);
12006 case TARGET_NR_fgetxattr
:
12010 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
12012 return -TARGET_EFAULT
;
12015 n
= lock_user_string(arg2
);
12017 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
12019 ret
= -TARGET_EFAULT
;
12021 unlock_user(n
, arg2
, 0);
12022 unlock_user(v
, arg3
, arg4
);
12025 case TARGET_NR_removexattr
:
12026 case TARGET_NR_lremovexattr
:
12029 p
= lock_user_string(arg1
);
12030 n
= lock_user_string(arg2
);
12032 if (num
== TARGET_NR_removexattr
) {
12033 ret
= get_errno(removexattr(p
, n
));
12035 ret
= get_errno(lremovexattr(p
, n
));
12038 ret
= -TARGET_EFAULT
;
12040 unlock_user(p
, arg1
, 0);
12041 unlock_user(n
, arg2
, 0);
12044 case TARGET_NR_fremovexattr
:
12047 n
= lock_user_string(arg2
);
12049 ret
= get_errno(fremovexattr(arg1
, n
));
12051 ret
= -TARGET_EFAULT
;
12053 unlock_user(n
, arg2
, 0);
12057 #endif /* CONFIG_ATTR */
12058 #ifdef TARGET_NR_set_thread_area
12059 case TARGET_NR_set_thread_area
:
12060 #if defined(TARGET_MIPS)
12061 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
12063 #elif defined(TARGET_CRIS)
12065 ret
= -TARGET_EINVAL
;
12067 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
12071 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12072 return do_set_thread_area(cpu_env
, arg1
);
12073 #elif defined(TARGET_M68K)
12075 TaskState
*ts
= cpu
->opaque
;
12076 ts
->tp_value
= arg1
;
12080 return -TARGET_ENOSYS
;
12083 #ifdef TARGET_NR_get_thread_area
12084 case TARGET_NR_get_thread_area
:
12085 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12086 return do_get_thread_area(cpu_env
, arg1
);
12087 #elif defined(TARGET_M68K)
12089 TaskState
*ts
= cpu
->opaque
;
12090 return ts
->tp_value
;
12093 return -TARGET_ENOSYS
;
12096 #ifdef TARGET_NR_getdomainname
12097 case TARGET_NR_getdomainname
:
12098 return -TARGET_ENOSYS
;
12101 #ifdef TARGET_NR_clock_settime
12102 case TARGET_NR_clock_settime
:
12104 struct timespec ts
;
12106 ret
= target_to_host_timespec(&ts
, arg2
);
12107 if (!is_error(ret
)) {
12108 ret
= get_errno(clock_settime(arg1
, &ts
));
12113 #ifdef TARGET_NR_clock_settime64
12114 case TARGET_NR_clock_settime64
:
12116 struct timespec ts
;
12118 ret
= target_to_host_timespec64(&ts
, arg2
);
12119 if (!is_error(ret
)) {
12120 ret
= get_errno(clock_settime(arg1
, &ts
));
12125 #ifdef TARGET_NR_clock_gettime
12126 case TARGET_NR_clock_gettime
:
12128 struct timespec ts
;
12129 ret
= get_errno(clock_gettime(arg1
, &ts
));
12130 if (!is_error(ret
)) {
12131 ret
= host_to_target_timespec(arg2
, &ts
);
12136 #ifdef TARGET_NR_clock_gettime64
12137 case TARGET_NR_clock_gettime64
:
12139 struct timespec ts
;
12140 ret
= get_errno(clock_gettime(arg1
, &ts
));
12141 if (!is_error(ret
)) {
12142 ret
= host_to_target_timespec64(arg2
, &ts
);
12147 #ifdef TARGET_NR_clock_getres
12148 case TARGET_NR_clock_getres
:
12150 struct timespec ts
;
12151 ret
= get_errno(clock_getres(arg1
, &ts
));
12152 if (!is_error(ret
)) {
12153 host_to_target_timespec(arg2
, &ts
);
12158 #ifdef TARGET_NR_clock_getres_time64
12159 case TARGET_NR_clock_getres_time64
:
12161 struct timespec ts
;
12162 ret
= get_errno(clock_getres(arg1
, &ts
));
12163 if (!is_error(ret
)) {
12164 host_to_target_timespec64(arg2
, &ts
);
12169 #ifdef TARGET_NR_clock_nanosleep
12170 case TARGET_NR_clock_nanosleep
:
12172 struct timespec ts
;
12173 if (target_to_host_timespec(&ts
, arg3
)) {
12174 return -TARGET_EFAULT
;
12176 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
12177 &ts
, arg4
? &ts
: NULL
));
12179 * if the call is interrupted by a signal handler, it fails
12180 * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
12181 * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
12183 if (ret
== -TARGET_EINTR
&& arg4
&& arg2
!= TIMER_ABSTIME
&&
12184 host_to_target_timespec(arg4
, &ts
)) {
12185 return -TARGET_EFAULT
;
12191 #ifdef TARGET_NR_clock_nanosleep_time64
12192 case TARGET_NR_clock_nanosleep_time64
:
12194 struct timespec ts
;
12196 if (target_to_host_timespec64(&ts
, arg3
)) {
12197 return -TARGET_EFAULT
;
12200 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
12201 &ts
, arg4
? &ts
: NULL
));
12203 if (ret
== -TARGET_EINTR
&& arg4
&& arg2
!= TIMER_ABSTIME
&&
12204 host_to_target_timespec64(arg4
, &ts
)) {
12205 return -TARGET_EFAULT
;
12211 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12212 case TARGET_NR_set_tid_address
:
12213 return get_errno(set_tid_address((int *)g2h(arg1
)));
12216 case TARGET_NR_tkill
:
12217 return get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
12219 case TARGET_NR_tgkill
:
12220 return get_errno(safe_tgkill((int)arg1
, (int)arg2
,
12221 target_to_host_signal(arg3
)));
12223 #ifdef TARGET_NR_set_robust_list
12224 case TARGET_NR_set_robust_list
:
12225 case TARGET_NR_get_robust_list
:
12226 /* The ABI for supporting robust futexes has userspace pass
12227 * the kernel a pointer to a linked list which is updated by
12228 * userspace after the syscall; the list is walked by the kernel
12229 * when the thread exits. Since the linked list in QEMU guest
12230 * memory isn't a valid linked list for the host and we have
12231 * no way to reliably intercept the thread-death event, we can't
12232 * support these. Silently return ENOSYS so that guest userspace
12233 * falls back to a non-robust futex implementation (which should
12234 * be OK except in the corner case of the guest crashing while
12235 * holding a mutex that is shared with another process via
12238 return -TARGET_ENOSYS
;
12241 #if defined(TARGET_NR_utimensat)
12242 case TARGET_NR_utimensat
:
12244 struct timespec
*tsp
, ts
[2];
12248 if (target_to_host_timespec(ts
, arg3
)) {
12249 return -TARGET_EFAULT
;
12251 if (target_to_host_timespec(ts
+ 1, arg3
+
12252 sizeof(struct target_timespec
))) {
12253 return -TARGET_EFAULT
;
12258 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
12260 if (!(p
= lock_user_string(arg2
))) {
12261 return -TARGET_EFAULT
;
12263 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
12264 unlock_user(p
, arg2
, 0);
12269 #ifdef TARGET_NR_utimensat_time64
12270 case TARGET_NR_utimensat_time64
:
12272 struct timespec
*tsp
, ts
[2];
12276 if (target_to_host_timespec64(ts
, arg3
)) {
12277 return -TARGET_EFAULT
;
12279 if (target_to_host_timespec64(ts
+ 1, arg3
+
12280 sizeof(struct target__kernel_timespec
))) {
12281 return -TARGET_EFAULT
;
12286 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
12288 p
= lock_user_string(arg2
);
12290 return -TARGET_EFAULT
;
12292 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
12293 unlock_user(p
, arg2
, 0);
12298 #ifdef TARGET_NR_futex
12299 case TARGET_NR_futex
:
12300 return do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
12302 #ifdef TARGET_NR_futex_time64
12303 case TARGET_NR_futex_time64
:
12304 return do_futex_time64(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
12306 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12307 case TARGET_NR_inotify_init
:
12308 ret
= get_errno(sys_inotify_init());
12310 fd_trans_register(ret
, &target_inotify_trans
);
12314 #ifdef CONFIG_INOTIFY1
12315 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12316 case TARGET_NR_inotify_init1
:
12317 ret
= get_errno(sys_inotify_init1(target_to_host_bitmask(arg1
,
12318 fcntl_flags_tbl
)));
12320 fd_trans_register(ret
, &target_inotify_trans
);
12325 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12326 case TARGET_NR_inotify_add_watch
:
12327 p
= lock_user_string(arg2
);
12328 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
12329 unlock_user(p
, arg2
, 0);
12332 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12333 case TARGET_NR_inotify_rm_watch
:
12334 return get_errno(sys_inotify_rm_watch(arg1
, arg2
));
12337 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12338 case TARGET_NR_mq_open
:
12340 struct mq_attr posix_mq_attr
;
12341 struct mq_attr
*pposix_mq_attr
;
12344 host_flags
= target_to_host_bitmask(arg2
, fcntl_flags_tbl
);
12345 pposix_mq_attr
= NULL
;
12347 if (copy_from_user_mq_attr(&posix_mq_attr
, arg4
) != 0) {
12348 return -TARGET_EFAULT
;
12350 pposix_mq_attr
= &posix_mq_attr
;
12352 p
= lock_user_string(arg1
- 1);
12354 return -TARGET_EFAULT
;
12356 ret
= get_errno(mq_open(p
, host_flags
, arg3
, pposix_mq_attr
));
12357 unlock_user (p
, arg1
, 0);
12361 case TARGET_NR_mq_unlink
:
12362 p
= lock_user_string(arg1
- 1);
12364 return -TARGET_EFAULT
;
12366 ret
= get_errno(mq_unlink(p
));
12367 unlock_user (p
, arg1
, 0);
12370 #ifdef TARGET_NR_mq_timedsend
12371 case TARGET_NR_mq_timedsend
:
12373 struct timespec ts
;
12375 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
12377 if (target_to_host_timespec(&ts
, arg5
)) {
12378 return -TARGET_EFAULT
;
12380 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
12381 if (!is_error(ret
) && host_to_target_timespec(arg5
, &ts
)) {
12382 return -TARGET_EFAULT
;
12385 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
12387 unlock_user (p
, arg2
, arg3
);
12391 #ifdef TARGET_NR_mq_timedsend_time64
12392 case TARGET_NR_mq_timedsend_time64
:
12394 struct timespec ts
;
12396 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
12398 if (target_to_host_timespec64(&ts
, arg5
)) {
12399 return -TARGET_EFAULT
;
12401 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
12402 if (!is_error(ret
) && host_to_target_timespec64(arg5
, &ts
)) {
12403 return -TARGET_EFAULT
;
12406 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
12408 unlock_user(p
, arg2
, arg3
);
12413 #ifdef TARGET_NR_mq_timedreceive
12414 case TARGET_NR_mq_timedreceive
:
12416 struct timespec ts
;
12419 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
12421 if (target_to_host_timespec(&ts
, arg5
)) {
12422 return -TARGET_EFAULT
;
12424 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12426 if (!is_error(ret
) && host_to_target_timespec(arg5
, &ts
)) {
12427 return -TARGET_EFAULT
;
12430 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12433 unlock_user (p
, arg2
, arg3
);
12435 put_user_u32(prio
, arg4
);
12439 #ifdef TARGET_NR_mq_timedreceive_time64
12440 case TARGET_NR_mq_timedreceive_time64
:
12442 struct timespec ts
;
12445 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
12447 if (target_to_host_timespec64(&ts
, arg5
)) {
12448 return -TARGET_EFAULT
;
12450 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12452 if (!is_error(ret
) && host_to_target_timespec64(arg5
, &ts
)) {
12453 return -TARGET_EFAULT
;
12456 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12459 unlock_user(p
, arg2
, arg3
);
12461 put_user_u32(prio
, arg4
);
12467 /* Not implemented for now... */
12468 /* case TARGET_NR_mq_notify: */
12471 case TARGET_NR_mq_getsetattr
:
12473 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
12476 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
12477 ret
= get_errno(mq_setattr(arg1
, &posix_mq_attr_in
,
12478 &posix_mq_attr_out
));
12479 } else if (arg3
!= 0) {
12480 ret
= get_errno(mq_getattr(arg1
, &posix_mq_attr_out
));
12482 if (ret
== 0 && arg3
!= 0) {
12483 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
12489 #ifdef CONFIG_SPLICE
12490 #ifdef TARGET_NR_tee
12491 case TARGET_NR_tee
:
12493 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
12497 #ifdef TARGET_NR_splice
12498 case TARGET_NR_splice
:
12500 loff_t loff_in
, loff_out
;
12501 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
12503 if (get_user_u64(loff_in
, arg2
)) {
12504 return -TARGET_EFAULT
;
12506 ploff_in
= &loff_in
;
12509 if (get_user_u64(loff_out
, arg4
)) {
12510 return -TARGET_EFAULT
;
12512 ploff_out
= &loff_out
;
12514 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
12516 if (put_user_u64(loff_in
, arg2
)) {
12517 return -TARGET_EFAULT
;
12521 if (put_user_u64(loff_out
, arg4
)) {
12522 return -TARGET_EFAULT
;
12528 #ifdef TARGET_NR_vmsplice
12529 case TARGET_NR_vmsplice
:
12531 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
12533 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
12534 unlock_iovec(vec
, arg2
, arg3
, 0);
12536 ret
= -host_to_target_errno(errno
);
12541 #endif /* CONFIG_SPLICE */
12542 #ifdef CONFIG_EVENTFD
12543 #if defined(TARGET_NR_eventfd)
12544 case TARGET_NR_eventfd
:
12545 ret
= get_errno(eventfd(arg1
, 0));
12547 fd_trans_register(ret
, &target_eventfd_trans
);
12551 #if defined(TARGET_NR_eventfd2)
12552 case TARGET_NR_eventfd2
:
12554 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
12555 if (arg2
& TARGET_O_NONBLOCK
) {
12556 host_flags
|= O_NONBLOCK
;
12558 if (arg2
& TARGET_O_CLOEXEC
) {
12559 host_flags
|= O_CLOEXEC
;
12561 ret
= get_errno(eventfd(arg1
, host_flags
));
12563 fd_trans_register(ret
, &target_eventfd_trans
);
12568 #endif /* CONFIG_EVENTFD */
12569 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12570 case TARGET_NR_fallocate
:
12571 #if TARGET_ABI_BITS == 32
12572 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
12573 target_offset64(arg5
, arg6
)));
12575 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
12579 #if defined(CONFIG_SYNC_FILE_RANGE)
12580 #if defined(TARGET_NR_sync_file_range)
12581 case TARGET_NR_sync_file_range
:
12582 #if TARGET_ABI_BITS == 32
12583 #if defined(TARGET_MIPS)
12584 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12585 target_offset64(arg5
, arg6
), arg7
));
12587 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
12588 target_offset64(arg4
, arg5
), arg6
));
12589 #endif /* !TARGET_MIPS */
12591 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
12595 #if defined(TARGET_NR_sync_file_range2) || \
12596 defined(TARGET_NR_arm_sync_file_range)
12597 #if defined(TARGET_NR_sync_file_range2)
12598 case TARGET_NR_sync_file_range2
:
12600 #if defined(TARGET_NR_arm_sync_file_range)
12601 case TARGET_NR_arm_sync_file_range
:
12603 /* This is like sync_file_range but the arguments are reordered */
12604 #if TARGET_ABI_BITS == 32
12605 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12606 target_offset64(arg5
, arg6
), arg2
));
12608 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
12613 #if defined(TARGET_NR_signalfd4)
12614 case TARGET_NR_signalfd4
:
12615 return do_signalfd4(arg1
, arg2
, arg4
);
12617 #if defined(TARGET_NR_signalfd)
12618 case TARGET_NR_signalfd
:
12619 return do_signalfd4(arg1
, arg2
, 0);
12621 #if defined(CONFIG_EPOLL)
12622 #if defined(TARGET_NR_epoll_create)
12623 case TARGET_NR_epoll_create
:
12624 return get_errno(epoll_create(arg1
));
12626 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12627 case TARGET_NR_epoll_create1
:
12628 return get_errno(epoll_create1(target_to_host_bitmask(arg1
, fcntl_flags_tbl
)));
12630 #if defined(TARGET_NR_epoll_ctl)
12631 case TARGET_NR_epoll_ctl
:
12633 struct epoll_event ep
;
12634 struct epoll_event
*epp
= 0;
12636 if (arg2
!= EPOLL_CTL_DEL
) {
12637 struct target_epoll_event
*target_ep
;
12638 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
12639 return -TARGET_EFAULT
;
12641 ep
.events
= tswap32(target_ep
->events
);
12643 * The epoll_data_t union is just opaque data to the kernel,
12644 * so we transfer all 64 bits across and need not worry what
12645 * actual data type it is.
12647 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
12648 unlock_user_struct(target_ep
, arg4
, 0);
12651 * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
12652 * non-null pointer, even though this argument is ignored.
12657 return get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
12661 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12662 #if defined(TARGET_NR_epoll_wait)
12663 case TARGET_NR_epoll_wait
:
12665 #if defined(TARGET_NR_epoll_pwait)
12666 case TARGET_NR_epoll_pwait
:
12669 struct target_epoll_event
*target_ep
;
12670 struct epoll_event
*ep
;
12672 int maxevents
= arg3
;
12673 int timeout
= arg4
;
12675 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
12676 return -TARGET_EINVAL
;
12679 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
12680 maxevents
* sizeof(struct target_epoll_event
), 1);
12682 return -TARGET_EFAULT
;
12685 ep
= g_try_new(struct epoll_event
, maxevents
);
12687 unlock_user(target_ep
, arg2
, 0);
12688 return -TARGET_ENOMEM
;
12692 #if defined(TARGET_NR_epoll_pwait)
12693 case TARGET_NR_epoll_pwait
:
12695 target_sigset_t
*target_set
;
12696 sigset_t _set
, *set
= &_set
;
12699 if (arg6
!= sizeof(target_sigset_t
)) {
12700 ret
= -TARGET_EINVAL
;
12704 target_set
= lock_user(VERIFY_READ
, arg5
,
12705 sizeof(target_sigset_t
), 1);
12707 ret
= -TARGET_EFAULT
;
12710 target_to_host_sigset(set
, target_set
);
12711 unlock_user(target_set
, arg5
, 0);
12716 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12717 set
, SIGSET_T_SIZE
));
12721 #if defined(TARGET_NR_epoll_wait)
12722 case TARGET_NR_epoll_wait
:
12723 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12728 ret
= -TARGET_ENOSYS
;
12730 if (!is_error(ret
)) {
12732 for (i
= 0; i
< ret
; i
++) {
12733 target_ep
[i
].events
= tswap32(ep
[i
].events
);
12734 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
12736 unlock_user(target_ep
, arg2
,
12737 ret
* sizeof(struct target_epoll_event
));
12739 unlock_user(target_ep
, arg2
, 0);
12746 #ifdef TARGET_NR_prlimit64
12747 case TARGET_NR_prlimit64
:
12749 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12750 struct target_rlimit64
*target_rnew
, *target_rold
;
12751 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
12752 int resource
= target_to_host_resource(arg2
);
12754 if (arg3
&& (resource
!= RLIMIT_AS
&&
12755 resource
!= RLIMIT_DATA
&&
12756 resource
!= RLIMIT_STACK
)) {
12757 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
12758 return -TARGET_EFAULT
;
12760 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
12761 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
12762 unlock_user_struct(target_rnew
, arg3
, 0);
12766 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
12767 if (!is_error(ret
) && arg4
) {
12768 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
12769 return -TARGET_EFAULT
;
12771 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
12772 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
12773 unlock_user_struct(target_rold
, arg4
, 1);
12778 #ifdef TARGET_NR_gethostname
12779 case TARGET_NR_gethostname
:
12781 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
12783 ret
= get_errno(gethostname(name
, arg2
));
12784 unlock_user(name
, arg1
, arg2
);
12786 ret
= -TARGET_EFAULT
;
12791 #ifdef TARGET_NR_atomic_cmpxchg_32
12792 case TARGET_NR_atomic_cmpxchg_32
:
12794 /* should use start_exclusive from main.c */
12795 abi_ulong mem_value
;
12796 if (get_user_u32(mem_value
, arg6
)) {
12797 target_siginfo_t info
;
12798 info
.si_signo
= SIGSEGV
;
12800 info
.si_code
= TARGET_SEGV_MAPERR
;
12801 info
._sifields
._sigfault
._addr
= arg6
;
12802 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
12803 QEMU_SI_FAULT
, &info
);
12807 if (mem_value
== arg2
)
12808 put_user_u32(arg1
, arg6
);
12812 #ifdef TARGET_NR_atomic_barrier
12813 case TARGET_NR_atomic_barrier
:
12814 /* Like the kernel implementation and the
12815 qemu arm barrier, no-op this? */
12819 #ifdef TARGET_NR_timer_create
12820 case TARGET_NR_timer_create
:
12822 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12824 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
12827 int timer_index
= next_free_host_timer();
12829 if (timer_index
< 0) {
12830 ret
= -TARGET_EAGAIN
;
12832 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
12835 phost_sevp
= &host_sevp
;
12836 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
12842 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
12846 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
12847 return -TARGET_EFAULT
;
12855 #ifdef TARGET_NR_timer_settime
12856 case TARGET_NR_timer_settime
:
12858 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12859 * struct itimerspec * old_value */
12860 target_timer_t timerid
= get_timer_id(arg1
);
12864 } else if (arg3
== 0) {
12865 ret
= -TARGET_EINVAL
;
12867 timer_t htimer
= g_posix_timers
[timerid
];
12868 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
12870 if (target_to_host_itimerspec(&hspec_new
, arg3
)) {
12871 return -TARGET_EFAULT
;
12874 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
12875 if (arg4
&& host_to_target_itimerspec(arg4
, &hspec_old
)) {
12876 return -TARGET_EFAULT
;
12883 #ifdef TARGET_NR_timer_settime64
12884 case TARGET_NR_timer_settime64
:
12886 target_timer_t timerid
= get_timer_id(arg1
);
12890 } else if (arg3
== 0) {
12891 ret
= -TARGET_EINVAL
;
12893 timer_t htimer
= g_posix_timers
[timerid
];
12894 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
12896 if (target_to_host_itimerspec64(&hspec_new
, arg3
)) {
12897 return -TARGET_EFAULT
;
12900 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
12901 if (arg4
&& host_to_target_itimerspec64(arg4
, &hspec_old
)) {
12902 return -TARGET_EFAULT
;
12909 #ifdef TARGET_NR_timer_gettime
12910 case TARGET_NR_timer_gettime
:
12912 /* args: timer_t timerid, struct itimerspec *curr_value */
12913 target_timer_t timerid
= get_timer_id(arg1
);
12917 } else if (!arg2
) {
12918 ret
= -TARGET_EFAULT
;
12920 timer_t htimer
= g_posix_timers
[timerid
];
12921 struct itimerspec hspec
;
12922 ret
= get_errno(timer_gettime(htimer
, &hspec
));
12924 if (host_to_target_itimerspec(arg2
, &hspec
)) {
12925 ret
= -TARGET_EFAULT
;
12932 #ifdef TARGET_NR_timer_gettime64
12933 case TARGET_NR_timer_gettime64
:
12935 /* args: timer_t timerid, struct itimerspec64 *curr_value */
12936 target_timer_t timerid
= get_timer_id(arg1
);
12940 } else if (!arg2
) {
12941 ret
= -TARGET_EFAULT
;
12943 timer_t htimer
= g_posix_timers
[timerid
];
12944 struct itimerspec hspec
;
12945 ret
= get_errno(timer_gettime(htimer
, &hspec
));
12947 if (host_to_target_itimerspec64(arg2
, &hspec
)) {
12948 ret
= -TARGET_EFAULT
;
12955 #ifdef TARGET_NR_timer_getoverrun
12956 case TARGET_NR_timer_getoverrun
:
12958 /* args: timer_t timerid */
12959 target_timer_t timerid
= get_timer_id(arg1
);
12964 timer_t htimer
= g_posix_timers
[timerid
];
12965 ret
= get_errno(timer_getoverrun(htimer
));
12971 #ifdef TARGET_NR_timer_delete
12972 case TARGET_NR_timer_delete
:
12974 /* args: timer_t timerid */
12975 target_timer_t timerid
= get_timer_id(arg1
);
12980 timer_t htimer
= g_posix_timers
[timerid
];
12981 ret
= get_errno(timer_delete(htimer
));
12982 g_posix_timers
[timerid
] = 0;
12988 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12989 case TARGET_NR_timerfd_create
:
12990 return get_errno(timerfd_create(arg1
,
12991 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
12994 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12995 case TARGET_NR_timerfd_gettime
:
12997 struct itimerspec its_curr
;
12999 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
13001 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
13002 return -TARGET_EFAULT
;
13008 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13009 case TARGET_NR_timerfd_gettime64
:
13011 struct itimerspec its_curr
;
13013 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
13015 if (arg2
&& host_to_target_itimerspec64(arg2
, &its_curr
)) {
13016 return -TARGET_EFAULT
;
13022 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13023 case TARGET_NR_timerfd_settime
:
13025 struct itimerspec its_new
, its_old
, *p_new
;
13028 if (target_to_host_itimerspec(&its_new
, arg3
)) {
13029 return -TARGET_EFAULT
;
13036 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
13038 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
13039 return -TARGET_EFAULT
;
13045 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13046 case TARGET_NR_timerfd_settime64
:
13048 struct itimerspec its_new
, its_old
, *p_new
;
13051 if (target_to_host_itimerspec64(&its_new
, arg3
)) {
13052 return -TARGET_EFAULT
;
13059 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
13061 if (arg4
&& host_to_target_itimerspec64(arg4
, &its_old
)) {
13062 return -TARGET_EFAULT
;
13068 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13069 case TARGET_NR_ioprio_get
:
13070 return get_errno(ioprio_get(arg1
, arg2
));
13073 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13074 case TARGET_NR_ioprio_set
:
13075 return get_errno(ioprio_set(arg1
, arg2
, arg3
));
13078 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13079 case TARGET_NR_setns
:
13080 return get_errno(setns(arg1
, arg2
));
13082 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13083 case TARGET_NR_unshare
:
13084 return get_errno(unshare(arg1
));
13086 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13087 case TARGET_NR_kcmp
:
13088 return get_errno(kcmp(arg1
, arg2
, arg3
, arg4
, arg5
));
13090 #ifdef TARGET_NR_swapcontext
13091 case TARGET_NR_swapcontext
:
13092 /* PowerPC specific. */
13093 return do_swapcontext(cpu_env
, arg1
, arg2
, arg3
);
13095 #ifdef TARGET_NR_memfd_create
13096 case TARGET_NR_memfd_create
:
13097 p
= lock_user_string(arg1
);
13099 return -TARGET_EFAULT
;
13101 ret
= get_errno(memfd_create(p
, arg2
));
13102 fd_trans_unregister(ret
);
13103 unlock_user(p
, arg1
, 0);
13106 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13107 case TARGET_NR_membarrier
:
13108 return get_errno(membarrier(arg1
, arg2
));
13111 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13112 case TARGET_NR_copy_file_range
:
13114 loff_t inoff
, outoff
;
13115 loff_t
*pinoff
= NULL
, *poutoff
= NULL
;
13118 if (get_user_u64(inoff
, arg2
)) {
13119 return -TARGET_EFAULT
;
13124 if (get_user_u64(outoff
, arg4
)) {
13125 return -TARGET_EFAULT
;
13129 ret
= get_errno(safe_copy_file_range(arg1
, pinoff
, arg3
, poutoff
,
13131 if (!is_error(ret
) && ret
> 0) {
13133 if (put_user_u64(inoff
, arg2
)) {
13134 return -TARGET_EFAULT
;
13138 if (put_user_u64(outoff
, arg4
)) {
13139 return -TARGET_EFAULT
;
13148 qemu_log_mask(LOG_UNIMP
, "Unsupported syscall: %d\n", num
);
13149 return -TARGET_ENOSYS
;
13154 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
13155 abi_long arg2
, abi_long arg3
, abi_long arg4
,
13156 abi_long arg5
, abi_long arg6
, abi_long arg7
,
13159 CPUState
*cpu
= env_cpu(cpu_env
);
13162 #ifdef DEBUG_ERESTARTSYS
13163 /* Debug-only code for exercising the syscall-restart code paths
13164 * in the per-architecture cpu main loops: restart every syscall
13165 * the guest makes once before letting it through.
13171 return -TARGET_ERESTARTSYS
;
13176 record_syscall_start(cpu
, num
, arg1
,
13177 arg2
, arg3
, arg4
, arg5
, arg6
, arg7
, arg8
);
13179 if (unlikely(qemu_loglevel_mask(LOG_STRACE
))) {
13180 print_syscall(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
13183 ret
= do_syscall1(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
,
13184 arg5
, arg6
, arg7
, arg8
);
13186 if (unlikely(qemu_loglevel_mask(LOG_STRACE
))) {
13187 print_syscall_ret(cpu_env
, num
, ret
, arg1
, arg2
,
13188 arg3
, arg4
, arg5
, arg6
);
13191 record_syscall_return(cpu
, num
, ret
);