4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
31 #include <sys/mount.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
38 #include <linux/capability.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
46 #include <sys/times.h>
49 #include <sys/statfs.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <netinet/udp.h>
57 #include <linux/wireless.h>
58 #include <linux/icmp.h>
59 #include <linux/icmpv6.h>
60 #include <linux/if_tun.h>
61 #include <linux/errqueue.h>
62 #include <linux/random.h>
64 #include <sys/timerfd.h>
67 #include <sys/eventfd.h>
70 #include <sys/epoll.h>
73 #include "qemu/xattr.h"
75 #ifdef CONFIG_SENDFILE
76 #include <sys/sendfile.h>
78 #ifdef HAVE_SYS_KCOV_H
82 #define termios host_termios
83 #define winsize host_winsize
84 #define termio host_termio
85 #define sgttyb host_sgttyb /* same as target */
86 #define tchars host_tchars /* same as target */
87 #define ltchars host_ltchars /* same as target */
89 #include <linux/termios.h>
90 #include <linux/unistd.h>
91 #include <linux/cdrom.h>
92 #include <linux/hdreg.h>
93 #include <linux/soundcard.h>
95 #include <linux/mtio.h>
98 #if defined(CONFIG_FIEMAP)
99 #include <linux/fiemap.h>
101 #include <linux/fb.h>
102 #if defined(CONFIG_USBFS)
103 #include <linux/usbdevice_fs.h>
104 #include <linux/usb/ch9.h>
106 #include <linux/vt.h>
107 #include <linux/dm-ioctl.h>
108 #include <linux/reboot.h>
109 #include <linux/route.h>
110 #include <linux/filter.h>
111 #include <linux/blkpg.h>
112 #include <netpacket/packet.h>
113 #include <linux/netlink.h>
114 #include <linux/if_alg.h>
115 #include <linux/rtc.h>
116 #include <sound/asound.h>
118 #include <linux/btrfs.h>
121 #include <libdrm/drm.h>
122 #include <libdrm/i915_drm.h>
124 #include "linux_loop.h"
128 #include "qemu/guest-random.h"
129 #include "qemu/selfmap.h"
130 #include "user/syscall-trace.h"
131 #include "qapi/error.h"
132 #include "fd-trans.h"
136 #define CLONE_IO 0x80000000 /* Clone io context */
139 /* We can't directly call the host clone syscall, because this will
140 * badly confuse libc (breaking mutexes, for example). So we must
141 * divide clone flags into:
142 * * flag combinations that look like pthread_create()
143 * * flag combinations that look like fork()
144 * * flags we can implement within QEMU itself
145 * * flags we can't support and will return an error for
147 /* For thread creation, all these flags must be present; for
148 * fork, none must be present.
150 #define CLONE_THREAD_FLAGS \
151 (CLONE_VM | CLONE_FS | CLONE_FILES | \
152 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
154 /* These flags are ignored:
155 * CLONE_DETACHED is now ignored by the kernel;
156 * CLONE_IO is just an optimisation hint to the I/O scheduler
158 #define CLONE_IGNORED_FLAGS \
159 (CLONE_DETACHED | CLONE_IO)
161 /* Flags for fork which we can implement within QEMU itself */
162 #define CLONE_OPTIONAL_FORK_FLAGS \
163 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
164 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
166 /* Flags for thread creation which we can implement within QEMU itself */
167 #define CLONE_OPTIONAL_THREAD_FLAGS \
168 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
169 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
171 #define CLONE_INVALID_FORK_FLAGS \
172 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
174 #define CLONE_INVALID_THREAD_FLAGS \
175 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
176 CLONE_IGNORED_FLAGS))
178 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
179 * have almost all been allocated. We cannot support any of
180 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
181 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
182 * The checks against the invalid thread masks above will catch these.
183 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
186 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
187 * once. This exercises the codepaths for restart.
189 //#define DEBUG_ERESTARTSYS
191 //#include <linux/msdos_fs.h>
192 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
193 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
/* Wrappers that invoke a raw host syscall by number.  Each macro defines a
 * static function "name"; the corresponding __NR_##name constant must be in
 * scope (see the __NR_sys_* aliases below).  The raw return convention of
 * syscall(2) applies: -1 with errno set on failure.
 */
#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
250 #define __NR_sys_uname __NR_uname
251 #define __NR_sys_getcwd1 __NR_getcwd
252 #define __NR_sys_getdents __NR_getdents
253 #define __NR_sys_getdents64 __NR_getdents64
254 #define __NR_sys_getpriority __NR_getpriority
255 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
256 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
257 #define __NR_sys_syslog __NR_syslog
258 #if defined(__NR_futex)
259 # define __NR_sys_futex __NR_futex
261 #if defined(__NR_futex_time64)
262 # define __NR_sys_futex_time64 __NR_futex_time64
264 #define __NR_sys_inotify_init __NR_inotify_init
265 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
266 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
267 #define __NR_sys_statx __NR_statx
269 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
270 #define __NR__llseek __NR_lseek
273 /* Newer kernel ports have llseek() instead of _llseek() */
274 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
275 #define TARGET_NR__llseek TARGET_NR_llseek
278 #define __NR_sys_gettid __NR_gettid
279 _syscall0(int, sys_gettid
)
281 /* For the 64-bit guest on 32-bit host case we must emulate
282 * getdents using getdents64, because otherwise the host
283 * might hand us back more dirent records than we can fit
284 * into the guest buffer after structure format conversion.
285 * Otherwise we emulate getdents with getdents if the host has it.
287 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
288 #define EMULATE_GETDENTS_WITH_GETDENTS
291 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
292 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
294 #if (defined(TARGET_NR_getdents) && \
295 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
296 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
297 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
299 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
300 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
301 loff_t
*, res
, uint
, wh
);
303 _syscall3(int, sys_rt_sigqueueinfo
, pid_t
, pid
, int, sig
, siginfo_t
*, uinfo
)
304 _syscall4(int, sys_rt_tgsigqueueinfo
, pid_t
, pid
, pid_t
, tid
, int, sig
,
306 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
307 #ifdef __NR_exit_group
308 _syscall1(int,exit_group
,int,error_code
)
310 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
311 _syscall1(int,set_tid_address
,int *,tidptr
)
313 #if defined(__NR_futex)
314 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
315 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
317 #if defined(__NR_futex_time64)
318 _syscall6(int,sys_futex_time64
,int *,uaddr
,int,op
,int,val
,
319 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
321 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
322 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
323 unsigned long *, user_mask_ptr
);
324 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
325 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
326 unsigned long *, user_mask_ptr
);
327 #define __NR_sys_getcpu __NR_getcpu
328 _syscall3(int, sys_getcpu
, unsigned *, cpu
, unsigned *, node
, void *, tcache
);
329 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
331 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
332 struct __user_cap_data_struct
*, data
);
333 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
334 struct __user_cap_data_struct
*, data
);
335 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
336 _syscall2(int, ioprio_get
, int, which
, int, who
)
338 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
339 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
341 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
342 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
345 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
346 _syscall5(int, kcmp
, pid_t
, pid1
, pid_t
, pid2
, int, type
,
347 unsigned long, idx1
, unsigned long, idx2
)
351 * It is assumed that struct statx is architecture independent.
353 #if defined(TARGET_NR_statx) && defined(__NR_statx)
354 _syscall5(int, sys_statx
, int, dirfd
, const char *, pathname
, int, flags
,
355 unsigned int, mask
, struct target_statx
*, statxbuf
)
357 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
358 _syscall2(int, membarrier
, int, cmd
, int, flags
)
361 static bitmask_transtbl fcntl_flags_tbl
[] = {
362 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
363 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
364 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
365 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
366 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
367 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
368 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
369 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
370 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
371 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
372 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
373 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
374 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
375 #if defined(O_DIRECT)
376 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
378 #if defined(O_NOATIME)
379 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
381 #if defined(O_CLOEXEC)
382 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
385 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
387 #if defined(O_TMPFILE)
388 { TARGET_O_TMPFILE
, TARGET_O_TMPFILE
, O_TMPFILE
, O_TMPFILE
},
390 /* Don't terminate the list prematurely on 64-bit host+guest. */
391 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
392 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
397 _syscall2(int, sys_getcwd1
, char *, buf
, size_t, size
)
399 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
400 #if defined(__NR_utimensat)
401 #define __NR_sys_utimensat __NR_utimensat
402 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
403 const struct timespec
*,tsp
,int,flags
)
405 static int sys_utimensat(int dirfd
, const char *pathname
,
406 const struct timespec times
[2], int flags
)
412 #endif /* TARGET_NR_utimensat */
414 #ifdef TARGET_NR_renameat2
415 #if defined(__NR_renameat2)
416 #define __NR_sys_renameat2 __NR_renameat2
417 _syscall5(int, sys_renameat2
, int, oldfd
, const char *, old
, int, newfd
,
418 const char *, new, unsigned int, flags
)
420 static int sys_renameat2(int oldfd
, const char *old
,
421 int newfd
, const char *new, int flags
)
424 return renameat(oldfd
, old
, newfd
, new);
430 #endif /* TARGET_NR_renameat2 */
432 #ifdef CONFIG_INOTIFY
433 #include <sys/inotify.h>
435 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
436 static int sys_inotify_init(void)
438 return (inotify_init());
441 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
442 static int sys_inotify_add_watch(int fd
,const char *pathname
, int32_t mask
)
444 return (inotify_add_watch(fd
, pathname
, mask
));
447 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
448 static int sys_inotify_rm_watch(int fd
, int32_t wd
)
450 return (inotify_rm_watch(fd
, wd
));
453 #ifdef CONFIG_INOTIFY1
454 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
455 static int sys_inotify_init1(int flags
)
457 return (inotify_init1(flags
));
462 /* Userspace can usually survive runtime without inotify */
463 #undef TARGET_NR_inotify_init
464 #undef TARGET_NR_inotify_init1
465 #undef TARGET_NR_inotify_add_watch
466 #undef TARGET_NR_inotify_rm_watch
467 #endif /* CONFIG_INOTIFY */
469 #if defined(TARGET_NR_prlimit64)
470 #ifndef __NR_prlimit64
471 # define __NR_prlimit64 -1
473 #define __NR_sys_prlimit64 __NR_prlimit64
474 /* The glibc rlimit structure may not be that used by the underlying syscall */
475 struct host_rlimit64
{
479 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
480 const struct host_rlimit64
*, new_limit
,
481 struct host_rlimit64
*, old_limit
)
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

/* Return the index of the first free slot in g_posix_timers, marking it
 * in-use with the sentinel value (timer_t)1, or -1 if all slots are taken.
 * FIXME: Does finding the next free slot require a lock?
 */
static inline int next_free_host_timer(void)
{
    int k;
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            /* Reserve the slot; the real timer id overwrites this later. */
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
503 #define ERRNO_TABLE_SIZE 1200
505 /* target_to_host_errno_table[] is initialized from
506 * host_to_target_errno_table[] in syscall_init(). */
507 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
511 * This list is the union of errno values overridden in asm-<arch>/errno.h
512 * minus the errnos that are not actually generic to all archs.
514 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
515 [EAGAIN
] = TARGET_EAGAIN
,
516 [EIDRM
] = TARGET_EIDRM
,
517 [ECHRNG
] = TARGET_ECHRNG
,
518 [EL2NSYNC
] = TARGET_EL2NSYNC
,
519 [EL3HLT
] = TARGET_EL3HLT
,
520 [EL3RST
] = TARGET_EL3RST
,
521 [ELNRNG
] = TARGET_ELNRNG
,
522 [EUNATCH
] = TARGET_EUNATCH
,
523 [ENOCSI
] = TARGET_ENOCSI
,
524 [EL2HLT
] = TARGET_EL2HLT
,
525 [EDEADLK
] = TARGET_EDEADLK
,
526 [ENOLCK
] = TARGET_ENOLCK
,
527 [EBADE
] = TARGET_EBADE
,
528 [EBADR
] = TARGET_EBADR
,
529 [EXFULL
] = TARGET_EXFULL
,
530 [ENOANO
] = TARGET_ENOANO
,
531 [EBADRQC
] = TARGET_EBADRQC
,
532 [EBADSLT
] = TARGET_EBADSLT
,
533 [EBFONT
] = TARGET_EBFONT
,
534 [ENOSTR
] = TARGET_ENOSTR
,
535 [ENODATA
] = TARGET_ENODATA
,
536 [ETIME
] = TARGET_ETIME
,
537 [ENOSR
] = TARGET_ENOSR
,
538 [ENONET
] = TARGET_ENONET
,
539 [ENOPKG
] = TARGET_ENOPKG
,
540 [EREMOTE
] = TARGET_EREMOTE
,
541 [ENOLINK
] = TARGET_ENOLINK
,
542 [EADV
] = TARGET_EADV
,
543 [ESRMNT
] = TARGET_ESRMNT
,
544 [ECOMM
] = TARGET_ECOMM
,
545 [EPROTO
] = TARGET_EPROTO
,
546 [EDOTDOT
] = TARGET_EDOTDOT
,
547 [EMULTIHOP
] = TARGET_EMULTIHOP
,
548 [EBADMSG
] = TARGET_EBADMSG
,
549 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
550 [EOVERFLOW
] = TARGET_EOVERFLOW
,
551 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
552 [EBADFD
] = TARGET_EBADFD
,
553 [EREMCHG
] = TARGET_EREMCHG
,
554 [ELIBACC
] = TARGET_ELIBACC
,
555 [ELIBBAD
] = TARGET_ELIBBAD
,
556 [ELIBSCN
] = TARGET_ELIBSCN
,
557 [ELIBMAX
] = TARGET_ELIBMAX
,
558 [ELIBEXEC
] = TARGET_ELIBEXEC
,
559 [EILSEQ
] = TARGET_EILSEQ
,
560 [ENOSYS
] = TARGET_ENOSYS
,
561 [ELOOP
] = TARGET_ELOOP
,
562 [ERESTART
] = TARGET_ERESTART
,
563 [ESTRPIPE
] = TARGET_ESTRPIPE
,
564 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
565 [EUSERS
] = TARGET_EUSERS
,
566 [ENOTSOCK
] = TARGET_ENOTSOCK
,
567 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
568 [EMSGSIZE
] = TARGET_EMSGSIZE
,
569 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
570 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
571 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
572 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
573 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
574 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
575 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
576 [EADDRINUSE
] = TARGET_EADDRINUSE
,
577 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
578 [ENETDOWN
] = TARGET_ENETDOWN
,
579 [ENETUNREACH
] = TARGET_ENETUNREACH
,
580 [ENETRESET
] = TARGET_ENETRESET
,
581 [ECONNABORTED
] = TARGET_ECONNABORTED
,
582 [ECONNRESET
] = TARGET_ECONNRESET
,
583 [ENOBUFS
] = TARGET_ENOBUFS
,
584 [EISCONN
] = TARGET_EISCONN
,
585 [ENOTCONN
] = TARGET_ENOTCONN
,
586 [EUCLEAN
] = TARGET_EUCLEAN
,
587 [ENOTNAM
] = TARGET_ENOTNAM
,
588 [ENAVAIL
] = TARGET_ENAVAIL
,
589 [EISNAM
] = TARGET_EISNAM
,
590 [EREMOTEIO
] = TARGET_EREMOTEIO
,
591 [EDQUOT
] = TARGET_EDQUOT
,
592 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
593 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
594 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
595 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
596 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
597 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
598 [EALREADY
] = TARGET_EALREADY
,
599 [EINPROGRESS
] = TARGET_EINPROGRESS
,
600 [ESTALE
] = TARGET_ESTALE
,
601 [ECANCELED
] = TARGET_ECANCELED
,
602 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
603 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
605 [ENOKEY
] = TARGET_ENOKEY
,
608 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
611 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
614 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
617 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
619 #ifdef ENOTRECOVERABLE
620 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
623 [ENOMSG
] = TARGET_ENOMSG
,
626 [ERFKILL
] = TARGET_ERFKILL
,
629 [EHWPOISON
] = TARGET_EHWPOISON
,
633 static inline int host_to_target_errno(int err
)
635 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
636 host_to_target_errno_table
[err
]) {
637 return host_to_target_errno_table
[err
];
642 static inline int target_to_host_errno(int err
)
644 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
645 target_to_host_errno_table
[err
]) {
646 return target_to_host_errno_table
[err
];
651 static inline abi_long
get_errno(abi_long ret
)
654 return -host_to_target_errno(errno
);
659 const char *target_strerror(int err
)
661 if (err
== TARGET_ERESTARTSYS
) {
662 return "To be restarted";
664 if (err
== TARGET_QEMU_ESIGRETURN
) {
665 return "Successful exit from sigreturn";
668 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
671 return strerror(target_to_host_errno(err
));
/* Like the _syscallN macros but routed through safe_syscall(), which
 * guarantees correct -ERESTARTSYS handling when a guest signal arrives
 * while the host syscall is blocked.
 */
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
721 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
722 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
723 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
724 int, flags
, mode_t
, mode
)
725 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
726 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
727 struct rusage
*, rusage
)
729 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
730 int, options
, struct rusage
*, rusage
)
731 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
732 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
733 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
734 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
735 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
737 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
738 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
739 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
742 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
743 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
745 #if defined(__NR_futex)
746 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
747 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
749 #if defined(__NR_futex_time64)
750 safe_syscall6(int,futex_time64
,int *,uaddr
,int,op
,int,val
, \
751 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
753 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
754 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
755 safe_syscall2(int, tkill
, int, tid
, int, sig
)
756 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
757 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
758 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
759 safe_syscall5(ssize_t
, preadv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
760 unsigned long, pos_l
, unsigned long, pos_h
)
761 safe_syscall5(ssize_t
, pwritev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
762 unsigned long, pos_l
, unsigned long, pos_h
)
763 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
765 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
766 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
767 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
768 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
769 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
770 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
771 safe_syscall2(int, flock
, int, fd
, int, operation
)
772 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
773 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
774 const struct timespec
*, uts
, size_t, sigsetsize
)
776 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
778 #if defined(TARGET_NR_nanosleep)
779 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
780 struct timespec
*, rem
)
782 #if defined(TARGET_NR_clock_nanosleep) || \
783 defined(TARGET_NR_clock_nanosleep_time64)
784 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
785 const struct timespec
*, req
, struct timespec
*, rem
)
789 safe_syscall5(int, ipc
, int, call
, long, first
, long, second
, long, third
,
792 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
793 void *, ptr
, long, fifth
)
797 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
801 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
802 long, msgtype
, int, flags
)
804 #ifdef __NR_semtimedop
805 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
806 unsigned, nsops
, const struct timespec
*, timeout
)
808 #if defined(TARGET_NR_mq_timedsend) || \
809 defined(TARGET_NR_mq_timedsend_time64)
810 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
811 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
813 #if defined(TARGET_NR_mq_timedreceive) || \
814 defined(TARGET_NR_mq_timedreceive_time64)
815 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
816 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
818 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
819 safe_syscall6(ssize_t
, copy_file_range
, int, infd
, loff_t
*, pinoff
,
820 int, outfd
, loff_t
*, poutoff
, size_t, length
,
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
840 static inline int host_to_target_sock_type(int host_type
)
844 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
846 target_type
= TARGET_SOCK_DGRAM
;
849 target_type
= TARGET_SOCK_STREAM
;
852 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
856 #if defined(SOCK_CLOEXEC)
857 if (host_type
& SOCK_CLOEXEC
) {
858 target_type
|= TARGET_SOCK_CLOEXEC
;
862 #if defined(SOCK_NONBLOCK)
863 if (host_type
& SOCK_NONBLOCK
) {
864 target_type
|= TARGET_SOCK_NONBLOCK
;
871 static abi_ulong target_brk
;
872 static abi_ulong target_original_brk
;
873 static abi_ulong brk_page
;
875 void target_set_brk(abi_ulong new_brk
)
877 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
878 brk_page
= HOST_PAGE_ALIGN(target_brk
);
881 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
882 #define DEBUGF_BRK(message, args...)
884 /* do_brk() must return target values and target errnos. */
885 abi_long
do_brk(abi_ulong new_brk
)
887 abi_long mapped_addr
;
888 abi_ulong new_alloc_size
;
890 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
893 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
896 if (new_brk
< target_original_brk
) {
897 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
902 /* If the new brk is less than the highest page reserved to the
903 * target heap allocation, set it and we're almost done... */
904 if (new_brk
<= brk_page
) {
905 /* Heap contents are initialized to zero, as for anonymous
907 if (new_brk
> target_brk
) {
908 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
910 target_brk
= new_brk
;
911 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
915 /* We need to allocate more memory after the brk... Note that
916 * we don't use MAP_FIXED because that will map over the top of
917 * any existing mapping (like the one with the host libc or qemu
918 * itself); instead we treat "mapped but at wrong address" as
919 * a failure and unmap again.
921 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
922 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
923 PROT_READ
|PROT_WRITE
,
924 MAP_ANON
|MAP_PRIVATE
, 0, 0));
926 if (mapped_addr
== brk_page
) {
927 /* Heap contents are initialized to zero, as for anonymous
928 * mapped pages. Technically the new pages are already
929 * initialized to zero since they *are* anonymous mapped
930 * pages, however we have to take care with the contents that
931 * come from the remaining part of the previous page: it may
932 * contains garbage data due to a previous heap usage (grown
934 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
936 target_brk
= new_brk
;
937 brk_page
= HOST_PAGE_ALIGN(target_brk
);
938 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
941 } else if (mapped_addr
!= -1) {
942 /* Mapped but at wrong address, meaning there wasn't actually
943 * enough space for this brk.
945 target_munmap(mapped_addr
, new_alloc_size
);
947 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
950 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
953 #if defined(TARGET_ALPHA)
954 /* We (partially) emulate OSF/1 on Alpha, which requires we
955 return a proper errno, not an unchanged brk value. */
956 return -TARGET_ENOMEM
;
958 /* For everything else, return the previous break. */
962 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
963 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
964 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
965 abi_ulong target_fds_addr
,
969 abi_ulong b
, *target_fds
;
971 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
972 if (!(target_fds
= lock_user(VERIFY_READ
,
974 sizeof(abi_ulong
) * nw
,
976 return -TARGET_EFAULT
;
980 for (i
= 0; i
< nw
; i
++) {
981 /* grab the abi_ulong */
982 __get_user(b
, &target_fds
[i
]);
983 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
984 /* check the bit inside the abi_ulong */
991 unlock_user(target_fds
, target_fds_addr
, 0);
996 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
997 abi_ulong target_fds_addr
,
1000 if (target_fds_addr
) {
1001 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
1002 return -TARGET_EFAULT
;
1010 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
1016 abi_ulong
*target_fds
;
1018 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1019 if (!(target_fds
= lock_user(VERIFY_WRITE
,
1021 sizeof(abi_ulong
) * nw
,
1023 return -TARGET_EFAULT
;
1026 for (i
= 0; i
< nw
; i
++) {
1028 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1029 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
1032 __put_user(v
, &target_fds
[i
]);
1035 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
1041 #if defined(__alpha__)
1042 #define HOST_HZ 1024
1047 static inline abi_long
host_to_target_clock_t(long ticks
)
1049 #if HOST_HZ == TARGET_HZ
1052 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
1056 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
1057 const struct rusage
*rusage
)
1059 struct target_rusage
*target_rusage
;
1061 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
1062 return -TARGET_EFAULT
;
1063 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
1064 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
1065 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
1066 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
1067 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
1068 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
1069 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
1070 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
1071 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
1072 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
1073 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1074 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1075 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1076 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1077 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1078 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1079 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1080 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1081 unlock_user_struct(target_rusage
, target_addr
, 1);
1086 #ifdef TARGET_NR_setrlimit
1087 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1089 abi_ulong target_rlim_swap
;
1092 target_rlim_swap
= tswapal(target_rlim
);
1093 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1094 return RLIM_INFINITY
;
1096 result
= target_rlim_swap
;
1097 if (target_rlim_swap
!= (rlim_t
)result
)
1098 return RLIM_INFINITY
;
1104 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1105 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1107 abi_ulong target_rlim_swap
;
1110 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1111 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1113 target_rlim_swap
= rlim
;
1114 result
= tswapal(target_rlim_swap
);
1120 static inline int target_to_host_resource(int code
)
1123 case TARGET_RLIMIT_AS
:
1125 case TARGET_RLIMIT_CORE
:
1127 case TARGET_RLIMIT_CPU
:
1129 case TARGET_RLIMIT_DATA
:
1131 case TARGET_RLIMIT_FSIZE
:
1132 return RLIMIT_FSIZE
;
1133 case TARGET_RLIMIT_LOCKS
:
1134 return RLIMIT_LOCKS
;
1135 case TARGET_RLIMIT_MEMLOCK
:
1136 return RLIMIT_MEMLOCK
;
1137 case TARGET_RLIMIT_MSGQUEUE
:
1138 return RLIMIT_MSGQUEUE
;
1139 case TARGET_RLIMIT_NICE
:
1141 case TARGET_RLIMIT_NOFILE
:
1142 return RLIMIT_NOFILE
;
1143 case TARGET_RLIMIT_NPROC
:
1144 return RLIMIT_NPROC
;
1145 case TARGET_RLIMIT_RSS
:
1147 case TARGET_RLIMIT_RTPRIO
:
1148 return RLIMIT_RTPRIO
;
1149 case TARGET_RLIMIT_SIGPENDING
:
1150 return RLIMIT_SIGPENDING
;
1151 case TARGET_RLIMIT_STACK
:
1152 return RLIMIT_STACK
;
1158 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1159 abi_ulong target_tv_addr
)
1161 struct target_timeval
*target_tv
;
1163 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1)) {
1164 return -TARGET_EFAULT
;
1167 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1168 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1170 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1175 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1176 const struct timeval
*tv
)
1178 struct target_timeval
*target_tv
;
1180 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1181 return -TARGET_EFAULT
;
1184 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1185 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1187 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1192 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1193 static inline abi_long
copy_from_user_timeval64(struct timeval
*tv
,
1194 abi_ulong target_tv_addr
)
1196 struct target__kernel_sock_timeval
*target_tv
;
1198 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1)) {
1199 return -TARGET_EFAULT
;
1202 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1203 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1205 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1211 static inline abi_long
copy_to_user_timeval64(abi_ulong target_tv_addr
,
1212 const struct timeval
*tv
)
1214 struct target__kernel_sock_timeval
*target_tv
;
1216 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1217 return -TARGET_EFAULT
;
1220 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1221 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1223 unlock_user_struct(target_tv
, target_tv_addr
, 1);
/* Guard fix: the original condition tested TARGET_NR_pselect6 twice;
 * the second term must be TARGET_NR_pselect6_time64 (matching the fdset
 * helpers' guard) so that a target defining only the time64 variant
 * still gets target_to_host_timespec(). */
#if defined(TARGET_NR_futex) || \
    defined(TARGET_NR_rt_sigtimedwait) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) || \
    defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
    defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
    defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
    defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
/* Read a guest (32/64-bit ABI sized) struct timespec into a host one.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
1252 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1253 defined(TARGET_NR_timer_settime64) || \
1254 defined(TARGET_NR_mq_timedsend_time64) || \
1255 defined(TARGET_NR_mq_timedreceive_time64) || \
1256 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1257 defined(TARGET_NR_clock_nanosleep_time64) || \
1258 defined(TARGET_NR_rt_sigtimedwait_time64) || \
1259 defined(TARGET_NR_utimensat) || \
1260 defined(TARGET_NR_utimensat_time64) || \
1261 defined(TARGET_NR_semtimedop_time64) || \
1262 defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1263 static inline abi_long
target_to_host_timespec64(struct timespec
*host_ts
,
1264 abi_ulong target_addr
)
1266 struct target__kernel_timespec
*target_ts
;
1268 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1)) {
1269 return -TARGET_EFAULT
;
1271 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1272 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1273 /* in 32bit mode, this drops the padding */
1274 host_ts
->tv_nsec
= (long)(abi_long
)host_ts
->tv_nsec
;
1275 unlock_user_struct(target_ts
, target_addr
, 0);
1280 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
1281 struct timespec
*host_ts
)
1283 struct target_timespec
*target_ts
;
1285 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1286 return -TARGET_EFAULT
;
1288 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1289 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1290 unlock_user_struct(target_ts
, target_addr
, 1);
1294 static inline abi_long
host_to_target_timespec64(abi_ulong target_addr
,
1295 struct timespec
*host_ts
)
1297 struct target__kernel_timespec
*target_ts
;
1299 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1300 return -TARGET_EFAULT
;
1302 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1303 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1304 unlock_user_struct(target_ts
, target_addr
, 1);
1308 #if defined(TARGET_NR_gettimeofday)
1309 static inline abi_long
copy_to_user_timezone(abi_ulong target_tz_addr
,
1310 struct timezone
*tz
)
1312 struct target_timezone
*target_tz
;
1314 if (!lock_user_struct(VERIFY_WRITE
, target_tz
, target_tz_addr
, 1)) {
1315 return -TARGET_EFAULT
;
1318 __put_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1319 __put_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1321 unlock_user_struct(target_tz
, target_tz_addr
, 1);
1327 #if defined(TARGET_NR_settimeofday)
1328 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1329 abi_ulong target_tz_addr
)
1331 struct target_timezone
*target_tz
;
1333 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1334 return -TARGET_EFAULT
;
1337 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1338 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1340 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1346 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1349 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1350 abi_ulong target_mq_attr_addr
)
1352 struct target_mq_attr
*target_mq_attr
;
1354 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1355 target_mq_attr_addr
, 1))
1356 return -TARGET_EFAULT
;
1358 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1359 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1360 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1361 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1363 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1368 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1369 const struct mq_attr
*attr
)
1371 struct target_mq_attr
*target_mq_attr
;
1373 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1374 target_mq_attr_addr
, 0))
1375 return -TARGET_EFAULT
;
1377 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1378 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1379 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1380 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1382 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1388 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1389 /* do_select() must return target values and target errnos. */
1390 static abi_long
do_select(int n
,
1391 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1392 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1394 fd_set rfds
, wfds
, efds
;
1395 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1397 struct timespec ts
, *ts_ptr
;
1400 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1404 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1408 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1413 if (target_tv_addr
) {
1414 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1415 return -TARGET_EFAULT
;
1416 ts
.tv_sec
= tv
.tv_sec
;
1417 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1423 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1426 if (!is_error(ret
)) {
1427 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1428 return -TARGET_EFAULT
;
1429 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1430 return -TARGET_EFAULT
;
1431 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1432 return -TARGET_EFAULT
;
1434 if (target_tv_addr
) {
1435 tv
.tv_sec
= ts
.tv_sec
;
1436 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1437 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1438 return -TARGET_EFAULT
;
1446 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1447 static abi_long
do_old_select(abi_ulong arg1
)
1449 struct target_sel_arg_struct
*sel
;
1450 abi_ulong inp
, outp
, exp
, tvp
;
1453 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1)) {
1454 return -TARGET_EFAULT
;
1457 nsel
= tswapal(sel
->n
);
1458 inp
= tswapal(sel
->inp
);
1459 outp
= tswapal(sel
->outp
);
1460 exp
= tswapal(sel
->exp
);
1461 tvp
= tswapal(sel
->tvp
);
1463 unlock_user_struct(sel
, arg1
, 0);
1465 return do_select(nsel
, inp
, outp
, exp
, tvp
);
1470 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1471 static abi_long
do_pselect6(abi_long arg1
, abi_long arg2
, abi_long arg3
,
1472 abi_long arg4
, abi_long arg5
, abi_long arg6
,
1475 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
1476 fd_set rfds
, wfds
, efds
;
1477 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1478 struct timespec ts
, *ts_ptr
;
1482 * The 6th arg is actually two args smashed together,
1483 * so we cannot use the C library.
1491 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
1492 target_sigset_t
*target_sigset
;
1500 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1504 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1508 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1514 * This takes a timespec, and not a timeval, so we cannot
1515 * use the do_select() helper ...
1519 if (target_to_host_timespec64(&ts
, ts_addr
)) {
1520 return -TARGET_EFAULT
;
1523 if (target_to_host_timespec(&ts
, ts_addr
)) {
1524 return -TARGET_EFAULT
;
1532 /* Extract the two packed args for the sigset */
1535 sig
.size
= SIGSET_T_SIZE
;
1537 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
1539 return -TARGET_EFAULT
;
1541 arg_sigset
= tswapal(arg7
[0]);
1542 arg_sigsize
= tswapal(arg7
[1]);
1543 unlock_user(arg7
, arg6
, 0);
1547 if (arg_sigsize
!= sizeof(*target_sigset
)) {
1548 /* Like the kernel, we enforce correct size sigsets */
1549 return -TARGET_EINVAL
;
1551 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
1552 sizeof(*target_sigset
), 1);
1553 if (!target_sigset
) {
1554 return -TARGET_EFAULT
;
1556 target_to_host_sigset(&set
, target_sigset
);
1557 unlock_user(target_sigset
, arg_sigset
, 0);
1565 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1568 if (!is_error(ret
)) {
1569 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
)) {
1570 return -TARGET_EFAULT
;
1572 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
)) {
1573 return -TARGET_EFAULT
;
1575 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
)) {
1576 return -TARGET_EFAULT
;
1579 if (ts_addr
&& host_to_target_timespec64(ts_addr
, &ts
)) {
1580 return -TARGET_EFAULT
;
1583 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
)) {
1584 return -TARGET_EFAULT
;
1592 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1593 defined(TARGET_NR_ppoll_time64)
1594 static abi_long
do_ppoll(abi_long arg1
, abi_long arg2
, abi_long arg3
,
1595 abi_long arg4
, abi_long arg5
, bool ppoll
, bool time64
)
1597 struct target_pollfd
*target_pfd
;
1598 unsigned int nfds
= arg2
;
1606 if (nfds
> (INT_MAX
/ sizeof(struct target_pollfd
))) {
1607 return -TARGET_EINVAL
;
1609 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
1610 sizeof(struct target_pollfd
) * nfds
, 1);
1612 return -TARGET_EFAULT
;
1615 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
1616 for (i
= 0; i
< nfds
; i
++) {
1617 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
1618 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
1622 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
1623 target_sigset_t
*target_set
;
1624 sigset_t _set
, *set
= &_set
;
1628 if (target_to_host_timespec64(timeout_ts
, arg3
)) {
1629 unlock_user(target_pfd
, arg1
, 0);
1630 return -TARGET_EFAULT
;
1633 if (target_to_host_timespec(timeout_ts
, arg3
)) {
1634 unlock_user(target_pfd
, arg1
, 0);
1635 return -TARGET_EFAULT
;
1643 if (arg5
!= sizeof(target_sigset_t
)) {
1644 unlock_user(target_pfd
, arg1
, 0);
1645 return -TARGET_EINVAL
;
1648 target_set
= lock_user(VERIFY_READ
, arg4
,
1649 sizeof(target_sigset_t
), 1);
1651 unlock_user(target_pfd
, arg1
, 0);
1652 return -TARGET_EFAULT
;
1654 target_to_host_sigset(set
, target_set
);
1659 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
1660 set
, SIGSET_T_SIZE
));
1662 if (!is_error(ret
) && arg3
) {
1664 if (host_to_target_timespec64(arg3
, timeout_ts
)) {
1665 return -TARGET_EFAULT
;
1668 if (host_to_target_timespec(arg3
, timeout_ts
)) {
1669 return -TARGET_EFAULT
;
1674 unlock_user(target_set
, arg4
, 0);
1677 struct timespec ts
, *pts
;
1680 /* Convert ms to secs, ns */
1681 ts
.tv_sec
= arg3
/ 1000;
1682 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
1685 /* -ve poll() timeout means "infinite" */
1688 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
1691 if (!is_error(ret
)) {
1692 for (i
= 0; i
< nfds
; i
++) {
1693 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
1696 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
1701 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1704 return pipe2(host_pipe
, flags
);
1710 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1711 int flags
, int is_pipe2
)
1715 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1718 return get_errno(ret
);
1720 /* Several targets have special calling conventions for the original
1721 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1723 #if defined(TARGET_ALPHA)
1724 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1725 return host_pipe
[0];
1726 #elif defined(TARGET_MIPS)
1727 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1728 return host_pipe
[0];
1729 #elif defined(TARGET_SH4)
1730 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1731 return host_pipe
[0];
1732 #elif defined(TARGET_SPARC)
1733 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1734 return host_pipe
[0];
1738 if (put_user_s32(host_pipe
[0], pipedes
)
1739 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1740 return -TARGET_EFAULT
;
1741 return get_errno(ret
);
1744 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1745 abi_ulong target_addr
,
1748 struct target_ip_mreqn
*target_smreqn
;
1750 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1752 return -TARGET_EFAULT
;
1753 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1754 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1755 if (len
== sizeof(struct target_ip_mreqn
))
1756 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1757 unlock_user(target_smreqn
, target_addr
, 0);
1762 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1763 abi_ulong target_addr
,
1766 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1767 sa_family_t sa_family
;
1768 struct target_sockaddr
*target_saddr
;
1770 if (fd_trans_target_to_host_addr(fd
)) {
1771 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1774 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1776 return -TARGET_EFAULT
;
1778 sa_family
= tswap16(target_saddr
->sa_family
);
1780 /* Oops. The caller might send a incomplete sun_path; sun_path
1781 * must be terminated by \0 (see the manual page), but
1782 * unfortunately it is quite common to specify sockaddr_un
1783 * length as "strlen(x->sun_path)" while it should be
1784 * "strlen(...) + 1". We'll fix that here if needed.
1785 * Linux kernel has a similar feature.
1788 if (sa_family
== AF_UNIX
) {
1789 if (len
< unix_maxlen
&& len
> 0) {
1790 char *cp
= (char*)target_saddr
;
1792 if ( cp
[len
-1] && !cp
[len
] )
1795 if (len
> unix_maxlen
)
1799 memcpy(addr
, target_saddr
, len
);
1800 addr
->sa_family
= sa_family
;
1801 if (sa_family
== AF_NETLINK
) {
1802 struct sockaddr_nl
*nladdr
;
1804 nladdr
= (struct sockaddr_nl
*)addr
;
1805 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1806 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1807 } else if (sa_family
== AF_PACKET
) {
1808 struct target_sockaddr_ll
*lladdr
;
1810 lladdr
= (struct target_sockaddr_ll
*)addr
;
1811 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1812 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1814 unlock_user(target_saddr
, target_addr
, 0);
1819 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1820 struct sockaddr
*addr
,
1823 struct target_sockaddr
*target_saddr
;
1830 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1832 return -TARGET_EFAULT
;
1833 memcpy(target_saddr
, addr
, len
);
1834 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1835 sizeof(target_saddr
->sa_family
)) {
1836 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1838 if (addr
->sa_family
== AF_NETLINK
&&
1839 len
>= sizeof(struct target_sockaddr_nl
)) {
1840 struct target_sockaddr_nl
*target_nl
=
1841 (struct target_sockaddr_nl
*)target_saddr
;
1842 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1843 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1844 } else if (addr
->sa_family
== AF_PACKET
) {
1845 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1846 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1847 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1848 } else if (addr
->sa_family
== AF_INET6
&&
1849 len
>= sizeof(struct target_sockaddr_in6
)) {
1850 struct target_sockaddr_in6
*target_in6
=
1851 (struct target_sockaddr_in6
*)target_saddr
;
1852 target_in6
->sin6_scope_id
= tswap16(target_in6
->sin6_scope_id
);
1854 unlock_user(target_saddr
, target_addr
, len
);
1859 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1860 struct target_msghdr
*target_msgh
)
1862 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1863 abi_long msg_controllen
;
1864 abi_ulong target_cmsg_addr
;
1865 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1866 socklen_t space
= 0;
1868 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1869 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1871 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1872 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1873 target_cmsg_start
= target_cmsg
;
1875 return -TARGET_EFAULT
;
1877 while (cmsg
&& target_cmsg
) {
1878 void *data
= CMSG_DATA(cmsg
);
1879 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1881 int len
= tswapal(target_cmsg
->cmsg_len
)
1882 - sizeof(struct target_cmsghdr
);
1884 space
+= CMSG_SPACE(len
);
1885 if (space
> msgh
->msg_controllen
) {
1886 space
-= CMSG_SPACE(len
);
1887 /* This is a QEMU bug, since we allocated the payload
1888 * area ourselves (unlike overflow in host-to-target
1889 * conversion, which is just the guest giving us a buffer
1890 * that's too small). It can't happen for the payload types
1891 * we currently support; if it becomes an issue in future
1892 * we would need to improve our allocation strategy to
1893 * something more intelligent than "twice the size of the
1894 * target buffer we're reading from".
1896 qemu_log_mask(LOG_UNIMP
,
1897 ("Unsupported ancillary data %d/%d: "
1898 "unhandled msg size\n"),
1899 tswap32(target_cmsg
->cmsg_level
),
1900 tswap32(target_cmsg
->cmsg_type
));
1904 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1905 cmsg
->cmsg_level
= SOL_SOCKET
;
1907 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1909 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1910 cmsg
->cmsg_len
= CMSG_LEN(len
);
1912 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1913 int *fd
= (int *)data
;
1914 int *target_fd
= (int *)target_data
;
1915 int i
, numfds
= len
/ sizeof(int);
1917 for (i
= 0; i
< numfds
; i
++) {
1918 __get_user(fd
[i
], target_fd
+ i
);
1920 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1921 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1922 struct ucred
*cred
= (struct ucred
*)data
;
1923 struct target_ucred
*target_cred
=
1924 (struct target_ucred
*)target_data
;
1926 __get_user(cred
->pid
, &target_cred
->pid
);
1927 __get_user(cred
->uid
, &target_cred
->uid
);
1928 __get_user(cred
->gid
, &target_cred
->gid
);
1930 qemu_log_mask(LOG_UNIMP
, "Unsupported ancillary data: %d/%d\n",
1931 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1932 memcpy(data
, target_data
, len
);
1935 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1936 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1939 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1941 msgh
->msg_controllen
= space
;
1945 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1946 struct msghdr
*msgh
)
1948 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1949 abi_long msg_controllen
;
1950 abi_ulong target_cmsg_addr
;
1951 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1952 socklen_t space
= 0;
1954 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1955 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1957 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1958 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1959 target_cmsg_start
= target_cmsg
;
1961 return -TARGET_EFAULT
;
1963 while (cmsg
&& target_cmsg
) {
1964 void *data
= CMSG_DATA(cmsg
);
1965 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1967 int len
= cmsg
->cmsg_len
- sizeof(struct cmsghdr
);
1968 int tgt_len
, tgt_space
;
1970 /* We never copy a half-header but may copy half-data;
1971 * this is Linux's behaviour in put_cmsg(). Note that
1972 * truncation here is a guest problem (which we report
1973 * to the guest via the CTRUNC bit), unlike truncation
1974 * in target_to_host_cmsg, which is a QEMU bug.
1976 if (msg_controllen
< sizeof(struct target_cmsghdr
)) {
1977 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1981 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1982 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1984 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1986 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1988 /* Payload types which need a different size of payload on
1989 * the target must adjust tgt_len here.
1992 switch (cmsg
->cmsg_level
) {
1994 switch (cmsg
->cmsg_type
) {
1996 tgt_len
= sizeof(struct target_timeval
);
2006 if (msg_controllen
< TARGET_CMSG_LEN(tgt_len
)) {
2007 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
2008 tgt_len
= msg_controllen
- sizeof(struct target_cmsghdr
);
2011 /* We must now copy-and-convert len bytes of payload
2012 * into tgt_len bytes of destination space. Bear in mind
2013 * that in both source and destination we may be dealing
2014 * with a truncated value!
2016 switch (cmsg
->cmsg_level
) {
2018 switch (cmsg
->cmsg_type
) {
2021 int *fd
= (int *)data
;
2022 int *target_fd
= (int *)target_data
;
2023 int i
, numfds
= tgt_len
/ sizeof(int);
2025 for (i
= 0; i
< numfds
; i
++) {
2026 __put_user(fd
[i
], target_fd
+ i
);
2032 struct timeval
*tv
= (struct timeval
*)data
;
2033 struct target_timeval
*target_tv
=
2034 (struct target_timeval
*)target_data
;
2036 if (len
!= sizeof(struct timeval
) ||
2037 tgt_len
!= sizeof(struct target_timeval
)) {
2041 /* copy struct timeval to target */
2042 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
2043 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
2046 case SCM_CREDENTIALS
:
2048 struct ucred
*cred
= (struct ucred
*)data
;
2049 struct target_ucred
*target_cred
=
2050 (struct target_ucred
*)target_data
;
2052 __put_user(cred
->pid
, &target_cred
->pid
);
2053 __put_user(cred
->uid
, &target_cred
->uid
);
2054 __put_user(cred
->gid
, &target_cred
->gid
);
2063 switch (cmsg
->cmsg_type
) {
2066 uint32_t *v
= (uint32_t *)data
;
2067 uint32_t *t_int
= (uint32_t *)target_data
;
2069 if (len
!= sizeof(uint32_t) ||
2070 tgt_len
!= sizeof(uint32_t)) {
2073 __put_user(*v
, t_int
);
2079 struct sock_extended_err ee
;
2080 struct sockaddr_in offender
;
2082 struct errhdr_t
*errh
= (struct errhdr_t
*)data
;
2083 struct errhdr_t
*target_errh
=
2084 (struct errhdr_t
*)target_data
;
2086 if (len
!= sizeof(struct errhdr_t
) ||
2087 tgt_len
!= sizeof(struct errhdr_t
)) {
2090 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
2091 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
2092 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
2093 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
2094 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
2095 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
2096 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
2097 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
2098 (void *) &errh
->offender
, sizeof(errh
->offender
));
2107 switch (cmsg
->cmsg_type
) {
2110 uint32_t *v
= (uint32_t *)data
;
2111 uint32_t *t_int
= (uint32_t *)target_data
;
2113 if (len
!= sizeof(uint32_t) ||
2114 tgt_len
!= sizeof(uint32_t)) {
2117 __put_user(*v
, t_int
);
2123 struct sock_extended_err ee
;
2124 struct sockaddr_in6 offender
;
2126 struct errhdr6_t
*errh
= (struct errhdr6_t
*)data
;
2127 struct errhdr6_t
*target_errh
=
2128 (struct errhdr6_t
*)target_data
;
2130 if (len
!= sizeof(struct errhdr6_t
) ||
2131 tgt_len
!= sizeof(struct errhdr6_t
)) {
2134 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
2135 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
2136 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
2137 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
2138 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
2139 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
2140 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
2141 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
2142 (void *) &errh
->offender
, sizeof(errh
->offender
));
2152 qemu_log_mask(LOG_UNIMP
, "Unsupported ancillary data: %d/%d\n",
2153 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
2154 memcpy(target_data
, data
, MIN(len
, tgt_len
));
2155 if (tgt_len
> len
) {
2156 memset(target_data
+ len
, 0, tgt_len
- len
);
2160 target_cmsg
->cmsg_len
= tswapal(TARGET_CMSG_LEN(tgt_len
));
2161 tgt_space
= TARGET_CMSG_SPACE(tgt_len
);
2162 if (msg_controllen
< tgt_space
) {
2163 tgt_space
= msg_controllen
;
2165 msg_controllen
-= tgt_space
;
2167 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
2168 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
2171 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
2173 target_msgh
->msg_controllen
= tswapal(space
);
2177 /* do_setsockopt() Must return target values and target errnos. */
2178 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
2179 abi_ulong optval_addr
, socklen_t optlen
)
2183 struct ip_mreqn
*ip_mreq
;
2184 struct ip_mreq_source
*ip_mreq_source
;
2189 /* TCP and UDP options all take an 'int' value. */
2190 if (optlen
< sizeof(uint32_t))
2191 return -TARGET_EINVAL
;
2193 if (get_user_u32(val
, optval_addr
))
2194 return -TARGET_EFAULT
;
2195 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2202 case IP_ROUTER_ALERT
:
2206 case IP_MTU_DISCOVER
:
2213 case IP_MULTICAST_TTL
:
2214 case IP_MULTICAST_LOOP
:
2216 if (optlen
>= sizeof(uint32_t)) {
2217 if (get_user_u32(val
, optval_addr
))
2218 return -TARGET_EFAULT
;
2219 } else if (optlen
>= 1) {
2220 if (get_user_u8(val
, optval_addr
))
2221 return -TARGET_EFAULT
;
2223 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2225 case IP_ADD_MEMBERSHIP
:
2226 case IP_DROP_MEMBERSHIP
:
2227 if (optlen
< sizeof (struct target_ip_mreq
) ||
2228 optlen
> sizeof (struct target_ip_mreqn
))
2229 return -TARGET_EINVAL
;
2231 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
2232 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
2233 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
2236 case IP_BLOCK_SOURCE
:
2237 case IP_UNBLOCK_SOURCE
:
2238 case IP_ADD_SOURCE_MEMBERSHIP
:
2239 case IP_DROP_SOURCE_MEMBERSHIP
:
2240 if (optlen
!= sizeof (struct target_ip_mreq_source
))
2241 return -TARGET_EINVAL
;
2243 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2244 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
2245 unlock_user (ip_mreq_source
, optval_addr
, 0);
2254 case IPV6_MTU_DISCOVER
:
2257 case IPV6_RECVPKTINFO
:
2258 case IPV6_UNICAST_HOPS
:
2259 case IPV6_MULTICAST_HOPS
:
2260 case IPV6_MULTICAST_LOOP
:
2262 case IPV6_RECVHOPLIMIT
:
2263 case IPV6_2292HOPLIMIT
:
2266 case IPV6_2292PKTINFO
:
2267 case IPV6_RECVTCLASS
:
2268 case IPV6_RECVRTHDR
:
2269 case IPV6_2292RTHDR
:
2270 case IPV6_RECVHOPOPTS
:
2271 case IPV6_2292HOPOPTS
:
2272 case IPV6_RECVDSTOPTS
:
2273 case IPV6_2292DSTOPTS
:
2275 #ifdef IPV6_RECVPATHMTU
2276 case IPV6_RECVPATHMTU
:
2278 #ifdef IPV6_TRANSPARENT
2279 case IPV6_TRANSPARENT
:
2281 #ifdef IPV6_FREEBIND
2284 #ifdef IPV6_RECVORIGDSTADDR
2285 case IPV6_RECVORIGDSTADDR
:
2288 if (optlen
< sizeof(uint32_t)) {
2289 return -TARGET_EINVAL
;
2291 if (get_user_u32(val
, optval_addr
)) {
2292 return -TARGET_EFAULT
;
2294 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2295 &val
, sizeof(val
)));
2299 struct in6_pktinfo pki
;
2301 if (optlen
< sizeof(pki
)) {
2302 return -TARGET_EINVAL
;
2305 if (copy_from_user(&pki
, optval_addr
, sizeof(pki
))) {
2306 return -TARGET_EFAULT
;
2309 pki
.ipi6_ifindex
= tswap32(pki
.ipi6_ifindex
);
2311 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2312 &pki
, sizeof(pki
)));
2315 case IPV6_ADD_MEMBERSHIP
:
2316 case IPV6_DROP_MEMBERSHIP
:
2318 struct ipv6_mreq ipv6mreq
;
2320 if (optlen
< sizeof(ipv6mreq
)) {
2321 return -TARGET_EINVAL
;
2324 if (copy_from_user(&ipv6mreq
, optval_addr
, sizeof(ipv6mreq
))) {
2325 return -TARGET_EFAULT
;
2328 ipv6mreq
.ipv6mr_interface
= tswap32(ipv6mreq
.ipv6mr_interface
);
2330 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2331 &ipv6mreq
, sizeof(ipv6mreq
)));
2342 struct icmp6_filter icmp6f
;
2344 if (optlen
> sizeof(icmp6f
)) {
2345 optlen
= sizeof(icmp6f
);
2348 if (copy_from_user(&icmp6f
, optval_addr
, optlen
)) {
2349 return -TARGET_EFAULT
;
2352 for (val
= 0; val
< 8; val
++) {
2353 icmp6f
.data
[val
] = tswap32(icmp6f
.data
[val
]);
2356 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2368 /* those take an u32 value */
2369 if (optlen
< sizeof(uint32_t)) {
2370 return -TARGET_EINVAL
;
2373 if (get_user_u32(val
, optval_addr
)) {
2374 return -TARGET_EFAULT
;
2376 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2377 &val
, sizeof(val
)));
2384 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2389 char *alg_key
= g_malloc(optlen
);
2392 return -TARGET_ENOMEM
;
2394 if (copy_from_user(alg_key
, optval_addr
, optlen
)) {
2396 return -TARGET_EFAULT
;
2398 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2403 case ALG_SET_AEAD_AUTHSIZE
:
2405 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2414 case TARGET_SOL_SOCKET
:
2416 case TARGET_SO_RCVTIMEO
:
2420 optname
= SO_RCVTIMEO
;
2423 if (optlen
!= sizeof(struct target_timeval
)) {
2424 return -TARGET_EINVAL
;
2427 if (copy_from_user_timeval(&tv
, optval_addr
)) {
2428 return -TARGET_EFAULT
;
2431 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2435 case TARGET_SO_SNDTIMEO
:
2436 optname
= SO_SNDTIMEO
;
2438 case TARGET_SO_ATTACH_FILTER
:
2440 struct target_sock_fprog
*tfprog
;
2441 struct target_sock_filter
*tfilter
;
2442 struct sock_fprog fprog
;
2443 struct sock_filter
*filter
;
2446 if (optlen
!= sizeof(*tfprog
)) {
2447 return -TARGET_EINVAL
;
2449 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
2450 return -TARGET_EFAULT
;
2452 if (!lock_user_struct(VERIFY_READ
, tfilter
,
2453 tswapal(tfprog
->filter
), 0)) {
2454 unlock_user_struct(tfprog
, optval_addr
, 1);
2455 return -TARGET_EFAULT
;
2458 fprog
.len
= tswap16(tfprog
->len
);
2459 filter
= g_try_new(struct sock_filter
, fprog
.len
);
2460 if (filter
== NULL
) {
2461 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2462 unlock_user_struct(tfprog
, optval_addr
, 1);
2463 return -TARGET_ENOMEM
;
2465 for (i
= 0; i
< fprog
.len
; i
++) {
2466 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2467 filter
[i
].jt
= tfilter
[i
].jt
;
2468 filter
[i
].jf
= tfilter
[i
].jf
;
2469 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2471 fprog
.filter
= filter
;
2473 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2474 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2477 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2478 unlock_user_struct(tfprog
, optval_addr
, 1);
2481 case TARGET_SO_BINDTODEVICE
:
2483 char *dev_ifname
, *addr_ifname
;
2485 if (optlen
> IFNAMSIZ
- 1) {
2486 optlen
= IFNAMSIZ
- 1;
2488 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2490 return -TARGET_EFAULT
;
2492 optname
= SO_BINDTODEVICE
;
2493 addr_ifname
= alloca(IFNAMSIZ
);
2494 memcpy(addr_ifname
, dev_ifname
, optlen
);
2495 addr_ifname
[optlen
] = 0;
2496 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2497 addr_ifname
, optlen
));
2498 unlock_user (dev_ifname
, optval_addr
, 0);
2501 case TARGET_SO_LINGER
:
2504 struct target_linger
*tlg
;
2506 if (optlen
!= sizeof(struct target_linger
)) {
2507 return -TARGET_EINVAL
;
2509 if (!lock_user_struct(VERIFY_READ
, tlg
, optval_addr
, 1)) {
2510 return -TARGET_EFAULT
;
2512 __get_user(lg
.l_onoff
, &tlg
->l_onoff
);
2513 __get_user(lg
.l_linger
, &tlg
->l_linger
);
2514 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, SO_LINGER
,
2516 unlock_user_struct(tlg
, optval_addr
, 0);
2519 /* Options with 'int' argument. */
2520 case TARGET_SO_DEBUG
:
2523 case TARGET_SO_REUSEADDR
:
2524 optname
= SO_REUSEADDR
;
2527 case TARGET_SO_REUSEPORT
:
2528 optname
= SO_REUSEPORT
;
2531 case TARGET_SO_TYPE
:
2534 case TARGET_SO_ERROR
:
2537 case TARGET_SO_DONTROUTE
:
2538 optname
= SO_DONTROUTE
;
2540 case TARGET_SO_BROADCAST
:
2541 optname
= SO_BROADCAST
;
2543 case TARGET_SO_SNDBUF
:
2544 optname
= SO_SNDBUF
;
2546 case TARGET_SO_SNDBUFFORCE
:
2547 optname
= SO_SNDBUFFORCE
;
2549 case TARGET_SO_RCVBUF
:
2550 optname
= SO_RCVBUF
;
2552 case TARGET_SO_RCVBUFFORCE
:
2553 optname
= SO_RCVBUFFORCE
;
2555 case TARGET_SO_KEEPALIVE
:
2556 optname
= SO_KEEPALIVE
;
2558 case TARGET_SO_OOBINLINE
:
2559 optname
= SO_OOBINLINE
;
2561 case TARGET_SO_NO_CHECK
:
2562 optname
= SO_NO_CHECK
;
2564 case TARGET_SO_PRIORITY
:
2565 optname
= SO_PRIORITY
;
2568 case TARGET_SO_BSDCOMPAT
:
2569 optname
= SO_BSDCOMPAT
;
2572 case TARGET_SO_PASSCRED
:
2573 optname
= SO_PASSCRED
;
2575 case TARGET_SO_PASSSEC
:
2576 optname
= SO_PASSSEC
;
2578 case TARGET_SO_TIMESTAMP
:
2579 optname
= SO_TIMESTAMP
;
2581 case TARGET_SO_RCVLOWAT
:
2582 optname
= SO_RCVLOWAT
;
2587 if (optlen
< sizeof(uint32_t))
2588 return -TARGET_EINVAL
;
2590 if (get_user_u32(val
, optval_addr
))
2591 return -TARGET_EFAULT
;
2592 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
2597 case NETLINK_PKTINFO
:
2598 case NETLINK_ADD_MEMBERSHIP
:
2599 case NETLINK_DROP_MEMBERSHIP
:
2600 case NETLINK_BROADCAST_ERROR
:
2601 case NETLINK_NO_ENOBUFS
:
2602 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2603 case NETLINK_LISTEN_ALL_NSID
:
2604 case NETLINK_CAP_ACK
:
2605 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2606 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2607 case NETLINK_EXT_ACK
:
2608 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2609 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2610 case NETLINK_GET_STRICT_CHK
:
2611 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2617 if (optlen
< sizeof(uint32_t)) {
2618 return -TARGET_EINVAL
;
2620 if (get_user_u32(val
, optval_addr
)) {
2621 return -TARGET_EFAULT
;
2623 ret
= get_errno(setsockopt(sockfd
, SOL_NETLINK
, optname
, &val
,
2626 #endif /* SOL_NETLINK */
2629 qemu_log_mask(LOG_UNIMP
, "Unsupported setsockopt level=%d optname=%d\n",
2631 ret
= -TARGET_ENOPROTOOPT
;
2636 /* do_getsockopt() Must return target values and target errnos. */
2637 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
2638 abi_ulong optval_addr
, abi_ulong optlen
)
2645 case TARGET_SOL_SOCKET
:
2648 /* These don't just return a single integer */
2649 case TARGET_SO_PEERNAME
:
2651 case TARGET_SO_RCVTIMEO
: {
2655 optname
= SO_RCVTIMEO
;
2658 if (get_user_u32(len
, optlen
)) {
2659 return -TARGET_EFAULT
;
2662 return -TARGET_EINVAL
;
2666 ret
= get_errno(getsockopt(sockfd
, level
, optname
,
2671 if (len
> sizeof(struct target_timeval
)) {
2672 len
= sizeof(struct target_timeval
);
2674 if (copy_to_user_timeval(optval_addr
, &tv
)) {
2675 return -TARGET_EFAULT
;
2677 if (put_user_u32(len
, optlen
)) {
2678 return -TARGET_EFAULT
;
2682 case TARGET_SO_SNDTIMEO
:
2683 optname
= SO_SNDTIMEO
;
2685 case TARGET_SO_PEERCRED
: {
2688 struct target_ucred
*tcr
;
2690 if (get_user_u32(len
, optlen
)) {
2691 return -TARGET_EFAULT
;
2694 return -TARGET_EINVAL
;
2698 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
2706 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
2707 return -TARGET_EFAULT
;
2709 __put_user(cr
.pid
, &tcr
->pid
);
2710 __put_user(cr
.uid
, &tcr
->uid
);
2711 __put_user(cr
.gid
, &tcr
->gid
);
2712 unlock_user_struct(tcr
, optval_addr
, 1);
2713 if (put_user_u32(len
, optlen
)) {
2714 return -TARGET_EFAULT
;
2718 case TARGET_SO_PEERSEC
: {
2721 if (get_user_u32(len
, optlen
)) {
2722 return -TARGET_EFAULT
;
2725 return -TARGET_EINVAL
;
2727 name
= lock_user(VERIFY_WRITE
, optval_addr
, len
, 0);
2729 return -TARGET_EFAULT
;
2732 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERSEC
,
2734 if (put_user_u32(lv
, optlen
)) {
2735 ret
= -TARGET_EFAULT
;
2737 unlock_user(name
, optval_addr
, lv
);
2740 case TARGET_SO_LINGER
:
2744 struct target_linger
*tlg
;
2746 if (get_user_u32(len
, optlen
)) {
2747 return -TARGET_EFAULT
;
2750 return -TARGET_EINVAL
;
2754 ret
= get_errno(getsockopt(sockfd
, level
, SO_LINGER
,
2762 if (!lock_user_struct(VERIFY_WRITE
, tlg
, optval_addr
, 0)) {
2763 return -TARGET_EFAULT
;
2765 __put_user(lg
.l_onoff
, &tlg
->l_onoff
);
2766 __put_user(lg
.l_linger
, &tlg
->l_linger
);
2767 unlock_user_struct(tlg
, optval_addr
, 1);
2768 if (put_user_u32(len
, optlen
)) {
2769 return -TARGET_EFAULT
;
2773 /* Options with 'int' argument. */
2774 case TARGET_SO_DEBUG
:
2777 case TARGET_SO_REUSEADDR
:
2778 optname
= SO_REUSEADDR
;
2781 case TARGET_SO_REUSEPORT
:
2782 optname
= SO_REUSEPORT
;
2785 case TARGET_SO_TYPE
:
2788 case TARGET_SO_ERROR
:
2791 case TARGET_SO_DONTROUTE
:
2792 optname
= SO_DONTROUTE
;
2794 case TARGET_SO_BROADCAST
:
2795 optname
= SO_BROADCAST
;
2797 case TARGET_SO_SNDBUF
:
2798 optname
= SO_SNDBUF
;
2800 case TARGET_SO_RCVBUF
:
2801 optname
= SO_RCVBUF
;
2803 case TARGET_SO_KEEPALIVE
:
2804 optname
= SO_KEEPALIVE
;
2806 case TARGET_SO_OOBINLINE
:
2807 optname
= SO_OOBINLINE
;
2809 case TARGET_SO_NO_CHECK
:
2810 optname
= SO_NO_CHECK
;
2812 case TARGET_SO_PRIORITY
:
2813 optname
= SO_PRIORITY
;
2816 case TARGET_SO_BSDCOMPAT
:
2817 optname
= SO_BSDCOMPAT
;
2820 case TARGET_SO_PASSCRED
:
2821 optname
= SO_PASSCRED
;
2823 case TARGET_SO_TIMESTAMP
:
2824 optname
= SO_TIMESTAMP
;
2826 case TARGET_SO_RCVLOWAT
:
2827 optname
= SO_RCVLOWAT
;
2829 case TARGET_SO_ACCEPTCONN
:
2830 optname
= SO_ACCEPTCONN
;
2838 /* TCP and UDP options all take an 'int' value. */
2840 if (get_user_u32(len
, optlen
))
2841 return -TARGET_EFAULT
;
2843 return -TARGET_EINVAL
;
2845 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2848 if (optname
== SO_TYPE
) {
2849 val
= host_to_target_sock_type(val
);
2854 if (put_user_u32(val
, optval_addr
))
2855 return -TARGET_EFAULT
;
2857 if (put_user_u8(val
, optval_addr
))
2858 return -TARGET_EFAULT
;
2860 if (put_user_u32(len
, optlen
))
2861 return -TARGET_EFAULT
;
2868 case IP_ROUTER_ALERT
:
2872 case IP_MTU_DISCOVER
:
2878 case IP_MULTICAST_TTL
:
2879 case IP_MULTICAST_LOOP
:
2880 if (get_user_u32(len
, optlen
))
2881 return -TARGET_EFAULT
;
2883 return -TARGET_EINVAL
;
2885 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2888 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2890 if (put_user_u32(len
, optlen
)
2891 || put_user_u8(val
, optval_addr
))
2892 return -TARGET_EFAULT
;
2894 if (len
> sizeof(int))
2896 if (put_user_u32(len
, optlen
)
2897 || put_user_u32(val
, optval_addr
))
2898 return -TARGET_EFAULT
;
2902 ret
= -TARGET_ENOPROTOOPT
;
2908 case IPV6_MTU_DISCOVER
:
2911 case IPV6_RECVPKTINFO
:
2912 case IPV6_UNICAST_HOPS
:
2913 case IPV6_MULTICAST_HOPS
:
2914 case IPV6_MULTICAST_LOOP
:
2916 case IPV6_RECVHOPLIMIT
:
2917 case IPV6_2292HOPLIMIT
:
2920 case IPV6_2292PKTINFO
:
2921 case IPV6_RECVTCLASS
:
2922 case IPV6_RECVRTHDR
:
2923 case IPV6_2292RTHDR
:
2924 case IPV6_RECVHOPOPTS
:
2925 case IPV6_2292HOPOPTS
:
2926 case IPV6_RECVDSTOPTS
:
2927 case IPV6_2292DSTOPTS
:
2929 #ifdef IPV6_RECVPATHMTU
2930 case IPV6_RECVPATHMTU
:
2932 #ifdef IPV6_TRANSPARENT
2933 case IPV6_TRANSPARENT
:
2935 #ifdef IPV6_FREEBIND
2938 #ifdef IPV6_RECVORIGDSTADDR
2939 case IPV6_RECVORIGDSTADDR
:
2941 if (get_user_u32(len
, optlen
))
2942 return -TARGET_EFAULT
;
2944 return -TARGET_EINVAL
;
2946 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2949 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2951 if (put_user_u32(len
, optlen
)
2952 || put_user_u8(val
, optval_addr
))
2953 return -TARGET_EFAULT
;
2955 if (len
> sizeof(int))
2957 if (put_user_u32(len
, optlen
)
2958 || put_user_u32(val
, optval_addr
))
2959 return -TARGET_EFAULT
;
2963 ret
= -TARGET_ENOPROTOOPT
;
2970 case NETLINK_PKTINFO
:
2971 case NETLINK_BROADCAST_ERROR
:
2972 case NETLINK_NO_ENOBUFS
:
2973 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2974 case NETLINK_LISTEN_ALL_NSID
:
2975 case NETLINK_CAP_ACK
:
2976 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2977 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2978 case NETLINK_EXT_ACK
:
2979 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2980 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2981 case NETLINK_GET_STRICT_CHK
:
2982 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2983 if (get_user_u32(len
, optlen
)) {
2984 return -TARGET_EFAULT
;
2986 if (len
!= sizeof(val
)) {
2987 return -TARGET_EINVAL
;
2990 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2994 if (put_user_u32(lv
, optlen
)
2995 || put_user_u32(val
, optval_addr
)) {
2996 return -TARGET_EFAULT
;
2999 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
3000 case NETLINK_LIST_MEMBERSHIPS
:
3004 if (get_user_u32(len
, optlen
)) {
3005 return -TARGET_EFAULT
;
3008 return -TARGET_EINVAL
;
3010 results
= lock_user(VERIFY_WRITE
, optval_addr
, len
, 1);
3012 return -TARGET_EFAULT
;
3015 ret
= get_errno(getsockopt(sockfd
, level
, optname
, results
, &lv
));
3017 unlock_user(results
, optval_addr
, 0);
3020 /* swap host endianess to target endianess. */
3021 for (i
= 0; i
< (len
/ sizeof(uint32_t)); i
++) {
3022 results
[i
] = tswap32(results
[i
]);
3024 if (put_user_u32(lv
, optlen
)) {
3025 return -TARGET_EFAULT
;
3027 unlock_user(results
, optval_addr
, 0);
3030 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
3035 #endif /* SOL_NETLINK */
3038 qemu_log_mask(LOG_UNIMP
,
3039 "getsockopt level=%d optname=%d not yet supported\n",
3041 ret
= -TARGET_EOPNOTSUPP
;
3047 /* Convert target low/high pair representing file offset into the host
3048 * low/high pair. This function doesn't handle offsets bigger than 64 bits
3049 * as the kernel doesn't handle them either.
3051 static void target_to_host_low_high(abi_ulong tlow
,
3053 unsigned long *hlow
,
3054 unsigned long *hhigh
)
3056 uint64_t off
= tlow
|
3057 ((unsigned long long)thigh
<< TARGET_LONG_BITS
/ 2) <<
3058 TARGET_LONG_BITS
/ 2;
3061 *hhigh
= (off
>> HOST_LONG_BITS
/ 2) >> HOST_LONG_BITS
/ 2;
3064 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
3065 abi_ulong count
, int copy
)
3067 struct target_iovec
*target_vec
;
3069 abi_ulong total_len
, max_len
;
3072 bool bad_address
= false;
3078 if (count
> IOV_MAX
) {
3083 vec
= g_try_new0(struct iovec
, count
);
3089 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3090 count
* sizeof(struct target_iovec
), 1);
3091 if (target_vec
== NULL
) {
3096 /* ??? If host page size > target page size, this will result in a
3097 value larger than what we can actually support. */
3098 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
3101 for (i
= 0; i
< count
; i
++) {
3102 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3103 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3108 } else if (len
== 0) {
3109 /* Zero length pointer is ignored. */
3110 vec
[i
].iov_base
= 0;
3112 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
3113 /* If the first buffer pointer is bad, this is a fault. But
3114 * subsequent bad buffers will result in a partial write; this
3115 * is realized by filling the vector with null pointers and
3117 if (!vec
[i
].iov_base
) {
3128 if (len
> max_len
- total_len
) {
3129 len
= max_len
- total_len
;
3132 vec
[i
].iov_len
= len
;
3136 unlock_user(target_vec
, target_addr
, 0);
3141 if (tswapal(target_vec
[i
].iov_len
) > 0) {
3142 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
3145 unlock_user(target_vec
, target_addr
, 0);
3152 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
3153 abi_ulong count
, int copy
)
3155 struct target_iovec
*target_vec
;
3158 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3159 count
* sizeof(struct target_iovec
), 1);
3161 for (i
= 0; i
< count
; i
++) {
3162 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3163 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3167 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
3169 unlock_user(target_vec
, target_addr
, 0);
3175 static inline int target_to_host_sock_type(int *type
)
3178 int target_type
= *type
;
3180 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
3181 case TARGET_SOCK_DGRAM
:
3182 host_type
= SOCK_DGRAM
;
3184 case TARGET_SOCK_STREAM
:
3185 host_type
= SOCK_STREAM
;
3188 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
3191 if (target_type
& TARGET_SOCK_CLOEXEC
) {
3192 #if defined(SOCK_CLOEXEC)
3193 host_type
|= SOCK_CLOEXEC
;
3195 return -TARGET_EINVAL
;
3198 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3199 #if defined(SOCK_NONBLOCK)
3200 host_type
|= SOCK_NONBLOCK
;
3201 #elif !defined(O_NONBLOCK)
3202 return -TARGET_EINVAL
;
3209 /* Try to emulate socket type flags after socket creation. */
3210 static int sock_flags_fixup(int fd
, int target_type
)
3212 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3213 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3214 int flags
= fcntl(fd
, F_GETFL
);
3215 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
3217 return -TARGET_EINVAL
;
3224 /* do_socket() Must return target values and target errnos. */
3225 static abi_long
do_socket(int domain
, int type
, int protocol
)
3227 int target_type
= type
;
3230 ret
= target_to_host_sock_type(&type
);
3235 if (domain
== PF_NETLINK
&& !(
3236 #ifdef CONFIG_RTNETLINK
3237 protocol
== NETLINK_ROUTE
||
3239 protocol
== NETLINK_KOBJECT_UEVENT
||
3240 protocol
== NETLINK_AUDIT
)) {
3241 return -TARGET_EPROTONOSUPPORT
;
3244 if (domain
== AF_PACKET
||
3245 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
3246 protocol
= tswap16(protocol
);
3249 ret
= get_errno(socket(domain
, type
, protocol
));
3251 ret
= sock_flags_fixup(ret
, target_type
);
3252 if (type
== SOCK_PACKET
) {
3253 /* Manage an obsolete case :
3254 * if socket type is SOCK_PACKET, bind by name
3256 fd_trans_register(ret
, &target_packet_trans
);
3257 } else if (domain
== PF_NETLINK
) {
3259 #ifdef CONFIG_RTNETLINK
3261 fd_trans_register(ret
, &target_netlink_route_trans
);
3264 case NETLINK_KOBJECT_UEVENT
:
3265 /* nothing to do: messages are strings */
3268 fd_trans_register(ret
, &target_netlink_audit_trans
);
3271 g_assert_not_reached();
3278 /* do_bind() Must return target values and target errnos. */
3279 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
3285 if ((int)addrlen
< 0) {
3286 return -TARGET_EINVAL
;
3289 addr
= alloca(addrlen
+1);
3291 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3295 return get_errno(bind(sockfd
, addr
, addrlen
));
3298 /* do_connect() Must return target values and target errnos. */
3299 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
3305 if ((int)addrlen
< 0) {
3306 return -TARGET_EINVAL
;
3309 addr
= alloca(addrlen
+1);
3311 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3315 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
3318 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3319 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
3320 int flags
, int send
)
3326 abi_ulong target_vec
;
3328 if (msgp
->msg_name
) {
3329 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
3330 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
3331 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
3332 tswapal(msgp
->msg_name
),
3334 if (ret
== -TARGET_EFAULT
) {
3335 /* For connected sockets msg_name and msg_namelen must
3336 * be ignored, so returning EFAULT immediately is wrong.
3337 * Instead, pass a bad msg_name to the host kernel, and
3338 * let it decide whether to return EFAULT or not.
3340 msg
.msg_name
= (void *)-1;
3345 msg
.msg_name
= NULL
;
3346 msg
.msg_namelen
= 0;
3348 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
3349 msg
.msg_control
= alloca(msg
.msg_controllen
);
3350 memset(msg
.msg_control
, 0, msg
.msg_controllen
);
3352 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
3354 count
= tswapal(msgp
->msg_iovlen
);
3355 target_vec
= tswapal(msgp
->msg_iov
);
3357 if (count
> IOV_MAX
) {
3358 /* sendrcvmsg returns a different errno for this condition than
3359 * readv/writev, so we must catch it here before lock_iovec() does.
3361 ret
= -TARGET_EMSGSIZE
;
3365 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
3366 target_vec
, count
, send
);
3368 ret
= -host_to_target_errno(errno
);
3371 msg
.msg_iovlen
= count
;
3375 if (fd_trans_target_to_host_data(fd
)) {
3378 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
3379 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
3380 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
3381 msg
.msg_iov
->iov_len
);
3383 msg
.msg_iov
->iov_base
= host_msg
;
3384 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3388 ret
= target_to_host_cmsg(&msg
, msgp
);
3390 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3394 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
3395 if (!is_error(ret
)) {
3397 if (fd_trans_host_to_target_data(fd
)) {
3398 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
3399 MIN(msg
.msg_iov
->iov_len
, len
));
3401 ret
= host_to_target_cmsg(msgp
, &msg
);
3403 if (!is_error(ret
)) {
3404 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
3405 msgp
->msg_flags
= tswap32(msg
.msg_flags
);
3406 if (msg
.msg_name
!= NULL
&& msg
.msg_name
!= (void *)-1) {
3407 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
3408 msg
.msg_name
, msg
.msg_namelen
);
3420 unlock_iovec(vec
, target_vec
, count
, !send
);
3425 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
3426 int flags
, int send
)
3429 struct target_msghdr
*msgp
;
3431 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
3435 return -TARGET_EFAULT
;
3437 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
3438 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
3442 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3443 * so it might not have this *mmsg-specific flag either.
3445 #ifndef MSG_WAITFORONE
3446 #define MSG_WAITFORONE 0x10000
3449 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
3450 unsigned int vlen
, unsigned int flags
,
3453 struct target_mmsghdr
*mmsgp
;
3457 if (vlen
> UIO_MAXIOV
) {
3461 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
3463 return -TARGET_EFAULT
;
3466 for (i
= 0; i
< vlen
; i
++) {
3467 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
3468 if (is_error(ret
)) {
3471 mmsgp
[i
].msg_len
= tswap32(ret
);
3472 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3473 if (flags
& MSG_WAITFORONE
) {
3474 flags
|= MSG_DONTWAIT
;
3478 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3480 /* Return number of datagrams sent if we sent any at all;
3481 * otherwise return the error.
3489 /* do_accept4() Must return target values and target errnos. */
3490 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3491 abi_ulong target_addrlen_addr
, int flags
)
3493 socklen_t addrlen
, ret_addrlen
;
3498 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3500 if (target_addr
== 0) {
3501 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
3504 /* linux returns EFAULT if addrlen pointer is invalid */
3505 if (get_user_u32(addrlen
, target_addrlen_addr
))
3506 return -TARGET_EFAULT
;
3508 if ((int)addrlen
< 0) {
3509 return -TARGET_EINVAL
;
3512 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3513 return -TARGET_EFAULT
;
3515 addr
= alloca(addrlen
);
3517 ret_addrlen
= addrlen
;
3518 ret
= get_errno(safe_accept4(fd
, addr
, &ret_addrlen
, host_flags
));
3519 if (!is_error(ret
)) {
3520 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3521 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3522 ret
= -TARGET_EFAULT
;
3528 /* do_getpeername() Must return target values and target errnos. */
3529 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3530 abi_ulong target_addrlen_addr
)
3532 socklen_t addrlen
, ret_addrlen
;
3536 if (get_user_u32(addrlen
, target_addrlen_addr
))
3537 return -TARGET_EFAULT
;
3539 if ((int)addrlen
< 0) {
3540 return -TARGET_EINVAL
;
3543 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3544 return -TARGET_EFAULT
;
3546 addr
= alloca(addrlen
);
3548 ret_addrlen
= addrlen
;
3549 ret
= get_errno(getpeername(fd
, addr
, &ret_addrlen
));
3550 if (!is_error(ret
)) {
3551 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3552 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3553 ret
= -TARGET_EFAULT
;
3559 /* do_getsockname() Must return target values and target errnos. */
3560 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3561 abi_ulong target_addrlen_addr
)
3563 socklen_t addrlen
, ret_addrlen
;
3567 if (get_user_u32(addrlen
, target_addrlen_addr
))
3568 return -TARGET_EFAULT
;
3570 if ((int)addrlen
< 0) {
3571 return -TARGET_EINVAL
;
3574 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3575 return -TARGET_EFAULT
;
3577 addr
= alloca(addrlen
);
3579 ret_addrlen
= addrlen
;
3580 ret
= get_errno(getsockname(fd
, addr
, &ret_addrlen
));
3581 if (!is_error(ret
)) {
3582 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3583 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3584 ret
= -TARGET_EFAULT
;
3590 /* do_socketpair() Must return target values and target errnos. */
3591 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3592 abi_ulong target_tab_addr
)
3597 target_to_host_sock_type(&type
);
3599 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3600 if (!is_error(ret
)) {
3601 if (put_user_s32(tab
[0], target_tab_addr
)
3602 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3603 ret
= -TARGET_EFAULT
;
3608 /* do_sendto() Must return target values and target errnos. */
3609 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3610 abi_ulong target_addr
, socklen_t addrlen
)
3614 void *copy_msg
= NULL
;
3617 if ((int)addrlen
< 0) {
3618 return -TARGET_EINVAL
;
3621 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3623 return -TARGET_EFAULT
;
3624 if (fd_trans_target_to_host_data(fd
)) {
3625 copy_msg
= host_msg
;
3626 host_msg
= g_malloc(len
);
3627 memcpy(host_msg
, copy_msg
, len
);
3628 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
3634 addr
= alloca(addrlen
+1);
3635 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
3639 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
3641 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
3646 host_msg
= copy_msg
;
3648 unlock_user(host_msg
, msg
, 0);
3652 /* do_recvfrom() Must return target values and target errnos. */
3653 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
3654 abi_ulong target_addr
,
3655 abi_ulong target_addrlen
)
3657 socklen_t addrlen
, ret_addrlen
;
3662 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
3664 return -TARGET_EFAULT
;
3666 if (get_user_u32(addrlen
, target_addrlen
)) {
3667 ret
= -TARGET_EFAULT
;
3670 if ((int)addrlen
< 0) {
3671 ret
= -TARGET_EINVAL
;
3674 addr
= alloca(addrlen
);
3675 ret_addrlen
= addrlen
;
3676 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
3677 addr
, &ret_addrlen
));
3679 addr
= NULL
; /* To keep compiler quiet. */
3680 addrlen
= 0; /* To keep compiler quiet. */
3681 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
3683 if (!is_error(ret
)) {
3684 if (fd_trans_host_to_target_data(fd
)) {
3686 trans
= fd_trans_host_to_target_data(fd
)(host_msg
, MIN(ret
, len
));
3687 if (is_error(trans
)) {
3693 host_to_target_sockaddr(target_addr
, addr
,
3694 MIN(addrlen
, ret_addrlen
));
3695 if (put_user_u32(ret_addrlen
, target_addrlen
)) {
3696 ret
= -TARGET_EFAULT
;
3700 unlock_user(host_msg
, msg
, len
);
3703 unlock_user(host_msg
, msg
, 0);
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos.
 *
 * Demultiplexer for the legacy socketcall(2) syscall: fetches the
 * per-operation argument vector from guest memory and dispatches to
 * the corresponding do_* helper.
 */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
#define N_SHM_REGIONS	32

/* Bookkeeping for guest shmat() mappings so do_shmdt() can clear page
 * flags for the right extent.  Fields reconstructed from their uses in
 * do_shmat()/do_shmdt(); NOTE(review): confirm field order against the
 * original file.
 */
static struct shm_region {
    abi_ulong start;    /* guest address returned by do_shmat() */
    abi_ulong size;     /* segment size from IPC_STAT at attach time */
    bool in_use;        /* slot occupied */
} shm_regions[N_SHM_REGIONS];
#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct; used when the target does not
 * provide its own layout.  The 32-bit ABI pads each time field to
 * 64 bits with an unused word, matching the kernel's asm-generic
 * sembuf.h layout.
 */
struct target_semid64_ds
{
    struct target_ipc_perm sem_perm;
    abi_ulong sem_otime;        /* last semop time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong sem_ctime;        /* last change time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong sem_nsems;        /* number of semaphores in set */
    abi_ulong __unused3;
    abi_ulong __unused4;
};
#endif
3828 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
3829 abi_ulong target_addr
)
3831 struct target_ipc_perm
*target_ip
;
3832 struct target_semid64_ds
*target_sd
;
3834 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3835 return -TARGET_EFAULT
;
3836 target_ip
= &(target_sd
->sem_perm
);
3837 host_ip
->__key
= tswap32(target_ip
->__key
);
3838 host_ip
->uid
= tswap32(target_ip
->uid
);
3839 host_ip
->gid
= tswap32(target_ip
->gid
);
3840 host_ip
->cuid
= tswap32(target_ip
->cuid
);
3841 host_ip
->cgid
= tswap32(target_ip
->cgid
);
3842 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3843 host_ip
->mode
= tswap32(target_ip
->mode
);
3845 host_ip
->mode
= tswap16(target_ip
->mode
);
3847 #if defined(TARGET_PPC)
3848 host_ip
->__seq
= tswap32(target_ip
->__seq
);
3850 host_ip
->__seq
= tswap16(target_ip
->__seq
);
3852 unlock_user_struct(target_sd
, target_addr
, 0);
3856 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
3857 struct ipc_perm
*host_ip
)
3859 struct target_ipc_perm
*target_ip
;
3860 struct target_semid64_ds
*target_sd
;
3862 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3863 return -TARGET_EFAULT
;
3864 target_ip
= &(target_sd
->sem_perm
);
3865 target_ip
->__key
= tswap32(host_ip
->__key
);
3866 target_ip
->uid
= tswap32(host_ip
->uid
);
3867 target_ip
->gid
= tswap32(host_ip
->gid
);
3868 target_ip
->cuid
= tswap32(host_ip
->cuid
);
3869 target_ip
->cgid
= tswap32(host_ip
->cgid
);
3870 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3871 target_ip
->mode
= tswap32(host_ip
->mode
);
3873 target_ip
->mode
= tswap16(host_ip
->mode
);
3875 #if defined(TARGET_PPC)
3876 target_ip
->__seq
= tswap32(host_ip
->__seq
);
3878 target_ip
->__seq
= tswap16(host_ip
->__seq
);
3880 unlock_user_struct(target_sd
, target_addr
, 1);
3884 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
3885 abi_ulong target_addr
)
3887 struct target_semid64_ds
*target_sd
;
3889 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3890 return -TARGET_EFAULT
;
3891 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
3892 return -TARGET_EFAULT
;
3893 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
3894 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
3895 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
3896 unlock_user_struct(target_sd
, target_addr
, 0);
3900 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
3901 struct semid_ds
*host_sd
)
3903 struct target_semid64_ds
*target_sd
;
3905 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3906 return -TARGET_EFAULT
;
3907 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
3908 return -TARGET_EFAULT
;
3909 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
3910 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
3911 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
3912 unlock_user_struct(target_sd
, target_addr
, 1);
/* Guest-layout mirror of the host's struct seminfo (semctl IPC_INFO /
 * SEM_INFO).  Field list matches the copies in host_to_target_seminfo();
 * NOTE(review): field types reconstructed as int — confirm against the
 * original file.
 */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
3929 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
3930 struct seminfo
*host_seminfo
)
3932 struct target_seminfo
*target_seminfo
;
3933 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
3934 return -TARGET_EFAULT
;
3935 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
3936 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
3937 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
3938 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
3939 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
3940 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
3941 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
3942 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
3943 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
3944 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
3945 unlock_user_struct(target_seminfo
, target_addr
, 1);
/* Host-side semctl argument union (glibc does not always define it). */
union semun {
    struct semid_ds *buf;       /* IPC_STAT / IPC_SET */
    unsigned short *array;      /* GETALL / SETALL */
    struct seminfo *__buf;      /* IPC_INFO / SEM_INFO */
};

/* Guest-side view of the same union: pointers are guest addresses.
 * NOTE(review): member list reconstructed from uses in do_semctl()
 * (.val, .buf, .array, .__buf) — confirm against the original file.
 */
union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};
3963 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
3964 abi_ulong target_addr
)
3967 unsigned short *array
;
3969 struct semid_ds semid_ds
;
3972 semun
.buf
= &semid_ds
;
3974 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3976 return get_errno(ret
);
3978 nsems
= semid_ds
.sem_nsems
;
3980 *host_array
= g_try_new(unsigned short, nsems
);
3982 return -TARGET_ENOMEM
;
3984 array
= lock_user(VERIFY_READ
, target_addr
,
3985 nsems
*sizeof(unsigned short), 1);
3987 g_free(*host_array
);
3988 return -TARGET_EFAULT
;
3991 for(i
=0; i
<nsems
; i
++) {
3992 __get_user((*host_array
)[i
], &array
[i
]);
3994 unlock_user(array
, target_addr
, 0);
3999 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
4000 unsigned short **host_array
)
4003 unsigned short *array
;
4005 struct semid_ds semid_ds
;
4008 semun
.buf
= &semid_ds
;
4010 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4012 return get_errno(ret
);
4014 nsems
= semid_ds
.sem_nsems
;
4016 array
= lock_user(VERIFY_WRITE
, target_addr
,
4017 nsems
*sizeof(unsigned short), 0);
4019 return -TARGET_EFAULT
;
4021 for(i
=0; i
<nsems
; i
++) {
4022 __put_user((*host_array
)[i
], &array
[i
]);
4024 g_free(*host_array
);
4025 unlock_user(array
, target_addr
, 1);
/*
 * Emulate semctl(2) for the guest.  target_arg is the guest's semun
 * union passed by value; each command family converts the appropriate
 * member, performs the host semctl, and converts results back.
 * NOTE(review): the case labels and a few glue lines below were lost in
 * extraction and are reconstructed from the visible bodies — confirm
 * against the original file.
 */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;

    cmd &= 0xff;

    switch (cmd) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element.  To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
         * a swap of the 4 byte val field. In other cases, the data is
         * already in proper host byte order. */
        if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        /* Marshal the guest value array into a host one. */
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err) {
            return err;
        }
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        /* Copy results back and free the host array. */
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err) {
            return err;
        }
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err) {
            return err;
        }
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err) {
            return err;
        }
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err) {
            return err;
        }
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        /* No pointer argument for these commands. */
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
/* Guest-layout semop operation descriptor (mirror of host struct
 * sembuf).  NOTE(review): only sem_num is visible in the extraction;
 * sem_op/sem_flg types reconstructed as short — confirm.
 */
struct target_sembuf {
    unsigned short sem_num;   /* semaphore index in the set */
    short sem_op;             /* operation (+/-/0) */
    short sem_flg;            /* IPC_NOWAIT, SEM_UNDO */
};
4106 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
4107 abi_ulong target_addr
,
4110 struct target_sembuf
*target_sembuf
;
4113 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
4114 nsops
*sizeof(struct target_sembuf
), 1);
4116 return -TARGET_EFAULT
;
4118 for(i
=0; i
<nsops
; i
++) {
4119 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
4120 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
4121 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
4124 unlock_user(target_sembuf
, target_addr
, 0);
#if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
    defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)

/*
 * This macro is required to handle the s390 variants, which passes the
 * arguments in a different order than default.
 */
#ifdef __s390x__
/* s390 sys_ipc: (nsops, timeout, sops) */
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), (__timeout), (__sops)
#else
/* generic sys_ipc: (nsops, 0, sops, timeout) */
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), 0, (__sops), (__timeout)
#endif
/*
 * Emulate semop()/semtimedop() for the guest.  ptr points at the guest
 * sembuf array, timeout (if non-zero) at a guest timespec whose width
 * is selected by time64.  Falls back to sys_ipc when the host lacks a
 * direct semtimedop syscall.
 * NOTE(review): several interior lines (parameter list middle, timeout
 * guard, g_free calls) were lost in extraction and are reconstructed —
 * confirm against the original file.
 */
static inline abi_long do_semtimedop(int semid,
                                     abi_long ptr,
                                     unsigned nsops,
                                     abi_long timeout, bool time64)
{
    struct sembuf *sops;
    struct timespec ts, *pts = NULL;
    abi_long ret;

    if (timeout) {
        pts = &ts;
        if (time64) {
            if (target_to_host_timespec64(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        }
    }

    /* Match the kernel's limit on operations per call. */
    if (nsops > TARGET_SEMOPM) {
        return -TARGET_E2BIG;
    }

    sops = g_new(struct sembuf, nsops);

    if (target_to_host_sembuf(sops, ptr, nsops)) {
        g_free(sops);
        return -TARGET_EFAULT;
    }

    ret = -TARGET_ENOSYS;
#ifdef __NR_semtimedop
    ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        /* Legacy multiplexed syscall; argument order is per-arch. */
        ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
                                 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
    }
#endif
    g_free(sops);
    return ret;
}
/* Guest-layout msqid64_ds (asm-generic shape): on 32-bit ABIs each time
 * field is padded to 64 bits with an unused word.
 */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;        /* last msgsnd time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;        /* last msgrcv time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;        /* last change time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;     /* current bytes on queue */
    abi_ulong msg_qnum;         /* messages on queue */
    abi_ulong msg_qbytes;       /* max bytes allowed on queue */
    abi_ulong msg_lspid;        /* pid of last msgsnd */
    abi_ulong msg_lrpid;        /* pid of last msgrcv */
    abi_ulong __unused4;
    abi_ulong __unused5;
};
4216 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
4217 abi_ulong target_addr
)
4219 struct target_msqid_ds
*target_md
;
4221 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
4222 return -TARGET_EFAULT
;
4223 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
4224 return -TARGET_EFAULT
;
4225 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
4226 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
4227 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
4228 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
4229 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
4230 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
4231 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
4232 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
4233 unlock_user_struct(target_md
, target_addr
, 0);
4237 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
4238 struct msqid_ds
*host_md
)
4240 struct target_msqid_ds
*target_md
;
4242 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
4243 return -TARGET_EFAULT
;
4244 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
4245 return -TARGET_EFAULT
;
4246 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
4247 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
4248 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
4249 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
4250 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
4251 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
4252 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
4253 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
4254 unlock_user_struct(target_md
, target_addr
, 1);
/* Guest-layout mirror of struct msginfo (msgctl IPC_INFO / MSG_INFO).
 * NOTE(review): only msgseg is visible in the extraction; the other
 * fields (reconstructed as int from host_to_target_msginfo()) should be
 * confirmed against the original file.
 */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
4269 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
4270 struct msginfo
*host_msginfo
)
4272 struct target_msginfo
*target_msginfo
;
4273 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
4274 return -TARGET_EFAULT
;
4275 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
4276 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
4277 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
4278 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
4279 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
4280 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
4281 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
4282 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
4283 unlock_user_struct(target_msginfo
, target_addr
, 1);
/*
 * Emulate msgctl(2) for the guest.  ptr is the guest msqid_ds/msginfo
 * address for the commands that take one.
 * NOTE(review): the case labels below were lost in extraction and are
 * reconstructed from the visible bodies — confirm against the original
 * file.
 */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg, ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
/* Guest-layout message buffer for msgsnd/msgrcv: type word followed by
 * the message text.  NOTE(review): members reconstructed from uses in
 * do_msgsnd()/do_msgrcv() (mtype, mtext) — confirm the declared mtext
 * size against the original file.
 */
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};
/*
 * Emulate msgsnd(2): copy the guest message into a host msgbuf
 * (swapping mtype), send it, and fall back to sys_ipc when the host has
 * no direct msgsnd syscall.
 * NOTE(review): the #if/#else scaffolding around the two safe_ipc calls
 * and some glue lines were lost in extraction and are reconstructed —
 * confirm against the original file.
 */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgsnd
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
#if defined(__sparc__) || defined(__s390x__)
        /* These sys_ipc variants take the buffer as the 5th argument
         * directly. */
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb));
#else
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb, 0));
#endif
    }
#endif
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
#if defined(__sparc__)
/* SPARC for msgrcv it does not use the kludge on final 2 arguments. */
#define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
#elif defined(__s390x__)
/* The s390 sys_ipc variant has only five parameters. */
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp})
#else
/* Generic sys_ipc: pointer/type pair packed in an array, plus a 0. */
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp}), 0
#endif
/*
 * Emulate msgrcv(2): receive into a host msgbuf, then copy the text and
 * the (swapped) mtype back to the guest buffer at msgp.  Falls back to
 * sys_ipc when the host lacks a direct msgrcv syscall.
 * NOTE(review): some glue lines (declarations, the final parameter,
 * cleanup labels) were lost in extraction and are reconstructed —
 * confirm against the original file.
 */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgrcv
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
                        msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
    }
#endif

    if (ret > 0) {
        /* ret is the number of text bytes received; copy them out just
         * past the guest's mtype word. */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
4431 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
4432 abi_ulong target_addr
)
4434 struct target_shmid_ds
*target_sd
;
4436 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4437 return -TARGET_EFAULT
;
4438 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
4439 return -TARGET_EFAULT
;
4440 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4441 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4442 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4443 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4444 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4445 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4446 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4447 unlock_user_struct(target_sd
, target_addr
, 0);
4451 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
4452 struct shmid_ds
*host_sd
)
4454 struct target_shmid_ds
*target_sd
;
4456 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4457 return -TARGET_EFAULT
;
4458 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
4459 return -TARGET_EFAULT
;
4460 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4461 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4462 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4463 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4464 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4465 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4466 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4467 unlock_user_struct(target_sd
, target_addr
, 1);
/* Guest-layout mirror of struct shminfo (shmctl IPC_INFO).
 * NOTE(review): fields reconstructed from host_to_target_shminfo() —
 * confirm types against the original file.
 */
struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};
4479 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
4480 struct shminfo
*host_shminfo
)
4482 struct target_shminfo
*target_shminfo
;
4483 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
4484 return -TARGET_EFAULT
;
4485 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
4486 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
4487 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
4488 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
4489 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
4490 unlock_user_struct(target_shminfo
, target_addr
, 1);
/* Guest-layout mirror of struct shm_info (shmctl SHM_INFO).
 * NOTE(review): only swap_attempts/swap_successes are visible in the
 * extraction; the remaining fields are reconstructed from
 * host_to_target_shm_info() — confirm types against the original file.
 */
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};
4503 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
4504 struct shm_info
*host_shm_info
)
4506 struct target_shm_info
*target_shm_info
;
4507 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
4508 return -TARGET_EFAULT
;
4509 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
4510 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
4511 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
4512 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
4513 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
4514 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
4515 unlock_user_struct(target_shm_info
, target_addr
, 1);
/*
 * Emulate shmctl(2) for the guest.  buf is the guest address of the
 * command's data structure (shmid_ds / shminfo / shm_info).
 * NOTE(review): the case labels below were lost in extraction and are
 * reconstructed from the visible bodies — confirm against the original
 * file.
 */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    /* Default: guest SHMLBA == guest page size. */
    return TARGET_PAGE_SIZE;
}
#endif
/*
 * Emulate shmat(2): validate/round the guest address against the guest
 * SHMLBA, attach on the host (finding a free VMA when the guest gave no
 * address), expose the pages to the guest, and record the mapping in
 * shm_regions[] for do_shmdt().
 * NOTE(review): several interior lines (declarations, mmap locking,
 * final return) were lost in extraction and are reconstructed — confirm
 * against the original file.
 */
static inline abi_ulong do_shmat(CPUArchState *cpu_env,
                                 int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i, ret;
    abi_ulong shmlba;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    /* Misaligned addresses are rounded down with SHM_RND, else rejected. */
    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }
    if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();

    if (shmaddr) {
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    } else {
        abi_ulong mmap_start;

        /* In order to use the host shmat, we need to honor host SHMLBA.  */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else {
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
        }
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr = h2g((unsigned long)host_raddr);

    /* Make the attached range visible to guest code. */
    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY) ? 0 : PAGE_WRITE));

    /* Remember the mapping so do_shmdt() can undo the page flags. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;
}
/*
 * Emulate shmdt(2): clear the page flags recorded for this attachment
 * in shm_regions[] and detach on the host.
 * NOTE(review): declarations and locking lines were lost in extraction
 * and are reconstructed — confirm against the original file.
 */
static inline abi_long do_shmdt(abi_ulong shmaddr)
{
    int i;
    abi_long rv;

    mmap_lock();

    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
            shm_regions[i].in_use = false;
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
            break;
        }
    }
    rv = get_errno(shmdt(g2h(shmaddr)));

    mmap_unlock();

    return rv;
}
4669 #ifdef TARGET_NR_ipc
4670 /* ??? This only works with linear mappings. */
4671 /* do_ipc() must return target values and target errnos. */
4672 static abi_long
do_ipc(CPUArchState
*cpu_env
,
4673 unsigned int call
, abi_long first
,
4674 abi_long second
, abi_long third
,
4675 abi_long ptr
, abi_long fifth
)
4680 version
= call
>> 16;
4685 ret
= do_semtimedop(first
, ptr
, second
, 0, false);
4687 case IPCOP_semtimedop
:
4689 * The s390 sys_ipc variant has only five parameters instead of six
4690 * (as for default variant) and the only difference is the handling of
4691 * SEMTIMEDOP where on s390 the third parameter is used as a pointer
4692 * to a struct timespec where the generic variant uses fifth parameter.
4694 #if defined(TARGET_S390X)
4695 ret
= do_semtimedop(first
, ptr
, second
, third
, TARGET_ABI_BITS
== 64);
4697 ret
= do_semtimedop(first
, ptr
, second
, fifth
, TARGET_ABI_BITS
== 64);
4702 ret
= get_errno(semget(first
, second
, third
));
4705 case IPCOP_semctl
: {
4706 /* The semun argument to semctl is passed by value, so dereference the
4709 get_user_ual(atptr
, ptr
);
4710 ret
= do_semctl(first
, second
, third
, atptr
);
4715 ret
= get_errno(msgget(first
, second
));
4719 ret
= do_msgsnd(first
, ptr
, second
, third
);
4723 ret
= do_msgctl(first
, second
, ptr
);
4730 struct target_ipc_kludge
{
4735 if (!lock_user_struct(VERIFY_READ
, tmp
, ptr
, 1)) {
4736 ret
= -TARGET_EFAULT
;
4740 ret
= do_msgrcv(first
, tswapal(tmp
->msgp
), second
, tswapal(tmp
->msgtyp
), third
);
4742 unlock_user_struct(tmp
, ptr
, 0);
4746 ret
= do_msgrcv(first
, ptr
, second
, fifth
, third
);
4755 raddr
= do_shmat(cpu_env
, first
, ptr
, second
);
4756 if (is_error(raddr
))
4757 return get_errno(raddr
);
4758 if (put_user_ual(raddr
, third
))
4759 return -TARGET_EFAULT
;
4763 ret
= -TARGET_EINVAL
;
4768 ret
= do_shmdt(ptr
);
4772 /* IPC_* flag values are the same on all linux platforms */
4773 ret
= get_errno(shmget(first
, second
, third
));
4776 /* IPC_* and SHM_* command values are the same on all linux platforms */
4778 ret
= do_shmctl(first
, second
, ptr
);
4781 qemu_log_mask(LOG_UNIMP
, "Unsupported ipc call: %d (version %d)\n",
4783 ret
= -TARGET_ENOSYS
;
/* kernel structure types definitions */
/* First pass over syscall_types.h: build an enum of STRUCT_xxx ids. */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

/* Second pass: emit a thunk type-description array per structure. */
#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL
4809 #ifdef CONFIG_FIEMAP
4810 /* So fiemap access checks don't overflow on 32 bit systems.
4811 * This is very slightly smaller than the limit imposed by
4812 * the underlying kernel.
4814 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4815 / sizeof(struct fiemap_extent))
4817 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4818 int fd
, int cmd
, abi_long arg
)
4820 /* The parameter for this ioctl is a struct fiemap followed
4821 * by an array of struct fiemap_extent whose size is set
4822 * in fiemap->fm_extent_count. The array is filled in by the
4825 int target_size_in
, target_size_out
;
4827 const argtype
*arg_type
= ie
->arg_type
;
4828 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
4831 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
4835 assert(arg_type
[0] == TYPE_PTR
);
4836 assert(ie
->access
== IOC_RW
);
4838 target_size_in
= thunk_type_size(arg_type
, 0);
4839 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
4841 return -TARGET_EFAULT
;
4843 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4844 unlock_user(argptr
, arg
, 0);
4845 fm
= (struct fiemap
*)buf_temp
;
4846 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
4847 return -TARGET_EINVAL
;
4850 outbufsz
= sizeof (*fm
) +
4851 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
4853 if (outbufsz
> MAX_STRUCT_SIZE
) {
4854 /* We can't fit all the extents into the fixed size buffer.
4855 * Allocate one that is large enough and use it instead.
4857 fm
= g_try_malloc(outbufsz
);
4859 return -TARGET_ENOMEM
;
4861 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
4864 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, fm
));
4865 if (!is_error(ret
)) {
4866 target_size_out
= target_size_in
;
4867 /* An extent_count of 0 means we were only counting the extents
4868 * so there are no structs to copy
4870 if (fm
->fm_extent_count
!= 0) {
4871 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
4873 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
4875 ret
= -TARGET_EFAULT
;
4877 /* Convert the struct fiemap */
4878 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
4879 if (fm
->fm_extent_count
!= 0) {
4880 p
= argptr
+ target_size_in
;
4881 /* ...and then all the struct fiemap_extents */
4882 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
4883 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
4888 unlock_user(argptr
, arg
, target_size_out
);
4898 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4899 int fd
, int cmd
, abi_long arg
)
4901 const argtype
*arg_type
= ie
->arg_type
;
4905 struct ifconf
*host_ifconf
;
4907 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
4908 int target_ifreq_size
;
4913 abi_long target_ifc_buf
;
4917 assert(arg_type
[0] == TYPE_PTR
);
4918 assert(ie
->access
== IOC_RW
);
4921 target_size
= thunk_type_size(arg_type
, 0);
4923 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4925 return -TARGET_EFAULT
;
4926 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4927 unlock_user(argptr
, arg
, 0);
4929 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
4930 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
4931 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
4933 if (target_ifc_buf
!= 0) {
4934 target_ifc_len
= host_ifconf
->ifc_len
;
4935 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
4936 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
4938 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
4939 if (outbufsz
> MAX_STRUCT_SIZE
) {
4941 * We can't fit all the extents into the fixed size buffer.
4942 * Allocate one that is large enough and use it instead.
4944 host_ifconf
= malloc(outbufsz
);
4946 return -TARGET_ENOMEM
;
4948 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
4951 host_ifc_buf
= (char *)host_ifconf
+ sizeof(*host_ifconf
);
4953 host_ifconf
->ifc_len
= host_ifc_len
;
4955 host_ifc_buf
= NULL
;
4957 host_ifconf
->ifc_buf
= host_ifc_buf
;
4959 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
4960 if (!is_error(ret
)) {
4961 /* convert host ifc_len to target ifc_len */
4963 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
4964 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
4965 host_ifconf
->ifc_len
= target_ifc_len
;
4967 /* restore target ifc_buf */
4969 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
4971 /* copy struct ifconf to target user */
4973 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4975 return -TARGET_EFAULT
;
4976 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
4977 unlock_user(argptr
, arg
, target_size
);
4979 if (target_ifc_buf
!= 0) {
4980 /* copy ifreq[] to target user */
4981 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
4982 for (i
= 0; i
< nb_ifreq
; i
++) {
4983 thunk_convert(argptr
+ i
* target_ifreq_size
,
4984 host_ifc_buf
+ i
* sizeof(struct ifreq
),
4985 ifreq_arg_type
, THUNK_TARGET
);
4987 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
#if defined(CONFIG_USBFS)
#if HOST_LONG_BITS > 64
#error USBDEVFS thunks do not support >64 bit hosts yet.
#endif
/* Tracks one in-flight USBDEVFS_SUBMITURB so the reap ioctl can map the
 * host urb back to the guest's urb and data buffer.
 */
struct live_urb {
    uint64_t target_urb_adr;    /* guest address of the guest urb */
    uint64_t target_buf_adr;    /* guest address of the data buffer */
    char *target_buf_ptr;       /* locked host view of the guest buffer */
    struct usbdevfs_urb host_urb;
};
5009 static GHashTable
*usbdevfs_urb_hashtable(void)
5011 static GHashTable
*urb_hashtable
;
5013 if (!urb_hashtable
) {
5014 urb_hashtable
= g_hash_table_new(g_int64_hash
, g_int64_equal
);
5016 return urb_hashtable
;
5019 static void urb_hashtable_insert(struct live_urb
*urb
)
5021 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
5022 g_hash_table_insert(urb_hashtable
, urb
, urb
);
5025 static struct live_urb
*urb_hashtable_lookup(uint64_t target_urb_adr
)
5027 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
5028 return g_hash_table_lookup(urb_hashtable
, &target_urb_adr
);
5031 static void urb_hashtable_remove(struct live_urb
*urb
)
5033 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
5034 g_hash_table_remove(urb_hashtable
, urb
);
5038 do_ioctl_usbdevfs_reapurb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5039 int fd
, int cmd
, abi_long arg
)
5041 const argtype usbfsurb_arg_type
[] = { MK_STRUCT(STRUCT_usbdevfs_urb
) };
5042 const argtype ptrvoid_arg_type
[] = { TYPE_PTRVOID
, 0, 0 };
5043 struct live_urb
*lurb
;
5047 uintptr_t target_urb_adr
;
5050 target_size
= thunk_type_size(usbfsurb_arg_type
, THUNK_TARGET
);
5052 memset(buf_temp
, 0, sizeof(uint64_t));
5053 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5054 if (is_error(ret
)) {
5058 memcpy(&hurb
, buf_temp
, sizeof(uint64_t));
5059 lurb
= (void *)((uintptr_t)hurb
- offsetof(struct live_urb
, host_urb
));
5060 if (!lurb
->target_urb_adr
) {
5061 return -TARGET_EFAULT
;
5063 urb_hashtable_remove(lurb
);
5064 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
,
5065 lurb
->host_urb
.buffer_length
);
5066 lurb
->target_buf_ptr
= NULL
;
5068 /* restore the guest buffer pointer */
5069 lurb
->host_urb
.buffer
= (void *)(uintptr_t)lurb
->target_buf_adr
;
5071 /* update the guest urb struct */
5072 argptr
= lock_user(VERIFY_WRITE
, lurb
->target_urb_adr
, target_size
, 0);
5075 return -TARGET_EFAULT
;
5077 thunk_convert(argptr
, &lurb
->host_urb
, usbfsurb_arg_type
, THUNK_TARGET
);
5078 unlock_user(argptr
, lurb
->target_urb_adr
, target_size
);
5080 target_size
= thunk_type_size(ptrvoid_arg_type
, THUNK_TARGET
);
5081 /* write back the urb handle */
5082 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5085 return -TARGET_EFAULT
;
5088 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5089 target_urb_adr
= lurb
->target_urb_adr
;
5090 thunk_convert(argptr
, &target_urb_adr
, ptrvoid_arg_type
, THUNK_TARGET
);
5091 unlock_user(argptr
, arg
, target_size
);
5098 do_ioctl_usbdevfs_discardurb(const IOCTLEntry
*ie
,
5099 uint8_t *buf_temp
__attribute__((unused
)),
5100 int fd
, int cmd
, abi_long arg
)
5102 struct live_urb
*lurb
;
5104 /* map target address back to host URB with metadata. */
5105 lurb
= urb_hashtable_lookup(arg
);
5107 return -TARGET_EFAULT
;
5109 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
5113 do_ioctl_usbdevfs_submiturb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5114 int fd
, int cmd
, abi_long arg
)
5116 const argtype
*arg_type
= ie
->arg_type
;
5121 struct live_urb
*lurb
;
5124 * each submitted URB needs to map to a unique ID for the
5125 * kernel, and that unique ID needs to be a pointer to
5126 * host memory. hence, we need to malloc for each URB.
5127 * isochronous transfers have a variable length struct.
5130 target_size
= thunk_type_size(arg_type
, THUNK_TARGET
);
5132 /* construct host copy of urb and metadata */
5133 lurb
= g_try_malloc0(sizeof(struct live_urb
));
5135 return -TARGET_ENOMEM
;
5138 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5141 return -TARGET_EFAULT
;
5143 thunk_convert(&lurb
->host_urb
, argptr
, arg_type
, THUNK_HOST
);
5144 unlock_user(argptr
, arg
, 0);
5146 lurb
->target_urb_adr
= arg
;
5147 lurb
->target_buf_adr
= (uintptr_t)lurb
->host_urb
.buffer
;
5149 /* buffer space used depends on endpoint type so lock the entire buffer */
5150 /* control type urbs should check the buffer contents for true direction */
5151 rw_dir
= lurb
->host_urb
.endpoint
& USB_DIR_IN
? VERIFY_WRITE
: VERIFY_READ
;
5152 lurb
->target_buf_ptr
= lock_user(rw_dir
, lurb
->target_buf_adr
,
5153 lurb
->host_urb
.buffer_length
, 1);
5154 if (lurb
->target_buf_ptr
== NULL
) {
5156 return -TARGET_EFAULT
;
5159 /* update buffer pointer in host copy */
5160 lurb
->host_urb
.buffer
= lurb
->target_buf_ptr
;
5162 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
5163 if (is_error(ret
)) {
5164 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
, 0);
5167 urb_hashtable_insert(lurb
);
5172 #endif /* CONFIG_USBFS */
5174 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5175 int cmd
, abi_long arg
)
5178 struct dm_ioctl
*host_dm
;
5179 abi_long guest_data
;
5180 uint32_t guest_data_size
;
5182 const argtype
*arg_type
= ie
->arg_type
;
5184 void *big_buf
= NULL
;
5188 target_size
= thunk_type_size(arg_type
, 0);
5189 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5191 ret
= -TARGET_EFAULT
;
5194 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5195 unlock_user(argptr
, arg
, 0);
5197 /* buf_temp is too small, so fetch things into a bigger buffer */
5198 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
5199 memcpy(big_buf
, buf_temp
, target_size
);
5203 guest_data
= arg
+ host_dm
->data_start
;
5204 if ((guest_data
- arg
) < 0) {
5205 ret
= -TARGET_EINVAL
;
5208 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5209 host_data
= (char*)host_dm
+ host_dm
->data_start
;
5211 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
5213 ret
= -TARGET_EFAULT
;
5217 switch (ie
->host_cmd
) {
5219 case DM_LIST_DEVICES
:
5222 case DM_DEV_SUSPEND
:
5225 case DM_TABLE_STATUS
:
5226 case DM_TABLE_CLEAR
:
5228 case DM_LIST_VERSIONS
:
5232 case DM_DEV_SET_GEOMETRY
:
5233 /* data contains only strings */
5234 memcpy(host_data
, argptr
, guest_data_size
);
5237 memcpy(host_data
, argptr
, guest_data_size
);
5238 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
5242 void *gspec
= argptr
;
5243 void *cur_data
= host_data
;
5244 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5245 int spec_size
= thunk_type_size(arg_type
, 0);
5248 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5249 struct dm_target_spec
*spec
= cur_data
;
5253 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
5254 slen
= strlen((char*)gspec
+ spec_size
) + 1;
5256 spec
->next
= sizeof(*spec
) + slen
;
5257 strcpy((char*)&spec
[1], gspec
+ spec_size
);
5259 cur_data
+= spec
->next
;
5264 ret
= -TARGET_EINVAL
;
5265 unlock_user(argptr
, guest_data
, 0);
5268 unlock_user(argptr
, guest_data
, 0);
5270 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5271 if (!is_error(ret
)) {
5272 guest_data
= arg
+ host_dm
->data_start
;
5273 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5274 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
5275 switch (ie
->host_cmd
) {
5280 case DM_DEV_SUSPEND
:
5283 case DM_TABLE_CLEAR
:
5285 case DM_DEV_SET_GEOMETRY
:
5286 /* no return data */
5288 case DM_LIST_DEVICES
:
5290 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
5291 uint32_t remaining_data
= guest_data_size
;
5292 void *cur_data
= argptr
;
5293 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
5294 int nl_size
= 12; /* can't use thunk_size due to alignment */
5297 uint32_t next
= nl
->next
;
5299 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
5301 if (remaining_data
< nl
->next
) {
5302 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5305 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
5306 strcpy(cur_data
+ nl_size
, nl
->name
);
5307 cur_data
+= nl
->next
;
5308 remaining_data
-= nl
->next
;
5312 nl
= (void*)nl
+ next
;
5317 case DM_TABLE_STATUS
:
5319 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
5320 void *cur_data
= argptr
;
5321 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5322 int spec_size
= thunk_type_size(arg_type
, 0);
5325 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5326 uint32_t next
= spec
->next
;
5327 int slen
= strlen((char*)&spec
[1]) + 1;
5328 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
5329 if (guest_data_size
< spec
->next
) {
5330 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5333 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
5334 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
5335 cur_data
= argptr
+ spec
->next
;
5336 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
5342 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
5343 int count
= *(uint32_t*)hdata
;
5344 uint64_t *hdev
= hdata
+ 8;
5345 uint64_t *gdev
= argptr
+ 8;
5348 *(uint32_t*)argptr
= tswap32(count
);
5349 for (i
= 0; i
< count
; i
++) {
5350 *gdev
= tswap64(*hdev
);
5356 case DM_LIST_VERSIONS
:
5358 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
5359 uint32_t remaining_data
= guest_data_size
;
5360 void *cur_data
= argptr
;
5361 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
5362 int vers_size
= thunk_type_size(arg_type
, 0);
5365 uint32_t next
= vers
->next
;
5367 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
5369 if (remaining_data
< vers
->next
) {
5370 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5373 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
5374 strcpy(cur_data
+ vers_size
, vers
->name
);
5375 cur_data
+= vers
->next
;
5376 remaining_data
-= vers
->next
;
5380 vers
= (void*)vers
+ next
;
5385 unlock_user(argptr
, guest_data
, 0);
5386 ret
= -TARGET_EINVAL
;
5389 unlock_user(argptr
, guest_data
, guest_data_size
);
5391 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5393 ret
= -TARGET_EFAULT
;
5396 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5397 unlock_user(argptr
, arg
, target_size
);
5404 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5405 int cmd
, abi_long arg
)
5409 const argtype
*arg_type
= ie
->arg_type
;
5410 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
5413 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
5414 struct blkpg_partition host_part
;
5416 /* Read and convert blkpg */
5418 target_size
= thunk_type_size(arg_type
, 0);
5419 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5421 ret
= -TARGET_EFAULT
;
5424 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5425 unlock_user(argptr
, arg
, 0);
5427 switch (host_blkpg
->op
) {
5428 case BLKPG_ADD_PARTITION
:
5429 case BLKPG_DEL_PARTITION
:
5430 /* payload is struct blkpg_partition */
5433 /* Unknown opcode */
5434 ret
= -TARGET_EINVAL
;
5438 /* Read and convert blkpg->data */
5439 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
5440 target_size
= thunk_type_size(part_arg_type
, 0);
5441 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5443 ret
= -TARGET_EFAULT
;
5446 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
5447 unlock_user(argptr
, arg
, 0);
5449 /* Swizzle the data pointer to our local copy and call! */
5450 host_blkpg
->data
= &host_part
;
5451 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
5457 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5458 int fd
, int cmd
, abi_long arg
)
5460 const argtype
*arg_type
= ie
->arg_type
;
5461 const StructEntry
*se
;
5462 const argtype
*field_types
;
5463 const int *dst_offsets
, *src_offsets
;
5466 abi_ulong
*target_rt_dev_ptr
= NULL
;
5467 unsigned long *host_rt_dev_ptr
= NULL
;
5471 assert(ie
->access
== IOC_W
);
5472 assert(*arg_type
== TYPE_PTR
);
5474 assert(*arg_type
== TYPE_STRUCT
);
5475 target_size
= thunk_type_size(arg_type
, 0);
5476 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5478 return -TARGET_EFAULT
;
5481 assert(*arg_type
== (int)STRUCT_rtentry
);
5482 se
= struct_entries
+ *arg_type
++;
5483 assert(se
->convert
[0] == NULL
);
5484 /* convert struct here to be able to catch rt_dev string */
5485 field_types
= se
->field_types
;
5486 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
5487 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
5488 for (i
= 0; i
< se
->nb_fields
; i
++) {
5489 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
5490 assert(*field_types
== TYPE_PTRVOID
);
5491 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
5492 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
5493 if (*target_rt_dev_ptr
!= 0) {
5494 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
5495 tswapal(*target_rt_dev_ptr
));
5496 if (!*host_rt_dev_ptr
) {
5497 unlock_user(argptr
, arg
, 0);
5498 return -TARGET_EFAULT
;
5501 *host_rt_dev_ptr
= 0;
5506 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
5507 argptr
+ src_offsets
[i
],
5508 field_types
, THUNK_HOST
);
5510 unlock_user(argptr
, arg
, 0);
5512 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5514 assert(host_rt_dev_ptr
!= NULL
);
5515 assert(target_rt_dev_ptr
!= NULL
);
5516 if (*host_rt_dev_ptr
!= 0) {
5517 unlock_user((void *)*host_rt_dev_ptr
,
5518 *target_rt_dev_ptr
, 0);
5523 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5524 int fd
, int cmd
, abi_long arg
)
5526 int sig
= target_to_host_signal(arg
);
5527 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
5530 static abi_long
do_ioctl_SIOCGSTAMP(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5531 int fd
, int cmd
, abi_long arg
)
5536 ret
= get_errno(safe_ioctl(fd
, SIOCGSTAMP
, &tv
));
5537 if (is_error(ret
)) {
5541 if (cmd
== (int)TARGET_SIOCGSTAMP_OLD
) {
5542 if (copy_to_user_timeval(arg
, &tv
)) {
5543 return -TARGET_EFAULT
;
5546 if (copy_to_user_timeval64(arg
, &tv
)) {
5547 return -TARGET_EFAULT
;
5554 static abi_long
do_ioctl_SIOCGSTAMPNS(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5555 int fd
, int cmd
, abi_long arg
)
5560 ret
= get_errno(safe_ioctl(fd
, SIOCGSTAMPNS
, &ts
));
5561 if (is_error(ret
)) {
5565 if (cmd
== (int)TARGET_SIOCGSTAMPNS_OLD
) {
5566 if (host_to_target_timespec(arg
, &ts
)) {
5567 return -TARGET_EFAULT
;
5570 if (host_to_target_timespec64(arg
, &ts
)) {
5571 return -TARGET_EFAULT
;
5579 static abi_long
do_ioctl_tiocgptpeer(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5580 int fd
, int cmd
, abi_long arg
)
5582 int flags
= target_to_host_bitmask(arg
, fcntl_flags_tbl
);
5583 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, flags
));
5589 static void unlock_drm_version(struct drm_version
*host_ver
,
5590 struct target_drm_version
*target_ver
,
5593 unlock_user(host_ver
->name
, target_ver
->name
,
5594 copy
? host_ver
->name_len
: 0);
5595 unlock_user(host_ver
->date
, target_ver
->date
,
5596 copy
? host_ver
->date_len
: 0);
5597 unlock_user(host_ver
->desc
, target_ver
->desc
,
5598 copy
? host_ver
->desc_len
: 0);
5601 static inline abi_long
target_to_host_drmversion(struct drm_version
*host_ver
,
5602 struct target_drm_version
*target_ver
)
5604 memset(host_ver
, 0, sizeof(*host_ver
));
5606 __get_user(host_ver
->name_len
, &target_ver
->name_len
);
5607 if (host_ver
->name_len
) {
5608 host_ver
->name
= lock_user(VERIFY_WRITE
, target_ver
->name
,
5609 target_ver
->name_len
, 0);
5610 if (!host_ver
->name
) {
5615 __get_user(host_ver
->date_len
, &target_ver
->date_len
);
5616 if (host_ver
->date_len
) {
5617 host_ver
->date
= lock_user(VERIFY_WRITE
, target_ver
->date
,
5618 target_ver
->date_len
, 0);
5619 if (!host_ver
->date
) {
5624 __get_user(host_ver
->desc_len
, &target_ver
->desc_len
);
5625 if (host_ver
->desc_len
) {
5626 host_ver
->desc
= lock_user(VERIFY_WRITE
, target_ver
->desc
,
5627 target_ver
->desc_len
, 0);
5628 if (!host_ver
->desc
) {
5635 unlock_drm_version(host_ver
, target_ver
, false);
5639 static inline void host_to_target_drmversion(
5640 struct target_drm_version
*target_ver
,
5641 struct drm_version
*host_ver
)
5643 __put_user(host_ver
->version_major
, &target_ver
->version_major
);
5644 __put_user(host_ver
->version_minor
, &target_ver
->version_minor
);
5645 __put_user(host_ver
->version_patchlevel
, &target_ver
->version_patchlevel
);
5646 __put_user(host_ver
->name_len
, &target_ver
->name_len
);
5647 __put_user(host_ver
->date_len
, &target_ver
->date_len
);
5648 __put_user(host_ver
->desc_len
, &target_ver
->desc_len
);
5649 unlock_drm_version(host_ver
, target_ver
, true);
5652 static abi_long
do_ioctl_drm(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5653 int fd
, int cmd
, abi_long arg
)
5655 struct drm_version
*ver
;
5656 struct target_drm_version
*target_ver
;
5659 switch (ie
->host_cmd
) {
5660 case DRM_IOCTL_VERSION
:
5661 if (!lock_user_struct(VERIFY_WRITE
, target_ver
, arg
, 0)) {
5662 return -TARGET_EFAULT
;
5664 ver
= (struct drm_version
*)buf_temp
;
5665 ret
= target_to_host_drmversion(ver
, target_ver
);
5666 if (!is_error(ret
)) {
5667 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, ver
));
5668 if (is_error(ret
)) {
5669 unlock_drm_version(ver
, target_ver
, false);
5671 host_to_target_drmversion(target_ver
, ver
);
5674 unlock_user_struct(target_ver
, arg
, 0);
5677 return -TARGET_ENOSYS
;
5680 static abi_long
do_ioctl_drm_i915_getparam(const IOCTLEntry
*ie
,
5681 struct drm_i915_getparam
*gparam
,
5682 int fd
, abi_long arg
)
5686 struct target_drm_i915_getparam
*target_gparam
;
5688 if (!lock_user_struct(VERIFY_READ
, target_gparam
, arg
, 0)) {
5689 return -TARGET_EFAULT
;
5692 __get_user(gparam
->param
, &target_gparam
->param
);
5693 gparam
->value
= &value
;
5694 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, gparam
));
5695 put_user_s32(value
, target_gparam
->value
);
5697 unlock_user_struct(target_gparam
, arg
, 0);
5701 static abi_long
do_ioctl_drm_i915(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5702 int fd
, int cmd
, abi_long arg
)
5704 switch (ie
->host_cmd
) {
5705 case DRM_IOCTL_I915_GETPARAM
:
5706 return do_ioctl_drm_i915_getparam(ie
,
5707 (struct drm_i915_getparam
*)buf_temp
,
5710 return -TARGET_ENOSYS
;
5716 static abi_long
do_ioctl_TUNSETTXFILTER(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5717 int fd
, int cmd
, abi_long arg
)
5719 struct tun_filter
*filter
= (struct tun_filter
*)buf_temp
;
5720 struct tun_filter
*target_filter
;
5723 assert(ie
->access
== IOC_W
);
5725 target_filter
= lock_user(VERIFY_READ
, arg
, sizeof(*target_filter
), 1);
5726 if (!target_filter
) {
5727 return -TARGET_EFAULT
;
5729 filter
->flags
= tswap16(target_filter
->flags
);
5730 filter
->count
= tswap16(target_filter
->count
);
5731 unlock_user(target_filter
, arg
, 0);
5733 if (filter
->count
) {
5734 if (offsetof(struct tun_filter
, addr
) + filter
->count
* ETH_ALEN
>
5736 return -TARGET_EFAULT
;
5739 target_addr
= lock_user(VERIFY_READ
,
5740 arg
+ offsetof(struct tun_filter
, addr
),
5741 filter
->count
* ETH_ALEN
, 1);
5743 return -TARGET_EFAULT
;
5745 memcpy(filter
->addr
, target_addr
, filter
->count
* ETH_ALEN
);
5746 unlock_user(target_addr
, arg
+ offsetof(struct tun_filter
, addr
), 0);
5749 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, filter
));
5752 IOCTLEntry ioctl_entries
[] = {
5753 #define IOCTL(cmd, access, ...) \
5754 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5755 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5756 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5757 #define IOCTL_IGNORE(cmd) \
5758 { TARGET_ ## cmd, 0, #cmd },
5763 /* ??? Implement proper locking for ioctls. */
5764 /* do_ioctl() Must return target values and target errnos. */
5765 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
5767 const IOCTLEntry
*ie
;
5768 const argtype
*arg_type
;
5770 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
5776 if (ie
->target_cmd
== 0) {
5778 LOG_UNIMP
, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
5779 return -TARGET_ENOSYS
;
5781 if (ie
->target_cmd
== cmd
)
5785 arg_type
= ie
->arg_type
;
5787 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
5788 } else if (!ie
->host_cmd
) {
5789 /* Some architectures define BSD ioctls in their headers
5790 that are not implemented in Linux. */
5791 return -TARGET_ENOSYS
;
5794 switch(arg_type
[0]) {
5797 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
5803 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
5807 target_size
= thunk_type_size(arg_type
, 0);
5808 switch(ie
->access
) {
5810 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5811 if (!is_error(ret
)) {
5812 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5814 return -TARGET_EFAULT
;
5815 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5816 unlock_user(argptr
, arg
, target_size
);
5820 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5822 return -TARGET_EFAULT
;
5823 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5824 unlock_user(argptr
, arg
, 0);
5825 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5829 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5831 return -TARGET_EFAULT
;
5832 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5833 unlock_user(argptr
, arg
, 0);
5834 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5835 if (!is_error(ret
)) {
5836 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5838 return -TARGET_EFAULT
;
5839 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5840 unlock_user(argptr
, arg
, target_size
);
5846 qemu_log_mask(LOG_UNIMP
,
5847 "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5848 (long)cmd
, arg_type
[0]);
5849 ret
= -TARGET_ENOSYS
;
5855 static const bitmask_transtbl iflag_tbl
[] = {
5856 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
5857 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
5858 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
5859 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
5860 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
5861 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
5862 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
5863 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
5864 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
5865 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
5866 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
5867 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
5868 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
5869 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
5870 { TARGET_IUTF8
, TARGET_IUTF8
, IUTF8
, IUTF8
},
5874 static const bitmask_transtbl oflag_tbl
[] = {
5875 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
5876 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
5877 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
5878 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
5879 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
5880 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
5881 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
5882 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
5883 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
5884 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
5885 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
5886 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
5887 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
5888 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
5889 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
5890 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
5891 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
5892 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
5893 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
5894 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
5895 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
5896 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
5897 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
5898 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
5902 static const bitmask_transtbl cflag_tbl
[] = {
5903 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
5904 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
5905 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
5906 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
5907 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
5908 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
5909 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
5910 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
5911 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
5912 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
5913 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
5914 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
5915 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
5916 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
5917 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
5918 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
5919 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
5920 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
5921 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
5922 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
5923 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
5924 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
5925 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
5926 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
5927 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
5928 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
5929 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
5930 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
5931 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
5932 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
5933 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
5937 static const bitmask_transtbl lflag_tbl
[] = {
5938 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
5939 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
5940 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
5941 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
5942 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
5943 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
5944 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
5945 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
5946 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
5947 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
5948 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
5949 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
5950 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
5951 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
5952 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
5953 { TARGET_EXTPROC
, TARGET_EXTPROC
, EXTPROC
, EXTPROC
},
5957 static void target_to_host_termios (void *dst
, const void *src
)
5959 struct host_termios
*host
= dst
;
5960 const struct target_termios
*target
= src
;
5963 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
5965 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
5967 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
5969 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
5970 host
->c_line
= target
->c_line
;
5972 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
5973 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
5974 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
5975 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
5976 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
5977 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
5978 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
5979 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
5980 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
5981 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
5982 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
5983 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
5984 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
5985 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
5986 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
5987 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
5988 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
5989 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
5992 static void host_to_target_termios (void *dst
, const void *src
)
5994 struct target_termios
*target
= dst
;
5995 const struct host_termios
*host
= src
;
5998 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
6000 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
6002 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
6004 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
6005 target
->c_line
= host
->c_line
;
6007 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
6008 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
6009 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
6010 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
6011 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
6012 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
6013 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
6014 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
6015 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
6016 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
6017 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
6018 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
6019 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
6020 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
6021 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
6022 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
6023 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
6024 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
6027 static const StructEntry struct_termios_def
= {
6028 .convert
= { host_to_target_termios
, target_to_host_termios
},
6029 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
6030 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
6031 .print
= print_termios
,
6034 static bitmask_transtbl mmap_flags_tbl
[] = {
6035 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
6036 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
6037 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
6038 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
,
6039 MAP_ANONYMOUS
, MAP_ANONYMOUS
},
6040 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
,
6041 MAP_GROWSDOWN
, MAP_GROWSDOWN
},
6042 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
,
6043 MAP_DENYWRITE
, MAP_DENYWRITE
},
6044 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
,
6045 MAP_EXECUTABLE
, MAP_EXECUTABLE
},
6046 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
6047 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
,
6048 MAP_NORESERVE
, MAP_NORESERVE
},
6049 { TARGET_MAP_HUGETLB
, TARGET_MAP_HUGETLB
, MAP_HUGETLB
, MAP_HUGETLB
},
6050 /* MAP_STACK had been ignored by the kernel for quite some time.
6051 Recognize it for the target insofar as we do not want to pass
6052 it through to the host. */
6053 { TARGET_MAP_STACK
, TARGET_MAP_STACK
, 0, 0 },
6058 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
6059 * TARGET_I386 is defined if TARGET_X86_64 is defined
6061 #if defined(TARGET_I386)
6063 /* NOTE: there is really one LDT for all the threads */
6064 static uint8_t *ldt_table
;
6066 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
6073 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
6074 if (size
> bytecount
)
6076 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
6078 return -TARGET_EFAULT
;
6079 /* ??? Should this by byteswapped? */
6080 memcpy(p
, ldt_table
, size
);
6081 unlock_user(p
, ptr
, size
);
6085 /* XXX: add locking support */
6086 static abi_long
write_ldt(CPUX86State
*env
,
6087 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
6089 struct target_modify_ldt_ldt_s ldt_info
;
6090 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6091 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
6092 int seg_not_present
, useable
, lm
;
6093 uint32_t *lp
, entry_1
, entry_2
;
6095 if (bytecount
!= sizeof(ldt_info
))
6096 return -TARGET_EINVAL
;
6097 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
6098 return -TARGET_EFAULT
;
6099 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
6100 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
6101 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
6102 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
6103 unlock_user_struct(target_ldt_info
, ptr
, 0);
6105 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
6106 return -TARGET_EINVAL
;
6107 seg_32bit
= ldt_info
.flags
& 1;
6108 contents
= (ldt_info
.flags
>> 1) & 3;
6109 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
6110 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
6111 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
6112 useable
= (ldt_info
.flags
>> 6) & 1;
6116 lm
= (ldt_info
.flags
>> 7) & 1;
6118 if (contents
== 3) {
6120 return -TARGET_EINVAL
;
6121 if (seg_not_present
== 0)
6122 return -TARGET_EINVAL
;
6124 /* allocate the LDT */
6126 env
->ldt
.base
= target_mmap(0,
6127 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
6128 PROT_READ
|PROT_WRITE
,
6129 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
6130 if (env
->ldt
.base
== -1)
6131 return -TARGET_ENOMEM
;
6132 memset(g2h(env
->ldt
.base
), 0,
6133 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
6134 env
->ldt
.limit
= 0xffff;
6135 ldt_table
= g2h(env
->ldt
.base
);
6138 /* NOTE: same code as Linux kernel */
6139 /* Allow LDTs to be cleared by the user. */
6140 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
6143 read_exec_only
== 1 &&
6145 limit_in_pages
== 0 &&
6146 seg_not_present
== 1 &&
6154 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
6155 (ldt_info
.limit
& 0x0ffff);
6156 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
6157 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
6158 (ldt_info
.limit
& 0xf0000) |
6159 ((read_exec_only
^ 1) << 9) |
6161 ((seg_not_present
^ 1) << 15) |
6163 (limit_in_pages
<< 23) |
6167 entry_2
|= (useable
<< 20);
6169 /* Install the new entry ... */
6171 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
6172 lp
[0] = tswap32(entry_1
);
6173 lp
[1] = tswap32(entry_2
);
6177 /* specific and weird i386 syscalls */
6178 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
6179 unsigned long bytecount
)
6185 ret
= read_ldt(ptr
, bytecount
);
6188 ret
= write_ldt(env
, ptr
, bytecount
, 1);
6191 ret
= write_ldt(env
, ptr
, bytecount
, 0);
6194 ret
= -TARGET_ENOSYS
;
6200 #if defined(TARGET_ABI32)
6201 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
6203 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
6204 struct target_modify_ldt_ldt_s ldt_info
;
6205 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6206 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
6207 int seg_not_present
, useable
, lm
;
6208 uint32_t *lp
, entry_1
, entry_2
;
6211 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
6212 if (!target_ldt_info
)
6213 return -TARGET_EFAULT
;
6214 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
6215 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
6216 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
6217 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
6218 if (ldt_info
.entry_number
== -1) {
6219 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
6220 if (gdt_table
[i
] == 0) {
6221 ldt_info
.entry_number
= i
;
6222 target_ldt_info
->entry_number
= tswap32(i
);
6227 unlock_user_struct(target_ldt_info
, ptr
, 1);
6229 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
6230 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
6231 return -TARGET_EINVAL
;
6232 seg_32bit
= ldt_info
.flags
& 1;
6233 contents
= (ldt_info
.flags
>> 1) & 3;
6234 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
6235 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
6236 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
6237 useable
= (ldt_info
.flags
>> 6) & 1;
6241 lm
= (ldt_info
.flags
>> 7) & 1;
6244 if (contents
== 3) {
6245 if (seg_not_present
== 0)
6246 return -TARGET_EINVAL
;
6249 /* NOTE: same code as Linux kernel */
6250 /* Allow LDTs to be cleared by the user. */
6251 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
6252 if ((contents
== 0 &&
6253 read_exec_only
== 1 &&
6255 limit_in_pages
== 0 &&
6256 seg_not_present
== 1 &&
6264 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
6265 (ldt_info
.limit
& 0x0ffff);
6266 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
6267 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
6268 (ldt_info
.limit
& 0xf0000) |
6269 ((read_exec_only
^ 1) << 9) |
6271 ((seg_not_present
^ 1) << 15) |
6273 (limit_in_pages
<< 23) |
6278 /* Install the new entry ... */
6280 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
6281 lp
[0] = tswap32(entry_1
);
6282 lp
[1] = tswap32(entry_2
);
6286 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
6288 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6289 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
6290 uint32_t base_addr
, limit
, flags
;
6291 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
6292 int seg_not_present
, useable
, lm
;
6293 uint32_t *lp
, entry_1
, entry_2
;
6295 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
6296 if (!target_ldt_info
)
6297 return -TARGET_EFAULT
;
6298 idx
= tswap32(target_ldt_info
->entry_number
);
6299 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
6300 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
6301 unlock_user_struct(target_ldt_info
, ptr
, 1);
6302 return -TARGET_EINVAL
;
6304 lp
= (uint32_t *)(gdt_table
+ idx
);
6305 entry_1
= tswap32(lp
[0]);
6306 entry_2
= tswap32(lp
[1]);
6308 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
6309 contents
= (entry_2
>> 10) & 3;
6310 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
6311 seg_32bit
= (entry_2
>> 22) & 1;
6312 limit_in_pages
= (entry_2
>> 23) & 1;
6313 useable
= (entry_2
>> 20) & 1;
6317 lm
= (entry_2
>> 21) & 1;
6319 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
6320 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
6321 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
6322 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
6323 base_addr
= (entry_1
>> 16) |
6324 (entry_2
& 0xff000000) |
6325 ((entry_2
& 0xff) << 16);
6326 target_ldt_info
->base_addr
= tswapal(base_addr
);
6327 target_ldt_info
->limit
= tswap32(limit
);
6328 target_ldt_info
->flags
= tswap32(flags
);
6329 unlock_user_struct(target_ldt_info
, ptr
, 1);
6333 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
6335 return -TARGET_ENOSYS
;
6338 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
6345 case TARGET_ARCH_SET_GS
:
6346 case TARGET_ARCH_SET_FS
:
6347 if (code
== TARGET_ARCH_SET_GS
)
6351 cpu_x86_load_seg(env
, idx
, 0);
6352 env
->segs
[idx
].base
= addr
;
6354 case TARGET_ARCH_GET_GS
:
6355 case TARGET_ARCH_GET_FS
:
6356 if (code
== TARGET_ARCH_GET_GS
)
6360 val
= env
->segs
[idx
].base
;
6361 if (put_user(val
, addr
, abi_ulong
))
6362 ret
= -TARGET_EFAULT
;
6365 ret
= -TARGET_EINVAL
;
6370 #endif /* defined(TARGET_ABI32 */
6372 #endif /* defined(TARGET_I386) */
6374 #define NEW_STACK_SIZE 0x40000
6377 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
6380 pthread_mutex_t mutex
;
6381 pthread_cond_t cond
;
6384 abi_ulong child_tidptr
;
6385 abi_ulong parent_tidptr
;
6389 static void *clone_func(void *arg
)
6391 new_thread_info
*info
= arg
;
6396 rcu_register_thread();
6397 tcg_register_thread();
6401 ts
= (TaskState
*)cpu
->opaque
;
6402 info
->tid
= sys_gettid();
6404 if (info
->child_tidptr
)
6405 put_user_u32(info
->tid
, info
->child_tidptr
);
6406 if (info
->parent_tidptr
)
6407 put_user_u32(info
->tid
, info
->parent_tidptr
);
6408 qemu_guest_random_seed_thread_part2(cpu
->random_seed
);
6409 /* Enable signals. */
6410 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
6411 /* Signal to the parent that we're ready. */
6412 pthread_mutex_lock(&info
->mutex
);
6413 pthread_cond_broadcast(&info
->cond
);
6414 pthread_mutex_unlock(&info
->mutex
);
6415 /* Wait until the parent has finished initializing the tls state. */
6416 pthread_mutex_lock(&clone_lock
);
6417 pthread_mutex_unlock(&clone_lock
);
6423 /* do_fork() Must return host values and target errnos (unlike most
6424 do_*() functions). */
6425 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
6426 abi_ulong parent_tidptr
, target_ulong newtls
,
6427 abi_ulong child_tidptr
)
6429 CPUState
*cpu
= env_cpu(env
);
6433 CPUArchState
*new_env
;
6436 flags
&= ~CLONE_IGNORED_FLAGS
;
6438 /* Emulate vfork() with fork() */
6439 if (flags
& CLONE_VFORK
)
6440 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
6442 if (flags
& CLONE_VM
) {
6443 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
6444 new_thread_info info
;
6445 pthread_attr_t attr
;
6447 if (((flags
& CLONE_THREAD_FLAGS
) != CLONE_THREAD_FLAGS
) ||
6448 (flags
& CLONE_INVALID_THREAD_FLAGS
)) {
6449 return -TARGET_EINVAL
;
6452 ts
= g_new0(TaskState
, 1);
6453 init_task_state(ts
);
6455 /* Grab a mutex so that thread setup appears atomic. */
6456 pthread_mutex_lock(&clone_lock
);
6458 /* we create a new CPU instance. */
6459 new_env
= cpu_copy(env
);
6460 /* Init regs that differ from the parent. */
6461 cpu_clone_regs_child(new_env
, newsp
, flags
);
6462 cpu_clone_regs_parent(env
, flags
);
6463 new_cpu
= env_cpu(new_env
);
6464 new_cpu
->opaque
= ts
;
6465 ts
->bprm
= parent_ts
->bprm
;
6466 ts
->info
= parent_ts
->info
;
6467 ts
->signal_mask
= parent_ts
->signal_mask
;
6469 if (flags
& CLONE_CHILD_CLEARTID
) {
6470 ts
->child_tidptr
= child_tidptr
;
6473 if (flags
& CLONE_SETTLS
) {
6474 cpu_set_tls (new_env
, newtls
);
6477 memset(&info
, 0, sizeof(info
));
6478 pthread_mutex_init(&info
.mutex
, NULL
);
6479 pthread_mutex_lock(&info
.mutex
);
6480 pthread_cond_init(&info
.cond
, NULL
);
6482 if (flags
& CLONE_CHILD_SETTID
) {
6483 info
.child_tidptr
= child_tidptr
;
6485 if (flags
& CLONE_PARENT_SETTID
) {
6486 info
.parent_tidptr
= parent_tidptr
;
6489 ret
= pthread_attr_init(&attr
);
6490 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
6491 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
6492 /* It is not safe to deliver signals until the child has finished
6493 initializing, so temporarily block all signals. */
6494 sigfillset(&sigmask
);
6495 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
6496 cpu
->random_seed
= qemu_guest_random_seed_thread_part1();
6498 /* If this is our first additional thread, we need to ensure we
6499 * generate code for parallel execution and flush old translations.
6501 if (!parallel_cpus
) {
6502 parallel_cpus
= true;
6506 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
6507 /* TODO: Free new CPU state if thread creation failed. */
6509 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
6510 pthread_attr_destroy(&attr
);
6512 /* Wait for the child to initialize. */
6513 pthread_cond_wait(&info
.cond
, &info
.mutex
);
6518 pthread_mutex_unlock(&info
.mutex
);
6519 pthread_cond_destroy(&info
.cond
);
6520 pthread_mutex_destroy(&info
.mutex
);
6521 pthread_mutex_unlock(&clone_lock
);
6523 /* if no CLONE_VM, we consider it is a fork */
6524 if (flags
& CLONE_INVALID_FORK_FLAGS
) {
6525 return -TARGET_EINVAL
;
6528 /* We can't support custom termination signals */
6529 if ((flags
& CSIGNAL
) != TARGET_SIGCHLD
) {
6530 return -TARGET_EINVAL
;
6533 if (block_signals()) {
6534 return -TARGET_ERESTARTSYS
;
6540 /* Child Process. */
6541 cpu_clone_regs_child(env
, newsp
, flags
);
6543 /* There is a race condition here. The parent process could
6544 theoretically read the TID in the child process before the child
6545 tid is set. This would require using either ptrace
6546 (not implemented) or having *_tidptr to point at a shared memory
6547 mapping. We can't repeat the spinlock hack used above because
6548 the child process gets its own copy of the lock. */
6549 if (flags
& CLONE_CHILD_SETTID
)
6550 put_user_u32(sys_gettid(), child_tidptr
);
6551 if (flags
& CLONE_PARENT_SETTID
)
6552 put_user_u32(sys_gettid(), parent_tidptr
);
6553 ts
= (TaskState
*)cpu
->opaque
;
6554 if (flags
& CLONE_SETTLS
)
6555 cpu_set_tls (env
, newtls
);
6556 if (flags
& CLONE_CHILD_CLEARTID
)
6557 ts
->child_tidptr
= child_tidptr
;
6559 cpu_clone_regs_parent(env
, flags
);
6566 /* warning : doesn't handle linux specific flags... */
6567 static int target_to_host_fcntl_cmd(int cmd
)
6572 case TARGET_F_DUPFD
:
6573 case TARGET_F_GETFD
:
6574 case TARGET_F_SETFD
:
6575 case TARGET_F_GETFL
:
6576 case TARGET_F_SETFL
:
6577 case TARGET_F_OFD_GETLK
:
6578 case TARGET_F_OFD_SETLK
:
6579 case TARGET_F_OFD_SETLKW
:
6582 case TARGET_F_GETLK
:
6585 case TARGET_F_SETLK
:
6588 case TARGET_F_SETLKW
:
6591 case TARGET_F_GETOWN
:
6594 case TARGET_F_SETOWN
:
6597 case TARGET_F_GETSIG
:
6600 case TARGET_F_SETSIG
:
6603 #if TARGET_ABI_BITS == 32
6604 case TARGET_F_GETLK64
:
6607 case TARGET_F_SETLK64
:
6610 case TARGET_F_SETLKW64
:
6614 case TARGET_F_SETLEASE
:
6617 case TARGET_F_GETLEASE
:
6620 #ifdef F_DUPFD_CLOEXEC
6621 case TARGET_F_DUPFD_CLOEXEC
:
6622 ret
= F_DUPFD_CLOEXEC
;
6625 case TARGET_F_NOTIFY
:
6629 case TARGET_F_GETOWN_EX
:
6634 case TARGET_F_SETOWN_EX
:
6639 case TARGET_F_SETPIPE_SZ
:
6642 case TARGET_F_GETPIPE_SZ
:
6647 case TARGET_F_ADD_SEALS
:
6650 case TARGET_F_GET_SEALS
:
6655 ret
= -TARGET_EINVAL
;
6659 #if defined(__powerpc64__)
6660 /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
6661 * is not supported by kernel. The glibc fcntl call actually adjusts
6662 * them to 5, 6 and 7 before making the syscall(). Since we make the
6663 * syscall directly, adjust to what is supported by the kernel.
6665 if (ret
>= F_GETLK64
&& ret
<= F_SETLKW64
) {
6666 ret
-= F_GETLK64
- 5;
6673 #define FLOCK_TRANSTBL \
6675 TRANSTBL_CONVERT(F_RDLCK); \
6676 TRANSTBL_CONVERT(F_WRLCK); \
6677 TRANSTBL_CONVERT(F_UNLCK); \
6678 TRANSTBL_CONVERT(F_EXLCK); \
6679 TRANSTBL_CONVERT(F_SHLCK); \
6682 static int target_to_host_flock(int type
)
6684 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6686 #undef TRANSTBL_CONVERT
6687 return -TARGET_EINVAL
;
6690 static int host_to_target_flock(int type
)
6692 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6694 #undef TRANSTBL_CONVERT
6695 /* if we don't know how to convert the value coming
6696 * from the host we copy to the target field as-is
6701 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
6702 abi_ulong target_flock_addr
)
6704 struct target_flock
*target_fl
;
6707 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6708 return -TARGET_EFAULT
;
6711 __get_user(l_type
, &target_fl
->l_type
);
6712 l_type
= target_to_host_flock(l_type
);
6716 fl
->l_type
= l_type
;
6717 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6718 __get_user(fl
->l_start
, &target_fl
->l_start
);
6719 __get_user(fl
->l_len
, &target_fl
->l_len
);
6720 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6721 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6725 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
6726 const struct flock64
*fl
)
6728 struct target_flock
*target_fl
;
6731 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6732 return -TARGET_EFAULT
;
6735 l_type
= host_to_target_flock(fl
->l_type
);
6736 __put_user(l_type
, &target_fl
->l_type
);
6737 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6738 __put_user(fl
->l_start
, &target_fl
->l_start
);
6739 __put_user(fl
->l_len
, &target_fl
->l_len
);
6740 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6741 unlock_user_struct(target_fl
, target_flock_addr
, 1);
/* Function types matching the flock64 copy-in/copy-out helpers, so a caller
 * can select the EABI or ARM-OABI layout variant at run time. */
typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6748 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6749 static inline abi_long
copy_from_user_oabi_flock64(struct flock64
*fl
,
6750 abi_ulong target_flock_addr
)
6752 struct target_oabi_flock64
*target_fl
;
6755 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6756 return -TARGET_EFAULT
;
6759 __get_user(l_type
, &target_fl
->l_type
);
6760 l_type
= target_to_host_flock(l_type
);
6764 fl
->l_type
= l_type
;
6765 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6766 __get_user(fl
->l_start
, &target_fl
->l_start
);
6767 __get_user(fl
->l_len
, &target_fl
->l_len
);
6768 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6769 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6773 static inline abi_long
copy_to_user_oabi_flock64(abi_ulong target_flock_addr
,
6774 const struct flock64
*fl
)
6776 struct target_oabi_flock64
*target_fl
;
6779 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6780 return -TARGET_EFAULT
;
6783 l_type
= host_to_target_flock(fl
->l_type
);
6784 __put_user(l_type
, &target_fl
->l_type
);
6785 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6786 __put_user(fl
->l_start
, &target_fl
->l_start
);
6787 __put_user(fl
->l_len
, &target_fl
->l_len
);
6788 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6789 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6794 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
6795 abi_ulong target_flock_addr
)
6797 struct target_flock64
*target_fl
;
6800 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6801 return -TARGET_EFAULT
;
6804 __get_user(l_type
, &target_fl
->l_type
);
6805 l_type
= target_to_host_flock(l_type
);
6809 fl
->l_type
= l_type
;
6810 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6811 __get_user(fl
->l_start
, &target_fl
->l_start
);
6812 __get_user(fl
->l_len
, &target_fl
->l_len
);
6813 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6814 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6818 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
6819 const struct flock64
*fl
)
6821 struct target_flock64
*target_fl
;
6824 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6825 return -TARGET_EFAULT
;
6828 l_type
= host_to_target_flock(fl
->l_type
);
6829 __put_user(l_type
, &target_fl
->l_type
);
6830 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6831 __put_user(fl
->l_start
, &target_fl
->l_start
);
6832 __put_user(fl
->l_len
, &target_fl
->l_len
);
6833 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6834 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6838 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
6840 struct flock64 fl64
;
6842 struct f_owner_ex fox
;
6843 struct target_f_owner_ex
*target_fox
;
6846 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
6848 if (host_cmd
== -TARGET_EINVAL
)
6852 case TARGET_F_GETLK
:
6853 ret
= copy_from_user_flock(&fl64
, arg
);
6857 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6859 ret
= copy_to_user_flock(arg
, &fl64
);
6863 case TARGET_F_SETLK
:
6864 case TARGET_F_SETLKW
:
6865 ret
= copy_from_user_flock(&fl64
, arg
);
6869 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6872 case TARGET_F_GETLK64
:
6873 case TARGET_F_OFD_GETLK
:
6874 ret
= copy_from_user_flock64(&fl64
, arg
);
6878 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6880 ret
= copy_to_user_flock64(arg
, &fl64
);
6883 case TARGET_F_SETLK64
:
6884 case TARGET_F_SETLKW64
:
6885 case TARGET_F_OFD_SETLK
:
6886 case TARGET_F_OFD_SETLKW
:
6887 ret
= copy_from_user_flock64(&fl64
, arg
);
6891 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6894 case TARGET_F_GETFL
:
6895 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6897 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
6901 case TARGET_F_SETFL
:
6902 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
6903 target_to_host_bitmask(arg
,
6908 case TARGET_F_GETOWN_EX
:
6909 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6911 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
6912 return -TARGET_EFAULT
;
6913 target_fox
->type
= tswap32(fox
.type
);
6914 target_fox
->pid
= tswap32(fox
.pid
);
6915 unlock_user_struct(target_fox
, arg
, 1);
6921 case TARGET_F_SETOWN_EX
:
6922 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
6923 return -TARGET_EFAULT
;
6924 fox
.type
= tswap32(target_fox
->type
);
6925 fox
.pid
= tswap32(target_fox
->pid
);
6926 unlock_user_struct(target_fox
, arg
, 0);
6927 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6931 case TARGET_F_SETSIG
:
6932 ret
= get_errno(safe_fcntl(fd
, host_cmd
, target_to_host_signal(arg
)));
6935 case TARGET_F_GETSIG
:
6936 ret
= host_to_target_signal(get_errno(safe_fcntl(fd
, host_cmd
, arg
)));
6939 case TARGET_F_SETOWN
:
6940 case TARGET_F_GETOWN
:
6941 case TARGET_F_SETLEASE
:
6942 case TARGET_F_GETLEASE
:
6943 case TARGET_F_SETPIPE_SZ
:
6944 case TARGET_F_GETPIPE_SZ
:
6945 case TARGET_F_ADD_SEALS
:
6946 case TARGET_F_GET_SEALS
:
6947 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6951 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
#ifdef USE_UID16
/* 16-bit UID/GID targets: clamp 32-bit host IDs to the 16-bit range
 * (overflow maps to 65534) and widen 16-bit IDs, preserving -1. */
static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}
static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}
static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}
static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}
static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */
/* 32-bit UID/GID targets: all conversions are identity. */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
7023 /* We must do direct syscalls for setting UID/GID, because we want to
7024 * implement the Linux system call semantics of "change only for this thread",
7025 * not the libc/POSIX semantics of "change for all threads in process".
7026 * (See http://ewontfix.com/17/ for more details.)
7027 * We use the 32-bit version of the syscalls if present; if it is not
7028 * then either the host architecture supports 32-bit UIDs natively with
7029 * the standard syscall, or the 16-bit UID is the best we can do.
/* Prefer the explicit 32-bit UID syscalls where the host provides them;
 * otherwise fall back to the plain syscall numbers. */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif
/* Direct-syscall wrappers: affect only the calling thread, per the Linux
 * semantics described in the comment block above (unlike the libc wrappers,
 * which apply the change to all threads in the process). */
_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7057 void syscall_init(void)
7060 const argtype
*arg_type
;
7064 thunk_init(STRUCT_MAX
);
7066 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7067 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7068 #include "syscall_types.h"
7070 #undef STRUCT_SPECIAL
7072 /* Build target_to_host_errno_table[] table from
7073 * host_to_target_errno_table[]. */
7074 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
7075 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
7078 /* we patch the ioctl size if necessary. We rely on the fact that
7079 no ioctl has all the bits at '1' in the size field */
7081 while (ie
->target_cmd
!= 0) {
7082 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
7083 TARGET_IOC_SIZEMASK
) {
7084 arg_type
= ie
->arg_type
;
7085 if (arg_type
[0] != TYPE_PTR
) {
7086 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
7091 size
= thunk_type_size(arg_type
, 0);
7092 ie
->target_cmd
= (ie
->target_cmd
&
7093 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
7094 (size
<< TARGET_IOC_SIZESHIFT
);
7097 /* automatic consistency check if same arch */
7098 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7099 (defined(__x86_64__) && defined(TARGET_X86_64))
7100 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
7101 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7102 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#ifdef TARGET_NR_truncate64
/* truncate64(2): the 64-bit length arrives split across two ABI registers;
 * on ABIs that align register pairs the pair starts one register later. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* ftruncate64(2): same register-pair alignment handling as truncate64. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#if defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
/* Copy a target itimerspec (32-bit timespec layout) from guest memory. */
static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
                                                 abi_ulong target_addr)
{
    if (target_to_host_timespec(&host_its->it_interval, target_addr +
                                offsetof(struct target_itimerspec,
                                         it_interval)) ||
        target_to_host_timespec(&host_its->it_value, target_addr +
                                offsetof(struct target_itimerspec,
                                         it_value))) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif
#if defined(TARGET_NR_timer_settime64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
/* Copy a target __kernel_itimerspec (64-bit timespec layout) from guest
 * memory. */
static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
                                                   abi_ulong target_addr)
{
    if (target_to_host_timespec64(&host_its->it_interval, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_interval)) ||
        target_to_host_timespec64(&host_its->it_value, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_value))) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif
#if ((defined(TARGET_NR_timerfd_gettime) || \
      defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
/* Copy a host itimerspec to a target itimerspec (32-bit timespec layout)
 * in guest memory. */
static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
                                                 struct itimerspec *host_its)
{
    if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
                                                       it_interval),
                                &host_its->it_interval) ||
        host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
                                                       it_value),
                                &host_its->it_value)) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif
#if ((defined(TARGET_NR_timerfd_gettime64) || \
      defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
/* Copy a host itimerspec to a target __kernel_itimerspec (64-bit timespec
 * layout) in guest memory. */
static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
                                                   struct itimerspec *host_its)
{
    if (host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_interval),
                                  &host_its->it_interval) ||
        host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_value),
                                  &host_its->it_value)) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif
7211 #if defined(TARGET_NR_adjtimex) || \
7212 (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7213 static inline abi_long
target_to_host_timex(struct timex
*host_tx
,
7214 abi_long target_addr
)
7216 struct target_timex
*target_tx
;
7218 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
7219 return -TARGET_EFAULT
;
7222 __get_user(host_tx
->modes
, &target_tx
->modes
);
7223 __get_user(host_tx
->offset
, &target_tx
->offset
);
7224 __get_user(host_tx
->freq
, &target_tx
->freq
);
7225 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7226 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
7227 __get_user(host_tx
->status
, &target_tx
->status
);
7228 __get_user(host_tx
->constant
, &target_tx
->constant
);
7229 __get_user(host_tx
->precision
, &target_tx
->precision
);
7230 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7231 __get_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
7232 __get_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
7233 __get_user(host_tx
->tick
, &target_tx
->tick
);
7234 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7235 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
7236 __get_user(host_tx
->shift
, &target_tx
->shift
);
7237 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
7238 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7239 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7240 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7241 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7242 __get_user(host_tx
->tai
, &target_tx
->tai
);
7244 unlock_user_struct(target_tx
, target_addr
, 0);
7248 static inline abi_long
host_to_target_timex(abi_long target_addr
,
7249 struct timex
*host_tx
)
7251 struct target_timex
*target_tx
;
7253 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
7254 return -TARGET_EFAULT
;
7257 __put_user(host_tx
->modes
, &target_tx
->modes
);
7258 __put_user(host_tx
->offset
, &target_tx
->offset
);
7259 __put_user(host_tx
->freq
, &target_tx
->freq
);
7260 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7261 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
7262 __put_user(host_tx
->status
, &target_tx
->status
);
7263 __put_user(host_tx
->constant
, &target_tx
->constant
);
7264 __put_user(host_tx
->precision
, &target_tx
->precision
);
7265 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7266 __put_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
7267 __put_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
7268 __put_user(host_tx
->tick
, &target_tx
->tick
);
7269 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7270 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
7271 __put_user(host_tx
->shift
, &target_tx
->shift
);
7272 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
7273 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7274 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7275 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7276 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7277 __put_user(host_tx
->tai
, &target_tx
->tai
);
7279 unlock_user_struct(target_tx
, target_addr
, 1);
7285 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7286 static inline abi_long
target_to_host_timex64(struct timex
*host_tx
,
7287 abi_long target_addr
)
7289 struct target__kernel_timex
*target_tx
;
7291 if (copy_from_user_timeval64(&host_tx
->time
, target_addr
+
7292 offsetof(struct target__kernel_timex
,
7294 return -TARGET_EFAULT
;
7297 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
7298 return -TARGET_EFAULT
;
7301 __get_user(host_tx
->modes
, &target_tx
->modes
);
7302 __get_user(host_tx
->offset
, &target_tx
->offset
);
7303 __get_user(host_tx
->freq
, &target_tx
->freq
);
7304 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7305 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
7306 __get_user(host_tx
->status
, &target_tx
->status
);
7307 __get_user(host_tx
->constant
, &target_tx
->constant
);
7308 __get_user(host_tx
->precision
, &target_tx
->precision
);
7309 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7310 __get_user(host_tx
->tick
, &target_tx
->tick
);
7311 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7312 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
7313 __get_user(host_tx
->shift
, &target_tx
->shift
);
7314 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
7315 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7316 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7317 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7318 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7319 __get_user(host_tx
->tai
, &target_tx
->tai
);
7321 unlock_user_struct(target_tx
, target_addr
, 0);
7325 static inline abi_long
host_to_target_timex64(abi_long target_addr
,
7326 struct timex
*host_tx
)
7328 struct target__kernel_timex
*target_tx
;
7330 if (copy_to_user_timeval64(target_addr
+
7331 offsetof(struct target__kernel_timex
, time
),
7333 return -TARGET_EFAULT
;
7336 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
7337 return -TARGET_EFAULT
;
7340 __put_user(host_tx
->modes
, &target_tx
->modes
);
7341 __put_user(host_tx
->offset
, &target_tx
->offset
);
7342 __put_user(host_tx
->freq
, &target_tx
->freq
);
7343 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7344 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
7345 __put_user(host_tx
->status
, &target_tx
->status
);
7346 __put_user(host_tx
->constant
, &target_tx
->constant
);
7347 __put_user(host_tx
->precision
, &target_tx
->precision
);
7348 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7349 __put_user(host_tx
->tick
, &target_tx
->tick
);
7350 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7351 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
7352 __put_user(host_tx
->shift
, &target_tx
->shift
);
7353 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
7354 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7355 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7356 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7357 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7358 __put_user(host_tx
->tai
, &target_tx
->tai
);
7360 unlock_user_struct(target_tx
, target_addr
, 1);
7365 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
7366 abi_ulong target_addr
)
7368 struct target_sigevent
*target_sevp
;
7370 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
7371 return -TARGET_EFAULT
;
7374 /* This union is awkward on 64 bit systems because it has a 32 bit
7375 * integer and a pointer in it; we follow the conversion approach
7376 * used for handling sigval types in signal.c so the guest should get
7377 * the correct value back even if we did a 64 bit byteswap and it's
7378 * using the 32 bit integer.
7380 host_sevp
->sigev_value
.sival_ptr
=
7381 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
7382 host_sevp
->sigev_signo
=
7383 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
7384 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
7385 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
7387 unlock_user_struct(target_sevp
, target_addr
, 1);
7391 #if defined(TARGET_NR_mlockall)
7392 static inline int target_to_host_mlockall_arg(int arg
)
7396 if (arg
& TARGET_MCL_CURRENT
) {
7397 result
|= MCL_CURRENT
;
7399 if (arg
& TARGET_MCL_FUTURE
) {
7400 result
|= MCL_FUTURE
;
7403 if (arg
& TARGET_MCL_ONFAULT
) {
7404 result
|= MCL_ONFAULT
;
7412 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
7413 defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
7414 defined(TARGET_NR_newfstatat))
7415 static inline abi_long
host_to_target_stat64(void *cpu_env
,
7416 abi_ulong target_addr
,
7417 struct stat
*host_st
)
7419 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7420 if (((CPUARMState
*)cpu_env
)->eabi
) {
7421 struct target_eabi_stat64
*target_st
;
7423 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
7424 return -TARGET_EFAULT
;
7425 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
7426 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
7427 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
7428 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7429 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
7431 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
7432 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
7433 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
7434 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
7435 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
7436 __put_user(host_st
->st_size
, &target_st
->st_size
);
7437 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
7438 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
7439 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
7440 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
7441 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
7442 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7443 __put_user(host_st
->st_atim
.tv_nsec
, &target_st
->target_st_atime_nsec
);
7444 __put_user(host_st
->st_mtim
.tv_nsec
, &target_st
->target_st_mtime_nsec
);
7445 __put_user(host_st
->st_ctim
.tv_nsec
, &target_st
->target_st_ctime_nsec
);
7447 unlock_user_struct(target_st
, target_addr
, 1);
7451 #if defined(TARGET_HAS_STRUCT_STAT64)
7452 struct target_stat64
*target_st
;
7454 struct target_stat
*target_st
;
7457 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
7458 return -TARGET_EFAULT
;
7459 memset(target_st
, 0, sizeof(*target_st
));
7460 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
7461 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
7462 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7463 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
7465 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
7466 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
7467 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
7468 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
7469 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
7470 /* XXX: better use of kernel struct */
7471 __put_user(host_st
->st_size
, &target_st
->st_size
);
7472 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
7473 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
7474 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
7475 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
7476 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
7477 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7478 __put_user(host_st
->st_atim
.tv_nsec
, &target_st
->target_st_atime_nsec
);
7479 __put_user(host_st
->st_mtim
.tv_nsec
, &target_st
->target_st_mtime_nsec
);
7480 __put_user(host_st
->st_ctim
.tv_nsec
, &target_st
->target_st_ctime_nsec
);
7482 unlock_user_struct(target_st
, target_addr
, 1);
7489 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7490 static inline abi_long
host_to_target_statx(struct target_statx
*host_stx
,
7491 abi_ulong target_addr
)
7493 struct target_statx
*target_stx
;
7495 if (!lock_user_struct(VERIFY_WRITE
, target_stx
, target_addr
, 0)) {
7496 return -TARGET_EFAULT
;
7498 memset(target_stx
, 0, sizeof(*target_stx
));
7500 __put_user(host_stx
->stx_mask
, &target_stx
->stx_mask
);
7501 __put_user(host_stx
->stx_blksize
, &target_stx
->stx_blksize
);
7502 __put_user(host_stx
->stx_attributes
, &target_stx
->stx_attributes
);
7503 __put_user(host_stx
->stx_nlink
, &target_stx
->stx_nlink
);
7504 __put_user(host_stx
->stx_uid
, &target_stx
->stx_uid
);
7505 __put_user(host_stx
->stx_gid
, &target_stx
->stx_gid
);
7506 __put_user(host_stx
->stx_mode
, &target_stx
->stx_mode
);
7507 __put_user(host_stx
->stx_ino
, &target_stx
->stx_ino
);
7508 __put_user(host_stx
->stx_size
, &target_stx
->stx_size
);
7509 __put_user(host_stx
->stx_blocks
, &target_stx
->stx_blocks
);
7510 __put_user(host_stx
->stx_attributes_mask
, &target_stx
->stx_attributes_mask
);
7511 __put_user(host_stx
->stx_atime
.tv_sec
, &target_stx
->stx_atime
.tv_sec
);
7512 __put_user(host_stx
->stx_atime
.tv_nsec
, &target_stx
->stx_atime
.tv_nsec
);
7513 __put_user(host_stx
->stx_btime
.tv_sec
, &target_stx
->stx_btime
.tv_sec
);
7514 __put_user(host_stx
->stx_btime
.tv_nsec
, &target_stx
->stx_btime
.tv_nsec
);
7515 __put_user(host_stx
->stx_ctime
.tv_sec
, &target_stx
->stx_ctime
.tv_sec
);
7516 __put_user(host_stx
->stx_ctime
.tv_nsec
, &target_stx
->stx_ctime
.tv_nsec
);
7517 __put_user(host_stx
->stx_mtime
.tv_sec
, &target_stx
->stx_mtime
.tv_sec
);
7518 __put_user(host_stx
->stx_mtime
.tv_nsec
, &target_stx
->stx_mtime
.tv_nsec
);
7519 __put_user(host_stx
->stx_rdev_major
, &target_stx
->stx_rdev_major
);
7520 __put_user(host_stx
->stx_rdev_minor
, &target_stx
->stx_rdev_minor
);
7521 __put_user(host_stx
->stx_dev_major
, &target_stx
->stx_dev_major
);
7522 __put_user(host_stx
->stx_dev_minor
, &target_stx
->stx_dev_minor
);
7524 unlock_user_struct(target_stx
, target_addr
, 1);
/*
 * Dispatch to the raw host futex syscall, picking the variant whose
 * timespec width matches the host: on 64-bit hosts __NR_futex already
 * takes a 64-bit time_t; on 32-bit hosts prefer __NR_futex_time64 when
 * the timespec is 64-bit, falling back to the legacy __NR_futex.
 */
static int do_sys_futex(int *uaddr, int op, int val,
                        const struct timespec *timeout, int *uaddr2,
                        int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    g_assert_not_reached();
}
7555 static int do_safe_futex(int *uaddr
, int op
, int val
,
7556 const struct timespec
*timeout
, int *uaddr2
,
7559 #if HOST_LONG_BITS == 64
7560 #if defined(__NR_futex)
7561 /* always a 64-bit time_t, it doesn't define _time64 version */
7562 return get_errno(safe_futex(uaddr
, op
, val
, timeout
, uaddr2
, val3
));
7564 #else /* HOST_LONG_BITS == 64 */
7565 #if defined(__NR_futex_time64)
7566 if (sizeof(timeout
->tv_sec
) == 8) {
7567 /* _time64 function on 32bit arch */
7568 return get_errno(safe_futex_time64(uaddr
, op
, val
, timeout
, uaddr2
,
7572 #if defined(__NR_futex)
7573 /* old function on 32bit arch */
7574 return get_errno(safe_futex(uaddr
, op
, val
, timeout
, uaddr2
, val3
));
7576 #endif /* HOST_LONG_BITS == 64 */
7577 return -TARGET_ENOSYS
;
7580 /* ??? Using host futex calls even when target atomic operations
7581 are not really atomic probably breaks things. However implementing
7582 futexes locally would make futexes shared between multiple processes
7583 tricky. However they're probably useless because guest atomic
7584 operations won't work either. */
7585 #if defined(TARGET_NR_futex)
7586 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
7587 target_ulong uaddr2
, int val3
)
7589 struct timespec ts
, *pts
;
7592 /* ??? We assume FUTEX_* constants are the same on both host
7594 #ifdef FUTEX_CMD_MASK
7595 base_op
= op
& FUTEX_CMD_MASK
;
7601 case FUTEX_WAIT_BITSET
:
7604 target_to_host_timespec(pts
, timeout
);
7608 return do_safe_futex(g2h(uaddr
), op
, tswap32(val
), pts
, NULL
, val3
);
7610 return do_safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0);
7612 return do_safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0);
7614 case FUTEX_CMP_REQUEUE
:
7616 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7617 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7618 But the prototype takes a `struct timespec *'; insert casts
7619 to satisfy the compiler. We do not need to tswap TIMEOUT
7620 since it's not compared to guest memory. */
7621 pts
= (struct timespec
*)(uintptr_t) timeout
;
7622 return do_safe_futex(g2h(uaddr
), op
, val
, pts
, g2h(uaddr2
),
7623 (base_op
== FUTEX_CMP_REQUEUE
7627 return -TARGET_ENOSYS
;
7632 #if defined(TARGET_NR_futex_time64)
7633 static int do_futex_time64(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
7634 target_ulong uaddr2
, int val3
)
7636 struct timespec ts
, *pts
;
7639 /* ??? We assume FUTEX_* constants are the same on both host
7641 #ifdef FUTEX_CMD_MASK
7642 base_op
= op
& FUTEX_CMD_MASK
;
7648 case FUTEX_WAIT_BITSET
:
7651 if (target_to_host_timespec64(pts
, timeout
)) {
7652 return -TARGET_EFAULT
;
7657 return do_safe_futex(g2h(uaddr
), op
, tswap32(val
), pts
, NULL
, val3
);
7659 return do_safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0);
7661 return do_safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0);
7663 case FUTEX_CMP_REQUEUE
:
7665 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7666 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7667 But the prototype takes a `struct timespec *'; insert casts
7668 to satisfy the compiler. We do not need to tswap TIMEOUT
7669 since it's not compared to guest memory. */
7670 pts
= (struct timespec
*)(uintptr_t) timeout
;
7671 return do_safe_futex(g2h(uaddr
), op
, val
, pts
, g2h(uaddr2
),
7672 (base_op
== FUTEX_CMP_REQUEUE
7676 return -TARGET_ENOSYS
;
7681 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7682 static abi_long
do_name_to_handle_at(abi_long dirfd
, abi_long pathname
,
7683 abi_long handle
, abi_long mount_id
,
7686 struct file_handle
*target_fh
;
7687 struct file_handle
*fh
;
7691 unsigned int size
, total_size
;
7693 if (get_user_s32(size
, handle
)) {
7694 return -TARGET_EFAULT
;
7697 name
= lock_user_string(pathname
);
7699 return -TARGET_EFAULT
;
7702 total_size
= sizeof(struct file_handle
) + size
;
7703 target_fh
= lock_user(VERIFY_WRITE
, handle
, total_size
, 0);
7705 unlock_user(name
, pathname
, 0);
7706 return -TARGET_EFAULT
;
7709 fh
= g_malloc0(total_size
);
7710 fh
->handle_bytes
= size
;
7712 ret
= get_errno(name_to_handle_at(dirfd
, path(name
), fh
, &mid
, flags
));
7713 unlock_user(name
, pathname
, 0);
7715 /* man name_to_handle_at(2):
7716 * Other than the use of the handle_bytes field, the caller should treat
7717 * the file_handle structure as an opaque data type
7720 memcpy(target_fh
, fh
, total_size
);
7721 target_fh
->handle_bytes
= tswap32(fh
->handle_bytes
);
7722 target_fh
->handle_type
= tswap32(fh
->handle_type
);
7724 unlock_user(target_fh
, handle
, total_size
);
7726 if (put_user_s32(mid
, mount_id
)) {
7727 return -TARGET_EFAULT
;
7735 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7736 static abi_long
do_open_by_handle_at(abi_long mount_fd
, abi_long handle
,
7739 struct file_handle
*target_fh
;
7740 struct file_handle
*fh
;
7741 unsigned int size
, total_size
;
7744 if (get_user_s32(size
, handle
)) {
7745 return -TARGET_EFAULT
;
7748 total_size
= sizeof(struct file_handle
) + size
;
7749 target_fh
= lock_user(VERIFY_READ
, handle
, total_size
, 1);
7751 return -TARGET_EFAULT
;
7754 fh
= g_memdup(target_fh
, total_size
);
7755 fh
->handle_bytes
= size
;
7756 fh
->handle_type
= tswap32(target_fh
->handle_type
);
7758 ret
= get_errno(open_by_handle_at(mount_fd
, fh
,
7759 target_to_host_bitmask(flags
, fcntl_flags_tbl
)));
7763 unlock_user(target_fh
, handle
, total_size
);
7769 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7771 static abi_long
do_signalfd4(int fd
, abi_long mask
, int flags
)
7774 target_sigset_t
*target_mask
;
7778 if (flags
& ~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
)) {
7779 return -TARGET_EINVAL
;
7781 if (!lock_user_struct(VERIFY_READ
, target_mask
, mask
, 1)) {
7782 return -TARGET_EFAULT
;
7785 target_to_host_sigset(&host_mask
, target_mask
);
7787 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
7789 ret
= get_errno(signalfd(fd
, &host_mask
, host_flags
));
7791 fd_trans_register(ret
, &target_signalfd_trans
);
7794 unlock_user_struct(target_mask
, mask
, 0);
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
7814 static int open_self_cmdline(void *cpu_env
, int fd
)
7816 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7817 struct linux_binprm
*bprm
= ((TaskState
*)cpu
->opaque
)->bprm
;
7820 for (i
= 0; i
< bprm
->argc
; i
++) {
7821 size_t len
= strlen(bprm
->argv
[i
]) + 1;
7823 if (write(fd
, bprm
->argv
[i
], len
) != len
) {
7831 static int open_self_maps(void *cpu_env
, int fd
)
7833 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7834 TaskState
*ts
= cpu
->opaque
;
7835 GSList
*map_info
= read_self_maps();
7839 for (s
= map_info
; s
; s
= g_slist_next(s
)) {
7840 MapInfo
*e
= (MapInfo
*) s
->data
;
7842 if (h2g_valid(e
->start
)) {
7843 unsigned long min
= e
->start
;
7844 unsigned long max
= e
->end
;
7845 int flags
= page_get_flags(h2g(min
));
7848 max
= h2g_valid(max
- 1) ?
7849 max
: (uintptr_t) g2h(GUEST_ADDR_MAX
) + 1;
7851 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
7855 if (h2g(min
) == ts
->info
->stack_limit
) {
7861 count
= dprintf(fd
, TARGET_ABI_FMT_ptr
"-" TARGET_ABI_FMT_ptr
7862 " %c%c%c%c %08" PRIx64
" %s %"PRId64
,
7863 h2g(min
), h2g(max
- 1) + 1,
7864 e
->is_read
? 'r' : '-',
7865 e
->is_write
? 'w' : '-',
7866 e
->is_exec
? 'x' : '-',
7867 e
->is_priv
? 'p' : '-',
7868 (uint64_t) e
->offset
, e
->dev
, e
->inode
);
7870 dprintf(fd
, "%*s%s\n", 73 - count
, "", path
);
7877 free_self_maps(map_info
);
7879 #ifdef TARGET_VSYSCALL_PAGE
7881 * We only support execution from the vsyscall page.
7882 * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
7884 count
= dprintf(fd
, TARGET_FMT_lx
"-" TARGET_FMT_lx
7885 " --xp 00000000 00:00 0",
7886 TARGET_VSYSCALL_PAGE
, TARGET_VSYSCALL_PAGE
+ TARGET_PAGE_SIZE
);
7887 dprintf(fd
, "%*s%s\n", 73 - count
, "", "[vsyscall]");
7893 static int open_self_stat(void *cpu_env
, int fd
)
7895 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7896 TaskState
*ts
= cpu
->opaque
;
7897 g_autoptr(GString
) buf
= g_string_new(NULL
);
7900 for (i
= 0; i
< 44; i
++) {
7903 g_string_printf(buf
, FMT_pid
" ", getpid());
7904 } else if (i
== 1) {
7906 gchar
*bin
= g_strrstr(ts
->bprm
->argv
[0], "/");
7907 bin
= bin
? bin
+ 1 : ts
->bprm
->argv
[0];
7908 g_string_printf(buf
, "(%.15s) ", bin
);
7909 } else if (i
== 27) {
7911 g_string_printf(buf
, TARGET_ABI_FMT_ld
" ", ts
->info
->start_stack
);
7913 /* for the rest, there is MasterCard */
7914 g_string_printf(buf
, "0%c", i
== 43 ? '\n' : ' ');
7917 if (write(fd
, buf
->str
, buf
->len
) != buf
->len
) {
7925 static int open_self_auxv(void *cpu_env
, int fd
)
7927 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7928 TaskState
*ts
= cpu
->opaque
;
7929 abi_ulong auxv
= ts
->info
->saved_auxv
;
7930 abi_ulong len
= ts
->info
->auxv_len
;
7934 * Auxiliary vector is stored in target process stack.
7935 * read in whole auxv vector and copy it to file
7937 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
7941 r
= write(fd
, ptr
, len
);
7948 lseek(fd
, 0, SEEK_SET
);
7949 unlock_user(ptr
, auxv
, len
);
/*
 * Return 1 when filename names the given entry inside this process's own
 * /proc directory — either "/proc/self/<entry>" or "/proc/<mypid>/<entry>"
 * — and 0 otherwise.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}
7979 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7980 defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
/* Exact-match comparator used by the fake-open table for absolute
 * /proc paths; returns 1 on a match, 0 otherwise. */
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}
7987 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7988 static int open_net_route(void *cpu_env
, int fd
)
7995 fp
= fopen("/proc/net/route", "r");
8002 read
= getline(&line
, &len
, fp
);
8003 dprintf(fd
, "%s", line
);
8007 while ((read
= getline(&line
, &len
, fp
)) != -1) {
8009 uint32_t dest
, gw
, mask
;
8010 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
8013 fields
= sscanf(line
,
8014 "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8015 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
8016 &mask
, &mtu
, &window
, &irtt
);
8020 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8021 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
8022 metric
, tswap32(mask
), mtu
, window
, irtt
);
8032 #if defined(TARGET_SPARC)
/* Minimal /proc/cpuinfo replacement for SPARC guests. */
static int open_cpuinfo(void *cpu_env, int fd)
{
    dprintf(fd, "type\t\t: sun4u\n");
    return 0;
}
8040 #if defined(TARGET_HPPA)
/* Minimal /proc/cpuinfo replacement for HPPA guests, describing the
 * emulated PA7300LC (9000/778/B160L) machine. */
static int open_cpuinfo(void *cpu_env, int fd)
{
    dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
    dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
    dprintf(fd, "capabilities\t: os32\n");
    dprintf(fd, "model\t\t: 9000/778/B160L\n");
    dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
    return 0;
}
8052 #if defined(TARGET_M68K)
/* Minimal /proc/hardware replacement for m68k guests. */
static int open_hardware(void *cpu_env, int fd)
{
    dprintf(fd, "Model:\t\tqemu-m68k\n");
    return 0;
}
8060 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
8063 const char *filename
;
8064 int (*fill
)(void *cpu_env
, int fd
);
8065 int (*cmp
)(const char *s1
, const char *s2
);
8067 const struct fake_open
*fake_open
;
8068 static const struct fake_open fakes
[] = {
8069 { "maps", open_self_maps
, is_proc_myself
},
8070 { "stat", open_self_stat
, is_proc_myself
},
8071 { "auxv", open_self_auxv
, is_proc_myself
},
8072 { "cmdline", open_self_cmdline
, is_proc_myself
},
8073 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
8074 { "/proc/net/route", open_net_route
, is_proc
},
8076 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8077 { "/proc/cpuinfo", open_cpuinfo
, is_proc
},
8079 #if defined(TARGET_M68K)
8080 { "/proc/hardware", open_hardware
, is_proc
},
8082 { NULL
, NULL
, NULL
}
8085 if (is_proc_myself(pathname
, "exe")) {
8086 int execfd
= qemu_getauxval(AT_EXECFD
);
8087 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
8090 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
8091 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
8096 if (fake_open
->filename
) {
8098 char filename
[PATH_MAX
];
8101 /* create temporary file to map stat to */
8102 tmpdir
= getenv("TMPDIR");
8105 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
8106 fd
= mkstemp(filename
);
8112 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
8118 lseek(fd
, 0, SEEK_SET
);
8123 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
8126 #define TIMER_MAGIC 0x0caf0000
8127 #define TIMER_MAGIC_MASK 0xffff0000
8129 /* Convert QEMU provided timer ID back to internal 16bit index format */
8130 static target_timer_t
get_timer_id(abi_long arg
)
8132 target_timer_t timerid
= arg
;
8134 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
8135 return -TARGET_EINVAL
;
8140 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
8141 return -TARGET_EINVAL
;
8147 static int target_to_host_cpu_mask(unsigned long *host_mask
,
8149 abi_ulong target_addr
,
8152 unsigned target_bits
= sizeof(abi_ulong
) * 8;
8153 unsigned host_bits
= sizeof(*host_mask
) * 8;
8154 abi_ulong
*target_mask
;
8157 assert(host_size
>= target_size
);
8159 target_mask
= lock_user(VERIFY_READ
, target_addr
, target_size
, 1);
8161 return -TARGET_EFAULT
;
8163 memset(host_mask
, 0, host_size
);
8165 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
8166 unsigned bit
= i
* target_bits
;
8169 __get_user(val
, &target_mask
[i
]);
8170 for (j
= 0; j
< target_bits
; j
++, bit
++) {
8171 if (val
& (1UL << j
)) {
8172 host_mask
[bit
/ host_bits
] |= 1UL << (bit
% host_bits
);
8177 unlock_user(target_mask
, target_addr
, 0);
8181 static int host_to_target_cpu_mask(const unsigned long *host_mask
,
8183 abi_ulong target_addr
,
8186 unsigned target_bits
= sizeof(abi_ulong
) * 8;
8187 unsigned host_bits
= sizeof(*host_mask
) * 8;
8188 abi_ulong
*target_mask
;
8191 assert(host_size
>= target_size
);
8193 target_mask
= lock_user(VERIFY_WRITE
, target_addr
, target_size
, 0);
8195 return -TARGET_EFAULT
;
8198 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
8199 unsigned bit
= i
* target_bits
;
8202 for (j
= 0; j
< target_bits
; j
++, bit
++) {
8203 if (host_mask
[bit
/ host_bits
] & (1UL << (bit
% host_bits
))) {
8207 __put_user(val
, &target_mask
[i
]);
8210 unlock_user(target_mask
, target_addr
, target_size
);
8214 /* This is an internal helper for do_syscall so that it is easier
8215 * to have a single return point, so that actions, such as logging
8216 * of syscall results, can be performed.
8217 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8219 static abi_long
do_syscall1(void *cpu_env
, int num
, abi_long arg1
,
8220 abi_long arg2
, abi_long arg3
, abi_long arg4
,
8221 abi_long arg5
, abi_long arg6
, abi_long arg7
,
8224 CPUState
*cpu
= env_cpu(cpu_env
);
8226 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8227 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8228 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8229 || defined(TARGET_NR_statx)
8232 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8233 || defined(TARGET_NR_fstatfs)
8239 case TARGET_NR_exit
:
8240 /* In old applications this may be used to implement _exit(2).
8241 However in threaded applications it is used for thread termination,
8242 and _exit_group is used for application termination.
8243 Do thread termination if we have more then one thread. */
8245 if (block_signals()) {
8246 return -TARGET_ERESTARTSYS
;
8249 pthread_mutex_lock(&clone_lock
);
8251 if (CPU_NEXT(first_cpu
)) {
8252 TaskState
*ts
= cpu
->opaque
;
8254 object_property_set_bool(OBJECT(cpu
), "realized", false, NULL
);
8255 object_unref(OBJECT(cpu
));
8257 * At this point the CPU should be unrealized and removed
8258 * from cpu lists. We can clean-up the rest of the thread
8259 * data without the lock held.
8262 pthread_mutex_unlock(&clone_lock
);
8264 if (ts
->child_tidptr
) {
8265 put_user_u32(0, ts
->child_tidptr
);
8266 do_sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
8271 rcu_unregister_thread();
8275 pthread_mutex_unlock(&clone_lock
);
8276 preexit_cleanup(cpu_env
, arg1
);
8278 return 0; /* avoid warning */
8279 case TARGET_NR_read
:
8280 if (arg2
== 0 && arg3
== 0) {
8281 return get_errno(safe_read(arg1
, 0, 0));
8283 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
8284 return -TARGET_EFAULT
;
8285 ret
= get_errno(safe_read(arg1
, p
, arg3
));
8287 fd_trans_host_to_target_data(arg1
)) {
8288 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
8290 unlock_user(p
, arg2
, ret
);
8293 case TARGET_NR_write
:
8294 if (arg2
== 0 && arg3
== 0) {
8295 return get_errno(safe_write(arg1
, 0, 0));
8297 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
8298 return -TARGET_EFAULT
;
8299 if (fd_trans_target_to_host_data(arg1
)) {
8300 void *copy
= g_malloc(arg3
);
8301 memcpy(copy
, p
, arg3
);
8302 ret
= fd_trans_target_to_host_data(arg1
)(copy
, arg3
);
8304 ret
= get_errno(safe_write(arg1
, copy
, ret
));
8308 ret
= get_errno(safe_write(arg1
, p
, arg3
));
8310 unlock_user(p
, arg2
, 0);
8313 #ifdef TARGET_NR_open
8314 case TARGET_NR_open
:
8315 if (!(p
= lock_user_string(arg1
)))
8316 return -TARGET_EFAULT
;
8317 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
8318 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
8320 fd_trans_unregister(ret
);
8321 unlock_user(p
, arg1
, 0);
8324 case TARGET_NR_openat
:
8325 if (!(p
= lock_user_string(arg2
)))
8326 return -TARGET_EFAULT
;
8327 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
8328 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
8330 fd_trans_unregister(ret
);
8331 unlock_user(p
, arg2
, 0);
8333 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8334 case TARGET_NR_name_to_handle_at
:
8335 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
8338 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8339 case TARGET_NR_open_by_handle_at
:
8340 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
8341 fd_trans_unregister(ret
);
8344 case TARGET_NR_close
:
8345 fd_trans_unregister(arg1
);
8346 return get_errno(close(arg1
));
8349 return do_brk(arg1
);
8350 #ifdef TARGET_NR_fork
8351 case TARGET_NR_fork
:
8352 return get_errno(do_fork(cpu_env
, TARGET_SIGCHLD
, 0, 0, 0, 0));
8354 #ifdef TARGET_NR_waitpid
8355 case TARGET_NR_waitpid
:
8358 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
8359 if (!is_error(ret
) && arg2
&& ret
8360 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
8361 return -TARGET_EFAULT
;
8365 #ifdef TARGET_NR_waitid
8366 case TARGET_NR_waitid
:
8370 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
8371 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
8372 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
8373 return -TARGET_EFAULT
;
8374 host_to_target_siginfo(p
, &info
);
8375 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
8380 #ifdef TARGET_NR_creat /* not on alpha */
8381 case TARGET_NR_creat
:
8382 if (!(p
= lock_user_string(arg1
)))
8383 return -TARGET_EFAULT
;
8384 ret
= get_errno(creat(p
, arg2
));
8385 fd_trans_unregister(ret
);
8386 unlock_user(p
, arg1
, 0);
8389 #ifdef TARGET_NR_link
8390 case TARGET_NR_link
:
8393 p
= lock_user_string(arg1
);
8394 p2
= lock_user_string(arg2
);
8396 ret
= -TARGET_EFAULT
;
8398 ret
= get_errno(link(p
, p2
));
8399 unlock_user(p2
, arg2
, 0);
8400 unlock_user(p
, arg1
, 0);
8404 #if defined(TARGET_NR_linkat)
8405 case TARGET_NR_linkat
:
8409 return -TARGET_EFAULT
;
8410 p
= lock_user_string(arg2
);
8411 p2
= lock_user_string(arg4
);
8413 ret
= -TARGET_EFAULT
;
8415 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
8416 unlock_user(p
, arg2
, 0);
8417 unlock_user(p2
, arg4
, 0);
8421 #ifdef TARGET_NR_unlink
8422 case TARGET_NR_unlink
:
8423 if (!(p
= lock_user_string(arg1
)))
8424 return -TARGET_EFAULT
;
8425 ret
= get_errno(unlink(p
));
8426 unlock_user(p
, arg1
, 0);
8429 #if defined(TARGET_NR_unlinkat)
8430 case TARGET_NR_unlinkat
:
8431 if (!(p
= lock_user_string(arg2
)))
8432 return -TARGET_EFAULT
;
8433 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
8434 unlock_user(p
, arg2
, 0);
8437 case TARGET_NR_execve
:
8439 char **argp
, **envp
;
8442 abi_ulong guest_argp
;
8443 abi_ulong guest_envp
;
8450 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
8451 if (get_user_ual(addr
, gp
))
8452 return -TARGET_EFAULT
;
8459 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
8460 if (get_user_ual(addr
, gp
))
8461 return -TARGET_EFAULT
;
8467 argp
= g_new0(char *, argc
+ 1);
8468 envp
= g_new0(char *, envc
+ 1);
8470 for (gp
= guest_argp
, q
= argp
; gp
;
8471 gp
+= sizeof(abi_ulong
), q
++) {
8472 if (get_user_ual(addr
, gp
))
8476 if (!(*q
= lock_user_string(addr
)))
8478 total_size
+= strlen(*q
) + 1;
8482 for (gp
= guest_envp
, q
= envp
; gp
;
8483 gp
+= sizeof(abi_ulong
), q
++) {
8484 if (get_user_ual(addr
, gp
))
8488 if (!(*q
= lock_user_string(addr
)))
8490 total_size
+= strlen(*q
) + 1;
8494 if (!(p
= lock_user_string(arg1
)))
8496 /* Although execve() is not an interruptible syscall it is
8497 * a special case where we must use the safe_syscall wrapper:
8498 * if we allow a signal to happen before we make the host
8499 * syscall then we will 'lose' it, because at the point of
8500 * execve the process leaves QEMU's control. So we use the
8501 * safe syscall wrapper to ensure that we either take the
8502 * signal as a guest signal, or else it does not happen
8503 * before the execve completes and makes it the other
8504 * program's problem.
8506 ret
= get_errno(safe_execve(p
, argp
, envp
));
8507 unlock_user(p
, arg1
, 0);
8512 ret
= -TARGET_EFAULT
;
8515 for (gp
= guest_argp
, q
= argp
; *q
;
8516 gp
+= sizeof(abi_ulong
), q
++) {
8517 if (get_user_ual(addr
, gp
)
8520 unlock_user(*q
, addr
, 0);
8522 for (gp
= guest_envp
, q
= envp
; *q
;
8523 gp
+= sizeof(abi_ulong
), q
++) {
8524 if (get_user_ual(addr
, gp
)
8527 unlock_user(*q
, addr
, 0);
8534 case TARGET_NR_chdir
:
8535 if (!(p
= lock_user_string(arg1
)))
8536 return -TARGET_EFAULT
;
8537 ret
= get_errno(chdir(p
));
8538 unlock_user(p
, arg1
, 0);
8540 #ifdef TARGET_NR_time
8541 case TARGET_NR_time
:
8544 ret
= get_errno(time(&host_time
));
8547 && put_user_sal(host_time
, arg1
))
8548 return -TARGET_EFAULT
;
8552 #ifdef TARGET_NR_mknod
8553 case TARGET_NR_mknod
:
8554 if (!(p
= lock_user_string(arg1
)))
8555 return -TARGET_EFAULT
;
8556 ret
= get_errno(mknod(p
, arg2
, arg3
));
8557 unlock_user(p
, arg1
, 0);
8560 #if defined(TARGET_NR_mknodat)
8561 case TARGET_NR_mknodat
:
8562 if (!(p
= lock_user_string(arg2
)))
8563 return -TARGET_EFAULT
;
8564 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
8565 unlock_user(p
, arg2
, 0);
8568 #ifdef TARGET_NR_chmod
8569 case TARGET_NR_chmod
:
8570 if (!(p
= lock_user_string(arg1
)))
8571 return -TARGET_EFAULT
;
8572 ret
= get_errno(chmod(p
, arg2
));
8573 unlock_user(p
, arg1
, 0);
8576 #ifdef TARGET_NR_lseek
8577 case TARGET_NR_lseek
:
8578 return get_errno(lseek(arg1
, arg2
, arg3
));
8580 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8581 /* Alpha specific */
8582 case TARGET_NR_getxpid
:
8583 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
8584 return get_errno(getpid());
8586 #ifdef TARGET_NR_getpid
8587 case TARGET_NR_getpid
:
8588 return get_errno(getpid());
8590 case TARGET_NR_mount
:
8592 /* need to look at the data field */
8596 p
= lock_user_string(arg1
);
8598 return -TARGET_EFAULT
;
8604 p2
= lock_user_string(arg2
);
8607 unlock_user(p
, arg1
, 0);
8609 return -TARGET_EFAULT
;
8613 p3
= lock_user_string(arg3
);
8616 unlock_user(p
, arg1
, 0);
8618 unlock_user(p2
, arg2
, 0);
8619 return -TARGET_EFAULT
;
8625 /* FIXME - arg5 should be locked, but it isn't clear how to
8626 * do that since it's not guaranteed to be a NULL-terminated
8630 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
8632 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
8634 ret
= get_errno(ret
);
8637 unlock_user(p
, arg1
, 0);
8639 unlock_user(p2
, arg2
, 0);
8641 unlock_user(p3
, arg3
, 0);
8645 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8646 #if defined(TARGET_NR_umount)
8647 case TARGET_NR_umount
:
8649 #if defined(TARGET_NR_oldumount)
8650 case TARGET_NR_oldumount
:
8652 if (!(p
= lock_user_string(arg1
)))
8653 return -TARGET_EFAULT
;
8654 ret
= get_errno(umount(p
));
8655 unlock_user(p
, arg1
, 0);
8658 #ifdef TARGET_NR_stime /* not on alpha */
8659 case TARGET_NR_stime
:
8663 if (get_user_sal(ts
.tv_sec
, arg1
)) {
8664 return -TARGET_EFAULT
;
8666 return get_errno(clock_settime(CLOCK_REALTIME
, &ts
));
8669 #ifdef TARGET_NR_alarm /* not on alpha */
8670 case TARGET_NR_alarm
:
8673 #ifdef TARGET_NR_pause /* not on alpha */
8674 case TARGET_NR_pause
:
8675 if (!block_signals()) {
8676 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
8678 return -TARGET_EINTR
;
8680 #ifdef TARGET_NR_utime
8681 case TARGET_NR_utime
:
8683 struct utimbuf tbuf
, *host_tbuf
;
8684 struct target_utimbuf
*target_tbuf
;
8686 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
8687 return -TARGET_EFAULT
;
8688 tbuf
.actime
= tswapal(target_tbuf
->actime
);
8689 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
8690 unlock_user_struct(target_tbuf
, arg2
, 0);
8695 if (!(p
= lock_user_string(arg1
)))
8696 return -TARGET_EFAULT
;
8697 ret
= get_errno(utime(p
, host_tbuf
));
8698 unlock_user(p
, arg1
, 0);
8702 #ifdef TARGET_NR_utimes
8703 case TARGET_NR_utimes
:
8705 struct timeval
*tvp
, tv
[2];
8707 if (copy_from_user_timeval(&tv
[0], arg2
)
8708 || copy_from_user_timeval(&tv
[1],
8709 arg2
+ sizeof(struct target_timeval
)))
8710 return -TARGET_EFAULT
;
8715 if (!(p
= lock_user_string(arg1
)))
8716 return -TARGET_EFAULT
;
8717 ret
= get_errno(utimes(p
, tvp
));
8718 unlock_user(p
, arg1
, 0);
8722 #if defined(TARGET_NR_futimesat)
8723 case TARGET_NR_futimesat
:
8725 struct timeval
*tvp
, tv
[2];
8727 if (copy_from_user_timeval(&tv
[0], arg3
)
8728 || copy_from_user_timeval(&tv
[1],
8729 arg3
+ sizeof(struct target_timeval
)))
8730 return -TARGET_EFAULT
;
8735 if (!(p
= lock_user_string(arg2
))) {
8736 return -TARGET_EFAULT
;
8738 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
8739 unlock_user(p
, arg2
, 0);
8743 #ifdef TARGET_NR_access
8744 case TARGET_NR_access
:
8745 if (!(p
= lock_user_string(arg1
))) {
8746 return -TARGET_EFAULT
;
8748 ret
= get_errno(access(path(p
), arg2
));
8749 unlock_user(p
, arg1
, 0);
8752 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8753 case TARGET_NR_faccessat
:
8754 if (!(p
= lock_user_string(arg2
))) {
8755 return -TARGET_EFAULT
;
8757 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
8758 unlock_user(p
, arg2
, 0);
8761 #ifdef TARGET_NR_nice /* not on alpha */
8762 case TARGET_NR_nice
:
8763 return get_errno(nice(arg1
));
8765 case TARGET_NR_sync
:
8768 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8769 case TARGET_NR_syncfs
:
8770 return get_errno(syncfs(arg1
));
8772 case TARGET_NR_kill
:
8773 return get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
8774 #ifdef TARGET_NR_rename
8775 case TARGET_NR_rename
:
8778 p
= lock_user_string(arg1
);
8779 p2
= lock_user_string(arg2
);
8781 ret
= -TARGET_EFAULT
;
8783 ret
= get_errno(rename(p
, p2
));
8784 unlock_user(p2
, arg2
, 0);
8785 unlock_user(p
, arg1
, 0);
8789 #if defined(TARGET_NR_renameat)
8790 case TARGET_NR_renameat
:
8793 p
= lock_user_string(arg2
);
8794 p2
= lock_user_string(arg4
);
8796 ret
= -TARGET_EFAULT
;
8798 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
8799 unlock_user(p2
, arg4
, 0);
8800 unlock_user(p
, arg2
, 0);
8804 #if defined(TARGET_NR_renameat2)
8805 case TARGET_NR_renameat2
:
8808 p
= lock_user_string(arg2
);
8809 p2
= lock_user_string(arg4
);
8811 ret
= -TARGET_EFAULT
;
8813 ret
= get_errno(sys_renameat2(arg1
, p
, arg3
, p2
, arg5
));
8815 unlock_user(p2
, arg4
, 0);
8816 unlock_user(p
, arg2
, 0);
8820 #ifdef TARGET_NR_mkdir
8821 case TARGET_NR_mkdir
:
8822 if (!(p
= lock_user_string(arg1
)))
8823 return -TARGET_EFAULT
;
8824 ret
= get_errno(mkdir(p
, arg2
));
8825 unlock_user(p
, arg1
, 0);
8828 #if defined(TARGET_NR_mkdirat)
8829 case TARGET_NR_mkdirat
:
8830 if (!(p
= lock_user_string(arg2
)))
8831 return -TARGET_EFAULT
;
8832 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
8833 unlock_user(p
, arg2
, 0);
8836 #ifdef TARGET_NR_rmdir
8837 case TARGET_NR_rmdir
:
8838 if (!(p
= lock_user_string(arg1
)))
8839 return -TARGET_EFAULT
;
8840 ret
= get_errno(rmdir(p
));
8841 unlock_user(p
, arg1
, 0);
8845 ret
= get_errno(dup(arg1
));
8847 fd_trans_dup(arg1
, ret
);
8850 #ifdef TARGET_NR_pipe
8851 case TARGET_NR_pipe
:
8852 return do_pipe(cpu_env
, arg1
, 0, 0);
8854 #ifdef TARGET_NR_pipe2
8855 case TARGET_NR_pipe2
:
8856 return do_pipe(cpu_env
, arg1
,
8857 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
8859 case TARGET_NR_times
:
8861 struct target_tms
*tmsp
;
8863 ret
= get_errno(times(&tms
));
8865 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
8867 return -TARGET_EFAULT
;
8868 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
8869 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
8870 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
8871 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
8874 ret
= host_to_target_clock_t(ret
);
8877 case TARGET_NR_acct
:
8879 ret
= get_errno(acct(NULL
));
8881 if (!(p
= lock_user_string(arg1
))) {
8882 return -TARGET_EFAULT
;
8884 ret
= get_errno(acct(path(p
)));
8885 unlock_user(p
, arg1
, 0);
8888 #ifdef TARGET_NR_umount2
8889 case TARGET_NR_umount2
:
8890 if (!(p
= lock_user_string(arg1
)))
8891 return -TARGET_EFAULT
;
8892 ret
= get_errno(umount2(p
, arg2
));
8893 unlock_user(p
, arg1
, 0);
8896 case TARGET_NR_ioctl
:
8897 return do_ioctl(arg1
, arg2
, arg3
);
8898 #ifdef TARGET_NR_fcntl
8899 case TARGET_NR_fcntl
:
8900 return do_fcntl(arg1
, arg2
, arg3
);
8902 case TARGET_NR_setpgid
:
8903 return get_errno(setpgid(arg1
, arg2
));
8904 case TARGET_NR_umask
:
8905 return get_errno(umask(arg1
));
8906 case TARGET_NR_chroot
:
8907 if (!(p
= lock_user_string(arg1
)))
8908 return -TARGET_EFAULT
;
8909 ret
= get_errno(chroot(p
));
8910 unlock_user(p
, arg1
, 0);
8912 #ifdef TARGET_NR_dup2
8913 case TARGET_NR_dup2
:
8914 ret
= get_errno(dup2(arg1
, arg2
));
8916 fd_trans_dup(arg1
, arg2
);
8920 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8921 case TARGET_NR_dup3
:
8925 if ((arg3
& ~TARGET_O_CLOEXEC
) != 0) {
8928 host_flags
= target_to_host_bitmask(arg3
, fcntl_flags_tbl
);
8929 ret
= get_errno(dup3(arg1
, arg2
, host_flags
));
8931 fd_trans_dup(arg1
, arg2
);
8936 #ifdef TARGET_NR_getppid /* not on alpha */
8937 case TARGET_NR_getppid
:
8938 return get_errno(getppid());
8940 #ifdef TARGET_NR_getpgrp
8941 case TARGET_NR_getpgrp
:
8942 return get_errno(getpgrp());
8944 case TARGET_NR_setsid
:
8945 return get_errno(setsid());
8946 #ifdef TARGET_NR_sigaction
8947 case TARGET_NR_sigaction
:
8949 #if defined(TARGET_ALPHA)
8950 struct target_sigaction act
, oact
, *pact
= 0;
8951 struct target_old_sigaction
*old_act
;
8953 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8954 return -TARGET_EFAULT
;
8955 act
._sa_handler
= old_act
->_sa_handler
;
8956 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8957 act
.sa_flags
= old_act
->sa_flags
;
8958 act
.sa_restorer
= 0;
8959 unlock_user_struct(old_act
, arg2
, 0);
8962 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8963 if (!is_error(ret
) && arg3
) {
8964 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8965 return -TARGET_EFAULT
;
8966 old_act
->_sa_handler
= oact
._sa_handler
;
8967 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8968 old_act
->sa_flags
= oact
.sa_flags
;
8969 unlock_user_struct(old_act
, arg3
, 1);
8971 #elif defined(TARGET_MIPS)
8972 struct target_sigaction act
, oact
, *pact
, *old_act
;
8975 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8976 return -TARGET_EFAULT
;
8977 act
._sa_handler
= old_act
->_sa_handler
;
8978 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
8979 act
.sa_flags
= old_act
->sa_flags
;
8980 unlock_user_struct(old_act
, arg2
, 0);
8986 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8988 if (!is_error(ret
) && arg3
) {
8989 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8990 return -TARGET_EFAULT
;
8991 old_act
->_sa_handler
= oact
._sa_handler
;
8992 old_act
->sa_flags
= oact
.sa_flags
;
8993 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
8994 old_act
->sa_mask
.sig
[1] = 0;
8995 old_act
->sa_mask
.sig
[2] = 0;
8996 old_act
->sa_mask
.sig
[3] = 0;
8997 unlock_user_struct(old_act
, arg3
, 1);
9000 struct target_old_sigaction
*old_act
;
9001 struct target_sigaction act
, oact
, *pact
;
9003 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
9004 return -TARGET_EFAULT
;
9005 act
._sa_handler
= old_act
->_sa_handler
;
9006 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
9007 act
.sa_flags
= old_act
->sa_flags
;
9008 act
.sa_restorer
= old_act
->sa_restorer
;
9009 #ifdef TARGET_ARCH_HAS_KA_RESTORER
9010 act
.ka_restorer
= 0;
9012 unlock_user_struct(old_act
, arg2
, 0);
9017 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
9018 if (!is_error(ret
) && arg3
) {
9019 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
9020 return -TARGET_EFAULT
;
9021 old_act
->_sa_handler
= oact
._sa_handler
;
9022 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
9023 old_act
->sa_flags
= oact
.sa_flags
;
9024 old_act
->sa_restorer
= oact
.sa_restorer
;
9025 unlock_user_struct(old_act
, arg3
, 1);
9031 case TARGET_NR_rt_sigaction
:
9033 #if defined(TARGET_ALPHA)
9034 /* For Alpha and SPARC this is a 5 argument syscall, with
9035 * a 'restorer' parameter which must be copied into the
9036 * sa_restorer field of the sigaction struct.
9037 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9038 * and arg5 is the sigsetsize.
9039 * Alpha also has a separate rt_sigaction struct that it uses
9040 * here; SPARC uses the usual sigaction struct.
9042 struct target_rt_sigaction
*rt_act
;
9043 struct target_sigaction act
, oact
, *pact
= 0;
9045 if (arg4
!= sizeof(target_sigset_t
)) {
9046 return -TARGET_EINVAL
;
9049 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
9050 return -TARGET_EFAULT
;
9051 act
._sa_handler
= rt_act
->_sa_handler
;
9052 act
.sa_mask
= rt_act
->sa_mask
;
9053 act
.sa_flags
= rt_act
->sa_flags
;
9054 act
.sa_restorer
= arg5
;
9055 unlock_user_struct(rt_act
, arg2
, 0);
9058 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
9059 if (!is_error(ret
) && arg3
) {
9060 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
9061 return -TARGET_EFAULT
;
9062 rt_act
->_sa_handler
= oact
._sa_handler
;
9063 rt_act
->sa_mask
= oact
.sa_mask
;
9064 rt_act
->sa_flags
= oact
.sa_flags
;
9065 unlock_user_struct(rt_act
, arg3
, 1);
9069 target_ulong restorer
= arg4
;
9070 target_ulong sigsetsize
= arg5
;
9072 target_ulong sigsetsize
= arg4
;
9074 struct target_sigaction
*act
;
9075 struct target_sigaction
*oact
;
9077 if (sigsetsize
!= sizeof(target_sigset_t
)) {
9078 return -TARGET_EINVAL
;
9081 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1)) {
9082 return -TARGET_EFAULT
;
9084 #ifdef TARGET_ARCH_HAS_KA_RESTORER
9085 act
->ka_restorer
= restorer
;
9091 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
9092 ret
= -TARGET_EFAULT
;
9093 goto rt_sigaction_fail
;
9097 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
9100 unlock_user_struct(act
, arg2
, 0);
9102 unlock_user_struct(oact
, arg3
, 1);
9106 #ifdef TARGET_NR_sgetmask /* not on alpha */
9107 case TARGET_NR_sgetmask
:
9110 abi_ulong target_set
;
9111 ret
= do_sigprocmask(0, NULL
, &cur_set
);
9113 host_to_target_old_sigset(&target_set
, &cur_set
);
9119 #ifdef TARGET_NR_ssetmask /* not on alpha */
9120 case TARGET_NR_ssetmask
:
9123 abi_ulong target_set
= arg1
;
9124 target_to_host_old_sigset(&set
, &target_set
);
9125 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
9127 host_to_target_old_sigset(&target_set
, &oset
);
9133 #ifdef TARGET_NR_sigprocmask
9134 case TARGET_NR_sigprocmask
:
9136 #if defined(TARGET_ALPHA)
9137 sigset_t set
, oldset
;
9142 case TARGET_SIG_BLOCK
:
9145 case TARGET_SIG_UNBLOCK
:
9148 case TARGET_SIG_SETMASK
:
9152 return -TARGET_EINVAL
;
9155 target_to_host_old_sigset(&set
, &mask
);
9157 ret
= do_sigprocmask(how
, &set
, &oldset
);
9158 if (!is_error(ret
)) {
9159 host_to_target_old_sigset(&mask
, &oldset
);
9161 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
9164 sigset_t set
, oldset
, *set_ptr
;
9169 case TARGET_SIG_BLOCK
:
9172 case TARGET_SIG_UNBLOCK
:
9175 case TARGET_SIG_SETMASK
:
9179 return -TARGET_EINVAL
;
9181 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
9182 return -TARGET_EFAULT
;
9183 target_to_host_old_sigset(&set
, p
);
9184 unlock_user(p
, arg2
, 0);
9190 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
9191 if (!is_error(ret
) && arg3
) {
9192 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
9193 return -TARGET_EFAULT
;
9194 host_to_target_old_sigset(p
, &oldset
);
9195 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
9201 case TARGET_NR_rt_sigprocmask
:
9204 sigset_t set
, oldset
, *set_ptr
;
9206 if (arg4
!= sizeof(target_sigset_t
)) {
9207 return -TARGET_EINVAL
;
9212 case TARGET_SIG_BLOCK
:
9215 case TARGET_SIG_UNBLOCK
:
9218 case TARGET_SIG_SETMASK
:
9222 return -TARGET_EINVAL
;
9224 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
9225 return -TARGET_EFAULT
;
9226 target_to_host_sigset(&set
, p
);
9227 unlock_user(p
, arg2
, 0);
9233 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
9234 if (!is_error(ret
) && arg3
) {
9235 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
9236 return -TARGET_EFAULT
;
9237 host_to_target_sigset(p
, &oldset
);
9238 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
9242 #ifdef TARGET_NR_sigpending
9243 case TARGET_NR_sigpending
:
9246 ret
= get_errno(sigpending(&set
));
9247 if (!is_error(ret
)) {
9248 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
9249 return -TARGET_EFAULT
;
9250 host_to_target_old_sigset(p
, &set
);
9251 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
9256 case TARGET_NR_rt_sigpending
:
9260 /* Yes, this check is >, not != like most. We follow the kernel's
9261 * logic and it does it like this because it implements
9262 * NR_sigpending through the same code path, and in that case
9263 * the old_sigset_t is smaller in size.
9265 if (arg2
> sizeof(target_sigset_t
)) {
9266 return -TARGET_EINVAL
;
9269 ret
= get_errno(sigpending(&set
));
9270 if (!is_error(ret
)) {
9271 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
9272 return -TARGET_EFAULT
;
9273 host_to_target_sigset(p
, &set
);
9274 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
9278 #ifdef TARGET_NR_sigsuspend
9279 case TARGET_NR_sigsuspend
:
9281 TaskState
*ts
= cpu
->opaque
;
9282 #if defined(TARGET_ALPHA)
9283 abi_ulong mask
= arg1
;
9284 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
9286 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
9287 return -TARGET_EFAULT
;
9288 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
9289 unlock_user(p
, arg1
, 0);
9291 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
9293 if (ret
!= -TARGET_ERESTARTSYS
) {
9294 ts
->in_sigsuspend
= 1;
9299 case TARGET_NR_rt_sigsuspend
:
9301 TaskState
*ts
= cpu
->opaque
;
9303 if (arg2
!= sizeof(target_sigset_t
)) {
9304 return -TARGET_EINVAL
;
9306 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
9307 return -TARGET_EFAULT
;
9308 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
9309 unlock_user(p
, arg1
, 0);
9310 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
9312 if (ret
!= -TARGET_ERESTARTSYS
) {
9313 ts
->in_sigsuspend
= 1;
9317 #ifdef TARGET_NR_rt_sigtimedwait
9318 case TARGET_NR_rt_sigtimedwait
:
9321 struct timespec uts
, *puts
;
9324 if (arg4
!= sizeof(target_sigset_t
)) {
9325 return -TARGET_EINVAL
;
9328 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
9329 return -TARGET_EFAULT
;
9330 target_to_host_sigset(&set
, p
);
9331 unlock_user(p
, arg1
, 0);
9334 if (target_to_host_timespec(puts
, arg3
)) {
9335 return -TARGET_EFAULT
;
9340 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
9342 if (!is_error(ret
)) {
9344 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
9347 return -TARGET_EFAULT
;
9349 host_to_target_siginfo(p
, &uinfo
);
9350 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
9352 ret
= host_to_target_signal(ret
);
9357 #ifdef TARGET_NR_rt_sigtimedwait_time64
9358 case TARGET_NR_rt_sigtimedwait_time64
:
9361 struct timespec uts
, *puts
;
9364 if (arg4
!= sizeof(target_sigset_t
)) {
9365 return -TARGET_EINVAL
;
9368 p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1);
9370 return -TARGET_EFAULT
;
9372 target_to_host_sigset(&set
, p
);
9373 unlock_user(p
, arg1
, 0);
9376 if (target_to_host_timespec64(puts
, arg3
)) {
9377 return -TARGET_EFAULT
;
9382 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
9384 if (!is_error(ret
)) {
9386 p
= lock_user(VERIFY_WRITE
, arg2
,
9387 sizeof(target_siginfo_t
), 0);
9389 return -TARGET_EFAULT
;
9391 host_to_target_siginfo(p
, &uinfo
);
9392 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
9394 ret
= host_to_target_signal(ret
);
9399 case TARGET_NR_rt_sigqueueinfo
:
9403 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
9405 return -TARGET_EFAULT
;
9407 target_to_host_siginfo(&uinfo
, p
);
9408 unlock_user(p
, arg3
, 0);
9409 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
9412 case TARGET_NR_rt_tgsigqueueinfo
:
9416 p
= lock_user(VERIFY_READ
, arg4
, sizeof(target_siginfo_t
), 1);
9418 return -TARGET_EFAULT
;
9420 target_to_host_siginfo(&uinfo
, p
);
9421 unlock_user(p
, arg4
, 0);
9422 ret
= get_errno(sys_rt_tgsigqueueinfo(arg1
, arg2
, arg3
, &uinfo
));
9425 #ifdef TARGET_NR_sigreturn
9426 case TARGET_NR_sigreturn
:
9427 if (block_signals()) {
9428 return -TARGET_ERESTARTSYS
;
9430 return do_sigreturn(cpu_env
);
9432 case TARGET_NR_rt_sigreturn
:
9433 if (block_signals()) {
9434 return -TARGET_ERESTARTSYS
;
9436 return do_rt_sigreturn(cpu_env
);
9437 case TARGET_NR_sethostname
:
9438 if (!(p
= lock_user_string(arg1
)))
9439 return -TARGET_EFAULT
;
9440 ret
= get_errno(sethostname(p
, arg2
));
9441 unlock_user(p
, arg1
, 0);
9443 #ifdef TARGET_NR_setrlimit
9444 case TARGET_NR_setrlimit
:
9446 int resource
= target_to_host_resource(arg1
);
9447 struct target_rlimit
*target_rlim
;
9449 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
9450 return -TARGET_EFAULT
;
9451 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
9452 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
9453 unlock_user_struct(target_rlim
, arg2
, 0);
9455 * If we just passed through resource limit settings for memory then
9456 * they would also apply to QEMU's own allocations, and QEMU will
9457 * crash or hang or die if its allocations fail. Ideally we would
9458 * track the guest allocations in QEMU and apply the limits ourselves.
9459 * For now, just tell the guest the call succeeded but don't actually
9462 if (resource
!= RLIMIT_AS
&&
9463 resource
!= RLIMIT_DATA
&&
9464 resource
!= RLIMIT_STACK
) {
9465 return get_errno(setrlimit(resource
, &rlim
));
9471 #ifdef TARGET_NR_getrlimit
9472 case TARGET_NR_getrlimit
:
9474 int resource
= target_to_host_resource(arg1
);
9475 struct target_rlimit
*target_rlim
;
9478 ret
= get_errno(getrlimit(resource
, &rlim
));
9479 if (!is_error(ret
)) {
9480 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
9481 return -TARGET_EFAULT
;
9482 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
9483 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
9484 unlock_user_struct(target_rlim
, arg2
, 1);
9489 case TARGET_NR_getrusage
:
9491 struct rusage rusage
;
9492 ret
= get_errno(getrusage(arg1
, &rusage
));
9493 if (!is_error(ret
)) {
9494 ret
= host_to_target_rusage(arg2
, &rusage
);
9498 #if defined(TARGET_NR_gettimeofday)
9499 case TARGET_NR_gettimeofday
:
9504 ret
= get_errno(gettimeofday(&tv
, &tz
));
9505 if (!is_error(ret
)) {
9506 if (arg1
&& copy_to_user_timeval(arg1
, &tv
)) {
9507 return -TARGET_EFAULT
;
9509 if (arg2
&& copy_to_user_timezone(arg2
, &tz
)) {
9510 return -TARGET_EFAULT
;
9516 #if defined(TARGET_NR_settimeofday)
9517 case TARGET_NR_settimeofday
:
9519 struct timeval tv
, *ptv
= NULL
;
9520 struct timezone tz
, *ptz
= NULL
;
9523 if (copy_from_user_timeval(&tv
, arg1
)) {
9524 return -TARGET_EFAULT
;
9530 if (copy_from_user_timezone(&tz
, arg2
)) {
9531 return -TARGET_EFAULT
;
9536 return get_errno(settimeofday(ptv
, ptz
));
9539 #if defined(TARGET_NR_select)
9540 case TARGET_NR_select
:
9541 #if defined(TARGET_WANT_NI_OLD_SELECT)
9542 /* some architectures used to have old_select here
9543 * but now ENOSYS it.
9545 ret
= -TARGET_ENOSYS
;
9546 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9547 ret
= do_old_select(arg1
);
9549 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9553 #ifdef TARGET_NR_pselect6
9554 case TARGET_NR_pselect6
:
9555 return do_pselect6(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, false);
9557 #ifdef TARGET_NR_pselect6_time64
9558 case TARGET_NR_pselect6_time64
:
9559 return do_pselect6(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, true);
9561 #ifdef TARGET_NR_symlink
9562 case TARGET_NR_symlink
:
9565 p
= lock_user_string(arg1
);
9566 p2
= lock_user_string(arg2
);
9568 ret
= -TARGET_EFAULT
;
9570 ret
= get_errno(symlink(p
, p2
));
9571 unlock_user(p2
, arg2
, 0);
9572 unlock_user(p
, arg1
, 0);
9576 #if defined(TARGET_NR_symlinkat)
9577 case TARGET_NR_symlinkat
:
9580 p
= lock_user_string(arg1
);
9581 p2
= lock_user_string(arg3
);
9583 ret
= -TARGET_EFAULT
;
9585 ret
= get_errno(symlinkat(p
, arg2
, p2
));
9586 unlock_user(p2
, arg3
, 0);
9587 unlock_user(p
, arg1
, 0);
9591 #ifdef TARGET_NR_readlink
9592 case TARGET_NR_readlink
:
9595 p
= lock_user_string(arg1
);
9596 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9598 ret
= -TARGET_EFAULT
;
9600 /* Short circuit this for the magic exe check. */
9601 ret
= -TARGET_EINVAL
;
9602 } else if (is_proc_myself((const char *)p
, "exe")) {
9603 char real
[PATH_MAX
], *temp
;
9604 temp
= realpath(exec_path
, real
);
9605 /* Return value is # of bytes that we wrote to the buffer. */
9607 ret
= get_errno(-1);
9609 /* Don't worry about sign mismatch as earlier mapping
9610 * logic would have thrown a bad address error. */
9611 ret
= MIN(strlen(real
), arg3
);
9612 /* We cannot NUL terminate the string. */
9613 memcpy(p2
, real
, ret
);
9616 ret
= get_errno(readlink(path(p
), p2
, arg3
));
9618 unlock_user(p2
, arg2
, ret
);
9619 unlock_user(p
, arg1
, 0);
9623 #if defined(TARGET_NR_readlinkat)
9624 case TARGET_NR_readlinkat
:
9627 p
= lock_user_string(arg2
);
9628 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
9630 ret
= -TARGET_EFAULT
;
9631 } else if (is_proc_myself((const char *)p
, "exe")) {
9632 char real
[PATH_MAX
], *temp
;
9633 temp
= realpath(exec_path
, real
);
9634 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
9635 snprintf((char *)p2
, arg4
, "%s", real
);
9637 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
9639 unlock_user(p2
, arg3
, ret
);
9640 unlock_user(p
, arg2
, 0);
9644 #ifdef TARGET_NR_swapon
9645 case TARGET_NR_swapon
:
9646 if (!(p
= lock_user_string(arg1
)))
9647 return -TARGET_EFAULT
;
9648 ret
= get_errno(swapon(p
, arg2
));
9649 unlock_user(p
, arg1
, 0);
9652 case TARGET_NR_reboot
:
9653 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
9654 /* arg4 must be ignored in all other cases */
9655 p
= lock_user_string(arg4
);
9657 return -TARGET_EFAULT
;
9659 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
9660 unlock_user(p
, arg4
, 0);
9662 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
9665 #ifdef TARGET_NR_mmap
9666 case TARGET_NR_mmap
:
9667 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9668 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9669 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9670 || defined(TARGET_S390X)
9673 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
9674 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
9675 return -TARGET_EFAULT
;
9682 unlock_user(v
, arg1
, 0);
9683 ret
= get_errno(target_mmap(v1
, v2
, v3
,
9684 target_to_host_bitmask(v4
, mmap_flags_tbl
),
9688 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
9689 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9695 #ifdef TARGET_NR_mmap2
9696 case TARGET_NR_mmap2
:
9698 #define MMAP_SHIFT 12
9700 ret
= target_mmap(arg1
, arg2
, arg3
,
9701 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9702 arg5
, arg6
<< MMAP_SHIFT
);
9703 return get_errno(ret
);
9705 case TARGET_NR_munmap
:
9706 return get_errno(target_munmap(arg1
, arg2
));
9707 case TARGET_NR_mprotect
:
9709 TaskState
*ts
= cpu
->opaque
;
9710 /* Special hack to detect libc making the stack executable. */
9711 if ((arg3
& PROT_GROWSDOWN
)
9712 && arg1
>= ts
->info
->stack_limit
9713 && arg1
<= ts
->info
->start_stack
) {
9714 arg3
&= ~PROT_GROWSDOWN
;
9715 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
9716 arg1
= ts
->info
->stack_limit
;
9719 return get_errno(target_mprotect(arg1
, arg2
, arg3
));
9720 #ifdef TARGET_NR_mremap
9721 case TARGET_NR_mremap
:
9722 return get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
9724 /* ??? msync/mlock/munlock are broken for softmmu. */
9725 #ifdef TARGET_NR_msync
9726 case TARGET_NR_msync
:
9727 return get_errno(msync(g2h(arg1
), arg2
, arg3
));
9729 #ifdef TARGET_NR_mlock
9730 case TARGET_NR_mlock
:
9731 return get_errno(mlock(g2h(arg1
), arg2
));
9733 #ifdef TARGET_NR_munlock
9734 case TARGET_NR_munlock
:
9735 return get_errno(munlock(g2h(arg1
), arg2
));
9737 #ifdef TARGET_NR_mlockall
9738 case TARGET_NR_mlockall
:
9739 return get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
9741 #ifdef TARGET_NR_munlockall
9742 case TARGET_NR_munlockall
:
9743 return get_errno(munlockall());
9745 #ifdef TARGET_NR_truncate
9746 case TARGET_NR_truncate
:
9747 if (!(p
= lock_user_string(arg1
)))
9748 return -TARGET_EFAULT
;
9749 ret
= get_errno(truncate(p
, arg2
));
9750 unlock_user(p
, arg1
, 0);
9753 #ifdef TARGET_NR_ftruncate
9754 case TARGET_NR_ftruncate
:
9755 return get_errno(ftruncate(arg1
, arg2
));
9757 case TARGET_NR_fchmod
:
9758 return get_errno(fchmod(arg1
, arg2
));
9759 #if defined(TARGET_NR_fchmodat)
9760 case TARGET_NR_fchmodat
:
9761 if (!(p
= lock_user_string(arg2
)))
9762 return -TARGET_EFAULT
;
9763 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
9764 unlock_user(p
, arg2
, 0);
9767 case TARGET_NR_getpriority
:
9768 /* Note that negative values are valid for getpriority, so we must
9769 differentiate based on errno settings. */
9771 ret
= getpriority(arg1
, arg2
);
9772 if (ret
== -1 && errno
!= 0) {
9773 return -host_to_target_errno(errno
);
9776 /* Return value is the unbiased priority. Signal no error. */
9777 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
9779 /* Return value is a biased priority to avoid negative numbers. */
9783 case TARGET_NR_setpriority
:
9784 return get_errno(setpriority(arg1
, arg2
, arg3
));
9785 #ifdef TARGET_NR_statfs
9786 case TARGET_NR_statfs
:
9787 if (!(p
= lock_user_string(arg1
))) {
9788 return -TARGET_EFAULT
;
9790 ret
= get_errno(statfs(path(p
), &stfs
));
9791 unlock_user(p
, arg1
, 0);
9793 if (!is_error(ret
)) {
9794 struct target_statfs
*target_stfs
;
9796 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
9797 return -TARGET_EFAULT
;
9798 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9799 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9800 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9801 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9802 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9803 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9804 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9805 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9806 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9807 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9808 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9809 #ifdef _STATFS_F_FLAGS
9810 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
9812 __put_user(0, &target_stfs
->f_flags
);
9814 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9815 unlock_user_struct(target_stfs
, arg2
, 1);
9819 #ifdef TARGET_NR_fstatfs
9820 case TARGET_NR_fstatfs
:
9821 ret
= get_errno(fstatfs(arg1
, &stfs
));
9822 goto convert_statfs
;
9824 #ifdef TARGET_NR_statfs64
9825 case TARGET_NR_statfs64
:
9826 if (!(p
= lock_user_string(arg1
))) {
9827 return -TARGET_EFAULT
;
9829 ret
= get_errno(statfs(path(p
), &stfs
));
9830 unlock_user(p
, arg1
, 0);
9832 if (!is_error(ret
)) {
9833 struct target_statfs64
*target_stfs
;
9835 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
9836 return -TARGET_EFAULT
;
9837 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9838 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9839 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9840 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9841 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9842 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9843 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9844 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9845 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9846 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9847 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9848 #ifdef _STATFS_F_FLAGS
9849 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
9851 __put_user(0, &target_stfs
->f_flags
);
9853 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9854 unlock_user_struct(target_stfs
, arg3
, 1);
9857 case TARGET_NR_fstatfs64
:
9858 ret
= get_errno(fstatfs(arg1
, &stfs
));
9859 goto convert_statfs64
;
9861 #ifdef TARGET_NR_socketcall
9862 case TARGET_NR_socketcall
:
9863 return do_socketcall(arg1
, arg2
);
9865 #ifdef TARGET_NR_accept
9866 case TARGET_NR_accept
:
9867 return do_accept4(arg1
, arg2
, arg3
, 0);
9869 #ifdef TARGET_NR_accept4
9870 case TARGET_NR_accept4
:
9871 return do_accept4(arg1
, arg2
, arg3
, arg4
);
9873 #ifdef TARGET_NR_bind
9874 case TARGET_NR_bind
:
9875 return do_bind(arg1
, arg2
, arg3
);
9877 #ifdef TARGET_NR_connect
9878 case TARGET_NR_connect
:
9879 return do_connect(arg1
, arg2
, arg3
);
9881 #ifdef TARGET_NR_getpeername
9882 case TARGET_NR_getpeername
:
9883 return do_getpeername(arg1
, arg2
, arg3
);
9885 #ifdef TARGET_NR_getsockname
9886 case TARGET_NR_getsockname
:
9887 return do_getsockname(arg1
, arg2
, arg3
);
9889 #ifdef TARGET_NR_getsockopt
9890 case TARGET_NR_getsockopt
:
9891 return do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
9893 #ifdef TARGET_NR_listen
9894 case TARGET_NR_listen
:
9895 return get_errno(listen(arg1
, arg2
));
9897 #ifdef TARGET_NR_recv
9898 case TARGET_NR_recv
:
9899 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
9901 #ifdef TARGET_NR_recvfrom
9902 case TARGET_NR_recvfrom
:
9903 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9905 #ifdef TARGET_NR_recvmsg
9906 case TARGET_NR_recvmsg
:
9907 return do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
9909 #ifdef TARGET_NR_send
9910 case TARGET_NR_send
:
9911 return do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
9913 #ifdef TARGET_NR_sendmsg
9914 case TARGET_NR_sendmsg
:
9915 return do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
9917 #ifdef TARGET_NR_sendmmsg
9918 case TARGET_NR_sendmmsg
:
9919 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
9921 #ifdef TARGET_NR_recvmmsg
9922 case TARGET_NR_recvmmsg
:
9923 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
9925 #ifdef TARGET_NR_sendto
9926 case TARGET_NR_sendto
:
9927 return do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9929 #ifdef TARGET_NR_shutdown
9930 case TARGET_NR_shutdown
:
9931 return get_errno(shutdown(arg1
, arg2
));
9933 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9934 case TARGET_NR_getrandom
:
9935 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
9937 return -TARGET_EFAULT
;
9939 ret
= get_errno(getrandom(p
, arg2
, arg3
));
9940 unlock_user(p
, arg1
, ret
);
9943 #ifdef TARGET_NR_socket
9944 case TARGET_NR_socket
:
9945 return do_socket(arg1
, arg2
, arg3
);
9947 #ifdef TARGET_NR_socketpair
9948 case TARGET_NR_socketpair
:
9949 return do_socketpair(arg1
, arg2
, arg3
, arg4
);
9951 #ifdef TARGET_NR_setsockopt
9952 case TARGET_NR_setsockopt
:
9953 return do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
9955 #if defined(TARGET_NR_syslog)
9956 case TARGET_NR_syslog
:
9961 case TARGET_SYSLOG_ACTION_CLOSE
: /* Close log */
9962 case TARGET_SYSLOG_ACTION_OPEN
: /* Open log */
9963 case TARGET_SYSLOG_ACTION_CLEAR
: /* Clear ring buffer */
9964 case TARGET_SYSLOG_ACTION_CONSOLE_OFF
: /* Disable logging */
9965 case TARGET_SYSLOG_ACTION_CONSOLE_ON
: /* Enable logging */
9966 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL
: /* Set messages level */
9967 case TARGET_SYSLOG_ACTION_SIZE_UNREAD
: /* Number of chars */
9968 case TARGET_SYSLOG_ACTION_SIZE_BUFFER
: /* Size of the buffer */
9969 return get_errno(sys_syslog((int)arg1
, NULL
, (int)arg3
));
9970 case TARGET_SYSLOG_ACTION_READ
: /* Read from log */
9971 case TARGET_SYSLOG_ACTION_READ_CLEAR
: /* Read/clear msgs */
9972 case TARGET_SYSLOG_ACTION_READ_ALL
: /* Read last messages */
9975 return -TARGET_EINVAL
;
9980 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9982 return -TARGET_EFAULT
;
9984 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
9985 unlock_user(p
, arg2
, arg3
);
9989 return -TARGET_EINVAL
;
9994 case TARGET_NR_setitimer
:
9996 struct itimerval value
, ovalue
, *pvalue
;
10000 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
10001 || copy_from_user_timeval(&pvalue
->it_value
,
10002 arg2
+ sizeof(struct target_timeval
)))
10003 return -TARGET_EFAULT
;
10007 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
10008 if (!is_error(ret
) && arg3
) {
10009 if (copy_to_user_timeval(arg3
,
10010 &ovalue
.it_interval
)
10011 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
10013 return -TARGET_EFAULT
;
10017 case TARGET_NR_getitimer
:
10019 struct itimerval value
;
10021 ret
= get_errno(getitimer(arg1
, &value
));
10022 if (!is_error(ret
) && arg2
) {
10023 if (copy_to_user_timeval(arg2
,
10024 &value
.it_interval
)
10025 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
10027 return -TARGET_EFAULT
;
10031 #ifdef TARGET_NR_stat
10032 case TARGET_NR_stat
:
10033 if (!(p
= lock_user_string(arg1
))) {
10034 return -TARGET_EFAULT
;
10036 ret
= get_errno(stat(path(p
), &st
));
10037 unlock_user(p
, arg1
, 0);
10040 #ifdef TARGET_NR_lstat
10041 case TARGET_NR_lstat
:
10042 if (!(p
= lock_user_string(arg1
))) {
10043 return -TARGET_EFAULT
;
10045 ret
= get_errno(lstat(path(p
), &st
));
10046 unlock_user(p
, arg1
, 0);
10049 #ifdef TARGET_NR_fstat
10050 case TARGET_NR_fstat
:
10052 ret
= get_errno(fstat(arg1
, &st
));
10053 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10056 if (!is_error(ret
)) {
10057 struct target_stat
*target_st
;
10059 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
10060 return -TARGET_EFAULT
;
10061 memset(target_st
, 0, sizeof(*target_st
));
10062 __put_user(st
.st_dev
, &target_st
->st_dev
);
10063 __put_user(st
.st_ino
, &target_st
->st_ino
);
10064 __put_user(st
.st_mode
, &target_st
->st_mode
);
10065 __put_user(st
.st_uid
, &target_st
->st_uid
);
10066 __put_user(st
.st_gid
, &target_st
->st_gid
);
10067 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
10068 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
10069 __put_user(st
.st_size
, &target_st
->st_size
);
10070 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
10071 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
10072 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
10073 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
10074 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
10075 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
10076 defined(TARGET_STAT_HAVE_NSEC)
10077 __put_user(st
.st_atim
.tv_nsec
,
10078 &target_st
->target_st_atime_nsec
);
10079 __put_user(st
.st_mtim
.tv_nsec
,
10080 &target_st
->target_st_mtime_nsec
);
10081 __put_user(st
.st_ctim
.tv_nsec
,
10082 &target_st
->target_st_ctime_nsec
);
10084 unlock_user_struct(target_st
, arg2
, 1);
10089 case TARGET_NR_vhangup
:
10090 return get_errno(vhangup());
10091 #ifdef TARGET_NR_syscall
10092 case TARGET_NR_syscall
:
10093 return do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
10094 arg6
, arg7
, arg8
, 0);
10096 #if defined(TARGET_NR_wait4)
10097 case TARGET_NR_wait4
:
10100 abi_long status_ptr
= arg2
;
10101 struct rusage rusage
, *rusage_ptr
;
10102 abi_ulong target_rusage
= arg4
;
10103 abi_long rusage_err
;
10105 rusage_ptr
= &rusage
;
10108 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
10109 if (!is_error(ret
)) {
10110 if (status_ptr
&& ret
) {
10111 status
= host_to_target_waitstatus(status
);
10112 if (put_user_s32(status
, status_ptr
))
10113 return -TARGET_EFAULT
;
10115 if (target_rusage
) {
10116 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
10125 #ifdef TARGET_NR_swapoff
10126 case TARGET_NR_swapoff
:
10127 if (!(p
= lock_user_string(arg1
)))
10128 return -TARGET_EFAULT
;
10129 ret
= get_errno(swapoff(p
));
10130 unlock_user(p
, arg1
, 0);
10133 case TARGET_NR_sysinfo
:
10135 struct target_sysinfo
*target_value
;
10136 struct sysinfo value
;
10137 ret
= get_errno(sysinfo(&value
));
10138 if (!is_error(ret
) && arg1
)
10140 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
10141 return -TARGET_EFAULT
;
10142 __put_user(value
.uptime
, &target_value
->uptime
);
10143 __put_user(value
.loads
[0], &target_value
->loads
[0]);
10144 __put_user(value
.loads
[1], &target_value
->loads
[1]);
10145 __put_user(value
.loads
[2], &target_value
->loads
[2]);
10146 __put_user(value
.totalram
, &target_value
->totalram
);
10147 __put_user(value
.freeram
, &target_value
->freeram
);
10148 __put_user(value
.sharedram
, &target_value
->sharedram
);
10149 __put_user(value
.bufferram
, &target_value
->bufferram
);
10150 __put_user(value
.totalswap
, &target_value
->totalswap
);
10151 __put_user(value
.freeswap
, &target_value
->freeswap
);
10152 __put_user(value
.procs
, &target_value
->procs
);
10153 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
10154 __put_user(value
.freehigh
, &target_value
->freehigh
);
10155 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
10156 unlock_user_struct(target_value
, arg1
, 1);
10160 #ifdef TARGET_NR_ipc
10161 case TARGET_NR_ipc
:
10162 return do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
10164 #ifdef TARGET_NR_semget
10165 case TARGET_NR_semget
:
10166 return get_errno(semget(arg1
, arg2
, arg3
));
10168 #ifdef TARGET_NR_semop
10169 case TARGET_NR_semop
:
10170 return do_semtimedop(arg1
, arg2
, arg3
, 0, false);
10172 #ifdef TARGET_NR_semtimedop
10173 case TARGET_NR_semtimedop
:
10174 return do_semtimedop(arg1
, arg2
, arg3
, arg4
, false);
10176 #ifdef TARGET_NR_semtimedop_time64
10177 case TARGET_NR_semtimedop_time64
:
10178 return do_semtimedop(arg1
, arg2
, arg3
, arg4
, true);
10180 #ifdef TARGET_NR_semctl
10181 case TARGET_NR_semctl
:
10182 return do_semctl(arg1
, arg2
, arg3
, arg4
);
10184 #ifdef TARGET_NR_msgctl
10185 case TARGET_NR_msgctl
:
10186 return do_msgctl(arg1
, arg2
, arg3
);
10188 #ifdef TARGET_NR_msgget
10189 case TARGET_NR_msgget
:
10190 return get_errno(msgget(arg1
, arg2
));
10192 #ifdef TARGET_NR_msgrcv
10193 case TARGET_NR_msgrcv
:
10194 return do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
10196 #ifdef TARGET_NR_msgsnd
10197 case TARGET_NR_msgsnd
:
10198 return do_msgsnd(arg1
, arg2
, arg3
, arg4
);
10200 #ifdef TARGET_NR_shmget
10201 case TARGET_NR_shmget
:
10202 return get_errno(shmget(arg1
, arg2
, arg3
));
10204 #ifdef TARGET_NR_shmctl
10205 case TARGET_NR_shmctl
:
10206 return do_shmctl(arg1
, arg2
, arg3
);
10208 #ifdef TARGET_NR_shmat
10209 case TARGET_NR_shmat
:
10210 return do_shmat(cpu_env
, arg1
, arg2
, arg3
);
10212 #ifdef TARGET_NR_shmdt
10213 case TARGET_NR_shmdt
:
10214 return do_shmdt(arg1
);
10216 case TARGET_NR_fsync
:
10217 return get_errno(fsync(arg1
));
10218 case TARGET_NR_clone
:
10219 /* Linux manages to have three different orderings for its
10220 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10221 * match the kernel's CONFIG_CLONE_* settings.
10222 * Microblaze is further special in that it uses a sixth
10223 * implicit argument to clone for the TLS pointer.
10225 #if defined(TARGET_MICROBLAZE)
10226 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
10227 #elif defined(TARGET_CLONE_BACKWARDS)
10228 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
10229 #elif defined(TARGET_CLONE_BACKWARDS2)
10230 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
10232 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
10235 #ifdef __NR_exit_group
10236 /* new thread calls */
10237 case TARGET_NR_exit_group
:
10238 preexit_cleanup(cpu_env
, arg1
);
10239 return get_errno(exit_group(arg1
));
10241 case TARGET_NR_setdomainname
:
10242 if (!(p
= lock_user_string(arg1
)))
10243 return -TARGET_EFAULT
;
10244 ret
= get_errno(setdomainname(p
, arg2
));
10245 unlock_user(p
, arg1
, 0);
10247 case TARGET_NR_uname
:
10248 /* no need to transcode because we use the linux syscall */
10250 struct new_utsname
* buf
;
10252 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
10253 return -TARGET_EFAULT
;
10254 ret
= get_errno(sys_uname(buf
));
10255 if (!is_error(ret
)) {
10256 /* Overwrite the native machine name with whatever is being
10258 g_strlcpy(buf
->machine
, cpu_to_uname_machine(cpu_env
),
10259 sizeof(buf
->machine
));
10260 /* Allow the user to override the reported release. */
10261 if (qemu_uname_release
&& *qemu_uname_release
) {
10262 g_strlcpy(buf
->release
, qemu_uname_release
,
10263 sizeof(buf
->release
));
10266 unlock_user_struct(buf
, arg1
, 1);
10270 case TARGET_NR_modify_ldt
:
10271 return do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
10272 #if !defined(TARGET_X86_64)
10273 case TARGET_NR_vm86
:
10274 return do_vm86(cpu_env
, arg1
, arg2
);
10277 #if defined(TARGET_NR_adjtimex)
10278 case TARGET_NR_adjtimex
:
10280 struct timex host_buf
;
10282 if (target_to_host_timex(&host_buf
, arg1
) != 0) {
10283 return -TARGET_EFAULT
;
10285 ret
= get_errno(adjtimex(&host_buf
));
10286 if (!is_error(ret
)) {
10287 if (host_to_target_timex(arg1
, &host_buf
) != 0) {
10288 return -TARGET_EFAULT
;
10294 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10295 case TARGET_NR_clock_adjtime
:
10297 struct timex htx
, *phtx
= &htx
;
10299 if (target_to_host_timex(phtx
, arg2
) != 0) {
10300 return -TARGET_EFAULT
;
10302 ret
= get_errno(clock_adjtime(arg1
, phtx
));
10303 if (!is_error(ret
) && phtx
) {
10304 if (host_to_target_timex(arg2
, phtx
) != 0) {
10305 return -TARGET_EFAULT
;
10311 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10312 case TARGET_NR_clock_adjtime64
:
10316 if (target_to_host_timex64(&htx
, arg2
) != 0) {
10317 return -TARGET_EFAULT
;
10319 ret
= get_errno(clock_adjtime(arg1
, &htx
));
10320 if (!is_error(ret
) && host_to_target_timex64(arg2
, &htx
)) {
10321 return -TARGET_EFAULT
;
10326 case TARGET_NR_getpgid
:
10327 return get_errno(getpgid(arg1
));
10328 case TARGET_NR_fchdir
:
10329 return get_errno(fchdir(arg1
));
10330 case TARGET_NR_personality
:
10331 return get_errno(personality(arg1
));
10332 #ifdef TARGET_NR__llseek /* Not on alpha */
10333 case TARGET_NR__llseek
:
10336 #if !defined(__NR_llseek)
10337 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
10339 ret
= get_errno(res
);
10344 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
10346 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
10347 return -TARGET_EFAULT
;
10352 #ifdef TARGET_NR_getdents
10353 case TARGET_NR_getdents
:
10354 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10355 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
10357 struct target_dirent
*target_dirp
;
10358 struct linux_dirent
*dirp
;
10359 abi_long count
= arg3
;
10361 dirp
= g_try_malloc(count
);
10363 return -TARGET_ENOMEM
;
10366 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
10367 if (!is_error(ret
)) {
10368 struct linux_dirent
*de
;
10369 struct target_dirent
*tde
;
10371 int reclen
, treclen
;
10372 int count1
, tnamelen
;
10376 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10377 return -TARGET_EFAULT
;
10380 reclen
= de
->d_reclen
;
10381 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
10382 assert(tnamelen
>= 0);
10383 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
10384 assert(count1
+ treclen
<= count
);
10385 tde
->d_reclen
= tswap16(treclen
);
10386 tde
->d_ino
= tswapal(de
->d_ino
);
10387 tde
->d_off
= tswapal(de
->d_off
);
10388 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
10389 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
10391 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
10395 unlock_user(target_dirp
, arg2
, ret
);
10401 struct linux_dirent
*dirp
;
10402 abi_long count
= arg3
;
10404 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10405 return -TARGET_EFAULT
;
10406 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
10407 if (!is_error(ret
)) {
10408 struct linux_dirent
*de
;
10413 reclen
= de
->d_reclen
;
10416 de
->d_reclen
= tswap16(reclen
);
10417 tswapls(&de
->d_ino
);
10418 tswapls(&de
->d_off
);
10419 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
10423 unlock_user(dirp
, arg2
, ret
);
10427 /* Implement getdents in terms of getdents64 */
10429 struct linux_dirent64
*dirp
;
10430 abi_long count
= arg3
;
10432 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
10434 return -TARGET_EFAULT
;
10436 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
10437 if (!is_error(ret
)) {
10438 /* Convert the dirent64 structs to target dirent. We do this
10439 * in-place, since we can guarantee that a target_dirent is no
10440 * larger than a dirent64; however this means we have to be
10441 * careful to read everything before writing in the new format.
10443 struct linux_dirent64
*de
;
10444 struct target_dirent
*tde
;
10449 tde
= (struct target_dirent
*)dirp
;
10451 int namelen
, treclen
;
10452 int reclen
= de
->d_reclen
;
10453 uint64_t ino
= de
->d_ino
;
10454 int64_t off
= de
->d_off
;
10455 uint8_t type
= de
->d_type
;
10457 namelen
= strlen(de
->d_name
);
10458 treclen
= offsetof(struct target_dirent
, d_name
)
10460 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
10462 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
10463 tde
->d_ino
= tswapal(ino
);
10464 tde
->d_off
= tswapal(off
);
10465 tde
->d_reclen
= tswap16(treclen
);
10466 /* The target_dirent type is in what was formerly a padding
10467 * byte at the end of the structure:
10469 *(((char *)tde
) + treclen
- 1) = type
;
10471 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
10472 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
10478 unlock_user(dirp
, arg2
, ret
);
10482 #endif /* TARGET_NR_getdents */
10483 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10484 case TARGET_NR_getdents64
:
10486 struct linux_dirent64
*dirp
;
10487 abi_long count
= arg3
;
10488 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10489 return -TARGET_EFAULT
;
10490 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
10491 if (!is_error(ret
)) {
10492 struct linux_dirent64
*de
;
10497 reclen
= de
->d_reclen
;
10500 de
->d_reclen
= tswap16(reclen
);
10501 tswap64s((uint64_t *)&de
->d_ino
);
10502 tswap64s((uint64_t *)&de
->d_off
);
10503 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
10507 unlock_user(dirp
, arg2
, ret
);
10510 #endif /* TARGET_NR_getdents64 */
10511 #if defined(TARGET_NR__newselect)
10512 case TARGET_NR__newselect
:
10513 return do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
10515 #ifdef TARGET_NR_poll
10516 case TARGET_NR_poll
:
10517 return do_ppoll(arg1
, arg2
, arg3
, arg4
, arg5
, false, false);
10519 #ifdef TARGET_NR_ppoll
10520 case TARGET_NR_ppoll
:
10521 return do_ppoll(arg1
, arg2
, arg3
, arg4
, arg5
, true, false);
10523 #ifdef TARGET_NR_ppoll_time64
10524 case TARGET_NR_ppoll_time64
:
10525 return do_ppoll(arg1
, arg2
, arg3
, arg4
, arg5
, true, true);
10527 case TARGET_NR_flock
:
10528 /* NOTE: the flock constant seems to be the same for every
10530 return get_errno(safe_flock(arg1
, arg2
));
10531 case TARGET_NR_readv
:
10533 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10535 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
10536 unlock_iovec(vec
, arg2
, arg3
, 1);
10538 ret
= -host_to_target_errno(errno
);
10542 case TARGET_NR_writev
:
10544 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10546 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
10547 unlock_iovec(vec
, arg2
, arg3
, 0);
10549 ret
= -host_to_target_errno(errno
);
10553 #if defined(TARGET_NR_preadv)
10554 case TARGET_NR_preadv
:
10556 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10558 unsigned long low
, high
;
10560 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
10561 ret
= get_errno(safe_preadv(arg1
, vec
, arg3
, low
, high
));
10562 unlock_iovec(vec
, arg2
, arg3
, 1);
10564 ret
= -host_to_target_errno(errno
);
10569 #if defined(TARGET_NR_pwritev)
10570 case TARGET_NR_pwritev
:
10572 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10574 unsigned long low
, high
;
10576 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
10577 ret
= get_errno(safe_pwritev(arg1
, vec
, arg3
, low
, high
));
10578 unlock_iovec(vec
, arg2
, arg3
, 0);
10580 ret
= -host_to_target_errno(errno
);
10585 case TARGET_NR_getsid
:
10586 return get_errno(getsid(arg1
));
10587 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10588 case TARGET_NR_fdatasync
:
10589 return get_errno(fdatasync(arg1
));
10591 case TARGET_NR_sched_getaffinity
:
10593 unsigned int mask_size
;
10594 unsigned long *mask
;
10597 * sched_getaffinity needs multiples of ulong, so need to take
10598 * care of mismatches between target ulong and host ulong sizes.
10600 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10601 return -TARGET_EINVAL
;
10603 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10605 mask
= alloca(mask_size
);
10606 memset(mask
, 0, mask_size
);
10607 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
10609 if (!is_error(ret
)) {
10611 /* More data returned than the caller's buffer will fit.
10612 * This only happens if sizeof(abi_long) < sizeof(long)
10613 * and the caller passed us a buffer holding an odd number
10614 * of abi_longs. If the host kernel is actually using the
10615 * extra 4 bytes then fail EINVAL; otherwise we can just
10616 * ignore them and only copy the interesting part.
10618 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
10619 if (numcpus
> arg2
* 8) {
10620 return -TARGET_EINVAL
;
10625 if (host_to_target_cpu_mask(mask
, mask_size
, arg3
, ret
)) {
10626 return -TARGET_EFAULT
;
10631 case TARGET_NR_sched_setaffinity
:
10633 unsigned int mask_size
;
10634 unsigned long *mask
;
10637 * sched_setaffinity needs multiples of ulong, so need to take
10638 * care of mismatches between target ulong and host ulong sizes.
10640 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10641 return -TARGET_EINVAL
;
10643 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10644 mask
= alloca(mask_size
);
10646 ret
= target_to_host_cpu_mask(mask
, mask_size
, arg3
, arg2
);
10651 return get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
10653 case TARGET_NR_getcpu
:
10655 unsigned cpu
, node
;
10656 ret
= get_errno(sys_getcpu(arg1
? &cpu
: NULL
,
10657 arg2
? &node
: NULL
,
10659 if (is_error(ret
)) {
10662 if (arg1
&& put_user_u32(cpu
, arg1
)) {
10663 return -TARGET_EFAULT
;
10665 if (arg2
&& put_user_u32(node
, arg2
)) {
10666 return -TARGET_EFAULT
;
10670 case TARGET_NR_sched_setparam
:
10672 struct sched_param
*target_schp
;
10673 struct sched_param schp
;
10676 return -TARGET_EINVAL
;
10678 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
10679 return -TARGET_EFAULT
;
10680 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10681 unlock_user_struct(target_schp
, arg2
, 0);
10682 return get_errno(sched_setparam(arg1
, &schp
));
10684 case TARGET_NR_sched_getparam
:
10686 struct sched_param
*target_schp
;
10687 struct sched_param schp
;
10690 return -TARGET_EINVAL
;
10692 ret
= get_errno(sched_getparam(arg1
, &schp
));
10693 if (!is_error(ret
)) {
10694 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
10695 return -TARGET_EFAULT
;
10696 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
10697 unlock_user_struct(target_schp
, arg2
, 1);
10701 case TARGET_NR_sched_setscheduler
:
10703 struct sched_param
*target_schp
;
10704 struct sched_param schp
;
10706 return -TARGET_EINVAL
;
10708 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
10709 return -TARGET_EFAULT
;
10710 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10711 unlock_user_struct(target_schp
, arg3
, 0);
10712 return get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
10714 case TARGET_NR_sched_getscheduler
:
10715 return get_errno(sched_getscheduler(arg1
));
10716 case TARGET_NR_sched_yield
:
10717 return get_errno(sched_yield());
10718 case TARGET_NR_sched_get_priority_max
:
10719 return get_errno(sched_get_priority_max(arg1
));
10720 case TARGET_NR_sched_get_priority_min
:
10721 return get_errno(sched_get_priority_min(arg1
));
10722 #ifdef TARGET_NR_sched_rr_get_interval
10723 case TARGET_NR_sched_rr_get_interval
:
10725 struct timespec ts
;
10726 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
10727 if (!is_error(ret
)) {
10728 ret
= host_to_target_timespec(arg2
, &ts
);
10733 #ifdef TARGET_NR_sched_rr_get_interval_time64
10734 case TARGET_NR_sched_rr_get_interval_time64
:
10736 struct timespec ts
;
10737 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
10738 if (!is_error(ret
)) {
10739 ret
= host_to_target_timespec64(arg2
, &ts
);
10744 #if defined(TARGET_NR_nanosleep)
10745 case TARGET_NR_nanosleep
:
10747 struct timespec req
, rem
;
10748 target_to_host_timespec(&req
, arg1
);
10749 ret
= get_errno(safe_nanosleep(&req
, &rem
));
10750 if (is_error(ret
) && arg2
) {
10751 host_to_target_timespec(arg2
, &rem
);
10756 case TARGET_NR_prctl
:
10758 case PR_GET_PDEATHSIG
:
10761 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
10762 if (!is_error(ret
) && arg2
10763 && put_user_s32(deathsig
, arg2
)) {
10764 return -TARGET_EFAULT
;
10771 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
10773 return -TARGET_EFAULT
;
10775 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10776 arg3
, arg4
, arg5
));
10777 unlock_user(name
, arg2
, 16);
10782 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
10784 return -TARGET_EFAULT
;
10786 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10787 arg3
, arg4
, arg5
));
10788 unlock_user(name
, arg2
, 0);
10793 case TARGET_PR_GET_FP_MODE
:
10795 CPUMIPSState
*env
= ((CPUMIPSState
*)cpu_env
);
10797 if (env
->CP0_Status
& (1 << CP0St_FR
)) {
10798 ret
|= TARGET_PR_FP_MODE_FR
;
10800 if (env
->CP0_Config5
& (1 << CP0C5_FRE
)) {
10801 ret
|= TARGET_PR_FP_MODE_FRE
;
10805 case TARGET_PR_SET_FP_MODE
:
10807 CPUMIPSState
*env
= ((CPUMIPSState
*)cpu_env
);
10808 bool old_fr
= env
->CP0_Status
& (1 << CP0St_FR
);
10809 bool old_fre
= env
->CP0_Config5
& (1 << CP0C5_FRE
);
10810 bool new_fr
= arg2
& TARGET_PR_FP_MODE_FR
;
10811 bool new_fre
= arg2
& TARGET_PR_FP_MODE_FRE
;
10813 const unsigned int known_bits
= TARGET_PR_FP_MODE_FR
|
10814 TARGET_PR_FP_MODE_FRE
;
10816 /* If nothing to change, return right away, successfully. */
10817 if (old_fr
== new_fr
&& old_fre
== new_fre
) {
10820 /* Check the value is valid */
10821 if (arg2
& ~known_bits
) {
10822 return -TARGET_EOPNOTSUPP
;
10824 /* Setting FRE without FR is not supported. */
10825 if (new_fre
&& !new_fr
) {
10826 return -TARGET_EOPNOTSUPP
;
10828 if (new_fr
&& !(env
->active_fpu
.fcr0
& (1 << FCR0_F64
))) {
10829 /* FR1 is not supported */
10830 return -TARGET_EOPNOTSUPP
;
10832 if (!new_fr
&& (env
->active_fpu
.fcr0
& (1 << FCR0_F64
))
10833 && !(env
->CP0_Status_rw_bitmask
& (1 << CP0St_FR
))) {
10834 /* cannot set FR=0 */
10835 return -TARGET_EOPNOTSUPP
;
10837 if (new_fre
&& !(env
->active_fpu
.fcr0
& (1 << FCR0_FREP
))) {
10838 /* Cannot set FRE=1 */
10839 return -TARGET_EOPNOTSUPP
;
10843 fpr_t
*fpr
= env
->active_fpu
.fpr
;
10844 for (i
= 0; i
< 32 ; i
+= 2) {
10845 if (!old_fr
&& new_fr
) {
10846 fpr
[i
].w
[!FP_ENDIAN_IDX
] = fpr
[i
+ 1].w
[FP_ENDIAN_IDX
];
10847 } else if (old_fr
&& !new_fr
) {
10848 fpr
[i
+ 1].w
[FP_ENDIAN_IDX
] = fpr
[i
].w
[!FP_ENDIAN_IDX
];
10853 env
->CP0_Status
|= (1 << CP0St_FR
);
10854 env
->hflags
|= MIPS_HFLAG_F64
;
10856 env
->CP0_Status
&= ~(1 << CP0St_FR
);
10857 env
->hflags
&= ~MIPS_HFLAG_F64
;
10860 env
->CP0_Config5
|= (1 << CP0C5_FRE
);
10861 if (env
->active_fpu
.fcr0
& (1 << FCR0_FREP
)) {
10862 env
->hflags
|= MIPS_HFLAG_FRE
;
10865 env
->CP0_Config5
&= ~(1 << CP0C5_FRE
);
10866 env
->hflags
&= ~MIPS_HFLAG_FRE
;
10872 #ifdef TARGET_AARCH64
10873 case TARGET_PR_SVE_SET_VL
:
10875 * We cannot support either PR_SVE_SET_VL_ONEXEC or
10876 * PR_SVE_VL_INHERIT. Note the kernel definition
10877 * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10878 * even though the current architectural maximum is VQ=16.
10880 ret
= -TARGET_EINVAL
;
10881 if (cpu_isar_feature(aa64_sve
, env_archcpu(cpu_env
))
10882 && arg2
>= 0 && arg2
<= 512 * 16 && !(arg2
& 15)) {
10883 CPUARMState
*env
= cpu_env
;
10884 ARMCPU
*cpu
= env_archcpu(env
);
10885 uint32_t vq
, old_vq
;
10887 old_vq
= (env
->vfp
.zcr_el
[1] & 0xf) + 1;
10888 vq
= MAX(arg2
/ 16, 1);
10889 vq
= MIN(vq
, cpu
->sve_max_vq
);
10892 aarch64_sve_narrow_vq(env
, vq
);
10894 env
->vfp
.zcr_el
[1] = vq
- 1;
10895 arm_rebuild_hflags(env
);
10899 case TARGET_PR_SVE_GET_VL
:
10900 ret
= -TARGET_EINVAL
;
10902 ARMCPU
*cpu
= env_archcpu(cpu_env
);
10903 if (cpu_isar_feature(aa64_sve
, cpu
)) {
10904 ret
= ((cpu
->env
.vfp
.zcr_el
[1] & 0xf) + 1) * 16;
10908 case TARGET_PR_PAC_RESET_KEYS
:
10910 CPUARMState
*env
= cpu_env
;
10911 ARMCPU
*cpu
= env_archcpu(env
);
10913 if (arg3
|| arg4
|| arg5
) {
10914 return -TARGET_EINVAL
;
10916 if (cpu_isar_feature(aa64_pauth
, cpu
)) {
10917 int all
= (TARGET_PR_PAC_APIAKEY
| TARGET_PR_PAC_APIBKEY
|
10918 TARGET_PR_PAC_APDAKEY
| TARGET_PR_PAC_APDBKEY
|
10919 TARGET_PR_PAC_APGAKEY
);
10925 } else if (arg2
& ~all
) {
10926 return -TARGET_EINVAL
;
10928 if (arg2
& TARGET_PR_PAC_APIAKEY
) {
10929 ret
|= qemu_guest_getrandom(&env
->keys
.apia
,
10930 sizeof(ARMPACKey
), &err
);
10932 if (arg2
& TARGET_PR_PAC_APIBKEY
) {
10933 ret
|= qemu_guest_getrandom(&env
->keys
.apib
,
10934 sizeof(ARMPACKey
), &err
);
10936 if (arg2
& TARGET_PR_PAC_APDAKEY
) {
10937 ret
|= qemu_guest_getrandom(&env
->keys
.apda
,
10938 sizeof(ARMPACKey
), &err
);
10940 if (arg2
& TARGET_PR_PAC_APDBKEY
) {
10941 ret
|= qemu_guest_getrandom(&env
->keys
.apdb
,
10942 sizeof(ARMPACKey
), &err
);
10944 if (arg2
& TARGET_PR_PAC_APGAKEY
) {
10945 ret
|= qemu_guest_getrandom(&env
->keys
.apga
,
10946 sizeof(ARMPACKey
), &err
);
10950 * Some unknown failure in the crypto. The best
10951 * we can do is log it and fail the syscall.
10952 * The real syscall cannot fail this way.
10954 qemu_log_mask(LOG_UNIMP
,
10955 "PR_PAC_RESET_KEYS: Crypto failure: %s",
10956 error_get_pretty(err
));
10958 return -TARGET_EIO
;
10963 return -TARGET_EINVAL
;
10964 #endif /* AARCH64 */
10965 case PR_GET_SECCOMP
:
10966 case PR_SET_SECCOMP
:
10967 /* Disable seccomp to prevent the target disabling syscalls we
10969 return -TARGET_EINVAL
;
10971 /* Most prctl options have no pointer arguments */
10972 return get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
10975 #ifdef TARGET_NR_arch_prctl
10976 case TARGET_NR_arch_prctl
:
10977 return do_arch_prctl(cpu_env
, arg1
, arg2
);
10979 #ifdef TARGET_NR_pread64
10980 case TARGET_NR_pread64
:
10981 if (regpairs_aligned(cpu_env
, num
)) {
10985 if (arg2
== 0 && arg3
== 0) {
10986 /* Special-case NULL buffer and zero length, which should succeed */
10989 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10991 return -TARGET_EFAULT
;
10994 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10995 unlock_user(p
, arg2
, ret
);
10997 case TARGET_NR_pwrite64
:
10998 if (regpairs_aligned(cpu_env
, num
)) {
11002 if (arg2
== 0 && arg3
== 0) {
11003 /* Special-case NULL buffer and zero length, which should succeed */
11006 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
11008 return -TARGET_EFAULT
;
11011 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
11012 unlock_user(p
, arg2
, 0);
11015 case TARGET_NR_getcwd
:
11016 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
11017 return -TARGET_EFAULT
;
11018 ret
= get_errno(sys_getcwd1(p
, arg2
));
11019 unlock_user(p
, arg1
, ret
);
11021 case TARGET_NR_capget
:
11022 case TARGET_NR_capset
:
11024 struct target_user_cap_header
*target_header
;
11025 struct target_user_cap_data
*target_data
= NULL
;
11026 struct __user_cap_header_struct header
;
11027 struct __user_cap_data_struct data
[2];
11028 struct __user_cap_data_struct
*dataptr
= NULL
;
11029 int i
, target_datalen
;
11030 int data_items
= 1;
11032 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
11033 return -TARGET_EFAULT
;
11035 header
.version
= tswap32(target_header
->version
);
11036 header
.pid
= tswap32(target_header
->pid
);
11038 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
11039 /* Version 2 and up takes pointer to two user_data structs */
11043 target_datalen
= sizeof(*target_data
) * data_items
;
11046 if (num
== TARGET_NR_capget
) {
11047 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
11049 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
11051 if (!target_data
) {
11052 unlock_user_struct(target_header
, arg1
, 0);
11053 return -TARGET_EFAULT
;
11056 if (num
== TARGET_NR_capset
) {
11057 for (i
= 0; i
< data_items
; i
++) {
11058 data
[i
].effective
= tswap32(target_data
[i
].effective
);
11059 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
11060 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
11067 if (num
== TARGET_NR_capget
) {
11068 ret
= get_errno(capget(&header
, dataptr
));
11070 ret
= get_errno(capset(&header
, dataptr
));
11073 /* The kernel always updates version for both capget and capset */
11074 target_header
->version
= tswap32(header
.version
);
11075 unlock_user_struct(target_header
, arg1
, 1);
11078 if (num
== TARGET_NR_capget
) {
11079 for (i
= 0; i
< data_items
; i
++) {
11080 target_data
[i
].effective
= tswap32(data
[i
].effective
);
11081 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
11082 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
11084 unlock_user(target_data
, arg2
, target_datalen
);
11086 unlock_user(target_data
, arg2
, 0);
11091 case TARGET_NR_sigaltstack
:
11092 return do_sigaltstack(arg1
, arg2
,
11093 get_sp_from_cpustate((CPUArchState
*)cpu_env
));
11095 #ifdef CONFIG_SENDFILE
11096 #ifdef TARGET_NR_sendfile
11097 case TARGET_NR_sendfile
:
11099 off_t
*offp
= NULL
;
11102 ret
= get_user_sal(off
, arg3
);
11103 if (is_error(ret
)) {
11108 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
11109 if (!is_error(ret
) && arg3
) {
11110 abi_long ret2
= put_user_sal(off
, arg3
);
11111 if (is_error(ret2
)) {
11118 #ifdef TARGET_NR_sendfile64
11119 case TARGET_NR_sendfile64
:
11121 off_t
*offp
= NULL
;
11124 ret
= get_user_s64(off
, arg3
);
11125 if (is_error(ret
)) {
11130 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
11131 if (!is_error(ret
) && arg3
) {
11132 abi_long ret2
= put_user_s64(off
, arg3
);
11133 if (is_error(ret2
)) {
11141 #ifdef TARGET_NR_vfork
11142 case TARGET_NR_vfork
:
11143 return get_errno(do_fork(cpu_env
,
11144 CLONE_VFORK
| CLONE_VM
| TARGET_SIGCHLD
,
11147 #ifdef TARGET_NR_ugetrlimit
11148 case TARGET_NR_ugetrlimit
:
11150 struct rlimit rlim
;
11151 int resource
= target_to_host_resource(arg1
);
11152 ret
= get_errno(getrlimit(resource
, &rlim
));
11153 if (!is_error(ret
)) {
11154 struct target_rlimit
*target_rlim
;
11155 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
11156 return -TARGET_EFAULT
;
11157 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
11158 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
11159 unlock_user_struct(target_rlim
, arg2
, 1);
11164 #ifdef TARGET_NR_truncate64
11165 case TARGET_NR_truncate64
:
11166 if (!(p
= lock_user_string(arg1
)))
11167 return -TARGET_EFAULT
;
11168 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
11169 unlock_user(p
, arg1
, 0);
11172 #ifdef TARGET_NR_ftruncate64
11173 case TARGET_NR_ftruncate64
:
11174 return target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
11176 #ifdef TARGET_NR_stat64
11177 case TARGET_NR_stat64
:
11178 if (!(p
= lock_user_string(arg1
))) {
11179 return -TARGET_EFAULT
;
11181 ret
= get_errno(stat(path(p
), &st
));
11182 unlock_user(p
, arg1
, 0);
11183 if (!is_error(ret
))
11184 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11187 #ifdef TARGET_NR_lstat64
11188 case TARGET_NR_lstat64
:
11189 if (!(p
= lock_user_string(arg1
))) {
11190 return -TARGET_EFAULT
;
11192 ret
= get_errno(lstat(path(p
), &st
));
11193 unlock_user(p
, arg1
, 0);
11194 if (!is_error(ret
))
11195 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11198 #ifdef TARGET_NR_fstat64
11199 case TARGET_NR_fstat64
:
11200 ret
= get_errno(fstat(arg1
, &st
));
11201 if (!is_error(ret
))
11202 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11205 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11206 #ifdef TARGET_NR_fstatat64
11207 case TARGET_NR_fstatat64
:
11209 #ifdef TARGET_NR_newfstatat
11210 case TARGET_NR_newfstatat
:
11212 if (!(p
= lock_user_string(arg2
))) {
11213 return -TARGET_EFAULT
;
11215 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
11216 unlock_user(p
, arg2
, 0);
11217 if (!is_error(ret
))
11218 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
11221 #if defined(TARGET_NR_statx)
11222 case TARGET_NR_statx
:
11224 struct target_statx
*target_stx
;
11228 p
= lock_user_string(arg2
);
11230 return -TARGET_EFAULT
;
11232 #if defined(__NR_statx)
11235 * It is assumed that struct statx is architecture independent.
11237 struct target_statx host_stx
;
11240 ret
= get_errno(sys_statx(dirfd
, p
, flags
, mask
, &host_stx
));
11241 if (!is_error(ret
)) {
11242 if (host_to_target_statx(&host_stx
, arg5
) != 0) {
11243 unlock_user(p
, arg2
, 0);
11244 return -TARGET_EFAULT
;
11248 if (ret
!= -TARGET_ENOSYS
) {
11249 unlock_user(p
, arg2
, 0);
11254 ret
= get_errno(fstatat(dirfd
, path(p
), &st
, flags
));
11255 unlock_user(p
, arg2
, 0);
11257 if (!is_error(ret
)) {
11258 if (!lock_user_struct(VERIFY_WRITE
, target_stx
, arg5
, 0)) {
11259 return -TARGET_EFAULT
;
11261 memset(target_stx
, 0, sizeof(*target_stx
));
11262 __put_user(major(st
.st_dev
), &target_stx
->stx_dev_major
);
11263 __put_user(minor(st
.st_dev
), &target_stx
->stx_dev_minor
);
11264 __put_user(st
.st_ino
, &target_stx
->stx_ino
);
11265 __put_user(st
.st_mode
, &target_stx
->stx_mode
);
11266 __put_user(st
.st_uid
, &target_stx
->stx_uid
);
11267 __put_user(st
.st_gid
, &target_stx
->stx_gid
);
11268 __put_user(st
.st_nlink
, &target_stx
->stx_nlink
);
11269 __put_user(major(st
.st_rdev
), &target_stx
->stx_rdev_major
);
11270 __put_user(minor(st
.st_rdev
), &target_stx
->stx_rdev_minor
);
11271 __put_user(st
.st_size
, &target_stx
->stx_size
);
11272 __put_user(st
.st_blksize
, &target_stx
->stx_blksize
);
11273 __put_user(st
.st_blocks
, &target_stx
->stx_blocks
);
11274 __put_user(st
.st_atime
, &target_stx
->stx_atime
.tv_sec
);
11275 __put_user(st
.st_mtime
, &target_stx
->stx_mtime
.tv_sec
);
11276 __put_user(st
.st_ctime
, &target_stx
->stx_ctime
.tv_sec
);
11277 unlock_user_struct(target_stx
, arg5
, 1);
11282 #ifdef TARGET_NR_lchown
11283 case TARGET_NR_lchown
:
11284 if (!(p
= lock_user_string(arg1
)))
11285 return -TARGET_EFAULT
;
11286 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
11287 unlock_user(p
, arg1
, 0);
11290 #ifdef TARGET_NR_getuid
11291 case TARGET_NR_getuid
:
11292 return get_errno(high2lowuid(getuid()));
11294 #ifdef TARGET_NR_getgid
11295 case TARGET_NR_getgid
:
11296 return get_errno(high2lowgid(getgid()));
11298 #ifdef TARGET_NR_geteuid
11299 case TARGET_NR_geteuid
:
11300 return get_errno(high2lowuid(geteuid()));
11302 #ifdef TARGET_NR_getegid
11303 case TARGET_NR_getegid
:
11304 return get_errno(high2lowgid(getegid()));
11306 case TARGET_NR_setreuid
:
11307 return get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
11308 case TARGET_NR_setregid
:
11309 return get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
11310 case TARGET_NR_getgroups
:
11312 int gidsetsize
= arg1
;
11313 target_id
*target_grouplist
;
11317 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11318 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11319 if (gidsetsize
== 0)
11321 if (!is_error(ret
)) {
11322 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
11323 if (!target_grouplist
)
11324 return -TARGET_EFAULT
;
11325 for(i
= 0;i
< ret
; i
++)
11326 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
11327 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
11331 case TARGET_NR_setgroups
:
11333 int gidsetsize
= arg1
;
11334 target_id
*target_grouplist
;
11335 gid_t
*grouplist
= NULL
;
11338 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11339 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
11340 if (!target_grouplist
) {
11341 return -TARGET_EFAULT
;
11343 for (i
= 0; i
< gidsetsize
; i
++) {
11344 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
11346 unlock_user(target_grouplist
, arg2
, 0);
11348 return get_errno(setgroups(gidsetsize
, grouplist
));
11350 case TARGET_NR_fchown
:
11351 return get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
11352 #if defined(TARGET_NR_fchownat)
11353 case TARGET_NR_fchownat
:
11354 if (!(p
= lock_user_string(arg2
)))
11355 return -TARGET_EFAULT
;
11356 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
11357 low2highgid(arg4
), arg5
));
11358 unlock_user(p
, arg2
, 0);
11361 #ifdef TARGET_NR_setresuid
11362 case TARGET_NR_setresuid
:
11363 return get_errno(sys_setresuid(low2highuid(arg1
),
11365 low2highuid(arg3
)));
11367 #ifdef TARGET_NR_getresuid
11368 case TARGET_NR_getresuid
:
11370 uid_t ruid
, euid
, suid
;
11371 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11372 if (!is_error(ret
)) {
11373 if (put_user_id(high2lowuid(ruid
), arg1
)
11374 || put_user_id(high2lowuid(euid
), arg2
)
11375 || put_user_id(high2lowuid(suid
), arg3
))
11376 return -TARGET_EFAULT
;
11381 #ifdef TARGET_NR_getresgid
11382 case TARGET_NR_setresgid
:
11383 return get_errno(sys_setresgid(low2highgid(arg1
),
11385 low2highgid(arg3
)));
11387 #ifdef TARGET_NR_getresgid
11388 case TARGET_NR_getresgid
:
11390 gid_t rgid
, egid
, sgid
;
11391 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11392 if (!is_error(ret
)) {
11393 if (put_user_id(high2lowgid(rgid
), arg1
)
11394 || put_user_id(high2lowgid(egid
), arg2
)
11395 || put_user_id(high2lowgid(sgid
), arg3
))
11396 return -TARGET_EFAULT
;
11401 #ifdef TARGET_NR_chown
11402 case TARGET_NR_chown
:
11403 if (!(p
= lock_user_string(arg1
)))
11404 return -TARGET_EFAULT
;
11405 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
11406 unlock_user(p
, arg1
, 0);
11409 case TARGET_NR_setuid
:
11410 return get_errno(sys_setuid(low2highuid(arg1
)));
11411 case TARGET_NR_setgid
:
11412 return get_errno(sys_setgid(low2highgid(arg1
)));
11413 case TARGET_NR_setfsuid
:
11414 return get_errno(setfsuid(arg1
));
11415 case TARGET_NR_setfsgid
:
11416 return get_errno(setfsgid(arg1
));
11418 #ifdef TARGET_NR_lchown32
11419 case TARGET_NR_lchown32
:
11420 if (!(p
= lock_user_string(arg1
)))
11421 return -TARGET_EFAULT
;
11422 ret
= get_errno(lchown(p
, arg2
, arg3
));
11423 unlock_user(p
, arg1
, 0);
11426 #ifdef TARGET_NR_getuid32
11427 case TARGET_NR_getuid32
:
11428 return get_errno(getuid());
11431 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11432 /* Alpha specific */
11433 case TARGET_NR_getxuid
:
11437 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
11439 return get_errno(getuid());
11441 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11442 /* Alpha specific */
11443 case TARGET_NR_getxgid
:
11447 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
11449 return get_errno(getgid());
11451 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11452 /* Alpha specific */
11453 case TARGET_NR_osf_getsysinfo
:
11454 ret
= -TARGET_EOPNOTSUPP
;
11456 case TARGET_GSI_IEEE_FP_CONTROL
:
11458 uint64_t fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11459 uint64_t swcr
= ((CPUAlphaState
*)cpu_env
)->swcr
;
11461 swcr
&= ~SWCR_STATUS_MASK
;
11462 swcr
|= (fpcr
>> 35) & SWCR_STATUS_MASK
;
11464 if (put_user_u64 (swcr
, arg2
))
11465 return -TARGET_EFAULT
;
11470 /* case GSI_IEEE_STATE_AT_SIGNAL:
11471 -- Not implemented in linux kernel.
11473 -- Retrieves current unaligned access state; not much used.
11474 case GSI_PROC_TYPE:
11475 -- Retrieves implver information; surely not used.
11476 case GSI_GET_HWRPB:
11477 -- Grabs a copy of the HWRPB; surely not used.
11482 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11483 /* Alpha specific */
11484 case TARGET_NR_osf_setsysinfo
:
11485 ret
= -TARGET_EOPNOTSUPP
;
11487 case TARGET_SSI_IEEE_FP_CONTROL
:
11489 uint64_t swcr
, fpcr
;
11491 if (get_user_u64 (swcr
, arg2
)) {
11492 return -TARGET_EFAULT
;
11496 * The kernel calls swcr_update_status to update the
11497 * status bits from the fpcr at every point that it
11498 * could be queried. Therefore, we store the status
11499 * bits only in FPCR.
11501 ((CPUAlphaState
*)cpu_env
)->swcr
11502 = swcr
& (SWCR_TRAP_ENABLE_MASK
| SWCR_MAP_MASK
);
11504 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11505 fpcr
&= ((uint64_t)FPCR_DYN_MASK
<< 32);
11506 fpcr
|= alpha_ieee_swcr_to_fpcr(swcr
);
11507 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11512 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
11514 uint64_t exc
, fpcr
, fex
;
11516 if (get_user_u64(exc
, arg2
)) {
11517 return -TARGET_EFAULT
;
11519 exc
&= SWCR_STATUS_MASK
;
11520 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11522 /* Old exceptions are not signaled. */
11523 fex
= alpha_ieee_fpcr_to_swcr(fpcr
);
11525 fex
>>= SWCR_STATUS_TO_EXCSUM_SHIFT
;
11526 fex
&= ((CPUArchState
*)cpu_env
)->swcr
;
11528 /* Update the hardware fpcr. */
11529 fpcr
|= alpha_ieee_swcr_to_fpcr(exc
);
11530 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11533 int si_code
= TARGET_FPE_FLTUNK
;
11534 target_siginfo_t info
;
11536 if (fex
& SWCR_TRAP_ENABLE_DNO
) {
11537 si_code
= TARGET_FPE_FLTUND
;
11539 if (fex
& SWCR_TRAP_ENABLE_INE
) {
11540 si_code
= TARGET_FPE_FLTRES
;
11542 if (fex
& SWCR_TRAP_ENABLE_UNF
) {
11543 si_code
= TARGET_FPE_FLTUND
;
11545 if (fex
& SWCR_TRAP_ENABLE_OVF
) {
11546 si_code
= TARGET_FPE_FLTOVF
;
11548 if (fex
& SWCR_TRAP_ENABLE_DZE
) {
11549 si_code
= TARGET_FPE_FLTDIV
;
11551 if (fex
& SWCR_TRAP_ENABLE_INV
) {
11552 si_code
= TARGET_FPE_FLTINV
;
11555 info
.si_signo
= SIGFPE
;
11557 info
.si_code
= si_code
;
11558 info
._sifields
._sigfault
._addr
11559 = ((CPUArchState
*)cpu_env
)->pc
;
11560 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
11561 QEMU_SI_FAULT
, &info
);
11567 /* case SSI_NVPAIRS:
11568 -- Used with SSIN_UACPROC to enable unaligned accesses.
11569 case SSI_IEEE_STATE_AT_SIGNAL:
11570 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11571 -- Not implemented in linux kernel
11576 #ifdef TARGET_NR_osf_sigprocmask
11577 /* Alpha specific. */
11578 case TARGET_NR_osf_sigprocmask
:
11582 sigset_t set
, oldset
;
11585 case TARGET_SIG_BLOCK
:
11588 case TARGET_SIG_UNBLOCK
:
11591 case TARGET_SIG_SETMASK
:
11595 return -TARGET_EINVAL
;
11598 target_to_host_old_sigset(&set
, &mask
);
11599 ret
= do_sigprocmask(how
, &set
, &oldset
);
11601 host_to_target_old_sigset(&mask
, &oldset
);
11608 #ifdef TARGET_NR_getgid32
11609 case TARGET_NR_getgid32
:
11610 return get_errno(getgid());
11612 #ifdef TARGET_NR_geteuid32
11613 case TARGET_NR_geteuid32
:
11614 return get_errno(geteuid());
11616 #ifdef TARGET_NR_getegid32
11617 case TARGET_NR_getegid32
:
11618 return get_errno(getegid());
11620 #ifdef TARGET_NR_setreuid32
11621 case TARGET_NR_setreuid32
:
11622 return get_errno(setreuid(arg1
, arg2
));
11624 #ifdef TARGET_NR_setregid32
11625 case TARGET_NR_setregid32
:
11626 return get_errno(setregid(arg1
, arg2
));
11628 #ifdef TARGET_NR_getgroups32
11629 case TARGET_NR_getgroups32
:
11631 int gidsetsize
= arg1
;
11632 uint32_t *target_grouplist
;
11636 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11637 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11638 if (gidsetsize
== 0)
11640 if (!is_error(ret
)) {
11641 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
11642 if (!target_grouplist
) {
11643 return -TARGET_EFAULT
;
11645 for(i
= 0;i
< ret
; i
++)
11646 target_grouplist
[i
] = tswap32(grouplist
[i
]);
11647 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
11652 #ifdef TARGET_NR_setgroups32
11653 case TARGET_NR_setgroups32
:
11655 int gidsetsize
= arg1
;
11656 uint32_t *target_grouplist
;
11660 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11661 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
11662 if (!target_grouplist
) {
11663 return -TARGET_EFAULT
;
11665 for(i
= 0;i
< gidsetsize
; i
++)
11666 grouplist
[i
] = tswap32(target_grouplist
[i
]);
11667 unlock_user(target_grouplist
, arg2
, 0);
11668 return get_errno(setgroups(gidsetsize
, grouplist
));
11671 #ifdef TARGET_NR_fchown32
11672 case TARGET_NR_fchown32
:
11673 return get_errno(fchown(arg1
, arg2
, arg3
));
11675 #ifdef TARGET_NR_setresuid32
11676 case TARGET_NR_setresuid32
:
11677 return get_errno(sys_setresuid(arg1
, arg2
, arg3
));
11679 #ifdef TARGET_NR_getresuid32
11680 case TARGET_NR_getresuid32
:
11682 uid_t ruid
, euid
, suid
;
11683 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11684 if (!is_error(ret
)) {
11685 if (put_user_u32(ruid
, arg1
)
11686 || put_user_u32(euid
, arg2
)
11687 || put_user_u32(suid
, arg3
))
11688 return -TARGET_EFAULT
;
11693 #ifdef TARGET_NR_setresgid32
11694 case TARGET_NR_setresgid32
:
11695 return get_errno(sys_setresgid(arg1
, arg2
, arg3
));
11697 #ifdef TARGET_NR_getresgid32
11698 case TARGET_NR_getresgid32
:
11700 gid_t rgid
, egid
, sgid
;
11701 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11702 if (!is_error(ret
)) {
11703 if (put_user_u32(rgid
, arg1
)
11704 || put_user_u32(egid
, arg2
)
11705 || put_user_u32(sgid
, arg3
))
11706 return -TARGET_EFAULT
;
11711 #ifdef TARGET_NR_chown32
11712 case TARGET_NR_chown32
:
11713 if (!(p
= lock_user_string(arg1
)))
11714 return -TARGET_EFAULT
;
11715 ret
= get_errno(chown(p
, arg2
, arg3
));
11716 unlock_user(p
, arg1
, 0);
11719 #ifdef TARGET_NR_setuid32
11720 case TARGET_NR_setuid32
:
11721 return get_errno(sys_setuid(arg1
));
11723 #ifdef TARGET_NR_setgid32
11724 case TARGET_NR_setgid32
:
11725 return get_errno(sys_setgid(arg1
));
11727 #ifdef TARGET_NR_setfsuid32
11728 case TARGET_NR_setfsuid32
:
11729 return get_errno(setfsuid(arg1
));
11731 #ifdef TARGET_NR_setfsgid32
11732 case TARGET_NR_setfsgid32
:
11733 return get_errno(setfsgid(arg1
));
11735 #ifdef TARGET_NR_mincore
11736 case TARGET_NR_mincore
:
11738 void *a
= lock_user(VERIFY_READ
, arg1
, arg2
, 0);
11740 return -TARGET_ENOMEM
;
11742 p
= lock_user_string(arg3
);
11744 ret
= -TARGET_EFAULT
;
11746 ret
= get_errno(mincore(a
, arg2
, p
));
11747 unlock_user(p
, arg3
, ret
);
11749 unlock_user(a
, arg1
, 0);
11753 #ifdef TARGET_NR_arm_fadvise64_64
11754 case TARGET_NR_arm_fadvise64_64
:
11755 /* arm_fadvise64_64 looks like fadvise64_64 but
11756 * with different argument order: fd, advice, offset, len
11757 * rather than the usual fd, offset, len, advice.
11758 * Note that offset and len are both 64-bit so appear as
11759 * pairs of 32-bit registers.
11761 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
11762 target_offset64(arg5
, arg6
), arg2
);
11763 return -host_to_target_errno(ret
);
11766 #if TARGET_ABI_BITS == 32
11768 #ifdef TARGET_NR_fadvise64_64
11769 case TARGET_NR_fadvise64_64
:
11770 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11771 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11779 /* 6 args: fd, offset (high, low), len (high, low), advice */
11780 if (regpairs_aligned(cpu_env
, num
)) {
11781 /* offset is in (3,4), len in (5,6) and advice in 7 */
11789 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
),
11790 target_offset64(arg4
, arg5
), arg6
);
11791 return -host_to_target_errno(ret
);
11794 #ifdef TARGET_NR_fadvise64
11795 case TARGET_NR_fadvise64
:
11796 /* 5 args: fd, offset (high, low), len, advice */
11797 if (regpairs_aligned(cpu_env
, num
)) {
11798 /* offset is in (3,4), len in 5 and advice in 6 */
11804 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
), arg4
, arg5
);
11805 return -host_to_target_errno(ret
);
11808 #else /* not a 32-bit ABI */
11809 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11810 #ifdef TARGET_NR_fadvise64_64
11811 case TARGET_NR_fadvise64_64
:
11813 #ifdef TARGET_NR_fadvise64
11814 case TARGET_NR_fadvise64
:
11816 #ifdef TARGET_S390X
11818 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
11819 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
11820 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
11821 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
11825 return -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
11827 #endif /* end of 64-bit ABI fadvise handling */
11829 #ifdef TARGET_NR_madvise
11830 case TARGET_NR_madvise
:
11831 /* A straight passthrough may not be safe because qemu sometimes
11832 turns private file-backed mappings into anonymous mappings.
11833 This will break MADV_DONTNEED.
11834 This is a hint, so ignoring and returning success is ok. */
11837 #ifdef TARGET_NR_fcntl64
11838 case TARGET_NR_fcntl64
:
11842 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
11843 to_flock64_fn
*copyto
= copy_to_user_flock64
;
11846 if (!((CPUARMState
*)cpu_env
)->eabi
) {
11847 copyfrom
= copy_from_user_oabi_flock64
;
11848 copyto
= copy_to_user_oabi_flock64
;
11852 cmd
= target_to_host_fcntl_cmd(arg2
);
11853 if (cmd
== -TARGET_EINVAL
) {
11858 case TARGET_F_GETLK64
:
11859 ret
= copyfrom(&fl
, arg3
);
11863 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11865 ret
= copyto(arg3
, &fl
);
11869 case TARGET_F_SETLK64
:
11870 case TARGET_F_SETLKW64
:
11871 ret
= copyfrom(&fl
, arg3
);
11875 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11878 ret
= do_fcntl(arg1
, arg2
, arg3
);
11884 #ifdef TARGET_NR_cacheflush
11885 case TARGET_NR_cacheflush
:
11886 /* self-modifying code is handled automatically, so nothing needed */
11889 #ifdef TARGET_NR_getpagesize
11890 case TARGET_NR_getpagesize
:
11891 return TARGET_PAGE_SIZE
;
11893 case TARGET_NR_gettid
:
11894 return get_errno(sys_gettid());
11895 #ifdef TARGET_NR_readahead
11896 case TARGET_NR_readahead
:
11897 #if TARGET_ABI_BITS == 32
11898 if (regpairs_aligned(cpu_env
, num
)) {
11903 ret
= get_errno(readahead(arg1
, target_offset64(arg2
, arg3
) , arg4
));
11905 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
11910 #ifdef TARGET_NR_setxattr
11911 case TARGET_NR_listxattr
:
11912 case TARGET_NR_llistxattr
:
11916 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11918 return -TARGET_EFAULT
;
11921 p
= lock_user_string(arg1
);
11923 if (num
== TARGET_NR_listxattr
) {
11924 ret
= get_errno(listxattr(p
, b
, arg3
));
11926 ret
= get_errno(llistxattr(p
, b
, arg3
));
11929 ret
= -TARGET_EFAULT
;
11931 unlock_user(p
, arg1
, 0);
11932 unlock_user(b
, arg2
, arg3
);
11935 case TARGET_NR_flistxattr
:
11939 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11941 return -TARGET_EFAULT
;
11944 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
11945 unlock_user(b
, arg2
, arg3
);
11948 case TARGET_NR_setxattr
:
11949 case TARGET_NR_lsetxattr
:
11951 void *p
, *n
, *v
= 0;
11953 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11955 return -TARGET_EFAULT
;
11958 p
= lock_user_string(arg1
);
11959 n
= lock_user_string(arg2
);
11961 if (num
== TARGET_NR_setxattr
) {
11962 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
11964 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
11967 ret
= -TARGET_EFAULT
;
11969 unlock_user(p
, arg1
, 0);
11970 unlock_user(n
, arg2
, 0);
11971 unlock_user(v
, arg3
, 0);
11974 case TARGET_NR_fsetxattr
:
11978 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11980 return -TARGET_EFAULT
;
11983 n
= lock_user_string(arg2
);
11985 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
11987 ret
= -TARGET_EFAULT
;
11989 unlock_user(n
, arg2
, 0);
11990 unlock_user(v
, arg3
, 0);
11993 case TARGET_NR_getxattr
:
11994 case TARGET_NR_lgetxattr
:
11996 void *p
, *n
, *v
= 0;
11998 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
12000 return -TARGET_EFAULT
;
12003 p
= lock_user_string(arg1
);
12004 n
= lock_user_string(arg2
);
12006 if (num
== TARGET_NR_getxattr
) {
12007 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
12009 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
12012 ret
= -TARGET_EFAULT
;
12014 unlock_user(p
, arg1
, 0);
12015 unlock_user(n
, arg2
, 0);
12016 unlock_user(v
, arg3
, arg4
);
12019 case TARGET_NR_fgetxattr
:
12023 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
12025 return -TARGET_EFAULT
;
12028 n
= lock_user_string(arg2
);
12030 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
12032 ret
= -TARGET_EFAULT
;
12034 unlock_user(n
, arg2
, 0);
12035 unlock_user(v
, arg3
, arg4
);
12038 case TARGET_NR_removexattr
:
12039 case TARGET_NR_lremovexattr
:
12042 p
= lock_user_string(arg1
);
12043 n
= lock_user_string(arg2
);
12045 if (num
== TARGET_NR_removexattr
) {
12046 ret
= get_errno(removexattr(p
, n
));
12048 ret
= get_errno(lremovexattr(p
, n
));
12051 ret
= -TARGET_EFAULT
;
12053 unlock_user(p
, arg1
, 0);
12054 unlock_user(n
, arg2
, 0);
12057 case TARGET_NR_fremovexattr
:
12060 n
= lock_user_string(arg2
);
12062 ret
= get_errno(fremovexattr(arg1
, n
));
12064 ret
= -TARGET_EFAULT
;
12066 unlock_user(n
, arg2
, 0);
12070 #endif /* CONFIG_ATTR */
12071 #ifdef TARGET_NR_set_thread_area
12072 case TARGET_NR_set_thread_area
:
12073 #if defined(TARGET_MIPS)
12074 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
12076 #elif defined(TARGET_CRIS)
12078 ret
= -TARGET_EINVAL
;
12080 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
12084 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12085 return do_set_thread_area(cpu_env
, arg1
);
12086 #elif defined(TARGET_M68K)
12088 TaskState
*ts
= cpu
->opaque
;
12089 ts
->tp_value
= arg1
;
12093 return -TARGET_ENOSYS
;
12096 #ifdef TARGET_NR_get_thread_area
12097 case TARGET_NR_get_thread_area
:
12098 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12099 return do_get_thread_area(cpu_env
, arg1
);
12100 #elif defined(TARGET_M68K)
12102 TaskState
*ts
= cpu
->opaque
;
12103 return ts
->tp_value
;
12106 return -TARGET_ENOSYS
;
12109 #ifdef TARGET_NR_getdomainname
12110 case TARGET_NR_getdomainname
:
12111 return -TARGET_ENOSYS
;
12114 #ifdef TARGET_NR_clock_settime
12115 case TARGET_NR_clock_settime
:
12117 struct timespec ts
;
12119 ret
= target_to_host_timespec(&ts
, arg2
);
12120 if (!is_error(ret
)) {
12121 ret
= get_errno(clock_settime(arg1
, &ts
));
12126 #ifdef TARGET_NR_clock_settime64
12127 case TARGET_NR_clock_settime64
:
12129 struct timespec ts
;
12131 ret
= target_to_host_timespec64(&ts
, arg2
);
12132 if (!is_error(ret
)) {
12133 ret
= get_errno(clock_settime(arg1
, &ts
));
12138 #ifdef TARGET_NR_clock_gettime
12139 case TARGET_NR_clock_gettime
:
12141 struct timespec ts
;
12142 ret
= get_errno(clock_gettime(arg1
, &ts
));
12143 if (!is_error(ret
)) {
12144 ret
= host_to_target_timespec(arg2
, &ts
);
12149 #ifdef TARGET_NR_clock_gettime64
12150 case TARGET_NR_clock_gettime64
:
12152 struct timespec ts
;
12153 ret
= get_errno(clock_gettime(arg1
, &ts
));
12154 if (!is_error(ret
)) {
12155 ret
= host_to_target_timespec64(arg2
, &ts
);
12160 #ifdef TARGET_NR_clock_getres
12161 case TARGET_NR_clock_getres
:
12163 struct timespec ts
;
12164 ret
= get_errno(clock_getres(arg1
, &ts
));
12165 if (!is_error(ret
)) {
12166 host_to_target_timespec(arg2
, &ts
);
12171 #ifdef TARGET_NR_clock_getres_time64
12172 case TARGET_NR_clock_getres_time64
:
12174 struct timespec ts
;
12175 ret
= get_errno(clock_getres(arg1
, &ts
));
12176 if (!is_error(ret
)) {
12177 host_to_target_timespec64(arg2
, &ts
);
12182 #ifdef TARGET_NR_clock_nanosleep
12183 case TARGET_NR_clock_nanosleep
:
12185 struct timespec ts
;
12186 if (target_to_host_timespec(&ts
, arg3
)) {
12187 return -TARGET_EFAULT
;
12189 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
12190 &ts
, arg4
? &ts
: NULL
));
12192 * if the call is interrupted by a signal handler, it fails
12193 * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
12194 * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
12196 if (ret
== -TARGET_EINTR
&& arg4
&& arg2
!= TIMER_ABSTIME
&&
12197 host_to_target_timespec(arg4
, &ts
)) {
12198 return -TARGET_EFAULT
;
12204 #ifdef TARGET_NR_clock_nanosleep_time64
12205 case TARGET_NR_clock_nanosleep_time64
:
12207 struct timespec ts
;
12209 if (target_to_host_timespec64(&ts
, arg3
)) {
12210 return -TARGET_EFAULT
;
12213 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
12214 &ts
, arg4
? &ts
: NULL
));
12216 if (ret
== -TARGET_EINTR
&& arg4
&& arg2
!= TIMER_ABSTIME
&&
12217 host_to_target_timespec64(arg4
, &ts
)) {
12218 return -TARGET_EFAULT
;
12224 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12225 case TARGET_NR_set_tid_address
:
12226 return get_errno(set_tid_address((int *)g2h(arg1
)));
12229 case TARGET_NR_tkill
:
12230 return get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
12232 case TARGET_NR_tgkill
:
12233 return get_errno(safe_tgkill((int)arg1
, (int)arg2
,
12234 target_to_host_signal(arg3
)));
12236 #ifdef TARGET_NR_set_robust_list
12237 case TARGET_NR_set_robust_list
:
12238 case TARGET_NR_get_robust_list
:
12239 /* The ABI for supporting robust futexes has userspace pass
12240 * the kernel a pointer to a linked list which is updated by
12241 * userspace after the syscall; the list is walked by the kernel
12242 * when the thread exits. Since the linked list in QEMU guest
12243 * memory isn't a valid linked list for the host and we have
12244 * no way to reliably intercept the thread-death event, we can't
12245 * support these. Silently return ENOSYS so that guest userspace
12246 * falls back to a non-robust futex implementation (which should
12247 * be OK except in the corner case of the guest crashing while
12248 * holding a mutex that is shared with another process via
12251 return -TARGET_ENOSYS
;
12254 #if defined(TARGET_NR_utimensat)
12255 case TARGET_NR_utimensat
:
12257 struct timespec
*tsp
, ts
[2];
12261 if (target_to_host_timespec(ts
, arg3
)) {
12262 return -TARGET_EFAULT
;
12264 if (target_to_host_timespec(ts
+ 1, arg3
+
12265 sizeof(struct target_timespec
))) {
12266 return -TARGET_EFAULT
;
12271 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
12273 if (!(p
= lock_user_string(arg2
))) {
12274 return -TARGET_EFAULT
;
12276 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
12277 unlock_user(p
, arg2
, 0);
12282 #ifdef TARGET_NR_utimensat_time64
12283 case TARGET_NR_utimensat_time64
:
12285 struct timespec
*tsp
, ts
[2];
12289 if (target_to_host_timespec64(ts
, arg3
)) {
12290 return -TARGET_EFAULT
;
12292 if (target_to_host_timespec64(ts
+ 1, arg3
+
12293 sizeof(struct target__kernel_timespec
))) {
12294 return -TARGET_EFAULT
;
12299 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
12301 p
= lock_user_string(arg2
);
12303 return -TARGET_EFAULT
;
12305 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
12306 unlock_user(p
, arg2
, 0);
12311 #ifdef TARGET_NR_futex
12312 case TARGET_NR_futex
:
12313 return do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
12315 #ifdef TARGET_NR_futex_time64
12316 case TARGET_NR_futex_time64
:
12317 return do_futex_time64(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
12319 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12320 case TARGET_NR_inotify_init
:
12321 ret
= get_errno(sys_inotify_init());
12323 fd_trans_register(ret
, &target_inotify_trans
);
12327 #ifdef CONFIG_INOTIFY1
12328 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12329 case TARGET_NR_inotify_init1
:
12330 ret
= get_errno(sys_inotify_init1(target_to_host_bitmask(arg1
,
12331 fcntl_flags_tbl
)));
12333 fd_trans_register(ret
, &target_inotify_trans
);
12338 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12339 case TARGET_NR_inotify_add_watch
:
12340 p
= lock_user_string(arg2
);
12341 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
12342 unlock_user(p
, arg2
, 0);
12345 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12346 case TARGET_NR_inotify_rm_watch
:
12347 return get_errno(sys_inotify_rm_watch(arg1
, arg2
));
12350 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12351 case TARGET_NR_mq_open
:
12353 struct mq_attr posix_mq_attr
;
12354 struct mq_attr
*pposix_mq_attr
;
12357 host_flags
= target_to_host_bitmask(arg2
, fcntl_flags_tbl
);
12358 pposix_mq_attr
= NULL
;
12360 if (copy_from_user_mq_attr(&posix_mq_attr
, arg4
) != 0) {
12361 return -TARGET_EFAULT
;
12363 pposix_mq_attr
= &posix_mq_attr
;
12365 p
= lock_user_string(arg1
- 1);
12367 return -TARGET_EFAULT
;
12369 ret
= get_errno(mq_open(p
, host_flags
, arg3
, pposix_mq_attr
));
12370 unlock_user (p
, arg1
, 0);
12374 case TARGET_NR_mq_unlink
:
12375 p
= lock_user_string(arg1
- 1);
12377 return -TARGET_EFAULT
;
12379 ret
= get_errno(mq_unlink(p
));
12380 unlock_user (p
, arg1
, 0);
12383 #ifdef TARGET_NR_mq_timedsend
12384 case TARGET_NR_mq_timedsend
:
12386 struct timespec ts
;
12388 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
12390 if (target_to_host_timespec(&ts
, arg5
)) {
12391 return -TARGET_EFAULT
;
12393 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
12394 if (!is_error(ret
) && host_to_target_timespec(arg5
, &ts
)) {
12395 return -TARGET_EFAULT
;
12398 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
12400 unlock_user (p
, arg2
, arg3
);
12404 #ifdef TARGET_NR_mq_timedsend_time64
12405 case TARGET_NR_mq_timedsend_time64
:
12407 struct timespec ts
;
12409 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
12411 if (target_to_host_timespec64(&ts
, arg5
)) {
12412 return -TARGET_EFAULT
;
12414 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
12415 if (!is_error(ret
) && host_to_target_timespec64(arg5
, &ts
)) {
12416 return -TARGET_EFAULT
;
12419 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
12421 unlock_user(p
, arg2
, arg3
);
12426 #ifdef TARGET_NR_mq_timedreceive
12427 case TARGET_NR_mq_timedreceive
:
12429 struct timespec ts
;
12432 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
12434 if (target_to_host_timespec(&ts
, arg5
)) {
12435 return -TARGET_EFAULT
;
12437 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12439 if (!is_error(ret
) && host_to_target_timespec(arg5
, &ts
)) {
12440 return -TARGET_EFAULT
;
12443 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12446 unlock_user (p
, arg2
, arg3
);
12448 put_user_u32(prio
, arg4
);
12452 #ifdef TARGET_NR_mq_timedreceive_time64
12453 case TARGET_NR_mq_timedreceive_time64
:
12455 struct timespec ts
;
12458 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
12460 if (target_to_host_timespec64(&ts
, arg5
)) {
12461 return -TARGET_EFAULT
;
12463 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12465 if (!is_error(ret
) && host_to_target_timespec64(arg5
, &ts
)) {
12466 return -TARGET_EFAULT
;
12469 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12472 unlock_user(p
, arg2
, arg3
);
12474 put_user_u32(prio
, arg4
);
12480 /* Not implemented for now... */
12481 /* case TARGET_NR_mq_notify: */
12484 case TARGET_NR_mq_getsetattr
:
12486 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
12489 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
12490 ret
= get_errno(mq_setattr(arg1
, &posix_mq_attr_in
,
12491 &posix_mq_attr_out
));
12492 } else if (arg3
!= 0) {
12493 ret
= get_errno(mq_getattr(arg1
, &posix_mq_attr_out
));
12495 if (ret
== 0 && arg3
!= 0) {
12496 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
12502 #ifdef CONFIG_SPLICE
12503 #ifdef TARGET_NR_tee
12504 case TARGET_NR_tee
:
12506 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
12510 #ifdef TARGET_NR_splice
12511 case TARGET_NR_splice
:
12513 loff_t loff_in
, loff_out
;
12514 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
12516 if (get_user_u64(loff_in
, arg2
)) {
12517 return -TARGET_EFAULT
;
12519 ploff_in
= &loff_in
;
12522 if (get_user_u64(loff_out
, arg4
)) {
12523 return -TARGET_EFAULT
;
12525 ploff_out
= &loff_out
;
12527 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
12529 if (put_user_u64(loff_in
, arg2
)) {
12530 return -TARGET_EFAULT
;
12534 if (put_user_u64(loff_out
, arg4
)) {
12535 return -TARGET_EFAULT
;
12541 #ifdef TARGET_NR_vmsplice
12542 case TARGET_NR_vmsplice
:
12544 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
12546 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
12547 unlock_iovec(vec
, arg2
, arg3
, 0);
12549 ret
= -host_to_target_errno(errno
);
12554 #endif /* CONFIG_SPLICE */
12555 #ifdef CONFIG_EVENTFD
12556 #if defined(TARGET_NR_eventfd)
12557 case TARGET_NR_eventfd
:
12558 ret
= get_errno(eventfd(arg1
, 0));
12560 fd_trans_register(ret
, &target_eventfd_trans
);
12564 #if defined(TARGET_NR_eventfd2)
12565 case TARGET_NR_eventfd2
:
12567 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
12568 if (arg2
& TARGET_O_NONBLOCK
) {
12569 host_flags
|= O_NONBLOCK
;
12571 if (arg2
& TARGET_O_CLOEXEC
) {
12572 host_flags
|= O_CLOEXEC
;
12574 ret
= get_errno(eventfd(arg1
, host_flags
));
12576 fd_trans_register(ret
, &target_eventfd_trans
);
12581 #endif /* CONFIG_EVENTFD */
12582 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12583 case TARGET_NR_fallocate
:
12584 #if TARGET_ABI_BITS == 32
12585 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
12586 target_offset64(arg5
, arg6
)));
12588 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
12592 #if defined(CONFIG_SYNC_FILE_RANGE)
12593 #if defined(TARGET_NR_sync_file_range)
12594 case TARGET_NR_sync_file_range
:
12595 #if TARGET_ABI_BITS == 32
12596 #if defined(TARGET_MIPS)
12597 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12598 target_offset64(arg5
, arg6
), arg7
));
12600 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
12601 target_offset64(arg4
, arg5
), arg6
));
12602 #endif /* !TARGET_MIPS */
12604 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
12608 #if defined(TARGET_NR_sync_file_range2) || \
12609 defined(TARGET_NR_arm_sync_file_range)
12610 #if defined(TARGET_NR_sync_file_range2)
12611 case TARGET_NR_sync_file_range2
:
12613 #if defined(TARGET_NR_arm_sync_file_range)
12614 case TARGET_NR_arm_sync_file_range
:
12616 /* This is like sync_file_range but the arguments are reordered */
12617 #if TARGET_ABI_BITS == 32
12618 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12619 target_offset64(arg5
, arg6
), arg2
));
12621 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
12626 #if defined(TARGET_NR_signalfd4)
12627 case TARGET_NR_signalfd4
:
12628 return do_signalfd4(arg1
, arg2
, arg4
);
12630 #if defined(TARGET_NR_signalfd)
12631 case TARGET_NR_signalfd
:
12632 return do_signalfd4(arg1
, arg2
, 0);
12634 #if defined(CONFIG_EPOLL)
12635 #if defined(TARGET_NR_epoll_create)
12636 case TARGET_NR_epoll_create
:
12637 return get_errno(epoll_create(arg1
));
12639 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12640 case TARGET_NR_epoll_create1
:
12641 return get_errno(epoll_create1(target_to_host_bitmask(arg1
, fcntl_flags_tbl
)));
12643 #if defined(TARGET_NR_epoll_ctl)
12644 case TARGET_NR_epoll_ctl
:
12646 struct epoll_event ep
;
12647 struct epoll_event
*epp
= 0;
12649 if (arg2
!= EPOLL_CTL_DEL
) {
12650 struct target_epoll_event
*target_ep
;
12651 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
12652 return -TARGET_EFAULT
;
12654 ep
.events
= tswap32(target_ep
->events
);
12656 * The epoll_data_t union is just opaque data to the kernel,
12657 * so we transfer all 64 bits across and need not worry what
12658 * actual data type it is.
12660 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
12661 unlock_user_struct(target_ep
, arg4
, 0);
12664 * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
12665 * non-null pointer, even though this argument is ignored.
12670 return get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
12674 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12675 #if defined(TARGET_NR_epoll_wait)
12676 case TARGET_NR_epoll_wait
:
12678 #if defined(TARGET_NR_epoll_pwait)
12679 case TARGET_NR_epoll_pwait
:
12682 struct target_epoll_event
*target_ep
;
12683 struct epoll_event
*ep
;
12685 int maxevents
= arg3
;
12686 int timeout
= arg4
;
12688 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
12689 return -TARGET_EINVAL
;
12692 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
12693 maxevents
* sizeof(struct target_epoll_event
), 1);
12695 return -TARGET_EFAULT
;
12698 ep
= g_try_new(struct epoll_event
, maxevents
);
12700 unlock_user(target_ep
, arg2
, 0);
12701 return -TARGET_ENOMEM
;
12705 #if defined(TARGET_NR_epoll_pwait)
12706 case TARGET_NR_epoll_pwait
:
12708 target_sigset_t
*target_set
;
12709 sigset_t _set
, *set
= &_set
;
12712 if (arg6
!= sizeof(target_sigset_t
)) {
12713 ret
= -TARGET_EINVAL
;
12717 target_set
= lock_user(VERIFY_READ
, arg5
,
12718 sizeof(target_sigset_t
), 1);
12720 ret
= -TARGET_EFAULT
;
12723 target_to_host_sigset(set
, target_set
);
12724 unlock_user(target_set
, arg5
, 0);
12729 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12730 set
, SIGSET_T_SIZE
));
12734 #if defined(TARGET_NR_epoll_wait)
12735 case TARGET_NR_epoll_wait
:
12736 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12741 ret
= -TARGET_ENOSYS
;
12743 if (!is_error(ret
)) {
12745 for (i
= 0; i
< ret
; i
++) {
12746 target_ep
[i
].events
= tswap32(ep
[i
].events
);
12747 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
12749 unlock_user(target_ep
, arg2
,
12750 ret
* sizeof(struct target_epoll_event
));
12752 unlock_user(target_ep
, arg2
, 0);
12759 #ifdef TARGET_NR_prlimit64
12760 case TARGET_NR_prlimit64
:
12762 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12763 struct target_rlimit64
*target_rnew
, *target_rold
;
12764 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
12765 int resource
= target_to_host_resource(arg2
);
12767 if (arg3
&& (resource
!= RLIMIT_AS
&&
12768 resource
!= RLIMIT_DATA
&&
12769 resource
!= RLIMIT_STACK
)) {
12770 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
12771 return -TARGET_EFAULT
;
12773 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
12774 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
12775 unlock_user_struct(target_rnew
, arg3
, 0);
12779 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
12780 if (!is_error(ret
) && arg4
) {
12781 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
12782 return -TARGET_EFAULT
;
12784 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
12785 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
12786 unlock_user_struct(target_rold
, arg4
, 1);
12791 #ifdef TARGET_NR_gethostname
12792 case TARGET_NR_gethostname
:
12794 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
12796 ret
= get_errno(gethostname(name
, arg2
));
12797 unlock_user(name
, arg1
, arg2
);
12799 ret
= -TARGET_EFAULT
;
12804 #ifdef TARGET_NR_atomic_cmpxchg_32
12805 case TARGET_NR_atomic_cmpxchg_32
:
12807 /* should use start_exclusive from main.c */
12808 abi_ulong mem_value
;
12809 if (get_user_u32(mem_value
, arg6
)) {
12810 target_siginfo_t info
;
12811 info
.si_signo
= SIGSEGV
;
12813 info
.si_code
= TARGET_SEGV_MAPERR
;
12814 info
._sifields
._sigfault
._addr
= arg6
;
12815 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
12816 QEMU_SI_FAULT
, &info
);
12820 if (mem_value
== arg2
)
12821 put_user_u32(arg1
, arg6
);
12825 #ifdef TARGET_NR_atomic_barrier
12826 case TARGET_NR_atomic_barrier
:
12827 /* Like the kernel implementation and the
12828 qemu arm barrier, no-op this? */
12832 #ifdef TARGET_NR_timer_create
12833 case TARGET_NR_timer_create
:
12835 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12837 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
12840 int timer_index
= next_free_host_timer();
12842 if (timer_index
< 0) {
12843 ret
= -TARGET_EAGAIN
;
12845 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
12848 phost_sevp
= &host_sevp
;
12849 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
12855 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
12859 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
12860 return -TARGET_EFAULT
;
12868 #ifdef TARGET_NR_timer_settime
12869 case TARGET_NR_timer_settime
:
12871 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12872 * struct itimerspec * old_value */
12873 target_timer_t timerid
= get_timer_id(arg1
);
12877 } else if (arg3
== 0) {
12878 ret
= -TARGET_EINVAL
;
12880 timer_t htimer
= g_posix_timers
[timerid
];
12881 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
12883 if (target_to_host_itimerspec(&hspec_new
, arg3
)) {
12884 return -TARGET_EFAULT
;
12887 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
12888 if (arg4
&& host_to_target_itimerspec(arg4
, &hspec_old
)) {
12889 return -TARGET_EFAULT
;
12896 #ifdef TARGET_NR_timer_settime64
12897 case TARGET_NR_timer_settime64
:
12899 target_timer_t timerid
= get_timer_id(arg1
);
12903 } else if (arg3
== 0) {
12904 ret
= -TARGET_EINVAL
;
12906 timer_t htimer
= g_posix_timers
[timerid
];
12907 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
12909 if (target_to_host_itimerspec64(&hspec_new
, arg3
)) {
12910 return -TARGET_EFAULT
;
12913 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
12914 if (arg4
&& host_to_target_itimerspec64(arg4
, &hspec_old
)) {
12915 return -TARGET_EFAULT
;
12922 #ifdef TARGET_NR_timer_gettime
12923 case TARGET_NR_timer_gettime
:
12925 /* args: timer_t timerid, struct itimerspec *curr_value */
12926 target_timer_t timerid
= get_timer_id(arg1
);
12930 } else if (!arg2
) {
12931 ret
= -TARGET_EFAULT
;
12933 timer_t htimer
= g_posix_timers
[timerid
];
12934 struct itimerspec hspec
;
12935 ret
= get_errno(timer_gettime(htimer
, &hspec
));
12937 if (host_to_target_itimerspec(arg2
, &hspec
)) {
12938 ret
= -TARGET_EFAULT
;
12945 #ifdef TARGET_NR_timer_gettime64
12946 case TARGET_NR_timer_gettime64
:
12948 /* args: timer_t timerid, struct itimerspec64 *curr_value */
12949 target_timer_t timerid
= get_timer_id(arg1
);
12953 } else if (!arg2
) {
12954 ret
= -TARGET_EFAULT
;
12956 timer_t htimer
= g_posix_timers
[timerid
];
12957 struct itimerspec hspec
;
12958 ret
= get_errno(timer_gettime(htimer
, &hspec
));
12960 if (host_to_target_itimerspec64(arg2
, &hspec
)) {
12961 ret
= -TARGET_EFAULT
;
12968 #ifdef TARGET_NR_timer_getoverrun
12969 case TARGET_NR_timer_getoverrun
:
12971 /* args: timer_t timerid */
12972 target_timer_t timerid
= get_timer_id(arg1
);
12977 timer_t htimer
= g_posix_timers
[timerid
];
12978 ret
= get_errno(timer_getoverrun(htimer
));
12984 #ifdef TARGET_NR_timer_delete
12985 case TARGET_NR_timer_delete
:
12987 /* args: timer_t timerid */
12988 target_timer_t timerid
= get_timer_id(arg1
);
12993 timer_t htimer
= g_posix_timers
[timerid
];
12994 ret
= get_errno(timer_delete(htimer
));
12995 g_posix_timers
[timerid
] = 0;
13001 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13002 case TARGET_NR_timerfd_create
:
13003 return get_errno(timerfd_create(arg1
,
13004 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
13007 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13008 case TARGET_NR_timerfd_gettime
:
13010 struct itimerspec its_curr
;
13012 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
13014 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
13015 return -TARGET_EFAULT
;
13021 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13022 case TARGET_NR_timerfd_gettime64
:
13024 struct itimerspec its_curr
;
13026 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
13028 if (arg2
&& host_to_target_itimerspec64(arg2
, &its_curr
)) {
13029 return -TARGET_EFAULT
;
13035 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13036 case TARGET_NR_timerfd_settime
:
13038 struct itimerspec its_new
, its_old
, *p_new
;
13041 if (target_to_host_itimerspec(&its_new
, arg3
)) {
13042 return -TARGET_EFAULT
;
13049 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
13051 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
13052 return -TARGET_EFAULT
;
13058 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13059 case TARGET_NR_timerfd_settime64
:
13061 struct itimerspec its_new
, its_old
, *p_new
;
13064 if (target_to_host_itimerspec64(&its_new
, arg3
)) {
13065 return -TARGET_EFAULT
;
13072 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
13074 if (arg4
&& host_to_target_itimerspec64(arg4
, &its_old
)) {
13075 return -TARGET_EFAULT
;
13081 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13082 case TARGET_NR_ioprio_get
:
13083 return get_errno(ioprio_get(arg1
, arg2
));
13086 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13087 case TARGET_NR_ioprio_set
:
13088 return get_errno(ioprio_set(arg1
, arg2
, arg3
));
13091 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13092 case TARGET_NR_setns
:
13093 return get_errno(setns(arg1
, arg2
));
13095 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13096 case TARGET_NR_unshare
:
13097 return get_errno(unshare(arg1
));
13099 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13100 case TARGET_NR_kcmp
:
13101 return get_errno(kcmp(arg1
, arg2
, arg3
, arg4
, arg5
));
13103 #ifdef TARGET_NR_swapcontext
13104 case TARGET_NR_swapcontext
:
13105 /* PowerPC specific. */
13106 return do_swapcontext(cpu_env
, arg1
, arg2
, arg3
);
13108 #ifdef TARGET_NR_memfd_create
13109 case TARGET_NR_memfd_create
:
13110 p
= lock_user_string(arg1
);
13112 return -TARGET_EFAULT
;
13114 ret
= get_errno(memfd_create(p
, arg2
));
13115 fd_trans_unregister(ret
);
13116 unlock_user(p
, arg1
, 0);
13119 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13120 case TARGET_NR_membarrier
:
13121 return get_errno(membarrier(arg1
, arg2
));
13124 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13125 case TARGET_NR_copy_file_range
:
13127 loff_t inoff
, outoff
;
13128 loff_t
*pinoff
= NULL
, *poutoff
= NULL
;
13131 if (get_user_u64(inoff
, arg2
)) {
13132 return -TARGET_EFAULT
;
13137 if (get_user_u64(outoff
, arg4
)) {
13138 return -TARGET_EFAULT
;
13142 ret
= get_errno(safe_copy_file_range(arg1
, pinoff
, arg3
, poutoff
,
13144 if (!is_error(ret
) && ret
> 0) {
13146 if (put_user_u64(inoff
, arg2
)) {
13147 return -TARGET_EFAULT
;
13151 if (put_user_u64(outoff
, arg4
)) {
13152 return -TARGET_EFAULT
;
13161 qemu_log_mask(LOG_UNIMP
, "Unsupported syscall: %d\n", num
);
13162 return -TARGET_ENOSYS
;
13167 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
13168 abi_long arg2
, abi_long arg3
, abi_long arg4
,
13169 abi_long arg5
, abi_long arg6
, abi_long arg7
,
13172 CPUState
*cpu
= env_cpu(cpu_env
);
13175 #ifdef DEBUG_ERESTARTSYS
13176 /* Debug-only code for exercising the syscall-restart code paths
13177 * in the per-architecture cpu main loops: restart every syscall
13178 * the guest makes once before letting it through.
13184 return -TARGET_ERESTARTSYS
;
13189 record_syscall_start(cpu
, num
, arg1
,
13190 arg2
, arg3
, arg4
, arg5
, arg6
, arg7
, arg8
);
13192 if (unlikely(qemu_loglevel_mask(LOG_STRACE
))) {
13193 print_syscall(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
13196 ret
= do_syscall1(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
,
13197 arg5
, arg6
, arg7
, arg8
);
13199 if (unlikely(qemu_loglevel_mask(LOG_STRACE
))) {
13200 print_syscall_ret(cpu_env
, num
, ret
, arg1
, arg2
,
13201 arg3
, arg4
, arg5
, arg6
);
13204 record_syscall_return(cpu
, num
, ret
);