/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/path.h"
#include "qemu/memfd.h"
#include "qemu/queue.h"
#include <sys/mount.h>
#include <sys/fsuid.h>
#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <linux/capability.h>
#include <sys/timex.h>
#include <sys/socket.h>
#include <linux/sockios.h>
#include <sys/times.h>
#include <sys/statfs.h>
#include <sys/sysinfo.h>
#include <sys/signalfd.h>
//#include <sys/user.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/errqueue.h>
#include <linux/random.h>
#include <sys/timerfd.h>
#include <sys/eventfd.h>
#include <sys/epoll.h>
#include "qemu/xattr.h"
#ifdef CONFIG_SENDFILE
#include <sys/sendfile.h>
#endif
#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/mtio.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#if defined(CONFIG_USBFS)
#include <linux/usbdevice_fs.h>
#include <linux/usb/ch9.h>
#endif
#include <linux/vt.h>
#include <linux/dm-ioctl.h>
#include <linux/reboot.h>
#include <linux/route.h>
#include <linux/filter.h>
#include <linux/blkpg.h>
#include <netpacket/packet.h>
#include <linux/netlink.h>
#include <linux/if_alg.h>
#include <linux/rtc.h>
#include <sound/asound.h>
#include <linux/btrfs.h>
#include <libdrm/drm.h>
#include <libdrm/i915_drm.h>
#include "linux_loop.h"
#include "qemu/guest-random.h"
#include "qemu/selfmap.h"
#include "user/syscall-trace.h"
#include "qapi/error.h"
#include "fd-trans.h"
#ifndef CLONE_IO
#define CLONE_IO                0x80000000      /* Clone io context */
#endif

/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */
/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                     \
    (CLONE_DETACHED | CLONE_IO)

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS               \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS                             \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

#define CLONE_INVALID_FORK_FLAGS                                        \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

#define CLONE_INVALID_THREAD_FLAGS                                      \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
       CLONE_IGNORED_FLAGS))

/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */
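/* For example, a typical glibc pthread_create() issues a clone() whose
 * flags contain all of CLONE_THREAD_FLAGS plus only optional/ignored
 * bits (CLONE_SETTLS, CLONE_PARENT_SETTID, CLONE_CHILD_CLEARTID, ...),
 * so it matches the thread-creation path; a plain fork() passes only a
 * CSIGNAL exit-signal value (SIGCHLD) and matches the fork path.
 */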
/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
#define _syscall0(type,name)            \
static type name (void)                 \
{                                       \
    return syscall(__NR_##name);        \
}

#define _syscall1(type,name,type1,arg1)         \
static type name (type1 arg1)                   \
{                                               \
    return syscall(__NR_##name, arg1);          \
}

#define _syscall2(type,name,type1,arg1,type2,arg2)      \
static type name (type1 arg1,type2 arg2)                \
{                                                       \
    return syscall(__NR_##name, arg1, arg2);            \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)   \
static type name (type1 arg1,type2 arg2,type3 arg3)             \
{                                                               \
    return syscall(__NR_##name, arg1, arg2, arg3);              \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)  \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)            \
{                                                                         \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4);                  \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,  \
                  type5,arg5)                                              \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{                                                                          \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);             \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,   \
                  type5,arg5,type6,arg6)                                    \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,  \
                  type6 arg6)                                               \
{                                                                           \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);        \
}
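/* Each _syscallN use below expands to a small static wrapper; e.g.
 *     _syscall2(int, capget, struct __user_cap_header_struct *, header,
 *               struct __user_cap_data_struct *, data);
 * defines
 *     static int capget(struct __user_cap_header_struct *header,
 *                       struct __user_cap_data_struct *data)
 *     { return syscall(__NR_capget, header, data); }
 */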
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
#define __NR_sys_syslog __NR_syslog
#if defined(__NR_futex)
# define __NR_sys_futex __NR_futex
#endif
#if defined(__NR_futex_time64)
# define __NR_sys_futex_time64 __NR_futex_time64
#endif
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
#define __NR_sys_statx __NR_statx

#if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif
#define __NR_sys_gettid __NR_gettid
_syscall0(int, sys_gettid)

/* For the 64-bit guest on 32-bit host case we must emulate
 * getdents using getdents64, because otherwise the host
 * might hand us back more dirent records than we can fit
 * into the guest buffer after structure format conversion.
 * Otherwise we emulate getdents with getdents if the host has it.
 */
#if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
#define EMULATE_GETDENTS_WITH_GETDENTS
#endif

#if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
#if (defined(TARGET_NR_getdents) && \
      !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
_syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
          siginfo_t *, uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
_syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_getcpu __NR_getcpu
_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
_syscall2(int, ioprio_get, int, which, int, who)
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
#endif

#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
_syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
          unsigned long, idx1, unsigned long, idx2)
#endif
/*
 * It is assumed that struct statx is architecture independent.
 */
#if defined(TARGET_NR_statx) && defined(__NR_statx)
_syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
          unsigned int, mask, struct target_statx *, statxbuf)
#endif
#if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
_syscall2(int, membarrier, int, cmd, int, flags)
#endif
static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest. */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
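/* Each entry is { target_mask, target_bits, host_mask, host_bits };
 * the bitmask translation helpers (e.g. target_to_host_bitmask())
 * walk the table to convert open() flags in either direction. */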
_syscall2(int, sys_getcwd1, char *, buf, size_t, size)
#if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

static inline int next_free_host_timer(void)
{
    int k;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
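/* A slot is claimed by storing the placeholder (timer_t) 1; the real
 * host timer id from timer_create() later overwrites it, so a zero
 * entry always means "free". */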
#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};
/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EAGAIN]           = TARGET_EAGAIN,
    [EIDRM]            = TARGET_EIDRM,
    [ECHRNG]           = TARGET_ECHRNG,
    [EL2NSYNC]         = TARGET_EL2NSYNC,
    [EL3HLT]           = TARGET_EL3HLT,
    [EL3RST]           = TARGET_EL3RST,
    [ELNRNG]           = TARGET_ELNRNG,
    [EUNATCH]          = TARGET_EUNATCH,
    [ENOCSI]           = TARGET_ENOCSI,
    [EL2HLT]           = TARGET_EL2HLT,
    [EDEADLK]          = TARGET_EDEADLK,
    [ENOLCK]           = TARGET_ENOLCK,
    [EBADE]            = TARGET_EBADE,
    [EBADR]            = TARGET_EBADR,
    [EXFULL]           = TARGET_EXFULL,
    [ENOANO]           = TARGET_ENOANO,
    [EBADRQC]          = TARGET_EBADRQC,
    [EBADSLT]          = TARGET_EBADSLT,
    [EBFONT]           = TARGET_EBFONT,
    [ENOSTR]           = TARGET_ENOSTR,
    [ENODATA]          = TARGET_ENODATA,
    [ETIME]            = TARGET_ETIME,
    [ENOSR]            = TARGET_ENOSR,
    [ENONET]           = TARGET_ENONET,
    [ENOPKG]           = TARGET_ENOPKG,
    [EREMOTE]          = TARGET_EREMOTE,
    [ENOLINK]          = TARGET_ENOLINK,
    [EADV]             = TARGET_EADV,
    [ESRMNT]           = TARGET_ESRMNT,
    [ECOMM]            = TARGET_ECOMM,
    [EPROTO]           = TARGET_EPROTO,
    [EDOTDOT]          = TARGET_EDOTDOT,
    [EMULTIHOP]        = TARGET_EMULTIHOP,
    [EBADMSG]          = TARGET_EBADMSG,
    [ENAMETOOLONG]     = TARGET_ENAMETOOLONG,
    [EOVERFLOW]        = TARGET_EOVERFLOW,
    [ENOTUNIQ]         = TARGET_ENOTUNIQ,
    [EBADFD]           = TARGET_EBADFD,
    [EREMCHG]          = TARGET_EREMCHG,
    [ELIBACC]          = TARGET_ELIBACC,
    [ELIBBAD]          = TARGET_ELIBBAD,
    [ELIBSCN]          = TARGET_ELIBSCN,
    [ELIBMAX]          = TARGET_ELIBMAX,
    [ELIBEXEC]         = TARGET_ELIBEXEC,
    [EILSEQ]           = TARGET_EILSEQ,
    [ENOSYS]           = TARGET_ENOSYS,
    [ELOOP]            = TARGET_ELOOP,
    [ERESTART]         = TARGET_ERESTART,
    [ESTRPIPE]         = TARGET_ESTRPIPE,
    [ENOTEMPTY]        = TARGET_ENOTEMPTY,
    [EUSERS]           = TARGET_EUSERS,
    [ENOTSOCK]         = TARGET_ENOTSOCK,
    [EDESTADDRREQ]     = TARGET_EDESTADDRREQ,
    [EMSGSIZE]         = TARGET_EMSGSIZE,
    [EPROTOTYPE]       = TARGET_EPROTOTYPE,
    [ENOPROTOOPT]      = TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT]  = TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT]  = TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP]       = TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT]     = TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT]     = TARGET_EAFNOSUPPORT,
    [EADDRINUSE]       = TARGET_EADDRINUSE,
    [EADDRNOTAVAIL]    = TARGET_EADDRNOTAVAIL,
    [ENETDOWN]         = TARGET_ENETDOWN,
    [ENETUNREACH]      = TARGET_ENETUNREACH,
    [ENETRESET]        = TARGET_ENETRESET,
    [ECONNABORTED]     = TARGET_ECONNABORTED,
    [ECONNRESET]       = TARGET_ECONNRESET,
    [ENOBUFS]          = TARGET_ENOBUFS,
    [EISCONN]          = TARGET_EISCONN,
    [ENOTCONN]         = TARGET_ENOTCONN,
    [EUCLEAN]          = TARGET_EUCLEAN,
    [ENOTNAM]          = TARGET_ENOTNAM,
    [ENAVAIL]          = TARGET_ENAVAIL,
    [EISNAM]           = TARGET_EISNAM,
    [EREMOTEIO]        = TARGET_EREMOTEIO,
    [EDQUOT]           = TARGET_EDQUOT,
    [ESHUTDOWN]        = TARGET_ESHUTDOWN,
    [ETOOMANYREFS]     = TARGET_ETOOMANYREFS,
    [ETIMEDOUT]        = TARGET_ETIMEDOUT,
    [ECONNREFUSED]     = TARGET_ECONNREFUSED,
    [EHOSTDOWN]        = TARGET_EHOSTDOWN,
    [EHOSTUNREACH]     = TARGET_EHOSTUNREACH,
    [EALREADY]         = TARGET_EALREADY,
    [EINPROGRESS]      = TARGET_EINPROGRESS,
    [ESTALE]           = TARGET_ESTALE,
    [ECANCELED]        = TARGET_ECANCELED,
    [ENOMEDIUM]        = TARGET_ENOMEDIUM,
    [EMEDIUMTYPE]      = TARGET_EMEDIUMTYPE,
#ifdef ENOKEY
    [ENOKEY]           = TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED]      = TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED]      = TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED]     = TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD]       = TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE]  = TARGET_ENOTRECOVERABLE,
#endif
#ifdef ENOMSG
    [ENOMSG]           = TARGET_ENOMSG,
#endif
#ifdef ERFKILL
    [ERFKILL]          = TARGET_ERFKILL,
#endif
#ifdef EHWPOISON
    [EHWPOISON]        = TARGET_EHWPOISON,
#endif
};
static inline int host_to_target_errno(int err)
{
    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
        host_to_target_errno_table[err]) {
        return host_to_target_errno_table[err];
    }
    return err;
}

static inline int target_to_host_errno(int err)
{
    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
        target_to_host_errno_table[err]) {
        return target_to_host_errno_table[err];
    }
    return err;
}

static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}
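/* e.g. get_errno(open(path, flags)) returns the new fd on success, or
 * -TARGET_ENOENT if the host open() failed with ENOENT. */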
const char *target_strerror(int err)
{
    if (err == TARGET_ERESTARTSYS) {
        return "To be restarted";
    }
    if (err == TARGET_QEMU_ESIGRETURN) {
        return "Successful exit from sigreturn";
    }

    if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
        return NULL;
    }
    return strerror(target_to_host_errno(err));
}
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
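/* These wrappers funnel through safe_syscall() (the per-host assembly
 * helper elsewhere in the tree); unlike a plain syscall(), it fails with
 * errno set to TARGET_ERESTARTSYS instead of racing if a guest signal
 * becomes pending just as the host syscall is entered, so the caller
 * can restart the guest syscall cleanly. */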
safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
              int, flags, mode_t, mode)
#if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
              struct rusage *, rusage)
#endif
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
              int, options, struct rusage *, rusage)
safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
#endif
#if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
              size_t, sigsetsize)
#endif
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
              size_t, sigsetsize)
#if defined(__NR_futex)
safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
              socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
#if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
#endif
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
#if defined(TARGET_NR_nanosleep)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#endif
#if defined(TARGET_NR_clock_nanosleep) || \
    defined(TARGET_NR_clock_nanosleep_time64)
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
#ifdef __NR_ipc
#ifndef __sparc__
safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr)
#else
safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
#endif
#endif
#ifdef __NR_msgsnd
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
#endif
#ifdef __NR_msgrcv
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
#endif
#ifdef __NR_semtimedop
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedsend_time64)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedreceive) || \
    defined(TARGET_NR_mq_timedreceive_time64)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
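/* So callers look like e.g. safe_fcntl(fd, F_SETLK64, &fl) where fl is
 * a struct flock64, never the unsuffixed F_SETLK/struct flock forms. */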
static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}
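/* e.g. host_to_target_sock_type(SOCK_DGRAM | SOCK_CLOEXEC) yields
 * TARGET_SOCK_DGRAM | TARGET_SOCK_CLOEXEC. */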
static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)
/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done...  */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    } else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
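/* Note: a guest brk() that stays within the pages already reserved to
 * the heap only zeroes the newly exposed bytes and moves target_brk;
 * only growth past brk_page takes the target_mmap() path above. */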
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}
static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
#endif
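/* The guest fd_set is repacked word by word: fd k lives in bit
 * (k % TARGET_ABI_BITS) of word (k / TARGET_ABI_BITS); e.g. with a
 * 32-bit ABI, fd 33 is bit 1 of word 1. */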
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
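/* e.g. an Alpha host (HOST_HZ 1024) reporting 2048 ticks to a target
 * with TARGET_HZ 100 yields 2048 * 100 / 1024 = 200 target ticks. */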
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
#ifdef TARGET_NR_setrlimit
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}
#endif
#if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
#endif
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
static inline abi_long copy_from_user_timeval64(struct timeval *tv,
                                                abi_ulong target_tv_addr)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
#endif

static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
                                              const struct timeval *tv)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
#if defined(TARGET_NR_futex) || \
    defined(TARGET_NR_rt_sigtimedwait) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) || \
    defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
    defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
    defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
    defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
#if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
    defined(TARGET_NR_timer_settime64) || \
    defined(TARGET_NR_mq_timedsend_time64) || \
    defined(TARGET_NR_mq_timedreceive_time64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_clock_nanosleep_time64) || \
    defined(TARGET_NR_rt_sigtimedwait_time64) || \
    defined(TARGET_NR_utimensat) || \
    defined(TARGET_NR_utimensat_time64) || \
    defined(TARGET_NR_semtimedop_time64) || \
    defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
                                                 abi_ulong target_addr)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    /* in 32bit mode, this drops the padding */
    host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}

static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
                                                 struct timespec *host_ts)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
#if defined(TARGET_NR_gettimeofday)
static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
                                             struct timezone *tz)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 1);

    return 0;
}
#endif

#if defined(TARGET_NR_settimeofday)
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
#endif
#if defined(TARGET_WANT_OLD_SYS_SELECT)
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
#if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
                            abi_long arg4, abi_long arg5, abi_long arg6,
                            bool time64)
{
    abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /*
     * The 6th arg is actually two args smashed together,
     * so we cannot use the C library.
     */
    sigset_t set;
    struct {
        sigset_t *set;
        size_t size;
    } sig, *sig_ptr;

    abi_ulong arg_sigset, arg_sigsize, *arg7;
    target_sigset_t *target_sigset;

    n = arg1;
    rfd_addr = arg2;
    wfd_addr = arg3;
    efd_addr = arg4;
    ts_addr = arg5;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /*
     * This takes a timespec, and not a timeval, so we cannot
     * use the do_select() helper ...
     */
    if (ts_addr) {
        if (time64) {
            if (target_to_host_timespec64(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        }
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    /* Extract the two packed args for the sigset */
    if (arg6) {
        sig_ptr = &sig;
        sig.size = SIGSET_T_SIZE;

        arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
        if (!arg7) {
            return -TARGET_EFAULT;
        }
        arg_sigset = tswapal(arg7[0]);
        arg_sigsize = tswapal(arg7[1]);
        unlock_user(arg7, arg6, 0);

        if (arg_sigset) {
            sig.set = &set;
            if (arg_sigsize != sizeof(*target_sigset)) {
                /* Like the kernel, we enforce correct size sigsets */
                return -TARGET_EINVAL;
            }
            target_sigset = lock_user(VERIFY_READ, arg_sigset,
                                      sizeof(*target_sigset), 1);
            if (!target_sigset) {
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(&set, target_sigset);
            unlock_user(target_sigset, arg_sigset, 0);
        } else {
            sig.set = NULL;
        }
    } else {
        sig_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, sig_ptr));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
            return -TARGET_EFAULT;
        }
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
            return -TARGET_EFAULT;
        }
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
            return -TARGET_EFAULT;
        }
        if (time64) {
            if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        }
    }
    return ret;
}
#endif
#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
    defined(TARGET_NR_ppoll_time64)
static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
                         abi_long arg4, abi_long arg5, bool ppoll, bool time64)
{
    struct target_pollfd *target_pfd;
    unsigned int nfds = arg2;
    struct pollfd *pfd;
    unsigned int i;
    abi_long ret;

    pfd = NULL;
    target_pfd = NULL;
    if (nfds) {
        if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
            return -TARGET_EINVAL;
        }
        target_pfd = lock_user(VERIFY_WRITE, arg1,
                               sizeof(struct target_pollfd) * nfds, 1);
        if (!target_pfd) {
            return -TARGET_EFAULT;
        }

        pfd = alloca(sizeof(struct pollfd) * nfds);
        for (i = 0; i < nfds; i++) {
            pfd[i].fd = tswap32(target_pfd[i].fd);
            pfd[i].events = tswap16(target_pfd[i].events);
        }
    }
    if (ppoll) {
        struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
        target_sigset_t *target_set;
        sigset_t _set, *set = &_set;

        if (arg3) {
            if (time64) {
                if (target_to_host_timespec64(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                if (target_to_host_timespec(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            }
        } else {
            timeout_ts = NULL;
        }

        if (arg4) {
            if (arg5 != sizeof(target_sigset_t)) {
                unlock_user(target_pfd, arg1, 0);
                return -TARGET_EINVAL;
            }

            target_set = lock_user(VERIFY_READ, arg4,
                                   sizeof(target_sigset_t), 1);
            if (!target_set) {
                unlock_user(target_pfd, arg1, 0);
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(set, target_set);
        } else {
            set = NULL;
        }

        ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
                                   set, SIGSET_T_SIZE));

        if (!is_error(ret) && arg3) {
            if (time64) {
                if (host_to_target_timespec64(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                if (host_to_target_timespec(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        if (arg4) {
            unlock_user(target_set, arg4, 0);
        }
    } else {
        struct timespec ts, *pts;

        if (arg3 >= 0) {
            /* Convert ms to secs, ns */
            ts.tv_sec = arg3 / 1000;
            ts.tv_nsec = (arg3 % 1000) * 1000000LL;
            pts = &ts;
        } else {
            /* -ve poll() timeout means "infinite" */
            pts = NULL;
        }
        ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
    }

    if (!is_error(ret)) {
        for (i = 0; i < nfds; i++) {
            target_pfd[i].revents = tswap16(pfd[i].revents);
        }
    }
    unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
    return ret;
}
#endif
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK &&
        len >= sizeof(struct target_sockaddr_nl)) {
        struct target_sockaddr_nl *target_nl =
            (struct target_sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
            (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            qemu_log_mask(LOG_UNIMP,
                          ("Unsupported ancillary data %d/%d: "
                           "unhandled msg size\n"),
                          tswap32(target_cmsg->cmsg_level),
                          tswap32(target_cmsg->cmsg_type));
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct target_cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        tgt_len = len;
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IP:
            switch (cmsg->cmsg_type) {
            case IP_TTL:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IP_RECVERR:
            {
                struct errhdr_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in offender;
                };
                struct errhdr_t *errh = (struct errhdr_t *)data;
                struct errhdr_t *target_errh =
                    (struct errhdr_t *)target_data;

                if (len != sizeof(struct errhdr_t) ||
                    tgt_len != sizeof(struct errhdr_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                                        (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IPV6:
            switch (cmsg->cmsg_type) {
            case IPV6_HOPLIMIT:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IPV6_RECVERR:
            {
                struct errhdr6_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in6 offender;
                };
                struct errhdr6_t *errh = (struct errhdr6_t *)data;
                struct errhdr6_t *target_errh =
                    (struct errhdr6_t *)target_data;

                if (len != sizeof(struct errhdr6_t) ||
                    tgt_len != sizeof(struct errhdr6_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                                        (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
        tgt_space = TARGET_CMSG_SPACE(tgt_len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
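/*
 * Note on tgt_len above: host and target payload sizes can legitimately
 * differ. For example, for SO_TIMESTAMP a 64-bit host fills in a 16-byte
 * struct timeval while a 32-bit guest expects an 8-byte struct
 * target_timeval, so the conversion cannot simply be a byte copy of
 * len bytes.
 */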
/* do_setsockopt() Must return target values and target errnos. */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTTL:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
        case IPV6_ADDRFORM:
        case IPV6_2292PKTINFO:
        case IPV6_RECVTCLASS:
        case IPV6_RECVRTHDR:
        case IPV6_2292RTHDR:
        case IPV6_RECVHOPOPTS:
        case IPV6_2292HOPOPTS:
        case IPV6_RECVDSTOPTS:
        case IPV6_2292DSTOPTS:
        case IPV6_TCLASS:
#ifdef IPV6_RECVPATHMTU
        case IPV6_RECVPATHMTU:
#endif
#ifdef IPV6_TRANSPARENT
        case IPV6_TRANSPARENT:
#endif
#ifdef IPV6_FREEBIND
        case IPV6_FREEBIND:
#endif
#ifdef IPV6_RECVORIGDSTADDR
        case IPV6_RECVORIGDSTADDR:
#endif
            val = 0;
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        case IPV6_PKTINFO:
        {
            struct in6_pktinfo pki;

            if (optlen < sizeof(pki)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
                return -TARGET_EFAULT;
            }

            pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &pki, sizeof(pki)));
            break;
        }
        case IPV6_ADD_MEMBERSHIP:
        case IPV6_DROP_MEMBERSHIP:
        {
            struct ipv6_mreq ipv6mreq;

            if (optlen < sizeof(ipv6mreq)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
                return -TARGET_EFAULT;
            }

            ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &ipv6mreq, sizeof(ipv6mreq)));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_ICMPV6:
        switch (optname) {
        case ICMPV6_FILTER:
        {
            struct icmp6_filter icmp6f;

            if (optlen > sizeof(icmp6f)) {
                optlen = sizeof(icmp6f);
            }

            if (copy_from_user(&icmp6f, optval_addr, optlen)) {
                return -TARGET_EFAULT;
            }

            for (val = 0; val < 8; val++) {
                icmp6f.data[val] = tswap32(icmp6f.data[val]);
            }

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &icmp6f, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_RAW:
        switch (optname) {
        case ICMP_FILTER:
        case IPV6_CHECKSUM:
            /* those take a u32 value */
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }

            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;

        default:
            goto unimplemented;
        }
        break;
#if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
    case SOL_ALG:
        switch (optname) {
        case ALG_SET_KEY:
        {
            char *alg_key = g_malloc(optlen);

            if (!alg_key) {
                return -TARGET_ENOMEM;
            }
            if (copy_from_user(alg_key, optval_addr, optlen)) {
                g_free(alg_key);
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       alg_key, optlen));
            g_free(alg_key);
            break;
        }
        case ALG_SET_AEAD_AUTHSIZE:
        {
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       NULL, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
#endif
    case TARGET_SOL_SOCKET:
        switch (optname) {
        case TARGET_SO_RCVTIMEO:
        {
            struct timeval tv;

            optname = SO_RCVTIMEO;

set_timeout:
            if (optlen != sizeof(struct target_timeval)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user_timeval(&tv, optval_addr)) {
                return -TARGET_EFAULT;
            }

            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                       &tv, sizeof(tv)));
            return ret;
        }
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            goto set_timeout;
        case TARGET_SO_ATTACH_FILTER:
        {
            struct target_sock_fprog *tfprog;
            struct target_sock_filter *tfilter;
            struct sock_fprog fprog;
            struct sock_filter *filter;
            int i;

            if (optlen != sizeof(*tfprog)) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            if (!lock_user_struct(VERIFY_READ, tfilter,
                                  tswapal(tfprog->filter), 0)) {
                unlock_user_struct(tfprog, optval_addr, 1);
                return -TARGET_EFAULT;
            }

            fprog.len = tswap16(tfprog->len);
            filter = g_try_new(struct sock_filter, fprog.len);
            if (filter == NULL) {
                unlock_user_struct(tfilter, tfprog->filter, 1);
                unlock_user_struct(tfprog, optval_addr, 1);
                return -TARGET_ENOMEM;
            }
            for (i = 0; i < fprog.len; i++) {
                filter[i].code = tswap16(tfilter[i].code);
                filter[i].jt = tfilter[i].jt;
                filter[i].jf = tfilter[i].jf;
                filter[i].k = tswap32(tfilter[i].k);
            }
            fprog.filter = filter;

            ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
                            SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
            g_free(filter);

            unlock_user_struct(tfilter, tfprog->filter, 1);
            unlock_user_struct(tfprog, optval_addr, 1);
            return ret;
        }
        case TARGET_SO_BINDTODEVICE:
        {
            char *dev_ifname, *addr_ifname;

            if (optlen > IFNAMSIZ - 1) {
                optlen = IFNAMSIZ - 1;
            }
            dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!dev_ifname) {
                return -TARGET_EFAULT;
            }
            optname = SO_BINDTODEVICE;
            addr_ifname = alloca(IFNAMSIZ);
            memcpy(addr_ifname, dev_ifname, optlen);
            addr_ifname[optlen] = 0;
            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                       addr_ifname, optlen));
            unlock_user (dev_ifname, optval_addr, 0);
            return ret;
        }
        case TARGET_SO_LINGER:
        {
            struct linger lg;
            struct target_linger *tlg;

            if (optlen != sizeof(struct target_linger)) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
                return -TARGET_EFAULT;
            }
            __get_user(lg.l_onoff, &tlg->l_onoff);
            __get_user(lg.l_linger, &tlg->l_linger);
            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
                                       &lg, sizeof(lg)));
            unlock_user_struct(tlg, optval_addr, 0);
            return ret;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            break;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            break;
#ifdef SO_REUSEPORT
        case TARGET_SO_REUSEPORT:
            optname = SO_REUSEPORT;
            break;
#endif
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            break;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            break;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            break;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            break;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            break;
        case TARGET_SO_SNDBUFFORCE:
            optname = SO_SNDBUFFORCE;
            break;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            break;
        case TARGET_SO_RCVBUFFORCE:
            optname = SO_RCVBUFFORCE;
            break;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            break;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            break;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            break;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            break;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            break;
        case TARGET_SO_PASSSEC:
            optname = SO_PASSSEC;
            break;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            break;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            break;
        default:
            goto unimplemented;
        }
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
#ifdef SOL_NETLINK
    case SOL_NETLINK:
        switch (optname) {
        case NETLINK_PKTINFO:
        case NETLINK_ADD_MEMBERSHIP:
        case NETLINK_DROP_MEMBERSHIP:
        case NETLINK_BROADCAST_ERROR:
        case NETLINK_NO_ENOBUFS:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LISTEN_ALL_NSID:
        case NETLINK_CAP_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
        case NETLINK_EXT_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
        case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
            break;
        default:
            goto unimplemented;
        }
        val = 0;
        if (optlen < sizeof(uint32_t)) {
            return -TARGET_EINVAL;
        }
        if (get_user_u32(val, optval_addr)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
                                   sizeof(val)));
        break;
#endif /* SOL_NETLINK */
    default:
    unimplemented:
        qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
                      level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}
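/*
 * The TARGET_SO_* -> SO_* renaming above is not cosmetic: socket option
 * numbers are part of the guest ABI and differ between architectures
 * (for example, SO_REUSEADDR is 2 on most Linux ports but 0x0004 on
 * MIPS), so each guest constant must be translated to the host's
 * numbering before the host setsockopt() call.
 */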
/* do_getsockopt() Must return target values and target errnos. */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_RCVTIMEO: {
            struct timeval tv;
            socklen_t tvlen;

            optname = SO_RCVTIMEO;

get_timeout:
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            tvlen = sizeof(tv);
            ret = get_errno(getsockopt(sockfd, level, optname,
                                       &tv, &tvlen));
            if (ret < 0) {
                return ret;
            }
            if (len > sizeof(struct target_timeval)) {
                len = sizeof(struct target_timeval);
            }
            if (copy_to_user_timeval(optval_addr, &tv)) {
                return -TARGET_EFAULT;
            }
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            goto get_timeout;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case TARGET_SO_PEERSEC: {
            char *name;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }
            name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
            if (!name) {
                return -TARGET_EFAULT;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
                                       name, &lv));
            if (put_user_u32(lv, optlen)) {
                ret = -TARGET_EFAULT;
            }
            unlock_user(name, optval_addr, lv);
            break;
        }
        case TARGET_SO_LINGER:
        {
            struct linger lg;
            socklen_t lglen;
            struct target_linger *tlg;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            lglen = sizeof(lg);
            ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
                                       &lg, &lglen));
            if (ret < 0) {
                return ret;
            }
            if (len > lglen) {
                len = lglen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(lg.l_onoff, &tlg->l_onoff);
            __put_user(lg.l_linger, &tlg->l_linger);
            unlock_user_struct(tlg, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
#ifdef SO_REUSEPORT
        case TARGET_SO_REUSEPORT:
            optname = SO_REUSEPORT;
            goto int_case;
#endif
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            val = host_to_target_sock_type(val);
        }
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
        case IPV6_ADDRFORM:
        case IPV6_2292PKTINFO:
        case IPV6_RECVTCLASS:
        case IPV6_RECVRTHDR:
        case IPV6_2292RTHDR:
        case IPV6_RECVHOPOPTS:
        case IPV6_2292HOPOPTS:
        case IPV6_RECVDSTOPTS:
        case IPV6_2292DSTOPTS:
        case IPV6_TCLASS:
#ifdef IPV6_RECVPATHMTU
        case IPV6_RECVPATHMTU:
#endif
#ifdef IPV6_TRANSPARENT
        case IPV6_TRANSPARENT:
#endif
#ifdef IPV6_FREEBIND
        case IPV6_FREEBIND:
#endif
#ifdef IPV6_RECVORIGDSTADDR
        case IPV6_RECVORIGDSTADDR:
#endif
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
#ifdef SOL_NETLINK
    case SOL_NETLINK:
        switch (optname) {
        case NETLINK_PKTINFO:
        case NETLINK_BROADCAST_ERROR:
        case NETLINK_NO_ENOBUFS:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LISTEN_ALL_NSID:
        case NETLINK_CAP_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
        case NETLINK_EXT_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
        case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len != sizeof(val)) {
                return -TARGET_EINVAL;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0) {
                return ret;
            }
            if (put_user_u32(lv, optlen)
                || put_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            break;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LIST_MEMBERSHIPS:
        {
            uint32_t *results;
            int i;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }
            results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
            if (!results) {
                return -TARGET_EFAULT;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
            if (ret < 0) {
                unlock_user(results, optval_addr, 0);
                return ret;
            }
            /* swap host endianness to target endianness. */
            for (i = 0; i < (len / sizeof(uint32_t)); i++) {
                results[i] = tswap32(results[i]);
            }
            if (put_user_u32(lv, optlen)) {
                return -TARGET_EFAULT;
            }
            unlock_user(results, optval_addr, 0);
            break;
        }
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
        default:
            goto unimplemented;
        }
        break;
#endif /* SOL_NETLINK */
    default:
    unimplemented:
        qemu_log_mask(LOG_UNIMP,
                      "getsockopt level=%d optname=%d not yet supported\n",
                      level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
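/*
 * The byte-sized store in the SOL_IP/SOL_IPV6 paths above mirrors the
 * kernel's compatibility behaviour: if the guest supplies a buffer
 * smaller than an int and the option value fits in one byte, Linux
 * returns a single byte rather than failing, so we do the same.
 */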
/* Convert target low/high pair representing file offset into the host
 * low/high pair. This function doesn't handle offsets bigger than 64 bits
 * as the kernel doesn't handle them either.
 */
static void target_to_host_low_high(abi_ulong tlow,
                                    abi_ulong thigh,
                                    unsigned long *hlow,
                                    unsigned long *hhigh)
{
    uint64_t off = tlow |
        ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
        TARGET_LONG_BITS / 2;

    *hlow = off;
    *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
}
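/*
 * Worked example (assuming a 32-bit guest on a 64-bit host):
 * tlow = 0x89abcdef and thigh = 0x01234567 combine into
 * off = 0x0123456789abcdef, so *hlow receives the full 64-bit value
 * and *hhigh becomes 0. The shifts are split in two because shifting
 * a value by its full bit width is undefined behaviour in C.
 */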
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
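/*
 * This mirrors Linux writev() semantics: if, say, iov[0] is valid but
 * iov[1] points at unmapped guest memory, the syscall is not failed
 * outright; iov[1] and all later entries are given zero length, so the
 * guest observes a short transfer instead of EFAULT.
 */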
static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                         abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec) {
        for (i = 0; i < count; i++) {
            abi_ulong base = tswapal(target_vec[i].iov_base);
            abi_long len = tswapal(target_vec[i].iov_len);
            if (len < 0) {
                break;
            }
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
        unlock_user(target_vec, target_addr, 0);
    }

    g_free(vec);
}
static inline int target_to_host_sock_type(int *type)
{
    int host_type = 0;
    int target_type = *type;

    switch (target_type & TARGET_SOCK_TYPE_MASK) {
    case TARGET_SOCK_DGRAM:
        host_type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        host_type = SOCK_STREAM;
        break;
    default:
        host_type = target_type & TARGET_SOCK_TYPE_MASK;
        break;
    }
    if (target_type & TARGET_SOCK_CLOEXEC) {
#if defined(SOCK_CLOEXEC)
        host_type |= SOCK_CLOEXEC;
#else
        return -TARGET_EINVAL;
#endif
    }
    if (target_type & TARGET_SOCK_NONBLOCK) {
#if defined(SOCK_NONBLOCK)
        host_type |= SOCK_NONBLOCK;
#elif !defined(O_NONBLOCK)
        return -TARGET_EINVAL;
#endif
    }
    *type = host_type;
    return 0;
}
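/*
 * The SOCK_CLOEXEC/SOCK_NONBLOCK type flags are guest-ABI values that
 * need not match the host's: TARGET_SOCK_NONBLOCK is the target's
 * O_NONBLOCK, which differs across architectures (Alpha and MIPS, for
 * instance, use different bit values). Hosts lacking SOCK_NONBLOCK fall
 * back to the fcntl() fixup below after the socket has been created.
 */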
/* Try to emulate socket type flags after socket creation.  */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
/* do_socket() Must return target values and target errnos. */
static abi_long do_socket(int domain, int type, int protocol)
{
    int target_type = type;
    int ret;

    ret = target_to_host_sock_type(&type);
    if (ret) {
        return ret;
    }

    if (domain == PF_NETLINK && !(
#ifdef CONFIG_RTNETLINK
         protocol == NETLINK_ROUTE ||
#endif
         protocol == NETLINK_KOBJECT_UEVENT ||
         protocol == NETLINK_AUDIT)) {
        return -TARGET_EPROTONOSUPPORT;
    }

    if (domain == AF_PACKET ||
        (domain == AF_INET && type == SOCK_PACKET)) {
        protocol = tswap16(protocol);
    }

    ret = get_errno(socket(domain, type, protocol));
    if (ret >= 0) {
        ret = sock_flags_fixup(ret, target_type);
        if (type == SOCK_PACKET) {
            /* Manage an obsolete case:
             * if socket type is SOCK_PACKET, bind by name
             */
            fd_trans_register(ret, &target_packet_trans);
        } else if (domain == PF_NETLINK) {
            switch (protocol) {
#ifdef CONFIG_RTNETLINK
            case NETLINK_ROUTE:
                fd_trans_register(ret, &target_netlink_route_trans);
                break;
#endif
            case NETLINK_KOBJECT_UEVENT:
                /* nothing to do: messages are strings */
                break;
            case NETLINK_AUDIT:
                fd_trans_register(ret, &target_netlink_audit_trans);
                break;
            default:
                g_assert_not_reached();
            }
        }
    }
    return ret;
}
/* do_bind() Must return target values and target errnos. */
static abi_long do_bind(int sockfd, abi_ulong target_addr,
                        socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(bind(sockfd, addr, addrlen));
}
/* do_connect() Must return target values and target errnos. */
static abi_long do_connect(int sockfd, abi_ulong target_addr,
                           socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(safe_connect(sockfd, addr, addrlen));
}
/* do_sendrecvmsg_locked() Must return target values and target errnos. */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    abi_ulong count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
             */
            msg.msg_name = (void *)-1;
        } else if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    memset(msg.msg_control, 0, msg.msg_controllen);

    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);

    if (count > IOV_MAX) {
        /* sendmsg/recvmsg return a different errno for this condition than
         * readv/writev, so we must catch it here before lock_iovec() does.
         */
        ret = -TARGET_EMSGSIZE;
        goto out2;
    }

    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            void *host_msg;

            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                                       MIN(msg.msg_iov->iov_len, len));
            } else {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                msgp->msg_flags = tswap32(msg.msg_flags);
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                                  msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }
                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}
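/*
 * The control buffer above is sized at twice the guest's msg_controllen
 * because host cmsg headers can be larger than target ones (16 bytes vs
 * 12 for a 32-bit guest on a 64-bit host), so the converted control
 * data may legitimately outgrow the guest's own buffer size.
 */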
static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
                               int flags, int send)
{
    abi_long ret;
    struct target_msghdr *msgp;

    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
                          msgp,
                          target_msg,
                          send ? 1 : 0)) {
        return -TARGET_EFAULT;
    }
    ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
    unlock_user_struct(msgp, target_msg, send ? 0 : 1);
    return ret;
}
/* We don't rely on the C library to have sendmmsg/recvmmsg support,
 * so it might not have this *mmsg-specific flag either.
 */
#ifndef MSG_WAITFORONE
#define MSG_WAITFORONE 0x10000
#endif

static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
3480 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3481 abi_ulong target_addrlen_addr
, int flags
)
3483 socklen_t addrlen
, ret_addrlen
;
3488 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3490 if (target_addr
== 0) {
3491 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
3494 /* linux returns EFAULT if addrlen pointer is invalid */
3495 if (get_user_u32(addrlen
, target_addrlen_addr
))
3496 return -TARGET_EFAULT
;
3498 if ((int)addrlen
< 0) {
3499 return -TARGET_EINVAL
;
3502 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3503 return -TARGET_EFAULT
;
3505 addr
= alloca(addrlen
);
3507 ret_addrlen
= addrlen
;
3508 ret
= get_errno(safe_accept4(fd
, addr
, &ret_addrlen
, host_flags
));
3509 if (!is_error(ret
)) {
3510 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3511 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3512 ret
= -TARGET_EFAULT
;
/* do_getpeername() Must return target values and target errnos. */
static abi_long do_getpeername(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(getpeername(fd, addr, &ret_addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}
/* do_getsockname() Must return target values and target errnos. */
static abi_long do_getsockname(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(getsockname(fd, addr, &ret_addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}
/* do_socketpair() Must return target values and target errnos. */
static abi_long do_socketpair(int domain, int type, int protocol,
                              abi_ulong target_tab_addr)
{
    int tab[2];
    abi_long ret;

    target_to_host_sock_type(&type);

    ret = get_errno(socketpair(domain, type, protocol, tab));
    if (!is_error(ret)) {
        if (put_user_s32(tab[0], target_tab_addr)
            || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
/* do_sendto() Must return target values and target errnos. */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    void *copy_msg = NULL;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (fd_trans_target_to_host_data(fd)) {
        copy_msg = host_msg;
        host_msg = g_malloc(len);
        memcpy(host_msg, copy_msg, len);
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            goto fail;
        }
    }
    if (target_addr) {
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            goto fail;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
fail:
    if (copy_msg) {
        g_free(host_msg);
        host_msg = copy_msg;
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
/* do_recvfrom() Must return target values and target errnos. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret_addrlen = addrlen;
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &ret_addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet.  */
        addrlen = 0; /* To keep compiler quiet.  */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        if (fd_trans_host_to_target_data(fd)) {
            abi_long trans;
            trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
            if (is_error(trans)) {
                ret = trans;
                goto fail;
            }
        }
        if (target_addr) {
            host_to_target_sockaddr(target_addr, addr,
                                    MIN(addrlen, ret_addrlen));
            if (put_user_u32(ret_addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        unlock_user(host_msg, msg, len);
    } else {
fail:
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now that we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
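/*
 * Example of the socketcall calling convention handled above: on i386,
 * a guest socket(AF_INET, SOCK_STREAM, 0) arrives as
 * socketcall(TARGET_SYS_SOCKET, vptr), where vptr points at the three
 * abi_long arguments packed consecutively in guest memory.
 */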
#define N_SHM_REGIONS	32

static struct shm_region {
    abi_ulong start;
    abi_ulong size;
    bool in_use;
} shm_regions[N_SHM_REGIONS];

#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct */
struct target_semid64_ds
{
  struct target_ipc_perm sem_perm;
  abi_ulong sem_otime;
#if TARGET_ABI_BITS == 32
  abi_ulong __unused1;
#endif
  abi_ulong sem_ctime;
#if TARGET_ABI_BITS == 32
  abi_ulong __unused2;
#endif
  abi_ulong sem_nsems;
  abi_ulong __unused3;
  abi_ulong __unused4;
};
#endif
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
    host_sd->sem_otime = tswapal(target_sd->sem_otime);
    host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
    target_sd->sem_otime = tswapal(host_sd->sem_otime);
    target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};

static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}
union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};

union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};
static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = g_try_new(unsigned short, nsems);
    if (!*host_array) {
        return -TARGET_ENOMEM;
    }
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
    if (!array) {
        g_free(*host_array);
        return -TARGET_EFAULT;
    }

    for(i=0; i<nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}
static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
                                               unsigned short **host_array)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    array = lock_user(VERIFY_WRITE, target_addr,
                      nsems*sizeof(unsigned short), 0);
    if (!array)
        return -TARGET_EFAULT;

    for(i=0; i<nsems; i++) {
        __put_user((*host_array)[i], &array[i]);
    }
    g_free(*host_array);
    unlock_user(array, target_addr, 1);

    return 0;
}
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;
    cmd &= 0xff;

    switch( cmd ) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element. To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
         * a swap of the 4 byte val field. In other cases, the data is
         * already in proper host byte order. */
        if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
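/*
 * Concretely, for the SETVAL fixup above: a 64-bit cross-endian guest
 * hands us an 8-byte union whose 4-byte "val" occupies the opposite
 * half after a plain abi_ulong byteswap, so the whole union is swapped
 * first and then the 4-byte val is swapped on its own.
 */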
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};

static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,
                                             unsigned nsops)
{
    struct target_sembuf *target_sembuf;
    int i;

    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops*sizeof(struct target_sembuf), 1);
    if (!target_sembuf)
        return -TARGET_EFAULT;

    for(i=0; i<nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
    }

    unlock_user(target_sembuf, target_addr, 0);

    return 0;
}
#if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
    defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)

/*
 * This macro is required to handle the s390 variants, which passes the
 * arguments in a different order than default.
 */
#ifdef __s390x__
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), (__timeout), (__sops)
#else
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), 0, (__sops), (__timeout)
#endif

static inline abi_long do_semtimedop(int semid,
                                     abi_long ptr,
                                     unsigned nsops,
                                     abi_long timeout, bool time64)
{
    struct sembuf *sops;
    struct timespec ts, *pts = NULL;
    abi_long ret;

    if (timeout) {
        pts = &ts;
        if (time64) {
            if (target_to_host_timespec64(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        }
    }

    if (nsops > TARGET_SEMOPM) {
        return -TARGET_E2BIG;
    }

    sops = g_new(struct sembuf, nsops);

    if (target_to_host_sembuf(sops, ptr, nsops)) {
        g_free(sops);
        return -TARGET_EFAULT;
    }

    ret = -TARGET_ENOSYS;
#ifdef __NR_semtimedop
    ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
                                 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
    }
#endif
    g_free(sops);
    return ret;
}
#endif
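/*
 * Hosts without a direct semtimedop syscall (historically, hosts that
 * multiplex SysV IPC through sys_ipc) are reached via safe_ipc() with
 * IPCOP_semtimedop above; SEMTIMEDOP_IPC_ARGS papers over s390x, whose
 * sys_ipc variant orders the timeout before the sops pointer.
 */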
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qnum;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
};
static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapal(target_md->msg_stime);
    host_md->msg_rtime = tswapal(target_md->msg_rtime);
    host_md->msg_ctime = tswapal(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapal(target_md->msg_qnum);
    host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
    host_md->msg_lspid = tswapal(target_md->msg_lspid);
    host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);
    return 0;
}
static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapal(host_md->msg_stime);
    target_md->msg_rtime = tswapal(host_md->msg_rtime);
    target_md->msg_ctime = tswapal(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapal(host_md->msg_qnum);
    target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
    target_md->msg_lspid = tswapal(host_md->msg_lspid);
    target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
    return 0;
}
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};

static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};

static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgsnd
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
#ifdef __s390x__
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb));
#else
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb, 0));
#endif
    }
#endif
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
#if defined(__sparc__)
/* SPARC for msgrcv it does not use the kludge on final 2 arguments. */
#define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
#elif defined(__s390x__)
/* The s390 sys_ipc variant has only five parameters. */
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp})
#else
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp}), 0
#endif

static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgrcv
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
                        msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
    }
#endif

    if (ret > 0) {
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};

static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
{
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
    return 0;
}
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};

static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
{
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
    return 0;
}
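
/* shmctl command values are shared between target and host on Linux;
 * only the payload structures (shmid_ds, shminfo, shm_info) need to be
 * translated by the helpers above.
 */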
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return TARGET_PAGE_SIZE;
}
#endif
static inline abi_ulong do_shmat(CPUArchState *cpu_env,
                                 int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i, ret;
    abi_ulong shmlba;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }
    if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        /* In order to use the host shmat, we need to honor host SHMLBA. */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr = h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY) ? 0 : PAGE_WRITE));

    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;
}

static inline abi_long do_shmdt(abi_ulong shmaddr)
{
    int i;
    abi_long rv;

    mmap_lock();

    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
            shm_regions[i].in_use = false;
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
            break;
        }
    }
    rv = get_errno(shmdt(g2h(shmaddr)));

    mmap_unlock();

    return rv;
}
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings. */
/* do_ipc() must return target values and target errnos. */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semtimedop(first, ptr, second, 0, false);
        break;
    case IPCOP_semtimedop:
    /*
     * The s390 sys_ipc variant has only five parameters instead of six
     * (as for the default variant) and the only difference is the handling
     * of SEMTIMEDOP, where on s390 the third parameter is used as a pointer
     * to a struct timespec where the generic variant uses the fifth
     * parameter.
     */
#if defined(TARGET_S390X)
        ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
#else
        ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
#endif
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                                tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
                      call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
/* kernel structure types definitions */

#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

#define MAX_STRUCT_SIZE 4096

#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
                            / sizeof(struct fiemap_extent))
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof(*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
                                int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    void *argptr;
    int ret;
    struct ifconf *host_ifconf;
    uint32_t outbufsz;
    const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
    int target_ifreq_size;
    int nb_ifreq;
    int free_buf = 0;
    int i;
    int target_ifc_len;
    abi_long target_ifc_buf;
    int host_ifc_len;
    char *host_ifc_buf;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr)
        return -TARGET_EFAULT;
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
    target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
    target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);

    if (target_ifc_buf != 0) {
        target_ifc_len = host_ifconf->ifc_len;
        nb_ifreq = target_ifc_len / target_ifreq_size;
        host_ifc_len = nb_ifreq * sizeof(struct ifreq);

        outbufsz = sizeof(*host_ifconf) + host_ifc_len;
        if (outbufsz > MAX_STRUCT_SIZE) {
            /*
             * We can't fit all the ifreq entries into the fixed size buffer.
             * Allocate one that is large enough and use it instead.
             */
            host_ifconf = malloc(outbufsz);
            if (!host_ifconf) {
                return -TARGET_ENOMEM;
            }
            memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
            free_buf = 1;
        }
        host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);

        host_ifconf->ifc_len = host_ifc_len;
    } else {
        host_ifc_buf = NULL;
    }
    host_ifconf->ifc_buf = host_ifc_buf;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
    if (!is_error(ret)) {
        /* convert host ifc_len to target ifc_len */

        nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
        target_ifc_len = nb_ifreq * target_ifreq_size;
        host_ifconf->ifc_len = target_ifc_len;

        /* restore target ifc_buf */

        host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;

        /* copy struct ifconf to target user */

        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr)
            return -TARGET_EFAULT;
        thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);

        if (target_ifc_buf != 0) {
            /* copy ifreq[] to target user */
            argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
            for (i = 0; i < nb_ifreq; i++) {
                thunk_convert(argptr + i * target_ifreq_size,
                              host_ifc_buf + i * sizeof(struct ifreq),
                              ifreq_arg_type, THUNK_TARGET);
            }
            unlock_user(argptr, target_ifc_buf, target_ifc_len);
        }
    }

    if (free_buf) {
        free(host_ifconf);
    }

    return ret;
}
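
/* USBDEVFS: the kernel identifies an asynchronous URB by the pointer
 * passed to SUBMITURB, so each guest URB is wrapped in a host-side
 * live_urb (tracked in a hash table keyed by the guest URB address)
 * that lets REAPURB and DISCARDURB map results back to guest memory.
 */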
#if defined(CONFIG_USBFS)
#if HOST_LONG_BITS > 64
#error USBDEVFS thunks do not support >64 bit hosts yet.
#endif
struct live_urb {
    uint64_t target_urb_adr;
    uint64_t target_buf_adr;
    char *target_buf_ptr;
    struct usbdevfs_urb host_urb;
};

static GHashTable *usbdevfs_urb_hashtable(void)
{
    static GHashTable *urb_hashtable;

    if (!urb_hashtable) {
        urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
    }
    return urb_hashtable;
}

static void urb_hashtable_insert(struct live_urb *urb)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    g_hash_table_insert(urb_hashtable, urb, urb);
}

static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
}

static void urb_hashtable_remove(struct live_urb *urb)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    g_hash_table_remove(urb_hashtable, urb);
}

static abi_long
do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
                          int fd, int cmd, abi_long arg)
{
    const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
    const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
    struct live_urb *lurb;
    void *argptr;
    uint64_t hurb;
    int target_size;
    uintptr_t target_urb_adr;
    abi_long ret;

    target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);

    memset(buf_temp, 0, sizeof(uint64_t));
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (is_error(ret)) {
        return ret;
    }

    memcpy(&hurb, buf_temp, sizeof(uint64_t));
    lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
    if (!lurb->target_urb_adr) {
        return -TARGET_EFAULT;
    }
    urb_hashtable_remove(lurb);
    unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
                lurb->host_urb.buffer_length);
    lurb->target_buf_ptr = NULL;

    /* restore the guest buffer pointer */
    lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;

    /* update the guest urb struct */
    argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
    unlock_user(argptr, lurb->target_urb_adr, target_size);

    target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
    /* write back the urb handle */
    argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
    target_urb_adr = lurb->target_urb_adr;
    thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
    unlock_user(argptr, arg, target_size);

    g_free(lurb);
    return ret;
}

static abi_long
do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
                             uint8_t *buf_temp __attribute__((unused)),
                             int fd, int cmd, abi_long arg)
{
    struct live_urb *lurb;

    /* map target address back to host URB with metadata. */
    lurb = urb_hashtable_lookup(arg);
    if (!lurb) {
        return -TARGET_EFAULT;
    }
    return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
}

static abi_long
do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    abi_long ret;
    void *argptr;
    int rw_dir;
    struct live_urb *lurb;

    /*
     * each submitted URB needs to map to a unique ID for the
     * kernel, and that unique ID needs to be a pointer to
     * host memory. hence, we need to malloc for each URB.
     * isochronous transfers have a variable length struct.
     */
    arg_type++;
    target_size = thunk_type_size(arg_type, THUNK_TARGET);

    /* construct host copy of urb and metadata */
    lurb = g_try_malloc0(sizeof(struct live_urb));
    if (!lurb) {
        return -TARGET_ENOMEM;
    }

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    lurb->target_urb_adr = arg;
    lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;

    /* buffer space used depends on endpoint type so lock the entire buffer */
    /* control type urbs should check the buffer contents for true direction */
    rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
    lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
                                     lurb->host_urb.buffer_length, 1);
    if (lurb->target_buf_ptr == NULL) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* update buffer pointer in host copy */
    lurb->host_urb.buffer = lurb->target_buf_ptr;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
    if (is_error(ret)) {
        unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
        g_free(lurb);
    } else {
        urb_hashtable_insert(lurb);
    }

    return ret;
}
#endif /* CONFIG_USBFS */
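
/* Device-mapper ioctls carry a variable-sized payload whose layout
 * depends on the command, described by dm_ioctl.data_start/data_size;
 * they cannot go through the generic thunk path.
 */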
static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                            int cmd, abi_long arg)
{
    void *argptr;
    struct dm_ioctl *host_dm;
    abi_long guest_data;
    uint32_t guest_data_size;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    abi_long ret;
    void *big_buf = NULL;
    char *host_data;

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* buf_temp is too small, so fetch things into a bigger buffer */
    big_buf = g_malloc0(((struct dm_ioctl *)buf_temp)->data_size * 2);
    memcpy(big_buf, buf_temp, target_size);
    buf_temp = big_buf;
    host_dm = big_buf;

    guest_data = arg + host_dm->data_start;
    if ((guest_data - arg) < 0) {
        ret = -TARGET_EINVAL;
        goto out;
    }
    guest_data_size = host_dm->data_size - host_dm->data_start;
    host_data = (char *)host_dm + host_dm->data_start;

    argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }

    switch (ie->host_cmd) {
    case DM_REMOVE_ALL:
    case DM_LIST_DEVICES:
    case DM_DEV_CREATE:
    case DM_DEV_REMOVE:
    case DM_DEV_SUSPEND:
    case DM_DEV_STATUS:
    case DM_DEV_WAIT:
    case DM_TABLE_STATUS:
    case DM_TABLE_CLEAR:
    case DM_TABLE_DEPS:
    case DM_LIST_VERSIONS:
        /* no input data */
        break;
    case DM_DEV_RENAME:
    case DM_DEV_SET_GEOMETRY:
        /* data contains only strings */
        memcpy(host_data, argptr, guest_data_size);
        break;
    case DM_TARGET_MSG:
        memcpy(host_data, argptr, guest_data_size);
        *(uint64_t *)host_data = tswap64(*(uint64_t *)argptr);
        break;
    case DM_TABLE_LOAD:
    {
        void *gspec = argptr;
        void *cur_data = host_data;
        const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
        int spec_size = thunk_type_size(arg_type, 0);
        int i;

        for (i = 0; i < host_dm->target_count; i++) {
            struct dm_target_spec *spec = cur_data;
            uint32_t next;
            int slen;

            thunk_convert(spec, gspec, arg_type, THUNK_HOST);
            slen = strlen((char *)gspec + spec_size) + 1;
            next = spec->next;
            spec->next = sizeof(*spec) + slen;
            strcpy((char *)&spec[1], gspec + spec_size);
            gspec += next;
            cur_data += spec->next;
        }
        break;
    }
    default:
        ret = -TARGET_EINVAL;
        unlock_user(argptr, guest_data, 0);
        goto out;
    }
    unlock_user(argptr, guest_data, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (!is_error(ret)) {
        guest_data = arg + host_dm->data_start;
        guest_data_size = host_dm->data_size - host_dm->data_start;
        argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        switch (ie->host_cmd) {
        case DM_REMOVE_ALL:
        case DM_DEV_CREATE:
        case DM_DEV_REMOVE:
        case DM_DEV_RENAME:
        case DM_DEV_SUSPEND:
        case DM_DEV_STATUS:
        case DM_TABLE_LOAD:
        case DM_TABLE_CLEAR:
        case DM_TARGET_MSG:
        case DM_DEV_SET_GEOMETRY:
            /* no return data */
            break;
        case DM_LIST_DEVICES:
        {
            struct dm_name_list *nl = (void *)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
            int nl_size = 12; /* can't use thunk_size due to alignment */

            while (1) {
                uint32_t next = nl->next;
                if (next) {
                    nl->next = nl_size + (strlen(nl->name) + 1);
                }
                if (remaining_data < nl->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
                strcpy(cur_data + nl_size, nl->name);
                cur_data += nl->next;
                remaining_data -= nl->next;
                if (!next) {
                    break;
                }
                nl = (void *)nl + next;
            }
            break;
        }
        case DM_DEV_WAIT:
        case DM_TABLE_STATUS:
        {
            struct dm_target_spec *spec = (void *)host_dm + host_dm->data_start;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
            int spec_size = thunk_type_size(arg_type, 0);
            int i;

            for (i = 0; i < host_dm->target_count; i++) {
                uint32_t next = spec->next;
                int slen = strlen((char *)&spec[1]) + 1;
                spec->next = (cur_data - argptr) + spec_size + slen;
                if (guest_data_size < spec->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
                strcpy(cur_data + spec_size, (char *)&spec[1]);
                cur_data = argptr + spec->next;
                spec = (void *)host_dm + host_dm->data_start + next;
            }
            break;
        }
        case DM_TABLE_DEPS:
        {
            void *hdata = (void *)host_dm + host_dm->data_start;
            int count = *(uint32_t *)hdata;
            uint64_t *hdev = hdata + 8;
            uint64_t *gdev = argptr + 8;
            int i;

            *(uint32_t *)argptr = tswap32(count);
            for (i = 0; i < count; i++) {
                *gdev = tswap64(*hdev);
                gdev++;
                hdev++;
            }
            break;
        }
        case DM_LIST_VERSIONS:
        {
            struct dm_target_versions *vers = (void *)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
            int vers_size = thunk_type_size(arg_type, 0);

            while (1) {
                uint32_t next = vers->next;
                if (next) {
                    vers->next = vers_size + (strlen(vers->name) + 1);
                }
                if (remaining_data < vers->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
                strcpy(cur_data + vers_size, vers->name);
                cur_data += vers->next;
                remaining_data -= vers->next;
                if (!next) {
                    break;
                }
                vers = (void *)vers + next;
            }
            break;
        }
        default:
            unlock_user(argptr, guest_data, 0);
            ret = -TARGET_EINVAL;
            goto out;
        }
        unlock_user(argptr, guest_data, guest_data_size);

        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
            goto out;
        }
        thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);
    }
out:
    g_free(big_buf);
    return ret;
}
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    void *argptr;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;

    struct blkpg_ioctl_arg *host_blkpg = (void *)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert blkpg */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert blkpg->data */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));

out:
    return ret;
}
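
/* SIOCADDRT/SIOCDELRT: struct rtentry embeds a char *rt_dev string
 * pointer that the generic thunk cannot follow, so the struct is
 * converted field by field and rt_dev is locked separately.
 */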
static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    const StructEntry *se;
    const argtype *field_types;
    const int *dst_offsets, *src_offsets;
    int target_size;
    void *argptr;
    abi_ulong *target_rt_dev_ptr = NULL;
    unsigned long *host_rt_dev_ptr = NULL;
    abi_long ret;
    int i;

    assert(ie->access == IOC_W);
    assert(*arg_type == TYPE_PTR);
    arg_type++;
    assert(*arg_type == TYPE_STRUCT);
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    arg_type++;
    assert(*arg_type == (int)STRUCT_rtentry);
    se = struct_entries + *arg_type++;
    assert(se->convert[0] == NULL);
    /* convert struct here to be able to catch rt_dev string */
    field_types = se->field_types;
    dst_offsets = se->field_offsets[THUNK_HOST];
    src_offsets = se->field_offsets[THUNK_TARGET];
    for (i = 0; i < se->nb_fields; i++) {
        if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
            assert(*field_types == TYPE_PTRVOID);
            target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
            host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
            if (*target_rt_dev_ptr != 0) {
                *host_rt_dev_ptr = (unsigned long)lock_user_string(
                                                  tswapal(*target_rt_dev_ptr));
                if (!*host_rt_dev_ptr) {
                    unlock_user(argptr, arg, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                *host_rt_dev_ptr = 0;
            }
            field_types++;
            continue;
        }
        field_types = thunk_convert(buf_temp + dst_offsets[i],
                                    argptr + src_offsets[i],
                                    field_types, THUNK_HOST);
    }
    unlock_user(argptr, arg, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));

    assert(host_rt_dev_ptr != NULL);
    assert(target_rt_dev_ptr != NULL);
    if (*host_rt_dev_ptr != 0) {
        unlock_user((void *)*host_rt_dev_ptr,
                    *target_rt_dev_ptr, 0);
    }
    return ret;
}
static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int sig = target_to_host_signal(arg);
    return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
}
static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
                                    int fd, int cmd, abi_long arg)
{
    struct timeval tv;
    abi_long ret;

    ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
    if (is_error(ret)) {
        return ret;
    }

    if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
        if (copy_to_user_timeval(arg, &tv)) {
            return -TARGET_EFAULT;
        }
    } else {
        if (copy_to_user_timeval64(arg, &tv)) {
            return -TARGET_EFAULT;
        }
    }

    return ret;
}

static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
                                      int fd, int cmd, abi_long arg)
{
    struct timespec ts;
    abi_long ret;

    ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
    if (is_error(ret)) {
        return ret;
    }

    if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
        if (host_to_target_timespec(arg, &ts)) {
            return -TARGET_EFAULT;
        }
    } else {
        if (host_to_target_timespec64(arg, &ts)) {
            return -TARGET_EFAULT;
        }
    }

    return ret;
}
#if defined(TIOCGPTPEER)
static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
    return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
}
#endif
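
/* DRM_IOCTL_VERSION returns the driver name/date/description strings in
 * caller-provided buffers of caller-chosen length, so the guest buffers
 * must stay locked for the duration of the host ioctl.
 */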
#ifdef CONFIG_DRM
static void unlock_drm_version(struct drm_version *host_ver,
                               struct target_drm_version *target_ver,
                               bool copy)
{
    unlock_user(host_ver->name, target_ver->name,
                copy ? host_ver->name_len : 0);
    unlock_user(host_ver->date, target_ver->date,
                copy ? host_ver->date_len : 0);
    unlock_user(host_ver->desc, target_ver->desc,
                copy ? host_ver->desc_len : 0);
}

static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
                                          struct target_drm_version *target_ver)
{
    memset(host_ver, 0, sizeof(*host_ver));

    __get_user(host_ver->name_len, &target_ver->name_len);
    if (host_ver->name_len) {
        host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
                                   target_ver->name_len, 0);
        if (!host_ver->name) {
            goto err;
        }
    }

    __get_user(host_ver->date_len, &target_ver->date_len);
    if (host_ver->date_len) {
        host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
                                   target_ver->date_len, 0);
        if (!host_ver->date) {
            goto err;
        }
    }

    __get_user(host_ver->desc_len, &target_ver->desc_len);
    if (host_ver->desc_len) {
        host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
                                   target_ver->desc_len, 0);
        if (!host_ver->desc) {
            goto err;
        }
    }

    return 0;
err:
    unlock_drm_version(host_ver, target_ver, false);
    return -TARGET_EFAULT;
}

static inline void host_to_target_drmversion(
                                          struct target_drm_version *target_ver,
                                          struct drm_version *host_ver)
{
    __put_user(host_ver->version_major, &target_ver->version_major);
    __put_user(host_ver->version_minor, &target_ver->version_minor);
    __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
    __put_user(host_ver->name_len, &target_ver->name_len);
    __put_user(host_ver->date_len, &target_ver->date_len);
    __put_user(host_ver->desc_len, &target_ver->desc_len);
    unlock_drm_version(host_ver, target_ver, true);
}

static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg)
{
    struct drm_version *ver;
    struct target_drm_version *target_ver;
    abi_long ret;

    switch (ie->host_cmd) {
    case DRM_IOCTL_VERSION:
        if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
            return -TARGET_EFAULT;
        }
        ver = (struct drm_version *)buf_temp;
        ret = target_to_host_drmversion(ver, target_ver);
        if (!is_error(ret)) {
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
            if (is_error(ret)) {
                unlock_drm_version(ver, target_ver, false);
            } else {
                host_to_target_drmversion(target_ver, ver);
            }
        }
        unlock_user_struct(target_ver, arg, 0);
        return ret;
    }
    return -TARGET_ENOSYS;
}

static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
                                           struct drm_i915_getparam *gparam,
                                           int fd, abi_long arg)
{
    abi_long ret;
    int value;
    struct target_drm_i915_getparam *target_gparam;

    if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
        return -TARGET_EFAULT;
    }

    __get_user(gparam->param, &target_gparam->param);
    gparam->value = &value;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
    put_user_s32(value, target_gparam->value);

    unlock_user_struct(target_gparam, arg, 0);
    return ret;
}

static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
                                  int fd, int cmd, abi_long arg)
{
    switch (ie->host_cmd) {
    case DRM_IOCTL_I915_GETPARAM:
        return do_ioctl_drm_i915_getparam(ie,
                                          (struct drm_i915_getparam *)buf_temp,
                                          fd, arg);
    default:
        return -TARGET_ENOSYS;
    }
}
#endif /* CONFIG_DRM */
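
/* Table of all ioctls QEMU knows how to forward, expanded from the
 * IOCTL* macro invocations in "ioctls.h"; do_ioctl() below scans it
 * linearly for the target command number.
 */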
IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
#define IOCTL_IGNORE(cmd) \
    { TARGET_ ## cmd, 0, #cmd },
#include "ioctls.h"
    { 0, 0, },
};

/* ??? Implement proper locking for ioctls. */
/* do_ioctl() must return target values and target errnos. */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    ie = ioctl_entries;
    for (;;) {
        if (ie->target_cmd == 0) {
            qemu_log_mask(
                LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
    if (ie->do_ioctl) {
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    } else if (!ie->host_cmd) {
        /* Some architectures define BSD ioctls in their headers
           that are not implemented in Linux. */
        return -TARGET_ENOSYS;
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
    case TYPE_LONG:
    case TYPE_ULONG:
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                      (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
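
/* Terminal flag translation tables. Each entry is
 * { target_mask, target_bits, host_mask, host_bits }, consumed by
 * target_to_host_bitmask() and host_to_target_bitmask().
 */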
static const bitmask_transtbl iflag_tbl[] = {
    { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
    { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
    { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
    { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
    { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
    { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
    { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
    { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
    { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
    { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
    { TARGET_IXON, TARGET_IXON, IXON, IXON },
    { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
    { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
    { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
    { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8 },
    { 0, 0, 0, 0 }
};

static const bitmask_transtbl oflag_tbl[] = {
    { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
    { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
    { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
    { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
    { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
    { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
    { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
    { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
    { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
    { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
    { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
    { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
    { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
    { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
    { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
    { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
    { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
    { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
    { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
    { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
    { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
    { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
    { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
    { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
    { 0, 0, 0, 0 }
};

static const bitmask_transtbl cflag_tbl[] = {
    { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
    { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
    { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
    { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
    { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
    { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
    { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
    { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
    { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
    { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
    { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
    { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
    { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
    { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
    { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
    { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
    { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
    { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
    { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
    { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
    { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
    { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
    { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
    { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
    { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
    { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
    { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
    { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
    { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
    { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
    { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
    { 0, 0, 0, 0 }
};

static const bitmask_transtbl lflag_tbl[] = {
    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
    { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC },
    { 0, 0, 0, 0 }
};
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}

static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}

static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
    .print = print_termios,
};
static bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
      MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
      MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
      MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
      MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
      MAP_NORESERVE, MAP_NORESERVE },
    { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
    /* MAP_STACK had been ignored by the kernel for quite some time.
       Recognize it for the target insofar as we do not want to pass
       it through to the host. */
    { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
    { 0, 0, 0, 0 }
};
/*
 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
 *       TARGET_I386 is defined if TARGET_X86_64 is defined
 */
#if defined(TARGET_I386)

/* NOTE: there is really one LDT for all the threads */
static uint8_t *ldt_table;

static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
{
    int size;
    void *p;

    if (!ldt_table)
        return 0;
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
        size = bytecount;
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
    if (!p)
        return -TARGET_EFAULT;
    /* ??? Should this be byteswapped? */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);
    return size;
}

/* XXX: add locking support */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0        &&
             read_exec_only == 1  &&
             seg_32bit == 0       &&
             limit_in_pages == 0  &&
             seg_not_present == 1 &&
             useable == 0)) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack base/limit/flags into the two 32-bit halves of an x86
     * segment descriptor, as the kernel's LDT code does. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ... */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}

/* specific and weird i386 syscalls */
static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
                              unsigned long bytecount)
{
    abi_long ret;

    switch (func) {
    case 0:
        ret = read_ldt(ptr, bytecount);
        break;
    case 1:
        ret = write_ldt(env, ptr, bytecount, 1);
        break;
    case 0x11:
        ret = write_ldt(env, ptr, bytecount, 0);
        break;
    default:
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#if defined(TARGET_ABI32)
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        for (i = TARGET_GDT_ENTRY_TLS_MIN; i <= TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0        &&
             read_exec_only == 1  &&
             seg_32bit == 0       &&
             limit_in_pages == 0  &&
             seg_not_present == 1 &&
             useable == 0)) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ... */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}

static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}

abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    return -TARGET_ENOSYS;
}
#else
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    abi_long ret = 0;
    abi_ulong val;
    int idx;

    switch(code) {
    case TARGET_ARCH_SET_GS:
    case TARGET_ARCH_SET_FS:
        if (code == TARGET_ARCH_SET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        cpu_x86_load_seg(env, idx, 0);
        env->segs[idx].base = addr;
        break;
    case TARGET_ARCH_GET_GS:
    case TARGET_ARCH_GET_FS:
        if (code == TARGET_ARCH_GET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        val = env->segs[idx].base;
        if (put_user(val, addr, abi_ulong))
            ret = -TARGET_EFAULT;
        break;
    default:
        ret = -TARGET_EINVAL;
        break;
    }
    return ret;
}
#endif /* defined(TARGET_ABI32) */

#endif /* defined(TARGET_I386) */
#define NEW_STACK_SIZE 0x40000

static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
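
/* new_thread_info is the handshake payload between do_fork() and
 * clone_func(): the parent fills it in, the child publishes its tid
 * and signals the condition variable once it is ready to run.
 */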
typedef struct {
    CPUArchState *env;
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    pthread_t thread;
    uint32_t tid;
    abi_ulong child_tidptr;
    abi_ulong parent_tidptr;
    sigset_t sigmask;
} new_thread_info;
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = env_cpu(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = sys_gettid();
    task_settid(ts);
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);
    /* Enable signals. */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready. */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state. */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
/* do_fork() must return host values and target errnos (unlike most
   do_*() functions). */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = env_cpu(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    sigset_t sigmask;

    flags &= ~CLONE_IGNORED_FLAGS;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;
        }

        ts = g_new0(TaskState, 1);
        init_task_state(ts);

        /* Grab a mutex so that thread setup appears atomic. */
        pthread_mutex_lock(&clone_lock);

        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent. */
        cpu_clone_regs_child(new_env, newsp, flags);
        cpu_clone_regs_parent(env, flags);
        new_cpu = env_cpu(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;

        if (flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (flags & CLONE_SETTLS) {
            cpu_set_tls (new_env, newtls);
        }

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (flags & CLONE_CHILD_SETTID) {
            info.child_tidptr = child_tidptr;
        }
        if (flags & CLONE_PARENT_SETTID) {
            info.parent_tidptr = parent_tidptr;
        }

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals. */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
        cpu->random_seed = qemu_guest_random_seed_thread_part1();

        /* If this is our first additional thread, we need to ensure we
         * generate code for parallel execution and flush old translations.
         */
        if (!parallel_cpus) {
            parallel_cpus = true;
            tb_flush(cpu);
        }

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed. */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize. */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;
        }

        /* We can't support custom termination signals */
        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process. */
            cpu_clone_regs_child(env, newsp, flags);
            fork_end(1);
            /* There is a race condition here. The parent process could
               theoretically read the TID in the child process before the child
               tid is set. This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping. We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock. */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(sys_gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(sys_gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            cpu_clone_regs_parent(env, flags);
            fork_end(0);
        }
    }
    return ret;
}
/* warning : doesn't handle linux specific flags... */
static int target_to_host_fcntl_cmd(int cmd)
{
    int ret;

    switch(cmd) {
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
    case TARGET_F_OFD_GETLK:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = cmd;
        break;
    case TARGET_F_GETLK:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW:
        ret = F_SETLKW64;
        break;
    case TARGET_F_GETOWN:
        ret = F_GETOWN;
        break;
    case TARGET_F_SETOWN:
        ret = F_SETOWN;
        break;
    case TARGET_F_GETSIG:
        ret = F_GETSIG;
        break;
    case TARGET_F_SETSIG:
        ret = F_SETSIG;
        break;
#if TARGET_ABI_BITS == 32
    case TARGET_F_GETLK64:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK64:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW64:
        ret = F_SETLKW64;
        break;
#endif
    case TARGET_F_SETLEASE:
        ret = F_SETLEASE;
        break;
    case TARGET_F_GETLEASE:
        ret = F_GETLEASE;
        break;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        ret = F_DUPFD_CLOEXEC;
        break;
#endif
    case TARGET_F_NOTIFY:
        ret = F_NOTIFY;
        break;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = F_GETOWN_EX;
        break;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        ret = F_SETOWN_EX;
        break;
#endif
#ifdef F_SETPIPE_SZ
    case TARGET_F_SETPIPE_SZ:
        ret = F_SETPIPE_SZ;
        break;
    case TARGET_F_GETPIPE_SZ:
        ret = F_GETPIPE_SZ;
        break;
#endif
    default:
        ret = -TARGET_EINVAL;
        break;
    }

#if defined(__powerpc64__)
    /* On PPC64, the glibc headers define F_*LK* to 12, 13 and 14, values
     * the kernel does not support. The glibc fcntl call actually adjusts
     * them to 5, 6 and 7 before making the syscall(). Since we make the
     * syscall directly, adjust to what the kernel supports.
     */
    if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
        ret -= F_GETLK64 - 5;
    }
#endif

    return ret;
}
6619 #define FLOCK_TRANSTBL \
6621 TRANSTBL_CONVERT(F_RDLCK); \
6622 TRANSTBL_CONVERT(F_WRLCK); \
6623 TRANSTBL_CONVERT(F_UNLCK); \
6624 TRANSTBL_CONVERT(F_EXLCK); \
6625 TRANSTBL_CONVERT(F_SHLCK); \
6628 static int target_to_host_flock(int type
)
6630 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6632 #undef TRANSTBL_CONVERT
6633 return -TARGET_EINVAL
;
6636 static int host_to_target_flock(int type
)
6638 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6640 #undef TRANSTBL_CONVERT
6641 /* if we don't know how to convert the value coming
6642 * from the host we copy to the target field as-is
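
/*
 * The TRANSTBL_CONVERT trick above is a classic X-macro: one list,
 * FLOCK_TRANSTBL, is expanded twice under different definitions of the
 * conversion macro to get both translation directions from a single
 * source of truth.  A minimal standalone sketch of the pattern
 * (hypothetical names, never compiled):
 */
#if 0
#define COLOR_TABLE  \
    CONVERT(RED);    \
    CONVERT(GREEN);

static int target_to_host_color(int type)
{
#define CONVERT(a) case TARGET_##a: return a
    switch (type) {
    COLOR_TABLE
    }
#undef CONVERT
    return -1;
}
#endif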

static inline abi_long copy_from_user_flock(struct flock64 *fl,
                                            abi_ulong target_flock_addr)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
                                          const struct flock64 *fl)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}

typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);

#if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
                                                   abi_ulong target_flock_addr)
{
    struct target_oabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_oabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
#endif

static inline abi_long copy_from_user_flock64(struct flock64 *fl,
                                              abi_ulong target_flock_addr)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
                                            const struct flock64 *fl)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}

static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock64 fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch (cmd) {
    case TARGET_F_GETLK:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock(arg, &fl64);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETLK64:
    case TARGET_F_OFD_GETLK:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl64);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    case TARGET_F_SETSIG:
        ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
        break;

    case TARGET_F_GETSIG:
        ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
        break;

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
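
/*
 * Every lock command above is funnelled through the host's struct
 * flock64, so a 32-bit guest can still lock full 64-bit file ranges;
 * only the guest-side copy helpers differ per command.  Hypothetical
 * call sketch (never compiled):
 */
#if 0
/* Guest F_GETLK on fd 3, guest struct flock at g_addr: copy in,
 * query the host with the remapped command, copy the result back. */
abi_long r = do_fcntl(3, TARGET_F_GETLK, g_addr);
#endif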

#ifdef USE_UID16

static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}

static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}

static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}

static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}

static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */

/* We must do direct syscalls for setting UID/GID, because we want to
 * implement the Linux system call semantics of "change only for this thread",
 * not the libc/POSIX semantics of "change for all threads in process".
 * (See http://ewontfix.com/17/ for more details.)
 * We use the 32-bit version of the syscalls if present; if it is not
 * then either the host architecture supports 32-bit UIDs natively with
 * the standard syscall, or the 16-bit UID is the best we can do.
 */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif

_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
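
/*
 * Illustration of the semantic gap described above (never compiled):
 * glibc's setuid() wrapper broadcasts the credential change to every
 * thread of the QEMU process via its signal-based protocol, while the
 * raw syscall changes only the calling thread, which is what a guest
 * issuing the kernel syscall directly expects.
 */
#if 0
sys_setuid(1000);  /* raw syscall: this thread only (guest semantics) */
setuid(1000);      /* glibc wrapper: all threads (POSIX semantics)    */
#endif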

void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

    thunk_init(STRUCT_MAX);
#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
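
/*
 * Sketch of the size patching above (hypothetical size value, never
 * compiled): an ioctl table entry declared with the size field all-ones
 * gets the real thunked argument size spliced into the target command
 * number at startup, so later comparisons against guest ioctl numbers
 * match.
 */
#if 0
/* If the pointed-to argument thunks to 24 bytes: */
target_cmd = (target_cmd & ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT))
             | (24 << TARGET_IOC_SIZESHIFT);
#endif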

#ifdef TARGET_NR_truncate64
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif

#ifdef TARGET_NR_ftruncate64
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
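
/*
 * On 32-bit ABIs the 64-bit length arrives split across two registers,
 * and some ABIs require such pairs to start on an even-numbered
 * register, leaving a hole; regpairs_aligned() detects that case so the
 * helpers above can shift the arguments down before target_offset64()
 * glues the halves back together.  Sketch (never compiled):
 */
#if 0
uint64_t len = target_offset64(arg2, arg3);  /* low/high order per ABI */
#endif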

#if defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
                                                 abi_ulong target_addr)
{
    if (target_to_host_timespec(&host_its->it_interval, target_addr +
                                offsetof(struct target_itimerspec,
                                         it_interval)) ||
        target_to_host_timespec(&host_its->it_value, target_addr +
                                offsetof(struct target_itimerspec,
                                         it_value))) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif

#if defined(TARGET_NR_timer_settime64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
                                                   abi_ulong target_addr)
{
    if (target_to_host_timespec64(&host_its->it_interval, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_interval)) ||
        target_to_host_timespec64(&host_its->it_value, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_value))) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif

#if ((defined(TARGET_NR_timerfd_gettime) || \
      defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
      defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
                                                 struct itimerspec *host_its)
{
    if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
                                                       it_interval),
                                &host_its->it_interval) ||
        host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
                                                       it_value),
                                &host_its->it_value)) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif

#if ((defined(TARGET_NR_timerfd_gettime64) || \
      defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
      defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
                                                   struct itimerspec *host_its)
{
    if (host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_interval),
                                  &host_its->it_interval) ||
        host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_value),
                                  &host_its->it_value)) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif

#if defined(TARGET_NR_adjtimex) || \
    (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
static inline abi_long target_to_host_timex(struct timex *host_tx,
                                            abi_long target_addr)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_timex(abi_long target_addr,
                                            struct timex *host_tx)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
#endif

#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
static inline abi_long target_to_host_timex64(struct timex *host_tx,
                                              abi_long target_addr)
{
    struct target__kernel_timex *target_tx;

    if (copy_from_user_timeval64(&host_tx->time, target_addr +
                                 offsetof(struct target__kernel_timex,
                                          time))) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_timex64(abi_long target_addr,
                                              struct timex *host_tx)
{
    struct target__kernel_timex *target_tx;

    if (copy_to_user_timeval64(target_addr +
                               offsetof(struct target__kernel_timex, time),
                               &host_tx->time)) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
#endif

static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
                                               abi_ulong target_addr)
{
    struct target_sigevent *target_sevp;

    if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    /* This union is awkward on 64 bit systems because it has a 32 bit
     * integer and a pointer in it; we follow the conversion approach
     * used for handling sigval types in signal.c so the guest should get
     * the correct value back even if we did a 64 bit byteswap and it's
     * using the 32 bit integer.
     */
    host_sevp->sigev_value.sival_ptr =
        (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
    host_sevp->sigev_signo =
        target_to_host_signal(tswap32(target_sevp->sigev_signo));
    host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
    host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);

    unlock_user_struct(target_sevp, target_addr, 1);
    return 0;
}

#if defined(TARGET_NR_mlockall)
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }
#ifdef MCL_ONFAULT
    if (arg & TARGET_MCL_ONFAULT) {
        result |= MCL_ONFAULT;
    }
#endif

    return result;
}
#endif

#if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
     defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
     defined(TARGET_NR_newfstatat))
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif

#if defined(TARGET_NR_statx) && defined(__NR_statx)
static inline abi_long host_to_target_statx(struct target_statx *host_stx,
                                            abi_ulong target_addr)
{
    struct target_statx *target_stx;

    if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    memset(target_stx, 0, sizeof(*target_stx));

    __put_user(host_stx->stx_mask, &target_stx->stx_mask);
    __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
    __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
    __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
    __put_user(host_stx->stx_uid, &target_stx->stx_uid);
    __put_user(host_stx->stx_gid, &target_stx->stx_gid);
    __put_user(host_stx->stx_mode, &target_stx->stx_mode);
    __put_user(host_stx->stx_ino, &target_stx->stx_ino);
    __put_user(host_stx->stx_size, &target_stx->stx_size);
    __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
    __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
    __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
    __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
    __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
    __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
    __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
    __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
    __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
    __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
    __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
    __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
    __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
    __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);

    unlock_user_struct(target_stx, target_addr, 1);

    return 0;
}
#endif

static int do_sys_futex(int *uaddr, int op, int val,
                        const struct timespec *timeout, int *uaddr2,
                        int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    g_assert_not_reached();
}

static int do_safe_futex(int *uaddr, int op, int val,
                         const struct timespec *timeout, int *uaddr2,
                         int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
                                           val3));
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#endif /* HOST_LONG_BITS == 64 */
    return -TARGET_ENOSYS;
}
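
/*
 * A sketch of the selection logic above (never compiled): 64-bit hosts
 * always have a 64-bit time_t, so the classic __NR_futex suffices; on a
 * 32-bit host the sizeof check steers y2038-safe timespecs to the
 * _time64 syscall when the kernel provides it, falling back to the
 * legacy call otherwise.
 */
#if 0
if (sizeof(((struct timespec *)0)->tv_sec) == 8) {
    /* y2038-safe path: __NR_futex_time64 */
} else {
    /* legacy path: __NR_futex with 32-bit tv_sec */
}
#endif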

/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  However they're probably useless because guest atomic
   operations won't work either.  */
#if defined(TARGET_NR_futex)
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            target_to_host_timespec(pts, timeout);
        } else {
            pts = NULL;
        }
        return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
    case FUTEX_WAKE:
        return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
    case FUTEX_FD:
        return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
                             (base_op == FUTEX_CMP_REQUEUE
                              ? tswap32(val3)
                              : val3));
    default:
        return -TARGET_ENOSYS;
    }
}
#endif

#if defined(TARGET_NR_futex_time64)
static int do_futex_time64(target_ulong uaddr, int op, int val, target_ulong timeout,
                           target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            if (target_to_host_timespec64(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            pts = NULL;
        }
        return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
    case FUTEX_WAKE:
        return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
    case FUTEX_FD:
        return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
                             (base_op == FUTEX_CMP_REQUEUE
                              ? tswap32(val3)
                              : val3));
    default:
        return -TARGET_ENOSYS;
    }
}
#endif

#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);

    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
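
/*
 * struct file_handle is a variable-length object: the kernel appends
 * handle_bytes of opaque data after the fixed header, which is why the
 * handle helpers size their buffers as sizeof(struct file_handle) plus
 * the guest-supplied byte count.  Sketch (hypothetical value, never
 * compiled):
 */
#if 0
unsigned int size = 128;  /* guest's handle_bytes field */
unsigned int total_size = sizeof(struct file_handle) + size;
#endif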

#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif

#if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)

static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
#endif

/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
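
/*
 * The bit layout assumed above matches the kernel's wait status
 * encoding: bits 0-6 carry the terminating signal, bit 7 the core-dump
 * flag, and byte 1 either the exit code or the stopping signal.
 * Worked sketch (never compiled):
 */
#if 0
int status = 0x137f;   /* WIFSTOPPED: stop signal 0x13 in byte 1 */
int guest  = (host_to_target_signal(WSTOPSIG(status)) << 8)
             | (status & 0xff);
#endif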

static int open_self_cmdline(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
    int i;

    for (i = 0; i < bprm->argc; i++) {
        size_t len = strlen(bprm->argv[i]) + 1;

        if (write(fd, bprm->argv[i], len) != len) {
            return -1;
        }
    }

    return 0;
}

static int open_self_maps(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    GSList *map_info = read_self_maps();
    GSList *s;
    int count;

    for (s = map_info; s; s = g_slist_next(s)) {
        MapInfo *e = (MapInfo *) s->data;

        if (h2g_valid(e->start)) {
            unsigned long min = e->start;
            unsigned long max = e->end;
            int flags = page_get_flags(h2g(min));
            const char *path;

            max = h2g_valid(max - 1) ?
                max : (uintptr_t) g2h(GUEST_ADDR_MAX) + 1;

            if (page_check_range(h2g(min), max - min, flags) == -1) {
                continue;
            }

            if (h2g(min) == ts->info->stack_limit) {
                path = "[stack]";
            } else {
                path = e->path;
            }

            count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
                            " %c%c%c%c %08" PRIx64 " %s %"PRId64,
                            h2g(min), h2g(max - 1) + 1,
                            e->is_read ? 'r' : '-',
                            e->is_write ? 'w' : '-',
                            e->is_exec ? 'x' : '-',
                            e->is_priv ? 'p' : '-',
                            (uint64_t) e->offset, e->dev, e->inode);
            if (path) {
                dprintf(fd, "%*s%s\n", 73 - count, "", path);
            } else {
                dprintf(fd, "\n");
            }
        }
    }

    free_self_maps(map_info);

#ifdef TARGET_VSYSCALL_PAGE
    /*
     * We only support execution from the vsyscall page.
     * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
     */
    count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
                    " --xp 00000000 00:00 0",
                    TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
    dprintf(fd, "%*s%s\n", 73 - count, "", "[vsyscall]");
#endif

    return 0;
}

static int open_self_stat(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    g_autoptr(GString) buf = g_string_new(NULL);
    int i;

    for (i = 0; i < 44; i++) {
        if (i == 0) {
            /* pid */
            g_string_printf(buf, FMT_pid " ", getpid());
        } else if (i == 1) {
            /* app name */
            gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
            bin = bin ? bin + 1 : ts->bprm->argv[0];
            g_string_printf(buf, "(%.15s) ", bin);
        } else if (i == 27) {
            /* stack bottom */
            g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
        } else {
            /* for the rest, there is MasterCard */
            g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
        }

        if (write(fd, buf->str, buf->len) != buf->len) {
            return -1;
        }
    }

    return 0;
}

static int open_self_auxv(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        unlock_user(ptr, auxv, len);
    }

    return 0;
}

static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}

#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
    defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}
#endif

#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
static int open_net_route(void *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -1;
    }

    /* read header */

    read = getline(&line, &len, fp);
    dprintf(fd, "%s", line);

    /* read routes */

    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        int fields;

        fields = sscanf(line,
                        "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                        iface, &dest, &gw, &flags, &refcnt, &use, &metric,
                        &mask, &mtu, &window, &irtt);
        if (fields != 11) {
            continue;
        }
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
#endif

#if defined(TARGET_SPARC)
static int open_cpuinfo(void *cpu_env, int fd)
{
    dprintf(fd, "type\t\t: sun4u\n");
    return 0;
}
#endif

#if defined(TARGET_HPPA)
static int open_cpuinfo(void *cpu_env, int fd)
{
    dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
    dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
    dprintf(fd, "capabilities\t: os32\n");
    dprintf(fd, "model\t\t: 9000/778/B160L\n");
    dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
    return 0;
}
#endif

#if defined(TARGET_M68K)
static int open_hardware(void *cpu_env, int fd)
{
    dprintf(fd, "Model:\t\tqemu-m68k\n");
    return 0;
}
#endif

static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;
        int (*fill)(void *cpu_env, int fd);
        int (*cmp)(const char *s1, const char *s2);
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
#if defined(TARGET_SPARC) || defined(TARGET_HPPA)
        { "/proc/cpuinfo", open_cpuinfo, is_proc },
#endif
#if defined(TARGET_M68K)
        { "/proc/hardware", open_hardware, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    if (is_proc_myself(pathname, "exe")) {
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
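
/*
 * The fakes[] table is how the /proc entries above stay plausible for
 * the guest: each entry pairs a match predicate with a fill callback
 * that writes synthetic contents into an unlinked tempfile, whose fd is
 * then handed back as the "opened" file.  Sketch of adding one more
 * entry (hypothetical helper, never compiled):
 */
#if 0
static int open_self_fake(void *cpu_env, int fd)
{
    dprintf(fd, "synthetic contents\n");
    return 0;
}
/* ...then list { "fake", open_self_fake, is_proc_myself } in fakes[]. */
#endif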

#define TIMER_MAGIC 0x0caf0000
#define TIMER_MAGIC_MASK 0xffff0000

/* Convert QEMU provided timer ID back to internal 16bit index format */
static target_timer_t get_timer_id(abi_long arg)
{
    target_timer_t timerid = arg;

    if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
        return -TARGET_EINVAL;
    }

    timerid &= 0xffff;

    if (timerid >= ARRAY_SIZE(g_posix_timers)) {
        return -TARGET_EINVAL;
    }

    return timerid;
}
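
/*
 * Timer IDs handed to the guest carry TIMER_MAGIC in their upper half,
 * so a stray guest integer is very unlikely to alias a live timer slot.
 * Sketch (never compiled):
 */
#if 0
abi_long guest_id = TIMER_MAGIC | 3;          /* as returned to the guest */
target_timer_t idx = get_timer_id(guest_id);  /* recovers index 3 */
#endif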

static int target_to_host_cpu_mask(unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }
    memset(host_mask, 0, host_size);

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val;

        __get_user(val, &target_mask[i]);
        for (j = 0; j < target_bits; j++, bit++) {
            if (val & (1UL << j)) {
                host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
            }
        }
    }

    unlock_user(target_mask, target_addr, 0);
    return 0;
}

static int host_to_target_cpu_mask(const unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val = 0;

        for (j = 0; j < target_bits; j++, bit++) {
            if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
                val |= 1UL << j;
            }
        }
        __put_user(val, &target_mask[i]);
    }

    unlock_user(target_mask, target_addr, target_size);
    return 0;
}
8158 /* This is an internal helper for do_syscall so that it is easier
8159 * to have a single return point, so that actions, such as logging
8160 * of syscall results, can be performed.
8161 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8163 static abi_long
do_syscall1(void *cpu_env
, int num
, abi_long arg1
,
8164 abi_long arg2
, abi_long arg3
, abi_long arg4
,
8165 abi_long arg5
, abi_long arg6
, abi_long arg7
,
8168 CPUState
*cpu
= env_cpu(cpu_env
);
8170 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8171 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8172 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8173 || defined(TARGET_NR_statx)
8176 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8177 || defined(TARGET_NR_fstatfs)
8183 case TARGET_NR_exit
:
8184 /* In old applications this may be used to implement _exit(2).
8185 However in threaded applications it is used for thread termination,
8186 and _exit_group is used for application termination.
8187 Do thread termination if we have more then one thread. */
8189 if (block_signals()) {
8190 return -TARGET_ERESTARTSYS
;
8193 pthread_mutex_lock(&clone_lock
);
8195 if (CPU_NEXT(first_cpu
)) {
8196 TaskState
*ts
= cpu
->opaque
;
8198 object_property_set_bool(OBJECT(cpu
), "realized", false, NULL
);
8199 object_unref(OBJECT(cpu
));
8201 * At this point the CPU should be unrealized and removed
8202 * from cpu lists. We can clean-up the rest of the thread
8203 * data without the lock held.
8206 pthread_mutex_unlock(&clone_lock
);
8208 if (ts
->child_tidptr
) {
8209 put_user_u32(0, ts
->child_tidptr
);
8210 do_sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
8215 rcu_unregister_thread();
8219 pthread_mutex_unlock(&clone_lock
);
8220 preexit_cleanup(cpu_env
, arg1
);
8222 return 0; /* avoid warning */
8223 case TARGET_NR_read
:
8224 if (arg2
== 0 && arg3
== 0) {
8225 return get_errno(safe_read(arg1
, 0, 0));
8227 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
8228 return -TARGET_EFAULT
;
8229 ret
= get_errno(safe_read(arg1
, p
, arg3
));
8231 fd_trans_host_to_target_data(arg1
)) {
8232 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
8234 unlock_user(p
, arg2
, ret
);
8237 case TARGET_NR_write
:
8238 if (arg2
== 0 && arg3
== 0) {
8239 return get_errno(safe_write(arg1
, 0, 0));
8241 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
8242 return -TARGET_EFAULT
;
8243 if (fd_trans_target_to_host_data(arg1
)) {
8244 void *copy
= g_malloc(arg3
);
8245 memcpy(copy
, p
, arg3
);
8246 ret
= fd_trans_target_to_host_data(arg1
)(copy
, arg3
);
8248 ret
= get_errno(safe_write(arg1
, copy
, ret
));
8252 ret
= get_errno(safe_write(arg1
, p
, arg3
));
8254 unlock_user(p
, arg2
, 0);
8257 #ifdef TARGET_NR_open
8258 case TARGET_NR_open
:
8259 if (!(p
= lock_user_string(arg1
)))
8260 return -TARGET_EFAULT
;
8261 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
8262 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
8264 fd_trans_unregister(ret
);
8265 unlock_user(p
, arg1
, 0);
8268 case TARGET_NR_openat
:
8269 if (!(p
= lock_user_string(arg2
)))
8270 return -TARGET_EFAULT
;
8271 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
8272 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
8274 fd_trans_unregister(ret
);
8275 unlock_user(p
, arg2
, 0);
8277 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8278 case TARGET_NR_name_to_handle_at
:
8279 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
8282 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8283 case TARGET_NR_open_by_handle_at
:
8284 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
8285 fd_trans_unregister(ret
);
8288 case TARGET_NR_close
:
8289 fd_trans_unregister(arg1
);
8290 return get_errno(close(arg1
));
8293 return do_brk(arg1
);
8294 #ifdef TARGET_NR_fork
8295 case TARGET_NR_fork
:
8296 return get_errno(do_fork(cpu_env
, TARGET_SIGCHLD
, 0, 0, 0, 0));
8298 #ifdef TARGET_NR_waitpid
8299 case TARGET_NR_waitpid
:
8302 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
8303 if (!is_error(ret
) && arg2
&& ret
8304 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
8305 return -TARGET_EFAULT
;
8309 #ifdef TARGET_NR_waitid
8310 case TARGET_NR_waitid
:
8314 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
8315 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
8316 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
8317 return -TARGET_EFAULT
;
8318 host_to_target_siginfo(p
, &info
);
8319 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
8324 #ifdef TARGET_NR_creat /* not on alpha */
8325 case TARGET_NR_creat
:
8326 if (!(p
= lock_user_string(arg1
)))
8327 return -TARGET_EFAULT
;
8328 ret
= get_errno(creat(p
, arg2
));
8329 fd_trans_unregister(ret
);
8330 unlock_user(p
, arg1
, 0);
8333 #ifdef TARGET_NR_link
8334 case TARGET_NR_link
:
8337 p
= lock_user_string(arg1
);
8338 p2
= lock_user_string(arg2
);
8340 ret
= -TARGET_EFAULT
;
8342 ret
= get_errno(link(p
, p2
));
8343 unlock_user(p2
, arg2
, 0);
8344 unlock_user(p
, arg1
, 0);
8348 #if defined(TARGET_NR_linkat)
8349 case TARGET_NR_linkat
:
8353 return -TARGET_EFAULT
;
8354 p
= lock_user_string(arg2
);
8355 p2
= lock_user_string(arg4
);
8357 ret
= -TARGET_EFAULT
;
8359 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
8360 unlock_user(p
, arg2
, 0);
8361 unlock_user(p2
, arg4
, 0);
8365 #ifdef TARGET_NR_unlink
8366 case TARGET_NR_unlink
:
8367 if (!(p
= lock_user_string(arg1
)))
8368 return -TARGET_EFAULT
;
8369 ret
= get_errno(unlink(p
));
8370 unlock_user(p
, arg1
, 0);
8373 #if defined(TARGET_NR_unlinkat)
8374 case TARGET_NR_unlinkat
:
8375 if (!(p
= lock_user_string(arg2
)))
8376 return -TARGET_EFAULT
;
8377 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
8378 unlock_user(p
, arg2
, 0);
8381 case TARGET_NR_execve
:
8383 char **argp
, **envp
;
8386 abi_ulong guest_argp
;
8387 abi_ulong guest_envp
;
8394 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
8395 if (get_user_ual(addr
, gp
))
8396 return -TARGET_EFAULT
;
8403 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
8404 if (get_user_ual(addr
, gp
))
8405 return -TARGET_EFAULT
;
8411 argp
= g_new0(char *, argc
+ 1);
8412 envp
= g_new0(char *, envc
+ 1);
8414 for (gp
= guest_argp
, q
= argp
; gp
;
8415 gp
+= sizeof(abi_ulong
), q
++) {
8416 if (get_user_ual(addr
, gp
))
8420 if (!(*q
= lock_user_string(addr
)))
8422 total_size
+= strlen(*q
) + 1;
8426 for (gp
= guest_envp
, q
= envp
; gp
;
8427 gp
+= sizeof(abi_ulong
), q
++) {
8428 if (get_user_ual(addr
, gp
))
8432 if (!(*q
= lock_user_string(addr
)))
8434 total_size
+= strlen(*q
) + 1;
8438 if (!(p
= lock_user_string(arg1
)))
8440 /* Although execve() is not an interruptible syscall it is
8441 * a special case where we must use the safe_syscall wrapper:
8442 * if we allow a signal to happen before we make the host
8443 * syscall then we will 'lose' it, because at the point of
8444 * execve the process leaves QEMU's control. So we use the
8445 * safe syscall wrapper to ensure that we either take the
8446 * signal as a guest signal, or else it does not happen
8447 * before the execve completes and makes it the other
8448 * program's problem.
8450 ret
= get_errno(safe_execve(p
, argp
, envp
));
8451 unlock_user(p
, arg1
, 0);
8456 ret
= -TARGET_EFAULT
;
8459 for (gp
= guest_argp
, q
= argp
; *q
;
8460 gp
+= sizeof(abi_ulong
), q
++) {
8461 if (get_user_ual(addr
, gp
)
8464 unlock_user(*q
, addr
, 0);
8466 for (gp
= guest_envp
, q
= envp
; *q
;
8467 gp
+= sizeof(abi_ulong
), q
++) {
8468 if (get_user_ual(addr
, gp
)
8471 unlock_user(*q
, addr
, 0);
8478 case TARGET_NR_chdir
:
8479 if (!(p
= lock_user_string(arg1
)))
8480 return -TARGET_EFAULT
;
8481 ret
= get_errno(chdir(p
));
8482 unlock_user(p
, arg1
, 0);
8484 #ifdef TARGET_NR_time
8485 case TARGET_NR_time
:
8488 ret
= get_errno(time(&host_time
));
8491 && put_user_sal(host_time
, arg1
))
8492 return -TARGET_EFAULT
;
8496 #ifdef TARGET_NR_mknod
8497 case TARGET_NR_mknod
:
8498 if (!(p
= lock_user_string(arg1
)))
8499 return -TARGET_EFAULT
;
8500 ret
= get_errno(mknod(p
, arg2
, arg3
));
8501 unlock_user(p
, arg1
, 0);
8504 #if defined(TARGET_NR_mknodat)
8505 case TARGET_NR_mknodat
:
8506 if (!(p
= lock_user_string(arg2
)))
8507 return -TARGET_EFAULT
;
8508 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
8509 unlock_user(p
, arg2
, 0);
8512 #ifdef TARGET_NR_chmod
8513 case TARGET_NR_chmod
:
8514 if (!(p
= lock_user_string(arg1
)))
8515 return -TARGET_EFAULT
;
8516 ret
= get_errno(chmod(p
, arg2
));
8517 unlock_user(p
, arg1
, 0);
8520 #ifdef TARGET_NR_lseek
8521 case TARGET_NR_lseek
:
8522 return get_errno(lseek(arg1
, arg2
, arg3
));
8524 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8525 /* Alpha specific */
8526 case TARGET_NR_getxpid
:
8527 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
8528 return get_errno(getpid());
8530 #ifdef TARGET_NR_getpid
8531 case TARGET_NR_getpid
:
8532 return get_errno(getpid());
8534 case TARGET_NR_mount
:
8536 /* need to look at the data field */
8540 p
= lock_user_string(arg1
);
8542 return -TARGET_EFAULT
;
8548 p2
= lock_user_string(arg2
);
8551 unlock_user(p
, arg1
, 0);
8553 return -TARGET_EFAULT
;
8557 p3
= lock_user_string(arg3
);
8560 unlock_user(p
, arg1
, 0);
8562 unlock_user(p2
, arg2
, 0);
8563 return -TARGET_EFAULT
;
8569 /* FIXME - arg5 should be locked, but it isn't clear how to
8570 * do that since it's not guaranteed to be a NULL-terminated
8574 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
8576 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
8578 ret
= get_errno(ret
);
8581 unlock_user(p
, arg1
, 0);
8583 unlock_user(p2
, arg2
, 0);
8585 unlock_user(p3
, arg3
, 0);
8589 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8590 #if defined(TARGET_NR_umount)
8591 case TARGET_NR_umount
:
8593 #if defined(TARGET_NR_oldumount)
8594 case TARGET_NR_oldumount
:
8596 if (!(p
= lock_user_string(arg1
)))
8597 return -TARGET_EFAULT
;
8598 ret
= get_errno(umount(p
));
8599 unlock_user(p
, arg1
, 0);
8602 #ifdef TARGET_NR_stime /* not on alpha */
8603 case TARGET_NR_stime
:
8607 if (get_user_sal(ts
.tv_sec
, arg1
)) {
8608 return -TARGET_EFAULT
;
8610 return get_errno(clock_settime(CLOCK_REALTIME
, &ts
));
8613 #ifdef TARGET_NR_alarm /* not on alpha */
8614 case TARGET_NR_alarm
:
8617 #ifdef TARGET_NR_pause /* not on alpha */
8618 case TARGET_NR_pause
:
8619 if (!block_signals()) {
8620 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
8622 return -TARGET_EINTR
;
8624 #ifdef TARGET_NR_utime
8625 case TARGET_NR_utime
:
8627 struct utimbuf tbuf
, *host_tbuf
;
8628 struct target_utimbuf
*target_tbuf
;
8630 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
8631 return -TARGET_EFAULT
;
8632 tbuf
.actime
= tswapal(target_tbuf
->actime
);
8633 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
8634 unlock_user_struct(target_tbuf
, arg2
, 0);
8639 if (!(p
= lock_user_string(arg1
)))
8640 return -TARGET_EFAULT
;
8641 ret
= get_errno(utime(p
, host_tbuf
));
8642 unlock_user(p
, arg1
, 0);
8646 #ifdef TARGET_NR_utimes
8647 case TARGET_NR_utimes
:
8649 struct timeval
*tvp
, tv
[2];
8651 if (copy_from_user_timeval(&tv
[0], arg2
)
8652 || copy_from_user_timeval(&tv
[1],
8653 arg2
+ sizeof(struct target_timeval
)))
8654 return -TARGET_EFAULT
;
8659 if (!(p
= lock_user_string(arg1
)))
8660 return -TARGET_EFAULT
;
8661 ret
= get_errno(utimes(p
, tvp
));
8662 unlock_user(p
, arg1
, 0);
8666 #if defined(TARGET_NR_futimesat)
8667 case TARGET_NR_futimesat
:
8669 struct timeval
*tvp
, tv
[2];
8671 if (copy_from_user_timeval(&tv
[0], arg3
)
8672 || copy_from_user_timeval(&tv
[1],
8673 arg3
+ sizeof(struct target_timeval
)))
8674 return -TARGET_EFAULT
;
8679 if (!(p
= lock_user_string(arg2
))) {
8680 return -TARGET_EFAULT
;
8682 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
8683 unlock_user(p
, arg2
, 0);
#ifdef TARGET_NR_access
    case TARGET_NR_access:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(access(path(p), arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
    case TARGET_NR_faccessat:
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(faccessat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_nice /* not on alpha */
    case TARGET_NR_nice:
        return get_errno(nice(arg1));
#endif
    case TARGET_NR_sync:
        sync();
        return 0;
#if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
    case TARGET_NR_syncfs:
        return get_errno(syncfs(arg1));
#endif
    case TARGET_NR_kill:
        return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
#ifdef TARGET_NR_rename
    case TARGET_NR_rename:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(rename(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_renameat)
    case TARGET_NR_renameat:
        {
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(renameat(arg1, p, arg3, p2));
            unlock_user(p2, arg4, 0);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_renameat2)
    case TARGET_NR_renameat2:
        {
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
            }
            unlock_user(p2, arg4, 0);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_mkdir
    case TARGET_NR_mkdir:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(mkdir(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_mkdirat)
    case TARGET_NR_mkdirat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(mkdirat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_rmdir
    case TARGET_NR_rmdir:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(rmdir(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_dup:
        ret = get_errno(dup(arg1));
        if (ret >= 0) {
            fd_trans_dup(arg1, ret);
        }
        return ret;
#ifdef TARGET_NR_pipe
    case TARGET_NR_pipe:
        return do_pipe(cpu_env, arg1, 0, 0);
#endif
#ifdef TARGET_NR_pipe2
    case TARGET_NR_pipe2:
        return do_pipe(cpu_env, arg1,
                       target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
#endif
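    /*
     * Every successful dup-style syscall in this dispatcher is routed
     * through fd_trans_dup() so that any fd translator registered for the
     * source descriptor (see fd-trans.h) carries over to the duplicate;
     * without this, translated fds such as netlink sockets would silently
     * lose their byte-order conversion after being duplicated.
     */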
    case TARGET_NR_times:
        {
            struct target_tms *tmsp;
            struct tms tms;
            ret = get_errno(times(&tms));
            if (arg1) {
                tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
                if (!tmsp)
                    return -TARGET_EFAULT;
                tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
                tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
                tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
                tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
            }
            if (!is_error(ret))
                ret = host_to_target_clock_t(ret);
        }
        return ret;
    case TARGET_NR_acct:
        if (arg1 == 0) {
            ret = get_errno(acct(NULL));
        } else {
            if (!(p = lock_user_string(arg1))) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(acct(path(p)));
            unlock_user(p, arg1, 0);
        }
        return ret;
#ifdef TARGET_NR_umount2
    case TARGET_NR_umount2:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(umount2(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_ioctl:
        return do_ioctl(arg1, arg2, arg3);
#ifdef TARGET_NR_fcntl
    case TARGET_NR_fcntl:
        return do_fcntl(arg1, arg2, arg3);
#endif
    case TARGET_NR_setpgid:
        return get_errno(setpgid(arg1, arg2));
    case TARGET_NR_umask:
        return get_errno(umask(arg1));
    case TARGET_NR_chroot:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chroot(p));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_dup2
    case TARGET_NR_dup2:
        ret = get_errno(dup2(arg1, arg2));
        if (ret >= 0) {
            fd_trans_dup(arg1, arg2);
        }
        return ret;
#endif
#if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
    case TARGET_NR_dup3:
    {
        int host_flags;

        if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
            return -EINVAL;
        }
        host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
        ret = get_errno(dup3(arg1, arg2, host_flags));
        if (ret >= 0) {
            fd_trans_dup(arg1, arg2);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_getppid /* not on alpha */
    case TARGET_NR_getppid:
        return get_errno(getppid());
#endif
#ifdef TARGET_NR_getpgrp
    case TARGET_NR_getpgrp:
        return get_errno(getpgrp());
#endif
    case TARGET_NR_setsid:
        return get_errno(setsid());
#ifdef TARGET_NR_sigaction
    case TARGET_NR_sigaction:
        {
#if defined(TARGET_ALPHA)
            struct target_sigaction act, oact, *pact = 0;
            struct target_old_sigaction *old_act;
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask);
                act.sa_flags = old_act->sa_flags;
                act.sa_restorer = 0;
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            }
            ret = get_errno(do_sigaction(arg1, pact, &oact));
            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_mask = oact.sa_mask.sig[0];
                old_act->sa_flags = oact.sa_flags;
                unlock_user_struct(old_act, arg3, 1);
            }
#elif defined(TARGET_MIPS)
            struct target_sigaction act, oact, *pact, *old_act;

            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
                act.sa_flags = old_act->sa_flags;
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            } else {
                pact = NULL;
            }

            ret = get_errno(do_sigaction(arg1, pact, &oact));

            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_flags = oact.sa_flags;
                old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
                old_act->sa_mask.sig[1] = 0;
                old_act->sa_mask.sig[2] = 0;
                old_act->sa_mask.sig[3] = 0;
                unlock_user_struct(old_act, arg3, 1);
            }
#else
            struct target_old_sigaction *old_act;
            struct target_sigaction act, oact, *pact;
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask);
                act.sa_flags = old_act->sa_flags;
                act.sa_restorer = old_act->sa_restorer;
#ifdef TARGET_ARCH_HAS_KA_RESTORER
                act.ka_restorer = 0;
#endif
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            } else {
                pact = NULL;
            }
            ret = get_errno(do_sigaction(arg1, pact, &oact));
            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_mask = oact.sa_mask.sig[0];
                old_act->sa_flags = oact.sa_flags;
                old_act->sa_restorer = oact.sa_restorer;
                unlock_user_struct(old_act, arg3, 1);
            }
#endif
        }
        return ret;
#endif
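    /*
     * Note on the sigaction variants above: the old (non-rt) ABI's
     * target_old_sigaction carries a single-word signal mask, so only
     * sa_mask.sig[0] can round-trip through it; the MIPS flavour also
     * clears the remaining words of the returned mask explicitly.
     */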
    case TARGET_NR_rt_sigaction:
        {
#if defined(TARGET_ALPHA)
            /* For Alpha and SPARC this is a 5 argument syscall, with
             * a 'restorer' parameter which must be copied into the
             * sa_restorer field of the sigaction struct.
             * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
             * and arg5 is the sigsetsize.
             * Alpha also has a separate rt_sigaction struct that it uses
             * here; SPARC uses the usual sigaction struct.
             */
            struct target_rt_sigaction *rt_act;
            struct target_sigaction act, oact, *pact = 0;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = rt_act->_sa_handler;
                act.sa_mask = rt_act->sa_mask;
                act.sa_flags = rt_act->sa_flags;
                act.sa_restorer = arg5;
                unlock_user_struct(rt_act, arg2, 0);
                pact = &act;
            }
            ret = get_errno(do_sigaction(arg1, pact, &oact));
            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
                    return -TARGET_EFAULT;
                rt_act->_sa_handler = oact._sa_handler;
                rt_act->sa_mask = oact.sa_mask;
                rt_act->sa_flags = oact.sa_flags;
                unlock_user_struct(rt_act, arg3, 1);
            }
#else
#ifdef TARGET_SPARC
            target_ulong restorer = arg4;
            target_ulong sigsetsize = arg5;
#else
            target_ulong sigsetsize = arg4;
#endif
            struct target_sigaction *act;
            struct target_sigaction *oact;

            if (sigsetsize != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
                    return -TARGET_EFAULT;
                }
#ifdef TARGET_ARCH_HAS_KA_RESTORER
                act->ka_restorer = restorer;
#endif
            } else {
                act = NULL;
            }
            if (arg3) {
                if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
                    ret = -TARGET_EFAULT;
                    goto rt_sigaction_fail;
                }
            } else {
                oact = NULL;
            }
            ret = get_errno(do_sigaction(arg1, act, oact));
        rt_sigaction_fail:
            if (act)
                unlock_user_struct(act, arg2, 0);
            if (oact)
                unlock_user_struct(oact, arg3, 1);
#endif
        }
        return ret;
#ifdef TARGET_NR_sgetmask /* not on alpha */
    case TARGET_NR_sgetmask:
        {
            sigset_t cur_set;
            abi_ulong target_set;
            ret = do_sigprocmask(0, NULL, &cur_set);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &cur_set);
                ret = target_set;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_ssetmask /* not on alpha */
    case TARGET_NR_ssetmask:
        {
            sigset_t set, oset;
            abi_ulong target_set = arg1;
            target_to_host_old_sigset(&set, &target_set);
            ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &oset);
                ret = target_set;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_sigprocmask
    case TARGET_NR_sigprocmask:
        {
#if defined(TARGET_ALPHA)
            sigset_t set, oldset;
            abi_ulong mask;
            int how;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);

            ret = do_sigprocmask(how, &set, &oldset);
            if (!is_error(ret)) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
                ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
            }
#else
            sigset_t set, oldset, *set_ptr;
            int how;

            if (arg2) {
                switch (arg1) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
                if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
                    return -TARGET_EFAULT;
                target_to_host_old_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
            } else {
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
#endif
        }
        return ret;
#endif
    case TARGET_NR_rt_sigprocmask:
        {
            int how = 0;
            sigset_t set, oldset, *set_ptr;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            if (arg2) {
                switch (arg1) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
                if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
                    return -TARGET_EFAULT;
                target_to_host_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
            } else {
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigpending
    case TARGET_NR_sigpending:
        {
            sigset_t set;
            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigpending:
        {
            sigset_t set;

            /* Yes, this check is >, not != like most. We follow the kernel's
             * logic and it does it like this because it implements
             * NR_sigpending through the same code path, and in that case
             * the old_sigset_t is smaller in size.
             */
            if (arg2 > sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigsuspend
    case TARGET_NR_sigsuspend:
        {
            TaskState *ts = cpu->opaque;
#if defined(TARGET_ALPHA)
            abi_ulong mask = arg1;
            target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
#else
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_old_sigset(&ts->sigsuspend_mask, p);
            unlock_user(p, arg1, 0);
#endif
            ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
                                               SIGSET_T_SIZE));
            if (ret != -TARGET_ERESTARTSYS) {
                ts->in_sigsuspend = 1;
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigsuspend:
        {
            TaskState *ts = cpu->opaque;

            if (arg2 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_sigset(&ts->sigsuspend_mask, p);
            unlock_user(p, arg1, 0);
            ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
                                               SIGSET_T_SIZE));
            if (ret != -TARGET_ERESTARTSYS) {
                ts->in_sigsuspend = 1;
            }
        }
        return ret;
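    /*
     * In both sigsuspend flavours above the requested mask is stashed in
     * the TaskState rather than applied directly; ts->in_sigsuspend tells
     * the signal delivery path to restore the original mask once the
     * pending signal has actually been dispatched to the guest.
     */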
#ifdef TARGET_NR_rt_sigtimedwait
    case TARGET_NR_rt_sigtimedwait:
        {
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            if (arg3) {
                puts = &uts;
                if (target_to_host_timespec(puts, arg3)) {
                    return -TARGET_EFAULT;
                }
            } else {
                puts = NULL;
            }
            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
                                  0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_rt_sigtimedwait_time64
    case TARGET_NR_rt_sigtimedwait_time64:
        {
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            if (arg3) {
                puts = &uts;
                if (target_to_host_timespec64(puts, arg3)) {
                    return -TARGET_EFAULT;
                }
            } else {
                puts = NULL;
            }
            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2,
                                  sizeof(target_siginfo_t), 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigqueueinfo:
        {
            siginfo_t uinfo;

            p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg3, 0);
            ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
        }
        return ret;
    case TARGET_NR_rt_tgsigqueueinfo:
        {
            siginfo_t uinfo;

            p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg4, 0);
            ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
        }
        return ret;
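    /*
     * The sigreturn cases below rewrite the guest CPU state from the signal
     * frame, so they must not be interrupted part-way: block_signals()
     * blocks host signals first, and if some were already pending the
     * syscall is restarted so they get processed beforehand.
     */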
#ifdef TARGET_NR_sigreturn
    case TARGET_NR_sigreturn:
        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }
        return do_sigreturn(cpu_env);
#endif
    case TARGET_NR_rt_sigreturn:
        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }
        return do_rt_sigreturn(cpu_env);
    case TARGET_NR_sethostname:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(sethostname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_setrlimit
    case TARGET_NR_setrlimit:
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;
            if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
                return -TARGET_EFAULT;
            rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
            rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
            unlock_user_struct(target_rlim, arg2, 0);
            /*
             * If we just passed through resource limit settings for memory then
             * they would also apply to QEMU's own allocations, and QEMU will
             * crash or hang or die if its allocations fail. Ideally we would
             * track the guest allocations in QEMU and apply the limits ourselves.
             * For now, just tell the guest the call succeeded but don't actually
             * limit anything.
             */
            if (resource != RLIMIT_AS &&
                resource != RLIMIT_DATA &&
                resource != RLIMIT_STACK) {
                return get_errno(setrlimit(resource, &rlim));
            } else {
                return 0;
            }
        }
#endif
#ifdef TARGET_NR_getrlimit
    case TARGET_NR_getrlimit:
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;

            ret = get_errno(getrlimit(resource, &rlim));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                    return -TARGET_EFAULT;
                target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
                target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
                unlock_user_struct(target_rlim, arg2, 1);
            }
        }
        return ret;
#endif
    case TARGET_NR_getrusage:
        {
            struct rusage rusage;
            ret = get_errno(getrusage(arg1, &rusage));
            if (!is_error(ret)) {
                ret = host_to_target_rusage(arg2, &rusage);
            }
        }
        return ret;
#if defined(TARGET_NR_gettimeofday)
    case TARGET_NR_gettimeofday:
        {
            struct timeval tv;
            struct timezone tz;

            ret = get_errno(gettimeofday(&tv, &tz));
            if (!is_error(ret)) {
                if (arg1 && copy_to_user_timeval(arg1, &tv)) {
                    return -TARGET_EFAULT;
                }
                if (arg2 && copy_to_user_timezone(arg2, &tz)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_settimeofday)
    case TARGET_NR_settimeofday:
        {
            struct timeval tv, *ptv = NULL;
            struct timezone tz, *ptz = NULL;

            if (arg1) {
                if (copy_from_user_timeval(&tv, arg1)) {
                    return -TARGET_EFAULT;
                }
                ptv = &tv;
            }
            if (arg2) {
                if (copy_from_user_timezone(&tz, arg2)) {
                    return -TARGET_EFAULT;
                }
                ptz = &tz;
            }
            return get_errno(settimeofday(ptv, ptz));
        }
#endif
#if defined(TARGET_NR_select)
    case TARGET_NR_select:
#if defined(TARGET_WANT_NI_OLD_SELECT)
        /* some architectures used to have old_select here
         * but now ENOSYS it.
         */
        ret = -TARGET_ENOSYS;
#elif defined(TARGET_WANT_OLD_SYS_SELECT)
        ret = do_old_select(arg1);
#else
        ret = do_select(arg1, arg2, arg3, arg4, arg5);
#endif
        return ret;
#endif
#ifdef TARGET_NR_pselect6
    case TARGET_NR_pselect6:
        return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
#endif
#ifdef TARGET_NR_pselect6_time64
    case TARGET_NR_pselect6_time64:
        return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
#endif
#ifdef TARGET_NR_symlink
    case TARGET_NR_symlink:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlink(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_symlinkat)
    case TARGET_NR_symlinkat:
        {
            void *p2;
            p  = lock_user_string(arg1);
            p2 = lock_user_string(arg3);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlinkat(p, arg2, p2));
            unlock_user(p2, arg3, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_readlink
    case TARGET_NR_readlink:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (!arg3) {
                /* Short circuit this for the magic exe check. */
                ret = -TARGET_EINVAL;
            } else if (is_proc_myself((const char *)p, "exe")) {
                char real[PATH_MAX], *temp;
                temp = realpath(exec_path, real);
                /* Return value is # of bytes that we wrote to the buffer. */
                if (temp == NULL) {
                    ret = get_errno(-1);
                } else {
                    /* Don't worry about sign mismatch as earlier mapping
                     * logic would have thrown a bad address error. */
                    ret = MIN(strlen(real), arg3);
                    /* We cannot NUL terminate the string. */
                    memcpy(p2, real, ret);
                }
            } else {
                ret = get_errno(readlink(path(p), p2, arg3));
            }
            unlock_user(p2, arg2, ret);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_readlinkat)
    case TARGET_NR_readlinkat:
        {
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (is_proc_myself((const char *)p, "exe")) {
                char real[PATH_MAX], *temp;
                temp = realpath(exec_path, real);
                ret = temp == NULL ? get_errno(-1) : strlen(real);
                snprintf((char *)p2, arg4, "%s", real);
            } else {
                ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
            }
            unlock_user(p2, arg3, ret);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
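    /*
     * For readlink/readlinkat above, is_proc_myself() intercepts reads of
     * /proc/self/exe (and the /proc/<own-pid>/exe spelling) so the guest
     * sees the path of the emulated binary, exec_path, rather than the
     * QEMU executable that is actually running.
     */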
#ifdef TARGET_NR_swapon
    case TARGET_NR_swapon:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapon(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_reboot:
        if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
            /* arg4 must be ignored in all other cases */
            p = lock_user_string(arg4);
            if (!p) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(reboot(arg1, arg2, arg3, p));
            unlock_user(p, arg4, 0);
        } else {
            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
        }
        return ret;
#ifdef TARGET_NR_mmap
    case TARGET_NR_mmap:
#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
    defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
    || defined(TARGET_S390X)
        {
            abi_ulong *v;
            abi_ulong v1, v2, v3, v4, v5, v6;
            if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
                return -TARGET_EFAULT;
            v1 = tswapal(v[0]);
            v2 = tswapal(v[1]);
            v3 = tswapal(v[2]);
            v4 = tswapal(v[3]);
            v5 = tswapal(v[4]);
            v6 = tswapal(v[5]);
            unlock_user(v, arg1, 0);
            ret = get_errno(target_mmap(v1, v2, v3,
                                        target_to_host_bitmask(v4, mmap_flags_tbl),
                                        v5, v6));
        }
#else
        ret = get_errno(target_mmap(arg1, arg2, arg3,
                                    target_to_host_bitmask(arg4, mmap_flags_tbl),
                                    arg5,
                                    arg6));
#endif
        return ret;
#endif
#ifdef TARGET_NR_mmap2
    case TARGET_NR_mmap2:
#ifndef MMAP_SHIFT
#define MMAP_SHIFT 12
#endif
        ret = target_mmap(arg1, arg2, arg3,
                          target_to_host_bitmask(arg4, mmap_flags_tbl),
                          arg5, arg6 << MMAP_SHIFT);
        return get_errno(ret);
#endif
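    /*
     * mmap2 takes its file offset in 4096-byte units instead of bytes,
     * which is why target_mmap() above is handed arg6 << MMAP_SHIFT; the
     * unit is fixed at 4096 regardless of the target page size, keeping
     * offsets into large files representable in one 32-bit register.
     */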
    case TARGET_NR_munmap:
        return get_errno(target_munmap(arg1, arg2));
    case TARGET_NR_mprotect:
        {
            TaskState *ts = cpu->opaque;
            /* Special hack to detect libc making the stack executable. */
            if ((arg3 & PROT_GROWSDOWN)
                && arg1 >= ts->info->stack_limit
                && arg1 <= ts->info->start_stack) {
                arg3 &= ~PROT_GROWSDOWN;
                arg2 = arg2 + arg1 - ts->info->stack_limit;
                arg1 = ts->info->stack_limit;
            }
        }
        return get_errno(target_mprotect(arg1, arg2, arg3));
#ifdef TARGET_NR_mremap
    case TARGET_NR_mremap:
        return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
#endif
    /* ??? msync/mlock/munlock are broken for softmmu. */
#ifdef TARGET_NR_msync
    case TARGET_NR_msync:
        return get_errno(msync(g2h(arg1), arg2, arg3));
#endif
#ifdef TARGET_NR_mlock
    case TARGET_NR_mlock:
        return get_errno(mlock(g2h(arg1), arg2));
#endif
#ifdef TARGET_NR_munlock
    case TARGET_NR_munlock:
        return get_errno(munlock(g2h(arg1), arg2));
#endif
#ifdef TARGET_NR_mlockall
    case TARGET_NR_mlockall:
        return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
#endif
#ifdef TARGET_NR_munlockall
    case TARGET_NR_munlockall:
        return get_errno(munlockall());
#endif
#ifdef TARGET_NR_truncate
    case TARGET_NR_truncate:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(truncate(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate
    case TARGET_NR_ftruncate:
        return get_errno(ftruncate(arg1, arg2));
#endif
    case TARGET_NR_fchmod:
        return get_errno(fchmod(arg1, arg2));
#if defined(TARGET_NR_fchmodat)
    case TARGET_NR_fchmodat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(fchmodat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_getpriority:
        /* Note that negative values are valid for getpriority, so we must
           differentiate based on errno settings. */
        errno = 0;
        ret = getpriority(arg1, arg2);
        if (ret == -1 && errno != 0) {
            return -host_to_target_errno(errno);
        }
#ifdef TARGET_ALPHA
        /* Return value is the unbiased priority.  Signal no error. */
        ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
#else
        /* Return value is a biased priority to avoid negative numbers. */
        ret = 20 - ret;
#endif
        return ret;
    case TARGET_NR_setpriority:
        return get_errno(setpriority(arg1, arg2, arg3));
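    /*
     * The 20 - ret adjustment above mirrors the kernel ABI: at the syscall
     * level getpriority() returns 20 - prio, so the nice range -20..19
     * maps onto the positive values 1..40 and cannot be confused with an
     * error return; the guest's libc undoes the bias again.
     */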
#ifdef TARGET_NR_statfs
    case TARGET_NR_statfs:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs:
        if (!is_error(ret)) {
            struct target_statfs *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg2, 1);
        }
        return ret;
#endif
#ifdef TARGET_NR_fstatfs
    case TARGET_NR_fstatfs:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs;
#endif
#ifdef TARGET_NR_statfs64
    case TARGET_NR_statfs64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs64:
        if (!is_error(ret)) {
            struct target_statfs64 *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg3, 1);
        }
        return ret;
    case TARGET_NR_fstatfs64:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs64;
#endif
#ifdef TARGET_NR_socketcall
    case TARGET_NR_socketcall:
        return do_socketcall(arg1, arg2);
#endif
#ifdef TARGET_NR_accept
    case TARGET_NR_accept:
        return do_accept4(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_accept4
    case TARGET_NR_accept4:
        return do_accept4(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_bind
    case TARGET_NR_bind:
        return do_bind(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_connect
    case TARGET_NR_connect:
        return do_connect(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getpeername
    case TARGET_NR_getpeername:
        return do_getpeername(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockname
    case TARGET_NR_getsockname:
        return do_getsockname(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockopt
    case TARGET_NR_getsockopt:
        return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_listen
    case TARGET_NR_listen:
        return get_errno(listen(arg1, arg2));
#endif
#ifdef TARGET_NR_recv
    case TARGET_NR_recv:
        return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_recvfrom
    case TARGET_NR_recvfrom:
        return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_recvmsg
    case TARGET_NR_recvmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_send
    case TARGET_NR_send:
        return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_sendmsg
    case TARGET_NR_sendmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 1);
#endif
#ifdef TARGET_NR_sendmmsg
    case TARGET_NR_sendmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
#endif
#ifdef TARGET_NR_recvmmsg
    case TARGET_NR_recvmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
#endif
#ifdef TARGET_NR_sendto
    case TARGET_NR_sendto:
        return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_shutdown
    case TARGET_NR_shutdown:
        return get_errno(shutdown(arg1, arg2));
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
    case TARGET_NR_getrandom:
        p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(getrandom(p, arg2, arg3));
        unlock_user(p, arg1, ret);
        return ret;
#endif
#ifdef TARGET_NR_socket
    case TARGET_NR_socket:
        return do_socket(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_socketpair
    case TARGET_NR_socketpair:
        return do_socketpair(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_setsockopt
    case TARGET_NR_setsockopt:
        return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
#endif
#if defined(TARGET_NR_syslog)
    case TARGET_NR_syslog:
        {
            int len = arg2;

            switch (arg1) {
            case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
            case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
            case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
            case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
            case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
            case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
                return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
            case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
            case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
            case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
                {
                    if (len < 0) {
                        return -TARGET_EINVAL;
                    }
                    if (len == 0) {
                        return 0;
                    }
                    p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
                    unlock_user(p, arg2, arg3);
                    return ret;
                }
            default:
                return -TARGET_EINVAL;
            }
        }
        break;
#endif
    case TARGET_NR_setitimer:
        {
            struct itimerval value, ovalue, *pvalue;

            if (arg2) {
                pvalue = &value;
                if (copy_from_user_timeval(&pvalue->it_interval, arg2)
                    || copy_from_user_timeval(&pvalue->it_value,
                                              arg2 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
            } else {
                pvalue = NULL;
            }
            ret = get_errno(setitimer(arg1, pvalue, &ovalue));
            if (!is_error(ret) && arg3) {
                if (copy_to_user_timeval(arg3,
                                         &ovalue.it_interval)
                    || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
                                            &ovalue.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_getitimer:
        {
            struct itimerval value;

            ret = get_errno(getitimer(arg1, &value));
            if (!is_error(ret) && arg2) {
                if (copy_to_user_timeval(arg2,
                                         &value.it_interval)
                    || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
                                            &value.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#ifdef TARGET_NR_stat
    case TARGET_NR_stat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_lstat
    case TARGET_NR_lstat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_fstat
    case TARGET_NR_fstat:
        {
            ret = get_errno(fstat(arg1, &st));
#if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
        do_stat:
#endif
            if (!is_error(ret)) {
                struct target_stat *target_st;

                if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
                    return -TARGET_EFAULT;
                memset(target_st, 0, sizeof(*target_st));
                __put_user(st.st_dev, &target_st->st_dev);
                __put_user(st.st_ino, &target_st->st_ino);
                __put_user(st.st_mode, &target_st->st_mode);
                __put_user(st.st_uid, &target_st->st_uid);
                __put_user(st.st_gid, &target_st->st_gid);
                __put_user(st.st_nlink, &target_st->st_nlink);
                __put_user(st.st_rdev, &target_st->st_rdev);
                __put_user(st.st_size, &target_st->st_size);
                __put_user(st.st_blksize, &target_st->st_blksize);
                __put_user(st.st_blocks, &target_st->st_blocks);
                __put_user(st.st_atime, &target_st->target_st_atime);
                __put_user(st.st_mtime, &target_st->target_st_mtime);
                __put_user(st.st_ctime, &target_st->target_st_ctime);
#if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
    defined(TARGET_STAT_HAVE_NSEC)
                __put_user(st.st_atim.tv_nsec,
                           &target_st->target_st_atime_nsec);
                __put_user(st.st_mtim.tv_nsec,
                           &target_st->target_st_mtime_nsec);
                __put_user(st.st_ctim.tv_nsec,
                           &target_st->target_st_ctime_nsec);
#endif
                unlock_user_struct(target_st, arg2, 1);
            }
        }
        return ret;
#endif
    case TARGET_NR_vhangup:
        return get_errno(vhangup());
#ifdef TARGET_NR_syscall
    case TARGET_NR_syscall:
        return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
                          arg6, arg7, arg8, 0);
#endif
#if defined(TARGET_NR_wait4)
    case TARGET_NR_wait4:
        {
            int status;
            abi_long status_ptr = arg2;
            struct rusage rusage, *rusage_ptr;
            abi_ulong target_rusage = arg4;
            abi_long rusage_err;
            if (target_rusage) {
                rusage_ptr = &rusage;
            } else {
                rusage_ptr = NULL;
            }
            ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
            if (!is_error(ret)) {
                if (status_ptr && ret) {
                    status = host_to_target_waitstatus(status);
                    if (put_user_s32(status, status_ptr))
                        return -TARGET_EFAULT;
                }
                if (target_rusage) {
                    rusage_err = host_to_target_rusage(target_rusage, &rusage);
                    if (rusage_err) {
                        ret = rusage_err;
                    }
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_swapoff
    case TARGET_NR_swapoff:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapoff(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_sysinfo:
        {
            struct target_sysinfo *target_value;
            struct sysinfo value;
            ret = get_errno(sysinfo(&value));
            if (!is_error(ret) && arg1)
            {
                if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
                    return -TARGET_EFAULT;
                __put_user(value.uptime, &target_value->uptime);
                __put_user(value.loads[0], &target_value->loads[0]);
                __put_user(value.loads[1], &target_value->loads[1]);
                __put_user(value.loads[2], &target_value->loads[2]);
                __put_user(value.totalram, &target_value->totalram);
                __put_user(value.freeram, &target_value->freeram);
                __put_user(value.sharedram, &target_value->sharedram);
                __put_user(value.bufferram, &target_value->bufferram);
                __put_user(value.totalswap, &target_value->totalswap);
                __put_user(value.freeswap, &target_value->freeswap);
                __put_user(value.procs, &target_value->procs);
                __put_user(value.totalhigh, &target_value->totalhigh);
                __put_user(value.freehigh, &target_value->freehigh);
                __put_user(value.mem_unit, &target_value->mem_unit);
                unlock_user_struct(target_value, arg1, 1);
            }
        }
        return ret;
#ifdef TARGET_NR_ipc
    case TARGET_NR_ipc:
        return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_semget
    case TARGET_NR_semget:
        return get_errno(semget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_semop
    case TARGET_NR_semop:
        return do_semtimedop(arg1, arg2, arg3, 0, false);
#endif
#ifdef TARGET_NR_semtimedop
    case TARGET_NR_semtimedop:
        return do_semtimedop(arg1, arg2, arg3, arg4, false);
#endif
#ifdef TARGET_NR_semtimedop_time64
    case TARGET_NR_semtimedop_time64:
        return do_semtimedop(arg1, arg2, arg3, arg4, true);
#endif
#ifdef TARGET_NR_semctl
    case TARGET_NR_semctl:
        return do_semctl(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_msgctl
    case TARGET_NR_msgctl:
        return do_msgctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_msgget
    case TARGET_NR_msgget:
        return get_errno(msgget(arg1, arg2));
#endif
#ifdef TARGET_NR_msgrcv
    case TARGET_NR_msgrcv:
        return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_msgsnd
    case TARGET_NR_msgsnd:
        return do_msgsnd(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_shmget
    case TARGET_NR_shmget:
        return get_errno(shmget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_shmctl
    case TARGET_NR_shmctl:
        return do_shmctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmat
    case TARGET_NR_shmat:
        return do_shmat(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmdt
    case TARGET_NR_shmdt:
        return do_shmdt(arg1);
#endif
    case TARGET_NR_fsync:
        return get_errno(fsync(arg1));
    case TARGET_NR_clone:
        /* Linux manages to have three different orderings for its
         * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
         * match the kernel's CONFIG_CLONE_* settings.
         * Microblaze is further special in that it uses a sixth
         * implicit argument to clone for the TLS pointer.
         */
#if defined(TARGET_MICROBLAZE)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
#elif defined(TARGET_CLONE_BACKWARDS)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
#elif defined(TARGET_CLONE_BACKWARDS2)
        ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
#else
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
#endif
        return ret;
#ifdef __NR_exit_group
        /* new thread calls */
    case TARGET_NR_exit_group:
        preexit_cleanup(cpu_env, arg1);
        return get_errno(exit_group(arg1));
#endif
    case TARGET_NR_setdomainname:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(setdomainname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
    case TARGET_NR_uname:
        /* no need to transcode because we use the linux syscall */
        {
            struct new_utsname * buf;

            if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
                return -TARGET_EFAULT;
            ret = get_errno(sys_uname(buf));
            if (!is_error(ret)) {
                /* Overwrite the native machine name with whatever is being
                   emulated. */
                g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
                          sizeof(buf->machine));
                /* Allow the user to override the reported release. */
                if (qemu_uname_release && *qemu_uname_release) {
                    g_strlcpy(buf->release, qemu_uname_release,
                              sizeof(buf->release));
                }
            }
            unlock_user_struct(buf, arg1, 1);
        }
        return ret;
#ifdef TARGET_I386
    case TARGET_NR_modify_ldt:
        return do_modify_ldt(cpu_env, arg1, arg2, arg3);
#if !defined(TARGET_X86_64)
    case TARGET_NR_vm86:
        return do_vm86(cpu_env, arg1, arg2);
#endif
#endif
#if defined(TARGET_NR_adjtimex)
    case TARGET_NR_adjtimex:
        {
            struct timex host_buf;

            if (target_to_host_timex(&host_buf, arg1) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(adjtimex(&host_buf));
            if (!is_error(ret)) {
                if (host_to_target_timex(arg1, &host_buf) != 0) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime:
        {
            struct timex htx, *phtx = &htx;

            if (target_to_host_timex(phtx, arg2) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(clock_adjtime(arg1, phtx));
            if (!is_error(ret) && phtx) {
                if (host_to_target_timex(arg2, phtx) != 0) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime64:
        {
            struct timex htx;

            if (target_to_host_timex64(&htx, arg2) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(clock_adjtime(arg1, &htx));
            if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
:
10271 return get_errno(getpgid(arg1
));
10272 case TARGET_NR_fchdir
:
10273 return get_errno(fchdir(arg1
));
10274 case TARGET_NR_personality
:
10275 return get_errno(personality(arg1
));
10276 #ifdef TARGET_NR__llseek /* Not on alpha */
10277 case TARGET_NR__llseek
:
10280 #if !defined(__NR_llseek)
10281 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
10283 ret
= get_errno(res
);
10288 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
10290 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
10291 return -TARGET_EFAULT
;
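    /*
     * _llseek splits its 64-bit offset across two 32-bit arguments, high
     * word in arg2 and low word in arg3, so an offset of 0x100000000 shows
     * up as arg2 = 1, arg3 = 0 and is rebuilt above as
     * ((uint64_t)arg2 << 32) | (abi_ulong)arg3; the 64-bit result travels
     * back to the guest through the pointer in arg4.
     */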
#ifdef TARGET_NR_getdents
    case TARGET_NR_getdents:
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
#if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
        {
            struct target_dirent *target_dirp;
            struct linux_dirent *dirp;
            abi_long count = arg3;

            dirp = g_try_malloc(count);
            if (!dirp) {
                return -TARGET_ENOMEM;
            }

            ret = get_errno(sys_getdents(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent *de;
                struct target_dirent *tde;
                int len = ret;
                int reclen, treclen;
                int count1, tnamelen;

                count1 = 0;
                de = dirp;
                if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                    return -TARGET_EFAULT;
                tde = target_dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    tnamelen = reclen - offsetof(struct linux_dirent, d_name);
                    assert(tnamelen >= 0);
                    treclen = tnamelen + offsetof(struct target_dirent, d_name);
                    assert(count1 + treclen <= count);
                    tde->d_reclen = tswap16(treclen);
                    tde->d_ino = tswapal(de->d_ino);
                    tde->d_off = tswapal(de->d_off);
                    memcpy(tde->d_name, de->d_name, tnamelen);
                    de = (struct linux_dirent *)((char *)de + reclen);
                    len -= reclen;
                    tde = (struct target_dirent *)((char *)tde + treclen);
                    count1 += treclen;
                }
                ret = count1;
                unlock_user(target_dirp, arg2, ret);
            }
            g_free(dirp);
        }
#else
        {
            struct linux_dirent *dirp;
            abi_long count = arg3;

            if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                return -TARGET_EFAULT;
            ret = get_errno(sys_getdents(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent *de;
                int len = ret;
                int reclen;
                de = dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    if (reclen > len)
                        break;
                    de->d_reclen = tswap16(reclen);
                    tswapls(&de->d_ino);
                    tswapls(&de->d_off);
                    de = (struct linux_dirent *)((char *)de + reclen);
                    len -= reclen;
                }
            }
            unlock_user(dirp, arg2, ret);
        }
#endif
#else
        /* Implement getdents in terms of getdents64 */
        {
            struct linux_dirent64 *dirp;
            abi_long count = arg3;

            dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
            if (!dirp) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(sys_getdents64(arg1, dirp, count));
            if (!is_error(ret)) {
                /* Convert the dirent64 structs to target dirent. We do this
                 * in-place, since we can guarantee that a target_dirent is no
                 * larger than a dirent64; however this means we have to be
                 * careful to read everything before writing in the new format.
                 */
                struct linux_dirent64 *de;
                struct target_dirent *tde;
                int len = ret;
                int tlen = 0;

                de = dirp;
                tde = (struct target_dirent *)dirp;
                while (len > 0) {
                    int namelen, treclen;
                    int reclen = de->d_reclen;
                    uint64_t ino = de->d_ino;
                    int64_t off = de->d_off;
                    uint8_t type = de->d_type;

                    namelen = strlen(de->d_name);
                    treclen = offsetof(struct target_dirent, d_name)
                        + namelen + 2;
                    treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));

                    memmove(tde->d_name, de->d_name, namelen + 1);
                    tde->d_ino = tswapal(ino);
                    tde->d_off = tswapal(off);
                    tde->d_reclen = tswap16(treclen);
                    /* The target_dirent type is in what was formerly a padding
                     * byte at the end of the structure:
                     */
                    *(((char *)tde) + treclen - 1) = type;

                    de = (struct linux_dirent64 *)((char *)de + reclen);
                    tde = (struct target_dirent *)((char *)tde + treclen);
                    len -= reclen;
                    tlen += treclen;
                }
                ret = tlen;
            }
            unlock_user(dirp, arg2, ret);
        }
#endif
        return ret;
#endif /* TARGET_NR_getdents */
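    /*
     * Record-size arithmetic in the conversions above: a target_dirent
     * needs offsetof(struct target_dirent, d_name) + namelen + 2 bytes
     * before alignment, one extra byte for the name's NUL terminator and
     * one for the d_type byte stored in the final padding byte of the
     * record.
     */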
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
    case TARGET_NR_getdents64:
        {
            struct linux_dirent64 *dirp;
            abi_long count = arg3;
            if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                return -TARGET_EFAULT;
            ret = get_errno(sys_getdents64(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent64 *de;
                int len = ret;
                int reclen;
                de = dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    if (reclen > len)
                        break;
                    de->d_reclen = tswap16(reclen);
                    tswap64s((uint64_t *)&de->d_ino);
                    tswap64s((uint64_t *)&de->d_off);
                    de = (struct linux_dirent64 *)((char *)de + reclen);
                    len -= reclen;
                }
            }
            unlock_user(dirp, arg2, ret);
        }
        return ret;
#endif /* TARGET_NR_getdents64 */
#if defined(TARGET_NR__newselect)
    case TARGET_NR__newselect:
        return do_select(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_poll
    case TARGET_NR_poll:
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
#endif
#ifdef TARGET_NR_ppoll
    case TARGET_NR_ppoll:
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
#endif
#ifdef TARGET_NR_ppoll_time64
    case TARGET_NR_ppoll_time64:
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
#endif
    case TARGET_NR_flock:
        /* NOTE: the flock constant seems to be the same for every
           Linux platform */
        return get_errno(safe_flock(arg1, arg2));
    case TARGET_NR_readv:
        {
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                ret = get_errno(safe_readv(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
    case TARGET_NR_writev:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(safe_writev(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#if defined(TARGET_NR_preadv)
    case TARGET_NR_preadv:
        {
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                unsigned long low, high;

                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_pwritev)
    case TARGET_NR_pwritev:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                unsigned long low, high;

                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
:
10530 return get_errno(getsid(arg1
));
10531 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10532 case TARGET_NR_fdatasync
:
10533 return get_errno(fdatasync(arg1
));
10535 case TARGET_NR_sched_getaffinity
:
10537 unsigned int mask_size
;
10538 unsigned long *mask
;
10541 * sched_getaffinity needs multiples of ulong, so need to take
10542 * care of mismatches between target ulong and host ulong sizes.
10544 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10545 return -TARGET_EINVAL
;
10547 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10549 mask
= alloca(mask_size
);
10550 memset(mask
, 0, mask_size
);
10551 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
10553 if (!is_error(ret
)) {
10555 /* More data returned than the caller's buffer will fit.
10556 * This only happens if sizeof(abi_long) < sizeof(long)
10557 * and the caller passed us a buffer holding an odd number
10558 * of abi_longs. If the host kernel is actually using the
10559 * extra 4 bytes then fail EINVAL; otherwise we can just
10560 * ignore them and only copy the interesting part.
10562 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
10563 if (numcpus
> arg2
* 8) {
10564 return -TARGET_EINVAL
;
10569 if (host_to_target_cpu_mask(mask
, mask_size
, arg3
, ret
)) {
10570 return -TARGET_EFAULT
;
10575 case TARGET_NR_sched_setaffinity
:
10577 unsigned int mask_size
;
10578 unsigned long *mask
;
10581 * sched_setaffinity needs multiples of ulong, so need to take
10582 * care of mismatches between target ulong and host ulong sizes.
10584 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10585 return -TARGET_EINVAL
;
10587 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10588 mask
= alloca(mask_size
);
10590 ret
= target_to_host_cpu_mask(mask
, mask_size
, arg3
, arg2
);
10595 return get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
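    /*
     * Worked example of the mask_size rounding above: a 32-bit guest on a
     * 64-bit host can pass arg2 == 4, which rounds up to mask_size == 8 so
     * the host kernel is always handed whole unsigned longs, even though
     * the guest buffer is smaller than one of them.
     */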
    case TARGET_NR_getcpu:
        {
            unsigned cpu, node;
            ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
                                       arg2 ? &node : NULL,
                                       NULL));
            if (is_error(ret)) {
                return ret;
            }
            if (arg1 && put_user_u32(cpu, arg1)) {
                return -TARGET_EFAULT;
            }
            if (arg2 && put_user_u32(node, arg2)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_sched_setparam:
        {
            struct sched_param *target_schp;
            struct sched_param schp;

            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
                return -TARGET_EFAULT;
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg2, 0);
            return get_errno(sched_setparam(arg1, &schp));
        }
    case TARGET_NR_sched_getparam:
        {
            struct sched_param *target_schp;
            struct sched_param schp;

            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            ret = get_errno(sched_getparam(arg1, &schp));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
                    return -TARGET_EFAULT;
                target_schp->sched_priority = tswap32(schp.sched_priority);
                unlock_user_struct(target_schp, arg2, 1);
            }
        }
        return ret;
    case TARGET_NR_sched_setscheduler:
        {
            struct sched_param *target_schp;
            struct sched_param schp;
            if (arg3 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
                return -TARGET_EFAULT;
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg3, 0);
            return get_errno(sched_setscheduler(arg1, arg2, &schp));
        }
    case TARGET_NR_sched_getscheduler:
        return get_errno(sched_getscheduler(arg1));
    case TARGET_NR_sched_yield:
        return get_errno(sched_yield());
    case TARGET_NR_sched_get_priority_max:
        return get_errno(sched_get_priority_max(arg1));
    case TARGET_NR_sched_get_priority_min:
        return get_errno(sched_get_priority_min(arg1));
#ifdef TARGET_NR_sched_rr_get_interval
    case TARGET_NR_sched_rr_get_interval:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                ret = host_to_target_timespec(arg2, &ts);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_sched_rr_get_interval_time64
    case TARGET_NR_sched_rr_get_interval_time64:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                ret = host_to_target_timespec64(arg2, &ts);
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_nanosleep)
    case TARGET_NR_nanosleep:
        {
            struct timespec req, rem;
            target_to_host_timespec(&req, arg1);
            ret = get_errno(safe_nanosleep(&req, &rem));
            if (is_error(ret) && arg2) {
                host_to_target_timespec(arg2, &rem);
            }
        }
        return ret;
#endif
    case TARGET_NR_prctl:
        switch (arg1) {
        case PR_GET_PDEATHSIG:
        {
            int deathsig;
            ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
            if (!is_error(ret) && arg2
                && put_user_s32(deathsig, arg2)) {
                return -TARGET_EFAULT;
            }
            return ret;
        }
#ifdef PR_GET_NAME
        case PR_GET_NAME:
        {
            void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(arg1, (unsigned long)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 16);
            return ret;
        }
        case PR_SET_NAME:
        {
            void *name = lock_user(VERIFY_READ, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(arg1, (unsigned long)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 0);
            return ret;
        }
#endif
#ifdef TARGET_MIPS
        case TARGET_PR_GET_FP_MODE:
        {
            CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
            ret = 0;
            if (env->CP0_Status & (1 << CP0St_FR)) {
                ret |= TARGET_PR_FP_MODE_FR;
            }
            if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
                ret |= TARGET_PR_FP_MODE_FRE;
            }
            return ret;
        }
        case TARGET_PR_SET_FP_MODE:
        {
            CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
            bool old_fr = env->CP0_Status & (1 << CP0St_FR);
            bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
            bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
            bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;

            const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
                                            TARGET_PR_FP_MODE_FRE;

            /* If nothing to change, return right away, successfully. */
            if (old_fr == new_fr && old_fre == new_fre) {
                return 0;
            }
            /* Check the value is valid */
            if (arg2 & ~known_bits) {
                return -TARGET_EOPNOTSUPP;
            }
            /* Setting FRE without FR is not supported. */
            if (new_fre && !new_fr) {
                return -TARGET_EOPNOTSUPP;
            }
            if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
                /* FR1 is not supported */
                return -TARGET_EOPNOTSUPP;
            }
            if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
                && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
                /* cannot set FR=0 */
                return -TARGET_EOPNOTSUPP;
            }
            if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
                /* Cannot set FRE=1 */
                return -TARGET_EOPNOTSUPP;
            }

            int i;
            fpr_t *fpr = env->active_fpu.fpr;
            for (i = 0; i < 32; i += 2) {
                if (!old_fr && new_fr) {
                    fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
                } else if (old_fr && !new_fr) {
                    fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
                }
            }

            if (new_fr) {
                env->CP0_Status |= (1 << CP0St_FR);
                env->hflags |= MIPS_HFLAG_F64;
            } else {
                env->CP0_Status &= ~(1 << CP0St_FR);
                env->hflags &= ~MIPS_HFLAG_F64;
            }
            if (new_fre) {
                env->CP0_Config5 |= (1 << CP0C5_FRE);
                if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
                    env->hflags |= MIPS_HFLAG_FRE;
                }
            } else {
                env->CP0_Config5 &= ~(1 << CP0C5_FRE);
                env->hflags &= ~MIPS_HFLAG_FRE;
            }

            return 0;
        }
#endif /* MIPS */
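        /*
         * Background for the register loop above: with FR=0 each
         * architectural double is split across an even/odd pair of 32-bit
         * FPRs, so when the FR bit changes direction the high word has to
         * migrate between fpr[i + 1] and the upper half of fpr[i].
         */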
#ifdef TARGET_AARCH64
        case TARGET_PR_SVE_SET_VL:
            /*
             * We cannot support either PR_SVE_SET_VL_ONEXEC or
             * PR_SVE_VL_INHERIT. Note the kernel definition
             * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
             * even though the current architectural maximum is VQ=16.
             */
            ret = -TARGET_EINVAL;
            if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
                && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
                CPUARMState *env = cpu_env;
                ARMCPU *cpu = env_archcpu(env);
                uint32_t vq, old_vq;

                old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
                vq = MAX(arg2 / 16, 1);
                vq = MIN(vq, cpu->sve_max_vq);

                if (vq < old_vq) {
                    aarch64_sve_narrow_vq(env, vq);
                }
                env->vfp.zcr_el[1] = vq - 1;
                arm_rebuild_hflags(env);
                ret = vq * 16;
            }
            return ret;
        case TARGET_PR_SVE_GET_VL:
            ret = -TARGET_EINVAL;
            {
                ARMCPU *cpu = env_archcpu(cpu_env);
                if (cpu_isar_feature(aa64_sve, cpu)) {
                    ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
                }
            }
            return ret;
        case TARGET_PR_PAC_RESET_KEYS:
            {
                CPUARMState *env = cpu_env;
                ARMCPU *cpu = env_archcpu(env);

                if (arg3 || arg4 || arg5) {
                    return -TARGET_EINVAL;
                }
                if (cpu_isar_feature(aa64_pauth, cpu)) {
                    int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
                               TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
                               TARGET_PR_PAC_APGAKEY);
                    int ret = 0;
                    Error *err = NULL;

                    if (arg2 == 0) {
                        arg2 = all;
                    } else if (arg2 & ~all) {
                        return -TARGET_EINVAL;
                    }
                    if (arg2 & TARGET_PR_PAC_APIAKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apia,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (arg2 & TARGET_PR_PAC_APIBKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apib,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (arg2 & TARGET_PR_PAC_APDAKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apda,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (arg2 & TARGET_PR_PAC_APDBKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apdb,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (arg2 & TARGET_PR_PAC_APGAKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apga,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (ret != 0) {
                        /*
                         * Some unknown failure in the crypto. The best
                         * we can do is log it and fail the syscall.
                         * The real syscall cannot fail this way.
                         */
                        qemu_log_mask(LOG_UNIMP,
                                      "PR_PAC_RESET_KEYS: Crypto failure: %s",
                                      error_get_pretty(err));
                        error_free(err);
                        return -TARGET_EIO;
                    }
                    return 0;
                }
            }
            return -TARGET_EINVAL;
#endif /* AARCH64 */
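        /*
         * SVE vector-length arithmetic used above: PR_SVE_SET_VL takes the
         * vector length VL in bytes, VQ counts 128-bit quadwords
         * (VQ = VL / 16), and ZCR_EL1.LEN stores VQ - 1. That is why the
         * code computes vq = MAX(arg2 / 16, 1), writes vq - 1 into
         * zcr_el[1], and reports vq * 16 bytes back to the guest.
         */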
        case PR_GET_SECCOMP:
        case PR_SET_SECCOMP:
            /* Disable seccomp to prevent the target disabling syscalls we
             * need. */
            return -TARGET_EINVAL;
        default:
            /* Most prctl options have no pointer arguments */
            return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
        }
        break;
#ifdef TARGET_NR_arch_prctl
    case TARGET_NR_arch_prctl:
        return do_arch_prctl(cpu_env, arg1, arg2);
#endif
#ifdef TARGET_NR_pread64
    case TARGET_NR_pread64:
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, ret);
        return ret;
    case TARGET_NR_pwrite64:
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, 0);
        return ret;
#endif
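    /*
     * Illustrative sketch (not part of the original source): the
     * pread64/pwrite64 cases above follow the usual guest-memory
     * bracketing pattern: validate-and-map with lock_user(), do the host
     * syscall, then unlock_user() with the number of bytes that must be
     * copied back to the guest (0 when nothing was written).  Shape of
     * the pattern, with hypothetical names:
     */
#if 0
#include <unistd.h>

static abi_long example_read_into_guest(int fd, abi_ulong guest_buf,
                                        abi_ulong len)
{
    void *host_buf = lock_user(VERIFY_WRITE, guest_buf, len, 0);
    abi_long ret;

    if (!host_buf) {
        return -TARGET_EFAULT;        /* guest address range not mapped */
    }
    ret = get_errno(read(fd, host_buf, len));
    /* copy back only the bytes the host call actually produced */
    unlock_user(host_buf, guest_buf, ret);
    return ret;
}
#endif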
    case TARGET_NR_getcwd:
        if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
            return -TARGET_EFAULT;
        ret = get_errno(sys_getcwd1(p, arg2));
        unlock_user(p, arg1, ret);
        return ret;
    case TARGET_NR_capget:
    case TARGET_NR_capset:
    {
        struct target_user_cap_header *target_header;
        struct target_user_cap_data *target_data = NULL;
        struct __user_cap_header_struct header;
        struct __user_cap_data_struct data[2];
        struct __user_cap_data_struct *dataptr = NULL;
        int i, target_datalen;
        int data_items = 1;

        if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
            return -TARGET_EFAULT;
        }
        header.version = tswap32(target_header->version);
        header.pid = tswap32(target_header->pid);

        if (header.version != _LINUX_CAPABILITY_VERSION) {
            /* Version 2 and up takes pointer to two user_data structs */
            data_items = 2;
        }

        target_datalen = sizeof(*target_data) * data_items;

        if (arg2) {
            if (num == TARGET_NR_capget) {
                target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
            } else {
                target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
            }
            if (!target_data) {
                unlock_user_struct(target_header, arg1, 0);
                return -TARGET_EFAULT;
            }

            if (num == TARGET_NR_capset) {
                for (i = 0; i < data_items; i++) {
                    data[i].effective = tswap32(target_data[i].effective);
                    data[i].permitted = tswap32(target_data[i].permitted);
                    data[i].inheritable = tswap32(target_data[i].inheritable);
                }
            }

            dataptr = data;
        }

        if (num == TARGET_NR_capget) {
            ret = get_errno(capget(&header, dataptr));
        } else {
            ret = get_errno(capset(&header, dataptr));
        }

        /* The kernel always updates version for both capget and capset */
        target_header->version = tswap32(header.version);
        unlock_user_struct(target_header, arg1, 1);

        if (arg2) {
            if (num == TARGET_NR_capget) {
                for (i = 0; i < data_items; i++) {
                    target_data[i].effective = tswap32(data[i].effective);
                    target_data[i].permitted = tswap32(data[i].permitted);
                    target_data[i].inheritable = tswap32(data[i].inheritable);
                }
                unlock_user(target_data, arg2, target_datalen);
            } else {
                unlock_user(target_data, arg2, 0);
            }
        }
        return ret;
    }
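    /*
     * Illustrative sketch (not part of the original source): the
     * capability ABI is versioned.  _LINUX_CAPABILITY_VERSION (v1) uses
     * one 32-bit data struct; later versions carry two, covering 64
     * capability bits.  The data_items computation above reduces to:
     */
#if 0
static int example_cap_data_items(uint32_t version)
{
    /* v1 -> one struct; v2/v3 -> two structs (64 capability bits) */
    return (version == _LINUX_CAPABILITY_VERSION) ? 1 : 2;
}
#endif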
    case TARGET_NR_sigaltstack:
        return do_sigaltstack(arg1, arg2,
                              get_sp_from_cpustate((CPUArchState *)cpu_env));
#ifdef CONFIG_SENDFILE
#ifdef TARGET_NR_sendfile
    case TARGET_NR_sendfile:
    {
        off_t *offp = NULL;
        off_t off;
        if (arg3) {
            ret = get_user_sal(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            abi_long ret2 = put_user_sal(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_sendfile64
    case TARGET_NR_sendfile64:
    {
        off_t *offp = NULL;
        off_t off;
        if (arg3) {
            ret = get_user_s64(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            abi_long ret2 = put_user_s64(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#endif
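    /*
     * Illustrative sketch (not part of the original source):
     * sendfile(2)'s offset argument is an in/out parameter.  When the
     * guest passes a non-NULL offset, the cases above read it in, hand
     * the host kernel a local copy, and write the advanced value back on
     * success.  The contract, in host-only terms:
     */
#if 0
#include <sys/sendfile.h>

static ssize_t example_sendfile_at(int out_fd, int in_fd, off_t *pos,
                                   size_t count)
{
    /* kernel reads from *pos and advances it; the fd offset is untouched */
    return sendfile(out_fd, in_fd, pos, count);
}
#endif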
#ifdef TARGET_NR_vfork
    case TARGET_NR_vfork:
        return get_errno(do_fork(cpu_env,
                         CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
                         0, 0, 0, 0));
#endif
#ifdef TARGET_NR_ugetrlimit
    case TARGET_NR_ugetrlimit:
    {
        struct rlimit rlim;
        int resource = target_to_host_resource(arg1);
        ret = get_errno(getrlimit(resource, &rlim));
        if (!is_error(ret)) {
            struct target_rlimit *target_rlim;
            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                return -TARGET_EFAULT;
            target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
            target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_truncate64
    case TARGET_NR_truncate64:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate64
    case TARGET_NR_ftruncate64:
        return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_stat64
    case TARGET_NR_stat64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_lstat64
    case TARGET_NR_lstat64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_fstat64
    case TARGET_NR_fstat64:
        ret = get_errno(fstat(arg1, &st));
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
#ifdef TARGET_NR_fstatat64
    case TARGET_NR_fstatat64:
#endif
#ifdef TARGET_NR_newfstatat
    case TARGET_NR_newfstatat:
#endif
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(fstatat(arg1, path(p), &st, arg4));
        unlock_user(p, arg2, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg3, &st);
        return ret;
#endif
#if defined(TARGET_NR_statx)
    case TARGET_NR_statx:
        {
            struct target_statx *target_stx;
            int dirfd = arg1;
            int flags = arg3;

            p = lock_user_string(arg2);
            if (p == NULL) {
                return -TARGET_EFAULT;
            }
#if defined(__NR_statx)
            {
                /*
                 * It is assumed that struct statx is architecture independent.
                 */
                struct target_statx host_stx;
                int mask = arg4;

                ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
                if (!is_error(ret)) {
                    if (host_to_target_statx(&host_stx, arg5) != 0) {
                        unlock_user(p, arg2, 0);
                        return -TARGET_EFAULT;
                    }
                }

                if (ret != -TARGET_ENOSYS) {
                    unlock_user(p, arg2, 0);
                    return ret;
                }
            }
#endif
            ret = get_errno(fstatat(dirfd, path(p), &st, flags));
            unlock_user(p, arg2, 0);

            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
                    return -TARGET_EFAULT;
                }
                memset(target_stx, 0, sizeof(*target_stx));
                __put_user(major(st.st_dev), &target_stx->stx_dev_major);
                __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
                __put_user(st.st_ino, &target_stx->stx_ino);
                __put_user(st.st_mode, &target_stx->stx_mode);
                __put_user(st.st_uid, &target_stx->stx_uid);
                __put_user(st.st_gid, &target_stx->stx_gid);
                __put_user(st.st_nlink, &target_stx->stx_nlink);
                __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
                __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
                __put_user(st.st_size, &target_stx->stx_size);
                __put_user(st.st_blksize, &target_stx->stx_blksize);
                __put_user(st.st_blocks, &target_stx->stx_blocks);
                __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
                __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
                __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
                unlock_user_struct(target_stx, arg5, 1);
            }

            return ret;
        }
#endif
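    /*
     * Illustrative sketch (not part of the original source): the statx
     * case above tries the host statx(2) first and, when the host kernel
     * lacks it (ENOSYS), falls back to fstatat(2), filling in only the
     * subset of statx fields an ordinary struct stat can provide.  The
     * control flow, reduced to its skeleton with hypothetical helpers:
     */
#if 0
#include <errno.h>
#include <sys/stat.h>

static int example_statx_with_fallback(int dirfd, const char *name,
                                       int flags, unsigned mask,
                                       struct statx *stx)
{
    int ret = example_sys_statx(dirfd, name, flags, mask, stx);
    if (ret != -ENOSYS) {
        return ret;                   /* native statx worked (or failed) */
    }
    /* older host kernel: synthesize the result from fstatat() */
    struct stat st;
    ret = fstatat(dirfd, name, &st, flags);
    if (ret == 0) {
        example_stat_to_statx(&st, stx);  /* copy the recoverable fields */
    }
    return ret;
}
#endif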
#ifdef TARGET_NR_lchown
    case TARGET_NR_lchown:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid
    case TARGET_NR_getuid:
        return get_errno(high2lowuid(getuid()));
#endif
#ifdef TARGET_NR_getgid
    case TARGET_NR_getgid:
        return get_errno(high2lowgid(getgid()));
#endif
#ifdef TARGET_NR_geteuid
    case TARGET_NR_geteuid:
        return get_errno(high2lowuid(geteuid()));
#endif
#ifdef TARGET_NR_getegid
    case TARGET_NR_getegid:
        return get_errno(high2lowgid(getegid()));
#endif
    case TARGET_NR_setreuid:
        return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
    case TARGET_NR_setregid:
        return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
    case TARGET_NR_getgroups:
        {
            int gidsetsize = arg1;
            target_id *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0)
                return ret;
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2,
                                             gidsetsize * sizeof(target_id), 0);
                if (!target_grouplist)
                    return -TARGET_EFAULT;
                for (i = 0; i < ret; i++)
                    target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
                unlock_user(target_grouplist, arg2,
                            gidsetsize * sizeof(target_id));
            }
        }
        return ret;
    case TARGET_NR_setgroups:
        {
            int gidsetsize = arg1;
            target_id *target_grouplist;
            gid_t *grouplist = NULL;
            int i;
            if (gidsetsize) {
                grouplist = alloca(gidsetsize * sizeof(gid_t));
                target_grouplist = lock_user(VERIFY_READ, arg2,
                                             gidsetsize * sizeof(target_id), 1);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < gidsetsize; i++) {
                    grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
                }
                unlock_user(target_grouplist, arg2, 0);
            }
            return get_errno(setgroups(gidsetsize, grouplist));
        }
    case TARGET_NR_fchown:
        return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
#if defined(TARGET_NR_fchownat)
    case TARGET_NR_fchownat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
                                 low2highgid(arg4), arg5));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_setresuid
    case TARGET_NR_setresuid:
        return get_errno(sys_setresuid(low2highuid(arg1),
                                       low2highuid(arg2),
                                       low2highuid(arg3)));
#endif
#ifdef TARGET_NR_getresuid
    case TARGET_NR_getresuid:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_id(high2lowuid(ruid), arg1)
                    || put_user_id(high2lowuid(euid), arg2)
                    || put_user_id(high2lowuid(suid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_getresgid
    case TARGET_NR_setresgid:
        return get_errno(sys_setresgid(low2highgid(arg1),
                                       low2highgid(arg2),
                                       low2highgid(arg3)));
#endif
#ifdef TARGET_NR_getresgid
    case TARGET_NR_getresgid:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_id(high2lowgid(rgid), arg1)
                    || put_user_id(high2lowgid(egid), arg2)
                    || put_user_id(high2lowgid(sgid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown
    case TARGET_NR_chown:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_setuid:
        return get_errno(sys_setuid(low2highuid(arg1)));
    case TARGET_NR_setgid:
        return get_errno(sys_setgid(low2highgid(arg1)));
    case TARGET_NR_setfsuid:
        return get_errno(setfsuid(arg1));
    case TARGET_NR_setfsgid:
        return get_errno(setfsgid(arg1));
#ifdef TARGET_NR_lchown32
    case TARGET_NR_lchown32:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid32
    case TARGET_NR_getuid32:
        return get_errno(getuid());
#endif
#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxuid:
        {
            uid_t euid;
            euid = geteuid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
        }
        return get_errno(getuid());
#endif
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxgid:
        {
            gid_t egid;
            egid = getegid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
        }
        return get_errno(getgid());
#endif
#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_getsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
        case TARGET_GSI_IEEE_FP_CONTROL:
            {
                uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
                uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;

                swcr &= ~SWCR_STATUS_MASK;
                swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;

                if (put_user_u64 (swcr, arg2))
                    return -TARGET_EFAULT;
                ret = 0;
            }
            break;

        /* case GSI_IEEE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel.
           case GSI_UACPROC:
             -- Retrieves current unaligned access state; not much used.
           case GSI_PROC_TYPE:
             -- Retrieves implver information; surely not used.
           case GSI_GET_HWRPB:
             -- Grabs a copy of the HWRPB; surely not used.
        */
        }
        return ret;
#endif
#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_setsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
        case TARGET_SSI_IEEE_FP_CONTROL:
            {
                uint64_t swcr, fpcr;

                if (get_user_u64 (swcr, arg2)) {
                    return -TARGET_EFAULT;
                }

                /*
                 * The kernel calls swcr_update_status to update the
                 * status bits from the fpcr at every point that it
                 * could be queried.  Therefore, we store the status
                 * bits only in FPCR.
                 */
                ((CPUAlphaState *)cpu_env)->swcr
                    = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);

                fpcr = cpu_alpha_load_fpcr(cpu_env);
                fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
                fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
                cpu_alpha_store_fpcr(cpu_env, fpcr);
                ret = 0;
            }
            break;

        case TARGET_SSI_IEEE_RAISE_EXCEPTION:
            {
                uint64_t exc, fpcr, fex;

                if (get_user_u64(exc, arg2)) {
                    return -TARGET_EFAULT;
                }
                exc &= SWCR_STATUS_MASK;
                fpcr = cpu_alpha_load_fpcr(cpu_env);

                /* Old exceptions are not signaled.  */
                fex = alpha_ieee_fpcr_to_swcr(fpcr);
                fex = exc & ~fex;
                fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
                fex &= ((CPUArchState *)cpu_env)->swcr;

                /* Update the hardware fpcr.  */
                fpcr |= alpha_ieee_swcr_to_fpcr(exc);
                cpu_alpha_store_fpcr(cpu_env, fpcr);

                if (fex) {
                    int si_code = TARGET_FPE_FLTUNK;
                    target_siginfo_t info;

                    if (fex & SWCR_TRAP_ENABLE_DNO) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INE) {
                        si_code = TARGET_FPE_FLTRES;
                    }
                    if (fex & SWCR_TRAP_ENABLE_UNF) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_OVF) {
                        si_code = TARGET_FPE_FLTOVF;
                    }
                    if (fex & SWCR_TRAP_ENABLE_DZE) {
                        si_code = TARGET_FPE_FLTDIV;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INV) {
                        si_code = TARGET_FPE_FLTINV;
                    }

                    info.si_signo = SIGFPE;
                    info.si_errno = 0;
                    info.si_code = si_code;
                    info._sifields._sigfault._addr
                        = ((CPUArchState *)cpu_env)->pc;
                    queue_signal((CPUArchState *)cpu_env, info.si_signo,
                                 QEMU_SI_FAULT, &info);
                }
                ret = 0;
            }
            break;

        /* case SSI_NVPAIRS:
             -- Used with SSIN_UACPROC to enable unaligned accesses.
           case SSI_IEEE_STATE_AT_SIGNAL:
           case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel
        */
        }
        return ret;
#endif
#ifdef TARGET_NR_osf_sigprocmask
    /* Alpha specific.  */
    case TARGET_NR_osf_sigprocmask:
        {
            abi_ulong mask;
            int how;
            sigset_t set, oldset;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);
            ret = do_sigprocmask(how, &set, &oldset);
            if (!ret) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_getgid32
    case TARGET_NR_getgid32:
        return get_errno(getgid());
#endif
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        return get_errno(geteuid());
#endif
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        return get_errno(getegid());
#endif
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        return get_errno(setreuid(arg1, arg2));
#endif
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        return get_errno(setregid(arg1, arg2));
#endif
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        {
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0)
                return ret;
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2,
                                             gidsetsize * 4, 0);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < ret; i++)
                    target_grouplist[i] = tswap32(grouplist[i]);
                unlock_user(target_grouplist, arg2, gidsetsize * 4);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
        {
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
            if (!target_grouplist) {
                return -TARGET_EFAULT;
            }
            for (i = 0; i < gidsetsize; i++)
                grouplist[i] = tswap32(target_grouplist[i]);
            unlock_user(target_grouplist, arg2, 0);
            return get_errno(setgroups(gidsetsize, grouplist));
        }
#endif
#ifdef TARGET_NR_fchown32
    case TARGET_NR_fchown32:
        return get_errno(fchown(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_setresuid32
    case TARGET_NR_setresuid32:
        return get_errno(sys_setresuid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresuid32
    case TARGET_NR_getresuid32:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_u32(ruid, arg1)
                    || put_user_u32(euid, arg2)
                    || put_user_u32(suid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setresgid32
    case TARGET_NR_setresgid32:
        return get_errno(sys_setresgid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresgid32
    case TARGET_NR_getresgid32:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_u32(rgid, arg1)
                    || put_user_u32(egid, arg2)
                    || put_user_u32(sgid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        return get_errno(sys_setuid(arg1));
#endif
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        return get_errno(sys_setgid(arg1));
#endif
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        return get_errno(setfsuid(arg1));
#endif
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        return get_errno(setfsgid(arg1));
#endif
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
            if (!a) {
                return -TARGET_ENOMEM;
            }
            p = lock_user_string(arg3);
            if (!p) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(mincore(a, arg2, p));
                unlock_user(p, arg3, ret);
            }
            unlock_user(a, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        /* arm_fadvise64_64 looks like fadvise64_64 but
         * with different argument order: fd, advice, offset, len
         * rather than the usual fd, offset, len, advice.
         * Note that offset and len are both 64-bit so appear as
         * pairs of 32-bit registers.
         */
        ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
                            target_offset64(arg5, arg6), arg2);
        return -host_to_target_errno(ret);
#endif
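    /*
     * Illustrative sketch (not part of the original source): on 32-bit
     * guest ABIs a 64-bit offset arrives as two 32-bit syscall
     * arguments, and target_offset64() reassembles them.  Assuming the
     * usual little-endian argument order (low word first), the
     * reassembly is:
     */
#if 0
#include <stdint.h>

static uint64_t example_offset64(uint32_t word0, uint32_t word1)
{
    /* which word is the high half depends on the target's endianness */
    return ((uint64_t)word1 << 32) | word0;
}
#endif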
#if TARGET_ABI_BITS == 32

#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#if defined(TARGET_PPC) || defined(TARGET_XTENSA)
        /* 6 args: fd, advice, offset (high, low), len (high, low) */
        ret = arg2;
        arg2 = arg3;
        arg3 = arg4;
        arg4 = arg5;
        arg5 = arg6;
        arg6 = ret;
#else
        /* 6 args: fd, offset (high, low), len (high, low), advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in (5,6) and advice in 7 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
            arg6 = arg7;
        }
#endif
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
                            target_offset64(arg4, arg5), arg6);
        return -host_to_target_errno(ret);
#endif

#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
        /* 5 args: fd, offset (high, low), len, advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in 5 and advice in 6 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
        }
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
        return -host_to_target_errno(ret);
#endif
#else /* not a 32-bit ABI */
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
#endif
#ifdef TARGET_S390X
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        }
#endif
        return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
#endif
#endif /* end of 64-bit ABI fadvise handling */
#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        /* A straight passthrough may not be safe because qemu sometimes
           turns private file-backed mappings into anonymous mappings.
           This will break MADV_DONTNEED.
           This is a hint, so ignoring and returning success is ok.  */
        return 0;
#endif
#ifdef TARGET_NR_fcntl64
    case TARGET_NR_fcntl64:
    {
        int cmd;
        struct flock64 fl;
        from_flock64_fn *copyfrom = copy_from_user_flock64;
        to_flock64_fn *copyto = copy_to_user_flock64;

#ifdef TARGET_ARM
        if (!((CPUARMState *)cpu_env)->eabi) {
            copyfrom = copy_from_user_oabi_flock64;
            copyto = copy_to_user_oabi_flock64;
        }
#endif

        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL) {
            return cmd;
        }

        switch (arg2) {
        case TARGET_F_GETLK64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            if (ret == 0) {
                ret = copyto(arg3, &fl);
            }
            break;

        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            break;
        default:
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        return 0;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        return TARGET_PAGE_SIZE;
#endif
    case TARGET_NR_gettid:
        return get_errno(sys_gettid());
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32
        if (regpairs_aligned(cpu_env, num)) {
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
        ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        return ret;
#endif
#ifdef TARGET_NR_setxattr
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    {
        void *p, *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        p = lock_user_string(arg1);
        if (p) {
            if (num == TARGET_NR_listxattr) {
                ret = get_errno(listxattr(p, b, arg3));
            } else {
                ret = get_errno(llistxattr(p, b, arg3));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_flistxattr:
    {
        void *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(flistxattr(arg1, b, arg3));
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_setxattr) {
                    ret = get_errno(setxattr(p, n, v, arg4, arg5));
                } else {
                    ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_fsetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_getxattr) {
                    ret = get_errno(getxattr(p, n, v, arg4));
                } else {
                    ret = get_errno(lgetxattr(p, n, v, arg4));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_fgetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fgetxattr(arg1, n, v, arg4));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
        {
            void *p, *n;
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_removexattr) {
                    ret = get_errno(removexattr(p, n));
                } else {
                    ret = get_errno(lremovexattr(p, n));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
        }
        return ret;
    case TARGET_NR_fremovexattr:
        {
            void *n;
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fremovexattr(arg1, n));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
        }
        return ret;
#endif /* CONFIG_ATTR */
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
        ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
        return 0;
#elif defined(TARGET_CRIS)
        if (arg1 & 0xff)
            ret = -TARGET_EINVAL;
        else {
            ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
            ret = 0;
        }
        return ret;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_set_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            ts->tp_value = arg1;
            return 0;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_get_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            return ts->tp_value;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        return -TARGET_ENOSYS;
#endif
#ifdef TARGET_NR_clock_settime
    case TARGET_NR_clock_settime:
    {
        struct timespec ts;

        ret = target_to_host_timespec(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_settime64
    case TARGET_NR_clock_settime64:
    {
        struct timespec ts;

        ret = target_to_host_timespec64(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime64
    case TARGET_NR_clock_gettime64:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec64(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres_time64
    case TARGET_NR_clock_getres_time64:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec64(arg2, &ts);
        }
        return ret;
    }
#endif
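    /*
     * Illustrative sketch (not part of the original source): the clock_*
     * cases above differ only in which guest timespec layout they copy
     * across.  The "time64" variants use a layout with 64-bit fields
     * even on 32-bit guests.  A minimal model of the host-to-guest copy,
     * with hypothetical guest-side struct layouts:
     */
#if 0
#include <stdint.h>
#include <time.h>

struct example_target_timespec {
    int32_t tv_sec;                   /* 32-bit on classic 32-bit ABIs */
    int32_t tv_nsec;
};

struct example_target_timespec64 {
    int64_t tv_sec;                   /* always 64-bit */
    int64_t tv_nsec;
};

static void example_host_to_target_ts(struct example_target_timespec *dst,
                                      const struct timespec *src)
{
    /* swap to guest byte order; truncates tv_sec as the 32-bit ABI does */
    dst->tv_sec = tswap32(src->tv_sec);
    dst->tv_nsec = tswap32(src->tv_nsec);
}
#endif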
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;
        if (target_to_host_timespec(&ts, arg3)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));
        /*
         * if the call is interrupted by a signal handler, it fails
         * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
         * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
         */
        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec(arg4, &ts)) {
            return -TARGET_EFAULT;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep_time64
    case TARGET_NR_clock_nanosleep_time64:
    {
        struct timespec ts;

        if (target_to_host_timespec64(&ts, arg3)) {
            return -TARGET_EFAULT;
        }

        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));

        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec64(arg4, &ts)) {
            return -TARGET_EFAULT;
        }
        return ret;
    }
#endif
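    /*
     * Illustrative sketch (not part of the original source): the
     * remainder write-back above mirrors how a guest is expected to use
     * the call, i.e. re-issue a relative sleep with the returned
     * remaining time after a signal.  Host-side usage of the same
     * contract:
     */
#if 0
#include <errno.h>
#include <time.h>

static void example_sleep_all(struct timespec req)
{
    struct timespec rem;
    /* relative sleep: on EINTR the unslept time is returned in rem */
    while (clock_nanosleep(CLOCK_MONOTONIC, 0, &req, &rem) == EINTR) {
        req = rem;
    }
}
#endif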
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
    case TARGET_NR_set_tid_address:
        return get_errno(set_tid_address((int *)g2h(arg1)));
#endif

    case TARGET_NR_tkill:
        return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));

    case TARGET_NR_tgkill:
        return get_errno(safe_tgkill((int)arg1, (int)arg2,
                         target_to_host_signal(arg3)));
#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
    case TARGET_NR_get_robust_list:
        /* The ABI for supporting robust futexes has userspace pass
         * the kernel a pointer to a linked list which is updated by
         * userspace after the syscall; the list is walked by the kernel
         * when the thread exits. Since the linked list in QEMU guest
         * memory isn't a valid linked list for the host and we have
         * no way to reliably intercept the thread-death event, we can't
         * support these. Silently return ENOSYS so that guest userspace
         * falls back to a non-robust futex implementation (which should
         * be OK except in the corner case of the guest crashing while
         * holding a mutex that is shared with another process via
         * shared memory).
         */
        return -TARGET_ENOSYS;
#endif
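    /*
     * Illustrative sketch (not part of the original source): what the
     * guest libc does with this syscall, and why it cannot simply be
     * forwarded.  The registered list head lives in guest memory with
     * guest layout; the host kernel would walk nodes it cannot
     * interpret when the thread dies.  Guest-side registration looks
     * like:
     */
#if 0
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static struct robust_list_head example_head = {
    .list = { &example_head.list },   /* empty circular list */
    .futex_offset = 0,
};

static long example_register(void)
{
    /* the kernel remembers this pointer per-thread and walks it on exit */
    return syscall(SYS_set_robust_list, &example_head, sizeof(example_head));
}
#endif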
#if defined(TARGET_NR_utimensat)
    case TARGET_NR_utimensat:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec(ts + 1, arg3 +
                                            sizeof(struct target_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                if (!(p = lock_user_string(arg2))) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_utimensat_time64
    case TARGET_NR_utimensat_time64:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec64(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec64(ts + 1, arg3 +
                                    sizeof(struct target__kernel_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                p = lock_user_string(arg2);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_futex
    case TARGET_NR_futex:
        return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_futex_time64
    case TARGET_NR_futex_time64:
        return do_futex_time64(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(sys_inotify_init());
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
    case TARGET_NR_inotify_init1:
        ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
                                          fcntl_flags_tbl)));
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        return get_errno(sys_inotify_rm_watch(arg1, arg2));
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            struct mq_attr posix_mq_attr;
            struct mq_attr *pposix_mq_attr;
            int host_flags;

            host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
            pposix_mq_attr = NULL;
            if (arg4) {
                if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
                    return -TARGET_EFAULT;
                }
                pposix_mq_attr = &posix_mq_attr;
            }
            p = lock_user_string(arg1 - 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
            unlock_user (p, arg1, 0);
        }
        return ret;

    case TARGET_NR_mq_unlink:
        p = lock_user_string(arg1 - 1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(mq_unlink(p));
        unlock_user (p, arg1, 0);
        return ret;
#ifdef TARGET_NR_mq_timedsend
    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user (p, arg2, arg3);
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedsend_time64
    case TARGET_NR_mq_timedsend_time64:
        {
            struct timespec ts;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user(p, arg2, arg3);
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedreceive
    case TARGET_NR_mq_timedreceive:
        {
            struct timespec ts;
            unsigned int prio;

            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user (p, arg2, arg3);
            if (arg4 != 0)
                put_user_u32(prio, arg4);
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedreceive_time64
    case TARGET_NR_mq_timedreceive_time64:
        {
            struct timespec ts;
            unsigned int prio;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user(p, arg2, arg3);
            if (arg4 != 0)
                put_user_u32(prio, arg4);
        }
        return ret;
#endif
    /* Not implemented for now... */
    /* case TARGET_NR_mq_notify: */
    /*     break; */

    case TARGET_NR_mq_getsetattr:
        {
            struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
            ret = 0;
            if (arg2 != 0) {
                copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
                ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
                                           &posix_mq_attr_out));
            } else if (arg3 != 0) {
                ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
            }
            if (ret == 0 && arg3 != 0) {
                copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
            }
        }
        return ret;
#endif
#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            ret = get_errno(tee(arg1, arg2, arg3, arg4));
        }
        return ret;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                if (get_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
                ploff_in = &loff_in;
            }
            if (arg4) {
                if (get_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
            if (arg2) {
                if (put_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
            }
            if (arg4) {
                if (put_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
    {
        int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
        if (arg2 & TARGET_O_NONBLOCK) {
            host_flags |= O_NONBLOCK;
        }
        if (arg2 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;
        }
        ret = get_errno(eventfd(arg1, host_flags));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
    }
#endif
#endif /* CONFIG_EVENTFD */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(TARGET_NR_sync_file_range2) || \
    defined(TARGET_NR_arm_sync_file_range)
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
#endif
#if defined(TARGET_NR_arm_sync_file_range)
    case TARGET_NR_arm_sync_file_range:
#endif
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        return ret;
#endif
#endif
#if defined(TARGET_NR_signalfd4)
    case TARGET_NR_signalfd4:
        return do_signalfd4(arg1, arg2, arg4);
#endif
#if defined(TARGET_NR_signalfd)
    case TARGET_NR_signalfd:
        return do_signalfd4(arg1, arg2, 0);
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        return get_errno(epoll_create(arg1));
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        return get_errno(epoll_create1(target_to_host_bitmask(arg1,
                                                              fcntl_flags_tbl)));
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            if (arg2 != EPOLL_CTL_DEL) {
                struct target_epoll_event *target_ep;
                if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                    return -TARGET_EFAULT;
                }
                ep.events = tswap32(target_ep->events);
                /*
                 * The epoll_data_t union is just opaque data to the kernel,
                 * so we transfer all 64 bits across and need not worry what
                 * actual data type it is.
                 */
                ep.data.u64 = tswap64(target_ep->data.u64);
                unlock_user_struct(target_ep, arg4, 0);
            }
            /*
             * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
             * non-null pointer, even though this argument is ignored.
             */
            epp = &ep;
        }
        return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
    }
#endif
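    /*
     * Illustrative sketch (not part of the original source): because the
     * kernel never interprets epoll_data_t, transferring the full 64-bit
     * union image (as done above with tswap64) is correct no matter
     * which member the guest actually stored.  The equivalence, in
     * miniature:
     */
#if 0
#include <sys/epoll.h>

static void example_copy_epoll_data(epoll_data_t *dst,
                                    const epoll_data_t *src)
{
    /* copying u64 copies ptr/fd/u32 too: they share the same storage */
    dst->u64 = src->u64;
}
#endif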
#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(TARGET_NR_epoll_pwait)
    case TARGET_NR_epoll_pwait:
#endif
    {
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
            return -TARGET_EINVAL;
        }

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            return -TARGET_EFAULT;
        }

        ep = g_try_new(struct epoll_event, maxevents);
        if (!ep) {
            unlock_user(target_ep, arg2, 0);
            return -TARGET_ENOMEM;
        }

        switch (num) {
#if defined(TARGET_NR_epoll_pwait)
        case TARGET_NR_epoll_pwait:
        {
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg5) {
                if (arg6 != sizeof(target_sigset_t)) {
                    ret = -TARGET_EINVAL;
                    break;
                }

                target_set = lock_user(VERIFY_READ, arg5,
                                       sizeof(target_sigset_t), 1);
                if (!target_set) {
                    ret = -TARGET_EFAULT;
                    break;
                }
                target_to_host_sigset(set, target_set);
                unlock_user(target_set, arg5, 0);
            } else {
                set = NULL;
            }

            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             set, SIGSET_T_SIZE));
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             NULL, 0));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
            unlock_user(target_ep, arg2,
                        ret * sizeof(struct target_epoll_event));
        } else {
            unlock_user(target_ep, arg2, 0);
        }
        g_free(ep);
        return ret;
    }
#endif
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        int resource = target_to_host_resource(arg2);

        if (arg3 && (resource != RLIMIT_AS &&
                     resource != RLIMIT_DATA &&
                     resource != RLIMIT_STACK)) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                return -TARGET_EFAULT;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                return -TARGET_EFAULT;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (name) {
            ret = get_errno(gethostname(name, arg2));
            unlock_user(name, arg1, arg2);
        } else {
            ret = -TARGET_EFAULT;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* should use start_exclusive from main.c */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal((CPUArchState *)cpu_env, info.si_signo,
                         QEMU_SI_FAULT, &info);
            ret = 0xdeadbeef;
        }
        if (mem_value == arg2)
            put_user_u32(arg1, arg6);
        return mem_value;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
        /* Like the kernel implementation and the
           qemu arm barrier, no-op this? */
        return 0;
#endif
#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers + timer_index;

            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    return ret;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                phtimer = NULL;
            } else {
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    }
#endif
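    /*
     * Illustrative sketch (not part of the original source): the
     * guest-visible timer id above is the index into g_posix_timers
     * tagged with TIMER_MAGIC so that stray integers can be rejected
     * when they come back in through timer_settime() and friends.  The
     * encode/decode pair, in outline, with hypothetical constants:
     */
#if 0
#define EXAMPLE_TIMER_MAGIC      0x0caf0000
#define EXAMPLE_TIMER_MAGIC_MASK 0xffff0000

static int example_encode_timer_id(int index)
{
    return EXAMPLE_TIMER_MAGIC | index;
}

static int example_decode_timer_id(int id)
{
    if ((id & EXAMPLE_TIMER_MAGIC_MASK) != EXAMPLE_TIMER_MAGIC) {
        return -1;                       /* not one of our ids */
    }
    return id & ~EXAMPLE_TIMER_MAGIC_MASK;
}
#endif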
#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_settime64
    case TARGET_NR_timer_settime64:
    {
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec64(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_gettime64
    case TARGET_NR_timer_gettime64:
    {
        /* args: timer_t timerid, struct itimerspec64 *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec64(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_delete(htimer));
            g_posix_timers[timerid] = 0;
        }
        return ret;
    }
#endif
#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        return get_errno(timerfd_create(arg1,
                         target_to_host_bitmask(arg2, fcntl_flags_tbl)));
#endif

#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime64:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
        {
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime64:
        {
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec64(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    case TARGET_NR_ioprio_get:
        return get_errno(ioprio_get(arg1, arg2));
#endif

#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    case TARGET_NR_ioprio_set:
        return get_errno(ioprio_set(arg1, arg2, arg3));
#endif

#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        return get_errno(setns(arg1, arg2));
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        return get_errno(unshare(arg1));
#endif
#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
    case TARGET_NR_kcmp:
        return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
#endif
#ifdef TARGET_NR_swapcontext
    case TARGET_NR_swapcontext:
        /* PowerPC specific.  */
        return do_swapcontext(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_memfd_create
    case TARGET_NR_memfd_create:
        p = lock_user_string(arg1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(memfd_create(p, arg2));
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined TARGET_NR_membarrier && defined __NR_membarrier
    case TARGET_NR_membarrier:
        return get_errno(membarrier(arg1, arg2));
#endif

    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
        return -TARGET_ENOSYS;
    }
    return ret;
}
abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
                    abi_long arg2, abi_long arg3, abi_long arg4,
                    abi_long arg5, abi_long arg6, abi_long arg7,
                    abi_long arg8)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long ret;

#ifdef DEBUG_ERESTARTSYS
    /* Debug-only code for exercising the syscall-restart code paths
     * in the per-architecture cpu main loops: restart every syscall
     * the guest makes once before letting it through.
     */
    {
        static bool flag;
        flag = !flag;
        if (flag) {
            return -TARGET_ERESTARTSYS;
        }
    }
#endif

    record_syscall_start(cpu, num, arg1,
                         arg2, arg3, arg4, arg5, arg6, arg7, arg8);

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
    }

    ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
                      arg5, arg6, arg7, arg8);

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_syscall_ret(cpu_env, num, ret, arg1, arg2,
                          arg3, arg4, arg5, arg6);
    }

    record_syscall_return(cpu, num, ret);
    return ret;
}