/*
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/path.h"
#include "qemu/memfd.h"
#include "qemu/queue.h"
#include <sys/mount.h>
#include <sys/fsuid.h>
#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <linux/capability.h>
#include <sys/timex.h>
#include <sys/socket.h>
#include <linux/sockios.h>
#include <sys/times.h>
#include <sys/statfs.h>
#include <sys/sysinfo.h>
#include <sys/signalfd.h>
//#include <sys/user.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/if_tun.h>
#include <linux/in6.h>
#include <linux/errqueue.h>
#include <linux/random.h>
#include <sys/timerfd.h>
#include <sys/eventfd.h>
#include <sys/epoll.h>
#include "qemu/xattr.h"
#ifdef CONFIG_SENDFILE
#include <sys/sendfile.h>
#endif
#ifdef HAVE_SYS_KCOV_H
#include <sys/kcov.h>
#endif
#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/mtio.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include <linux/fb.h>
#if defined(CONFIG_USBFS)
#include <linux/usbdevice_fs.h>
#include <linux/usb/ch9.h>
#endif
#include <linux/vt.h>
#include <linux/dm-ioctl.h>
#include <linux/reboot.h>
#include <linux/route.h>
#include <linux/filter.h>
#include <linux/blkpg.h>
#include <netpacket/packet.h>
#include <linux/netlink.h>
#include <linux/if_alg.h>
#include <linux/rtc.h>
#include <sound/asound.h>
#ifdef HAVE_BTRFS_H
#include <linux/btrfs.h>
#endif
#ifdef HAVE_DRM_H
#include <libdrm/drm.h>
#include <libdrm/i915_drm.h>
#endif
#include "linux_loop.h"
#include "qemu/guest-random.h"
#include "qemu/selfmap.h"
#include "user/syscall-trace.h"
#include "qapi/error.h"
#include "fd-trans.h"
#ifndef CLONE_IO
#define CLONE_IO                0x80000000      /* Clone io context */
#endif

/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */
/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                     \
    (CLONE_DETACHED | CLONE_IO)

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS               \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS                             \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

#define CLONE_INVALID_FORK_FLAGS                                        \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

#define CLONE_INVALID_THREAD_FLAGS                                      \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
       CLONE_IGNORED_FLAGS))

/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */
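/*
 * Illustrative sketch (not in the original file): how the masks above are
 * meant to be combined when classifying a guest's clone flags. The helper
 * name is hypothetical; the real checks live in do_fork().
 */
#if 0
static bool clone_flags_ok(unsigned int flags)
{
    if ((flags & CLONE_THREAD_FLAGS) == CLONE_THREAD_FLAGS) {
        /* Looks like pthread_create(): nothing outside the valid set. */
        return !(flags & CLONE_INVALID_THREAD_FLAGS);
    }
    /* Looks like fork(): none of the thread flags, nothing invalid. */
    return !(flags & CLONE_THREAD_FLAGS) &&
           !(flags & CLONE_INVALID_FORK_FLAGS);
}
#endif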
/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH  _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
#define _syscall0(type,name)                                    \
static type name (void)                                         \
{                                                               \
    return syscall(__NR_##name);                                \
}

#define _syscall1(type,name,type1,arg1)                         \
static type name (type1 arg1)                                   \
{                                                               \
    return syscall(__NR_##name, arg1);                          \
}

#define _syscall2(type,name,type1,arg1,type2,arg2)              \
static type name (type1 arg1,type2 arg2)                        \
{                                                               \
    return syscall(__NR_##name, arg1, arg2);                    \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)   \
static type name (type1 arg1,type2 arg2,type3 arg3)             \
{                                                               \
    return syscall(__NR_##name, arg1, arg2, arg3);              \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)          \
{                                                                        \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4);                 \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5)                                            \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{                                                                        \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);           \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6)                                 \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6)                                            \
{                                                                        \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);     \
}
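/*
 * Illustrative note (not in the original file): what one of these macros
 * expands to. For example, the _syscall3(int, sys_getdents, ...) further
 * down produces, roughly:
 *
 *   static int sys_getdents (uint fd, struct linux_dirent *dirp, uint count)
 *   {
 *       return syscall(__NR_sys_getdents, fd, dirp, count);
 *   }
 *
 * which works because __NR_sys_getdents is #defined to __NR_getdents below,
 * so the wrapper invokes the raw host syscall directly, bypassing any glibc
 * wrapper of the same name.
 */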
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
#define __NR_sys_syslog __NR_syslog
#if defined(__NR_futex)
# define __NR_sys_futex __NR_futex
#endif
#if defined(__NR_futex_time64)
# define __NR_sys_futex_time64 __NR_futex_time64
#endif
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
#define __NR_sys_statx __NR_statx

#if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif

#define __NR_sys_gettid __NR_gettid
_syscall0(int, sys_gettid)
/* For the 64-bit guest on 32-bit host case we must emulate
 * getdents using getdents64, because otherwise the host
 * might hand us back more dirent records than we can fit
 * into the guest buffer after structure format conversion.
 * Otherwise we emulate getdents with getdents if the host has it.
 */
#if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
#define EMULATE_GETDENTS_WITH_GETDENTS
#endif

#if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
#if (defined(TARGET_NR_getdents) && \
      !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
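/*
 * Illustrative note (not in the original file): the concern above is one of
 * record counts. On a 32-bit host serving a 64-bit guest, a host
 * struct linux_dirent is smaller than the guest's (d_ino/d_off are longs),
 * so one host getdents() into a guest-sized buffer can return more records
 * than fit once each is widened to the guest layout. getdents64 records use
 * fixed 64-bit fields, so they are never smaller than the guest's, keeping
 * the record count bounded.
 */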
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
_syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
          siginfo_t *, uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
_syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_getcpu __NR_getcpu
_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
_syscall2(int, ioprio_get, int, which, int, who)
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
#endif

#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
_syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
          unsigned long, idx1, unsigned long, idx2)
#endif
/*
 * It is assumed that struct statx is architecture independent.
 */
#if defined(TARGET_NR_statx) && defined(__NR_statx)
_syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
          unsigned int, mask, struct target_statx *, statxbuf)
#endif
#if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
_syscall2(int, membarrier, int, cmd, int, flags)
#endif
static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
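/*
 * Illustrative sketch (not in the original file): this table is consumed by
 * QEMU's bitmask translation helpers, which for each row mask out the bits
 * in one ABI's column and substitute the matching bits of the other ABI.
 * Roughly:
 */
#if 0
    /* Guest open() flags arrive as target bits... */
    int host_flags = target_to_host_bitmask(target_flags, fcntl_flags_tbl);
    /* ...and results travel back the other way. */
    int back = host_to_target_bitmask(host_flags, fcntl_flags_tbl);
#endif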
_syscall2(int, sys_getcwd1, char *, buf, size_t, size)
#if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */

#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY  */
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

static inline int next_free_host_timer(void)
{
    int k;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};
/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EAGAIN]            = TARGET_EAGAIN,
    [EIDRM]             = TARGET_EIDRM,
    [ECHRNG]            = TARGET_ECHRNG,
    [EL2NSYNC]          = TARGET_EL2NSYNC,
    [EL3HLT]            = TARGET_EL3HLT,
    [EL3RST]            = TARGET_EL3RST,
    [ELNRNG]            = TARGET_ELNRNG,
    [EUNATCH]           = TARGET_EUNATCH,
    [ENOCSI]            = TARGET_ENOCSI,
    [EL2HLT]            = TARGET_EL2HLT,
    [EDEADLK]           = TARGET_EDEADLK,
    [ENOLCK]            = TARGET_ENOLCK,
    [EBADE]             = TARGET_EBADE,
    [EBADR]             = TARGET_EBADR,
    [EXFULL]            = TARGET_EXFULL,
    [ENOANO]            = TARGET_ENOANO,
    [EBADRQC]           = TARGET_EBADRQC,
    [EBADSLT]           = TARGET_EBADSLT,
    [EBFONT]            = TARGET_EBFONT,
    [ENOSTR]            = TARGET_ENOSTR,
    [ENODATA]           = TARGET_ENODATA,
    [ETIME]             = TARGET_ETIME,
    [ENOSR]             = TARGET_ENOSR,
    [ENONET]            = TARGET_ENONET,
    [ENOPKG]            = TARGET_ENOPKG,
    [EREMOTE]           = TARGET_EREMOTE,
    [ENOLINK]           = TARGET_ENOLINK,
    [EADV]              = TARGET_EADV,
    [ESRMNT]            = TARGET_ESRMNT,
    [ECOMM]             = TARGET_ECOMM,
    [EPROTO]            = TARGET_EPROTO,
    [EDOTDOT]           = TARGET_EDOTDOT,
    [EMULTIHOP]         = TARGET_EMULTIHOP,
    [EBADMSG]           = TARGET_EBADMSG,
    [ENAMETOOLONG]      = TARGET_ENAMETOOLONG,
    [EOVERFLOW]         = TARGET_EOVERFLOW,
    [ENOTUNIQ]          = TARGET_ENOTUNIQ,
    [EBADFD]            = TARGET_EBADFD,
    [EREMCHG]           = TARGET_EREMCHG,
    [ELIBACC]           = TARGET_ELIBACC,
    [ELIBBAD]           = TARGET_ELIBBAD,
    [ELIBSCN]           = TARGET_ELIBSCN,
    [ELIBMAX]           = TARGET_ELIBMAX,
    [ELIBEXEC]          = TARGET_ELIBEXEC,
    [EILSEQ]            = TARGET_EILSEQ,
    [ENOSYS]            = TARGET_ENOSYS,
    [ELOOP]             = TARGET_ELOOP,
    [ERESTART]          = TARGET_ERESTART,
    [ESTRPIPE]          = TARGET_ESTRPIPE,
    [ENOTEMPTY]         = TARGET_ENOTEMPTY,
    [EUSERS]            = TARGET_EUSERS,
    [ENOTSOCK]          = TARGET_ENOTSOCK,
    [EDESTADDRREQ]      = TARGET_EDESTADDRREQ,
    [EMSGSIZE]          = TARGET_EMSGSIZE,
    [EPROTOTYPE]        = TARGET_EPROTOTYPE,
    [ENOPROTOOPT]       = TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT]   = TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT]   = TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP]        = TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT]      = TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT]      = TARGET_EAFNOSUPPORT,
    [EADDRINUSE]        = TARGET_EADDRINUSE,
    [EADDRNOTAVAIL]     = TARGET_EADDRNOTAVAIL,
    [ENETDOWN]          = TARGET_ENETDOWN,
    [ENETUNREACH]       = TARGET_ENETUNREACH,
    [ENETRESET]         = TARGET_ENETRESET,
    [ECONNABORTED]      = TARGET_ECONNABORTED,
    [ECONNRESET]        = TARGET_ECONNRESET,
    [ENOBUFS]           = TARGET_ENOBUFS,
    [EISCONN]           = TARGET_EISCONN,
    [ENOTCONN]          = TARGET_ENOTCONN,
    [EUCLEAN]           = TARGET_EUCLEAN,
    [ENOTNAM]           = TARGET_ENOTNAM,
    [ENAVAIL]           = TARGET_ENAVAIL,
    [EISNAM]            = TARGET_EISNAM,
    [EREMOTEIO]         = TARGET_EREMOTEIO,
    [EDQUOT]            = TARGET_EDQUOT,
    [ESHUTDOWN]         = TARGET_ESHUTDOWN,
    [ETOOMANYREFS]      = TARGET_ETOOMANYREFS,
    [ETIMEDOUT]         = TARGET_ETIMEDOUT,
    [ECONNREFUSED]      = TARGET_ECONNREFUSED,
    [EHOSTDOWN]         = TARGET_EHOSTDOWN,
    [EHOSTUNREACH]      = TARGET_EHOSTUNREACH,
    [EALREADY]          = TARGET_EALREADY,
    [EINPROGRESS]       = TARGET_EINPROGRESS,
    [ESTALE]            = TARGET_ESTALE,
    [ECANCELED]         = TARGET_ECANCELED,
    [ENOMEDIUM]         = TARGET_ENOMEDIUM,
    [EMEDIUMTYPE]       = TARGET_EMEDIUMTYPE,
#ifdef ENOKEY
    [ENOKEY]            = TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED]       = TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED]       = TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED]      = TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD]        = TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE]   = TARGET_ENOTRECOVERABLE,
#endif
#ifdef ENOMSG
    [ENOMSG]            = TARGET_ENOMSG,
#endif
#ifdef ERFKILL
    [ERFKILL]           = TARGET_ERFKILL,
#endif
#ifdef EHWPOISON
    [EHWPOISON]         = TARGET_EHWPOISON,
#endif
};
static inline int host_to_target_errno(int err)
{
    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
        host_to_target_errno_table[err]) {
        return host_to_target_errno_table[err];
    }
    return err;
}

static inline int target_to_host_errno(int err)
{
    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
        target_to_host_errno_table[err]) {
        return target_to_host_errno_table[err];
    }
    return err;
}

static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1) {
        return -host_to_target_errno(errno);
    } else {
        return ret;
    }
}

const char *target_strerror(int err)
{
    if (err == TARGET_ERESTARTSYS) {
        return "To be restarted";
    }
    if (err == TARGET_QEMU_ESIGRETURN) {
        return "Successful exit from sigreturn";
    }

    if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
        return NULL;
    }
    return strerror(target_to_host_errno(err));
}
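/*
 * Illustrative sketch (not in the original file): the calling convention
 * established by get_errno(). Host syscall wrappers return -1 and set
 * errno; get_errno() folds that into the "negative target errno" encoding
 * used for guest results, e.g.:
 */
#if 0
    abi_long ret = get_errno(safe_read(fd, buf, count));
    if (is_error(ret)) {
        /* ret is already -TARGET_Exxx, ready to hand back to the guest. */
    }
#endif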
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
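/*
 * Illustrative note (not in the original file): safe_syscall() is QEMU's
 * restart-aware variant of syscall(); if a guest signal arrives just before
 * the host syscall instruction, it fails with errno set to
 * TARGET_ERESTARTSYS, which get_errno() then passes through to the guest as
 * -TARGET_ERESTARTSYS so the syscall can be restarted. An instantiation
 * such as
 *
 *   safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
 *
 * therefore defines safe_read(fd, buff, count) as a restart-aware
 * replacement for the libc read().
 */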
safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
              int, flags, mode_t, mode)
#if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
              struct rusage *, rusage)
#endif
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
              int, options, struct rusage *, rusage)
safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
#endif
#if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
              size_t, sigsetsize)
#endif
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
              size_t, sigsetsize)
#if defined(__NR_futex)
safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
              socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
#if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
#endif
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
#if defined(TARGET_NR_nanosleep)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#endif
#if defined(TARGET_NR_clock_nanosleep) || \
    defined(TARGET_NR_clock_nanosleep_time64)
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
#ifdef __NR_ipc
#ifdef __sparc__
safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr)
#else
safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
#endif
#endif
#ifdef __NR_msgsnd
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
#endif
#ifdef __NR_msgrcv
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
#endif
#ifdef __NR_semtimedop
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedsend_time64)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedreceive) || \
    defined(TARGET_NR_mq_timedreceive_time64)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
              int, outfd, loff_t *, poutoff, size_t, length,
              unsigned int, flags)
#endif
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
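/*
 * Illustrative sketch (not in the original file) of the calling convention
 * the comment above requires of safe_fcntl() callers:
 */
#if 0
    struct flock64 fl;
    /* Always the 64-bit constants and struct, on every host: */
    int r = safe_fcntl(fd, F_GETLK64, &fl);
#endif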
static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}
static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}
//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)

/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done...  */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    } else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
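/*
 * Worked example (not in the original file): with 4 KiB host pages, if
 * target_brk is 0x40001200 and brk_page is 0x40002000, a guest brk to
 * 0x40001800 stays below brk_page, so only the gap 0x40001200..0x40001800
 * is zeroed and target_brk is updated. A brk to 0x40003000 instead needs
 * HOST_PAGE_ALIGN(0x40003000 - 0x40002000) = 0x1000 bytes mapped at
 * brk_page, and the mapping is kept only if it lands exactly there.
 */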
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}

static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
#endif
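/*
 * Illustrative note (not in the original file): the bit-by-bit loops above
 * exist because the guest fd_set is an array of abi_ulong in guest byte
 * order, while the host fd_set layout is opaque. With, say, a big-endian
 * 32-bit guest on a little-endian 64-bit host, fd 0 lives in a different
 * byte of the buffer on each side, so a plain memcpy of the bitmap would
 * scramble the descriptors; FD_SET/FD_ISSET plus __get_user/__put_user
 * handle both the word layout and the byte swapping.
 */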
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
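/*
 * Worked example (not in the original file): with an Alpha host
 * (HOST_HZ == 1024) and a target using the common TARGET_HZ of 100,
 * 2048 host ticks convert to (2048 * 100) / 1024 = 200 guest ticks,
 * i.e. the same two seconds of CPU time expressed in the guest's unit.
 * The int64_t cast keeps the multiplication from overflowing a 32-bit long.
 */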
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
#ifdef TARGET_NR_setrlimit
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}
#endif

#if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
#endif
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
static inline abi_long copy_from_user_timeval64(struct timeval *tv,
                                                abi_ulong target_tv_addr)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
#endif

static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
                                              const struct timeval *tv)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
#if defined(TARGET_NR_futex) || \
    defined(TARGET_NR_rt_sigtimedwait) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) || \
    defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
    defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
    defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
    defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif

#if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
    defined(TARGET_NR_timer_settime64) || \
    defined(TARGET_NR_mq_timedsend_time64) || \
    defined(TARGET_NR_mq_timedreceive_time64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_clock_nanosleep_time64) || \
    defined(TARGET_NR_rt_sigtimedwait_time64) || \
    defined(TARGET_NR_utimensat) || \
    defined(TARGET_NR_utimensat_time64) || \
    defined(TARGET_NR_semtimedop_time64) || \
    defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
                                                 abi_ulong target_addr)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    /* in 32bit mode, this drops the padding */
    host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}

static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
                                                 struct timespec *host_ts)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
#if defined(TARGET_NR_gettimeofday)
static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
                                             struct timezone *tz)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 1);

    return 0;
}
#endif

#if defined(TARGET_NR_settimeofday)
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
#endif
#if defined(TARGET_WANT_OLD_SYS_SELECT)
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
#if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
                            abi_long arg4, abi_long arg5, abi_long arg6,
                            bool time64)
{
    abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /*
     * The 6th arg is actually two args smashed together,
     * so we cannot use the C library.
     */
    sigset_t set;
    struct {
        sigset_t *set;
        size_t size;
    } sig, *sig_ptr;

    abi_ulong arg_sigset, arg_sigsize, *arg7;
    target_sigset_t *target_sigset;

    n = arg1;
    rfd_addr = arg2;
    wfd_addr = arg3;
    efd_addr = arg4;
    ts_addr = arg5;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /*
     * This takes a timespec, and not a timeval, so we cannot
     * use the do_select() helper ...
     */
    if (ts_addr) {
        if (time64) {
            if (target_to_host_timespec64(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        }
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    /* Extract the two packed args for the sigset */
    if (arg6) {
        sig_ptr = &sig;
        sig.size = SIGSET_T_SIZE;

        arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
        if (!arg7) {
            return -TARGET_EFAULT;
        }
        arg_sigset = tswapal(arg7[0]);
        arg_sigsize = tswapal(arg7[1]);
        unlock_user(arg7, arg6, 0);

        if (arg_sigset) {
            sig.set = &set;
            if (arg_sigsize != sizeof(*target_sigset)) {
                /* Like the kernel, we enforce correct size sigsets */
                return -TARGET_EINVAL;
            }
            target_sigset = lock_user(VERIFY_READ, arg_sigset,
                                      sizeof(*target_sigset), 1);
            if (!target_sigset) {
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(&set, target_sigset);
            unlock_user(target_sigset, arg_sigset, 0);
        } else {
            sig.set = NULL;
        }
    } else {
        sig_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, sig_ptr));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
            return -TARGET_EFAULT;
        }
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
            return -TARGET_EFAULT;
        }
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
            return -TARGET_EFAULT;
        }
        if (time64) {
            if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        }
    }
    return ret;
}
#endif
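/*
 * Illustrative note (not in the original file): the "two args smashed
 * together" layout handled above is the kernel's pselect6 convention. The
 * guest's 6th argument points at a pair:
 *
 *   abi_ulong arg7[2];   // arg7[0] = guest address of the sigset_t
 *                        // arg7[1] = size the guest claims for that sigset
 *
 * which is why the code locks a two-element abi_ulong array, byte-swaps
 * both words, and only then dereferences the sigset itself.
 */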
#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
    defined(TARGET_NR_ppoll_time64)
static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
                         abi_long arg4, abi_long arg5, bool ppoll, bool time64)
{
    struct target_pollfd *target_pfd;
    unsigned int nfds = arg2;
    struct pollfd *pfd;
    unsigned int i;
    abi_long ret;

    pfd = NULL;
    target_pfd = NULL;
    if (nfds) {
        if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
            return -TARGET_EINVAL;
        }
        target_pfd = lock_user(VERIFY_WRITE, arg1,
                               sizeof(struct target_pollfd) * nfds, 1);
        if (!target_pfd) {
            return -TARGET_EFAULT;
        }

        pfd = alloca(sizeof(struct pollfd) * nfds);
        for (i = 0; i < nfds; i++) {
            pfd[i].fd = tswap32(target_pfd[i].fd);
            pfd[i].events = tswap16(target_pfd[i].events);
        }
    }
    if (ppoll) {
        struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
        target_sigset_t *target_set;
        sigset_t _set, *set = &_set;

        if (arg3) {
            if (time64) {
                if (target_to_host_timespec64(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                if (target_to_host_timespec(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            }
        } else {
            timeout_ts = NULL;
        }

        if (arg4) {
            if (arg5 != sizeof(target_sigset_t)) {
                unlock_user(target_pfd, arg1, 0);
                return -TARGET_EINVAL;
            }

            target_set = lock_user(VERIFY_READ, arg4,
                                   sizeof(target_sigset_t), 1);
            if (!target_set) {
                unlock_user(target_pfd, arg1, 0);
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(set, target_set);
        } else {
            set = NULL;
        }

        ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
                                   set, SIGSET_T_SIZE));

        if (!is_error(ret) && arg3) {
            if (time64) {
                if (host_to_target_timespec64(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                if (host_to_target_timespec(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        if (arg4) {
            unlock_user(target_set, arg4, 0);
        }
    } else {
        struct timespec ts, *pts;

        if (arg3 >= 0) {
            /* Convert ms to secs, ns */
            ts.tv_sec = arg3 / 1000;
            ts.tv_nsec = (arg3 % 1000) * 1000000LL;
            pts = &ts;
        } else {
            /* -ve poll() timeout means "infinite" */
            pts = NULL;
        }
        ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
    }

    if (!is_error(ret)) {
        for (i = 0; i < nfds; i++) {
            target_pfd[i].revents = tswap16(pfd[i].revents);
        }
    }
    unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
    return ret;
}
#endif
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}

static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
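/*
 * Illustrative note (not in the original file): for the targets special-cased
 * above, the historical pipe() ABI returns the two descriptors in registers
 * rather than through the user buffer. On Alpha, for example, the guest sees
 * fd[0] as the normal return value while fd[1] is delivered in register a4,
 * so no store to 'pipedes' happens at all on that path.
 */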
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
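/*
 * Worked example (not in the original file) of the sun_path fixup above:
 * a guest that binds "/tmp/sock" but passes
 * len = offsetof(struct sockaddr_un, sun_path) + strlen("/tmp/sock") has
 * left the trailing '\0' outside the reported length. cp[len-1] is then
 * 'k' and cp[len] is '\0', so the length is bumped by one to include the
 * terminator, matching what the kernel tolerates.
 */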
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK &&
        len >= sizeof(struct target_sockaddr_nl)) {
        struct target_sockaddr_nl *target_nl =
               (struct target_sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
               (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            qemu_log_mask(LOG_UNIMP,
                          ("Unsupported ancillary data %d/%d: "
                           "unhandled msg size\n"),
                          tswap32(target_cmsg->cmsg_level),
                          tswap32(target_cmsg->cmsg_type));
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct target_cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        tgt_len = len;
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IP:
            switch (cmsg->cmsg_type) {
            case IP_TTL:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IP_RECVERR:
            {
                struct errhdr_t {
                   struct sock_extended_err ee;
                   struct sockaddr_in offender;
                };
                struct errhdr_t *errh = (struct errhdr_t *)data;
                struct errhdr_t *target_errh =
                    (struct errhdr_t *)target_data;

                if (len != sizeof(struct errhdr_t) ||
                    tgt_len != sizeof(struct errhdr_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IPV6:
            switch (cmsg->cmsg_type) {
            case IPV6_HOPLIMIT:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IPV6_RECVERR:
            {
                struct errhdr6_t {
                   struct sock_extended_err ee;
                   struct sockaddr_in6 offender;
                };
                struct errhdr6_t *errh = (struct errhdr6_t *)data;
2128 struct errhdr6_t
*errh
= (struct errhdr6_t
*)data
;
2129 struct errhdr6_t
*target_errh
=
2130 (struct errhdr6_t
*)target_data
;
2132 if (len
!= sizeof(struct errhdr6_t
) ||
2133 tgt_len
!= sizeof(struct errhdr6_t
)) {
2136 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
2137 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
2138 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
2139 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
2140 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
2141 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
2142 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
2143 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
2144 (void *) &errh
->offender
, sizeof(errh
->offender
));
2154 qemu_log_mask(LOG_UNIMP
, "Unsupported ancillary data: %d/%d\n",
2155 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
2156 memcpy(target_data
, data
, MIN(len
, tgt_len
));
2157 if (tgt_len
> len
) {
2158 memset(target_data
+ len
, 0, tgt_len
- len
);
2162 target_cmsg
->cmsg_len
= tswapal(TARGET_CMSG_LEN(tgt_len
));
2163 tgt_space
= TARGET_CMSG_SPACE(tgt_len
);
2164 if (msg_controllen
< tgt_space
) {
2165 tgt_space
= msg_controllen
;
2167 msg_controllen
-= tgt_space
;
2169 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
2170 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
2173 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
2175 target_msgh
->msg_controllen
= tswapal(space
);
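
/* Note on the bookkeeping above: CMSG_LEN(len) is the header plus len
 * payload bytes, while CMSG_SPACE(len) additionally pads the payload
 * out to alignment. For example, assuming 8-byte alignment and a
 * 16-byte cmsghdr, three fds (len == 12) give CMSG_LEN(12) == 28 but
 * CMSG_SPACE(12) == 32; the target copy is charged the analogous
 * TARGET_CMSG_LEN/TARGET_CMSG_SPACE amounts against msg_controllen.
 */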
/* do_setsockopt() Must return target values and target errnos. */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch (level) {
    case SOL_TCP:
    case SOL_UDP:
        /* TCP and UDP options all take an 'int' value.  */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch (optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTTL:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!ip_mreq_source) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user(ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
        case IPV6_ADDRFORM:
        case IPV6_2292PKTINFO:
        case IPV6_RECVTCLASS:
        case IPV6_RECVRTHDR:
        case IPV6_2292RTHDR:
        case IPV6_RECVHOPOPTS:
        case IPV6_2292HOPOPTS:
        case IPV6_RECVDSTOPTS:
        case IPV6_2292DSTOPTS:
        case IPV6_TCLASS:
        case IPV6_ADDR_PREFERENCES:
#ifdef IPV6_RECVPATHMTU
        case IPV6_RECVPATHMTU:
#endif
#ifdef IPV6_TRANSPARENT
        case IPV6_TRANSPARENT:
#endif
#ifdef IPV6_FREEBIND
        case IPV6_FREEBIND:
#endif
#ifdef IPV6_RECVORIGDSTADDR
        case IPV6_RECVORIGDSTADDR:
#endif
            val = 0;
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        case IPV6_PKTINFO:
        {
            struct in6_pktinfo pki;

            if (optlen < sizeof(pki)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
                return -TARGET_EFAULT;
            }

            pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &pki, sizeof(pki)));
            break;
        }
        case IPV6_ADD_MEMBERSHIP:
        case IPV6_DROP_MEMBERSHIP:
        {
            struct ipv6_mreq ipv6mreq;

            if (optlen < sizeof(ipv6mreq)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
                return -TARGET_EFAULT;
            }

            ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &ipv6mreq, sizeof(ipv6mreq)));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_ICMPV6:
        switch (optname) {
        case ICMPV6_FILTER:
        {
            struct icmp6_filter icmp6f;

            if (optlen > sizeof(icmp6f)) {
                optlen = sizeof(icmp6f);
            }

            if (copy_from_user(&icmp6f, optval_addr, optlen)) {
                return -TARGET_EFAULT;
            }

            for (val = 0; val < 8; val++) {
                icmp6f.data[val] = tswap32(icmp6f.data[val]);
            }

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &icmp6f, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_RAW:
        switch (optname) {
        case ICMP_FILTER:
        case IPV6_CHECKSUM:
            /* these take a u32 value */
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }

            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        default:
            goto unimplemented;
        }
        break;
#if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
    case SOL_ALG:
        switch (optname) {
        case ALG_SET_KEY:
        {
            char *alg_key = g_malloc(optlen);

            if (!alg_key) {
                return -TARGET_ENOMEM;
            }
            if (copy_from_user(alg_key, optval_addr, optlen)) {
                g_free(alg_key);
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       alg_key, optlen));
            g_free(alg_key);
            break;
        }
        case ALG_SET_AEAD_AUTHSIZE:
        {
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       NULL, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
#endif
    case TARGET_SOL_SOCKET:
        switch (optname) {
        case TARGET_SO_RCVTIMEO:
        {
            struct timeval tv;

            optname = SO_RCVTIMEO;

set_timeout:
            if (optlen != sizeof(struct target_timeval)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user_timeval(&tv, optval_addr)) {
                return -TARGET_EFAULT;
            }

            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                       &tv, sizeof(tv)));
            return ret;
        }
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            goto set_timeout;
        case TARGET_SO_ATTACH_FILTER:
        {
            struct target_sock_fprog *tfprog;
            struct target_sock_filter *tfilter;
            struct sock_fprog fprog;
            struct sock_filter *filter;
            int i;

            if (optlen != sizeof(*tfprog)) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            if (!lock_user_struct(VERIFY_READ, tfilter,
                                  tswapal(tfprog->filter), 0)) {
                unlock_user_struct(tfprog, optval_addr, 1);
                return -TARGET_EFAULT;
            }

            fprog.len = tswap16(tfprog->len);
            filter = g_try_new(struct sock_filter, fprog.len);
            if (filter == NULL) {
                unlock_user_struct(tfilter, tfprog->filter, 1);
                unlock_user_struct(tfprog, optval_addr, 1);
                return -TARGET_ENOMEM;
            }
            for (i = 0; i < fprog.len; i++) {
                filter[i].code = tswap16(tfilter[i].code);
                filter[i].jt = tfilter[i].jt;
                filter[i].jf = tfilter[i].jf;
                filter[i].k = tswap32(tfilter[i].k);
            }
            fprog.filter = filter;

            ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
                            SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
            g_free(filter);

            unlock_user_struct(tfilter, tfprog->filter, 1);
            unlock_user_struct(tfprog, optval_addr, 1);
            return ret;
        }
        case TARGET_SO_BINDTODEVICE:
        {
            char *dev_ifname, *addr_ifname;

            if (optlen > IFNAMSIZ - 1) {
                optlen = IFNAMSIZ - 1;
            }
            dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!dev_ifname) {
                return -TARGET_EFAULT;
            }
            optname = SO_BINDTODEVICE;
            addr_ifname = alloca(IFNAMSIZ);
            memcpy(addr_ifname, dev_ifname, optlen);
            addr_ifname[optlen] = 0;
            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                       addr_ifname, optlen));
            unlock_user(dev_ifname, optval_addr, 0);
            return ret;
        }
        case TARGET_SO_LINGER:
        {
            struct linger lg;
            struct target_linger *tlg;

            if (optlen != sizeof(struct target_linger)) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
                return -TARGET_EFAULT;
            }
            __get_user(lg.l_onoff, &tlg->l_onoff);
            __get_user(lg.l_linger, &tlg->l_linger);
            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
                                       &lg, sizeof(lg)));
            unlock_user_struct(tlg, optval_addr, 0);
            return ret;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            break;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            break;
#ifdef SO_REUSEPORT
        case TARGET_SO_REUSEPORT:
            optname = SO_REUSEPORT;
            break;
#endif
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            break;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            break;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            break;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            break;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            break;
        case TARGET_SO_SNDBUFFORCE:
            optname = SO_SNDBUFFORCE;
            break;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            break;
        case TARGET_SO_RCVBUFFORCE:
            optname = SO_RCVBUFFORCE;
            break;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            break;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            break;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            break;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            break;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            break;
        case TARGET_SO_PASSSEC:
            optname = SO_PASSSEC;
            break;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            break;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            break;
        default:
            goto unimplemented;
        }
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
#ifdef SOL_NETLINK
    case SOL_NETLINK:
        switch (optname) {
        case NETLINK_PKTINFO:
        case NETLINK_ADD_MEMBERSHIP:
        case NETLINK_DROP_MEMBERSHIP:
        case NETLINK_BROADCAST_ERROR:
        case NETLINK_NO_ENOBUFS:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LISTEN_ALL_NSID:
        case NETLINK_CAP_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
        case NETLINK_EXT_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
        case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
            break;
        default:
            goto unimplemented;
        }
        val = 0;
        if (optlen < sizeof(uint32_t)) {
            return -TARGET_EINVAL;
        }
        if (get_user_u32(val, optval_addr)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
                                   sizeof(val)));
        break;
#endif /* SOL_NETLINK */
    default:
    unimplemented:
        qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
                      level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}
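
/* Typical do_setsockopt() flow for an option that needs translation:
 * a guest setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv))
 * lands in the TARGET_SOL_SOCKET case above, which rewrites optname
 * to the host SO_RCVTIMEO, converts the guest struct target_timeval
 * with copy_from_user_timeval(), and only then calls the host
 * setsockopt(); simple 'int' options skip straight to get_user_u32().
 */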
/* do_getsockopt() Must return target values and target errnos. */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch (level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_RCVTIMEO: {
            struct timeval tv;
            socklen_t tvlen;

            optname = SO_RCVTIMEO;

get_timeout:
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            tvlen = sizeof(tv);
            ret = get_errno(getsockopt(sockfd, level, optname,
                                       &tv, &tvlen));
            if (ret < 0) {
                return ret;
            }
            if (len > sizeof(struct target_timeval)) {
                len = sizeof(struct target_timeval);
            }
            if (copy_to_user_timeval(optval_addr, &tv)) {
                return -TARGET_EFAULT;
            }
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            goto get_timeout;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case TARGET_SO_PEERSEC: {
            char *name;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }
            name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
            if (!name) {
                return -TARGET_EFAULT;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
                                       name, &lv));
            if (put_user_u32(lv, optlen)) {
                ret = -TARGET_EFAULT;
            }
            unlock_user(name, optval_addr, lv);
            break;
        }
        case TARGET_SO_LINGER:
        {
            struct linger lg;
            socklen_t lglen;
            struct target_linger *tlg;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            lglen = sizeof(lg);
            ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
                                       &lg, &lglen));
            if (ret < 0) {
                return ret;
            }
            if (len > lglen) {
                len = lglen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(lg.l_onoff, &tlg->l_onoff);
            __put_user(lg.l_linger, &tlg->l_linger);
            unlock_user_struct(tlg, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
#ifdef SO_REUSEPORT
        case TARGET_SO_REUSEPORT:
            optname = SO_REUSEPORT;
            goto int_case;
#endif
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
    case SOL_UDP:
        /* TCP and UDP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            val = host_to_target_sock_type(val);
        }
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch (optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTTL:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
        case IPV6_ADDRFORM:
        case IPV6_2292PKTINFO:
        case IPV6_RECVTCLASS:
        case IPV6_RECVRTHDR:
        case IPV6_2292RTHDR:
        case IPV6_RECVHOPOPTS:
        case IPV6_2292HOPOPTS:
        case IPV6_RECVDSTOPTS:
        case IPV6_2292DSTOPTS:
        case IPV6_TCLASS:
        case IPV6_ADDR_PREFERENCES:
#ifdef IPV6_RECVPATHMTU
        case IPV6_RECVPATHMTU:
#endif
#ifdef IPV6_TRANSPARENT
        case IPV6_TRANSPARENT:
#endif
#ifdef IPV6_FREEBIND
        case IPV6_FREEBIND:
#endif
#ifdef IPV6_RECVORIGDSTADDR
        case IPV6_RECVORIGDSTADDR:
#endif
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
#ifdef SOL_NETLINK
    case SOL_NETLINK:
        switch (optname) {
        case NETLINK_PKTINFO:
        case NETLINK_BROADCAST_ERROR:
        case NETLINK_NO_ENOBUFS:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LISTEN_ALL_NSID:
        case NETLINK_CAP_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
        case NETLINK_EXT_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
        case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len != sizeof(val)) {
                return -TARGET_EINVAL;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0) {
                return ret;
            }
            if (put_user_u32(lv, optlen)
                || put_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            break;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LIST_MEMBERSHIPS:
        {
            uint32_t *results;
            int i;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }
            results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
            if (!results && len > 0) {
                return -TARGET_EFAULT;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
            if (ret < 0) {
                unlock_user(results, optval_addr, 0);
                return ret;
            }
            /* swap host endianness to target endianness. */
            for (i = 0; i < (len / sizeof(uint32_t)); i++) {
                results[i] = tswap32(results[i]);
            }
            if (put_user_u32(lv, optlen)) {
                return -TARGET_EFAULT;
            }
            unlock_user(results, optval_addr, 0);
            break;
        }
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
        default:
            goto unimplemented;
        }
        break;
#endif /* SOL_NETLINK */
    default:
    unimplemented:
        qemu_log_mask(LOG_UNIMP,
                      "getsockopt level=%d optname=%d not yet supported\n",
                      level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
/* Convert target low/high pair representing file offset into the host
 * low/high pair. This function doesn't handle offsets bigger than 64 bits
 * as the kernel doesn't handle them either.
 */
static void target_to_host_low_high(abi_ulong tlow,
                                    abi_ulong thigh,
                                    unsigned long *hlow,
                                    unsigned long *hhigh)
{
    uint64_t off = tlow |
        ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
        TARGET_LONG_BITS / 2;

    *hlow = off;
    *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
}
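
/* Example, assuming a 32-bit target and a 64-bit host: the guest
 * splits offset 0x123456789 into tlow = 0x23456789 and thigh = 0x1;
 * the expression above reassembles off = 0x123456789, giving
 * *hlow = 0x123456789 and *hhigh = 0. The double shift by half the
 * word width avoids an undefined full-width shift when the two long
 * sizes coincide.
 */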
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
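
/* lock_iovec() and unlock_iovec() are used as a bracketing pair; an
 * illustrative read path looks like:
 *
 *     struct iovec *vec = lock_iovec(VERIFY_WRITE, target_addr, count, 0);
 *     if (vec == NULL) {
 *         return -host_to_target_errno(errno);
 *     }
 *     ret = get_errno(safe_readv(fd, vec, count));
 *     unlock_iovec(vec, target_addr, count, 1);
 *
 * where copy is 1 on unlock when host data must be copied back to the
 * guest (reads) and 0 when it need not be (writes).
 */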
static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                         abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec) {
        for (i = 0; i < count; i++) {
            abi_ulong base = tswapal(target_vec[i].iov_base);
            abi_long len = tswapal(target_vec[i].iov_len);
            if (len < 0) {
                break;
            }
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
        unlock_user(target_vec, target_addr, 0);
    }

    g_free(vec);
}
static inline int target_to_host_sock_type(int *type)
{
    int host_type = 0;
    int target_type = *type;

    switch (target_type & TARGET_SOCK_TYPE_MASK) {
    case TARGET_SOCK_DGRAM:
        host_type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        host_type = SOCK_STREAM;
        break;
    default:
        host_type = target_type & TARGET_SOCK_TYPE_MASK;
        break;
    }
    if (target_type & TARGET_SOCK_CLOEXEC) {
#if defined(SOCK_CLOEXEC)
        host_type |= SOCK_CLOEXEC;
#else
        return -TARGET_EINVAL;
#endif
    }
    if (target_type & TARGET_SOCK_NONBLOCK) {
#if defined(SOCK_NONBLOCK)
        host_type |= SOCK_NONBLOCK;
#elif !defined(O_NONBLOCK)
        return -TARGET_EINVAL;
#endif
    }
    *type = host_type;
    return 0;
}
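
/* Example: a guest socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0)
 * arrives here as TARGET_SOCK_STREAM | TARGET_SOCK_NONBLOCK; the
 * switch maps the type bits to the host SOCK_STREAM and the flag
 * handling ORs in the host SOCK_NONBLOCK where it exists, falling
 * back to the fcntl() emulation in sock_flags_fixup() below otherwise.
 */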
/* Try to emulate socket type flags after socket creation.  */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
/* do_socket() Must return target values and target errnos. */
static abi_long do_socket(int domain, int type, int protocol)
{
    int target_type = type;
    int ret;

    ret = target_to_host_sock_type(&type);
    if (ret) {
        return ret;
    }

    if (domain == PF_NETLINK && !(
#ifdef CONFIG_RTNETLINK
         protocol == NETLINK_ROUTE ||
#endif
         protocol == NETLINK_KOBJECT_UEVENT ||
         protocol == NETLINK_AUDIT)) {
        return -TARGET_EPROTONOSUPPORT;
    }

    if (domain == AF_PACKET ||
        (domain == AF_INET && type == SOCK_PACKET)) {
        protocol = tswap16(protocol);
    }

    ret = get_errno(socket(domain, type, protocol));
    if (ret >= 0) {
        ret = sock_flags_fixup(ret, target_type);
        if (type == SOCK_PACKET) {
            /* Manage an obsolete case:
             * if socket type is SOCK_PACKET, bind by name
             */
            fd_trans_register(ret, &target_packet_trans);
        } else if (domain == PF_NETLINK) {
            switch (protocol) {
#ifdef CONFIG_RTNETLINK
            case NETLINK_ROUTE:
                fd_trans_register(ret, &target_netlink_route_trans);
                break;
#endif
            case NETLINK_KOBJECT_UEVENT:
                /* nothing to do: messages are strings */
                break;
            case NETLINK_AUDIT:
                fd_trans_register(ret, &target_netlink_audit_trans);
                break;
            default:
                g_assert_not_reached();
            }
        }
    }
    return ret;
}
/* do_bind() Must return target values and target errnos. */
static abi_long do_bind(int sockfd, abi_ulong target_addr,
                        socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(bind(sockfd, addr, addrlen));
}

/* do_connect() Must return target values and target errnos. */
static abi_long do_connect(int sockfd, abi_ulong target_addr,
                           socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(safe_connect(sockfd, addr, addrlen));
}
/* do_sendrecvmsg_locked() Must return target values and target errnos. */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    abi_ulong count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
             */
            msg.msg_name = (void *)-1;
        } else if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    memset(msg.msg_control, 0, msg.msg_controllen);

    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);

    if (count > IOV_MAX) {
        /* sendrcvmsg returns a different errno for this condition than
         * readv/writev, so we must catch it here before lock_iovec() does.
         */
        ret = -TARGET_EMSGSIZE;
        goto out2;
    }

    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            void *host_msg;

            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                      MIN(msg.msg_iov->iov_len, len));
            } else {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                msgp->msg_flags = tswap32(msg.msg_flags);
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                    msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }
                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}
static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
                               int flags, int send)
{
    abi_long ret;
    struct target_msghdr *msgp;

    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
                          msgp,
                          target_msg,
                          send ? 1 : 0)) {
        return -TARGET_EFAULT;
    }
    ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
    unlock_user_struct(msgp, target_msg, send ? 0 : 1);
    return ret;
}
/* We don't rely on the C library to have sendmmsg/recvmmsg support,
 * so it might not have this *mmsg-specific flag either.
 */
#ifndef MSG_WAITFORONE
#define MSG_WAITFORONE 0x10000
#endif

static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
/* do_accept4() Must return target values and target errnos. */
static abi_long do_accept4(int fd, abi_ulong target_addr,
                           abi_ulong target_addrlen_addr, int flags)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;
    int host_flags;

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    if (target_addr == 0) {
        return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
    }

    /* linux returns EFAULT if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}

/* do_getpeername() Must return target values and target errnos. */
static abi_long do_getpeername(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(getpeername(fd, addr, &ret_addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}

/* do_getsockname() Must return target values and target errnos. */
static abi_long do_getsockname(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(getsockname(fd, addr, &ret_addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}
/* do_socketpair() Must return target values and target errnos. */
static abi_long do_socketpair(int domain, int type, int protocol,
                              abi_ulong target_tab_addr)
{
    int tab[2];
    abi_long ret;

    target_to_host_sock_type(&type);

    ret = get_errno(socketpair(domain, type, protocol, tab));
    if (!is_error(ret)) {
        if (put_user_s32(tab[0], target_tab_addr)
            || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
/* do_sendto() Must return target values and target errnos. */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    void *copy_msg = NULL;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (fd_trans_target_to_host_data(fd)) {
        copy_msg = host_msg;
        host_msg = g_malloc(len);
        memcpy(host_msg, copy_msg, len);
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            goto fail;
        }
    }
    if (target_addr) {
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            goto fail;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
fail:
    if (copy_msg) {
        g_free(host_msg);
        host_msg = copy_msg;
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
/* do_recvfrom() Must return target values and target errnos. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret_addrlen = addrlen;
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &ret_addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet.  */
        addrlen = 0; /* To keep compiler quiet.  */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        if (fd_trans_host_to_target_data(fd)) {
            abi_long trans;
            trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
            if (is_error(trans)) {
                ret = trans;
                goto fail;
            }
        }
        if (target_addr) {
            host_to_target_sockaddr(target_addr, addr,
                                    MIN(addrlen, ret_addrlen));
            if (put_user_u32(ret_addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        unlock_user(host_msg, msg, len);
    } else {
fail:
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
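
/* Example dispatch: a guest socketcall(TARGET_SYS_SENDTO, vptr) reads
 * nargs[TARGET_SYS_SENDTO] == 6 abi_long words from guest memory at
 * vptr and tail-calls do_sendto(a[0], ..., a[5]), so targets with a
 * multiplexed socketcall and targets with direct socket syscalls
 * share the same implementations.
 */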
#define N_SHM_REGIONS 32

static struct shm_region {
    abi_ulong start;
    abi_ulong size;
    bool in_use;
} shm_regions[N_SHM_REGIONS];

#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct */
struct target_semid64_ds
{
    struct target_ipc_perm sem_perm;
    abi_ulong sem_otime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong sem_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong sem_nsems;
    abi_ulong __unused3;
    abi_ulong __unused4;
};
#endif
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->sem_perm), target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
    host_sd->sem_otime = tswapal(target_sd->sem_otime);
    host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
    target_sd->sem_otime = tswapal(host_sd->sem_otime);
    target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};

static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}
union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};

union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};
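
/* union target_semun mirrors the host union semun, but each pointer
 * member is an abi_ulong holding a guest address: the guest's fourth
 * semctl() argument is carried as raw bits and only interpreted
 * per-command in do_semctl() below.
 */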
static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = g_try_new(unsigned short, nsems);
    if (!*host_array) {
        return -TARGET_ENOMEM;
    }
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
    if (!array) {
        g_free(*host_array);
        return -TARGET_EFAULT;
    }

    for (i = 0; i < nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}

static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
                                               unsigned short **host_array)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    array = lock_user(VERIFY_WRITE, target_addr,
                      nsems*sizeof(unsigned short), 0);
    if (!array)
        return -TARGET_EFAULT;

    for (i = 0; i < nsems; i++) {
        __put_user((*host_array)[i], &array[i]);
    }
    g_free(*host_array);
    unlock_user(array, target_addr, 1);

    return 0;
}
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;

    cmd &= 0xff;

    switch (cmd) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element.  To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
         * a swap of the 4 byte val field. In other cases, the data is
         * already in proper host byte order. */
        if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
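
/* Worked example of the GETVAL/SETVAL byteswap above: for a 64-bit
 * cross-endian guest, the 4-byte val shares storage with the 8-byte
 * buf, so reading target_su.val directly would pick up the wrong half
 * of the union; swapping the whole 8 bytes with tswapal() and then
 * tswap32()ing the val field recovers the guest's value.
 */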
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};

static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,
                                             unsigned nsops)
{
    struct target_sembuf *target_sembuf;
    int i;

    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops*sizeof(struct target_sembuf), 1);
    if (!target_sembuf)
        return -TARGET_EFAULT;

    for (i = 0; i < nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
    }

    unlock_user(target_sembuf, target_addr, 0);

    return 0;
}
#if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
    defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)

/*
 * This macro is required to handle the s390 variants, which pass the
 * arguments in a different order than the default.
 */
#ifdef __s390x__
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), (__timeout), (__sops)
#else
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), 0, (__sops), (__timeout)
#endif
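
/* Illustrative expansion: on s390x, SEMTIMEDOP_IPC_ARGS(nsops, sops, pts)
 * becomes "(nsops), (pts), (sops)", while the default variant becomes
 * "(nsops), 0, (sops), (pts)", the extra 0 matching the unused
 * argument slot in the generic sys_ipc() calling convention.
 */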
static inline abi_long do_semtimedop(int semid,
                                     abi_long ptr,
                                     unsigned nsops,
                                     abi_long timeout, bool time64)
{
    struct sembuf *sops;
    struct timespec ts, *pts = NULL;
    abi_long ret;

    if (timeout) {
        pts = &ts;
        if (time64) {
            if (target_to_host_timespec64(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        }
    }

    if (nsops > TARGET_SEMOPM) {
        return -TARGET_E2BIG;
    }

    sops = g_new(struct sembuf, nsops);

    if (target_to_host_sembuf(sops, ptr, nsops)) {
        g_free(sops);
        return -TARGET_EFAULT;
    }

    ret = -TARGET_ENOSYS;
#ifdef __NR_semtimedop
    ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
                                 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
    }
#endif
    g_free(sops);
    return ret;
}
#endif
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qnum;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
};

static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_md->msg_perm), target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapal(target_md->msg_stime);
    host_md->msg_rtime = tswapal(target_md->msg_rtime);
    host_md->msg_ctime = tswapal(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapal(target_md->msg_qnum);
    host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
    host_md->msg_lspid = tswapal(target_md->msg_lspid);
    host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapal(host_md->msg_stime);
    target_md->msg_rtime = tswapal(host_md->msg_rtime);
    target_md->msg_ctime = tswapal(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapal(host_md->msg_qnum);
    target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
    target_md->msg_lspid = tswapal(host_md->msg_lspid);
    target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
    return 0;
}
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};

static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg, ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};

static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgsnd
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
#ifdef __s390x__
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb));
#else
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb, 0));
#endif
    }
#endif
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
#ifdef __NR_ipc
#if defined(__sparc__)
/* On SPARC, msgrcv does not use the kludge on the final 2 arguments.  */
#define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
#elif defined(__s390x__)
/* The s390 sys_ipc variant has only five parameters.  */
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp})
#else
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp}), 0
#endif
#endif
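
/* Illustrative expansion: on most hosts MSGRCV_ARGS(host_mb, msgtyp)
 * packs the two values into an anonymous long array plus a trailing 0,
 * matching the kernel's historical msgrcv-via-sys_ipc kludge; SPARC
 * passes them directly and s390x omits the trailing argument.
 */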
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgrcv
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
                        msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
    }
#endif

    if (ret > 0) {
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};
static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
{
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
    return 0;
}
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};
static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
{
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
    return 0;
}
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return TARGET_PAGE_SIZE;
}
#endif
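/*
 * Worked example of the SHM_RND path in do_shmat() below: with
 * shmlba == 0x4000 and shmaddr == 0x12345, the address is misaligned
 * (0x12345 & 0x3fff != 0); if SHM_RND is set it is rounded down to
 * 0x12345 & ~0x3fff == 0x10000, otherwise the attach fails with EINVAL.
 */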
static inline abi_ulong do_shmat(CPUArchState *cpu_env,
                                 int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i, ret;
    abi_ulong shmlba;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }
    if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        /* In order to use the host shmat, we need to honor host SHMLBA. */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr = h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY) ? 0 : PAGE_WRITE));

    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;
}
static inline abi_long do_shmdt(abi_ulong shmaddr)
{
    int i;
    abi_long rv;

    mmap_lock();

    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
            shm_regions[i].in_use = false;
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
            break;
        }
    }
    rv = get_errno(shmdt(g2h(shmaddr)));

    mmap_unlock();

    return rv;
}
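/*
 * sys_ipc multiplexes every SysV IPC operation through a single entry
 * point: the low 16 bits of 'call' select the operation (IPCOP_semop,
 * IPCOP_shmat, ...) and the high 16 bits carry the interface version,
 * which do_ipc() below decodes as call >> 16.
 */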
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings. */
/* do_ipc() must return target values and target errnos. */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semtimedop(first, ptr, second, 0, false);
        break;
    case IPCOP_semtimedop:
    /*
     * The s390 sys_ipc variant has only five parameters instead of six
     * (as for default variant) and the only difference is the handling of
     * SEMTIMEDOP where on s390 the third parameter is used as a pointer
     * to a struct timespec where the generic variant uses fifth parameter.
     */
#if defined(TARGET_S390X)
        ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
#else
        ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
#endif
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                                tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
                      call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
/* kernel structure types definitions */

#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL
#define MAX_STRUCT_SIZE 4096

#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
                            / sizeof(struct fiemap_extent))
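/*
 * For a sense of scale: struct fiemap_extent is 56 bytes on typical
 * Linux hosts, so this caps fm_extent_count at roughly UINT_MAX / 56
 * (about 76 million extents); the exact bound depends on the host
 * headers.
 */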
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof(*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
                                int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    void *argptr;
    int ret;
    struct ifconf *host_ifconf;
    uint32_t outbufsz;
    const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
    int target_ifreq_size;
    int nb_ifreq;
    int free_buf = 0;
    int i;
    int target_ifc_len;
    abi_long target_ifc_buf;
    int host_ifc_len;
    char *host_ifc_buf;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr)
        return -TARGET_EFAULT;
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
    target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
    target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);

    if (target_ifc_buf != 0) {
        target_ifc_len = host_ifconf->ifc_len;
        nb_ifreq = target_ifc_len / target_ifreq_size;
        host_ifc_len = nb_ifreq * sizeof(struct ifreq);

        outbufsz = sizeof(*host_ifconf) + host_ifc_len;
        if (outbufsz > MAX_STRUCT_SIZE) {
            /*
             * We can't fit all the ifreq entries into the fixed-size
             * buffer. Allocate one that is large enough and use it instead.
             */
            host_ifconf = malloc(outbufsz);
            if (!host_ifconf) {
                return -TARGET_ENOMEM;
            }
            memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
            free_buf = 1;
        }
        host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);

        host_ifconf->ifc_len = host_ifc_len;
    } else {
        host_ifc_buf = NULL;
    }
    host_ifconf->ifc_buf = host_ifc_buf;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
    if (!is_error(ret)) {
        /* convert host ifc_len to target ifc_len */

        nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
        target_ifc_len = nb_ifreq * target_ifreq_size;
        host_ifconf->ifc_len = target_ifc_len;

        /* restore target ifc_buf */

        host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;

        /* copy struct ifconf to target user */

        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr)
            return -TARGET_EFAULT;
        thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);

        if (target_ifc_buf != 0) {
            /* copy ifreq[] to target user */
            argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
            for (i = 0; i < nb_ifreq; i++) {
                thunk_convert(argptr + i * target_ifreq_size,
                              host_ifc_buf + i * sizeof(struct ifreq),
                              ifreq_arg_type, THUNK_TARGET);
            }
            unlock_user(argptr, target_ifc_buf, target_ifc_len);
        }
    }

    if (free_buf) {
        free(host_ifconf);
    }

    return ret;
}
#if defined(CONFIG_USBFS)
#if HOST_LONG_BITS > 64
#error USBDEVFS thunks do not support >64 bit hosts yet.
#endif
struct live_urb {
    uint64_t target_urb_adr;
    uint64_t target_buf_adr;
    char *target_buf_ptr;
    struct usbdevfs_urb host_urb;
};
static GHashTable *usbdevfs_urb_hashtable(void)
{
    static GHashTable *urb_hashtable;

    if (!urb_hashtable) {
        urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
    }
    return urb_hashtable;
}

static void urb_hashtable_insert(struct live_urb *urb)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    g_hash_table_insert(urb_hashtable, urb, urb);
}

static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
}

static void urb_hashtable_remove(struct live_urb *urb)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    g_hash_table_remove(urb_hashtable, urb);
}
static abi_long
do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
                          int fd, int cmd, abi_long arg)
{
    const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
    const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
    struct live_urb *lurb;
    void *argptr;
    uint64_t hurb;
    int target_size;
    uintptr_t target_urb_adr;
    abi_long ret;

    target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);

    memset(buf_temp, 0, sizeof(uint64_t));
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (is_error(ret)) {
        return ret;
    }

    memcpy(&hurb, buf_temp, sizeof(uint64_t));
    lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
    if (!lurb->target_urb_adr) {
        return -TARGET_EFAULT;
    }
    urb_hashtable_remove(lurb);
    unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
                lurb->host_urb.buffer_length);
    lurb->target_buf_ptr = NULL;

    /* restore the guest buffer pointer */
    lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;

    /* update the guest urb struct */
    argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
    unlock_user(argptr, lurb->target_urb_adr, target_size);

    target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
    /* write back the urb handle */
    argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
    target_urb_adr = lurb->target_urb_adr;
    thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
    unlock_user(argptr, arg, target_size);

    g_free(lurb);
    return ret;
}
static abi_long
do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
                             uint8_t *buf_temp __attribute__((unused)),
                             int fd, int cmd, abi_long arg)
{
    struct live_urb *lurb;

    /* map target address back to host URB with metadata. */
    lurb = urb_hashtable_lookup(arg);
    if (!lurb) {
        return -TARGET_EFAULT;
    }
    return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
}
static abi_long
do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    abi_long ret;
    void *argptr;
    int rw_dir;
    struct live_urb *lurb;

    /*
     * each submitted URB needs to map to a unique ID for the
     * kernel, and that unique ID needs to be a pointer to
     * host memory. hence, we need to malloc for each URB.
     * isochronous transfers have a variable length struct.
     */
    arg_type++;
    target_size = thunk_type_size(arg_type, THUNK_TARGET);

    /* construct host copy of urb and metadata */
    lurb = g_try_malloc0(sizeof(struct live_urb));
    if (!lurb) {
        return -TARGET_ENOMEM;
    }

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    lurb->target_urb_adr = arg;
    lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;

    /* buffer space used depends on endpoint type so lock the entire buffer */
    /* control type urbs should check the buffer contents for true direction */
    rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
    lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
                                     lurb->host_urb.buffer_length, 1);
    if (lurb->target_buf_ptr == NULL) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* update buffer pointer in host copy */
    lurb->host_urb.buffer = lurb->target_buf_ptr;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
    if (is_error(ret)) {
        unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
        g_free(lurb);
    } else {
        urb_hashtable_insert(lurb);
    }

    return ret;
}
#endif /* CONFIG_USBFS */
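/*
 * The device-mapper ioctls handled below all share one wire format: a
 * fixed struct dm_ioctl header whose data_start/data_size fields
 * describe a variable-length payload that follows in the same buffer.
 * The handler therefore converts the header generically and then
 * special-cases the payload per command.
 */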
static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                            int cmd, abi_long arg)
{
    void *argptr;
    struct dm_ioctl *host_dm;
    abi_long guest_data;
    uint32_t guest_data_size;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    abi_long ret;
    void *big_buf = NULL;
    char *host_data;

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* buf_temp is too small, so fetch things into a bigger buffer */
    big_buf = g_malloc0(((struct dm_ioctl *)buf_temp)->data_size * 2);
    memcpy(big_buf, buf_temp, target_size);
    buf_temp = big_buf;
    host_dm = big_buf;

    guest_data = arg + host_dm->data_start;
    if ((guest_data - arg) < 0) {
        ret = -TARGET_EINVAL;
        goto out;
    }
    guest_data_size = host_dm->data_size - host_dm->data_start;
    host_data = (char *)host_dm + host_dm->data_start;

    argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }

    switch (ie->host_cmd) {
    case DM_REMOVE_ALL:
    case DM_LIST_DEVICES:
    case DM_DEV_CREATE:
    case DM_DEV_REMOVE:
    case DM_DEV_SUSPEND:
    case DM_DEV_STATUS:
    case DM_DEV_WAIT:
    case DM_TABLE_STATUS:
    case DM_TABLE_CLEAR:
    case DM_TABLE_DEPS:
    case DM_LIST_VERSIONS:
        /* no input data */
        break;
    case DM_DEV_RENAME:
    case DM_DEV_SET_GEOMETRY:
        /* data contains only strings */
        memcpy(host_data, argptr, guest_data_size);
        break;
    case DM_TARGET_MSG:
        memcpy(host_data, argptr, guest_data_size);
        *(uint64_t *)host_data = tswap64(*(uint64_t *)argptr);
        break;
    case DM_TABLE_LOAD:
    {
        void *gspec = argptr;
        void *cur_data = host_data;
        const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
        int spec_size = thunk_type_size(arg_type, 0);
        int i;

        for (i = 0; i < host_dm->target_count; i++) {
            struct dm_target_spec *spec = cur_data;
            uint32_t next;
            int slen;

            thunk_convert(spec, gspec, arg_type, THUNK_HOST);
            slen = strlen((char *)gspec + spec_size) + 1;
            next = spec->next;
            spec->next = sizeof(*spec) + slen;
            strcpy((char *)&spec[1], gspec + spec_size);
            gspec += next;
            cur_data += spec->next;
        }
        break;
    }
    default:
        ret = -TARGET_EINVAL;
        unlock_user(argptr, guest_data, 0);
        goto out;
    }
    unlock_user(argptr, guest_data, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (!is_error(ret)) {
        guest_data = arg + host_dm->data_start;
        guest_data_size = host_dm->data_size - host_dm->data_start;
        argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        switch (ie->host_cmd) {
        case DM_REMOVE_ALL:
        case DM_DEV_CREATE:
        case DM_DEV_REMOVE:
        case DM_DEV_RENAME:
        case DM_DEV_SUSPEND:
        case DM_DEV_STATUS:
        case DM_TABLE_LOAD:
        case DM_TABLE_CLEAR:
        case DM_TARGET_MSG:
        case DM_DEV_SET_GEOMETRY:
            /* no return data */
            break;
        case DM_LIST_DEVICES:
        {
            struct dm_name_list *nl = (void *)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
            int nl_size = 12; /* can't use thunk_size due to alignment */

            while (1) {
                uint32_t next = nl->next;
                if (next) {
                    nl->next = nl_size + (strlen(nl->name) + 1);
                }
                if (remaining_data < nl->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
                strcpy(cur_data + nl_size, nl->name);
                cur_data += nl->next;
                remaining_data -= nl->next;
                if (!next) {
                    break;
                }
                nl = (void *)nl + next;
            }
            break;
        }
        case DM_DEV_WAIT:
        case DM_TABLE_STATUS:
        {
            struct dm_target_spec *spec = (void *)host_dm + host_dm->data_start;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
            int spec_size = thunk_type_size(arg_type, 0);
            int i;

            for (i = 0; i < host_dm->target_count; i++) {
                uint32_t next = spec->next;
                int slen = strlen((char *)&spec[1]) + 1;
                spec->next = (cur_data - argptr) + spec_size + slen;
                if (guest_data_size < spec->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
                strcpy(cur_data + spec_size, (char *)&spec[1]);
                cur_data = argptr + spec->next;
                spec = (void *)host_dm + host_dm->data_start + next;
            }
            break;
        }
        case DM_TABLE_DEPS:
        {
            void *hdata = (void *)host_dm + host_dm->data_start;
            int count = *(uint32_t *)hdata;
            uint64_t *hdev = hdata + 8;
            uint64_t *gdev = argptr + 8;
            int i;

            *(uint32_t *)argptr = tswap32(count);
            for (i = 0; i < count; i++) {
                *gdev = tswap64(*hdev);
                gdev++;
                hdev++;
            }
            break;
        }
        case DM_LIST_VERSIONS:
        {
            struct dm_target_versions *vers = (void *)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
            int vers_size = thunk_type_size(arg_type, 0);

            while (1) {
                uint32_t next = vers->next;
                if (next) {
                    vers->next = vers_size + (strlen(vers->name) + 1);
                }
                if (remaining_data < vers->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
                strcpy(cur_data + vers_size, vers->name);
                cur_data += vers->next;
                remaining_data -= vers->next;
                if (!next) {
                    break;
                }
                vers = (void *)vers + next;
            }
            break;
        }
        default:
            unlock_user(argptr, guest_data, 0);
            ret = -TARGET_EINVAL;
            goto out;
        }
        unlock_user(argptr, guest_data, guest_data_size);

        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
            goto out;
        }
        thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);
    }
out:
    g_free(big_buf);
    return ret;
}
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    void *argptr;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;

    struct blkpg_ioctl_arg *host_blkpg = (void *)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert blkpg */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert blkpg->data */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));

out:
    return ret;
}
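/*
 * SIOCADDRT/SIOCDELRT need special handling: struct rtentry contains a
 * char *rt_dev device-name string that the generic thunk machinery
 * cannot follow, so do_ioctl_rt() walks the struct field by field and
 * locks the guest string into host memory for just that one member.
 */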
static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    const StructEntry *se;
    const argtype *field_types;
    const int *dst_offsets, *src_offsets;
    int target_size;
    void *argptr;
    abi_ulong *target_rt_dev_ptr = NULL;
    unsigned long *host_rt_dev_ptr = NULL;
    abi_long ret;
    int i;

    assert(ie->access == IOC_W);
    assert(*arg_type == TYPE_PTR);

    arg_type++;
    assert(*arg_type == TYPE_STRUCT);
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    arg_type++;
    assert(*arg_type == (int)STRUCT_rtentry);
    se = struct_entries + *arg_type++;
    assert(se->convert[0] == NULL);
    /* convert struct here to be able to catch rt_dev string */
    field_types = se->field_types;
    dst_offsets = se->field_offsets[THUNK_HOST];
    src_offsets = se->field_offsets[THUNK_TARGET];
    for (i = 0; i < se->nb_fields; i++) {
        if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
            assert(*field_types == TYPE_PTRVOID);
            target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
            host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
            if (*target_rt_dev_ptr != 0) {
                *host_rt_dev_ptr = (unsigned long)lock_user_string(
                                                  tswapal(*target_rt_dev_ptr));
                if (!*host_rt_dev_ptr) {
                    unlock_user(argptr, arg, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                *host_rt_dev_ptr = 0;
            }
            field_types++;
            continue;
        }
        field_types = thunk_convert(buf_temp + dst_offsets[i],
                                    argptr + src_offsets[i],
                                    field_types, THUNK_HOST);
    }
    unlock_user(argptr, arg, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));

    assert(host_rt_dev_ptr != NULL);
    assert(target_rt_dev_ptr != NULL);
    if (*host_rt_dev_ptr != 0) {
        unlock_user((void *)*host_rt_dev_ptr,
                    *target_rt_dev_ptr, 0);
    }
    return ret;
}
static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int sig = target_to_host_signal(arg);
    return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
}
static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
                                    int fd, int cmd, abi_long arg)
{
    struct timeval tv;
    abi_long ret;

    ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
    if (is_error(ret)) {
        return ret;
    }

    if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
        if (copy_to_user_timeval(arg, &tv)) {
            return -TARGET_EFAULT;
        }
    } else {
        if (copy_to_user_timeval64(arg, &tv)) {
            return -TARGET_EFAULT;
        }
    }

    return ret;
}
static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
                                      int fd, int cmd, abi_long arg)
{
    struct timespec ts;
    abi_long ret;

    ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
    if (is_error(ret)) {
        return ret;
    }

    if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
        if (host_to_target_timespec(arg, &ts)) {
            return -TARGET_EFAULT;
        }
    } else {
        if (host_to_target_timespec64(arg, &ts)) {
            return -TARGET_EFAULT;
        }
    }

    return ret;
}
#ifdef TIOCGPTPEER
static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
    return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
}
#endif
#ifdef HAVE_DRM_H

static void unlock_drm_version(struct drm_version *host_ver,
                               struct target_drm_version *target_ver,
                               bool copy)
{
    unlock_user(host_ver->name, target_ver->name,
                copy ? host_ver->name_len : 0);
    unlock_user(host_ver->date, target_ver->date,
                copy ? host_ver->date_len : 0);
    unlock_user(host_ver->desc, target_ver->desc,
                copy ? host_ver->desc_len : 0);
}

static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
                                          struct target_drm_version *target_ver)
{
    memset(host_ver, 0, sizeof(*host_ver));

    __get_user(host_ver->name_len, &target_ver->name_len);
    if (host_ver->name_len) {
        host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
                                   target_ver->name_len, 0);
        if (!host_ver->name) {
            goto err;
        }
    }

    __get_user(host_ver->date_len, &target_ver->date_len);
    if (host_ver->date_len) {
        host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
                                   target_ver->date_len, 0);
        if (!host_ver->date) {
            goto err;
        }
    }

    __get_user(host_ver->desc_len, &target_ver->desc_len);
    if (host_ver->desc_len) {
        host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
                                   target_ver->desc_len, 0);
        if (!host_ver->desc) {
            goto err;
        }
    }

    return 0;
err:
    unlock_drm_version(host_ver, target_ver, false);
    return -TARGET_EFAULT;
}

static inline void host_to_target_drmversion(
                                          struct target_drm_version *target_ver,
                                          struct drm_version *host_ver)
{
    __put_user(host_ver->version_major, &target_ver->version_major);
    __put_user(host_ver->version_minor, &target_ver->version_minor);
    __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
    __put_user(host_ver->name_len, &target_ver->name_len);
    __put_user(host_ver->date_len, &target_ver->date_len);
    __put_user(host_ver->desc_len, &target_ver->desc_len);
    unlock_drm_version(host_ver, target_ver, true);
}
static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg)
{
    struct drm_version *ver;
    struct target_drm_version *target_ver;
    abi_long ret;

    switch (ie->host_cmd) {
    case DRM_IOCTL_VERSION:
        if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
            return -TARGET_EFAULT;
        }
        ver = (struct drm_version *)buf_temp;
        ret = target_to_host_drmversion(ver, target_ver);
        if (!is_error(ret)) {
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
            if (is_error(ret)) {
                unlock_drm_version(ver, target_ver, false);
            } else {
                host_to_target_drmversion(target_ver, ver);
            }
        }
        unlock_user_struct(target_ver, arg, 0);
        return ret;
    }
    return -TARGET_ENOSYS;
}
static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
                                           struct drm_i915_getparam *gparam,
                                           int fd, abi_long arg)
{
    abi_long ret;
    int value;
    struct target_drm_i915_getparam *target_gparam;

    if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
        return -TARGET_EFAULT;
    }

    __get_user(gparam->param, &target_gparam->param);
    gparam->value = &value;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
    put_user_s32(value, target_gparam->value);

    unlock_user_struct(target_gparam, arg, 0);
    return ret;
}

static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
                                  int fd, int cmd, abi_long arg)
{
    switch (ie->host_cmd) {
    case DRM_IOCTL_I915_GETPARAM:
        return do_ioctl_drm_i915_getparam(ie,
                                          (struct drm_i915_getparam *)buf_temp,
                                          fd, arg);
    default:
        return -TARGET_ENOSYS;
    }
}

#endif /* HAVE_DRM_H */
static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
                                        int fd, int cmd, abi_long arg)
{
    struct tun_filter *filter = (struct tun_filter *)buf_temp;
    struct tun_filter *target_filter;
    char *target_addr;

    assert(ie->access == IOC_W);

    target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
    if (!target_filter) {
        return -TARGET_EFAULT;
    }
    filter->flags = tswap16(target_filter->flags);
    filter->count = tswap16(target_filter->count);
    unlock_user(target_filter, arg, 0);

    if (filter->count) {
        if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
            MAX_STRUCT_SIZE) {
            return -TARGET_EFAULT;
        }

        target_addr = lock_user(VERIFY_READ,
                                arg + offsetof(struct tun_filter, addr),
                                filter->count * ETH_ALEN, 1);
        if (!target_addr) {
            return -TARGET_EFAULT;
        }
        memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
        unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
    }

    return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
}
IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
#define IOCTL_IGNORE(cmd) \
    { TARGET_ ## cmd, 0, #cmd },
#include "ioctls.h"
    { 0, 0, },
};
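/*
 * Each IOCTL()/IOCTL_SPECIAL()/IOCTL_IGNORE() use in "ioctls.h" expands
 * to one table entry: target command number, host command number, the
 * command name for logging, the access mode, an optional special-case
 * handler (dofn), and the argument type description that drives the
 * generic thunking in do_ioctl() below. The zero-filled sentinel entry
 * terminates the lookup loop.
 */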
/* ??? Implement proper locking for ioctls. */
/* do_ioctl() must return target values and target errnos. */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    ie = ioctl_entries;
    for (;;) {
        if (ie->target_cmd == 0) {
            qemu_log_mask(
                LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
    if (ie->do_ioctl) {
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    } else if (!ie->host_cmd) {
        /* Some architectures define BSD ioctls in their headers
           that are not implemented in Linux. */
        return -TARGET_ENOSYS;
    }

    switch (arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
    case TYPE_LONG:
    case TYPE_ULONG:
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch (ie->access) {
        case IOC_R:
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                      (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
static const bitmask_transtbl iflag_tbl[] = {
    { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
    { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
    { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
    { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
    { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
    { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
    { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
    { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
    { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
    { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
    { TARGET_IXON, TARGET_IXON, IXON, IXON },
    { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
    { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
    { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
    { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8 },
    { 0, 0, 0, 0 }
};
static const bitmask_transtbl oflag_tbl[] = {
    { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
    { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
    { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
    { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
    { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
    { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
    { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
    { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
    { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
    { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
    { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
    { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
    { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
    { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
    { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
    { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
    { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
    { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
    { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
    { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
    { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
    { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
    { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
    { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
    { 0, 0, 0, 0 }
};
static const bitmask_transtbl cflag_tbl[] = {
    { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
    { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
    { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
    { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
    { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
    { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
    { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
    { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
    { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
    { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
    { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
    { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
    { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
    { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
    { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
    { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
    { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
    { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
    { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
    { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
    { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
    { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
    { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
    { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
    { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
    { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
    { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
    { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
    { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
    { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
    { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
    { 0, 0, 0, 0 }
};
static const bitmask_transtbl lflag_tbl[] = {
    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
    { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC },
    { 0, 0, 0, 0 }
};
static void target_to_host_termios(void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}
static void host_to_target_termios(void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios),
               __alignof__(struct host_termios) },
    .print = print_termios,
};
static bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
      MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
      MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
      MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
      MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
      MAP_NORESERVE, MAP_NORESERVE },
    { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
    /* MAP_STACK had been ignored by the kernel for quite some time.
       Recognize it for the target insofar as we do not want to pass
       it through to the host. */
    { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
    { 0, 0, 0, 0 }
};
/*
 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
 *       TARGET_I386 is defined if TARGET_X86_64 is defined
 */
#if defined(TARGET_I386)

/* NOTE: there is really one LDT for all the threads */
static uint8_t *ldt_table;

static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
{
    int size;
    void *p;

    if (!ldt_table)
        return 0;
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
        size = bytecount;
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
    if (!p)
        return -TARGET_EFAULT;
    /* ??? Should this be byteswapped? */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);
    return size;
}
6090 static abi_long
write_ldt(CPUX86State
*env
,
6091 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
6093 struct target_modify_ldt_ldt_s ldt_info
;
6094 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6095 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
6096 int seg_not_present
, useable
, lm
;
6097 uint32_t *lp
, entry_1
, entry_2
;
6099 if (bytecount
!= sizeof(ldt_info
))
6100 return -TARGET_EINVAL
;
6101 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
6102 return -TARGET_EFAULT
;
6103 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
6104 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
6105 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
6106 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
6107 unlock_user_struct(target_ldt_info
, ptr
, 0);
6109 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
6110 return -TARGET_EINVAL
;
6111 seg_32bit
= ldt_info
.flags
& 1;
6112 contents
= (ldt_info
.flags
>> 1) & 3;
6113 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
6114 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
6115 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
6116 useable
= (ldt_info
.flags
>> 6) & 1;
6120 lm
= (ldt_info
.flags
>> 7) & 1;
6122 if (contents
== 3) {
6124 return -TARGET_EINVAL
;
6125 if (seg_not_present
== 0)
6126 return -TARGET_EINVAL
;
6128 /* allocate the LDT */
6130 env
->ldt
.base
= target_mmap(0,
6131 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
6132 PROT_READ
|PROT_WRITE
,
6133 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
6134 if (env
->ldt
.base
== -1)
6135 return -TARGET_ENOMEM
;
6136 memset(g2h(env
->ldt
.base
), 0,
6137 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
6138 env
->ldt
.limit
= 0xffff;
6139 ldt_table
= g2h(env
->ldt
.base
);
6142 /* NOTE: same code as Linux kernel */
6143 /* Allow LDTs to be cleared by the user. */
6144 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
6147 read_exec_only
== 1 &&
6149 limit_in_pages
== 0 &&
6150 seg_not_present
== 1 &&
6158 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
6159 (ldt_info
.limit
& 0x0ffff);
6160 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
6161 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
6162 (ldt_info
.limit
& 0xf0000) |
6163 ((read_exec_only
^ 1) << 9) |
6165 ((seg_not_present
^ 1) << 15) |
6167 (limit_in_pages
<< 23) |
6171 entry_2
|= (useable
<< 20);
6173 /* Install the new entry ... */
6175 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
6176 lp
[0] = tswap32(entry_1
);
6177 lp
[1] = tswap32(entry_2
);
/* specific and weird i386 syscalls */
static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
                              unsigned long bytecount)
{
    abi_long ret;

    switch (func) {
    case 0:
        ret = read_ldt(ptr, bytecount);
        break;
    case 1:
        ret = write_ldt(env, ptr, bytecount, 1);
        break;
    case 0x11:
        ret = write_ldt(env, ptr, bytecount, 0);
        break;
    default:
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#if defined(TARGET_ABI32)
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        for (i = TARGET_GDT_ENTRY_TLS_MIN; i <= TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0)) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ... */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    return -TARGET_ENOSYS;
}
#else
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    abi_long ret = 0;
    abi_ulong val;
    int idx;

    switch (code) {
    case TARGET_ARCH_SET_GS:
    case TARGET_ARCH_SET_FS:
        if (code == TARGET_ARCH_SET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        cpu_x86_load_seg(env, idx, 0);
        env->segs[idx].base = addr;
        break;
    case TARGET_ARCH_GET_GS:
    case TARGET_ARCH_GET_FS:
        if (code == TARGET_ARCH_GET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        val = env->segs[idx].base;
        if (put_user(val, addr, abi_ulong))
            ret = -TARGET_EFAULT;
        break;
    default:
        ret = -TARGET_EINVAL;
        break;
    }
    return ret;
}
#endif /* defined(TARGET_ABI32) */

#endif /* defined(TARGET_I386) */
#define NEW_STACK_SIZE 0x40000


static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
typedef struct {
    CPUArchState *env;
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    pthread_t thread;
    uint32_t tid;
    abi_ulong child_tidptr;
    abi_ulong parent_tidptr;
    sigset_t sigmask;
} new_thread_info;

static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = env_cpu(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = sys_gettid();
    task_settid(ts);
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);
    /* Enable signals. */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready. */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state. */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
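/*
 * Note the two-stage handshake between clone_func() and do_fork():
 * info->mutex/info->cond let the child tell the parent it has started
 * and published its tid, while clone_lock (taken by the parent before
 * pthread_create and released only after TLS setup is complete) keeps
 * the child from entering cpu_loop() until the parent has finished
 * initializing its state.
 */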
/* do_fork() must return host values and target errnos (unlike most
   do_*() functions). */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = env_cpu(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    sigset_t sigmask;

    flags &= ~CLONE_IGNORED_FLAGS;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;
        }

        ts = g_new0(TaskState, 1);
        init_task_state(ts);

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        /* We create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs_child(new_env, newsp, flags);
        cpu_clone_regs_parent(env, flags);
        new_cpu = env_cpu(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;

        if (flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (flags & CLONE_SETTLS) {
            cpu_set_tls (new_env, newtls);
        }

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (flags & CLONE_CHILD_SETTID) {
            info.child_tidptr = child_tidptr;
        }
        if (flags & CLONE_PARENT_SETTID) {
            info.parent_tidptr = parent_tidptr;
        }

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
        cpu->random_seed = qemu_guest_random_seed_thread_part1();

        /* If this is our first additional thread, we need to ensure we
         * generate code for parallel execution and flush old translations.
         */
        if (!parallel_cpus) {
            parallel_cpus = true;
            tb_flush(cpu);
        }

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;
        }

        /* We can't support custom termination signals */
        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            cpu_clone_regs_child(env, newsp, flags);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(sys_gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(sys_gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            cpu_clone_regs_parent(env, flags);
            fork_end(0);
        }
    }
    return ret;
}
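/*
 * fcntl() command numbers differ between guest and host ABIs, so each
 * TARGET_F_* command is translated to the corresponding host F_* value
 * before the host syscall is issued; note for example that the plain
 * record-lock commands are mapped to the host's 64-bit F_*LK64 variants
 * so that large file offsets survive the round trip.
 */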
/* Warning: doesn't handle Linux-specific flags... */
static int target_to_host_fcntl_cmd(int cmd)
{
    int ret;

    switch(cmd) {
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
    case TARGET_F_OFD_GETLK:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = cmd;
        break;
    case TARGET_F_GETLK:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW:
        ret = F_SETLKW64;
        break;
    case TARGET_F_GETOWN:
        ret = F_GETOWN;
        break;
    case TARGET_F_SETOWN:
        ret = F_SETOWN;
        break;
    case TARGET_F_GETSIG:
        ret = F_GETSIG;
        break;
    case TARGET_F_SETSIG:
        ret = F_SETSIG;
        break;
#if TARGET_ABI_BITS == 32
    case TARGET_F_GETLK64:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK64:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW64:
        ret = F_SETLKW64;
        break;
#endif
    case TARGET_F_SETLEASE:
        ret = F_SETLEASE;
        break;
    case TARGET_F_GETLEASE:
        ret = F_GETLEASE;
        break;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        ret = F_DUPFD_CLOEXEC;
        break;
#endif
    case TARGET_F_NOTIFY:
        ret = F_NOTIFY;
        break;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = F_GETOWN_EX;
        break;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        ret = F_SETOWN_EX;
        break;
#endif
#ifdef F_SETPIPE_SZ
    case TARGET_F_SETPIPE_SZ:
        ret = F_SETPIPE_SZ;
        break;
    case TARGET_F_GETPIPE_SZ:
        ret = F_GETPIPE_SZ;
        break;
#endif
#ifdef F_ADD_SEALS
    case TARGET_F_ADD_SEALS:
        ret = F_ADD_SEALS;
        break;
    case TARGET_F_GET_SEALS:
        ret = F_GET_SEALS;
        break;
#endif
    default:
        ret = -TARGET_EINVAL;
        break;
    }

#if defined(__powerpc64__)
    /* On PPC64, the glibc headers have F_*LK* defined to 12, 13 and 14,
     * which is not supported by the kernel.  The glibc fcntl call actually
     * adjusts them to 5, 6 and 7 before making the syscall().  Since we make
     * the syscall directly, adjust to what is supported by the kernel.
     */
    if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
        ret -= F_GETLK64 - 5;
    }
#endif

    return ret;
}
#define FLOCK_TRANSTBL \
    switch (type) { \
    TRANSTBL_CONVERT(F_RDLCK); \
    TRANSTBL_CONVERT(F_WRLCK); \
    TRANSTBL_CONVERT(F_UNLCK); \
    }

static int target_to_host_flock(int type)
{
#define TRANSTBL_CONVERT(a) case TARGET_##a: return a
    FLOCK_TRANSTBL
#undef TRANSTBL_CONVERT
    return -TARGET_EINVAL;
}

static int host_to_target_flock(int type)
{
#define TRANSTBL_CONVERT(a) case a: return TARGET_##a
    FLOCK_TRANSTBL
#undef TRANSTBL_CONVERT
    /* if we don't know how to convert the value coming
     * from the host we copy to the target field as-is
     */
    return type;
}
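/*
 * TRANSTBL_CONVERT is an X-macro: each of the two functions above defines
 * it, expands FLOCK_TRANSTBL, and undefines it again.  In
 * target_to_host_flock() the table expands to, e.g.,
 *     case TARGET_F_RDLCK: return F_RDLCK;
 * while host_to_target_flock() expands the same table in the opposite
 * direction.
 */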
static inline abi_long copy_from_user_flock(struct flock64 *fl,
                                            abi_ulong target_flock_addr)
{
    struct target_flock *target_fl;
    int l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}
static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
                                          const struct flock64 *fl)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
#if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
                                                   abi_ulong target_flock_addr)
{
    struct target_oabi_flock64 *target_fl;
    int l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}
static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_oabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
#endif
static inline abi_long copy_from_user_flock64(struct flock64 *fl,
                                              abi_ulong target_flock_addr)
{
    struct target_flock64 *target_fl;
    int l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}
static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
                                            const struct flock64 *fl)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
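/*
 * do_fcntl() dispatches on the *target* command so that lock structures,
 * signal numbers, owner structs and status flags can be converted on the
 * way in and out; everything else falls through to safe_fcntl() with the
 * translated command.
 */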
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock64 fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock(arg, &fl64);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETLK64:
    case TARGET_F_OFD_GETLK:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl64);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    case TARGET_F_SETSIG:
        ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
        break;

    case TARGET_F_GETSIG:
        ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
        break;

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
    case TARGET_F_ADD_SEALS:
    case TARGET_F_GET_SEALS:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
#ifdef USE_UID16

static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}

static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}

static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}

static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}

static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
/* We must do direct syscalls for setting UID/GID, because we want to
 * implement the Linux system call semantics of "change only for this thread",
 * not the libc/POSIX semantics of "change for all threads in process".
 * (See http://ewontfix.com/17/ for more details.)
 * We use the 32-bit version of the syscalls if present; if it is not
 * then either the host architecture supports 32-bit UIDs natively with
 * the standard syscall, or the 16-bit UID is the best we can do.
 */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif

_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
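/*
 * Note: sys_setuid() and friends above therefore change credentials only
 * for the calling thread, which is what the guest kernel ABI expects.
 * A guest libc that wants the process-wide POSIX semantics implements
 * them itself on top of the raw per-thread syscall, so emulating the raw
 * syscall here keeps both behaviours correct.
 */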
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

    thunk_init(STRUCT_MAX);

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
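/*
 * Several 64-bit-offset syscalls split the offset across two 32-bit
 * registers.  On ABIs where regpairs_aligned() is true the pair must
 * start on an even register, which inserts a padding argument first, so
 * the helpers below shift the argument window before reassembling the
 * offset with target_offset64(arg2, arg3).
 */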
#ifdef TARGET_NR_truncate64
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif

#ifdef TARGET_NR_ftruncate64
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#if defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
                                                 abi_ulong target_addr)
{
    if (target_to_host_timespec(&host_its->it_interval, target_addr +
                                offsetof(struct target_itimerspec,
                                         it_interval)) ||
        target_to_host_timespec(&host_its->it_value, target_addr +
                                offsetof(struct target_itimerspec,
                                         it_value))) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif

#if defined(TARGET_NR_timer_settime64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
                                                   abi_ulong target_addr)
{
    if (target_to_host_timespec64(&host_its->it_interval, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_interval)) ||
        target_to_host_timespec64(&host_its->it_value, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_value))) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif

#if ((defined(TARGET_NR_timerfd_gettime) || \
      defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
                                                 struct itimerspec *host_its)
{
    if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
                                                       it_interval),
                                &host_its->it_interval) ||
        host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
                                                       it_value),
                                &host_its->it_value)) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif

#if ((defined(TARGET_NR_timerfd_gettime64) || \
      defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
                                                   struct itimerspec *host_its)
{
    if (host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_interval),
                                  &host_its->it_interval) ||
        host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_value),
                                  &host_its->it_value)) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif
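/*
 * adjtimex()-family conversion: struct timex is copied field by field in
 * both directions because every member is a separate abi_long in the
 * target layout and must be byte-swapped individually.
 */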
#if defined(TARGET_NR_adjtimex) || \
    (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
static inline abi_long target_to_host_timex(struct timex *host_tx,
                                            abi_long target_addr)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_timex(abi_long target_addr,
                                            struct timex *host_tx)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
#endif
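/*
 * The _kernel_timex variants below differ from the plain timex converters
 * in one detail: the 'time' member does not match the layout of the other
 * fields, so it is transferred separately with
 * copy_{from,to}_user_timeval64() before the structure is locked.
 */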
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
static inline abi_long target_to_host_timex64(struct timex *host_tx,
                                              abi_long target_addr)
{
    struct target__kernel_timex *target_tx;

    if (copy_from_user_timeval64(&host_tx->time, target_addr +
                                 offsetof(struct target__kernel_timex,
                                          time))) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_timex64(abi_long target_addr,
                                              struct timex *host_tx)
{
    struct target__kernel_timex *target_tx;

    if (copy_to_user_timeval64(target_addr +
                               offsetof(struct target__kernel_timex, time),
                               &host_tx->time)) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
#endif
static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
                                               abi_ulong target_addr)
{
    struct target_sigevent *target_sevp;

    if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    /* This union is awkward on 64 bit systems because it has a 32 bit
     * integer and a pointer in it; we follow the conversion approach
     * used for handling sigval types in signal.c so the guest should get
     * the correct value back even if we did a 64 bit byteswap and it's
     * using the 32 bit integer.
     */
    host_sevp->sigev_value.sival_ptr =
        (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
    host_sevp->sigev_signo =
        target_to_host_signal(tswap32(target_sevp->sigev_signo));
    host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
    host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);

    unlock_user_struct(target_sevp, target_addr, 1);
    return 0;
}
#if defined(TARGET_NR_mlockall)
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }
#ifdef MCL_ONFAULT
    if (arg & TARGET_MCL_ONFAULT) {
        result |= MCL_ONFAULT;
    }
#endif

    return result;
}
#endif
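/*
 * host_to_target_stat64() has to cope with three different guest layouts:
 * the ARM EABI variant of struct stat64, targets that define their own
 * struct target_stat64, and targets where plain struct target_stat
 * already carries 64-bit fields.  All paths zero the structure first and
 * then store field by field with __put_user().
 */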
#if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
     defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
     defined(TARGET_NR_newfstatat))
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif
#if defined(TARGET_NR_statx) && defined(__NR_statx)
static inline abi_long host_to_target_statx(struct target_statx *host_stx,
                                            abi_ulong target_addr)
{
    struct target_statx *target_stx;

    if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    memset(target_stx, 0, sizeof(*target_stx));

    __put_user(host_stx->stx_mask, &target_stx->stx_mask);
    __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
    __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
    __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
    __put_user(host_stx->stx_uid, &target_stx->stx_uid);
    __put_user(host_stx->stx_gid, &target_stx->stx_gid);
    __put_user(host_stx->stx_mode, &target_stx->stx_mode);
    __put_user(host_stx->stx_ino, &target_stx->stx_ino);
    __put_user(host_stx->stx_size, &target_stx->stx_size);
    __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
    __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
    __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
    __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
    __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
    __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
    __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
    __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
    __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
    __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
    __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
    __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
    __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
    __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);

    unlock_user_struct(target_stx, target_addr, 1);

    return 0;
}
#endif
static int do_sys_futex(int *uaddr, int op, int val,
                        const struct timespec *timeout, int *uaddr2,
                        int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    g_assert_not_reached();
}

static int do_safe_futex(int *uaddr, int op, int val,
                         const struct timespec *timeout, int *uaddr2,
                         int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
                                           val3));
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#endif /* HOST_LONG_BITS == 64 */
    return -TARGET_ENOSYS;
}
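/*
 * do_sys_futex() returns the raw host result (it is also used on the
 * thread-exit path, where the caller handles errno itself), whereas
 * do_safe_futex() wraps the call in get_errno() and can be interrupted
 * by guest signals, so it is the one used to implement the guest futex
 * syscalls below.
 */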
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However, implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  They're probably useless anyway, because guest atomic
   operations won't work either.  */
#if defined(TARGET_NR_futex)
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            target_to_host_timespec(pts, timeout);
        } else {
            pts = NULL;
        }
        return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
    case FUTEX_WAKE:
        return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
    case FUTEX_FD:
        return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
                             (base_op == FUTEX_CMP_REQUEUE
                              ? tswap32(val3)
                              : val3));
    default:
        return -TARGET_ENOSYS;
    }
}
#endif

#if defined(TARGET_NR_futex_time64)
static int do_futex_time64(target_ulong uaddr, int op, int val, target_ulong timeout,
                           target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            if (target_to_host_timespec64(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            pts = NULL;
        }
        return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
    case FUTEX_WAKE:
        return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
    case FUTEX_FD:
        return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
                             (base_op == FUTEX_CMP_REQUEUE
                              ? tswap32(val3)
                              : val3));
    default:
        return -TARGET_ENOSYS;
    }
}
#endif
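/*
 * struct file_handle is variable-sized: the kernel appends handle_bytes
 * opaque bytes after the fixed header, so both helpers below compute
 * total_size = sizeof(struct file_handle) + handle_bytes before locking
 * the guest buffer.
 */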
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
#if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)

static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
#endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
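/*
 * The open_self_*() helpers below synthesize the contents of files under
 * /proc/self/ (and a few other /proc entries) from the guest's point of
 * view: reading QEMU's own /proc/self/maps or auxv would expose host
 * addresses and layout, so do_openat() redirects such opens to a
 * temporary file filled in by one of these functions.
 */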
static int open_self_cmdline(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
    int i;

    for (i = 0; i < bprm->argc; i++) {
        size_t len = strlen(bprm->argv[i]) + 1;

        if (write(fd, bprm->argv[i], len) != len) {
            return -1;
        }
    }

    return 0;
}
static int open_self_maps(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    GSList *map_info = read_self_maps();
    GSList *s;
    int count;

    for (s = map_info; s; s = g_slist_next(s)) {
        MapInfo *e = (MapInfo *) s->data;

        if (h2g_valid(e->start)) {
            unsigned long min = e->start;
            unsigned long max = e->end;
            int flags = page_get_flags(h2g(min));
            const char *path;

            max = h2g_valid(max - 1) ?
                max : (uintptr_t) g2h(GUEST_ADDR_MAX) + 1;

            if (page_check_range(h2g(min), max - min, flags) == -1) {
                continue;
            }

            if (h2g(min) == ts->info->stack_limit) {
                path = "[stack]";
            } else {
                path = e->path;
            }

            count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
                            " %c%c%c%c %08" PRIx64 " %s %"PRId64,
                            h2g(min), h2g(max - 1) + 1,
                            e->is_read ? 'r' : '-',
                            e->is_write ? 'w' : '-',
                            e->is_exec ? 'x' : '-',
                            e->is_priv ? 'p' : '-',
                            (uint64_t) e->offset, e->dev, e->inode);
            if (path) {
                dprintf(fd, "%*s%s\n", 73 - count, "", path);
            } else {
                dprintf(fd, "\n");
            }
        }
    }

    free_self_maps(map_info);

#ifdef TARGET_VSYSCALL_PAGE
    /*
     * We only support execution from the vsyscall page.
     * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
     */
    count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
                    " --xp 00000000 00:00 0",
                    TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
    dprintf(fd, "%*s%s\n", 73 - count, "", "[vsyscall]");
#endif

    return 0;
}
static int open_self_stat(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    g_autoptr(GString) buf = g_string_new(NULL);
    int i;

    for (i = 0; i < 44; i++) {
        if (i == 0) {
            /* pid */
            g_string_printf(buf, FMT_pid " ", getpid());
        } else if (i == 1) {
            /* app name */
            gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
            bin = bin ? bin + 1 : ts->bprm->argv[0];
            g_string_printf(buf, "(%.15s) ", bin);
        } else if (i == 27) {
            /* stack bottom */
            g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
        } else {
            /* for the rest, there is MasterCard */
            g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
        }

        if (write(fd, buf->str, buf->len) != buf->len) {
            return -1;
        }
    }

    return 0;
}
static int open_self_auxv(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
    defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}
#endif
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
static int open_net_route(void *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -1;
    }

    /* read header */

    read = getline(&line, &len, fp);
    dprintf(fd, "%s", line);

    /* read routes */

    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        int fields;

        fields = sscanf(line,
                        "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                        iface, &dest, &gw, &flags, &refcnt, &use, &metric,
                        &mask, &mtu, &window, &irtt);
        if (fields != 11) {
            continue;
        }
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
#endif
#if defined(TARGET_SPARC)
static int open_cpuinfo(void *cpu_env, int fd)
{
    dprintf(fd, "type\t\t: sun4u\n");
    return 0;
}
#endif

#if defined(TARGET_HPPA)
static int open_cpuinfo(void *cpu_env, int fd)
{
    dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
    dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
    dprintf(fd, "capabilities\t: os32\n");
    dprintf(fd, "model\t\t: 9000/778/B160L\n");
    dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
    return 0;
}
#endif

#if defined(TARGET_M68K)
static int open_hardware(void *cpu_env, int fd)
{
    dprintf(fd, "Model:\t\tqemu-m68k\n");
    return 0;
}
#endif
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;
        int (*fill)(void *cpu_env, int fd);
        int (*cmp)(const char *s1, const char *s2);
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
#if defined(TARGET_SPARC) || defined(TARGET_HPPA)
        { "/proc/cpuinfo", open_cpuinfo, is_proc },
#endif
#if defined(TARGET_M68K)
        { "/proc/hardware", open_hardware, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    if (is_proc_myself(pathname, "exe")) {
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
#define TIMER_MAGIC 0x0caf0000
#define TIMER_MAGIC_MASK 0xffff0000

/* Convert QEMU provided timer ID back to internal 16bit index format */
static target_timer_t get_timer_id(abi_long arg)
{
    target_timer_t timerid = arg;

    if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
        return -TARGET_EINVAL;
    }

    timerid &= 0xffff;

    if (timerid >= ARRAY_SIZE(g_posix_timers)) {
        return -TARGET_EINVAL;
    }

    return timerid;
}
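/*
 * Example: the timer at index 3 in g_posix_timers is exposed to the guest
 * as (3 | TIMER_MAGIC) == 0x0caf0003; get_timer_id() verifies the magic
 * upper half and recovers the 16-bit index.
 */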
static int target_to_host_cpu_mask(unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }
    memset(host_mask, 0, host_size);

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val;

        __get_user(val, &target_mask[i]);
        for (j = 0; j < target_bits; j++, bit++) {
            if (val & (1UL << j)) {
                host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
            }
        }
    }

    unlock_user(target_mask, target_addr, 0);
    return 0;
}

static int host_to_target_cpu_mask(const unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val = 0;

        for (j = 0; j < target_bits; j++, bit++) {
            if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
                val |= 1UL << j;
            }
        }
        __put_user(val, &target_mask[i]);
    }

    unlock_user(target_mask, target_addr, target_size);
    return 0;
}
/* This is an internal helper for do_syscall so that it is easier
 * to have a single return point, so that actions, such as logging
 * of syscall results, can be performed.
 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
 */
static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
                            abi_long arg2, abi_long arg3, abi_long arg4,
                            abi_long arg5, abi_long arg6, abi_long arg7,
                            abi_long arg8)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long ret;
#if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
    || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
    || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
    || defined(TARGET_NR_statx)
    struct stat st;
#endif
#if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
    || defined(TARGET_NR_fstatfs)
    struct statfs stfs;
#endif
    void *p;

    switch(num) {
    case TARGET_NR_exit:
        /* In old applications this may be used to implement _exit(2).
           However in threaded applications it is used for thread termination,
           and _exit_group is used for application termination.
           Do thread termination if we have more than one thread.  */

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        pthread_mutex_lock(&clone_lock);

        if (CPU_NEXT(first_cpu)) {
            TaskState *ts = cpu->opaque;

            object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
            object_unref(OBJECT(cpu));
            /*
             * At this point the CPU should be unrealized and removed
             * from cpu lists. We can clean-up the rest of the thread
             * data without the lock held.
             */

            pthread_mutex_unlock(&clone_lock);

            if (ts->child_tidptr) {
                put_user_u32(0, ts->child_tidptr);
                do_sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
                             NULL, NULL, 0);
            }
            thread_cpu = NULL;
            g_free(ts);
            rcu_unregister_thread();
            pthread_exit(NULL);
        }

        pthread_mutex_unlock(&clone_lock);
        preexit_cleanup(cpu_env, arg1);
        _exit(arg1);
        return 0; /* avoid warning */
    case TARGET_NR_read:
        if (arg2 == 0 && arg3 == 0) {
            return get_errno(safe_read(arg1, 0, 0));
        } else {
            if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
                return -TARGET_EFAULT;
            ret = get_errno(safe_read(arg1, p, arg3));
            if (ret >= 0 &&
                fd_trans_host_to_target_data(arg1)) {
                ret = fd_trans_host_to_target_data(arg1)(p, ret);
            }
            unlock_user(p, arg2, ret);
        }
        return ret;
    case TARGET_NR_write:
        if (arg2 == 0 && arg3 == 0) {
            return get_errno(safe_write(arg1, 0, 0));
        }
        if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
            return -TARGET_EFAULT;
        if (fd_trans_target_to_host_data(arg1)) {
            void *copy = g_malloc(arg3);
            memcpy(copy, p, arg3);
            ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
            if (ret >= 0) {
                ret = get_errno(safe_write(arg1, copy, ret));
            }
            g_free(copy);
        } else {
            ret = get_errno(safe_write(arg1, p, arg3));
        }
        unlock_user(p, arg2, 0);
        return ret;
#ifdef TARGET_NR_open
    case TARGET_NR_open:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
                                  target_to_host_bitmask(arg2, fcntl_flags_tbl),
                                  arg3));
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_openat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(do_openat(cpu_env, arg1, p,
                                  target_to_host_bitmask(arg3, fcntl_flags_tbl),
                                  arg4));
        fd_trans_unregister(ret);
        unlock_user(p, arg2, 0);
        return ret;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
    case TARGET_NR_name_to_handle_at:
        ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
        return ret;
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
    case TARGET_NR_open_by_handle_at:
        ret = do_open_by_handle_at(arg1, arg2, arg3);
        fd_trans_unregister(ret);
        return ret;
#endif
    case TARGET_NR_close:
        fd_trans_unregister(arg1);
        return get_errno(close(arg1));

    case TARGET_NR_brk:
        return do_brk(arg1);
#ifdef TARGET_NR_fork
    case TARGET_NR_fork:
        return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
#endif
8357 case TARGET_NR_waitpid
:
8360 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
8361 if (!is_error(ret
) && arg2
&& ret
8362 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
8363 return -TARGET_EFAULT
;
8367 #ifdef TARGET_NR_waitid
8368 case TARGET_NR_waitid
:
8372 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
8373 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
8374 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
8375 return -TARGET_EFAULT
;
8376 host_to_target_siginfo(p
, &info
);
8377 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
8382 #ifdef TARGET_NR_creat /* not on alpha */
8383 case TARGET_NR_creat
:
8384 if (!(p
= lock_user_string(arg1
)))
8385 return -TARGET_EFAULT
;
8386 ret
= get_errno(creat(p
, arg2
));
8387 fd_trans_unregister(ret
);
8388 unlock_user(p
, arg1
, 0);
8391 #ifdef TARGET_NR_link
8392 case TARGET_NR_link
:
8395 p
= lock_user_string(arg1
);
8396 p2
= lock_user_string(arg2
);
8398 ret
= -TARGET_EFAULT
;
8400 ret
= get_errno(link(p
, p2
));
8401 unlock_user(p2
, arg2
, 0);
8402 unlock_user(p
, arg1
, 0);
8406 #if defined(TARGET_NR_linkat)
8407 case TARGET_NR_linkat
:
8411 return -TARGET_EFAULT
;
8412 p
= lock_user_string(arg2
);
8413 p2
= lock_user_string(arg4
);
8415 ret
= -TARGET_EFAULT
;
8417 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
8418 unlock_user(p
, arg2
, 0);
8419 unlock_user(p2
, arg4
, 0);
8423 #ifdef TARGET_NR_unlink
8424 case TARGET_NR_unlink
:
8425 if (!(p
= lock_user_string(arg1
)))
8426 return -TARGET_EFAULT
;
8427 ret
= get_errno(unlink(p
));
8428 unlock_user(p
, arg1
, 0);
8431 #if defined(TARGET_NR_unlinkat)
8432 case TARGET_NR_unlinkat
:
8433 if (!(p
= lock_user_string(arg2
)))
8434 return -TARGET_EFAULT
;
8435 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
8436 unlock_user(p
, arg2
, 0);
    case TARGET_NR_execve:
        {
            char **argp, **envp;
            int argc, envc;
            abi_ulong gp;
            abi_ulong guest_argp;
            abi_ulong guest_envp;
            abi_ulong addr;
            char **q;
            int total_size = 0;

            argc = 0;
            guest_argp = arg2;
            for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
                if (get_user_ual(addr, gp))
                    return -TARGET_EFAULT;
                if (!addr)
                    break;
                argc++;
            }
            envc = 0;
            guest_envp = arg3;
            for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
                if (get_user_ual(addr, gp))
                    return -TARGET_EFAULT;
                if (!addr)
                    break;
                envc++;
            }

            argp = g_new0(char *, argc + 1);
            envp = g_new0(char *, envc + 1);

            for (gp = guest_argp, q = argp; gp;
                  gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp))
                    goto execve_efault;
                if (!addr)
                    break;
                if (!(*q = lock_user_string(addr)))
                    goto execve_efault;
                total_size += strlen(*q) + 1;
            }
            *q = NULL;

            for (gp = guest_envp, q = envp; gp;
                  gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp))
                    goto execve_efault;
                if (!addr)
                    break;
                if (!(*q = lock_user_string(addr)))
                    goto execve_efault;
                total_size += strlen(*q) + 1;
            }
            *q = NULL;

            if (!(p = lock_user_string(arg1)))
                goto execve_efault;
            /* Although execve() is not an interruptible syscall it is
             * a special case where we must use the safe_syscall wrapper:
             * if we allow a signal to happen before we make the host
             * syscall then we will 'lose' it, because at the point of
             * execve the process leaves QEMU's control. So we use the
             * safe syscall wrapper to ensure that we either take the
             * signal as a guest signal, or else it does not happen
             * before the execve completes and makes it the other
             * program's problem.
             */
            ret = get_errno(safe_execve(p, argp, envp));
            unlock_user(p, arg1, 0);

            goto execve_end;

        execve_efault:
            ret = -TARGET_EFAULT;

        execve_end:
            for (gp = guest_argp, q = argp; *q;
                  gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp)
                    || !addr)
                    break;
                unlock_user(*q, addr, 0);
            }
            for (gp = guest_envp, q = envp; *q;
                  gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp)
                    || !addr)
                    break;
                unlock_user(*q, addr, 0);
            }

            g_free(argp);
            g_free(envp);
        }
        return ret;
    case TARGET_NR_chdir:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chdir(p));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_time
    case TARGET_NR_time:
        {
            time_t host_time;
            ret = get_errno(time(&host_time));
            if (!is_error(ret)
                && arg1
                && put_user_sal(host_time, arg1))
                return -TARGET_EFAULT;
        }
        return ret;
#endif
#ifdef TARGET_NR_mknod
    case TARGET_NR_mknod:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(mknod(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_mknodat)
    case TARGET_NR_mknodat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(mknodat(arg1, p, arg3, arg4));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_chmod
    case TARGET_NR_chmod:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chmod(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_lseek
    case TARGET_NR_lseek:
        return get_errno(lseek(arg1, arg2, arg3));
#endif
#if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxpid:
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
        return get_errno(getpid());
#endif
#ifdef TARGET_NR_getpid
    case TARGET_NR_getpid:
        return get_errno(getpid());
#endif
    case TARGET_NR_mount:
        {
            /* need to look at the data field */
            void *p2, *p3;

            if (arg1) {
                p = lock_user_string(arg1);
                if (!p) {
                    return -TARGET_EFAULT;
                }
            } else {
                p = NULL;
            }

            p2 = lock_user_string(arg2);
            if (!p2) {
                if (arg1) {
                    unlock_user(p, arg1, 0);
                }
                return -TARGET_EFAULT;
            }

            if (arg3) {
                p3 = lock_user_string(arg3);
                if (!p3) {
                    if (arg1) {
                        unlock_user(p, arg1, 0);
                    }
                    unlock_user(p2, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                p3 = NULL;
            }

            /* FIXME - arg5 should be locked, but it isn't clear how to
             * do that since it's not guaranteed to be a NULL-terminated
             * string.
             */
            if (!arg5) {
                ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
            } else {
                ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
            }
            ret = get_errno(ret);

            if (arg1) {
                unlock_user(p, arg1, 0);
            }
            unlock_user(p2, arg2, 0);
            if (arg3) {
                unlock_user(p3, arg3, 0);
            }
        }
        return ret;
#if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
#if defined(TARGET_NR_umount)
    case TARGET_NR_umount:
#endif
#if defined(TARGET_NR_oldumount)
    case TARGET_NR_oldumount:
#endif
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(umount(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_stime /* not on alpha */
    case TARGET_NR_stime:
        {
            struct timespec ts;
            ts.tv_nsec = 0;
            if (get_user_sal(ts.tv_sec, arg1)) {
                return -TARGET_EFAULT;
            }
            return get_errno(clock_settime(CLOCK_REALTIME, &ts));
        }
#endif
#ifdef TARGET_NR_alarm /* not on alpha */
    case TARGET_NR_alarm:
        return alarm(arg1);
#endif
#ifdef TARGET_NR_pause /* not on alpha */
    case TARGET_NR_pause:
        if (!block_signals()) {
            sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
        }
        return -TARGET_EINTR;
#endif
#ifdef TARGET_NR_utime
    case TARGET_NR_utime:
        {
            struct utimbuf tbuf, *host_tbuf;
            struct target_utimbuf *target_tbuf;
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
                    return -TARGET_EFAULT;
                tbuf.actime = tswapal(target_tbuf->actime);
                tbuf.modtime = tswapal(target_tbuf->modtime);
                unlock_user_struct(target_tbuf, arg2, 0);
                host_tbuf = &tbuf;
            } else {
                host_tbuf = NULL;
            }
            if (!(p = lock_user_string(arg1)))
                return -TARGET_EFAULT;
            ret = get_errno(utime(p, host_tbuf));
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_utimes
    case TARGET_NR_utimes:
        {
            struct timeval *tvp, tv[2];
            if (arg2) {
                if (copy_from_user_timeval(&tv[0], arg2)
                    || copy_from_user_timeval(&tv[1],
                                              arg2 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
                tvp = tv;
            } else {
                tvp = NULL;
            }
            if (!(p = lock_user_string(arg1)))
                return -TARGET_EFAULT;
            ret = get_errno(utimes(p, tvp));
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_futimesat)
    case TARGET_NR_futimesat:
        {
            struct timeval *tvp, tv[2];
            if (arg3) {
                if (copy_from_user_timeval(&tv[0], arg3)
                    || copy_from_user_timeval(&tv[1],
                                              arg3 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
                tvp = tv;
            } else {
                tvp = NULL;
            }
            if (!(p = lock_user_string(arg2))) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(futimesat(arg1, path(p), tvp));
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
8745 #ifdef TARGET_NR_access
8746 case TARGET_NR_access
:
8747 if (!(p
= lock_user_string(arg1
))) {
8748 return -TARGET_EFAULT
;
8750 ret
= get_errno(access(path(p
), arg2
));
8751 unlock_user(p
, arg1
, 0);
8754 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8755 case TARGET_NR_faccessat
:
8756 if (!(p
= lock_user_string(arg2
))) {
8757 return -TARGET_EFAULT
;
8759 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
8760 unlock_user(p
, arg2
, 0);
8763 #ifdef TARGET_NR_nice /* not on alpha */
8764 case TARGET_NR_nice
:
8765 return get_errno(nice(arg1
));
8767 case TARGET_NR_sync
:
8770 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8771 case TARGET_NR_syncfs
:
8772 return get_errno(syncfs(arg1
));
8774 case TARGET_NR_kill
:
8775 return get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
8776 #ifdef TARGET_NR_rename
8777 case TARGET_NR_rename
:
8780 p
= lock_user_string(arg1
);
8781 p2
= lock_user_string(arg2
);
8783 ret
= -TARGET_EFAULT
;
8785 ret
= get_errno(rename(p
, p2
));
8786 unlock_user(p2
, arg2
, 0);
8787 unlock_user(p
, arg1
, 0);
8791 #if defined(TARGET_NR_renameat)
8792 case TARGET_NR_renameat
:
8795 p
= lock_user_string(arg2
);
8796 p2
= lock_user_string(arg4
);
8798 ret
= -TARGET_EFAULT
;
8800 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
8801 unlock_user(p2
, arg4
, 0);
8802 unlock_user(p
, arg2
, 0);
8806 #if defined(TARGET_NR_renameat2)
8807 case TARGET_NR_renameat2
:
8810 p
= lock_user_string(arg2
);
8811 p2
= lock_user_string(arg4
);
8813 ret
= -TARGET_EFAULT
;
8815 ret
= get_errno(sys_renameat2(arg1
, p
, arg3
, p2
, arg5
));
8817 unlock_user(p2
, arg4
, 0);
8818 unlock_user(p
, arg2
, 0);
8822 #ifdef TARGET_NR_mkdir
8823 case TARGET_NR_mkdir
:
8824 if (!(p
= lock_user_string(arg1
)))
8825 return -TARGET_EFAULT
;
8826 ret
= get_errno(mkdir(p
, arg2
));
8827 unlock_user(p
, arg1
, 0);
8830 #if defined(TARGET_NR_mkdirat)
8831 case TARGET_NR_mkdirat
:
8832 if (!(p
= lock_user_string(arg2
)))
8833 return -TARGET_EFAULT
;
8834 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
8835 unlock_user(p
, arg2
, 0);
8838 #ifdef TARGET_NR_rmdir
8839 case TARGET_NR_rmdir
:
8840 if (!(p
= lock_user_string(arg1
)))
8841 return -TARGET_EFAULT
;
8842 ret
= get_errno(rmdir(p
));
8843 unlock_user(p
, arg1
, 0);
8847 ret
= get_errno(dup(arg1
));
8849 fd_trans_dup(arg1
, ret
);
8852 #ifdef TARGET_NR_pipe
8853 case TARGET_NR_pipe
:
8854 return do_pipe(cpu_env
, arg1
, 0, 0);
8856 #ifdef TARGET_NR_pipe2
8857 case TARGET_NR_pipe2
:
8858 return do_pipe(cpu_env
, arg1
,
8859 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
8861 case TARGET_NR_times
:
8863 struct target_tms
*tmsp
;
8865 ret
= get_errno(times(&tms
));
8867 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
8869 return -TARGET_EFAULT
;
8870 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
8871 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
8872 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
8873 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
8876 ret
= host_to_target_clock_t(ret
);
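    /*
     * times() traffics in clock_t ticks: each struct field is run through
     * host_to_target_clock_t() to get the guest's clock_t representation
     * and then byte-swapped (tswapal) into guest order; the syscall's
     * return value is itself a tick count and is converted the same way.
     */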
    case TARGET_NR_acct:
        if (arg1 == 0) {
            /* A NULL path disables process accounting. */
            ret = get_errno(acct(NULL));
        } else {
            if (!(p = lock_user_string(arg1))) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(acct(path(p)));
            unlock_user(p, arg1, 0);
        }
        return ret;
#ifdef TARGET_NR_umount2
    case TARGET_NR_umount2:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(umount2(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_ioctl:
        return do_ioctl(arg1, arg2, arg3);
#ifdef TARGET_NR_fcntl
    case TARGET_NR_fcntl:
        return do_fcntl(arg1, arg2, arg3);
#endif
    case TARGET_NR_setpgid:
        return get_errno(setpgid(arg1, arg2));
    case TARGET_NR_umask:
        return get_errno(umask(arg1));
    case TARGET_NR_chroot:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chroot(p));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_dup2
    case TARGET_NR_dup2:
        ret = get_errno(dup2(arg1, arg2));
        if (ret >= 0) {
            fd_trans_dup(arg1, arg2);
        }
        return ret;
#endif
#if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
    case TARGET_NR_dup3:
    {
        int host_flags;

        if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
            return -TARGET_EINVAL;
        }
        host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
        ret = get_errno(dup3(arg1, arg2, host_flags));
        if (ret >= 0) {
            fd_trans_dup(arg1, arg2);
        }
        return ret;
    }
#endif
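    /*
     * For dup/dup2/dup3 above, fd_trans_dup() copies any fd-translation
     * state attached to the old descriptor (used e.g. for netlink or
     * signalfd fds) onto the new one, so translated I/O keeps working
     * on the duplicate.
     */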
#ifdef TARGET_NR_getppid /* not on alpha */
    case TARGET_NR_getppid:
        return get_errno(getppid());
#endif
#ifdef TARGET_NR_getpgrp
    case TARGET_NR_getpgrp:
        return get_errno(getpgrp());
#endif
    case TARGET_NR_setsid:
        return get_errno(setsid());
#ifdef TARGET_NR_sigaction
    case TARGET_NR_sigaction:
        {
#if defined(TARGET_ALPHA)
            struct target_sigaction act, oact, *pact = 0;
            struct target_old_sigaction *old_act;
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask);
                act.sa_flags = old_act->sa_flags;
                act.sa_restorer = 0;
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            }
            ret = get_errno(do_sigaction(arg1, pact, &oact));
            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_mask = oact.sa_mask.sig[0];
                old_act->sa_flags = oact.sa_flags;
                unlock_user_struct(old_act, arg3, 1);
            }
#elif defined(TARGET_MIPS)
            struct target_sigaction act, oact, *pact, *old_act;

            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
                act.sa_flags = old_act->sa_flags;
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            } else {
                pact = NULL;
            }

            ret = get_errno(do_sigaction(arg1, pact, &oact));

            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_flags = oact.sa_flags;
                old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
                old_act->sa_mask.sig[1] = 0;
                old_act->sa_mask.sig[2] = 0;
                old_act->sa_mask.sig[3] = 0;
                unlock_user_struct(old_act, arg3, 1);
            }
#else
            struct target_old_sigaction *old_act;
            struct target_sigaction act, oact, *pact;
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask);
                act.sa_flags = old_act->sa_flags;
                act.sa_restorer = old_act->sa_restorer;
#ifdef TARGET_ARCH_HAS_KA_RESTORER
                act.ka_restorer = 0;
#endif
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            } else {
                pact = NULL;
            }
            ret = get_errno(do_sigaction(arg1, pact, &oact));
            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_mask = oact.sa_mask.sig[0];
                old_act->sa_flags = oact.sa_flags;
                old_act->sa_restorer = oact.sa_restorer;
                unlock_user_struct(old_act, arg3, 1);
            }
#endif
        }
        return ret;
#endif
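    /*
     * The old sigaction ABI packs the blocked-signal mask into a single
     * word, so the conversions above widen it into the kernel-style
     * target_sigaction (via target_siginitset) on the way in and copy
     * back only sig[0] on the way out.
     */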
    case TARGET_NR_rt_sigaction:
        {
#if defined(TARGET_ALPHA)
            /* For Alpha and SPARC this is a 5 argument syscall, with
             * a 'restorer' parameter which must be copied into the
             * sa_restorer field of the sigaction struct.
             * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
             * and arg5 is the sigsetsize.
             * Alpha also has a separate rt_sigaction struct that it uses
             * here; SPARC uses the usual sigaction struct.
             */
            struct target_rt_sigaction *rt_act;
            struct target_sigaction act, oact, *pact = 0;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = rt_act->_sa_handler;
                act.sa_mask = rt_act->sa_mask;
                act.sa_flags = rt_act->sa_flags;
                act.sa_restorer = arg5;
                unlock_user_struct(rt_act, arg2, 0);
                pact = &act;
            }
            ret = get_errno(do_sigaction(arg1, pact, &oact));
            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
                    return -TARGET_EFAULT;
                rt_act->_sa_handler = oact._sa_handler;
                rt_act->sa_mask = oact.sa_mask;
                rt_act->sa_flags = oact.sa_flags;
                unlock_user_struct(rt_act, arg3, 1);
            }
#else
#ifdef TARGET_SPARC
            target_ulong restorer = arg4;
            target_ulong sigsetsize = arg5;
#else
            target_ulong sigsetsize = arg4;
#endif
            struct target_sigaction *act;
            struct target_sigaction *oact;

            if (sigsetsize != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
                    return -TARGET_EFAULT;
                }
#ifdef TARGET_ARCH_HAS_KA_RESTORER
                act->ka_restorer = restorer;
#endif
            } else {
                act = NULL;
            }
            if (arg3) {
                if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
                    ret = -TARGET_EFAULT;
                    goto rt_sigaction_fail;
                }
            } else {
                oact = NULL;
            }
            ret = get_errno(do_sigaction(arg1, act, oact));
        rt_sigaction_fail:
            if (act)
                unlock_user_struct(act, arg2, 0);
            if (oact)
                unlock_user_struct(oact, arg3, 1);
#endif
        }
        return ret;
#ifdef TARGET_NR_sgetmask /* not on alpha */
    case TARGET_NR_sgetmask:
        {
            sigset_t cur_set;
            abi_ulong target_set;
            ret = do_sigprocmask(0, NULL, &cur_set);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &cur_set);
                ret = target_set;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_ssetmask /* not on alpha */
    case TARGET_NR_ssetmask:
        {
            sigset_t set, oset;
            abi_ulong target_set = arg1;
            target_to_host_old_sigset(&set, &target_set);
            ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &oset);
                ret = target_set;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_sigprocmask
    case TARGET_NR_sigprocmask:
        {
#if defined(TARGET_ALPHA)
            sigset_t set, oldset;
            abi_ulong mask;
            int how;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);

            ret = do_sigprocmask(how, &set, &oldset);
            if (!is_error(ret)) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
                ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
            }
#else
            sigset_t set, oldset, *set_ptr;
            int how;

            if (arg2) {
                switch (arg1) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
                if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
                    return -TARGET_EFAULT;
                target_to_host_old_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
            } else {
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
#endif
        }
        return ret;
#endif
    case TARGET_NR_rt_sigprocmask:
        {
            int how = 0;
            sigset_t set, oldset, *set_ptr;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            if (arg2) {
                switch (arg1) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
                if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
                    return -TARGET_EFAULT;
                target_to_host_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
            } else {
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
        }
        return ret;
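    /*
     * sigprocmask vs rt_sigprocmask: the old call uses the one-word
     * old_sigset_t conversion helpers, while the rt variant takes an
     * explicit sigsetsize (arg4) that must match target_sigset_t exactly
     * before the full set is converted.
     */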
#ifdef TARGET_NR_sigpending
    case TARGET_NR_sigpending:
        {
            sigset_t set;
            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigpending:
        {
            sigset_t set;

            /* Yes, this check is >, not != like most. We follow the kernel's
             * logic and it does it like this because it implements
             * NR_sigpending through the same code path, and in that case
             * the old_sigset_t is smaller in size.
             */
            if (arg2 > sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigsuspend
    case TARGET_NR_sigsuspend:
        {
            TaskState *ts = cpu->opaque;
#if defined(TARGET_ALPHA)
            abi_ulong mask = arg1;
            target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
#else
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_old_sigset(&ts->sigsuspend_mask, p);
            unlock_user(p, arg1, 0);
#endif
            ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
                                               SIGSET_T_SIZE));
            if (ret != -TARGET_ERESTARTSYS) {
                ts->in_sigsuspend = 1;
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigsuspend:
        {
            TaskState *ts = cpu->opaque;

            if (arg2 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_sigset(&ts->sigsuspend_mask, p);
            unlock_user(p, arg1, 0);
            ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
                                               SIGSET_T_SIZE));
            if (ret != -TARGET_ERESTARTSYS) {
                ts->in_sigsuspend = 1;
            }
        }
        return ret;
#ifdef TARGET_NR_rt_sigtimedwait
    case TARGET_NR_rt_sigtimedwait:
        {
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            if (arg3) {
                puts = &uts;
                if (target_to_host_timespec(puts, arg3)) {
                    return -TARGET_EFAULT;
                }
            } else {
                puts = NULL;
            }
            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
                                  0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_rt_sigtimedwait_time64
    case TARGET_NR_rt_sigtimedwait_time64:
        {
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            if (arg3) {
                puts = &uts;
                if (target_to_host_timespec64(puts, arg3)) {
                    return -TARGET_EFAULT;
                }
            } else {
                puts = NULL;
            }
            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2,
                                  sizeof(target_siginfo_t), 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
#endif
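    /*
     * rt_sigtimedwait returns the number of the delivered signal; since
     * guest and host signal numbering can differ, the result is mapped
     * back with host_to_target_signal() before returning to the guest.
     */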
    case TARGET_NR_rt_sigqueueinfo:
        {
            siginfo_t uinfo;

            p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg3, 0);
            ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
        }
        return ret;
    case TARGET_NR_rt_tgsigqueueinfo:
        {
            siginfo_t uinfo;

            p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg4, 0);
            ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
        }
        return ret;
#ifdef TARGET_NR_sigreturn
    case TARGET_NR_sigreturn:
        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }
        return do_sigreturn(cpu_env);
#endif
    case TARGET_NR_rt_sigreturn:
        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }
        return do_rt_sigreturn(cpu_env);
    case TARGET_NR_sethostname:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(sethostname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_setrlimit
    case TARGET_NR_setrlimit:
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;
            if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
                return -TARGET_EFAULT;
            rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
            rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
            unlock_user_struct(target_rlim, arg2, 0);
            /*
             * If we just passed through resource limit settings for memory then
             * they would also apply to QEMU's own allocations, and QEMU will
             * crash or hang or die if its allocations fail. Ideally we would
             * track the guest allocations in QEMU and apply the limits ourselves.
             * For now, just tell the guest the call succeeded but don't actually
             * limit anything.
             */
            if (resource != RLIMIT_AS &&
                resource != RLIMIT_DATA &&
                resource != RLIMIT_STACK) {
                return get_errno(setrlimit(resource, &rlim));
            } else {
                return 0;
            }
        }
#endif
#ifdef TARGET_NR_getrlimit
    case TARGET_NR_getrlimit:
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;

            ret = get_errno(getrlimit(resource, &rlim));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                    return -TARGET_EFAULT;
                target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
                target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
                unlock_user_struct(target_rlim, arg2, 1);
            }
        }
        return ret;
#endif
    case TARGET_NR_getrusage:
        {
            struct rusage rusage;
            ret = get_errno(getrusage(arg1, &rusage));
            if (!is_error(ret)) {
                ret = host_to_target_rusage(arg2, &rusage);
            }
        }
        return ret;
#if defined(TARGET_NR_gettimeofday)
    case TARGET_NR_gettimeofday:
        {
            struct timeval tv;
            struct timezone tz;

            ret = get_errno(gettimeofday(&tv, &tz));
            if (!is_error(ret)) {
                if (arg1 && copy_to_user_timeval(arg1, &tv)) {
                    return -TARGET_EFAULT;
                }
                if (arg2 && copy_to_user_timezone(arg2, &tz)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_settimeofday)
    case TARGET_NR_settimeofday:
        {
            struct timeval tv, *ptv = NULL;
            struct timezone tz, *ptz = NULL;

            if (arg1) {
                if (copy_from_user_timeval(&tv, arg1)) {
                    return -TARGET_EFAULT;
                }
                ptv = &tv;
            }

            if (arg2) {
                if (copy_from_user_timezone(&tz, arg2)) {
                    return -TARGET_EFAULT;
                }
                ptz = &tz;
            }

            return get_errno(settimeofday(ptv, ptz));
        }
#endif
#if defined(TARGET_NR_select)
    case TARGET_NR_select:
#if defined(TARGET_WANT_NI_OLD_SELECT)
        /* some architectures used to have old_select here
         * but now ENOSYS it.
         */
        ret = -TARGET_ENOSYS;
#elif defined(TARGET_WANT_OLD_SYS_SELECT)
        ret = do_old_select(arg1);
#else
        ret = do_select(arg1, arg2, arg3, arg4, arg5);
#endif
        return ret;
#endif
#ifdef TARGET_NR_pselect6
    case TARGET_NR_pselect6:
        return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
#endif
#ifdef TARGET_NR_pselect6_time64
    case TARGET_NR_pselect6_time64:
        return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
#endif
#ifdef TARGET_NR_symlink
    case TARGET_NR_symlink:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlink(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_symlinkat)
    case TARGET_NR_symlinkat:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg3);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlinkat(p, arg2, p2));
            unlock_user(p2, arg3, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_readlink
    case TARGET_NR_readlink:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (!arg3) {
                /* Short circuit this for the magic exe check. */
                ret = -TARGET_EINVAL;
            } else if (is_proc_myself((const char *)p, "exe")) {
                char real[PATH_MAX], *temp;
                temp = realpath(exec_path, real);
                /* Return value is # of bytes that we wrote to the buffer. */
                if (temp == NULL) {
                    ret = get_errno(-1);
                } else {
                    /* Don't worry about sign mismatch as earlier mapping
                     * logic would have thrown a bad address error. */
                    ret = MIN(strlen(real), arg3);
                    /* We cannot NUL terminate the string. */
                    memcpy(p2, real, ret);
                }
            } else {
                ret = get_errno(readlink(path(p), p2, arg3));
            }
            unlock_user(p2, arg2, ret);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
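    /*
     * readlink/readlinkat on "/proc/self/exe" is intercepted: the guest
     * is shown the path of the emulated binary (exec_path) rather than
     * the QEMU executable that is really running.
     */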
#if defined(TARGET_NR_readlinkat)
    case TARGET_NR_readlinkat:
        {
            void *p2;
            p = lock_user_string(arg2);
            p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (is_proc_myself((const char *)p, "exe")) {
                char real[PATH_MAX], *temp;
                temp = realpath(exec_path, real);
                ret = temp == NULL ? get_errno(-1) : strlen(real);
                snprintf((char *)p2, arg4, "%s", real);
            } else {
                ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
            }
            unlock_user(p2, arg3, ret);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_swapon
    case TARGET_NR_swapon:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapon(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_reboot:
        if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
            /* arg4 must be ignored in all other cases */
            p = lock_user_string(arg4);
            if (!p) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(reboot(arg1, arg2, arg3, p));
            unlock_user(p, arg4, 0);
        } else {
            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
        }
        return ret;
#ifdef TARGET_NR_mmap
    case TARGET_NR_mmap:
#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
    defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
    || defined(TARGET_S390X)
        {
            abi_ulong *v;
            abi_ulong v1, v2, v3, v4, v5, v6;
            if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
                return -TARGET_EFAULT;
            v1 = tswapal(v[0]);
            v2 = tswapal(v[1]);
            v3 = tswapal(v[2]);
            v4 = tswapal(v[3]);
            v5 = tswapal(v[4]);
            v6 = tswapal(v[5]);
            unlock_user(v, arg1, 0);
            ret = get_errno(target_mmap(v1, v2, v3,
                                        target_to_host_bitmask(v4, mmap_flags_tbl),
                                        v5, v6));
        }
#else
        ret = get_errno(target_mmap(arg1, arg2, arg3,
                                    target_to_host_bitmask(arg4, mmap_flags_tbl),
                                    arg5,
                                    arg6));
#endif
        return ret;
#endif
#ifdef TARGET_NR_mmap2
    case TARGET_NR_mmap2:
#ifndef MMAP_SHIFT
#define MMAP_SHIFT 12
#endif
        ret = target_mmap(arg1, arg2, arg3,
                          target_to_host_bitmask(arg4, mmap_flags_tbl),
                          arg5, arg6 << MMAP_SHIFT);
        return get_errno(ret);
#endif
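    /*
     * Old-ABI mmap on the targets listed above passes its six arguments
     * in a block of guest memory rather than in registers, hence the
     * lock_user() and per-word tswapal() reads; mmap2 instead takes the
     * file offset in (1 << MMAP_SHIFT)-byte units (4096 by default).
     */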
    case TARGET_NR_munmap:
        return get_errno(target_munmap(arg1, arg2));
    case TARGET_NR_mprotect:
        {
            TaskState *ts = cpu->opaque;
            /* Special hack to detect libc making the stack executable. */
            if ((arg3 & PROT_GROWSDOWN)
                && arg1 >= ts->info->stack_limit
                && arg1 <= ts->info->start_stack) {
                arg3 &= ~PROT_GROWSDOWN;
                arg2 = arg2 + arg1 - ts->info->stack_limit;
                arg1 = ts->info->stack_limit;
            }
        }
        return get_errno(target_mprotect(arg1, arg2, arg3));
#ifdef TARGET_NR_mremap
    case TARGET_NR_mremap:
        return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
#endif
    /* ??? msync/mlock/munlock are broken for softmmu.  */
#ifdef TARGET_NR_msync
    case TARGET_NR_msync:
        return get_errno(msync(g2h(arg1), arg2, arg3));
#endif
#ifdef TARGET_NR_mlock
    case TARGET_NR_mlock:
        return get_errno(mlock(g2h(arg1), arg2));
#endif
#ifdef TARGET_NR_munlock
    case TARGET_NR_munlock:
        return get_errno(munlock(g2h(arg1), arg2));
#endif
#ifdef TARGET_NR_mlockall
    case TARGET_NR_mlockall:
        return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
#endif
#ifdef TARGET_NR_munlockall
    case TARGET_NR_munlockall:
        return get_errno(munlockall());
#endif
#ifdef TARGET_NR_truncate
    case TARGET_NR_truncate:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(truncate(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate
    case TARGET_NR_ftruncate:
        return get_errno(ftruncate(arg1, arg2));
#endif
    case TARGET_NR_fchmod:
        return get_errno(fchmod(arg1, arg2));
#if defined(TARGET_NR_fchmodat)
    case TARGET_NR_fchmodat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(fchmodat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_getpriority:
        /* Note that negative values are valid for getpriority, so we must
           differentiate based on errno settings. */
        errno = 0;
        ret = getpriority(arg1, arg2);
        if (ret == -1 && errno != 0) {
            return -host_to_target_errno(errno);
        }
#ifdef TARGET_ALPHA
        /* Return value is the unbiased priority.  Signal no error. */
        ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
#else
        /* Return value is a biased priority to avoid negative numbers. */
        ret = 20 - ret;
#endif
        return ret;
    case TARGET_NR_setpriority:
        return get_errno(setpriority(arg1, arg2, arg3));
#ifdef TARGET_NR_statfs
    case TARGET_NR_statfs:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs:
        if (!is_error(ret)) {
            struct target_statfs *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg2, 1);
        }
        return ret;
#endif
#ifdef TARGET_NR_fstatfs
    case TARGET_NR_fstatfs:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs;
#endif
#ifdef TARGET_NR_statfs64
    case TARGET_NR_statfs64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs64:
        if (!is_error(ret)) {
            struct target_statfs64 *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg3, 1);
        }
        return ret;
    case TARGET_NR_fstatfs64:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs64;
#endif
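    /*
     * statfs/fstatfs and statfs64/fstatfs64 share one conversion path
     * each: the fstatfs cases only perform the host call and then jump
     * to the convert_statfs/convert_statfs64 labels above to fill in
     * the guest structure.
     */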
#ifdef TARGET_NR_socketcall
    case TARGET_NR_socketcall:
        return do_socketcall(arg1, arg2);
#endif
#ifdef TARGET_NR_accept
    case TARGET_NR_accept:
        return do_accept4(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_accept4
    case TARGET_NR_accept4:
        return do_accept4(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_bind
    case TARGET_NR_bind:
        return do_bind(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_connect
    case TARGET_NR_connect:
        return do_connect(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getpeername
    case TARGET_NR_getpeername:
        return do_getpeername(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockname
    case TARGET_NR_getsockname:
        return do_getsockname(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockopt
    case TARGET_NR_getsockopt:
        return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_listen
    case TARGET_NR_listen:
        return get_errno(listen(arg1, arg2));
#endif
#ifdef TARGET_NR_recv
    case TARGET_NR_recv:
        return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_recvfrom
    case TARGET_NR_recvfrom:
        return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_recvmsg
    case TARGET_NR_recvmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_send
    case TARGET_NR_send:
        return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_sendmsg
    case TARGET_NR_sendmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 1);
#endif
#ifdef TARGET_NR_sendmmsg
    case TARGET_NR_sendmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
#endif
#ifdef TARGET_NR_recvmmsg
    case TARGET_NR_recvmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
#endif
#ifdef TARGET_NR_sendto
    case TARGET_NR_sendto:
        return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_shutdown
    case TARGET_NR_shutdown:
        return get_errno(shutdown(arg1, arg2));
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
    case TARGET_NR_getrandom:
        p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(getrandom(p, arg2, arg3));
        unlock_user(p, arg1, ret);
        return ret;
#endif
#ifdef TARGET_NR_socket
    case TARGET_NR_socket:
        return do_socket(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_socketpair
    case TARGET_NR_socketpair:
        return do_socketpair(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_setsockopt
    case TARGET_NR_setsockopt:
        return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
#endif
#if defined(TARGET_NR_syslog)
    case TARGET_NR_syslog:
        {
            int len = arg2;

            switch (arg1) {
            case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
            case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
            case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
            case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
            case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
            case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
                return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
            case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
            case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
            case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
            {
                if (len < 0) {
                    return -TARGET_EINVAL;
                }
                if (len == 0) {
                    return 0;
                }
                p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
                unlock_user(p, arg2, arg3);
                return ret;
            }
            default:
                return -TARGET_EINVAL;
            }
        }
        break;
#endif
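    /*
     * Only the three READ-style syslog actions transfer data, so only
     * they need a locked, writable guest buffer; the remaining actions
     * are forwarded with a NULL buffer pointer.
     */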
    case TARGET_NR_setitimer:
        {
            struct itimerval value, ovalue, *pvalue;

            if (arg2) {
                pvalue = &value;
                if (copy_from_user_timeval(&pvalue->it_interval, arg2)
                    || copy_from_user_timeval(&pvalue->it_value,
                                              arg2 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
            } else {
                pvalue = NULL;
            }
            ret = get_errno(setitimer(arg1, pvalue, &ovalue));
            if (!is_error(ret) && arg3) {
                if (copy_to_user_timeval(arg3,
                                         &ovalue.it_interval)
                    || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
                                            &ovalue.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_getitimer:
        {
            struct itimerval value;

            ret = get_errno(getitimer(arg1, &value));
            if (!is_error(ret) && arg2) {
                if (copy_to_user_timeval(arg2,
                                         &value.it_interval)
                    || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
                                            &value.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#ifdef TARGET_NR_stat
    case TARGET_NR_stat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_lstat
    case TARGET_NR_lstat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_fstat
    case TARGET_NR_fstat:
        {
            ret = get_errno(fstat(arg1, &st));
#if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
        do_stat:
#endif
            if (!is_error(ret)) {
                struct target_stat *target_st;

                if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
                    return -TARGET_EFAULT;
                memset(target_st, 0, sizeof(*target_st));
                __put_user(st.st_dev, &target_st->st_dev);
                __put_user(st.st_ino, &target_st->st_ino);
                __put_user(st.st_mode, &target_st->st_mode);
                __put_user(st.st_uid, &target_st->st_uid);
                __put_user(st.st_gid, &target_st->st_gid);
                __put_user(st.st_nlink, &target_st->st_nlink);
                __put_user(st.st_rdev, &target_st->st_rdev);
                __put_user(st.st_size, &target_st->st_size);
                __put_user(st.st_blksize, &target_st->st_blksize);
                __put_user(st.st_blocks, &target_st->st_blocks);
                __put_user(st.st_atime, &target_st->target_st_atime);
                __put_user(st.st_mtime, &target_st->target_st_mtime);
                __put_user(st.st_ctime, &target_st->target_st_ctime);
#if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
    defined(TARGET_STAT_HAVE_NSEC)
                __put_user(st.st_atim.tv_nsec,
                           &target_st->target_st_atime_nsec);
                __put_user(st.st_mtim.tv_nsec,
                           &target_st->target_st_mtime_nsec);
                __put_user(st.st_ctim.tv_nsec,
                           &target_st->target_st_ctime_nsec);
#endif
                unlock_user_struct(target_st, arg2, 1);
            }
        }
        return ret;
#endif
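    /*
     * stat and lstat only differ from fstat in how the host call is
     * made; all three share the guest-struct conversion above via the
     * do_stat label.
     */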
    case TARGET_NR_vhangup:
        return get_errno(vhangup());
#ifdef TARGET_NR_syscall
    case TARGET_NR_syscall:
        return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
                          arg6, arg7, arg8, 0);
#endif
#if defined(TARGET_NR_wait4)
    case TARGET_NR_wait4:
        {
            int status;
            abi_long status_ptr = arg2;
            struct rusage rusage, *rusage_ptr;
            abi_ulong target_rusage = arg4;
            abi_long rusage_err;
            if (target_rusage) {
                rusage_ptr = &rusage;
            } else {
                rusage_ptr = NULL;
            }
            ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
            if (!is_error(ret)) {
                if (status_ptr && ret) {
                    status = host_to_target_waitstatus(status);
                    if (put_user_s32(status, status_ptr))
                        return -TARGET_EFAULT;
                }
                if (target_rusage) {
                    rusage_err = host_to_target_rusage(target_rusage, &rusage);
                    if (rusage_err) {
                        ret = rusage_err;
                    }
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_swapoff
    case TARGET_NR_swapoff:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapoff(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_sysinfo:
        {
            struct target_sysinfo *target_value;
            struct sysinfo value;
            ret = get_errno(sysinfo(&value));
            if (!is_error(ret) && arg1)
            {
                if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
                    return -TARGET_EFAULT;
                __put_user(value.uptime, &target_value->uptime);
                __put_user(value.loads[0], &target_value->loads[0]);
                __put_user(value.loads[1], &target_value->loads[1]);
                __put_user(value.loads[2], &target_value->loads[2]);
                __put_user(value.totalram, &target_value->totalram);
                __put_user(value.freeram, &target_value->freeram);
                __put_user(value.sharedram, &target_value->sharedram);
                __put_user(value.bufferram, &target_value->bufferram);
                __put_user(value.totalswap, &target_value->totalswap);
                __put_user(value.freeswap, &target_value->freeswap);
                __put_user(value.procs, &target_value->procs);
                __put_user(value.totalhigh, &target_value->totalhigh);
                __put_user(value.freehigh, &target_value->freehigh);
                __put_user(value.mem_unit, &target_value->mem_unit);
                unlock_user_struct(target_value, arg1, 1);
            }
        }
        return ret;
#ifdef TARGET_NR_ipc
    case TARGET_NR_ipc:
        return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_semget
    case TARGET_NR_semget:
        return get_errno(semget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_semop
    case TARGET_NR_semop:
        return do_semtimedop(arg1, arg2, arg3, 0, false);
#endif
#ifdef TARGET_NR_semtimedop
    case TARGET_NR_semtimedop:
        return do_semtimedop(arg1, arg2, arg3, arg4, false);
#endif
#ifdef TARGET_NR_semtimedop_time64
    case TARGET_NR_semtimedop_time64:
        return do_semtimedop(arg1, arg2, arg3, arg4, true);
#endif
#ifdef TARGET_NR_semctl
    case TARGET_NR_semctl:
        return do_semctl(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_msgctl
    case TARGET_NR_msgctl:
        return do_msgctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_msgget
    case TARGET_NR_msgget:
        return get_errno(msgget(arg1, arg2));
#endif
#ifdef TARGET_NR_msgrcv
    case TARGET_NR_msgrcv:
        return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_msgsnd
    case TARGET_NR_msgsnd:
        return do_msgsnd(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_shmget
    case TARGET_NR_shmget:
        return get_errno(shmget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_shmctl
    case TARGET_NR_shmctl:
        return do_shmctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmat
    case TARGET_NR_shmat:
        return do_shmat(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmdt
    case TARGET_NR_shmdt:
        return do_shmdt(arg1);
#endif
    case TARGET_NR_fsync:
        return get_errno(fsync(arg1));
    case TARGET_NR_clone:
        /* Linux manages to have three different orderings for its
         * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
         * match the kernel's CONFIG_CLONE_* settings.
         * Microblaze is further special in that it uses a sixth
         * implicit argument to clone for the TLS pointer.
         */
#if defined(TARGET_MICROBLAZE)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
#elif defined(TARGET_CLONE_BACKWARDS)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
#elif defined(TARGET_CLONE_BACKWARDS2)
        ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
#else
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
#endif
        return ret;
#ifdef __NR_exit_group
        /* new thread calls */
    case TARGET_NR_exit_group:
        preexit_cleanup(cpu_env, arg1);
        return get_errno(exit_group(arg1));
#endif
    case TARGET_NR_setdomainname:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(setdomainname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
    case TARGET_NR_uname:
        /* no need to transcode because we use the linux syscall */
        {
            struct new_utsname *buf;

            if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
                return -TARGET_EFAULT;
            ret = get_errno(sys_uname(buf));
            if (!is_error(ret)) {
                /* Overwrite the native machine name with whatever is being
                   emulated. */
                g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
                          sizeof(buf->machine));
                /* Allow the user to override the reported release.  */
                if (qemu_uname_release && *qemu_uname_release) {
                    g_strlcpy(buf->release, qemu_uname_release,
                              sizeof(buf->release));
                }
            }
            unlock_user_struct(buf, arg1, 1);
        }
        return ret;
#ifdef TARGET_I386
    case TARGET_NR_modify_ldt:
        return do_modify_ldt(cpu_env, arg1, arg2, arg3);
#if !defined(TARGET_X86_64)
    case TARGET_NR_vm86:
        return do_vm86(cpu_env, arg1, arg2);
#endif
#endif
#if defined(TARGET_NR_adjtimex)
    case TARGET_NR_adjtimex:
        {
            struct timex host_buf;

            if (target_to_host_timex(&host_buf, arg1) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(adjtimex(&host_buf));
            if (!is_error(ret)) {
                if (host_to_target_timex(arg1, &host_buf) != 0) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime:
        {
            struct timex htx, *phtx = &htx;

            if (target_to_host_timex(phtx, arg2) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(clock_adjtime(arg1, phtx));
            if (!is_error(ret) && phtx) {
                if (host_to_target_timex(arg2, phtx) != 0) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime64:
        {
            struct timex htx;

            if (target_to_host_timex64(&htx, arg2) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(clock_adjtime(arg1, &htx));
            if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
    case TARGET_NR_getpgid:
        return get_errno(getpgid(arg1));
    case TARGET_NR_fchdir:
        return get_errno(fchdir(arg1));
    case TARGET_NR_personality:
        return get_errno(personality(arg1));
#ifdef TARGET_NR__llseek /* Not on alpha */
    case TARGET_NR__llseek:
        {
            int64_t res;
#if !defined(__NR_llseek)
            res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
            if (res == -1) {
                ret = get_errno(res);
            } else {
                ret = 0;
            }
#else
            ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
#endif
            if ((ret == 0) && put_user_s64(res, arg4)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
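    /*
     * _llseek exists so 32-bit guests can produce a 64-bit offset: the
     * high and low halves arrive in arg2/arg3 and the combined result
     * is written back through the arg4 pointer with put_user_s64().
     */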
#ifdef TARGET_NR_getdents
    case TARGET_NR_getdents:
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
#if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
        {
            struct target_dirent *target_dirp;
            struct linux_dirent *dirp;
            abi_long count = arg3;

            dirp = g_try_malloc(count);
            if (!dirp) {
                return -TARGET_ENOMEM;
            }

            ret = get_errno(sys_getdents(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent *de;
                struct target_dirent *tde;
                int len = ret;
                int reclen, treclen;
                int count1, tnamelen;

                count1 = 0;
                de = dirp;
                if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                    return -TARGET_EFAULT;
                tde = target_dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    tnamelen = reclen - offsetof(struct linux_dirent, d_name);
                    assert(tnamelen >= 0);
                    treclen = tnamelen + offsetof(struct target_dirent, d_name);
                    assert(count1 + treclen <= count);
                    tde->d_reclen = tswap16(treclen);
                    tde->d_ino = tswapal(de->d_ino);
                    tde->d_off = tswapal(de->d_off);
                    memcpy(tde->d_name, de->d_name, tnamelen);
                    de = (struct linux_dirent *)((char *)de + reclen);
                    len -= reclen;
                    tde = (struct target_dirent *)((char *)tde + treclen);
                    count1 += treclen;
                }
                ret = count1;
                unlock_user(target_dirp, arg2, ret);
            }
            g_free(dirp);
        }
#else
        {
            struct linux_dirent *dirp;
            abi_long count = arg3;

            if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                return -TARGET_EFAULT;
            ret = get_errno(sys_getdents(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent *de;
                int len = ret;
                int reclen;
                de = dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    if (reclen > len)
                        break;
                    de->d_reclen = tswap16(reclen);
                    tswapls(&de->d_ino);
                    tswapls(&de->d_off);
                    de = (struct linux_dirent *)((char *)de + reclen);
                    len -= reclen;
                }
            }
            unlock_user(dirp, arg2, ret);
        }
#endif
#else
        /* Implement getdents in terms of getdents64 */
        {
            struct linux_dirent64 *dirp;
            abi_long count = arg3;

            dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
            if (!dirp) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(sys_getdents64(arg1, dirp, count));
            if (!is_error(ret)) {
                /* Convert the dirent64 structs to target dirent.  We do this
                 * in-place, since we can guarantee that a target_dirent is no
                 * larger than a dirent64; however this means we have to be
                 * careful to read everything before writing in the new format.
                 */
                struct linux_dirent64 *de;
                struct target_dirent *tde;
                int len = ret;
                int tlen = 0;

                de = dirp;
                tde = (struct target_dirent *)dirp;
                while (len > 0) {
                    int namelen, treclen;
                    int reclen = de->d_reclen;
                    uint64_t ino = de->d_ino;
                    int64_t off = de->d_off;
                    uint8_t type = de->d_type;

                    namelen = strlen(de->d_name);
                    treclen = offsetof(struct target_dirent, d_name)
                        + namelen + 2;
                    treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));

                    memmove(tde->d_name, de->d_name, namelen + 1);
                    tde->d_ino = tswapal(ino);
                    tde->d_off = tswapal(off);
                    tde->d_reclen = tswap16(treclen);
                    /* The target_dirent type is in what was formerly a padding
                     * byte at the end of the structure:
                     */
                    *(((char *)tde) + treclen - 1) = type;

                    de = (struct linux_dirent64 *)((char *)de + reclen);
                    tde = (struct target_dirent *)((char *)tde + treclen);
                    len -= reclen;
                    tlen += treclen;
                }
                ret = tlen;
            }
            unlock_user(dirp, arg2, ret);
        }
#endif
        return ret;
#endif /* TARGET_NR_getdents */
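    /*
     * getdents64 below needs no layout conversion: guest and host
     * linux_dirent64 records share the same field layout, so only the
     * multi-byte fields are byte-swapped in place.
     */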
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
    case TARGET_NR_getdents64:
        {
            struct linux_dirent64 *dirp;
            abi_long count = arg3;
            if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                return -TARGET_EFAULT;
            ret = get_errno(sys_getdents64(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent64 *de;
                int len = ret;
                int reclen;
                de = dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    if (reclen > len)
                        break;
                    de->d_reclen = tswap16(reclen);
                    tswap64s((uint64_t *)&de->d_ino);
                    tswap64s((uint64_t *)&de->d_off);
                    de = (struct linux_dirent64 *)((char *)de + reclen);
                    len -= reclen;
                }
            }
            unlock_user(dirp, arg2, ret);
        }
        return ret;
#endif /* TARGET_NR_getdents64 */
#if defined(TARGET_NR__newselect)
    case TARGET_NR__newselect:
        return do_select(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_poll
    case TARGET_NR_poll:
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
#endif
#ifdef TARGET_NR_ppoll
    case TARGET_NR_ppoll:
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
#endif
#ifdef TARGET_NR_ppoll_time64
    case TARGET_NR_ppoll_time64:
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
#endif
    case TARGET_NR_flock:
        /* NOTE: the flock constant seems to be the same for every
           Linux platform */
        return get_errno(safe_flock(arg1, arg2));
    case TARGET_NR_readv:
        {
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                ret = get_errno(safe_readv(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
    case TARGET_NR_writev:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(safe_writev(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#if defined(TARGET_NR_preadv)
    case TARGET_NR_preadv:
        {
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                unsigned long low, high;

                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_pwritev)
    case TARGET_NR_pwritev:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                unsigned long low, high;

                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
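    /*
     * lock_iovec() converts the guest iovec array into host struct iovec
     * entries, validating and locking each base/len pair; on failure it
     * returns NULL with errno set, which is why the error path reports
     * -host_to_target_errno(errno) instead of using get_errno().
     */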
    case TARGET_NR_getsid:
        return get_errno(getsid(arg1));
#if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
    case TARGET_NR_fdatasync:
        return get_errno(fdatasync(arg1));
#endif
    case TARGET_NR_sched_getaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_getaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                return -TARGET_EINVAL;
            }
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);

            mask = alloca(mask_size);
            memset(mask, 0, mask_size);
            ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));

            if (!is_error(ret)) {
                if (ret > arg2) {
                    /* More data returned than the caller's buffer will fit.
                     * This only happens if sizeof(abi_long) < sizeof(long)
                     * and the caller passed us a buffer holding an odd number
                     * of abi_longs. If the host kernel is actually using the
                     * extra 4 bytes then fail EINVAL; otherwise we can just
                     * ignore them and only copy the interesting part.
                     */
                    int numcpus = sysconf(_SC_NPROCESSORS_CONF);
                    if (numcpus > arg2 * 8) {
                        return -TARGET_EINVAL;
                    }
                    ret = arg2;
                }

                if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    case TARGET_NR_sched_setaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_setaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                return -TARGET_EINVAL;
            }
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
            mask = alloca(mask_size);

            ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
            if (ret) {
                return ret;
            }

            return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
        }
    case TARGET_NR_getcpu:
        {
            unsigned cpu, node;
            ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
                                       arg2 ? &node : NULL,
                                       NULL));
            if (is_error(ret)) {
                return ret;
            }
            if (arg1 && put_user_u32(cpu, arg1)) {
                return -TARGET_EFAULT;
            }
            if (arg2 && put_user_u32(node, arg2)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_sched_setparam:
        {
            struct sched_param *target_schp;
            struct sched_param schp;

            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
                return -TARGET_EFAULT;
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg2, 0);
            return get_errno(sched_setparam(arg1, &schp));
        }
    case TARGET_NR_sched_getparam:
        {
            struct sched_param *target_schp;
            struct sched_param schp;

            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            ret = get_errno(sched_getparam(arg1, &schp));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
                    return -TARGET_EFAULT;
                target_schp->sched_priority = tswap32(schp.sched_priority);
                unlock_user_struct(target_schp, arg2, 1);
            }
        }
        return ret;
    case TARGET_NR_sched_setscheduler:
        {
            struct sched_param *target_schp;
            struct sched_param schp;
            if (arg3 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
                return -TARGET_EFAULT;
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg3, 0);
            return get_errno(sched_setscheduler(arg1, arg2, &schp));
        }
    case TARGET_NR_sched_getscheduler:
        return get_errno(sched_getscheduler(arg1));
    case TARGET_NR_sched_yield:
        return get_errno(sched_yield());
    case TARGET_NR_sched_get_priority_max:
        return get_errno(sched_get_priority_max(arg1));
    case TARGET_NR_sched_get_priority_min:
        return get_errno(sched_get_priority_min(arg1));
#ifdef TARGET_NR_sched_rr_get_interval
    case TARGET_NR_sched_rr_get_interval:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                ret = host_to_target_timespec(arg2, &ts);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_sched_rr_get_interval_time64
    case TARGET_NR_sched_rr_get_interval_time64:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                ret = host_to_target_timespec64(arg2, &ts);
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_nanosleep)
    case TARGET_NR_nanosleep:
        {
            struct timespec req, rem;
            target_to_host_timespec(&req, arg1);
            ret = get_errno(safe_nanosleep(&req, &rem));
            if (is_error(ret) && arg2) {
                host_to_target_timespec(arg2, &rem);
            }
        }
        return ret;
#endif
10758 case TARGET_NR_prctl
:
10760 case PR_GET_PDEATHSIG
:
10763 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
10764 if (!is_error(ret
) && arg2
10765 && put_user_s32(deathsig
, arg2
)) {
10766 return -TARGET_EFAULT
;
10773 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
10775 return -TARGET_EFAULT
;
10777 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10778 arg3
, arg4
, arg5
));
10779 unlock_user(name
, arg2
, 16);
10784 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
10786 return -TARGET_EFAULT
;
10788 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10789 arg3
, arg4
, arg5
));
10790 unlock_user(name
, arg2
, 0);
10795 case TARGET_PR_GET_FP_MODE
:
10797 CPUMIPSState
*env
= ((CPUMIPSState
*)cpu_env
);
10799 if (env
->CP0_Status
& (1 << CP0St_FR
)) {
10800 ret
|= TARGET_PR_FP_MODE_FR
;
10802 if (env
->CP0_Config5
& (1 << CP0C5_FRE
)) {
10803 ret
|= TARGET_PR_FP_MODE_FRE
;
10807 case TARGET_PR_SET_FP_MODE
:
10809 CPUMIPSState
*env
= ((CPUMIPSState
*)cpu_env
);
10810 bool old_fr
= env
->CP0_Status
& (1 << CP0St_FR
);
10811 bool old_fre
= env
->CP0_Config5
& (1 << CP0C5_FRE
);
10812 bool new_fr
= arg2
& TARGET_PR_FP_MODE_FR
;
10813 bool new_fre
= arg2
& TARGET_PR_FP_MODE_FRE
;
10815 const unsigned int known_bits
= TARGET_PR_FP_MODE_FR
|
10816 TARGET_PR_FP_MODE_FRE
;
10818 /* If nothing to change, return right away, successfully. */
10819 if (old_fr
== new_fr
&& old_fre
== new_fre
) {
10822 /* Check the value is valid */
10823 if (arg2
& ~known_bits
) {
10824 return -TARGET_EOPNOTSUPP
;
10826 /* Setting FRE without FR is not supported. */
10827 if (new_fre
&& !new_fr
) {
10828 return -TARGET_EOPNOTSUPP
;
10830 if (new_fr
&& !(env
->active_fpu
.fcr0
& (1 << FCR0_F64
))) {
10831 /* FR1 is not supported */
10832 return -TARGET_EOPNOTSUPP
;
10834 if (!new_fr
&& (env
->active_fpu
.fcr0
& (1 << FCR0_F64
))
10835 && !(env
->CP0_Status_rw_bitmask
& (1 << CP0St_FR
))) {
10836 /* cannot set FR=0 */
10837 return -TARGET_EOPNOTSUPP
;
10839 if (new_fre
&& !(env
->active_fpu
.fcr0
& (1 << FCR0_FREP
))) {
10840 /* Cannot set FRE=1 */
10841 return -TARGET_EOPNOTSUPP
;
10845 fpr_t
*fpr
= env
->active_fpu
.fpr
;
10846 for (i
= 0; i
< 32 ; i
+= 2) {
10847 if (!old_fr
&& new_fr
) {
10848 fpr
[i
].w
[!FP_ENDIAN_IDX
] = fpr
[i
+ 1].w
[FP_ENDIAN_IDX
];
10849 } else if (old_fr
&& !new_fr
) {
10850 fpr
[i
+ 1].w
[FP_ENDIAN_IDX
] = fpr
[i
].w
[!FP_ENDIAN_IDX
];
10855 env
->CP0_Status
|= (1 << CP0St_FR
);
10856 env
->hflags
|= MIPS_HFLAG_F64
;
10858 env
->CP0_Status
&= ~(1 << CP0St_FR
);
10859 env
->hflags
&= ~MIPS_HFLAG_F64
;
10862 env
->CP0_Config5
|= (1 << CP0C5_FRE
);
10863 if (env
->active_fpu
.fcr0
& (1 << FCR0_FREP
)) {
10864 env
->hflags
|= MIPS_HFLAG_FRE
;
10867 env
->CP0_Config5
&= ~(1 << CP0C5_FRE
);
10868 env
->hflags
&= ~MIPS_HFLAG_FRE
;
10874 #ifdef TARGET_AARCH64
10875 case TARGET_PR_SVE_SET_VL
:
10877 * We cannot support either PR_SVE_SET_VL_ONEXEC or
10878 * PR_SVE_VL_INHERIT. Note the kernel definition
10879 * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10880 * even though the current architectural maximum is VQ=16.
10882 ret
= -TARGET_EINVAL
;
10883 if (cpu_isar_feature(aa64_sve
, env_archcpu(cpu_env
))
10884 && arg2
>= 0 && arg2
<= 512 * 16 && !(arg2
& 15)) {
10885 CPUARMState
*env
= cpu_env
;
10886 ARMCPU
*cpu
= env_archcpu(env
);
10887 uint32_t vq
, old_vq
;
10889 old_vq
= (env
->vfp
.zcr_el
[1] & 0xf) + 1;
10890 vq
= MAX(arg2
/ 16, 1);
10891 vq
= MIN(vq
, cpu
->sve_max_vq
);
10894 aarch64_sve_narrow_vq(env
, vq
);
10896 env
->vfp
.zcr_el
[1] = vq
- 1;
10897 arm_rebuild_hflags(env
);
10901 case TARGET_PR_SVE_GET_VL
:
10902 ret
= -TARGET_EINVAL
;
10904 ARMCPU
*cpu
= env_archcpu(cpu_env
);
10905 if (cpu_isar_feature(aa64_sve
, cpu
)) {
10906 ret
= ((cpu
->env
.vfp
.zcr_el
[1] & 0xf) + 1) * 16;
        case TARGET_PR_PAC_RESET_KEYS:
            {
                CPUARMState *env = cpu_env;
                ARMCPU *cpu = env_archcpu(env);

                if (arg3 || arg4 || arg5) {
                    return -TARGET_EINVAL;
                }
                if (cpu_isar_feature(aa64_pauth, cpu)) {
                    int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
                               TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
                               TARGET_PR_PAC_APGAKEY);
                    int ret = 0;
                    Error *err = NULL;

                    if (arg2 == 0) {
                        arg2 = all;
                    } else if (arg2 & ~all) {
                        return -TARGET_EINVAL;
                    }
                    if (arg2 & TARGET_PR_PAC_APIAKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apia,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (arg2 & TARGET_PR_PAC_APIBKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apib,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (arg2 & TARGET_PR_PAC_APDAKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apda,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (arg2 & TARGET_PR_PAC_APDBKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apdb,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (arg2 & TARGET_PR_PAC_APGAKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apga,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (ret != 0) {
                        /*
                         * Some unknown failure in the crypto.  The best
                         * we can do is log it and fail the syscall.
                         * The real syscall cannot fail this way.
                         */
                        qemu_log_mask(LOG_UNIMP,
                                      "PR_PAC_RESET_KEYS: Crypto failure: %s",
                                      error_get_pretty(err));
                        error_free(err);
                        return -TARGET_EIO;
                    }
                    return 0;
                }
            }
            return -TARGET_EINVAL;
#endif /* AARCH64 */
        case PR_GET_SECCOMP:
        case PR_SET_SECCOMP:
            /* Disable seccomp to prevent the target disabling syscalls we
             * need. */
            return -TARGET_EINVAL;
        default:
            /* Most prctl options have no pointer arguments */
            return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
        }
        break;
#ifdef TARGET_NR_arch_prctl
    case TARGET_NR_arch_prctl:
        return do_arch_prctl(cpu_env, arg1, arg2);
#endif
#ifdef TARGET_NR_pread64
    case TARGET_NR_pread64:
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, ret);
        return ret;
    case TARGET_NR_pwrite64:
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, 0);
        return ret;
#endif
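    /*
     * On 32-bit ABIs the 64-bit file offset for pread64/pwrite64 arrives
     * as two 32-bit syscall arguments; regpairs_aligned() reports whether
     * the target ABI pads the argument list so that the pair starts in an
     * even register, in which case the pair is found one slot later.
     * target_offset64() then reassembles the two halves in target order.
     */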
    case TARGET_NR_getcwd:
        if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
            return -TARGET_EFAULT;
        ret = get_errno(sys_getcwd1(p, arg2));
        unlock_user(p, arg1, ret);
        return ret;
    case TARGET_NR_capget:
    case TARGET_NR_capset:
    {
        struct target_user_cap_header *target_header;
        struct target_user_cap_data *target_data = NULL;
        struct __user_cap_header_struct header;
        struct __user_cap_data_struct data[2];
        struct __user_cap_data_struct *dataptr = NULL;
        int i, target_datalen;
        int data_items = 1;

        if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
            return -TARGET_EFAULT;
        }
        header.version = tswap32(target_header->version);
        header.pid = tswap32(target_header->pid);

        if (header.version != _LINUX_CAPABILITY_VERSION) {
            /* Version 2 and up takes pointer to two user_data structs */
            data_items = 2;
        }

        target_datalen = sizeof(*target_data) * data_items;

        if (arg2) {
            if (num == TARGET_NR_capget) {
                target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
            } else {
                target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
            }
            if (!target_data) {
                unlock_user_struct(target_header, arg1, 0);
                return -TARGET_EFAULT;
            }

            if (num == TARGET_NR_capset) {
                for (i = 0; i < data_items; i++) {
                    data[i].effective = tswap32(target_data[i].effective);
                    data[i].permitted = tswap32(target_data[i].permitted);
                    data[i].inheritable = tswap32(target_data[i].inheritable);
                }
            }

            dataptr = data;
        }

        if (num == TARGET_NR_capget) {
            ret = get_errno(capget(&header, dataptr));
        } else {
            ret = get_errno(capset(&header, dataptr));
        }

        /* The kernel always updates version for both capget and capset */
        target_header->version = tswap32(header.version);
        unlock_user_struct(target_header, arg1, 1);

        if (arg2) {
            if (num == TARGET_NR_capget) {
                for (i = 0; i < data_items; i++) {
                    target_data[i].effective = tswap32(data[i].effective);
                    target_data[i].permitted = tswap32(data[i].permitted);
                    target_data[i].inheritable = tswap32(data[i].inheritable);
                }
                unlock_user(target_data, arg2, target_datalen);
            } else {
                unlock_user(target_data, arg2, 0);
            }
        }
        return ret;
    }
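    /*
     * _LINUX_CAPABILITY_VERSION (v1) transfers a single 32-bit
     * __user_cap_data_struct; later ABI versions transfer two, so that
     * each 64-bit capability set is split into two 32-bit items.
     * data_items above tracks which layout the guest asked for.
     */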
    case TARGET_NR_sigaltstack:
        return do_sigaltstack(arg1, arg2,
                              get_sp_from_cpustate((CPUArchState *)cpu_env));

#ifdef CONFIG_SENDFILE
#ifdef TARGET_NR_sendfile
    case TARGET_NR_sendfile:
    {
        off_t *offp = NULL;
        off_t off;
        if (arg3) {
            ret = get_user_sal(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            abi_long ret2 = put_user_sal(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_sendfile64
    case TARGET_NR_sendfile64:
    {
        off_t *offp = NULL;
        off_t off;
        if (arg3) {
            ret = get_user_s64(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            abi_long ret2 = put_user_s64(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#endif
#ifdef TARGET_NR_vfork
    case TARGET_NR_vfork:
        return get_errno(do_fork(cpu_env,
                         CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
                         0, 0, 0, 0));
#endif
#ifdef TARGET_NR_ugetrlimit
    case TARGET_NR_ugetrlimit:
    {
        struct rlimit rlim;
        int resource = target_to_host_resource(arg1);
        ret = get_errno(getrlimit(resource, &rlim));
        if (!is_error(ret)) {
            struct target_rlimit *target_rlim;
            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                return -TARGET_EFAULT;
            target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
            target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_truncate64
    case TARGET_NR_truncate64:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate64
    case TARGET_NR_ftruncate64:
        return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_stat64
    case TARGET_NR_stat64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_lstat64
    case TARGET_NR_lstat64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_fstat64
    case TARGET_NR_fstat64:
        ret = get_errno(fstat(arg1, &st));
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
#ifdef TARGET_NR_fstatat64
    case TARGET_NR_fstatat64:
#endif
#ifdef TARGET_NR_newfstatat
    case TARGET_NR_newfstatat:
#endif
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(fstatat(arg1, path(p), &st, arg4));
        unlock_user(p, arg2, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg3, &st);
        return ret;
#endif
#if defined(TARGET_NR_statx)
    case TARGET_NR_statx:
        {
            struct target_statx *target_stx;
            int dirfd = arg1;
            int flags = arg3;

            p = lock_user_string(arg2);
            if (p == NULL) {
                return -TARGET_EFAULT;
            }
#if defined(__NR_statx)
            {
                /*
                 * It is assumed that struct statx is architecture independent.
                 */
                struct target_statx host_stx;
                int mask = arg4;

                ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
                if (!is_error(ret)) {
                    if (host_to_target_statx(&host_stx, arg5) != 0) {
                        unlock_user(p, arg2, 0);
                        return -TARGET_EFAULT;
                    }
                }

                if (ret != -TARGET_ENOSYS) {
                    unlock_user(p, arg2, 0);
                    return ret;
                }
            }
#endif
            ret = get_errno(fstatat(dirfd, path(p), &st, flags));
            unlock_user(p, arg2, 0);

            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
                    return -TARGET_EFAULT;
                }
                memset(target_stx, 0, sizeof(*target_stx));
                __put_user(major(st.st_dev), &target_stx->stx_dev_major);
                __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
                __put_user(st.st_ino, &target_stx->stx_ino);
                __put_user(st.st_mode, &target_stx->stx_mode);
                __put_user(st.st_uid, &target_stx->stx_uid);
                __put_user(st.st_gid, &target_stx->stx_gid);
                __put_user(st.st_nlink, &target_stx->stx_nlink);
                __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
                __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
                __put_user(st.st_size, &target_stx->stx_size);
                __put_user(st.st_blksize, &target_stx->stx_blksize);
                __put_user(st.st_blocks, &target_stx->stx_blocks);
                __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
                __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
                __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
                unlock_user_struct(target_stx, arg5, 1);
            }
        }
        return ret;
#endif
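    /*
     * The statx emulation above is two-tier: if the host kernel has a
     * native statx syscall we use it and byte-swap the result, and only
     * if that reports ENOSYS do we fall back to fstatat(), filling in
     * the subset of the statx fields that an ordinary struct stat
     * provides.
     */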
#ifdef TARGET_NR_lchown
    case TARGET_NR_lchown:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid
    case TARGET_NR_getuid:
        return get_errno(high2lowuid(getuid()));
#endif
#ifdef TARGET_NR_getgid
    case TARGET_NR_getgid:
        return get_errno(high2lowgid(getgid()));
#endif
#ifdef TARGET_NR_geteuid
    case TARGET_NR_geteuid:
        return get_errno(high2lowuid(geteuid()));
#endif
#ifdef TARGET_NR_getegid
    case TARGET_NR_getegid:
        return get_errno(high2lowgid(getegid()));
#endif
    case TARGET_NR_setreuid:
        return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
    case TARGET_NR_setregid:
        return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
    case TARGET_NR_getgroups:
        {
            int gidsetsize = arg1;
            target_id *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0)
                return ret;
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
                if (!target_grouplist)
                    return -TARGET_EFAULT;
                for(i = 0;i < ret; i++)
                    target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
                unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
            }
        }
        return ret;
    case TARGET_NR_setgroups:
        {
            int gidsetsize = arg1;
            target_id *target_grouplist;
            gid_t *grouplist = NULL;
            int i;
            if (gidsetsize) {
                grouplist = alloca(gidsetsize * sizeof(gid_t));
                target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < gidsetsize; i++) {
                    grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
                }
                unlock_user(target_grouplist, arg2, 0);
            }
            return get_errno(setgroups(gidsetsize, grouplist));
        }
    case TARGET_NR_fchown:
        return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
#if defined(TARGET_NR_fchownat)
    case TARGET_NR_fchownat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
                                 low2highgid(arg4), arg5));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_setresuid
    case TARGET_NR_setresuid:
        return get_errno(sys_setresuid(low2highuid(arg1),
                                       low2highuid(arg2),
                                       low2highuid(arg3)));
#endif
#ifdef TARGET_NR_getresuid
    case TARGET_NR_getresuid:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_id(high2lowuid(ruid), arg1)
                    || put_user_id(high2lowuid(euid), arg2)
                    || put_user_id(high2lowuid(suid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setresgid
    case TARGET_NR_setresgid:
        return get_errno(sys_setresgid(low2highgid(arg1),
                                       low2highgid(arg2),
                                       low2highgid(arg3)));
#endif
#ifdef TARGET_NR_getresgid
    case TARGET_NR_getresgid:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_id(high2lowgid(rgid), arg1)
                    || put_user_id(high2lowgid(egid), arg2)
                    || put_user_id(high2lowgid(sgid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown
    case TARGET_NR_chown:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_setuid:
        return get_errno(sys_setuid(low2highuid(arg1)));
    case TARGET_NR_setgid:
        return get_errno(sys_setgid(low2highgid(arg1)));
    case TARGET_NR_setfsuid:
        return get_errno(setfsuid(arg1));
    case TARGET_NR_setfsgid:
        return get_errno(setfsgid(arg1));
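    /*
     * The syscalls above belong to the legacy 16-bit uid/gid ABI, so ids
     * are passed through the low2high*()/high2low*() helpers to convert
     * between the guest's narrow ids and the host's full-width ids.  The
     * *32 variants below take full-width ids and need no conversion.
     */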
#ifdef TARGET_NR_lchown32
    case TARGET_NR_lchown32:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid32
    case TARGET_NR_getuid32:
        return get_errno(getuid());
#endif

#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
   /* Alpha specific */
    case TARGET_NR_getxuid:
         {
            uid_t euid;
            euid = geteuid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
         }
        return get_errno(getuid());
#endif
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
   /* Alpha specific */
    case TARGET_NR_getxgid:
         {
            uid_t egid;
            egid = getegid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
         }
        return get_errno(getgid());
#endif
#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_getsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_GSI_IEEE_FP_CONTROL:
            {
                uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
                uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;

                swcr &= ~SWCR_STATUS_MASK;
                swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;

                if (put_user_u64 (swcr, arg2))
                    return -TARGET_EFAULT;
                ret = 0;
            }
            break;

          /* case GSI_IEEE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel.
             case GSI_UACPROC:
             -- Retrieves current unaligned access state; not much used.
             case GSI_PROC_TYPE:
             -- Retrieves implver information; surely not used.
             case GSI_GET_HWRPB:
             -- Grabs a copy of the HWRPB; surely not used.
          */
        }
        return ret;
#endif
#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_setsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_SSI_IEEE_FP_CONTROL:
            {
                uint64_t swcr, fpcr;

                if (get_user_u64 (swcr, arg2)) {
                    return -TARGET_EFAULT;
                }

                /*
                 * The kernel calls swcr_update_status to update the
                 * status bits from the fpcr at every point that it
                 * could be queried.  Therefore, we store the status
                 * bits only in FPCR.
                 */
                ((CPUAlphaState *)cpu_env)->swcr
                    = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);

                fpcr = cpu_alpha_load_fpcr(cpu_env);
                fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
                fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
                cpu_alpha_store_fpcr(cpu_env, fpcr);
                ret = 0;
            }
            break;

          case TARGET_SSI_IEEE_RAISE_EXCEPTION:
            {
                uint64_t exc, fpcr, fex;

                if (get_user_u64(exc, arg2)) {
                    return -TARGET_EFAULT;
                }
                exc &= SWCR_STATUS_MASK;
                fpcr = cpu_alpha_load_fpcr(cpu_env);

                /* Old exceptions are not signaled.  */
                fex = alpha_ieee_fpcr_to_swcr(fpcr);
                fex = exc & ~fex;
                fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
                fex &= ((CPUArchState *)cpu_env)->swcr;

                /* Update the hardware fpcr.  */
                fpcr |= alpha_ieee_swcr_to_fpcr(exc);
                cpu_alpha_store_fpcr(cpu_env, fpcr);

                if (fex) {
                    int si_code = TARGET_FPE_FLTUNK;
                    target_siginfo_t info;

                    if (fex & SWCR_TRAP_ENABLE_DNO) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INE) {
                        si_code = TARGET_FPE_FLTRES;
                    }
                    if (fex & SWCR_TRAP_ENABLE_UNF) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_OVF) {
                        si_code = TARGET_FPE_FLTOVF;
                    }
                    if (fex & SWCR_TRAP_ENABLE_DZE) {
                        si_code = TARGET_FPE_FLTDIV;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INV) {
                        si_code = TARGET_FPE_FLTINV;
                    }

                    info.si_signo = SIGFPE;
                    info.si_errno = 0;
                    info.si_code = si_code;
                    info._sifields._sigfault._addr
                        = ((CPUArchState *)cpu_env)->pc;
                    queue_signal((CPUArchState *)cpu_env, info.si_signo,
                                 QEMU_SI_FAULT, &info);
                }
                ret = 0;
            }
            break;

          /* case SSI_NVPAIRS:
             -- Used with SSIN_UACPROC to enable unaligned accesses.
             case SSI_IEEE_STATE_AT_SIGNAL:
             case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel
          */
        }
        return ret;
#endif
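    /*
     * In the SSI_IEEE_RAISE_EXCEPTION handler above, the chain of
     * si_code assignments means the last matching exception bit wins:
     * e.g. an invalid-operation trap (SWCR_TRAP_ENABLE_INV) takes
     * precedence over an inexact-result trap when both bits are set
     * in fex.
     */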
#ifdef TARGET_NR_osf_sigprocmask
    /* Alpha specific.  */
    case TARGET_NR_osf_sigprocmask:
        {
            abi_ulong mask;
            int how;
            sigset_t set, oldset;

            switch(arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);
            ret = do_sigprocmask(how, &set, &oldset);
            if (!ret) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_getgid32
    case TARGET_NR_getgid32:
        return get_errno(getgid());
#endif
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        return get_errno(geteuid());
#endif
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        return get_errno(getegid());
#endif
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        return get_errno(setreuid(arg1, arg2));
#endif
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        return get_errno(setregid(arg1, arg2));
#endif
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        {
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0)
                return ret;
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for(i = 0;i < ret; i++)
                    target_grouplist[i] = tswap32(grouplist[i]);
                unlock_user(target_grouplist, arg2, gidsetsize * 4);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
        {
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
            if (!target_grouplist) {
                return -TARGET_EFAULT;
            }
            for(i = 0;i < gidsetsize; i++)
                grouplist[i] = tswap32(target_grouplist[i]);
            unlock_user(target_grouplist, arg2, 0);
            return get_errno(setgroups(gidsetsize, grouplist));
        }
#endif
#ifdef TARGET_NR_fchown32
    case TARGET_NR_fchown32:
        return get_errno(fchown(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_setresuid32
    case TARGET_NR_setresuid32:
        return get_errno(sys_setresuid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresuid32
    case TARGET_NR_getresuid32:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_u32(ruid, arg1)
                    || put_user_u32(euid, arg2)
                    || put_user_u32(suid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setresgid32
    case TARGET_NR_setresgid32:
        return get_errno(sys_setresgid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresgid32
    case TARGET_NR_getresgid32:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_u32(rgid, arg1)
                    || put_user_u32(egid, arg2)
                    || put_user_u32(sgid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        return get_errno(sys_setuid(arg1));
#endif
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        return get_errno(sys_setgid(arg1));
#endif
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        return get_errno(setfsuid(arg1));
#endif
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        return get_errno(setfsgid(arg1));
#endif
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
            if (!a) {
                return -TARGET_ENOMEM;
            }
            p = lock_user_string(arg3);
            if (!p) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(mincore(a, arg2, p));
                unlock_user(p, arg3, ret);
            }
            unlock_user(a, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        /* arm_fadvise64_64 looks like fadvise64_64 but
         * with different argument order: fd, advice, offset, len
         * rather than the usual fd, offset, len, advice.
         * Note that offset and len are both 64-bit so appear as
         * pairs of 32-bit registers.
         */
        ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
                            target_offset64(arg5, arg6), arg2);
        return -host_to_target_errno(ret);
#endif

#if TARGET_ABI_BITS == 32

#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#if defined(TARGET_PPC) || defined(TARGET_XTENSA)
        /* 6 args: fd, advice, offset (high, low), len (high, low) */
        ret = arg2;
        arg2 = arg3;
        arg3 = arg4;
        arg4 = arg5;
        arg5 = arg6;
        arg6 = ret;
#else
        /* 6 args: fd, offset (high, low), len (high, low), advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in (5,6) and advice in 7 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
            arg6 = arg7;
        }
#endif
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
                            target_offset64(arg4, arg5), arg6);
        return -host_to_target_errno(ret);
#endif

#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
        /* 5 args: fd, offset (high, low), len, advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in 5 and advice in 6 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
        }
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
        return -host_to_target_errno(ret);
#endif

#else /* not a 32-bit ABI */
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
#endif
#ifdef TARGET_S390X
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        }
#endif
        return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
#endif
#endif /* end of 64-bit ABI fadvise handling */
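    /*
     * Worked example for the 32-bit paths above: on a target where
     * regpairs_aligned() is true, a guest fadvise64_64(fd, offset, len,
     * advice) call delivers offset in the register pair (arg3, arg4) and
     * len in (arg5, arg6), with advice pushed out to arg7; after the
     * shuffle, target_offset64() folds each pair back into one 64-bit
     * value for the host posix_fadvise() call.
     */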
#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        /* A straight passthrough may not be safe because qemu sometimes
           turns private file-backed mappings into anonymous mappings.
           This will break MADV_DONTNEED.
           This is a hint, so ignoring and returning success is ok.  */
        return 0;
#endif
#ifdef TARGET_NR_fcntl64
    case TARGET_NR_fcntl64:
    {
        int cmd;
        struct flock64 fl;
        from_flock64_fn *copyfrom = copy_from_user_flock64;
        to_flock64_fn *copyto = copy_to_user_flock64;

#ifdef TARGET_ARM
        if (!((CPUARMState *)cpu_env)->eabi) {
            copyfrom = copy_from_user_oabi_flock64;
            copyto = copy_to_user_oabi_flock64;
        }
#endif

        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL) {
            return cmd;
        }

        switch(arg2) {
        case TARGET_F_GETLK64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            if (ret == 0) {
                ret = copyto(arg3, &fl);
            }
            break;

        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            break;
        default:
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        return 0;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        return TARGET_PAGE_SIZE;
#endif
    case TARGET_NR_gettid:
        return get_errno(sys_gettid());
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32
        if (regpairs_aligned(cpu_env, num)) {
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
        ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        return ret;
#endif
#ifdef TARGET_NR_setxattr
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    {
        void *p, *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        p = lock_user_string(arg1);
        if (p) {
            if (num == TARGET_NR_listxattr) {
                ret = get_errno(listxattr(p, b, arg3));
            } else {
                ret = get_errno(llistxattr(p, b, arg3));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_flistxattr:
    {
        void *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(flistxattr(arg1, b, arg3));
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_setxattr) {
                    ret = get_errno(setxattr(p, n, v, arg4, arg5));
                } else {
                    ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_fsetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_getxattr) {
                    ret = get_errno(getxattr(p, n, v, arg4));
                } else {
                    ret = get_errno(lgetxattr(p, n, v, arg4));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_fgetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fgetxattr(arg1, n, v, arg4));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
        {
            void *p, *n;
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_removexattr) {
                    ret = get_errno(removexattr(p, n));
                } else {
                    ret = get_errno(lremovexattr(p, n));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
        }
        return ret;
    case TARGET_NR_fremovexattr:
        {
            void *n;
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fremovexattr(arg1, n));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
        }
        return ret;
#endif /* CONFIG_ATTR */
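    /*
     * For the *xattr wrappers above, a zero arg2/arg3 buffer is passed
     * through as NULL/0, preserving the standard xattr size-probe
     * convention: e.g. listxattr(path, NULL, 0) returns the buffer size
     * the caller needs to allocate.
     */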
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
      ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
      return 0;
#elif defined(TARGET_CRIS)
      if (arg1 & 0xff)
          ret = -TARGET_EINVAL;
      else {
          ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
          ret = 0;
      }
      return ret;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
      return do_set_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
      {
          TaskState *ts = cpu->opaque;
          ts->tp_value = arg1;
          return 0;
      }
#else
      return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_get_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            return ts->tp_value;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        return -TARGET_ENOSYS;
#endif
#ifdef TARGET_NR_clock_settime
    case TARGET_NR_clock_settime:
    {
        struct timespec ts;

        ret = target_to_host_timespec(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_settime64
    case TARGET_NR_clock_settime64:
    {
        struct timespec ts;

        ret = target_to_host_timespec64(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime64
    case TARGET_NR_clock_gettime64:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec64(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres_time64
    case TARGET_NR_clock_getres_time64:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec64(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;
        if (target_to_host_timespec(&ts, arg3)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));
        /*
         * if the call is interrupted by a signal handler, it fails
         * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
         * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
         */
        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec(arg4, &ts)) {
            return -TARGET_EFAULT;
        }

        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep_time64
    case TARGET_NR_clock_nanosleep_time64:
    {
        struct timespec ts;

        if (target_to_host_timespec64(&ts, arg3)) {
            return -TARGET_EFAULT;
        }

        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));

        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec64(arg4, &ts)) {
            return -TARGET_EFAULT;
        }
        return ret;
    }
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
    case TARGET_NR_set_tid_address:
        return get_errno(set_tid_address((int *)g2h(arg1)));
#endif

    case TARGET_NR_tkill:
        return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));

    case TARGET_NR_tgkill:
        return get_errno(safe_tgkill((int)arg1, (int)arg2,
                         target_to_host_signal(arg3)));

#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
    case TARGET_NR_get_robust_list:
        /* The ABI for supporting robust futexes has userspace pass
         * the kernel a pointer to a linked list which is updated by
         * userspace after the syscall; the list is walked by the kernel
         * when the thread exits. Since the linked list in QEMU guest
         * memory isn't a valid linked list for the host and we have
         * no way to reliably intercept the thread-death event, we can't
         * support these. Silently return ENOSYS so that guest userspace
         * falls back to a non-robust futex implementation (which should
         * be OK except in the corner case of the guest crashing while
         * holding a mutex that is shared with another process via
         * shared memory).
         */
        return -TARGET_ENOSYS;
#endif
#if defined(TARGET_NR_utimensat)
    case TARGET_NR_utimensat:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec(ts + 1, arg3 +
                                            sizeof(struct target_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                if (!(p = lock_user_string(arg2))) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_utimensat_time64
    case TARGET_NR_utimensat_time64:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec64(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec64(ts + 1, arg3 +
                                     sizeof(struct target__kernel_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2) {
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            } else {
                p = lock_user_string(arg2);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_futex
    case TARGET_NR_futex:
        return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_futex_time64
    case TARGET_NR_futex_time64:
        return do_futex_time64(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(sys_inotify_init());
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
    case TARGET_NR_inotify_init1:
        ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
                                          fcntl_flags_tbl)));
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        return get_errno(sys_inotify_rm_watch(arg1, arg2));
#endif
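    /*
     * fd_trans_register() attaches a translator to the file descriptor
     * returned by inotify_init/inotify_init1 so that later reads can
     * convert the host's struct inotify_event stream into the guest's
     * byte order and layout (see fd-trans.c).
     */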
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            struct mq_attr posix_mq_attr;
            struct mq_attr *pposix_mq_attr;
            int host_flags;

            host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
            pposix_mq_attr = NULL;
            if (arg4) {
                if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
                    return -TARGET_EFAULT;
                }
                pposix_mq_attr = &posix_mq_attr;
            }
            p = lock_user_string(arg1 - 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
            unlock_user (p, arg1, 0);
        }
        return ret;

    case TARGET_NR_mq_unlink:
        p = lock_user_string(arg1 - 1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(mq_unlink(p));
        unlock_user (p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_mq_timedsend
    case TARGET_NR_mq_timedsend:
    {
        struct timespec ts;

        p = lock_user (VERIFY_READ, arg2, arg3, 1);
        if (arg5 != 0) {
            if (target_to_host_timespec(&ts, arg5)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
            if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                return -TARGET_EFAULT;
            }
        } else {
            ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
        }
        unlock_user (p, arg2, arg3);
    }
    return ret;
#endif
#ifdef TARGET_NR_mq_timedsend_time64
    case TARGET_NR_mq_timedsend_time64:
    {
        struct timespec ts;

        p = lock_user(VERIFY_READ, arg2, arg3, 1);
        if (arg5 != 0) {
            if (target_to_host_timespec64(&ts, arg5)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
            if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                return -TARGET_EFAULT;
            }
        } else {
            ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
        }
        unlock_user(p, arg2, arg3);
    }
    return ret;
#endif
#ifdef TARGET_NR_mq_timedreceive
    case TARGET_NR_mq_timedreceive:
    {
        struct timespec ts;
        unsigned int prio;

        p = lock_user (VERIFY_READ, arg2, arg3, 1);
        if (arg5 != 0) {
            if (target_to_host_timespec(&ts, arg5)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                 &prio, &ts));
            if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                return -TARGET_EFAULT;
            }
        } else {
            ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                 &prio, NULL));
        }
        unlock_user (p, arg2, arg3);
        if (arg4 != 0)
            put_user_u32(prio, arg4);
    }
    return ret;
#endif
#ifdef TARGET_NR_mq_timedreceive_time64
    case TARGET_NR_mq_timedreceive_time64:
    {
        struct timespec ts;
        unsigned int prio;

        p = lock_user(VERIFY_READ, arg2, arg3, 1);
        if (arg5 != 0) {
            if (target_to_host_timespec64(&ts, arg5)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                 &prio, &ts));
            if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                return -TARGET_EFAULT;
            }
        } else {
            ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                 &prio, NULL));
        }
        unlock_user(p, arg2, arg3);
        if (arg4 != 0)
            put_user_u32(prio, arg4);
    }
    return ret;
#endif
    /* Not implemented for now... */
/*     case TARGET_NR_mq_notify: */
/*         break; */

    case TARGET_NR_mq_getsetattr:
        {
            struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
            ret = 0;
            if (arg2 != 0) {
                copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
                ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
                                           &posix_mq_attr_out));
            } else if (arg3 != 0) {
                ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
            }
            if (ret == 0 && arg3 != 0) {
                copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
            }
        }
        return ret;
#endif
#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            ret = get_errno(tee(arg1,arg2,arg3,arg4));
        }
        return ret;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                if (get_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
                ploff_in = &loff_in;
            }
            if (arg4) {
                if (get_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
            if (arg2) {
                if (put_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
            }
            if (arg4) {
                if (put_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
    {
        int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
        if (arg2 & TARGET_O_NONBLOCK) {
            host_flags |= O_NONBLOCK;
        }
        if (arg2 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;
        }
        ret = get_errno(eventfd(arg1, host_flags));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
    }
#endif
#endif /* CONFIG_EVENTFD */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(TARGET_NR_sync_file_range2) || \
    defined(TARGET_NR_arm_sync_file_range)
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
#endif
#if defined(TARGET_NR_arm_sync_file_range)
    case TARGET_NR_arm_sync_file_range:
#endif
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        return ret;
#endif
#endif /* CONFIG_SYNC_FILE_RANGE */
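    /*
     * sync_file_range2/arm_sync_file_range move the flags argument up to
     * arg2 so that the two 64-bit offset/length values land in naturally
     * aligned register pairs on 32-bit ABIs, which is why the argument
     * order above differs from plain sync_file_range.
     */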
#if defined(TARGET_NR_signalfd4)
    case TARGET_NR_signalfd4:
        return do_signalfd4(arg1, arg2, arg4);
#endif
#if defined(TARGET_NR_signalfd)
    case TARGET_NR_signalfd:
        return do_signalfd4(arg1, arg2, 0);
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        return get_errno(epoll_create(arg1));
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            if (arg2 != EPOLL_CTL_DEL) {
                struct target_epoll_event *target_ep;
                if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                    return -TARGET_EFAULT;
                }
                ep.events = tswap32(target_ep->events);
                /*
                 * The epoll_data_t union is just opaque data to the kernel,
                 * so we transfer all 64 bits across and need not worry what
                 * actual data type it is.
                 */
                ep.data.u64 = tswap64(target_ep->data.u64);
                unlock_user_struct(target_ep, arg4, 0);
            }
            /*
             * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
             * non-null pointer, even though this argument is ignored.
             */
            epp = &ep;
        }
        return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
    }
#endif
#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(TARGET_NR_epoll_pwait)
    case TARGET_NR_epoll_pwait:
#endif
    {
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
            return -TARGET_EINVAL;
        }

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            return -TARGET_EFAULT;
        }

        ep = g_try_new(struct epoll_event, maxevents);
        if (!ep) {
            unlock_user(target_ep, arg2, 0);
            return -TARGET_ENOMEM;
        }

        switch (num) {
#if defined(TARGET_NR_epoll_pwait)
        case TARGET_NR_epoll_pwait:
        {
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg5) {
                if (arg6 != sizeof(target_sigset_t)) {
                    ret = -TARGET_EINVAL;
                    break;
                }

                target_set = lock_user(VERIFY_READ, arg5,
                                       sizeof(target_sigset_t), 1);
                if (!target_set) {
                    ret = -TARGET_EFAULT;
                    break;
                }
                target_to_host_sigset(set, target_set);
                unlock_user(target_set, arg5, 0);
            } else {
                set = NULL;
            }

            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             set, SIGSET_T_SIZE));
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             NULL, 0));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
            unlock_user(target_ep, arg2,
                        ret * sizeof(struct target_epoll_event));
        } else {
            unlock_user(target_ep, arg2, 0);
        }
        g_free(ep);
        return ret;
    }
#endif
#endif
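    /*
     * Bounding maxevents by TARGET_EP_MAX_EVENTS above keeps the
     * transient host-side epoll_event array allocated with g_try_new()
     * to a sane size; on success only the first 'ret' entries are
     * byte-swapped back into guest memory.
     */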
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        int resource = target_to_host_resource(arg2);

        if (arg3 && (resource != RLIMIT_AS &&
                     resource != RLIMIT_DATA &&
                     resource != RLIMIT_STACK)) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                return -TARGET_EFAULT;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                return -TARGET_EFAULT;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (name) {
            ret = get_errno(gethostname(name, arg2));
            unlock_user(name, arg1, arg2);
        } else {
            ret = -TARGET_EFAULT;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* should use start_exclusive from main.c */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal((CPUArchState *)cpu_env, info.si_signo,
                         QEMU_SI_FAULT, &info);
            ret = 0xdeadbeef;
        }
        if (mem_value == arg2)
            put_user_u32(arg1, arg6);
        return mem_value;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
        /* Like the kernel implementation and the
           qemu arm barrier, no-op this? */
        return 0;
#endif
#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers + timer_index;

            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    return ret;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                phtimer = NULL;
            } else {
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    }
#endif
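    /*
     * Guest timer ids handed out above encode TIMER_MAGIC in their high
     * bits plus an index into the g_posix_timers[] table; get_timer_id()
     * (used by the timer_settime/gettime/delete cases below) validates
     * the magic and recovers the index.
     */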
#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_settime64
    case TARGET_NR_timer_settime64:
    {
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec64(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_gettime64
    case TARGET_NR_timer_gettime64:
    {
        /* args: timer_t timerid, struct itimerspec64 *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec64(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_delete(htimer));
            g_posix_timers[timerid] = 0;
        }
        return ret;
    }
#endif
#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        return get_errno(timerfd_create(arg1,
                         target_to_host_bitmask(arg2, fcntl_flags_tbl)));
#endif
#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime64:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
        {
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime64:
        {
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec64(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    case TARGET_NR_ioprio_get:
        return get_errno(ioprio_get(arg1, arg2));
#endif

#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    case TARGET_NR_ioprio_set:
        return get_errno(ioprio_set(arg1, arg2, arg3));
#endif

#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        return get_errno(setns(arg1, arg2));
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        return get_errno(unshare(arg1));
#endif
#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
    case TARGET_NR_kcmp:
        return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
#endif
#ifdef TARGET_NR_swapcontext
    case TARGET_NR_swapcontext:
        /* PowerPC specific.  */
        return do_swapcontext(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_memfd_create
    case TARGET_NR_memfd_create:
        p = lock_user_string(arg1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(memfd_create(p, arg2));
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined TARGET_NR_membarrier && defined __NR_membarrier
    case TARGET_NR_membarrier:
        return get_errno(membarrier(arg1, arg2));
#endif

#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
    case TARGET_NR_copy_file_range:
        {
            loff_t inoff, outoff;
            loff_t *pinoff = NULL, *poutoff = NULL;

            if (arg2) {
                if (get_user_u64(inoff, arg2)) {
                    return -TARGET_EFAULT;
                }
                pinoff = &inoff;
            }
            if (arg4) {
                if (get_user_u64(outoff, arg4)) {
                    return -TARGET_EFAULT;
                }
                poutoff = &outoff;
            }
            ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
                                                 arg5, arg6));
            if (!is_error(ret) && ret > 0) {
                if (arg2) {
                    if (put_user_u64(inoff, arg2)) {
                        return -TARGET_EFAULT;
                    }
                }
                if (arg4) {
                    if (put_user_u64(outoff, arg4)) {
                        return -TARGET_EFAULT;
                    }
                }
            }
        }
        return ret;
#endif

    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
        return -TARGET_ENOSYS;
    }
    return ret;
}
abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
                    abi_long arg2, abi_long arg3, abi_long arg4,
                    abi_long arg5, abi_long arg6, abi_long arg7,
                    abi_long arg8)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long ret;

#ifdef DEBUG_ERESTARTSYS
    /* Debug-only code for exercising the syscall-restart code paths
     * in the per-architecture cpu main loops: restart every syscall
     * the guest makes once before letting it through.
     */
    {
        static bool flag;
        flag = !flag;
        if (flag) {
            return -TARGET_ERESTARTSYS;
        }
    }
#endif

    record_syscall_start(cpu, num, arg1,
                         arg2, arg3, arg4, arg5, arg6, arg7, arg8);

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
    }

    ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
                      arg5, arg6, arg7, arg8);

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_syscall_ret(cpu_env, num, ret, arg1, arg2,
                          arg3, arg4, arg5, arg6);
    }

    record_syscall_return(cpu, num, ret);
    return ret;
}