/*
 *  Linux syscalls
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/path.h"
#include "qemu/memfd.h"
#include "qemu/queue.h"
#include <sys/mount.h>
#include <sys/fsuid.h>
#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <linux/capability.h>
#include <sys/timex.h>
#include <sys/socket.h>
#include <linux/sockios.h>
#include <sys/times.h>
#include <sys/statfs.h>
#include <sys/sysinfo.h>
#include <sys/signalfd.h>
//#include <sys/user.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/if_tun.h>
#include <linux/in6.h>
#include <linux/errqueue.h>
#include <linux/random.h>
#ifdef CONFIG_TIMERFD
#include <sys/timerfd.h>
#endif
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif
#ifdef CONFIG_EPOLL
#include <sys/epoll.h>
#endif
#ifdef CONFIG_ATTR
#include "qemu/xattr.h"
#endif
#ifdef CONFIG_SENDFILE
#include <sys/sendfile.h>
#endif
#ifdef HAVE_SYS_KCOV_H
#include <sys/kcov.h>
#endif
#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/mtio.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include <linux/fb.h>
#if defined(CONFIG_USBFS)
#include <linux/usbdevice_fs.h>
#include <linux/usb/ch9.h>
#endif
#include <linux/vt.h>
#include <linux/dm-ioctl.h>
#include <linux/reboot.h>
#include <linux/route.h>
#include <linux/filter.h>
#include <linux/blkpg.h>
#include <netpacket/packet.h>
#include <linux/netlink.h>
#include <linux/if_alg.h>
#include <linux/rtc.h>
#include <sound/asound.h>
#ifdef HAVE_BTRFS_H
#include <linux/btrfs.h>
#endif
#ifdef HAVE_DRM_H
#include <libdrm/drm.h>
#include <libdrm/i915_drm.h>
#endif
#include "linux_loop.h"
#include "qemu/guest-random.h"
#include "qemu/selfmap.h"
#include "user/syscall-trace.h"
#include "qapi/error.h"
#include "fd-trans.h"
#ifndef CLONE_IO
#define CLONE_IO                0x80000000      /* Clone io context */
#endif
/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */
/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                     \
    (CLONE_DETACHED | CLONE_IO)

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS               \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS                             \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

#define CLONE_INVALID_FORK_FLAGS                                        \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

#define CLONE_INVALID_THREAD_FLAGS                                      \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
       CLONE_IGNORED_FLAGS))

/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */
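/*
 * Illustrative sketch (not from the original source; names are
 * hypothetical): conceptually, the masks above let clone requests be
 * classified along these lines before dispatching to pthread_create()
 * or fork() style handling:
 *
 *     static bool clone_looks_like_pthread(unsigned int flags)
 *     {
 *         return (flags & CLONE_THREAD_FLAGS) == CLONE_THREAD_FLAGS &&
 *                !(flags & CLONE_INVALID_THREAD_FLAGS);
 *     }
 *
 *     static bool clone_looks_like_fork(unsigned int flags)
 *     {
 *         return !(flags & CLONE_THREAD_FLAGS) &&
 *                !(flags & CLONE_INVALID_FORK_FLAGS);
 *     }
 */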
/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH  _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
#define _syscall0(type,name)            \
static type name (void)                 \
{                                       \
    return syscall(__NR_##name);        \
}

#define _syscall1(type,name,type1,arg1)         \
static type name (type1 arg1)                   \
{                                               \
    return syscall(__NR_##name, arg1);          \
}

#define _syscall2(type,name,type1,arg1,type2,arg2)      \
static type name (type1 arg1,type2 arg2)                \
{                                                       \
    return syscall(__NR_##name, arg1, arg2);            \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)   \
static type name (type1 arg1,type2 arg2,type3 arg3)             \
{                                                               \
    return syscall(__NR_##name, arg1, arg2, arg3);              \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)  \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)            \
{                                                                         \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4);                  \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,  \
                  type5,arg5)                                             \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{                                                                         \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);            \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,   \
                  type5,arg5,type6,arg6)                                   \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,  \
                  type6 arg6)                                              \
{                                                                          \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);       \
}
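/*
 * Illustrative note (not part of the original source): given the
 * __NR_sys_* aliases below, e.g. "#define __NR_sys_getcwd1 __NR_getcwd",
 * a declaration such as
 *
 *     _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
 *
 * expands to roughly
 *
 *     static int sys_getcwd1(char *buf, size_t size)
 *     {
 *         return syscall(__NR_sys_getcwd1, buf, size);
 *     }
 *
 * which the preprocessor then resolves to the host's __NR_getcwd. The
 * sys_ prefix avoids colliding with libc wrappers of the same name.
 */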
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
#define __NR_sys_syslog __NR_syslog
#if defined(__NR_futex)
# define __NR_sys_futex __NR_futex
#endif
#if defined(__NR_futex_time64)
# define __NR_sys_futex_time64 __NR_futex_time64
#endif
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
#define __NR_sys_statx __NR_statx
#if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif

/* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
#ifndef TARGET_O_NONBLOCK_MASK
#define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
#endif
#define __NR_sys_gettid __NR_gettid
_syscall0(int, sys_gettid)
/* For the 64-bit guest on 32-bit host case we must emulate
 * getdents using getdents64, because otherwise the host
 * might hand us back more dirent records than we can fit
 * into the guest buffer after structure format conversion.
 * Otherwise we emulate getdents with getdents if the host has it.
 */
#if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
#define EMULATE_GETDENTS_WITH_GETDENTS
#endif
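/*
 * Illustrative comparison (assumption; field layout per the kernel ABI):
 *
 *     struct linux_dirent   { unsigned long d_ino; unsigned long d_off;
 *                             unsigned short d_reclen; char d_name[]; };
 *     struct linux_dirent64 { uint64_t d_ino; int64_t d_off;
 *                             unsigned short d_reclen;
 *                             unsigned char d_type; char d_name[]; };
 *
 * On a 32-bit host, d_ino/d_off are 4 bytes each, so the host can pack
 * more records into N bytes than the 64-bit guest layout can hold after
 * conversion; hence the getdents64-based emulation in that case.
 */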
#if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
#if (defined(TARGET_NR_getdents) && \
      !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
_syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
          siginfo_t *, uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
_syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_getcpu __NR_getcpu
_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
_syscall2(int, ioprio_get, int, which, int, who)
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
#endif
#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
_syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
          unsigned long, idx1, unsigned long, idx2)
#endif
/*
 * It is assumed that struct statx is architecture independent.
 */
#if defined(TARGET_NR_statx) && defined(__NR_statx)
_syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
          unsigned int, mask, struct target_statx *, statxbuf)
#endif
#if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
_syscall2(int, membarrier, int, cmd, int, flags)
#endif
static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest. */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
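/*
 * Illustrative usage (assumption about the generic helpers declared
 * elsewhere in linux-user): open-flag conversion walks this table via
 * the bitmask translation helpers, e.g.
 *
 *     int host_flags = target_to_host_bitmask(target_flags,
 *                                             fcntl_flags_tbl);
 *     int tgt_flags  = host_to_target_bitmask(host_flags,
 *                                             fcntl_flags_tbl);
 *
 * For each entry, the source value is masked with the mask field,
 * compared against the bits field, and on a match the corresponding
 * bits from the other side are ORed into the result.
 */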
_syscall2(int, sys_getcwd1, char *, buf, size_t, size)
#if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
    return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
    return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};

_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, };

static inline int next_free_host_timer(void)
{
    int k;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};
/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EAGAIN] = TARGET_EAGAIN,
    [EIDRM] = TARGET_EIDRM,
    [ECHRNG] = TARGET_ECHRNG,
    [EL2NSYNC] = TARGET_EL2NSYNC,
    [EL3HLT] = TARGET_EL3HLT,
    [EL3RST] = TARGET_EL3RST,
    [ELNRNG] = TARGET_ELNRNG,
    [EUNATCH] = TARGET_EUNATCH,
    [ENOCSI] = TARGET_ENOCSI,
    [EL2HLT] = TARGET_EL2HLT,
    [EDEADLK] = TARGET_EDEADLK,
    [ENOLCK] = TARGET_ENOLCK,
    [EBADE] = TARGET_EBADE,
    [EBADR] = TARGET_EBADR,
    [EXFULL] = TARGET_EXFULL,
    [ENOANO] = TARGET_ENOANO,
    [EBADRQC] = TARGET_EBADRQC,
    [EBADSLT] = TARGET_EBADSLT,
    [EBFONT] = TARGET_EBFONT,
    [ENOSTR] = TARGET_ENOSTR,
    [ENODATA] = TARGET_ENODATA,
    [ETIME] = TARGET_ETIME,
    [ENOSR] = TARGET_ENOSR,
    [ENONET] = TARGET_ENONET,
    [ENOPKG] = TARGET_ENOPKG,
    [EREMOTE] = TARGET_EREMOTE,
    [ENOLINK] = TARGET_ENOLINK,
    [EADV] = TARGET_EADV,
    [ESRMNT] = TARGET_ESRMNT,
    [ECOMM] = TARGET_ECOMM,
    [EPROTO] = TARGET_EPROTO,
    [EDOTDOT] = TARGET_EDOTDOT,
    [EMULTIHOP] = TARGET_EMULTIHOP,
    [EBADMSG] = TARGET_EBADMSG,
    [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
    [EOVERFLOW] = TARGET_EOVERFLOW,
    [ENOTUNIQ] = TARGET_ENOTUNIQ,
    [EBADFD] = TARGET_EBADFD,
    [EREMCHG] = TARGET_EREMCHG,
    [ELIBACC] = TARGET_ELIBACC,
    [ELIBBAD] = TARGET_ELIBBAD,
    [ELIBSCN] = TARGET_ELIBSCN,
    [ELIBMAX] = TARGET_ELIBMAX,
    [ELIBEXEC] = TARGET_ELIBEXEC,
    [EILSEQ] = TARGET_EILSEQ,
    [ENOSYS] = TARGET_ENOSYS,
    [ELOOP] = TARGET_ELOOP,
    [ERESTART] = TARGET_ERESTART,
    [ESTRPIPE] = TARGET_ESTRPIPE,
    [ENOTEMPTY] = TARGET_ENOTEMPTY,
    [EUSERS] = TARGET_EUSERS,
    [ENOTSOCK] = TARGET_ENOTSOCK,
    [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
    [EMSGSIZE] = TARGET_EMSGSIZE,
    [EPROTOTYPE] = TARGET_EPROTOTYPE,
    [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
    [EADDRINUSE] = TARGET_EADDRINUSE,
    [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
    [ENETDOWN] = TARGET_ENETDOWN,
    [ENETUNREACH] = TARGET_ENETUNREACH,
    [ENETRESET] = TARGET_ENETRESET,
    [ECONNABORTED] = TARGET_ECONNABORTED,
    [ECONNRESET] = TARGET_ECONNRESET,
    [ENOBUFS] = TARGET_ENOBUFS,
    [EISCONN] = TARGET_EISCONN,
    [ENOTCONN] = TARGET_ENOTCONN,
    [EUCLEAN] = TARGET_EUCLEAN,
    [ENOTNAM] = TARGET_ENOTNAM,
    [ENAVAIL] = TARGET_ENAVAIL,
    [EISNAM] = TARGET_EISNAM,
    [EREMOTEIO] = TARGET_EREMOTEIO,
    [EDQUOT] = TARGET_EDQUOT,
    [ESHUTDOWN] = TARGET_ESHUTDOWN,
    [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
    [ETIMEDOUT] = TARGET_ETIMEDOUT,
    [ECONNREFUSED] = TARGET_ECONNREFUSED,
    [EHOSTDOWN] = TARGET_EHOSTDOWN,
    [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
    [EALREADY] = TARGET_EALREADY,
    [EINPROGRESS] = TARGET_EINPROGRESS,
    [ESTALE] = TARGET_ESTALE,
    [ECANCELED] = TARGET_ECANCELED,
    [ENOMEDIUM] = TARGET_ENOMEDIUM,
    [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
#ifdef ENOKEY
    [ENOKEY] = TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED] = TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED] = TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD] = TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
#endif
#ifdef ENOMSG
    [ENOMSG] = TARGET_ENOMSG,
#endif
#ifdef ERFKILL
    [ERFKILL] = TARGET_ERFKILL,
#endif
#ifdef EHWPOISON
    [EHWPOISON] = TARGET_EHWPOISON,
#endif
};
static inline int host_to_target_errno(int err)
{
    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
        host_to_target_errno_table[err]) {
        return host_to_target_errno_table[err];
    }
    return err;
}

static inline int target_to_host_errno(int err)
{
    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
        target_to_host_errno_table[err]) {
        return target_to_host_errno_table[err];
    }
    return err;
}
static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1) {
        return -host_to_target_errno(errno);
    } else {
        return ret;
    }
}
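/*
 * Typical call pattern (illustration, matching how the wrappers in this
 * file are used): host syscall results are passed through get_errno()
 * so that failures become target errno values, e.g.
 *
 *     ret = get_errno(openat(dirfd, path, host_flags, mode));
 *
 * A negative result is then already -TARGET_Exxx and can be handed back
 * to the guest directly.
 */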
const char *target_strerror(int err)
{
    if (err == TARGET_ERESTARTSYS) {
        return "To be restarted";
    }
    if (err == TARGET_QEMU_ESIGRETURN) {
        return "Successful exit from sigreturn";
    }

    if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
        return NULL;
    }
    return strerror(target_to_host_errno(err));
}
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
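/*
 * Illustrative note (an assumption about safe_syscall(), which is
 * defined elsewhere in linux-user): a wrapper generated below, e.g.
 *
 *     safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
 *
 * produces safe_read(fd, buff, count), a blocking call that can be
 * interrupted by guest signals; when the syscall should be restarted it
 * returns -1 with errno set to TARGET_ERESTARTSYS, so callers wrap it as
 * get_errno(safe_read(...)).
 */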
safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
              int, flags, mode_t, mode)
#if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
              struct rusage *, rusage)
#endif
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
              int, options, struct rusage *, rusage)
safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
#endif
#if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
              size_t, sigsetsize)
#endif
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
              size_t, sigsetsize)
#if defined(__NR_futex)
safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
              socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
#if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
#endif
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
#if defined(TARGET_NR_nanosleep)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#endif
#if defined(TARGET_NR_clock_nanosleep) || \
    defined(TARGET_NR_clock_nanosleep_time64)
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
#ifdef __NR_ipc
#if defined(__sparc__)
/* SPARC passes only five arguments to the ipc syscall */
safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr)
#else
safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
#endif
#endif
#ifdef __NR_msgsnd
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
#endif
#ifdef __NR_msgrcv
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
#endif
#ifdef __NR_semtimedop
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedsend_time64)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedreceive) || \
    defined(TARGET_NR_mq_timedreceive_time64)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
              int, outfd, loff_t *, poutoff, size_t, length,
              unsigned int, flags)
#endif
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
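/*
 * Illustrative caller (following the rule in the comment above):
 *
 *     struct flock64 fl64;
 *     ret = get_errno(safe_fcntl(fd, F_GETLK64, &fl64));
 *
 * Always using the 64-bit constants and struct means the same code path
 * handles large offsets on both 32-bit and 64-bit hosts.
 */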
static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}
static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}
//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)
/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    /* brk pointers are always untagged */

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done...  */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h_untagged(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    } else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
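/*
 * Worked example (illustrative numbers): with target_brk = 0x5800 and
 * brk_page = 0x6000, a request for new_brk = 0x9000 takes the grow path:
 * new_alloc_size = HOST_PAGE_ALIGN(0x9000 - 0x6000) = 0x3000, and
 * target_mmap() is asked for 0x3000 bytes at 0x6000. If the kernel maps
 * exactly there, brk_page becomes 0x9000; if it maps elsewhere, the
 * mapping is undone and the old break is returned instead.
 */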
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}
static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}
copy_to_user_fdset(abi_ulong target_fds_addr
,
1025 abi_ulong
*target_fds
;
1027 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1028 if (!(target_fds
= lock_user(VERIFY_WRITE
,
1030 sizeof(abi_ulong
) * nw
,
1032 return -TARGET_EFAULT
;
1035 for (i
= 0; i
< nw
; i
++) {
1037 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1038 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
1041 __put_user(v
, &target_fds
[i
]);
1044 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
1050 #if defined(__alpha__)
1051 #define HOST_HZ 1024
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
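/*
 * Worked example (illustrative): on an Alpha host (HOST_HZ = 1024)
 * emulating a guest with TARGET_HZ = 100, 2048 host ticks convert to
 * (2048 * 100) / 1024 = 200 guest clock ticks; the int64_t cast keeps
 * the intermediate product from overflowing a 32-bit long.
 */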
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
#ifdef TARGET_NR_setrlimit
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}
#endif
#if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
#endif
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
static inline abi_long copy_from_user_timeval64(struct timeval *tv,
                                                abi_ulong target_tv_addr)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
#endif
static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
                                              const struct timeval *tv)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
#if defined(TARGET_NR_futex) || \
    defined(TARGET_NR_rt_sigtimedwait) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) || \
    defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
    defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
    defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
    defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
#if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
    defined(TARGET_NR_timer_settime64) || \
    defined(TARGET_NR_mq_timedsend_time64) || \
    defined(TARGET_NR_mq_timedreceive_time64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_clock_nanosleep_time64) || \
    defined(TARGET_NR_rt_sigtimedwait_time64) || \
    defined(TARGET_NR_utimensat) || \
    defined(TARGET_NR_utimensat_time64) || \
    defined(TARGET_NR_semtimedop_time64) || \
    defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
                                                 abi_ulong target_addr)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    /* in 32bit mode, this drops the padding */
    host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
                                                 struct timespec *host_ts)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
#if defined(TARGET_NR_gettimeofday)
static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
                                             struct timezone *tz)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_settimeofday)
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}
static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
#endif
#if defined(TARGET_WANT_OLD_SYS_SELECT)
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
#if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
                            abi_long arg4, abi_long arg5, abi_long arg6,
                            bool time64)
{
    abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /*
     * The 6th arg is actually two args smashed together,
     * so we cannot use the C library.
     */
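    /*
     * Illustrative layout (an assumption, mirroring the kernel's
     * sigset argument packing): arg6 points at guest memory holding
     *
     *     struct { abi_ulong sigset_addr; abi_ulong sigset_size; };
     *
     * which is unpacked into arg_sigset/arg_sigsize below.
     */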
    sigset_t set;
    struct {
        sigset_t *set;
        size_t size;
    } sig, *sig_ptr;

    abi_ulong arg_sigset, arg_sigsize, *arg7;
    target_sigset_t *target_sigset;

    n = arg1;
    rfd_addr = arg2;
    wfd_addr = arg3;
    efd_addr = arg4;
    ts_addr = arg5;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /*
     * This takes a timespec, and not a timeval, so we cannot
     * use the do_select() helper ...
     */
    if (ts_addr) {
        if (time64) {
            if (target_to_host_timespec64(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        }
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    /* Extract the two packed args for the sigset */
    if (arg6) {
        sig_ptr = &sig;
        sig.size = SIGSET_T_SIZE;

        arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
        if (!arg7) {
            return -TARGET_EFAULT;
        }
        arg_sigset = tswapal(arg7[0]);
        arg_sigsize = tswapal(arg7[1]);
        unlock_user(arg7, arg6, 0);

        if (arg_sigset) {
            sig.set = &set;
            if (arg_sigsize != sizeof(*target_sigset)) {
                /* Like the kernel, we enforce correct size sigsets */
                return -TARGET_EINVAL;
            }
            target_sigset = lock_user(VERIFY_READ, arg_sigset,
                                      sizeof(*target_sigset), 1);
            if (!target_sigset) {
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(&set, target_sigset);
            unlock_user(target_sigset, arg_sigset, 0);
        } else {
            sig.set = NULL;
        }
    } else {
        sig_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, sig_ptr));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
            return -TARGET_EFAULT;
        }
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
            return -TARGET_EFAULT;
        }
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
            return -TARGET_EFAULT;
        }
        if (time64) {
            if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        }
    }
    return ret;
}
#endif
#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
    defined(TARGET_NR_ppoll_time64)
static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
                         abi_long arg4, abi_long arg5, bool ppoll, bool time64)
{
    struct target_pollfd *target_pfd;
    unsigned int nfds = arg2;
    struct pollfd *pfd;
    unsigned int i;
    abi_long ret;

    pfd = NULL;
    target_pfd = NULL;
    if (nfds) {
        if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
            return -TARGET_EINVAL;
        }
        target_pfd = lock_user(VERIFY_WRITE, arg1,
                               sizeof(struct target_pollfd) * nfds, 1);
        if (!target_pfd) {
            return -TARGET_EFAULT;
        }

        pfd = alloca(sizeof(struct pollfd) * nfds);
        for (i = 0; i < nfds; i++) {
            pfd[i].fd = tswap32(target_pfd[i].fd);
            pfd[i].events = tswap16(target_pfd[i].events);
        }
    }
    if (ppoll) {
        struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
        target_sigset_t *target_set;
        sigset_t _set, *set = &_set;

        if (arg3) {
            if (time64) {
                if (target_to_host_timespec64(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                if (target_to_host_timespec(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            }
        } else {
            timeout_ts = NULL;
        }

        if (arg4) {
            if (arg5 != sizeof(target_sigset_t)) {
                unlock_user(target_pfd, arg1, 0);
                return -TARGET_EINVAL;
            }

            target_set = lock_user(VERIFY_READ, arg4,
                                   sizeof(target_sigset_t), 1);
            if (!target_set) {
                unlock_user(target_pfd, arg1, 0);
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(set, target_set);
        } else {
            set = NULL;
        }

        ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
                                   set, SIGSET_T_SIZE));

        if (!is_error(ret) && arg3) {
            if (time64) {
                if (host_to_target_timespec64(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                if (host_to_target_timespec(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        if (arg4) {
            unlock_user(target_set, arg4, 0);
        }
    } else {
        struct timespec ts, *pts;

        if (arg3 >= 0) {
            /* Convert ms to secs, ns */
            ts.tv_sec = arg3 / 1000;
            ts.tv_nsec = (arg3 % 1000) * 1000000LL;
            pts = &ts;
        } else {
            /* -ve poll() timeout means "infinite" */
            pts = NULL;
        }
        ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
    }

    if (!is_error(ret)) {
        for (i = 0; i < nfds; i++) {
            target_pfd[i].revents = tswap16(pfd[i].revents);
        }
    }
    unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
    return ret;
}
#endif
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}

static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);
    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK &&
        len >= sizeof(struct target_sockaddr_nl)) {
        struct target_sockaddr_nl *target_nl =
               (struct target_sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
               (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
target_to_host_cmsg(struct msghdr
*msgh
,
1869 struct target_msghdr
*target_msgh
)
1871 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1872 abi_long msg_controllen
;
1873 abi_ulong target_cmsg_addr
;
1874 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1875 socklen_t space
= 0;
1877 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1878 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1880 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1881 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1882 target_cmsg_start
= target_cmsg
;
1884 return -TARGET_EFAULT
;
1886 while (cmsg
&& target_cmsg
) {
1887 void *data
= CMSG_DATA(cmsg
);
1888 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1890 int len
= tswapal(target_cmsg
->cmsg_len
)
1891 - sizeof(struct target_cmsghdr
);
1893 space
+= CMSG_SPACE(len
);
1894 if (space
> msgh
->msg_controllen
) {
1895 space
-= CMSG_SPACE(len
);
1896 /* This is a QEMU bug, since we allocated the payload
1897 * area ourselves (unlike overflow in host-to-target
1898 * conversion, which is just the guest giving us a buffer
1899 * that's too small). It can't happen for the payload types
1900 * we currently support; if it becomes an issue in future
1901 * we would need to improve our allocation strategy to
1902 * something more intelligent than "twice the size of the
1903 * target buffer we're reading from".
1905 qemu_log_mask(LOG_UNIMP
,
1906 ("Unsupported ancillary data %d/%d: "
1907 "unhandled msg size\n"),
1908 tswap32(target_cmsg
->cmsg_level
),
1909 tswap32(target_cmsg
->cmsg_type
));
1913 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1914 cmsg
->cmsg_level
= SOL_SOCKET
;
1916 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1918 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1919 cmsg
->cmsg_len
= CMSG_LEN(len
);
1921 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1922 int *fd
= (int *)data
;
1923 int *target_fd
= (int *)target_data
;
1924 int i
, numfds
= len
/ sizeof(int);
1926 for (i
= 0; i
< numfds
; i
++) {
1927 __get_user(fd
[i
], target_fd
+ i
);
1929 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1930 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1931 struct ucred
*cred
= (struct ucred
*)data
;
1932 struct target_ucred
*target_cred
=
1933 (struct target_ucred
*)target_data
;
1935 __get_user(cred
->pid
, &target_cred
->pid
);
1936 __get_user(cred
->uid
, &target_cred
->uid
);
1937 __get_user(cred
->gid
, &target_cred
->gid
);
1939 qemu_log_mask(LOG_UNIMP
, "Unsupported ancillary data: %d/%d\n",
1940 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1941 memcpy(data
, target_data
, len
);
1944 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1945 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1948 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1950 msgh
->msg_controllen
= space
;
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct target_cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        tgt_len = len;
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IP:
            switch (cmsg->cmsg_type) {
            case IP_TTL:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IP_RECVERR:
            {
                struct errhdr_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in offender;
                };
                struct errhdr_t *errh = (struct errhdr_t *)data;
                struct errhdr_t *target_errh =
                    (struct errhdr_t *)target_data;

                if (len != sizeof(struct errhdr_t) ||
                    tgt_len != sizeof(struct errhdr_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IPV6:
            switch (cmsg->cmsg_type) {
            case IPV6_HOPLIMIT:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IPV6_RECVERR:
            {
                struct errhdr6_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in6 offender;
                };
                struct errhdr6_t *errh = (struct errhdr6_t *)data;
                struct errhdr6_t *target_errh =
                    (struct errhdr6_t *)target_data;

                if (len != sizeof(struct errhdr6_t) ||
                    tgt_len != sizeof(struct errhdr6_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
        tgt_space = TARGET_CMSG_SPACE(tgt_len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
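/*
 * Worked example (illustrative, not from the original source): for an
 * SO_TIMESTAMP message from an LP64 host to a 32-bit guest, the host
 * payload is a 16-byte struct timeval while the guest expects an 8-byte
 * struct target_timeval, so tgt_len is adjusted to 8 above and the two
 * fields are converted individually; if msg_controllen cannot even hold
 * TARGET_CMSG_LEN(8), the guest instead sees MSG_CTRUNC.
 */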
/* do_setsockopt() Must return target values and target errnos. */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
    case SOL_UDP:
        /* TCP and UDP options all take an 'int' value. */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTTL:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
        case IPV6_ADDRFORM:
        case IPV6_2292PKTINFO:
        case IPV6_RECVTCLASS:
        case IPV6_RECVRTHDR:
        case IPV6_2292RTHDR:
        case IPV6_RECVHOPOPTS:
        case IPV6_2292HOPOPTS:
        case IPV6_RECVDSTOPTS:
        case IPV6_2292DSTOPTS:
        case IPV6_TCLASS:
        case IPV6_ADDR_PREFERENCES:
#ifdef IPV6_RECVPATHMTU
        case IPV6_RECVPATHMTU:
#endif
#ifdef IPV6_TRANSPARENT
        case IPV6_TRANSPARENT:
#endif
#ifdef IPV6_FREEBIND
        case IPV6_FREEBIND:
#endif
#ifdef IPV6_RECVORIGDSTADDR
        case IPV6_RECVORIGDSTADDR:
#endif
            val = 0;
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        case IPV6_PKTINFO:
        {
            struct in6_pktinfo pki;

            if (optlen < sizeof(pki)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
                return -TARGET_EFAULT;
            }

            pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &pki, sizeof(pki)));
            break;
        }
        case IPV6_ADD_MEMBERSHIP:
        case IPV6_DROP_MEMBERSHIP:
        {
            struct ipv6_mreq ipv6mreq;

            if (optlen < sizeof(ipv6mreq)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
                return -TARGET_EFAULT;
            }

            ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &ipv6mreq, sizeof(ipv6mreq)));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_ICMPV6:
        switch (optname) {
        case ICMPV6_FILTER:
        {
            struct icmp6_filter icmp6f;

            if (optlen > sizeof(icmp6f)) {
                optlen = sizeof(icmp6f);
            }

            if (copy_from_user(&icmp6f, optval_addr, optlen)) {
                return -TARGET_EFAULT;
            }

            for (val = 0; val < 8; val++) {
                icmp6f.data[val] = tswap32(icmp6f.data[val]);
            }

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &icmp6f, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_RAW:
        switch (optname) {
        case ICMP_FILTER:
        case IPV6_CHECKSUM:
            /* these take a u32 value */
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }

            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;

        default:
            goto unimplemented;
        }
        break;
#if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
    case SOL_ALG:
        switch (optname) {
        case ALG_SET_KEY:
        {
            char *alg_key = g_malloc(optlen);

            if (!alg_key) {
                return -TARGET_ENOMEM;
            }
            if (copy_from_user(alg_key, optval_addr, optlen)) {
                g_free(alg_key);
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       alg_key, optlen));
            g_free(alg_key);
            break;
        }
        case ALG_SET_AEAD_AUTHSIZE:
        {
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       NULL, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
#endif
    case TARGET_SOL_SOCKET:
        switch (optname) {
        case TARGET_SO_RCVTIMEO:
        {
            struct timeval tv;

            optname = SO_RCVTIMEO;

set_timeout:
            if (optlen != sizeof(struct target_timeval)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user_timeval(&tv, optval_addr)) {
                return -TARGET_EFAULT;
            }

            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                       &tv, sizeof(tv)));
            return ret;
        }
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            goto set_timeout;
        case TARGET_SO_ATTACH_FILTER:
        {
            struct target_sock_fprog *tfprog;
            struct target_sock_filter *tfilter;
            struct sock_fprog fprog;
            struct sock_filter *filter;
            int i;

            if (optlen != sizeof(*tfprog)) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            if (!lock_user_struct(VERIFY_READ, tfilter,
                                  tswapal(tfprog->filter), 0)) {
                unlock_user_struct(tfprog, optval_addr, 1);
                return -TARGET_EFAULT;
            }

            fprog.len = tswap16(tfprog->len);
            filter = g_try_new(struct sock_filter, fprog.len);
            if (filter == NULL) {
                unlock_user_struct(tfilter, tfprog->filter, 1);
                unlock_user_struct(tfprog, optval_addr, 1);
                return -TARGET_ENOMEM;
            }
            for (i = 0; i < fprog.len; i++) {
                filter[i].code = tswap16(tfilter[i].code);
                filter[i].jt = tfilter[i].jt;
                filter[i].jf = tfilter[i].jf;
                filter[i].k = tswap32(tfilter[i].k);
            }
            fprog.filter = filter;

            ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
                            SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
            g_free(filter);

            unlock_user_struct(tfilter, tfprog->filter, 1);
            unlock_user_struct(tfprog, optval_addr, 1);
            return ret;
        }
        case TARGET_SO_BINDTODEVICE:
        {
            char *dev_ifname, *addr_ifname;

            if (optlen > IFNAMSIZ - 1) {
                optlen = IFNAMSIZ - 1;
            }
            dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!dev_ifname) {
                return -TARGET_EFAULT;
            }
            optname = SO_BINDTODEVICE;
            addr_ifname = alloca(IFNAMSIZ);
            memcpy(addr_ifname, dev_ifname, optlen);
            addr_ifname[optlen] = 0;
            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                       addr_ifname, optlen));
            unlock_user (dev_ifname, optval_addr, 0);
            return ret;
        }
        case TARGET_SO_LINGER:
        {
            struct linger lg;
            struct target_linger *tlg;

            if (optlen != sizeof(struct target_linger)) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
                return -TARGET_EFAULT;
            }
            __get_user(lg.l_onoff, &tlg->l_onoff);
            __get_user(lg.l_linger, &tlg->l_linger);
            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
                                       &lg, sizeof(lg)));
            unlock_user_struct(tlg, optval_addr, 0);
            return ret;
        }
        /* Options with 'int' argument. */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            break;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            break;
#ifdef SO_REUSEPORT
        case TARGET_SO_REUSEPORT:
            optname = SO_REUSEPORT;
            break;
#endif
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            break;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            break;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            break;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            break;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            break;
        case TARGET_SO_SNDBUFFORCE:
            optname = SO_SNDBUFFORCE;
            break;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            break;
        case TARGET_SO_RCVBUFFORCE:
            optname = SO_RCVBUFFORCE;
            break;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            break;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            break;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            break;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            break;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            break;
        case TARGET_SO_PASSSEC:
            optname = SO_PASSSEC;
            break;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            break;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            break;
        default:
            goto unimplemented;
        }
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
#ifdef SOL_NETLINK
    case SOL_NETLINK:
        switch (optname) {
        case NETLINK_PKTINFO:
        case NETLINK_ADD_MEMBERSHIP:
        case NETLINK_DROP_MEMBERSHIP:
        case NETLINK_BROADCAST_ERROR:
        case NETLINK_NO_ENOBUFS:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LISTEN_ALL_NSID:
        case NETLINK_CAP_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
        case NETLINK_EXT_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
        case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
            break;
        default:
            goto unimplemented;
        }
        val = 0;
        if (optlen < sizeof(uint32_t)) {
            return -TARGET_EINVAL;
        }
        if (get_user_u32(val, optval_addr)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
                                   sizeof(val)));
        break;
#endif /* SOL_NETLINK */
    default:
    unimplemented:
        qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
                      level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}
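/*
 * Guest-side view (illustrative, not from the original source): a guest
 * running
 *     struct timeval tv = { .tv_sec = 5 };
 *     setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
 * arrives here with level == TARGET_SOL_SOCKET and a target_timeval in
 * guest memory; the TARGET_SO_RCVTIMEO case converts it with
 * copy_from_user_timeval() and issues the host setsockopt() with the
 * host's SO_RCVTIMEO value, which need not equal the target's constant.
 */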
/* do_getsockopt() Must return target values and target errnos. */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_RCVTIMEO: {
            struct timeval tv;
            socklen_t tvlen;

            optname = SO_RCVTIMEO;

get_timeout:
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            tvlen = sizeof(tv);
            ret = get_errno(getsockopt(sockfd, level, optname,
                                       &tv, &tvlen));
            if (ret < 0) {
                return ret;
            }
            if (len > sizeof(struct target_timeval)) {
                len = sizeof(struct target_timeval);
            }
            if (copy_to_user_timeval(optval_addr, &tv)) {
                return -TARGET_EFAULT;
            }
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            goto get_timeout;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case TARGET_SO_PEERSEC: {
            char *name;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }
            name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
            if (!name) {
                return -TARGET_EFAULT;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
                                       name, &lv));
            if (put_user_u32(lv, optlen)) {
                ret = -TARGET_EFAULT;
            }
            unlock_user(name, optval_addr, lv);
            break;
        }
        case TARGET_SO_LINGER:
        {
            struct linger lg;
            socklen_t lglen;
            struct target_linger *tlg;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            lglen = sizeof(lg);
            ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
                                       &lg, &lglen));
            if (ret < 0) {
                return ret;
            }
            if (len > lglen) {
                len = lglen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(lg.l_onoff, &tlg->l_onoff);
            __put_user(lg.l_linger, &tlg->l_linger);
            unlock_user_struct(tlg, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument. */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
#ifdef SO_REUSEPORT
        case TARGET_SO_REUSEPORT:
            optname = SO_REUSEPORT;
            goto int_case;
#endif
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        case TARGET_SO_PROTOCOL:
            optname = SO_PROTOCOL;
            goto int_case;
        case TARGET_SO_DOMAIN:
            optname = SO_DOMAIN;
            goto int_case;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
    case SOL_UDP:
        /* TCP and UDP options all take an 'int' value. */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            val = host_to_target_sock_type(val);
        }
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
        case IPV6_ADDRFORM:
        case IPV6_2292PKTINFO:
        case IPV6_RECVTCLASS:
        case IPV6_RECVRTHDR:
        case IPV6_2292RTHDR:
        case IPV6_RECVHOPOPTS:
        case IPV6_2292HOPOPTS:
        case IPV6_RECVDSTOPTS:
        case IPV6_2292DSTOPTS:
        case IPV6_TCLASS:
        case IPV6_ADDR_PREFERENCES:
#ifdef IPV6_RECVPATHMTU
        case IPV6_RECVPATHMTU:
#endif
#ifdef IPV6_TRANSPARENT
        case IPV6_TRANSPARENT:
#endif
#ifdef IPV6_FREEBIND
        case IPV6_FREEBIND:
#endif
#ifdef IPV6_RECVORIGDSTADDR
        case IPV6_RECVORIGDSTADDR:
#endif
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
#ifdef SOL_NETLINK
    case SOL_NETLINK:
        switch (optname) {
        case NETLINK_PKTINFO:
        case NETLINK_BROADCAST_ERROR:
        case NETLINK_NO_ENOBUFS:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LISTEN_ALL_NSID:
        case NETLINK_CAP_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
        case NETLINK_EXT_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
        case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len != sizeof(val)) {
                return -TARGET_EINVAL;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0) {
                return ret;
            }
            if (put_user_u32(lv, optlen)
                || put_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            break;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LIST_MEMBERSHIPS:
        {
            uint32_t *results;
            int i;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }
            results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
            if (!results && len > 0) {
                return -TARGET_EFAULT;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
            if (ret < 0) {
                unlock_user(results, optval_addr, 0);
                return ret;
            }
            /* swap host endianness to target endianness. */
            for (i = 0; i < (len / sizeof(uint32_t)); i++) {
                results[i] = tswap32(results[i]);
            }
            if (put_user_u32(lv, optlen)) {
                return -TARGET_EFAULT;
            }
            unlock_user(results, optval_addr, 0);
            break;
        }
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
        default:
            goto unimplemented;
        }
        break;
#endif /* SOL_NETLINK */
    default:
    unimplemented:
        qemu_log_mask(LOG_UNIMP,
                      "getsockopt level=%d optname=%d not yet supported\n",
                      level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
/* Convert target low/high pair representing file offset into the host
 * low/high pair. This function doesn't handle offsets bigger than 64 bits
 * as the kernel doesn't handle them either.
 */
static void target_to_host_low_high(abi_ulong tlow,
                                    abi_ulong thigh,
                                    unsigned long *hlow,
                                    unsigned long *hhigh)
{
    uint64_t off = tlow |
        ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
        TARGET_LONG_BITS / 2;

    *hlow = off;
    *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
}
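/*
 * Worked example (illustrative, not from the original source): for a
 * 32-bit target (TARGET_LONG_BITS == 32) with tlow == 0x89abcdef and
 * thigh == 0x01234567, off becomes 0x0123456789abcdef. On a 64-bit host
 * (HOST_LONG_BITS == 64) this yields *hlow == off and *hhigh == 0; on a
 * 32-bit host, *hlow == 0x89abcdef and *hhigh == 0x01234567. The split
 * double shift avoids shifting by a full word width, which would be
 * undefined behaviour in C.
 */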
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support. */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored. */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault. But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}

static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                         abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec) {
        for (i = 0; i < count; i++) {
            abi_ulong base = tswapal(target_vec[i].iov_base);
            abi_long len = tswapal(target_vec[i].iov_len);
            if (len < 0) {
                break;
            }
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
        unlock_user(target_vec, target_addr, 0);
    }

    g_free(vec);
}
static inline int target_to_host_sock_type(int *type)
{
    int host_type = 0;
    int target_type = *type;

    switch (target_type & TARGET_SOCK_TYPE_MASK) {
    case TARGET_SOCK_DGRAM:
        host_type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        host_type = SOCK_STREAM;
        break;
    default:
        host_type = target_type & TARGET_SOCK_TYPE_MASK;
        break;
    }
    if (target_type & TARGET_SOCK_CLOEXEC) {
#if defined(SOCK_CLOEXEC)
        host_type |= SOCK_CLOEXEC;
#else
        return -TARGET_EINVAL;
#endif
    }
    if (target_type & TARGET_SOCK_NONBLOCK) {
#if defined(SOCK_NONBLOCK)
        host_type |= SOCK_NONBLOCK;
#elif !defined(O_NONBLOCK)
        return -TARGET_EINVAL;
#endif
    }
    *type = host_type;
    return 0;
}

/* Try to emulate socket type flags after socket creation. */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
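/*
 * Illustrative note (not from the original source): on hosts whose
 * socket() lacks SOCK_NONBLOCK, the fixup above is equivalent to the
 * guest having run
 *     fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_NONBLOCK);
 * immediately after socket creation. SOCK_CLOEXEC has no such fallback,
 * which is why target_to_host_sock_type() rejects it outright when the
 * host does not define it.
 */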
/* do_socket() Must return target values and target errnos. */
static abi_long do_socket(int domain, int type, int protocol)
{
    int target_type = type;
    int ret;

    ret = target_to_host_sock_type(&type);
    if (ret) {
        return ret;
    }

    if (domain == PF_NETLINK && !(
#ifdef CONFIG_RTNETLINK
         protocol == NETLINK_ROUTE ||
#endif
         protocol == NETLINK_KOBJECT_UEVENT ||
         protocol == NETLINK_AUDIT)) {
        return -TARGET_EPROTONOSUPPORT;
    }

    if (domain == AF_PACKET ||
        (domain == AF_INET && type == SOCK_PACKET)) {
        protocol = tswap16(protocol);
    }

    ret = get_errno(socket(domain, type, protocol));
    if (ret >= 0) {
        ret = sock_flags_fixup(ret, target_type);
        if (type == SOCK_PACKET) {
            /* Handle an obsolete case:
             * if socket type is SOCK_PACKET, bind by name
             */
            fd_trans_register(ret, &target_packet_trans);
        } else if (domain == PF_NETLINK) {
            switch (protocol) {
#ifdef CONFIG_RTNETLINK
            case NETLINK_ROUTE:
                fd_trans_register(ret, &target_netlink_route_trans);
                break;
#endif
            case NETLINK_KOBJECT_UEVENT:
                /* nothing to do: messages are strings */
                break;
            case NETLINK_AUDIT:
                fd_trans_register(ret, &target_netlink_audit_trans);
                break;
            default:
                g_assert_not_reached();
            }
        }
    }
    return ret;
}

/* do_bind() Must return target values and target errnos. */
static abi_long do_bind(int sockfd, abi_ulong target_addr,
                        socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(bind(sockfd, addr, addrlen));
}

/* do_connect() Must return target values and target errnos. */
static abi_long do_connect(int sockfd, abi_ulong target_addr,
                           socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(safe_connect(sockfd, addr, addrlen));
}
/* do_sendrecvmsg_locked() Must return target values and target errnos. */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    abi_ulong count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
             */
            msg.msg_name = (void *)-1;
        } else if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    memset(msg.msg_control, 0, msg.msg_controllen);

    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);

    if (count > IOV_MAX) {
        /* sendmsg/recvmsg returns a different errno for this condition than
         * readv/writev, so we must catch it here before lock_iovec() does.
         */
        ret = -TARGET_EMSGSIZE;
        goto out2;
    }

    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            void *host_msg;

            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                                       MIN(msg.msg_iov->iov_len, len));
            } else {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                msgp->msg_flags = tswap32(msg.msg_flags);
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                    msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }

                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}

static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
                               int flags, int send)
{
    abi_long ret;
    struct target_msghdr *msgp;

    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
                          msgp,
                          target_msg,
                          send ? 1 : 0)) {
        return -TARGET_EFAULT;
    }
    ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
    unlock_user_struct(msgp, target_msg, send ? 0 : 1);
    return ret;
}
/* We don't rely on the C library to have sendmmsg/recvmmsg support,
 * so it might not have this *mmsg-specific flag either.
 */
#ifndef MSG_WAITFORONE
#define MSG_WAITFORONE 0x10000
#endif

static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
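/*
 * Illustration (not from the original source): recvmmsg() with vlen == 4
 * and MSG_WAITFORONE blocks only for the first datagram; once one is
 * received, MSG_DONTWAIT is set, so the remaining iterations return
 * immediately when no more data is queued, and the call reports the
 * count of datagrams actually received rather than an error.
 */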
/* do_accept4() Must return target values and target errnos. */
static abi_long do_accept4(int fd, abi_ulong target_addr,
                           abi_ulong target_addrlen_addr, int flags)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;
    int host_flags;

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    if (target_addr == 0) {
        return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
    }

    /* linux returns EFAULT if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
        return -TARGET_EFAULT;
    }

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}

/* do_getpeername() Must return target values and target errnos. */
static abi_long do_getpeername(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
        return -TARGET_EFAULT;
    }

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(getpeername(fd, addr, &ret_addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}

/* do_getsockname() Must return target values and target errnos. */
static abi_long do_getsockname(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
        return -TARGET_EFAULT;
    }

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(getsockname(fd, addr, &ret_addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}
/* do_socketpair() Must return target values and target errnos. */
static abi_long do_socketpair(int domain, int type, int protocol,
                              abi_ulong target_tab_addr)
{
    int tab[2];
    abi_long ret;

    target_to_host_sock_type(&type);

    ret = get_errno(socketpair(domain, type, protocol, tab));
    if (!is_error(ret)) {
        if (put_user_s32(tab[0], target_tab_addr)
            || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
            ret = -TARGET_EFAULT;
    }
    return ret;
}

/* do_sendto() Must return target values and target errnos. */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    void *copy_msg = NULL;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (fd_trans_target_to_host_data(fd)) {
        copy_msg = host_msg;
        host_msg = g_malloc(len);
        memcpy(host_msg, copy_msg, len);
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            goto fail;
        }
    }
    if (target_addr) {
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            goto fail;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
fail:
    if (copy_msg) {
        g_free(host_msg);
        host_msg = copy_msg;
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}

/* do_recvfrom() Must return target values and target errnos. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret_addrlen = addrlen;
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &ret_addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet. */
        addrlen = 0; /* To keep compiler quiet. */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        if (fd_trans_host_to_target_data(fd)) {
            abi_long trans;
            trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
            if (is_error(trans)) {
                ret = trans;
                goto fail;
            }
        }
        if (target_addr) {
            host_to_target_sockaddr(target_addr, addr,
                                    MIN(addrlen, ret_addrlen));
            if (put_user_u32(ret_addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        unlock_user(host_msg, msg, len);
    } else {
fail:
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
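/*
 * Illustration (not from the original source): on targets that multiplex
 * socket operations through socketcall(), a guest
 * connect(fd, addr, addrlen) arrives as
 * do_socketcall(TARGET_SYS_CONNECT, vptr) with the three abi_long
 * arguments packed consecutively at vptr; the loop above fetches a[0..2]
 * with get_user_ual() and the switch dispatches to
 * do_connect(a[0], a[1], a[2]).
 */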
#define N_SHM_REGIONS 32

static struct shm_region {
    abi_ulong start;
    abi_ulong size;
    bool in_use;
} shm_regions[N_SHM_REGIONS];

#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct */
struct target_semid64_ds
{
    struct target_ipc_perm sem_perm;
    abi_ulong sem_otime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong sem_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong sem_nsems;
    abi_ulong __unused3;
    abi_ulong __unused4;
};
#endif

static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}

static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->sem_perm), target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
    host_sd->sem_otime = tswapal(target_sd->sem_otime);
    host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
    target_sd->sem_otime = tswapal(host_sd->sem_otime);
    target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};

static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}

union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};

union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};

static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = g_try_new(unsigned short, nsems);
    if (!*host_array) {
        return -TARGET_ENOMEM;
    }
    array = lock_user(VERIFY_READ, target_addr,
                      nsems * sizeof(unsigned short), 1);
    if (!array) {
        g_free(*host_array);
        return -TARGET_EFAULT;
    }

    for (i = 0; i < nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}

static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
                                               unsigned short **host_array)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    array = lock_user(VERIFY_WRITE, target_addr,
                      nsems * sizeof(unsigned short), 0);
    if (!array)
        return -TARGET_EFAULT;

    for (i = 0; i < nsems; i++) {
        __put_user((*host_array)[i], &array[i]);
    }
    g_free(*host_array);
    unlock_user(array, target_addr, 1);

    return 0;
}
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;

    cmd &= 0xff;

    switch (cmd) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element. To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
         * a swap of the 4 byte val field. In other cases, the data is
         * already in proper host byte order. */
        if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
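/*
 * Worked example (illustrative, not from the original source) of the
 * cross-endian fixup above: a big-endian 64-bit guest passing
 * semun.val == 1 hands us the register value 0x0000000100000000, since
 * val occupies the union's first (most significant, on BE) four bytes.
 * On a little-endian host, tswapal() turns buf into 0x0000000001000000,
 * whose low 32 bits read back as 0x01000000; the final tswap32()
 * recovers arg.val == 1.
 */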
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};

static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,
                                             unsigned nsops)
{
    struct target_sembuf *target_sembuf;
    int i;

    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops * sizeof(struct target_sembuf), 1);
    if (!target_sembuf)
        return -TARGET_EFAULT;

    for (i = 0; i < nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
    }

    unlock_user(target_sembuf, target_addr, 0);

    return 0;
}

#if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
    defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)

/*
 * This macro is required to handle the s390 variants, which pass the
 * arguments in a different order than default.
 */
#ifdef __s390x__
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), (__timeout), (__sops)
#else
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), 0, (__sops), (__timeout)
#endif
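/*
 * Expansion example (derived directly from the macros above): the
 * default variant makes the fallback call below read
 *     safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, timeout);
 * while on s390x it becomes the five-argument form
 *     safe_ipc(IPCOP_semtimedop, semid, nsops, timeout, sops);
 */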
static inline abi_long do_semtimedop(int semid,
                                     abi_long ptr,
                                     unsigned nsops,
                                     abi_long timeout, bool time64)
{
    struct sembuf *sops;
    struct timespec ts, *pts = NULL;
    abi_long ret;

    if (timeout) {
        pts = &ts;
        if (time64) {
            if (target_to_host_timespec64(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        }
    }

    if (nsops > TARGET_SEMOPM) {
        return -TARGET_E2BIG;
    }

    sops = g_new(struct sembuf, nsops);

    if (target_to_host_sembuf(sops, ptr, nsops)) {
        g_free(sops);
        return -TARGET_EFAULT;
    }

    ret = -TARGET_ENOSYS;
#ifdef __NR_semtimedop
    ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
                                 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
    }
#endif
    g_free(sops);
    return ret;
}
#endif

struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qnum;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
};
static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_md->msg_perm), target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapal(target_md->msg_stime);
    host_md->msg_rtime = tswapal(target_md->msg_rtime);
    host_md->msg_ctime = tswapal(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapal(target_md->msg_qnum);
    host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
    host_md->msg_lspid = tswapal(target_md->msg_lspid);
    host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapal(host_md->msg_stime);
    target_md->msg_rtime = tswapal(host_md->msg_rtime);
    target_md->msg_ctime = tswapal(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapal(host_md->msg_qnum);
    target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
    target_md->msg_lspid = tswapal(host_md->msg_lspid);
    target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
    return 0;
}

struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};

static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg, ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};

static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgsnd
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
#ifdef __s390x__
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb));
#else
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb, 0));
#endif
    }
#endif
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
#if defined(__sparc__)
/* On SPARC, msgrcv does not use the kludge on the final 2 arguments. */
#define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
#elif defined(__s390x__)
/* The s390 sys_ipc variant has only five parameters. */
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp})
#else
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp}), 0
#endif
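/*
 * Expansion example (derived directly from the macros above): in the
 * generic case the do_msgrcv() fallback below reads
 *     safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz, msgflg,
 *              ((long int[]){(long int)host_mb, msgtyp}), 0);
 * packing the message pointer and type into a temporary array, whereas
 * SPARC passes host_mb and msgtyp directly as the final two arguments.
 */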
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgrcv
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
                        msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
    }
#endif

    if (ret > 0) {
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};

static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
{
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
    return 0;
}
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};

static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
{
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
    return 0;
}
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return TARGET_PAGE_SIZE;
}
#endif
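/*
 * Worked example with hypothetical numbers: if target_shmlba() returned
 * 0x4000, a guest shmaddr of 0x12345678 is misaligned; with SHM_RND set
 * do_shmat() below rounds it down ("shmaddr &= ~(shmlba - 1)") to
 * 0x12344000, and without SHM_RND the call fails with -TARGET_EINVAL.
 */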
static inline abi_ulong do_shmat(CPUArchState *cpu_env,
                                 int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i, ret;
    abi_ulong shmlba;

    /* shmat pointers are always untagged */

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }
    if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        /* In order to use the host shmat, we need to honor host SHMLBA. */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz,
                                   MAX(SHMLBA, shmlba));

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            host_raddr = shmat(shmid, g2h_untagged(mmap_start),
                               shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr = h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_RESET | PAGE_READ |
                   (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));

    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;
}
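/*
 * Illustrative guest-side view (hypothetical calls, not emulator code):
 *
 *     int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *     void *p = shmat(id, NULL, 0);
 *
 * A NULL shmaddr takes the mmap_find_vma() branch above: QEMU reserves
 * a guest range honouring both host SHMLBA and the target shmlba,
 * attaches with SHM_REMAP, and marks the pages PAGE_VALID | PAGE_READ
 * (plus PAGE_WRITE unless SHM_RDONLY was requested).
 */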
static inline abi_long do_shmdt(abi_ulong shmaddr)
{
    int i;
    abi_long rv;

    /* shmdt pointers are always untagged */

    mmap_lock();

    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
            shm_regions[i].in_use = false;
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
            break;
        }
    }
    rv = get_errno(shmdt(g2h_untagged(shmaddr)));

    mmap_unlock();

    return rv;
}
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings. */
/* do_ipc() must return target values and target errnos. */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semtimedop(first, ptr, second, 0, false);
        break;
    case IPCOP_semtimedop:
    /*
     * The s390 sys_ipc variant has only five parameters instead of six
     * (as for default variant) and the only difference is the handling of
     * SEMTIMEDOP where on s390 the third parameter is used as a pointer
     * to a struct timespec where the generic variant uses fifth parameter.
     */
#if defined(TARGET_S390X)
        ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
#else
        ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
#endif
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                                tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
                      call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
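/*
 * Background sketch: on targets without separate SysV IPC syscalls, the
 * guest libc funnels everything through the multiplexed ipc(2) call,
 * conceptually
 *
 *     syscall(__NR_ipc, IPCOP_shmat, shmid, shmflg, &raddr, shmaddr);
 *
 * so do_ipc() above just demultiplexes "call"/"version" and forwards to
 * the same do_shmat()/do_msgsnd()/... helpers used by the direct paths.
 */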
/* kernel structure types definitions */

#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

#define MAX_STRUCT_SIZE 4096
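/*
 * Sketch of the X-macro expansion above (illustrative): for an entry
 * such as "STRUCT(blkpg_partition, ...)" in syscall_types.h, the first
 * include pass generates an enumerator "STRUCT_blkpg_partition," and
 * the second pass generates something like
 *
 *     static const argtype struct_blkpg_partition_def[] = {
 *         ..., TYPE_NULL };
 *
 * letting MK_STRUCT(STRUCT_blkpg_partition) refer to the layout by index.
 */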
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
                                int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    void *argptr;
    int ret;
    struct ifconf *host_ifconf;
    uint32_t outbufsz;
    const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
    const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
    int target_ifreq_size;
    int nb_ifreq;
    int free_buf = 0;
    int i;
    int target_ifc_len;
    abi_long target_ifc_buf;
    int host_ifc_len;
    char *host_ifc_buf;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr)
        return -TARGET_EFAULT;
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
    target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
    target_ifreq_size = thunk_type_size(ifreq_max_type, 0);

    if (target_ifc_buf != 0) {
        target_ifc_len = host_ifconf->ifc_len;
        nb_ifreq = target_ifc_len / target_ifreq_size;
        host_ifc_len = nb_ifreq * sizeof(struct ifreq);

        outbufsz = sizeof(*host_ifconf) + host_ifc_len;
        if (outbufsz > MAX_STRUCT_SIZE) {
            /*
             * We can't fit all the extents into the fixed size buffer.
             * Allocate one that is large enough and use it instead.
             */
            host_ifconf = malloc(outbufsz);
            if (!host_ifconf) {
                return -TARGET_ENOMEM;
            }
            memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
            free_buf = 1;
        }
        host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);

        host_ifconf->ifc_len = host_ifc_len;
    } else {
        host_ifc_buf = NULL;
    }
    host_ifconf->ifc_buf = host_ifc_buf;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
    if (!is_error(ret)) {
        /* convert host ifc_len to target ifc_len */

        nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
        target_ifc_len = nb_ifreq * target_ifreq_size;
        host_ifconf->ifc_len = target_ifc_len;

        /* restore target ifc_buf */

        host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;

        /* copy struct ifconf to target user */

        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr)
            return -TARGET_EFAULT;
        thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);

        if (target_ifc_buf != 0) {
            /* copy ifreq[] to target user */
            argptr = lock_user(VERIFY_WRITE, target_ifc_buf,
                               target_ifc_len, 0);
            for (i = 0; i < nb_ifreq; i++) {
                thunk_convert(argptr + i * target_ifreq_size,
                              host_ifc_buf + i * sizeof(struct ifreq),
                              ifreq_arg_type, THUNK_TARGET);
            }
            unlock_user(argptr, target_ifc_buf, target_ifc_len);
        }
    }

    if (free_buf) {
        free(host_ifconf);
    }

    return ret;
}
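/*
 * Note on the sizing arithmetic above: target and host "struct ifreq"
 * need not be the same size, so the element count is derived from the
 * larger ifmap-variant thunk and each entry is converted individually:
 *
 *     nb_ifreq     = target_ifc_len / target_ifreq_size;
 *     host_ifc_len = nb_ifreq * sizeof(struct ifreq);
 */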
#if defined(CONFIG_USBFS)
#if HOST_LONG_BITS > 64
#error USBDEVFS thunks do not support >64 bit hosts yet.
#endif
struct live_urb {
    uint64_t target_urb_adr;
    uint64_t target_buf_adr;
    char *target_buf_ptr;
    struct usbdevfs_urb host_urb;
};

static GHashTable *usbdevfs_urb_hashtable(void)
{
    static GHashTable *urb_hashtable;

    if (!urb_hashtable) {
        urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
    }
    return urb_hashtable;
}

static void urb_hashtable_insert(struct live_urb *urb)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    g_hash_table_insert(urb_hashtable, urb, urb);
}

static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
}

static void urb_hashtable_remove(struct live_urb *urb)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    g_hash_table_remove(urb_hashtable, urb);
}

static abi_long
do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
                          int fd, int cmd, abi_long arg)
{
    const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
    const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
    struct live_urb *lurb;
    void *argptr;
    uint64_t hurb;
    int target_size;
    uintptr_t target_urb_adr;
    abi_long ret;

    target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);

    memset(buf_temp, 0, sizeof(uint64_t));
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (is_error(ret)) {
        return ret;
    }

    memcpy(&hurb, buf_temp, sizeof(uint64_t));
    lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
    if (!lurb->target_urb_adr) {
        return -TARGET_EFAULT;
    }
    urb_hashtable_remove(lurb);
    unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
                lurb->host_urb.buffer_length);
    lurb->target_buf_ptr = NULL;

    /* restore the guest buffer pointer */
    lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;

    /* update the guest urb struct */
    argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
    unlock_user(argptr, lurb->target_urb_adr, target_size);

    target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
    /* write back the urb handle */
    argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
    target_urb_adr = lurb->target_urb_adr;
    thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
    unlock_user(argptr, arg, target_size);

    g_free(lurb);
    return ret;
}

static abi_long
do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
                             uint8_t *buf_temp __attribute__((unused)),
                             int fd, int cmd, abi_long arg)
{
    struct live_urb *lurb;

    /* map target address back to host URB with metadata. */
    lurb = urb_hashtable_lookup(arg);
    if (!lurb) {
        return -TARGET_EFAULT;
    }
    return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
}

static abi_long
do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    abi_long ret;
    void *argptr;
    int rw_dir;
    struct live_urb *lurb;

    /*
     * each submitted URB needs to map to a unique ID for the
     * kernel, and that unique ID needs to be a pointer to
     * host memory. hence, we need to malloc for each URB.
     * isochronous transfers have a variable length struct.
     */
    arg_type++;
    target_size = thunk_type_size(arg_type, THUNK_TARGET);

    /* construct host copy of urb and metadata */
    lurb = g_try_malloc0(sizeof(struct live_urb));
    if (!lurb) {
        return -TARGET_ENOMEM;
    }

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    lurb->target_urb_adr = arg;
    lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;

    /* buffer space used depends on endpoint type so lock the entire buffer */
    /* control type urbs should check the buffer contents for true direction */
    rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
    lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
                                     lurb->host_urb.buffer_length, 1);
    if (lurb->target_buf_ptr == NULL) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* update buffer pointer in host copy */
    lurb->host_urb.buffer = lurb->target_buf_ptr;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
    if (is_error(ret)) {
        unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
        g_free(lurb);
    } else {
        urb_hashtable_insert(lurb);
    }

    return ret;
}
#endif /* CONFIG_USBFS */
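/*
 * Design note on the URB tracking above: USBDEVFS_REAPURB hands back the
 * pointer that was submitted, so the wrappers recover the bookkeeping
 * record with container_of-style arithmetic,
 *
 *     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
 *
 * which is why each submitted URB lives in its own malloc'd live_urb and
 * is also indexed by guest address in the hash table for DISCARDURB.
 */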
static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                            int cmd, abi_long arg)
{
    void *argptr;
    struct dm_ioctl *host_dm;
    abi_long guest_data;
    uint32_t guest_data_size;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    abi_long ret;
    void *big_buf = NULL;
    char *host_data;

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* buf_temp is too small, so fetch things into a bigger buffer */
    big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
    memcpy(big_buf, buf_temp, target_size);
    buf_temp = big_buf;
    host_dm = big_buf;

    guest_data = arg + host_dm->data_start;
    if ((guest_data - arg) < 0) {
        ret = -TARGET_EINVAL;
        goto out;
    }
    guest_data_size = host_dm->data_size - host_dm->data_start;
    host_data = (char*)host_dm + host_dm->data_start;

    argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }

    switch (ie->host_cmd) {
    case DM_REMOVE_ALL:
    case DM_LIST_DEVICES:
    case DM_DEV_CREATE:
    case DM_DEV_REMOVE:
    case DM_DEV_SUSPEND:
    case DM_DEV_STATUS:
    case DM_DEV_WAIT:
    case DM_TABLE_STATUS:
    case DM_TABLE_CLEAR:
    case DM_TABLE_DEPS:
    case DM_LIST_VERSIONS:
        /* no input data */
        break;
    case DM_DEV_RENAME:
    case DM_DEV_SET_GEOMETRY:
        /* data contains only strings */
        memcpy(host_data, argptr, guest_data_size);
        break;
    case DM_TARGET_MSG:
        memcpy(host_data, argptr, guest_data_size);
        *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
        break;
    case DM_TABLE_LOAD:
    {
        void *gspec = argptr;
        void *cur_data = host_data;
        const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
        int spec_size = thunk_type_size(arg_type, 0);
        int i;

        for (i = 0; i < host_dm->target_count; i++) {
            struct dm_target_spec *spec = cur_data;
            uint32_t next;
            int slen;

            thunk_convert(spec, gspec, arg_type, THUNK_HOST);
            slen = strlen((char*)gspec + spec_size) + 1;
            next = spec->next;
            spec->next = sizeof(*spec) + slen;
            strcpy((char*)&spec[1], gspec + spec_size);
            gspec += next;
            cur_data += spec->next;
        }
        break;
    }
    default:
        ret = -TARGET_EINVAL;
        unlock_user(argptr, guest_data, 0);
        goto out;
    }
    unlock_user(argptr, guest_data, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (!is_error(ret)) {
        guest_data = arg + host_dm->data_start;
        guest_data_size = host_dm->data_size - host_dm->data_start;
        argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        switch (ie->host_cmd) {
        case DM_REMOVE_ALL:
        case DM_DEV_CREATE:
        case DM_DEV_REMOVE:
        case DM_DEV_RENAME:
        case DM_DEV_SUSPEND:
        case DM_DEV_STATUS:
        case DM_TABLE_LOAD:
        case DM_TABLE_CLEAR:
        case DM_TARGET_MSG:
        case DM_DEV_SET_GEOMETRY:
            /* no return data */
            break;
        case DM_LIST_DEVICES:
        {
            struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
            int nl_size = 12; /* can't use thunk_size due to alignment */

            while (1) {
                uint32_t next = nl->next;
                if (next) {
                    nl->next = nl_size + (strlen(nl->name) + 1);
                }
                if (remaining_data < nl->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
                strcpy(cur_data + nl_size, nl->name);
                cur_data += nl->next;
                remaining_data -= nl->next;
                if (!next) {
                    break;
                }
                nl = (void*)nl + next;
            }
            break;
        }
        case DM_DEV_WAIT:
        case DM_TABLE_STATUS:
        {
            struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
            int spec_size = thunk_type_size(arg_type, 0);
            int i;

            for (i = 0; i < host_dm->target_count; i++) {
                uint32_t next = spec->next;
                int slen = strlen((char*)&spec[1]) + 1;
                spec->next = (cur_data - argptr) + spec_size + slen;
                if (guest_data_size < spec->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
                strcpy(cur_data + spec_size, (char*)&spec[1]);
                cur_data = argptr + spec->next;
                spec = (void*)host_dm + host_dm->data_start + next;
            }
            break;
        }
        case DM_TABLE_DEPS:
        {
            void *hdata = (void*)host_dm + host_dm->data_start;
            int count = *(uint32_t*)hdata;
            uint64_t *hdev = hdata + 8;
            uint64_t *gdev = argptr + 8;
            int i;

            *(uint32_t*)argptr = tswap32(count);
            for (i = 0; i < count; i++) {
                *gdev = tswap64(*hdev);
                gdev++;
                hdev++;
            }
            break;
        }
        case DM_LIST_VERSIONS:
        {
            struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
            int vers_size = thunk_type_size(arg_type, 0);

            while (1) {
                uint32_t next = vers->next;
                if (next) {
                    vers->next = vers_size + (strlen(vers->name) + 1);
                }
                if (remaining_data < vers->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
                strcpy(cur_data + vers_size, vers->name);
                cur_data += vers->next;
                remaining_data -= vers->next;
                if (!next) {
                    break;
                }
                vers = (void*)vers + next;
            }
            break;
        }
        default:
            unlock_user(argptr, guest_data, 0);
            ret = -TARGET_EINVAL;
            goto out;
        }
        unlock_user(argptr, guest_data, guest_data_size);

        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
            goto out;
        }
        thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);
    }
out:
    g_free(big_buf);
    return ret;
}
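/*
 * Note on the buffer strategy above: dm_ioctl carries a variable-sized
 * payload (between data_start and data_size) whose layout depends on the
 * command, so the fixed buf_temp is abandoned in favour of big_buf
 * (sized data_size * 2 since target and host layouts may differ) and
 * each command's records are walked one at a time via their "next"
 * offsets.
 */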
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    void *argptr;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;

    struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert blkpg */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert blkpg->data */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));

out:
    return ret;
}
static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    const StructEntry *se;
    const argtype *field_types;
    const int *dst_offsets, *src_offsets;
    int target_size;
    void *argptr;
    abi_ulong *target_rt_dev_ptr = NULL;
    unsigned long *host_rt_dev_ptr = NULL;
    abi_long ret;
    int i;

    assert(ie->access == IOC_W);
    assert(*arg_type == TYPE_PTR);

    arg_type++;
    assert(*arg_type == TYPE_STRUCT);
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    arg_type++;
    assert(*arg_type == (int)STRUCT_rtentry);
    se = struct_entries + *arg_type++;
    assert(se->convert[0] == NULL);
    /* convert struct here to be able to catch rt_dev string */
    field_types = se->field_types;
    dst_offsets = se->field_offsets[THUNK_HOST];
    src_offsets = se->field_offsets[THUNK_TARGET];
    for (i = 0; i < se->nb_fields; i++) {
        if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
            assert(*field_types == TYPE_PTRVOID);
            target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
            host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
            if (*target_rt_dev_ptr != 0) {
                *host_rt_dev_ptr = (unsigned long)lock_user_string(
                                                  tswapal(*target_rt_dev_ptr));
                if (!*host_rt_dev_ptr) {
                    unlock_user(argptr, arg, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                *host_rt_dev_ptr = 0;
            }
            field_types++;
            continue;
        }
        field_types = thunk_convert(buf_temp + dst_offsets[i],
                                    argptr + src_offsets[i],
                                    field_types, THUNK_HOST);
    }
    unlock_user(argptr, arg, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));

    assert(host_rt_dev_ptr != NULL);
    assert(target_rt_dev_ptr != NULL);
    if (*host_rt_dev_ptr != 0) {
        unlock_user((void *)*host_rt_dev_ptr,
                    *target_rt_dev_ptr, 0);
    }
    return ret;
}
static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int sig = target_to_host_signal(arg);
    return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
}
static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
                                    int fd, int cmd, abi_long arg)
{
    struct timeval tv;
    abi_long ret;

    ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
    if (is_error(ret)) {
        return ret;
    }

    if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
        if (copy_to_user_timeval(arg, &tv)) {
            return -TARGET_EFAULT;
        }
    } else {
        if (copy_to_user_timeval64(arg, &tv)) {
            return -TARGET_EFAULT;
        }
    }

    return ret;
}
static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
                                      int fd, int cmd, abi_long arg)
{
    struct timespec ts;
    abi_long ret;

    ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
    if (is_error(ret)) {
        return ret;
    }

    if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
        if (host_to_target_timespec(arg, &ts)) {
            return -TARGET_EFAULT;
        }
    } else {
        if (host_to_target_timespec64(arg, &ts)) {
            return -TARGET_EFAULT;
        }
    }

    return ret;
}
static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
    return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
}
static void unlock_drm_version(struct drm_version *host_ver,
                               struct target_drm_version *target_ver,
                               bool copy)
{
    unlock_user(host_ver->name, target_ver->name,
                copy ? host_ver->name_len : 0);
    unlock_user(host_ver->date, target_ver->date,
                copy ? host_ver->date_len : 0);
    unlock_user(host_ver->desc, target_ver->desc,
                copy ? host_ver->desc_len : 0);
}

static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
                                          struct target_drm_version *target_ver)
{
    memset(host_ver, 0, sizeof(*host_ver));

    __get_user(host_ver->name_len, &target_ver->name_len);
    if (host_ver->name_len) {
        host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
                                   target_ver->name_len, 0);
        if (!host_ver->name) {
            goto err;
        }
    }

    __get_user(host_ver->date_len, &target_ver->date_len);
    if (host_ver->date_len) {
        host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
                                   target_ver->date_len, 0);
        if (!host_ver->date) {
            goto err;
        }
    }

    __get_user(host_ver->desc_len, &target_ver->desc_len);
    if (host_ver->desc_len) {
        host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
                                   target_ver->desc_len, 0);
        if (!host_ver->desc) {
            goto err;
        }
    }

    return 0;
err:
    unlock_drm_version(host_ver, target_ver, false);
    return -TARGET_EFAULT;
}

static inline void host_to_target_drmversion(
                                          struct target_drm_version *target_ver,
                                          struct drm_version *host_ver)
{
    __put_user(host_ver->version_major, &target_ver->version_major);
    __put_user(host_ver->version_minor, &target_ver->version_minor);
    __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
    __put_user(host_ver->name_len, &target_ver->name_len);
    __put_user(host_ver->date_len, &target_ver->date_len);
    __put_user(host_ver->desc_len, &target_ver->desc_len);
    unlock_drm_version(host_ver, target_ver, true);
}

static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg)
{
    struct drm_version *ver;
    struct target_drm_version *target_ver;
    abi_long ret;

    switch (ie->host_cmd) {
    case DRM_IOCTL_VERSION:
        if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
            return -TARGET_EFAULT;
        }
        ver = (struct drm_version *)buf_temp;
        ret = target_to_host_drmversion(ver, target_ver);
        if (!is_error(ret)) {
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
            if (is_error(ret)) {
                unlock_drm_version(ver, target_ver, false);
            } else {
                host_to_target_drmversion(target_ver, ver);
            }
        }
        unlock_user_struct(target_ver, arg, 0);
        return ret;
    }
    return -TARGET_ENOSYS;
}

static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
                                           struct drm_i915_getparam *gparam,
                                           int fd, abi_long arg)
{
    abi_long ret;
    int value;
    struct target_drm_i915_getparam *target_gparam;

    if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
        return -TARGET_EFAULT;
    }

    __get_user(gparam->param, &target_gparam->param);
    gparam->value = &value;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
    put_user_s32(value, target_gparam->value);

    unlock_user_struct(target_gparam, arg, 0);
    return ret;
}

static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
                                  int fd, int cmd, abi_long arg)
{
    switch (ie->host_cmd) {
    case DRM_IOCTL_I915_GETPARAM:
        return do_ioctl_drm_i915_getparam(ie,
                                          (struct drm_i915_getparam *)buf_temp,
                                          fd, arg);
    default:
        return -TARGET_ENOSYS;
    }
}
static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
                                        int fd, int cmd, abi_long arg)
{
    struct tun_filter *filter = (struct tun_filter *)buf_temp;
    struct tun_filter *target_filter;
    char *target_addr;

    assert(ie->access == IOC_W);

    target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
    if (!target_filter) {
        return -TARGET_EFAULT;
    }
    filter->flags = tswap16(target_filter->flags);
    filter->count = tswap16(target_filter->count);
    unlock_user(target_filter, arg, 0);

    if (filter->count) {
        if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
            MAX_STRUCT_SIZE) {
            return -TARGET_EFAULT;
        }

        target_addr = lock_user(VERIFY_READ,
                                arg + offsetof(struct tun_filter, addr),
                                filter->count * ETH_ALEN, 1);
        if (!target_addr) {
            return -TARGET_EFAULT;
        }
        memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
        unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
    }

    return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
}
IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
#define IOCTL_IGNORE(cmd) \
    { TARGET_ ## cmd, 0, #cmd },
#include "ioctls.h"
    { 0, 0, },
};
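/*
 * Example expansion (illustrative; the exact entries live in ioctls.h):
 * a line such as "IOCTL(BLKGETSIZE64, IOC_R, MK_PTR(TYPE_ULONGLONG))"
 * would become
 *
 *     { TARGET_BLKGETSIZE64, BLKGETSIZE64, "BLKGETSIZE64", IOC_R, 0,
 *       { MK_PTR(TYPE_ULONGLONG) } },
 *
 * i.e. target command, host command, printable name, access mode,
 * optional special handler, and the thunk description of the argument.
 */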
/* ??? Implement proper locking for ioctls. */
/* do_ioctl() must return target values and target errnos. */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    ie = ioctl_entries;
    for (;;) {
        if (ie->target_cmd == 0) {
            qemu_log_mask(
                LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
    if (ie->do_ioctl) {
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    } else if (!ie->host_cmd) {
        /* Some architectures define BSD ioctls in their headers
           that are not implemented in Linux. */
        return -TARGET_ENOSYS;
    }

    switch (arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
    case TYPE_LONG:
    case TYPE_ULONG:
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch (ie->access) {
        case IOC_R:
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                      (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
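/*
 * Informal summary of the dispatch above: IOC_W converts the guest
 * struct to host format before the call, IOC_R converts the result back
 * afterwards, and IOC_RW does both, i.e. conceptually
 *
 *     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
 *     safe_ioctl(fd, ie->host_cmd, buf_temp);
 *     thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
 */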
static const bitmask_transtbl iflag_tbl[] = {
    { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
    { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
    { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
    { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
    { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
    { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
    { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
    { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
    { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
    { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
    { TARGET_IXON, TARGET_IXON, IXON, IXON },
    { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
    { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
    { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
    { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8 },
    { 0, 0, 0, 0 }
};

static const bitmask_transtbl oflag_tbl[] = {
    { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
    { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
    { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
    { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
    { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
    { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
    { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
    { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
    { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
    { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
    { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
    { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
    { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
    { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
    { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
    { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
    { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
    { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
    { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
    { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
    { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
    { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
    { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
    { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
    { 0, 0, 0, 0 }
};

static const bitmask_transtbl cflag_tbl[] = {
    { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
    { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
    { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
    { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
    { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
    { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
    { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
    { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
    { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
    { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
    { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
    { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
    { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
    { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
    { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
    { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
    { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
    { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
    { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
    { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
    { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
    { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
    { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
    { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
    { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
    { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
    { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
    { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
    { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
    { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
    { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
    { 0, 0, 0, 0 }
};

static const bitmask_transtbl lflag_tbl[] = {
    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
    { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC },
    { 0, 0, 0, 0 }
};
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}

static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios),
               __alignof__(struct host_termios) },
    .print = print_termios,
};
static bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
      MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
      MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
      MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
      MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
      MAP_NORESERVE, MAP_NORESERVE },
    { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
    /* MAP_STACK had been ignored by the kernel for quite some time.
       Recognize it for the target insofar as we do not want to pass
       it through to the host. */
    { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
    { 0, 0, 0, 0 }
};
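/*
 * How these tables are consumed (informal): each row is
 * { target_mask, target_bits, host_mask, host_bits }, and the bitmask
 * helpers OR in host_bits whenever (value & target_mask) == target_bits.
 * For a multi-bit field such as CSIZE, a hypothetical target c_cflag
 * holding TARGET_CS8 therefore matches exactly one row and maps to the
 * host's CS8 encoding.
 */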
/*
 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
 *       TARGET_I386 is defined if TARGET_X86_64 is defined
 */
#if defined(TARGET_I386)

/* NOTE: there is really one LDT for all the threads */
static uint8_t *ldt_table;

static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
{
    int size;
    void *p;

    if (!ldt_table)
        return 0;
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
        size = bytecount;
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
    if (!p)
        return -TARGET_EFAULT;
    /* ??? Should this be byteswapped? */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);
    return size;
}
/* XXX: add locking support */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h_untagged(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h_untagged(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0)) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ... */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
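/*
 * Worked example (hypothetical values): base_addr 0x12345678, limit
 * 0xfffff, seg_32bit=1, limit_in_pages=1, contents=0, read_exec_only=0,
 * seg_not_present=0 packs into the usual x86 descriptor split:
 *
 *     entry_1 = (0x5678 << 16) | 0xffff;      base[15:0] : limit[15:0]
 *     entry_2 = 0x12000000 | 0x34             base[31:24], base[23:16]
 *             | 0xf0000                       limit[19:16]
 *             | (1 << 9) | (1 << 15)          writable, present
 *             | (1 << 22) | (1 << 23)         32-bit, 4K granularity
 *             | 0x7000;                       S=1, DPL=3
 */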
/* specific and weird i386 syscalls */
static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
                              unsigned long bytecount)
{
    abi_long ret;

    switch (func) {
    case 0:
        ret = read_ldt(ptr, bytecount);
        break;
    case 1:
        ret = write_ldt(env, ptr, bytecount, 1);
        break;
    case 0x11:
        ret = write_ldt(env, ptr, bytecount, 0);
        break;
    default:
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#if defined(TARGET_ABI32)
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        for (i = TARGET_GDT_ENTRY_TLS_MIN; i <= TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0)) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ... */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    return -TARGET_ENOSYS;
}
#else
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    abi_long ret = 0;
    abi_ulong val;
    int idx;

    switch (code) {
    case TARGET_ARCH_SET_GS:
    case TARGET_ARCH_SET_FS:
        if (code == TARGET_ARCH_SET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        cpu_x86_load_seg(env, idx, 0);
        env->segs[idx].base = addr;
        break;
    case TARGET_ARCH_GET_GS:
    case TARGET_ARCH_GET_FS:
        if (code == TARGET_ARCH_GET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        val = env->segs[idx].base;
        if (put_user(val, addr, abi_ulong))
            ret = -TARGET_EFAULT;
        break;
    default:
        ret = -TARGET_EINVAL;
        break;
    }
    return ret;
}
#endif /* defined(TARGET_ABI32) */

#endif /* defined(TARGET_I386) */
#define NEW_STACK_SIZE 0x40000


static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
typedef struct {
    CPUArchState *env;
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    pthread_t thread;
    uint32_t tid;
    abi_ulong child_tidptr;
    abi_ulong parent_tidptr;
    sigset_t sigmask;
} new_thread_info;

static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = env_cpu(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = sys_gettid();
    task_settid(ts);
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);
    /* Enable signals. */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready. */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state. */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = env_cpu(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    sigset_t sigmask;

    flags &= ~CLONE_IGNORED_FLAGS;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;
        }

        ts = g_new0(TaskState, 1);
        init_task_state(ts);

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs_child(new_env, newsp, flags);
        cpu_clone_regs_parent(env, flags);
        new_cpu = env_cpu(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;

        if (flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (flags & CLONE_SETTLS) {
            cpu_set_tls(new_env, newtls);
        }

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (flags & CLONE_CHILD_SETTID) {
            info.child_tidptr = child_tidptr;
        }
        if (flags & CLONE_PARENT_SETTID) {
            info.parent_tidptr = parent_tidptr;
        }

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
        cpu->random_seed = qemu_guest_random_seed_thread_part1();

        /* If this is our first additional thread, we need to ensure we
         * generate code for parallel execution and flush old translations.
         */
        if (!parallel_cpus) {
            parallel_cpus = true;
            tb_flush(cpu);
        }

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it a fork */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;
        }

        /* We can't support custom termination signals */
        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            cpu_clone_regs_child(env, newsp, flags);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(sys_gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(sys_gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls(env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            cpu_clone_regs_parent(env, flags);
            fork_end(0);
        }
    }
    return ret;
}
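
/*
 * Guest threads created above run on a detached pthread with a fixed
 * NEW_STACK_SIZE (0x40000 byte) host stack; the guest-visible stack is
 * whatever newsp points at, installed via cpu_clone_regs_child(), so the
 * host pthread stack only ever has to hold QEMU's own frames.
 */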
/* warning : doesn't handle linux specific flags... */
static int target_to_host_fcntl_cmd(int cmd)
{
    int ret;

    switch (cmd) {
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
    case TARGET_F_OFD_GETLK:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = cmd;
        break;
    case TARGET_F_GETLK:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW:
        ret = F_SETLKW64;
        break;
    case TARGET_F_GETOWN:
        ret = F_GETOWN;
        break;
    case TARGET_F_SETOWN:
        ret = F_SETOWN;
        break;
    case TARGET_F_GETSIG:
        ret = F_GETSIG;
        break;
    case TARGET_F_SETSIG:
        ret = F_SETSIG;
        break;
#if TARGET_ABI_BITS == 32
    case TARGET_F_GETLK64:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK64:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW64:
        ret = F_SETLKW64;
        break;
#endif
    case TARGET_F_SETLEASE:
        ret = F_SETLEASE;
        break;
    case TARGET_F_GETLEASE:
        ret = F_GETLEASE;
        break;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        ret = F_DUPFD_CLOEXEC;
        break;
#endif
    case TARGET_F_NOTIFY:
        ret = F_NOTIFY;
        break;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = F_GETOWN_EX;
        break;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        ret = F_SETOWN_EX;
        break;
#endif
#ifdef F_SETPIPE_SZ
    case TARGET_F_SETPIPE_SZ:
        ret = F_SETPIPE_SZ;
        break;
    case TARGET_F_GETPIPE_SZ:
        ret = F_GETPIPE_SZ;
        break;
#endif
#ifdef F_ADD_SEALS
    case TARGET_F_ADD_SEALS:
        ret = F_ADD_SEALS;
        break;
    case TARGET_F_GET_SEALS:
        ret = F_GET_SEALS;
        break;
#endif
    default:
        ret = -TARGET_EINVAL;
        break;
    }

#if defined(__powerpc64__)
    /* On PPC64, glibc headers have the F_*LK* defined to 12, 13 and 14,
     * which is not supported by the kernel. The glibc fcntl call actually
     * adjusts them to 5, 6 and 7 before making the syscall(). Since we make
     * the syscall directly, adjust to what is supported by the kernel.
     */
    if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
        ret -= F_GETLK64 - 5;
    }
#endif

    return ret;
}
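
/*
 * Worked example for the PPC64 adjustment above: glibc defines
 * F_GETLK64/F_SETLK64/F_SETLKW64 as 12/13/14 while the kernel expects
 * 5/6/7, so "ret -= F_GETLK64 - 5" maps 12->5, 13->6 and 14->7.
 */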
#define FLOCK_TRANSTBL \
    switch (type) { \
    TRANSTBL_CONVERT(F_RDLCK); \
    TRANSTBL_CONVERT(F_WRLCK); \
    TRANSTBL_CONVERT(F_UNLCK); \
    }

static int target_to_host_flock(int type)
{
#define TRANSTBL_CONVERT(a) case TARGET_##a: return a
    FLOCK_TRANSTBL
#undef TRANSTBL_CONVERT
    return -TARGET_EINVAL;
}

static int host_to_target_flock(int type)
{
#define TRANSTBL_CONVERT(a) case a: return TARGET_##a
    FLOCK_TRANSTBL
#undef TRANSTBL_CONVERT
    /* if we don't know how to convert the value coming
     * from the host we copy to the target field as-is
     */
    return type;
}
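
/*
 * Each TRANSTBL_CONVERT expansion above produces one case label: in
 * target_to_host_flock(), "TRANSTBL_CONVERT(F_RDLCK);" becomes
 * "case TARGET_F_RDLCK: return F_RDLCK;", and the mirror definition in
 * host_to_target_flock() reverses the direction of the mapping.
 */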
static inline abi_long copy_from_user_flock(struct flock64 *fl,
                                            abi_ulong target_flock_addr)
{
    struct target_flock *target_fl;
    int l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
                                          const struct flock64 *fl)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}

typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
#if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
                                                   abi_ulong target_flock_addr)
{
    struct target_oabi_flock64 *target_fl;
    int l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_oabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
#endif
static inline abi_long copy_from_user_flock64(struct flock64 *fl,
                                              abi_ulong target_flock_addr)
{
    struct target_flock64 *target_fl;
    int l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
                                            const struct flock64 *fl)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock64 fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch (cmd) {
    case TARGET_F_GETLK:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock(arg, &fl64);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETLK64:
    case TARGET_F_OFD_GETLK:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl64);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    case TARGET_F_SETSIG:
        ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
        break;

    case TARGET_F_GETSIG:
        ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
        break;

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
    case TARGET_F_ADD_SEALS:
    case TARGET_F_GET_SEALS:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
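
/*
 * Note that for TARGET_F_GETFL/TARGET_F_SETFL above the numeric flag value
 * is not passed through unchanged: fcntl_flags_tbl remaps each O_* bit
 * between the host and target encodings, since flags such as O_NONBLOCK
 * do not have the same value on every architecture.
 */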
#ifdef USE_UID16

static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}

static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}

static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}

static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}

static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */

/* We must do direct syscalls for setting UID/GID, because we want to
 * implement the Linux system call semantics of "change only for this thread",
 * not the libc/POSIX semantics of "change for all threads in process".
 * (See http://ewontfix.com/17/ for more details.)
 * We use the 32-bit version of the syscalls if present; if it is not
 * then either the host architecture supports 32-bit UIDs natively with
 * the standard syscall, or the 16-bit UID is the best we can do.
 */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif

_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
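
/*
 * Under USE_UID16 the high2low conversions above are lossy by design:
 * any uid/gid above 65535 is reported to the guest as 65534 (the
 * traditional "overflowuid"), and a 16-bit id of 0xffff coming from the
 * guest is sign-extended back to -1 so that "no change" arguments to
 * chown() and friends keep their special meaning.
 */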
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

    thunk_init(STRUCT_MAX);
#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
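
/*
 * Example of the size patching above: a target ioctl number whose size
 * field is all-ones is a placeholder.  If the underlying argument type is
 * TYPE_PTR, the real size is computed with thunk_type_size() and spliced
 * into the TARGET_IOC_SIZEMASK bits, leaving the direction and command
 * bits of target_cmd untouched.
 */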
#ifdef TARGET_NR_truncate64
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif

#ifdef TARGET_NR_ftruncate64
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
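
/*
 * regpairs_aligned() above accounts for 32-bit ABIs that pass 64-bit
 * syscall arguments in aligned register pairs: when the ABI inserts a
 * padding register, the two halves of the offset arrive one argument
 * later, so arg2/arg3 are shifted down from arg3/arg4 before
 * target_offset64() reassembles them into a single 64-bit value.
 */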
#if defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
                                                 abi_ulong target_addr)
{
    if (target_to_host_timespec(&host_its->it_interval, target_addr +
                                offsetof(struct target_itimerspec,
                                         it_interval)) ||
        target_to_host_timespec(&host_its->it_value, target_addr +
                                offsetof(struct target_itimerspec,
                                         it_value))) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif

#if defined(TARGET_NR_timer_settime64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
                                                   abi_ulong target_addr)
{
    if (target_to_host_timespec64(&host_its->it_interval, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_interval)) ||
        target_to_host_timespec64(&host_its->it_value, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_value))) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif

#if ((defined(TARGET_NR_timerfd_gettime) || \
      defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
      defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
                                                 struct itimerspec *host_its)
{
    if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
                                                       it_interval),
                                &host_its->it_interval) ||
        host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
                                                       it_value),
                                &host_its->it_value)) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif

#if ((defined(TARGET_NR_timerfd_gettime64) || \
      defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
      defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
                                                   struct itimerspec *host_its)
{
    if (host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_interval),
                                  &host_its->it_interval) ||
        host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_value),
                                  &host_its->it_value)) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif
#if defined(TARGET_NR_adjtimex) || \
    (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
static inline abi_long target_to_host_timex(struct timex *host_tx,
                                            abi_long target_addr)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_timex(abi_long target_addr,
                                            struct timex *host_tx)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
#endif
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
static inline abi_long target_to_host_timex64(struct timex *host_tx,
                                              abi_long target_addr)
{
    struct target__kernel_timex *target_tx;

    if (copy_from_user_timeval64(&host_tx->time, target_addr +
                                 offsetof(struct target__kernel_timex,
                                          time))) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_timex64(abi_long target_addr,
                                              struct timex *host_tx)
{
    struct target__kernel_timex *target_tx;

    if (copy_to_user_timeval64(target_addr +
                               offsetof(struct target__kernel_timex, time),
                               &host_tx->time)) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
#endif
static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
                                               abi_ulong target_addr)
{
    struct target_sigevent *target_sevp;

    if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    /* This union is awkward on 64 bit systems because it has a 32 bit
     * integer and a pointer in it; we follow the conversion approach
     * used for handling sigval types in signal.c so the guest should get
     * the correct value back even if we did a 64 bit byteswap and it's
     * using the 32 bit integer.
     */
    host_sevp->sigev_value.sival_ptr =
        (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
    host_sevp->sigev_signo =
        target_to_host_signal(tswap32(target_sevp->sigev_signo));
    host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
    host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);

    unlock_user_struct(target_sevp, target_addr, 1);
    return 0;
}
#if defined(TARGET_NR_mlockall)
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }
#ifdef MCL_ONFAULT
    if (arg & TARGET_MCL_ONFAULT) {
        result |= MCL_ONFAULT;
    }
#endif

    return result;
}
#endif
#if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
     defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
     defined(TARGET_NR_newfstatat))
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif
#if defined(TARGET_NR_statx) && defined(__NR_statx)
static inline abi_long host_to_target_statx(struct target_statx *host_stx,
                                            abi_ulong target_addr)
{
    struct target_statx *target_stx;

    if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    memset(target_stx, 0, sizeof(*target_stx));

    __put_user(host_stx->stx_mask, &target_stx->stx_mask);
    __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
    __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
    __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
    __put_user(host_stx->stx_uid, &target_stx->stx_uid);
    __put_user(host_stx->stx_gid, &target_stx->stx_gid);
    __put_user(host_stx->stx_mode, &target_stx->stx_mode);
    __put_user(host_stx->stx_ino, &target_stx->stx_ino);
    __put_user(host_stx->stx_size, &target_stx->stx_size);
    __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
    __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
    __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
    __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
    __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
    __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
    __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
    __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
    __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
    __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
    __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
    __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
    __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
    __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);

    unlock_user_struct(target_stx, target_addr, 1);

    return 0;
}
#endif
static int do_sys_futex(int *uaddr, int op, int val,
                        const struct timespec *timeout, int *uaddr2,
                        int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    g_assert_not_reached();
}

static int do_safe_futex(int *uaddr, int op, int val,
                         const struct timespec *timeout, int *uaddr2,
                         int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
                                           val3));
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#endif /* HOST_LONG_BITS == 64 */
    return -TARGET_ENOSYS;
}
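
/*
 * The two wrappers above pick the host futex flavour at compile time: a
 * 64-bit host always has a 64-bit time_t and uses __NR_futex directly,
 * while a 32-bit host prefers __NR_futex_time64 when the libc timespec is
 * already 64-bit and falls back to the legacy __NR_futex otherwise.
 */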
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  In any case they're probably useless because guest atomic
   operations won't work either.  */
#if defined(TARGET_NR_futex)
static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val,
                    target_ulong timeout, target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            target_to_host_timespec(pts, timeout);
        } else {
            pts = NULL;
        }
        return do_safe_futex(g2h(cpu, uaddr),
                             op, tswap32(val), pts, NULL, val3);
    case FUTEX_WAKE:
        return do_safe_futex(g2h(cpu, uaddr),
                             op, val, NULL, NULL, 0);
    case FUTEX_FD:
        return do_safe_futex(g2h(cpu, uaddr),
                             op, val, NULL, NULL, 0);
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
                             (base_op == FUTEX_CMP_REQUEUE
                              ? tswap32(val3) : val3));
    default:
        return -TARGET_ENOSYS;
    }
}
#endif

#if defined(TARGET_NR_futex_time64)
static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op,
                           int val, target_ulong timeout,
                           target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            if (target_to_host_timespec64(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            pts = NULL;
        }
        return do_safe_futex(g2h(cpu, uaddr), op,
                             tswap32(val), pts, NULL, val3);
    case FUTEX_WAKE:
        return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
    case FUTEX_FD:
        return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
                             (base_op == FUTEX_CMP_REQUEUE
                              ? tswap32(val3) : val3));
    default:
        return -TARGET_ENOSYS;
    }
}
#endif
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
#if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)

static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
#endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
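
/*
 * Layout reminder for the translation above: in a wait()-style status
 * word the low 7 bits hold the terminating signal, so only those bits are
 * replaced for WIFSIGNALED; for WIFSTOPPED the stop signal lives in bits
 * 8-15, hence the "<< 8" before merging with the remaining status bits.
 */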
static int open_self_cmdline(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
    int i;

    for (i = 0; i < bprm->argc; i++) {
        size_t len = strlen(bprm->argv[i]) + 1;

        if (write(fd, bprm->argv[i], len) != len) {
            return -1;
        }
    }

    return 0;
}
static int open_self_maps(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    GSList *map_info = read_self_maps();
    GSList *s;
    int count;

    for (s = map_info; s; s = g_slist_next(s)) {
        MapInfo *e = (MapInfo *) s->data;

        if (h2g_valid(e->start)) {
            unsigned long min = e->start;
            unsigned long max = e->end;
            int flags = page_get_flags(h2g(min));
            const char *path;

            max = h2g_valid(max - 1) ?
                max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;

            if (page_check_range(h2g(min), max - min, flags) == -1) {
                continue;
            }

            if (h2g(min) == ts->info->stack_limit) {
                path = "[stack]";
            } else {
                path = e->path;
            }

            count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
                            " %c%c%c%c %08" PRIx64 " %s %"PRId64,
                            h2g(min), h2g(max - 1) + 1,
                            e->is_read ? 'r' : '-',
                            e->is_write ? 'w' : '-',
                            e->is_exec ? 'x' : '-',
                            e->is_priv ? 'p' : '-',
                            (uint64_t) e->offset, e->dev, e->inode);
            if (path) {
                dprintf(fd, "%*s%s\n", 73 - count, "", path);
            } else {
                dprintf(fd, "\n");
            }
        }
    }

    free_self_maps(map_info);

#ifdef TARGET_VSYSCALL_PAGE
    /*
     * We only support execution from the vsyscall page.
     * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
     */
    count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
                    " --xp 00000000 00:00 0",
                    TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
    dprintf(fd, "%*s%s\n", 73 - count, "", "[vsyscall]");
#endif

    return 0;
}
static int open_self_stat(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    g_autoptr(GString) buf = g_string_new(NULL);
    int i;

    for (i = 0; i < 44; i++) {
        if (i == 0) {
            /* pid */
            g_string_printf(buf, FMT_pid " ", getpid());
        } else if (i == 1) {
            /* app name */
            gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
            bin = bin ? bin + 1 : ts->bprm->argv[0];
            g_string_printf(buf, "(%.15s) ", bin);
        } else if (i == 27) {
            /* stack bottom */
            g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
        } else {
            /* for the rest, there is MasterCard */
            g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
        }

        if (write(fd, buf->str, buf->len) != buf->len) {
            return -1;
        }
    }

    return 0;
}
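
/*
 * The synthetic /proc/self/stat above only fills in the fields a guest is
 * likely to parse: field 1 (pid), field 2 (comm, truncated to 15
 * characters as the kernel does), and field 28 (startstack); the other 41
 * fields are emitted as literal zeros.
 */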
static int open_self_auxv(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}

#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
    defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}
#endif
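
/*
 * is_proc_myself() above matches both spellings of the calling process:
 * with entry "maps" it accepts "/proc/self/maps" as well as
 * "/proc/<pid>/maps" where <pid> is this process's own pid; is_proc() is
 * the strict variant used for fixed paths such as "/proc/net/route".
 */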
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
static int open_net_route(void *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -1;
    }

    /* read header */

    read = getline(&line, &len, fp);
    dprintf(fd, "%s", line);

    /* read routes */

    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        int fields;

        fields = sscanf(line,
                        "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                        iface, &dest, &gw, &flags, &refcnt, &use, &metric,
                        &mask, &mtu, &window, &irtt);
        if (fields != 11) {
            continue;
        }
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
#endif
#if defined(TARGET_SPARC)
static int open_cpuinfo(void *cpu_env, int fd)
{
    dprintf(fd, "type\t\t: sun4u\n");
    return 0;
}
#endif

#if defined(TARGET_HPPA)
static int open_cpuinfo(void *cpu_env, int fd)
{
    dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
    dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
    dprintf(fd, "capabilities\t: os32\n");
    dprintf(fd, "model\t\t: 9000/778/B160L\n");
    dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
    return 0;
}
#endif

#if defined(TARGET_M68K)
static int open_hardware(void *cpu_env, int fd)
{
    dprintf(fd, "Model:\t\tqemu-m68k\n");
    return 0;
}
#endif
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;
        int (*fill)(void *cpu_env, int fd);
        int (*cmp)(const char *s1, const char *s2);
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
#if defined(TARGET_SPARC) || defined(TARGET_HPPA)
        { "/proc/cpuinfo", open_cpuinfo, is_proc },
#endif
#if defined(TARGET_M68K)
        { "/proc/hardware", open_hardware, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    if (is_proc_myself(pathname, "exe")) {
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
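
/*
 * Usage sketch for the fake_open machinery above: a guest open of
 * "/proc/self/maps" matches the "maps" entry via is_proc_myself(), so
 * instead of exposing the host file QEMU creates an unlinked temporary
 * file, lets open_self_maps() write guest-adjusted contents into it,
 * rewinds it, and hands that descriptor back to the guest.
 */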
#define TIMER_MAGIC 0x0caf0000
#define TIMER_MAGIC_MASK 0xffff0000

/* Convert QEMU provided timer ID back to internal 16bit index format */
static target_timer_t get_timer_id(abi_long arg)
{
    target_timer_t timerid = arg;

    if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
        return -TARGET_EINVAL;
    }

    timerid &= 0xffff;

    if (timerid >= ARRAY_SIZE(g_posix_timers)) {
        return -TARGET_EINVAL;
    }

    return timerid;
}
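
/*
 * Example for get_timer_id() above: QEMU hands out timer ids of the form
 * TIMER_MAGIC | index, so 0x0caf0002 decodes to internal index 2, while
 * anything whose top 16 bits are not 0x0caf fails the magic check and is
 * rejected with -TARGET_EINVAL before the table lookup.
 */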
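
/*
 * Worked example for the CPU-mask converters below, with a 32-bit guest
 * on a 64-bit host: target word i=1, bit j=3 is global cpu bit
 * 1 * 32 + 3 = 35, which lives in host word 35 / 64 = 0 at bit position
 * 35 % 64 = 35.  Both directions apply the same index arithmetic.
 */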
8174 static int target_to_host_cpu_mask(unsigned long *host_mask
,
8176 abi_ulong target_addr
,
8179 unsigned target_bits
= sizeof(abi_ulong
) * 8;
8180 unsigned host_bits
= sizeof(*host_mask
) * 8;
8181 abi_ulong
*target_mask
;
8184 assert(host_size
>= target_size
);
8186 target_mask
= lock_user(VERIFY_READ
, target_addr
, target_size
, 1);
8188 return -TARGET_EFAULT
;
8190 memset(host_mask
, 0, host_size
);
8192 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
8193 unsigned bit
= i
* target_bits
;
8196 __get_user(val
, &target_mask
[i
]);
8197 for (j
= 0; j
< target_bits
; j
++, bit
++) {
8198 if (val
& (1UL << j
)) {
8199 host_mask
[bit
/ host_bits
] |= 1UL << (bit
% host_bits
);
8204 unlock_user(target_mask
, target_addr
, 0);
8208 static int host_to_target_cpu_mask(const unsigned long *host_mask
,
8210 abi_ulong target_addr
,
8213 unsigned target_bits
= sizeof(abi_ulong
) * 8;
8214 unsigned host_bits
= sizeof(*host_mask
) * 8;
8215 abi_ulong
*target_mask
;
8218 assert(host_size
>= target_size
);
8220 target_mask
= lock_user(VERIFY_WRITE
, target_addr
, target_size
, 0);
8222 return -TARGET_EFAULT
;
8225 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
8226 unsigned bit
= i
* target_bits
;
8229 for (j
= 0; j
< target_bits
; j
++, bit
++) {
8230 if (host_mask
[bit
/ host_bits
] & (1UL << (bit
% host_bits
))) {
8234 __put_user(val
, &target_mask
[i
]);
8237 unlock_user(target_mask
, target_addr
, target_size
);
8241 /* This is an internal helper for do_syscall so that it is easier
8242 * to have a single return point, so that actions, such as logging
8243 * of syscall results, can be performed.
8244 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8246 static abi_long
do_syscall1(void *cpu_env
, int num
, abi_long arg1
,
8247 abi_long arg2
, abi_long arg3
, abi_long arg4
,
8248 abi_long arg5
, abi_long arg6
, abi_long arg7
,
8251 CPUState
*cpu
= env_cpu(cpu_env
);
8253 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8254 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8255 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8256 || defined(TARGET_NR_statx)
8259 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8260 || defined(TARGET_NR_fstatfs)
8266 case TARGET_NR_exit
:
8267 /* In old applications this may be used to implement _exit(2).
8268 However in threaded applications it is used for thread termination,
8269 and _exit_group is used for application termination.
8270 Do thread termination if we have more then one thread. */
8272 if (block_signals()) {
8273 return -TARGET_ERESTARTSYS
;
8276 pthread_mutex_lock(&clone_lock
);
8278 if (CPU_NEXT(first_cpu
)) {
8279 TaskState
*ts
= cpu
->opaque
;
8281 object_property_set_bool(OBJECT(cpu
), "realized", false, NULL
);
8282 object_unref(OBJECT(cpu
));
8284 * At this point the CPU should be unrealized and removed
8285 * from cpu lists. We can clean-up the rest of the thread
8286 * data without the lock held.
8289 pthread_mutex_unlock(&clone_lock
);
8291 if (ts
->child_tidptr
) {
8292 put_user_u32(0, ts
->child_tidptr
);
8293 do_sys_futex(g2h(cpu
, ts
->child_tidptr
),
8294 FUTEX_WAKE
, INT_MAX
, NULL
, NULL
, 0);
8298 rcu_unregister_thread();
8302 pthread_mutex_unlock(&clone_lock
);
8303 preexit_cleanup(cpu_env
, arg1
);
8305 return 0; /* avoid warning */
8306 case TARGET_NR_read
:
8307 if (arg2
== 0 && arg3
== 0) {
8308 return get_errno(safe_read(arg1
, 0, 0));
8310 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
8311 return -TARGET_EFAULT
;
8312 ret
= get_errno(safe_read(arg1
, p
, arg3
));
8314 fd_trans_host_to_target_data(arg1
)) {
8315 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
8317 unlock_user(p
, arg2
, ret
);
8320 case TARGET_NR_write
:
8321 if (arg2
== 0 && arg3
== 0) {
8322 return get_errno(safe_write(arg1
, 0, 0));
8324 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
8325 return -TARGET_EFAULT
;
8326 if (fd_trans_target_to_host_data(arg1
)) {
8327 void *copy
= g_malloc(arg3
);
8328 memcpy(copy
, p
, arg3
);
8329 ret
= fd_trans_target_to_host_data(arg1
)(copy
, arg3
);
8331 ret
= get_errno(safe_write(arg1
, copy
, ret
));
8335 ret
= get_errno(safe_write(arg1
, p
, arg3
));
8337 unlock_user(p
, arg2
, 0);
8340 #ifdef TARGET_NR_open
8341 case TARGET_NR_open
:
8342 if (!(p
= lock_user_string(arg1
)))
8343 return -TARGET_EFAULT
;
8344 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
8345 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
8347 fd_trans_unregister(ret
);
8348 unlock_user(p
, arg1
, 0);
8351 case TARGET_NR_openat
:
8352 if (!(p
= lock_user_string(arg2
)))
8353 return -TARGET_EFAULT
;
8354 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
8355 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
8357 fd_trans_unregister(ret
);
8358 unlock_user(p
, arg2
, 0);
8360 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8361 case TARGET_NR_name_to_handle_at
:
8362 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
8365 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8366 case TARGET_NR_open_by_handle_at
:
8367 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
8368 fd_trans_unregister(ret
);
8371 case TARGET_NR_close
:
8372 fd_trans_unregister(arg1
);
8373 return get_errno(close(arg1
));
8376 return do_brk(arg1
);
8377 #ifdef TARGET_NR_fork
8378 case TARGET_NR_fork
:
8379 return get_errno(do_fork(cpu_env
, TARGET_SIGCHLD
, 0, 0, 0, 0));
8381 #ifdef TARGET_NR_waitpid
8382 case TARGET_NR_waitpid
:
8385 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
8386 if (!is_error(ret
) && arg2
&& ret
8387 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
8388 return -TARGET_EFAULT
;
8392 #ifdef TARGET_NR_waitid
8393 case TARGET_NR_waitid
:
8397 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
8398 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
8399 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
8400 return -TARGET_EFAULT
;
8401 host_to_target_siginfo(p
, &info
);
8402 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
8407 #ifdef TARGET_NR_creat /* not on alpha */
8408 case TARGET_NR_creat
:
8409 if (!(p
= lock_user_string(arg1
)))
8410 return -TARGET_EFAULT
;
8411 ret
= get_errno(creat(p
, arg2
));
8412 fd_trans_unregister(ret
);
8413 unlock_user(p
, arg1
, 0);
8416 #ifdef TARGET_NR_link
8417 case TARGET_NR_link
:
8420 p
= lock_user_string(arg1
);
8421 p2
= lock_user_string(arg2
);
8423 ret
= -TARGET_EFAULT
;
8425 ret
= get_errno(link(p
, p2
));
8426 unlock_user(p2
, arg2
, 0);
8427 unlock_user(p
, arg1
, 0);
8431 #if defined(TARGET_NR_linkat)
8432 case TARGET_NR_linkat
:
8436 return -TARGET_EFAULT
;
8437 p
= lock_user_string(arg2
);
8438 p2
= lock_user_string(arg4
);
8440 ret
= -TARGET_EFAULT
;
8442 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
8443 unlock_user(p
, arg2
, 0);
8444 unlock_user(p2
, arg4
, 0);
8448 #ifdef TARGET_NR_unlink
8449 case TARGET_NR_unlink
:
8450 if (!(p
= lock_user_string(arg1
)))
8451 return -TARGET_EFAULT
;
8452 ret
= get_errno(unlink(p
));
8453 unlock_user(p
, arg1
, 0);
8456 #if defined(TARGET_NR_unlinkat)
8457 case TARGET_NR_unlinkat
:
8458 if (!(p
= lock_user_string(arg2
)))
8459 return -TARGET_EFAULT
;
8460 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
8461 unlock_user(p
, arg2
, 0);
8464 case TARGET_NR_execve
:
8466 char **argp
, **envp
;
8469 abi_ulong guest_argp
;
8470 abi_ulong guest_envp
;
8477 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
8478 if (get_user_ual(addr
, gp
))
8479 return -TARGET_EFAULT
;
8486 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
8487 if (get_user_ual(addr
, gp
))
8488 return -TARGET_EFAULT
;
8494 argp
= g_new0(char *, argc
+ 1);
8495 envp
= g_new0(char *, envc
+ 1);
8497 for (gp
= guest_argp
, q
= argp
; gp
;
8498 gp
+= sizeof(abi_ulong
), q
++) {
8499 if (get_user_ual(addr
, gp
))
8503 if (!(*q
= lock_user_string(addr
)))
8505 total_size
+= strlen(*q
) + 1;
8509 for (gp
= guest_envp
, q
= envp
; gp
;
8510 gp
+= sizeof(abi_ulong
), q
++) {
8511 if (get_user_ual(addr
, gp
))
8515 if (!(*q
= lock_user_string(addr
)))
8517 total_size
+= strlen(*q
) + 1;
8521 if (!(p
= lock_user_string(arg1
)))
8523 /* Although execve() is not an interruptible syscall it is
8524 * a special case where we must use the safe_syscall wrapper:
8525 * if we allow a signal to happen before we make the host
8526 * syscall then we will 'lose' it, because at the point of
8527 * execve the process leaves QEMU's control. So we use the
8528 * safe syscall wrapper to ensure that we either take the
8529 * signal as a guest signal, or else it does not happen
8530 * before the execve completes and makes it the other
8531 * program's problem.
8533 ret
= get_errno(safe_execve(p
, argp
, envp
));
8534 unlock_user(p
, arg1
, 0);
8539 ret
= -TARGET_EFAULT
;
8542 for (gp
= guest_argp
, q
= argp
; *q
;
8543 gp
+= sizeof(abi_ulong
), q
++) {
8544 if (get_user_ual(addr
, gp
)
8547 unlock_user(*q
, addr
, 0);
8549 for (gp
= guest_envp
, q
= envp
; *q
;
8550 gp
+= sizeof(abi_ulong
), q
++) {
8551 if (get_user_ual(addr
, gp
)
8554 unlock_user(*q
, addr
, 0);
8561 case TARGET_NR_chdir
:
8562 if (!(p
= lock_user_string(arg1
)))
8563 return -TARGET_EFAULT
;
8564 ret
= get_errno(chdir(p
));
8565 unlock_user(p
, arg1
, 0);
8567 #ifdef TARGET_NR_time
8568 case TARGET_NR_time
:
8571 ret
= get_errno(time(&host_time
));
8574 && put_user_sal(host_time
, arg1
))
8575 return -TARGET_EFAULT
;
8579 #ifdef TARGET_NR_mknod
8580 case TARGET_NR_mknod
:
8581 if (!(p
= lock_user_string(arg1
)))
8582 return -TARGET_EFAULT
;
8583 ret
= get_errno(mknod(p
, arg2
, arg3
));
8584 unlock_user(p
, arg1
, 0);
8587 #if defined(TARGET_NR_mknodat)
8588 case TARGET_NR_mknodat
:
8589 if (!(p
= lock_user_string(arg2
)))
8590 return -TARGET_EFAULT
;
8591 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
8592 unlock_user(p
, arg2
, 0);
8595 #ifdef TARGET_NR_chmod
8596 case TARGET_NR_chmod
:
8597 if (!(p
= lock_user_string(arg1
)))
8598 return -TARGET_EFAULT
;
8599 ret
= get_errno(chmod(p
, arg2
));
8600 unlock_user(p
, arg1
, 0);
8603 #ifdef TARGET_NR_lseek
8604 case TARGET_NR_lseek
:
8605 return get_errno(lseek(arg1
, arg2
, arg3
));
8607 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8608 /* Alpha specific */
8609 case TARGET_NR_getxpid
:
8610 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
8611 return get_errno(getpid());
8613 #ifdef TARGET_NR_getpid
8614 case TARGET_NR_getpid
:
8615 return get_errno(getpid());
8617 case TARGET_NR_mount
:
8619 /* need to look at the data field */
8623 p
= lock_user_string(arg1
);
8625 return -TARGET_EFAULT
;
8631 p2
= lock_user_string(arg2
);
8634 unlock_user(p
, arg1
, 0);
8636 return -TARGET_EFAULT
;
8640 p3
= lock_user_string(arg3
);
8643 unlock_user(p
, arg1
, 0);
8645 unlock_user(p2
, arg2
, 0);
8646 return -TARGET_EFAULT
;
8652 /* FIXME - arg5 should be locked, but it isn't clear how to
8653 * do that since it's not guaranteed to be a NULL-terminated
8657 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
8659 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(cpu
, arg5
));
8661 ret
= get_errno(ret
);
8664 unlock_user(p
, arg1
, 0);
8666 unlock_user(p2
, arg2
, 0);
8668 unlock_user(p3
, arg3
, 0);
8672 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8673 #if defined(TARGET_NR_umount)
8674 case TARGET_NR_umount
:
8676 #if defined(TARGET_NR_oldumount)
8677 case TARGET_NR_oldumount
:
8679 if (!(p
= lock_user_string(arg1
)))
8680 return -TARGET_EFAULT
;
8681 ret
= get_errno(umount(p
));
8682 unlock_user(p
, arg1
, 0);
8685 #ifdef TARGET_NR_stime /* not on alpha */
8686 case TARGET_NR_stime
:
8690 if (get_user_sal(ts
.tv_sec
, arg1
)) {
8691 return -TARGET_EFAULT
;
8693 return get_errno(clock_settime(CLOCK_REALTIME
, &ts
));
8696 #ifdef TARGET_NR_alarm /* not on alpha */
8697 case TARGET_NR_alarm
:
8700 #ifdef TARGET_NR_pause /* not on alpha */
8701 case TARGET_NR_pause
:
8702 if (!block_signals()) {
8703 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
8705 return -TARGET_EINTR
;
#ifdef TARGET_NR_utime
    case TARGET_NR_utime:
        {
            struct utimbuf tbuf, *host_tbuf;
            struct target_utimbuf *target_tbuf;
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
                    return -TARGET_EFAULT;
                tbuf.actime = tswapal(target_tbuf->actime);
                tbuf.modtime = tswapal(target_tbuf->modtime);
                unlock_user_struct(target_tbuf, arg2, 0);
                host_tbuf = &tbuf;
            } else {
                host_tbuf = NULL;
            }
            if (!(p = lock_user_string(arg1)))
                return -TARGET_EFAULT;
            ret = get_errno(utime(p, host_tbuf));
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_utimes
    case TARGET_NR_utimes:
        {
            struct timeval *tvp, tv[2];
            if (arg2) {
                if (copy_from_user_timeval(&tv[0], arg2)
                    || copy_from_user_timeval(&tv[1],
                                              arg2 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
                tvp = tv;
            } else {
                tvp = NULL;
            }
            if (!(p = lock_user_string(arg1)))
                return -TARGET_EFAULT;
            ret = get_errno(utimes(p, tvp));
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_futimesat)
    case TARGET_NR_futimesat:
        {
            struct timeval *tvp, tv[2];
            if (arg3) {
                if (copy_from_user_timeval(&tv[0], arg3)
                    || copy_from_user_timeval(&tv[1],
                                              arg3 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
                tvp = tv;
            } else {
                tvp = NULL;
            }
            if (!(p = lock_user_string(arg2))) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(futimesat(arg1, path(p), tvp));
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_access
    case TARGET_NR_access:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(access(path(p), arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
    case TARGET_NR_faccessat:
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(faccessat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_nice /* not on alpha */
    case TARGET_NR_nice:
        return get_errno(nice(arg1));
#endif
    case TARGET_NR_sync:
        sync();
        return 0;
#if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
    case TARGET_NR_syncfs:
        return get_errno(syncfs(arg1));
#endif
    case TARGET_NR_kill:
        return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
#ifdef TARGET_NR_rename
    case TARGET_NR_rename:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(rename(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_renameat)
    case TARGET_NR_renameat:
        {
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(renameat(arg1, p, arg3, p2));
            unlock_user(p2, arg4, 0);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_renameat2)
    case TARGET_NR_renameat2:
        {
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
            }
            unlock_user(p2, arg4, 0);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_mkdir
    case TARGET_NR_mkdir:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(mkdir(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_mkdirat)
    case TARGET_NR_mkdirat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(mkdirat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_rmdir
    case TARGET_NR_rmdir:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(rmdir(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_dup:
        ret = get_errno(dup(arg1));
        if (ret >= 0) {
            fd_trans_dup(arg1, ret);
        }
        return ret;
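    /*
     * fd_trans_dup() keeps the fd translation table consistent: if the
     * original descriptor had a data translator attached (for example a
     * netlink socket whose messages need byte-swapping), the duplicate
     * inherits it.
     */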
#ifdef TARGET_NR_pipe
    case TARGET_NR_pipe:
        return do_pipe(cpu_env, arg1, 0, 0);
#endif
#ifdef TARGET_NR_pipe2
    case TARGET_NR_pipe2:
        return do_pipe(cpu_env, arg1,
                       target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
#endif
    case TARGET_NR_times:
        {
            struct target_tms *tmsp;
            struct tms tms;
            ret = get_errno(times(&tms));
            if (arg1) {
                tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
                if (!tmsp)
                    return -TARGET_EFAULT;
                tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
                tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
                tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
                tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
            }
            if (!is_error(ret))
                ret = host_to_target_clock_t(ret);
        }
        return ret;
    case TARGET_NR_acct:
        if (arg1 == 0) {
            ret = get_errno(acct(NULL));
        } else {
            if (!(p = lock_user_string(arg1))) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(acct(path(p)));
            unlock_user(p, arg1, 0);
        }
        return ret;
#ifdef TARGET_NR_umount2
    case TARGET_NR_umount2:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(umount2(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_ioctl:
        return do_ioctl(arg1, arg2, arg3);
#ifdef TARGET_NR_fcntl
    case TARGET_NR_fcntl:
        return do_fcntl(arg1, arg2, arg3);
#endif
    case TARGET_NR_setpgid:
        return get_errno(setpgid(arg1, arg2));
    case TARGET_NR_umask:
        return get_errno(umask(arg1));
    case TARGET_NR_chroot:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chroot(p));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_dup2
    case TARGET_NR_dup2:
        ret = get_errno(dup2(arg1, arg2));
        if (ret >= 0) {
            fd_trans_dup(arg1, arg2);
        }
        return ret;
#endif
#if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
    case TARGET_NR_dup3:
    {
        int host_flags;

        if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
            return -EINVAL;
        }
        host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
        ret = get_errno(dup3(arg1, arg2, host_flags));
        if (ret >= 0) {
            fd_trans_dup(arg1, arg2);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_getppid /* not on alpha */
    case TARGET_NR_getppid:
        return get_errno(getppid());
#endif
#ifdef TARGET_NR_getpgrp
    case TARGET_NR_getpgrp:
        return get_errno(getpgrp());
#endif
    case TARGET_NR_setsid:
        return get_errno(setsid());
#ifdef TARGET_NR_sigaction
    case TARGET_NR_sigaction:
        {
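        /*
         * The old-style sigaction layout differs per target: Alpha and
         * MIPS each have their own target_old_sigaction, so each arch
         * branch below converts to the internal target_sigaction
         * representation separately.
         */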
#if defined(TARGET_ALPHA)
            struct target_sigaction act, oact, *pact = 0;
            struct target_old_sigaction *old_act;
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask);
                act.sa_flags = old_act->sa_flags;
                act.sa_restorer = 0;
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            }
            ret = get_errno(do_sigaction(arg1, pact, &oact));
            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_mask = oact.sa_mask.sig[0];
                old_act->sa_flags = oact.sa_flags;
                unlock_user_struct(old_act, arg3, 1);
            }
#elif defined(TARGET_MIPS)
            struct target_sigaction act, oact, *pact, *old_act;

            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
                act.sa_flags = old_act->sa_flags;
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            } else {
                pact = NULL;
            }

            ret = get_errno(do_sigaction(arg1, pact, &oact));

            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_flags = oact.sa_flags;
                old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
                old_act->sa_mask.sig[1] = 0;
                old_act->sa_mask.sig[2] = 0;
                old_act->sa_mask.sig[3] = 0;
                unlock_user_struct(old_act, arg3, 1);
            }
#else
            struct target_old_sigaction *old_act;
            struct target_sigaction act, oact, *pact;
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask);
                act.sa_flags = old_act->sa_flags;
                act.sa_restorer = old_act->sa_restorer;
#ifdef TARGET_ARCH_HAS_KA_RESTORER
                act.ka_restorer = 0;
#endif
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            } else {
                pact = NULL;
            }
            ret = get_errno(do_sigaction(arg1, pact, &oact));
            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_mask = oact.sa_mask.sig[0];
                old_act->sa_flags = oact.sa_flags;
                old_act->sa_restorer = oact.sa_restorer;
                unlock_user_struct(old_act, arg3, 1);
            }
#endif
        }
        return ret;
#endif
    case TARGET_NR_rt_sigaction:
        {
#if defined(TARGET_ALPHA)
            /* For Alpha and SPARC this is a 5 argument syscall, with
             * a 'restorer' parameter which must be copied into the
             * sa_restorer field of the sigaction struct.
             * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
             * and arg5 is the sigsetsize.
             * Alpha also has a separate rt_sigaction struct that it uses
             * here; SPARC uses the usual sigaction struct.
             */
            struct target_rt_sigaction *rt_act;
            struct target_sigaction act, oact, *pact = 0;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = rt_act->_sa_handler;
                act.sa_mask = rt_act->sa_mask;
                act.sa_flags = rt_act->sa_flags;
                act.sa_restorer = arg5;
                unlock_user_struct(rt_act, arg2, 0);
                pact = &act;
            }
            ret = get_errno(do_sigaction(arg1, pact, &oact));
            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
                    return -TARGET_EFAULT;
                rt_act->_sa_handler = oact._sa_handler;
                rt_act->sa_mask = oact.sa_mask;
                rt_act->sa_flags = oact.sa_flags;
                unlock_user_struct(rt_act, arg3, 1);
            }
#else
#ifdef TARGET_SPARC
            target_ulong restorer = arg4;
            target_ulong sigsetsize = arg5;
#else
            target_ulong sigsetsize = arg4;
#endif
            struct target_sigaction *act;
            struct target_sigaction *oact;

            if (sigsetsize != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
                    return -TARGET_EFAULT;
                }
#ifdef TARGET_ARCH_HAS_KA_RESTORER
                act->ka_restorer = restorer;
#endif
            } else {
                act = NULL;
            }
            if (arg3) {
                if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
                    ret = -TARGET_EFAULT;
                    goto rt_sigaction_fail;
                }
            } else {
                oact = NULL;
            }
            ret = get_errno(do_sigaction(arg1, act, oact));
        rt_sigaction_fail:
            if (act)
                unlock_user_struct(act, arg2, 0);
            if (oact)
                unlock_user_struct(oact, arg3, 1);
#endif
        }
        return ret;
#ifdef TARGET_NR_sgetmask /* not on alpha */
    case TARGET_NR_sgetmask:
        {
            sigset_t cur_set;
            abi_ulong target_set;
            ret = do_sigprocmask(0, NULL, &cur_set);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &cur_set);
                ret = target_set;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_ssetmask /* not on alpha */
    case TARGET_NR_ssetmask:
        {
            sigset_t set, oset;
            abi_ulong target_set = arg1;
            target_to_host_old_sigset(&set, &target_set);
            ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &oset);
                ret = target_set;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_sigprocmask
    case TARGET_NR_sigprocmask:
        {
#if defined(TARGET_ALPHA)
            sigset_t set, oldset;
            abi_ulong mask;
            int how;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);

            ret = do_sigprocmask(how, &set, &oldset);
            if (!is_error(ret)) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
                ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
            }
#else
            sigset_t set, oldset, *set_ptr;
            int how;

            if (arg2) {
                switch (arg1) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
                if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
                    return -TARGET_EFAULT;
                target_to_host_old_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
            } else {
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
#endif
        }
        return ret;
#endif
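    /*
     * rt_sigprocmask differs from the old sigprocmask in two ways: the
     * caller passes the sigset size explicitly (arg4), and the full
     * target_sigset_t conversion helpers are used instead of the
     * old-sigset ones.
     */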
    case TARGET_NR_rt_sigprocmask:
        {
            int how;
            sigset_t set, oldset, *set_ptr;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            if (arg2) {
                switch (arg1) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
                if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
                    return -TARGET_EFAULT;
                target_to_host_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
            } else {
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigpending
    case TARGET_NR_sigpending:
        {
            sigset_t set;
            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigpending:
        {
            sigset_t set;

            /* Yes, this check is >, not != like most. We follow the kernel's
             * logic and it does it like this because it implements
             * NR_sigpending through the same code path, and in that case
             * the old_sigset_t is smaller in size.
             */
            if (arg2 > sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigsuspend
    case TARGET_NR_sigsuspend:
        {
            TaskState *ts = cpu->opaque;
#if defined(TARGET_ALPHA)
            abi_ulong mask = arg1;
            target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
#else
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_old_sigset(&ts->sigsuspend_mask, p);
            unlock_user(p, arg1, 0);
#endif
            ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
                                               SIGSET_T_SIZE));
            if (ret != -TARGET_ERESTARTSYS) {
                ts->in_sigsuspend = 1;
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigsuspend:
        {
            TaskState *ts = cpu->opaque;

            if (arg2 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_sigset(&ts->sigsuspend_mask, p);
            unlock_user(p, arg1, 0);
            ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
                                               SIGSET_T_SIZE));
            if (ret != -TARGET_ERESTARTSYS) {
                ts->in_sigsuspend = 1;
            }
        }
        return ret;
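    /*
     * For both sigsuspend flavours above, ts->in_sigsuspend is set only
     * when the syscall actually completed (was not restarted); the
     * signal delivery path then knows to restore the saved mask after
     * the handler runs.
     */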
#ifdef TARGET_NR_rt_sigtimedwait
    case TARGET_NR_rt_sigtimedwait:
        {
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            if (arg3) {
                puts = &uts;
                if (target_to_host_timespec(puts, arg3)) {
                    return -TARGET_EFAULT;
                }
            } else {
                puts = NULL;
            }
            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
                                  0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_rt_sigtimedwait_time64
    case TARGET_NR_rt_sigtimedwait_time64:
        {
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            if (arg3) {
                puts = &uts;
                if (target_to_host_timespec64(puts, arg3)) {
                    return -TARGET_EFAULT;
                }
            } else {
                puts = NULL;
            }
            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2,
                                  sizeof(target_siginfo_t), 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigqueueinfo:
        {
            siginfo_t uinfo;

            p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg3, 0);
            ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
        }
        return ret;
    case TARGET_NR_rt_tgsigqueueinfo:
        {
            siginfo_t uinfo;

            p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg4, 0);
            ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
        }
        return ret;
#ifdef TARGET_NR_sigreturn
    case TARGET_NR_sigreturn:
        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }
        return do_sigreturn(cpu_env);
#endif
    case TARGET_NR_rt_sigreturn:
        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }
        return do_rt_sigreturn(cpu_env);
    case TARGET_NR_sethostname:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(sethostname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_setrlimit
    case TARGET_NR_setrlimit:
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;
            if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
                return -TARGET_EFAULT;
            rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
            rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
            unlock_user_struct(target_rlim, arg2, 0);
            /*
             * If we just passed through resource limit settings for memory then
             * they would also apply to QEMU's own allocations, and QEMU will
             * crash or hang or die if its allocations fail. Ideally we would
             * track the guest allocations in QEMU and apply the limits ourselves.
             * For now, just tell the guest the call succeeded but don't actually
             * limit anything.
             */
            if (resource != RLIMIT_AS &&
                resource != RLIMIT_DATA &&
                resource != RLIMIT_STACK) {
                return get_errno(setrlimit(resource, &rlim));
            } else {
                return 0;
            }
        }
#endif
#ifdef TARGET_NR_getrlimit
    case TARGET_NR_getrlimit:
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;

            ret = get_errno(getrlimit(resource, &rlim));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                    return -TARGET_EFAULT;
                target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
                target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
                unlock_user_struct(target_rlim, arg2, 1);
            }
        }
        return ret;
#endif
    case TARGET_NR_getrusage:
        {
            struct rusage rusage;
            ret = get_errno(getrusage(arg1, &rusage));
            if (!is_error(ret)) {
                ret = host_to_target_rusage(arg2, &rusage);
            }
        }
        return ret;
#if defined(TARGET_NR_gettimeofday)
    case TARGET_NR_gettimeofday:
        {
            struct timeval tv;
            struct timezone tz;

            ret = get_errno(gettimeofday(&tv, &tz));
            if (!is_error(ret)) {
                if (arg1 && copy_to_user_timeval(arg1, &tv)) {
                    return -TARGET_EFAULT;
                }
                if (arg2 && copy_to_user_timezone(arg2, &tz)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_settimeofday)
    case TARGET_NR_settimeofday:
        {
            struct timeval tv, *ptv = NULL;
            struct timezone tz, *ptz = NULL;

            if (arg1) {
                if (copy_from_user_timeval(&tv, arg1)) {
                    return -TARGET_EFAULT;
                }
                ptv = &tv;
            }

            if (arg2) {
                if (copy_from_user_timezone(&tz, arg2)) {
                    return -TARGET_EFAULT;
                }
                ptz = &tz;
            }

            return get_errno(settimeofday(ptv, ptz));
        }
#endif
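    /*
     * The select/poll family below funnels into shared helpers; the
     * trailing boolean arguments distinguish the old vs. rt variants
     * and whether the guest timespec uses 64-bit time_t.
     */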
#if defined(TARGET_NR_select)
    case TARGET_NR_select:
#if defined(TARGET_WANT_NI_OLD_SELECT)
        /* some architectures used to have old_select here
         * but now ENOSYS it.
         */
        ret = -TARGET_ENOSYS;
#elif defined(TARGET_WANT_OLD_SYS_SELECT)
        ret = do_old_select(arg1);
#else
        ret = do_select(arg1, arg2, arg3, arg4, arg5);
#endif
        return ret;
#endif
#ifdef TARGET_NR_pselect6
    case TARGET_NR_pselect6:
        return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
#endif
#ifdef TARGET_NR_pselect6_time64
    case TARGET_NR_pselect6_time64:
        return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
#endif
#ifdef TARGET_NR_symlink
    case TARGET_NR_symlink:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlink(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_symlinkat)
    case TARGET_NR_symlinkat:
        {
            void *p2;
            p  = lock_user_string(arg1);
            p2 = lock_user_string(arg3);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlinkat(p, arg2, p2));
            unlock_user(p2, arg3, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_readlink
    case TARGET_NR_readlink:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (!arg3) {
                /* Short circuit this for the magic exe check. */
                ret = -TARGET_EINVAL;
            } else if (is_proc_myself((const char *)p, "exe")) {
                char real[PATH_MAX], *temp;
                temp = realpath(exec_path, real);
                /* Return value is # of bytes that we wrote to the buffer. */
                if (temp == NULL) {
                    ret = get_errno(-1);
                } else {
                    /* Don't worry about sign mismatch as earlier mapping
                     * logic would have thrown a bad address error. */
                    ret = MIN(strlen(real), arg3);
                    /* We cannot NUL terminate the string. */
                    memcpy(p2, real, ret);
                }
            } else {
                ret = get_errno(readlink(path(p), p2, arg3));
            }
            unlock_user(p2, arg2, ret);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_readlinkat)
    case TARGET_NR_readlinkat:
        {
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (is_proc_myself((const char *)p, "exe")) {
                char real[PATH_MAX], *temp;
                temp = realpath(exec_path, real);
                ret = temp == NULL ? get_errno(-1) : strlen(real);
                snprintf((char *)p2, arg4, "%s", real);
            } else {
                ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
            }
            unlock_user(p2, arg3, ret);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_swapon
    case TARGET_NR_swapon:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapon(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_reboot:
        if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
            /* arg4 must be ignored in all other cases */
            p = lock_user_string(arg4);
            if (!p) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(reboot(arg1, arg2, arg3, p));
            unlock_user(p, arg4, 0);
        } else {
            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
        }
        return ret;
#ifdef TARGET_NR_mmap
    case TARGET_NR_mmap:
#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
    defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
    || defined(TARGET_S390X)
        {
            abi_ulong *v;
            abi_ulong v1, v2, v3, v4, v5, v6;
            if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
                return -TARGET_EFAULT;
            v1 = tswapal(v[0]);
            v2 = tswapal(v[1]);
            v3 = tswapal(v[2]);
            v4 = tswapal(v[3]);
            v5 = tswapal(v[4]);
            v6 = tswapal(v[5]);
            unlock_user(v, arg1, 0);
            ret = get_errno(target_mmap(v1, v2, v3,
                                        target_to_host_bitmask(v4, mmap_flags_tbl),
                                        v5, v6));
        }
#else
        /* mmap pointers are always untagged */
        ret = get_errno(target_mmap(arg1, arg2, arg3,
                                    target_to_host_bitmask(arg4, mmap_flags_tbl),
                                    arg5,
                                    arg6));
#endif
        return ret;
#endif
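    /*
     * mmap2 passes its file offset in 4096-byte units (hence the shift
     * by MMAP_SHIFT below), which lets 32-bit guests reach offsets
     * beyond 2^32 bytes.
     */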
#ifdef TARGET_NR_mmap2
    case TARGET_NR_mmap2:
#ifndef MMAP_SHIFT
#define MMAP_SHIFT 12
#endif
        ret = target_mmap(arg1, arg2, arg3,
                          target_to_host_bitmask(arg4, mmap_flags_tbl),
                          arg5, arg6 << MMAP_SHIFT);
        return get_errno(ret);
#endif
    case TARGET_NR_munmap:
        arg1 = cpu_untagged_addr(cpu, arg1);
        return get_errno(target_munmap(arg1, arg2));
    case TARGET_NR_mprotect:
        arg1 = cpu_untagged_addr(cpu, arg1);
        {
            TaskState *ts = cpu->opaque;
            /* Special hack to detect libc making the stack executable. */
            if ((arg3 & PROT_GROWSDOWN)
                && arg1 >= ts->info->stack_limit
                && arg1 <= ts->info->start_stack) {
                arg3 &= ~PROT_GROWSDOWN;
                arg2 = arg2 + arg1 - ts->info->stack_limit;
                arg1 = ts->info->stack_limit;
            }
        }
        return get_errno(target_mprotect(arg1, arg2, arg3));
#ifdef TARGET_NR_mremap
    case TARGET_NR_mremap:
        arg1 = cpu_untagged_addr(cpu, arg1);
        /* mremap new_addr (arg5) is always untagged */
        return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
#endif
    /* ??? msync/mlock/munlock are broken for softmmu.  */
#ifdef TARGET_NR_msync
    case TARGET_NR_msync:
        return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
#endif
#ifdef TARGET_NR_mlock
    case TARGET_NR_mlock:
        return get_errno(mlock(g2h(cpu, arg1), arg2));
#endif
#ifdef TARGET_NR_munlock
    case TARGET_NR_munlock:
        return get_errno(munlock(g2h(cpu, arg1), arg2));
#endif
#ifdef TARGET_NR_mlockall
    case TARGET_NR_mlockall:
        return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
#endif
#ifdef TARGET_NR_munlockall
    case TARGET_NR_munlockall:
        return get_errno(munlockall());
#endif
#ifdef TARGET_NR_truncate
    case TARGET_NR_truncate:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(truncate(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate
    case TARGET_NR_ftruncate:
        return get_errno(ftruncate(arg1, arg2));
#endif
    case TARGET_NR_fchmod:
        return get_errno(fchmod(arg1, arg2));
#if defined(TARGET_NR_fchmodat)
    case TARGET_NR_fchmodat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(fchmodat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_getpriority:
        /* Note that negative values are valid for getpriority, so we must
           differentiate based on errno settings.  */
        errno = 0;
        ret = getpriority(arg1, arg2);
        if (ret == -1 && errno != 0) {
            return -host_to_target_errno(errno);
        }
#ifdef TARGET_ALPHA
        /* Return value is the unbiased priority.  Signal no error.  */
        ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
#else
        /* Return value is a biased priority to avoid negative numbers.  */
        ret = 20 - ret;
#endif
        return ret;
    case TARGET_NR_setpriority:
        return get_errno(setpriority(arg1, arg2, arg3));
#ifdef TARGET_NR_statfs
    case TARGET_NR_statfs:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs:
        if (!is_error(ret)) {
            struct target_statfs *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg2, 1);
        }
        return ret;
#endif
#ifdef TARGET_NR_fstatfs
    case TARGET_NR_fstatfs:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs;
#endif
#ifdef TARGET_NR_statfs64
    case TARGET_NR_statfs64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs64:
        if (!is_error(ret)) {
            struct target_statfs64 *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg3, 1);
        }
        return ret;
    case TARGET_NR_fstatfs64:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs64;
#endif
#ifdef TARGET_NR_socketcall
    case TARGET_NR_socketcall:
        return do_socketcall(arg1, arg2);
#endif
#ifdef TARGET_NR_accept
    case TARGET_NR_accept:
        return do_accept4(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_accept4
    case TARGET_NR_accept4:
        return do_accept4(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_bind
    case TARGET_NR_bind:
        return do_bind(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_connect
    case TARGET_NR_connect:
        return do_connect(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getpeername
    case TARGET_NR_getpeername:
        return do_getpeername(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockname
    case TARGET_NR_getsockname:
        return do_getsockname(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockopt
    case TARGET_NR_getsockopt:
        return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_listen
    case TARGET_NR_listen:
        return get_errno(listen(arg1, arg2));
#endif
#ifdef TARGET_NR_recv
    case TARGET_NR_recv:
        return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_recvfrom
    case TARGET_NR_recvfrom:
        return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_recvmsg
    case TARGET_NR_recvmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_send
    case TARGET_NR_send:
        return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_sendmsg
    case TARGET_NR_sendmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 1);
#endif
#ifdef TARGET_NR_sendmmsg
    case TARGET_NR_sendmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
#endif
#ifdef TARGET_NR_recvmmsg
    case TARGET_NR_recvmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
#endif
#ifdef TARGET_NR_sendto
    case TARGET_NR_sendto:
        return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_shutdown
    case TARGET_NR_shutdown:
        return get_errno(shutdown(arg1, arg2));
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
    case TARGET_NR_getrandom:
        p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(getrandom(p, arg2, arg3));
        unlock_user(p, arg1, ret);
        return ret;
#endif
#ifdef TARGET_NR_socket
    case TARGET_NR_socket:
        return do_socket(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_socketpair
    case TARGET_NR_socketpair:
        return do_socketpair(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_setsockopt
    case TARGET_NR_setsockopt:
        return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
#endif
#if defined(TARGET_NR_syslog)
    case TARGET_NR_syslog:
        {
            int len = arg2;

            switch (arg1) {
            case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
            case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
            case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
            case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
            case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
            case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
                return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
            case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
            case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
            case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
                {
                    if (len < 0) {
                        return -TARGET_EINVAL;
                    }
                    if (len == 0) {
                        return 0;
                    }
                    p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
                    unlock_user(p, arg2, arg3);
                    return ret;
                }
            default:
                return -TARGET_EINVAL;
            }
        }
        break;
#endif
    case TARGET_NR_setitimer:
        {
            struct itimerval value, ovalue, *pvalue;

            if (arg2) {
                pvalue = &value;
                if (copy_from_user_timeval(&pvalue->it_interval, arg2)
                    || copy_from_user_timeval(&pvalue->it_value,
                                              arg2 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
            } else {
                pvalue = NULL;
            }
            ret = get_errno(setitimer(arg1, pvalue, &ovalue));
            if (!is_error(ret) && arg3) {
                if (copy_to_user_timeval(arg3,
                                         &ovalue.it_interval)
                    || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
                                            &ovalue.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_getitimer:
        {
            struct itimerval value;

            ret = get_errno(getitimer(arg1, &value));
            if (!is_error(ret) && arg2) {
                if (copy_to_user_timeval(arg2,
                                         &value.it_interval)
                    || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
                                            &value.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#ifdef TARGET_NR_stat
    case TARGET_NR_stat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_lstat
    case TARGET_NR_lstat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_fstat
    case TARGET_NR_fstat:
        {
            ret = get_errno(fstat(arg1, &st));
#if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
        do_stat:
#endif
            if (!is_error(ret)) {
                struct target_stat *target_st;

                if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
                    return -TARGET_EFAULT;
                memset(target_st, 0, sizeof(*target_st));
                __put_user(st.st_dev, &target_st->st_dev);
                __put_user(st.st_ino, &target_st->st_ino);
                __put_user(st.st_mode, &target_st->st_mode);
                __put_user(st.st_uid, &target_st->st_uid);
                __put_user(st.st_gid, &target_st->st_gid);
                __put_user(st.st_nlink, &target_st->st_nlink);
                __put_user(st.st_rdev, &target_st->st_rdev);
                __put_user(st.st_size, &target_st->st_size);
                __put_user(st.st_blksize, &target_st->st_blksize);
                __put_user(st.st_blocks, &target_st->st_blocks);
                __put_user(st.st_atime, &target_st->target_st_atime);
                __put_user(st.st_mtime, &target_st->target_st_mtime);
                __put_user(st.st_ctime, &target_st->target_st_ctime);
#if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
    defined(TARGET_STAT_HAVE_NSEC)
                __put_user(st.st_atim.tv_nsec,
                           &target_st->target_st_atime_nsec);
                __put_user(st.st_mtim.tv_nsec,
                           &target_st->target_st_mtime_nsec);
                __put_user(st.st_ctim.tv_nsec,
                           &target_st->target_st_ctime_nsec);
#endif
                unlock_user_struct(target_st, arg2, 1);
            }
        }
        return ret;
#endif
    case TARGET_NR_vhangup:
        return get_errno(vhangup());
#ifdef TARGET_NR_syscall
    case TARGET_NR_syscall:
        return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
                          arg6, arg7, arg8, 0);
#endif
#if defined(TARGET_NR_wait4)
    case TARGET_NR_wait4:
        {
            int status;
            abi_long status_ptr = arg2;
            struct rusage rusage, *rusage_ptr;
            abi_ulong target_rusage = arg4;
            abi_long rusage_err;
            if (target_rusage)
                rusage_ptr = &rusage;
            else
                rusage_ptr = NULL;
            ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
            if (!is_error(ret)) {
                if (status_ptr && ret) {
                    status = host_to_target_waitstatus(status);
                    if (put_user_s32(status, status_ptr))
                        return -TARGET_EFAULT;
                }
                if (target_rusage) {
                    rusage_err = host_to_target_rusage(target_rusage, &rusage);
                    if (rusage_err) {
                        ret = rusage_err;
                    }
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_swapoff
    case TARGET_NR_swapoff:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapoff(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_sysinfo:
        {
            struct target_sysinfo *target_value;
            struct sysinfo value;
            ret = get_errno(sysinfo(&value));
            if (!is_error(ret) && arg1)
            {
                if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
                    return -TARGET_EFAULT;
                __put_user(value.uptime, &target_value->uptime);
                __put_user(value.loads[0], &target_value->loads[0]);
                __put_user(value.loads[1], &target_value->loads[1]);
                __put_user(value.loads[2], &target_value->loads[2]);
                __put_user(value.totalram, &target_value->totalram);
                __put_user(value.freeram, &target_value->freeram);
                __put_user(value.sharedram, &target_value->sharedram);
                __put_user(value.bufferram, &target_value->bufferram);
                __put_user(value.totalswap, &target_value->totalswap);
                __put_user(value.freeswap, &target_value->freeswap);
                __put_user(value.procs, &target_value->procs);
                __put_user(value.totalhigh, &target_value->totalhigh);
                __put_user(value.freehigh, &target_value->freehigh);
                __put_user(value.mem_unit, &target_value->mem_unit);
                unlock_user_struct(target_value, arg1, 1);
            }
        }
        return ret;
#ifdef TARGET_NR_ipc
    case TARGET_NR_ipc:
        return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_semget
    case TARGET_NR_semget:
        return get_errno(semget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_semop
    case TARGET_NR_semop:
        return do_semtimedop(arg1, arg2, arg3, 0, false);
#endif
#ifdef TARGET_NR_semtimedop
    case TARGET_NR_semtimedop:
        return do_semtimedop(arg1, arg2, arg3, arg4, false);
#endif
#ifdef TARGET_NR_semtimedop_time64
    case TARGET_NR_semtimedop_time64:
        return do_semtimedop(arg1, arg2, arg3, arg4, true);
#endif
#ifdef TARGET_NR_semctl
    case TARGET_NR_semctl:
        return do_semctl(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_msgctl
    case TARGET_NR_msgctl:
        return do_msgctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_msgget
    case TARGET_NR_msgget:
        return get_errno(msgget(arg1, arg2));
#endif
#ifdef TARGET_NR_msgrcv
    case TARGET_NR_msgrcv:
        return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_msgsnd
    case TARGET_NR_msgsnd:
        return do_msgsnd(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_shmget
    case TARGET_NR_shmget:
        return get_errno(shmget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_shmctl
    case TARGET_NR_shmctl:
        return do_shmctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmat
    case TARGET_NR_shmat:
        return do_shmat(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmdt
    case TARGET_NR_shmdt:
        return do_shmdt(arg1);
#endif
    case TARGET_NR_fsync:
        return get_errno(fsync(arg1));
    case TARGET_NR_clone:
        /* Linux manages to have three different orderings for its
         * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
         * match the kernel's CONFIG_CLONE_* settings.
         * Microblaze is further special in that it uses a sixth
         * implicit argument to clone for the TLS pointer.
         */
#if defined(TARGET_MICROBLAZE)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
#elif defined(TARGET_CLONE_BACKWARDS)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
#elif defined(TARGET_CLONE_BACKWARDS2)
        ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
#else
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
#endif
        return ret;
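    /*
     * For reference: do_fork() here expects (env, flags, newsp,
     * parent_tidptr, newtls, child_tidptr); the #if ladder above just
     * permutes the guest's registers into that order for each kernel
     * clone configuration.
     */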
#ifdef __NR_exit_group
        /* new thread calls */
    case TARGET_NR_exit_group:
        preexit_cleanup(cpu_env, arg1);
        return get_errno(exit_group(arg1));
#endif
    case TARGET_NR_setdomainname:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(setdomainname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
    case TARGET_NR_uname:
        /* no need to transcode because we use the linux syscall */
        {
            struct new_utsname * buf;

            if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
                return -TARGET_EFAULT;
            ret = get_errno(sys_uname(buf));
            if (!is_error(ret)) {
                /* Overwrite the native machine name with whatever is being
                 * emulated. */
                g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
                          sizeof(buf->machine));
                /* Allow the user to override the reported release. */
                if (qemu_uname_release && *qemu_uname_release) {
                    g_strlcpy(buf->release, qemu_uname_release,
                              sizeof(buf->release));
                }
            }
            unlock_user_struct(buf, arg1, 1);
        }
        return ret;
#ifdef TARGET_I386
    case TARGET_NR_modify_ldt:
        return do_modify_ldt(cpu_env, arg1, arg2, arg3);
#if !defined(TARGET_X86_64)
    case TARGET_NR_vm86:
        return do_vm86(cpu_env, arg1, arg2);
#endif
#endif
#if defined(TARGET_NR_adjtimex)
    case TARGET_NR_adjtimex:
        {
            struct timex host_buf;

            if (target_to_host_timex(&host_buf, arg1) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(adjtimex(&host_buf));
            if (!is_error(ret)) {
                if (host_to_target_timex(arg1, &host_buf) != 0) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime:
        {
            struct timex htx, *phtx = &htx;

            if (target_to_host_timex(phtx, arg2) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(clock_adjtime(arg1, phtx));
            if (!is_error(ret) && phtx) {
                if (host_to_target_timex(arg2, phtx) != 0) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime64:
        {
            struct timex htx;

            if (target_to_host_timex64(&htx, arg2) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(clock_adjtime(arg1, &htx));
            if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
    case TARGET_NR_getpgid:
        return get_errno(getpgid(arg1));
    case TARGET_NR_fchdir:
        return get_errno(fchdir(arg1));
    case TARGET_NR_personality:
        return get_errno(personality(arg1));
#ifdef TARGET_NR__llseek /* Not on alpha */
    case TARGET_NR__llseek:
        {
            int64_t res;
#if !defined(__NR_llseek)
            res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
            if (res == -1) {
                ret = get_errno(res);
            } else {
                ret = 0;
            }
#else
            ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
#endif
            if ((ret == 0) && put_user_s64(res, arg4)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
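    /*
     * Three getdents strategies follow: convert host linux_dirent
     * records when the target and host layouts differ in size,
     * byte-swap them in place when they match, or synthesise the old
     * format from getdents64 when the host lacks the legacy syscall.
     */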
#ifdef TARGET_NR_getdents
    case TARGET_NR_getdents:
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
#if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
        {
            struct target_dirent *target_dirp;
            struct linux_dirent *dirp;
            abi_long count = arg3;

            dirp = g_try_malloc(count);
            if (!dirp) {
                return -TARGET_ENOMEM;
            }

            ret = get_errno(sys_getdents(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent *de;
                struct target_dirent *tde;
                int len = ret;
                int reclen, treclen;
                int count1, tnamelen;

                count1 = 0;
                de = dirp;
                if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                    return -TARGET_EFAULT;
                tde = target_dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    tnamelen = reclen - offsetof(struct linux_dirent, d_name);
                    assert(tnamelen >= 0);
                    treclen = tnamelen + offsetof(struct target_dirent, d_name);
                    assert(count1 + treclen <= count);
                    tde->d_reclen = tswap16(treclen);
                    tde->d_ino = tswapal(de->d_ino);
                    tde->d_off = tswapal(de->d_off);
                    memcpy(tde->d_name, de->d_name, tnamelen);
                    de = (struct linux_dirent *)((char *)de + reclen);
                    len -= reclen;
                    tde = (struct target_dirent *)((char *)tde + treclen);
                    count1 += treclen;
                }
                ret = count1;
                unlock_user(target_dirp, arg2, ret);
            }
            g_free(dirp);
        }
#else
        {
            struct linux_dirent *dirp;
            abi_long count = arg3;

            if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                return -TARGET_EFAULT;
            ret = get_errno(sys_getdents(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent *de;
                int len = ret;
                int reclen;
                de = dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    if (reclen > len)
                        break;
                    de->d_reclen = tswap16(reclen);
                    tswapls(&de->d_ino);
                    tswapls(&de->d_off);
                    de = (struct linux_dirent *)((char *)de + reclen);
                    len -= reclen;
                }
            }
            unlock_user(dirp, arg2, ret);
        }
#endif
#else
        /* Implement getdents in terms of getdents64 */
        {
            struct linux_dirent64 *dirp;
            abi_long count = arg3;

            dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
            if (!dirp) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(sys_getdents64(arg1, dirp, count));
            if (!is_error(ret)) {
                /* Convert the dirent64 structs to target dirent. We do this
                 * in-place, since we can guarantee that a target_dirent is no
                 * larger than a dirent64; however this means we have to be
                 * careful to read everything before writing in the new format.
                 */
                struct linux_dirent64 *de;
                struct target_dirent *tde;
                int len = ret;
                int tlen = 0;

                de = dirp;
                tde = (struct target_dirent *)dirp;
                while (len > 0) {
                    int namelen, treclen;
                    int reclen = de->d_reclen;
                    uint64_t ino = de->d_ino;
                    int64_t off = de->d_off;
                    uint8_t type = de->d_type;

                    namelen = strlen(de->d_name);
                    treclen = offsetof(struct target_dirent, d_name)
                        + namelen + 2;
                    treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));

                    memmove(tde->d_name, de->d_name, namelen + 1);
                    tde->d_ino = tswapal(ino);
                    tde->d_off = tswapal(off);
                    tde->d_reclen = tswap16(treclen);
                    /* The target_dirent type is in what was formerly a padding
                     * byte at the end of the structure:
                     */
                    *(((char *)tde) + treclen - 1) = type;

                    de = (struct linux_dirent64 *)((char *)de + reclen);
                    tde = (struct target_dirent *)((char *)tde + treclen);
                    len -= reclen;
                    tlen += treclen;
                }
                ret = tlen;
            }
            unlock_user(dirp, arg2, ret);
        }
#endif
        return ret;
#endif /* TARGET_NR_getdents */
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
    case TARGET_NR_getdents64:
        {
            struct linux_dirent64 *dirp;
            abi_long count = arg3;
            if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                return -TARGET_EFAULT;
            ret = get_errno(sys_getdents64(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent64 *de;
                int len = ret;
                int reclen;
                de = dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    if (reclen > len)
                        break;
                    de->d_reclen = tswap16(reclen);
                    tswap64s((uint64_t *)&de->d_ino);
                    tswap64s((uint64_t *)&de->d_off);
                    de = (struct linux_dirent64 *)((char *)de + reclen);
                    len -= reclen;
                }
            }
            unlock_user(dirp, arg2, ret);
        }
        return ret;
#endif /* TARGET_NR_getdents64 */
#if defined(TARGET_NR__newselect)
    case TARGET_NR__newselect:
        return do_select(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_poll
    case TARGET_NR_poll:
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
#endif
#ifdef TARGET_NR_ppoll
    case TARGET_NR_ppoll:
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
#endif
#ifdef TARGET_NR_ppoll_time64
    case TARGET_NR_ppoll_time64:
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
#endif
    case TARGET_NR_flock:
        /* NOTE: the flock constant seems to be the same for every
           Linux platform */
        return get_errno(safe_flock(arg1, arg2));
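    /*
     * lock_iovec() maps and validates the whole guest iovec array in
     * one go; on failure it sets host errno, which is converted by
     * hand below since no host syscall was issued.
     */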
    case TARGET_NR_readv:
        {
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                ret = get_errno(safe_readv(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
    case TARGET_NR_writev:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(safe_writev(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#if defined(TARGET_NR_preadv)
    case TARGET_NR_preadv:
        {
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                unsigned long low, high;

                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_pwritev)
    case TARGET_NR_pwritev:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                unsigned long low, high;

                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
    case TARGET_NR_getsid:
        return get_errno(getsid(arg1));
#if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
    case TARGET_NR_fdatasync:
        return get_errno(fdatasync(arg1));
#endif
    case TARGET_NR_sched_getaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_getaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                return -TARGET_EINVAL;
            }
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);

            mask = alloca(mask_size);
            memset(mask, 0, mask_size);
            ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));

            if (!is_error(ret)) {
                if (ret > arg2) {
                    /* More data returned than the caller's buffer will fit.
                     * This only happens if sizeof(abi_long) < sizeof(long)
                     * and the caller passed us a buffer holding an odd number
                     * of abi_longs. If the host kernel is actually using the
                     * extra 4 bytes then fail EINVAL; otherwise we can just
                     * ignore them and only copy the interesting part.
                     */
                    int numcpus = sysconf(_SC_NPROCESSORS_CONF);
                    if (numcpus > arg2 * 8) {
                        return -TARGET_EINVAL;
                    }
                    ret = arg2;
                }

                if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    case TARGET_NR_sched_setaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_setaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                return -TARGET_EINVAL;
            }
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
            mask = alloca(mask_size);

            ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
            if (ret) {
                return ret;
            }

            return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
        }
    case TARGET_NR_getcpu:
        {
            unsigned cpu, node;
            ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
                                       arg2 ? &node : NULL,
                                       NULL));
            if (is_error(ret)) {
                return ret;
            }
            if (arg1 && put_user_u32(cpu, arg1)) {
                return -TARGET_EFAULT;
            }
            if (arg2 && put_user_u32(node, arg2)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_sched_setparam:
        {
            struct sched_param *target_schp;
            struct sched_param schp;

            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
                return -TARGET_EFAULT;
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg2, 0);
            return get_errno(sched_setparam(arg1, &schp));
        }
    case TARGET_NR_sched_getparam:
        {
            struct sched_param *target_schp;
            struct sched_param schp;

            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            ret = get_errno(sched_getparam(arg1, &schp));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
                    return -TARGET_EFAULT;
                target_schp->sched_priority = tswap32(schp.sched_priority);
                unlock_user_struct(target_schp, arg2, 1);
            }
        }
        return ret;
    case TARGET_NR_sched_setscheduler:
        {
            struct sched_param *target_schp;
            struct sched_param schp;
            if (arg3 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
                return -TARGET_EFAULT;
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg3, 0);
            return get_errno(sched_setscheduler(arg1, arg2, &schp));
        }
    case TARGET_NR_sched_getscheduler:
        return get_errno(sched_getscheduler(arg1));
    case TARGET_NR_sched_yield:
        return get_errno(sched_yield());
    case TARGET_NR_sched_get_priority_max:
        return get_errno(sched_get_priority_max(arg1));
    case TARGET_NR_sched_get_priority_min:
        return get_errno(sched_get_priority_min(arg1));
#ifdef TARGET_NR_sched_rr_get_interval
    case TARGET_NR_sched_rr_get_interval:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                ret = host_to_target_timespec(arg2, &ts);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_sched_rr_get_interval_time64
    case TARGET_NR_sched_rr_get_interval_time64:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                ret = host_to_target_timespec64(arg2, &ts);
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_nanosleep)
    case TARGET_NR_nanosleep:
        {
            struct timespec req, rem;
            target_to_host_timespec(&req, arg1);
            ret = get_errno(safe_nanosleep(&req, &rem));
            if (is_error(ret) && arg2) {
                host_to_target_timespec(arg2, &rem);
            }
        }
        return ret;
#endif
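    /*
     * prctl options that move data through pointers (PR_GET_PDEATHSIG,
     * PR_GET_NAME, PR_SET_NAME) need explicit user-memory locking;
     * the target-specific options below operate on CPU state directly.
     */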
    case TARGET_NR_prctl:
        switch (arg1) {
        case PR_GET_PDEATHSIG:
        {
            int deathsig;
            ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
            if (!is_error(ret) && arg2
                && put_user_s32(deathsig, arg2)) {
                return -TARGET_EFAULT;
            }
            return ret;
        }
#ifdef PR_GET_NAME
        case PR_GET_NAME:
        {
            void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(arg1, (unsigned long)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 16);
            return ret;
        }
        case PR_SET_NAME:
        {
            void *name = lock_user(VERIFY_READ, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(arg1, (unsigned long)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 0);
            return ret;
        }
#endif /* PR_GET_NAME */
10825 case TARGET_PR_GET_FP_MODE
:
10827 CPUMIPSState
*env
= ((CPUMIPSState
*)cpu_env
);
10829 if (env
->CP0_Status
& (1 << CP0St_FR
)) {
10830 ret
|= TARGET_PR_FP_MODE_FR
;
10832 if (env
->CP0_Config5
& (1 << CP0C5_FRE
)) {
10833 ret
|= TARGET_PR_FP_MODE_FRE
;
10837 case TARGET_PR_SET_FP_MODE
:
10839 CPUMIPSState
*env
= ((CPUMIPSState
*)cpu_env
);
10840 bool old_fr
= env
->CP0_Status
& (1 << CP0St_FR
);
10841 bool old_fre
= env
->CP0_Config5
& (1 << CP0C5_FRE
);
10842 bool new_fr
= arg2
& TARGET_PR_FP_MODE_FR
;
10843 bool new_fre
= arg2
& TARGET_PR_FP_MODE_FRE
;
10845 const unsigned int known_bits
= TARGET_PR_FP_MODE_FR
|
10846 TARGET_PR_FP_MODE_FRE
;
10848 /* If nothing to change, return right away, successfully. */
10849 if (old_fr
== new_fr
&& old_fre
== new_fre
) {
10852 /* Check the value is valid */
10853 if (arg2
& ~known_bits
) {
10854 return -TARGET_EOPNOTSUPP
;
10856 /* Setting FRE without FR is not supported. */
10857 if (new_fre
&& !new_fr
) {
10858 return -TARGET_EOPNOTSUPP
;
10860 if (new_fr
&& !(env
->active_fpu
.fcr0
& (1 << FCR0_F64
))) {
10861 /* FR1 is not supported */
10862 return -TARGET_EOPNOTSUPP
;
10864 if (!new_fr
&& (env
->active_fpu
.fcr0
& (1 << FCR0_F64
))
10865 && !(env
->CP0_Status_rw_bitmask
& (1 << CP0St_FR
))) {
10866 /* cannot set FR=0 */
10867 return -TARGET_EOPNOTSUPP
;
10869 if (new_fre
&& !(env
->active_fpu
.fcr0
& (1 << FCR0_FREP
))) {
10870 /* Cannot set FRE=1 */
10871 return -TARGET_EOPNOTSUPP
;
            int i;
            fpr_t *fpr = env->active_fpu.fpr;
            for (i = 0; i < 32 ; i += 2) {
                if (!old_fr && new_fr) {
                    fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
                } else if (old_fr && !new_fr) {
                    fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
                }
            }

            if (new_fr) {
                env->CP0_Status |= (1 << CP0St_FR);
                env->hflags |= MIPS_HFLAG_F64;
            } else {
                env->CP0_Status &= ~(1 << CP0St_FR);
                env->hflags &= ~MIPS_HFLAG_F64;
            }
            if (new_fre) {
                env->CP0_Config5 |= (1 << CP0C5_FRE);
                if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
                    env->hflags |= MIPS_HFLAG_FRE;
                }
            } else {
                env->CP0_Config5 &= ~(1 << CP0C5_FRE);
                env->hflags &= ~MIPS_HFLAG_FRE;
            }

            return 0;
        }
#endif /* MIPS */
#ifdef TARGET_AARCH64
        case TARGET_PR_SVE_SET_VL:
            /*
             * We cannot support either PR_SVE_SET_VL_ONEXEC or
             * PR_SVE_VL_INHERIT.  Note the kernel definition
             * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
             * even though the current architectural maximum is VQ=16.
             */
            ret = -TARGET_EINVAL;
            if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
                && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
                CPUARMState *env = cpu_env;
                ARMCPU *cpu = env_archcpu(env);
                uint32_t vq, old_vq;

                old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
                vq = MAX(arg2 / 16, 1);
                vq = MIN(vq, cpu->sve_max_vq);

                if (vq < old_vq) {
                    aarch64_sve_narrow_vq(env, vq);
                }
                env->vfp.zcr_el[1] = vq - 1;
                arm_rebuild_hflags(env);
                ret = vq * 16;
            }
            return ret;
        case TARGET_PR_SVE_GET_VL:
            ret = -TARGET_EINVAL;
            {
                ARMCPU *cpu = env_archcpu(cpu_env);
                if (cpu_isar_feature(aa64_sve, cpu)) {
                    ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
                }
            }
            return ret;
        case TARGET_PR_PAC_RESET_KEYS:
            {
                CPUARMState *env = cpu_env;
                ARMCPU *cpu = env_archcpu(env);

                if (arg3 || arg4 || arg5) {
                    return -TARGET_EINVAL;
                }
                if (cpu_isar_feature(aa64_pauth, cpu)) {
                    int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
                               TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
                               TARGET_PR_PAC_APGAKEY);
                    int ret = 0;
                    Error *err = NULL;

                    if (arg2 == 0) {
                        arg2 = all;
                    } else if (arg2 & ~all) {
                        return -TARGET_EINVAL;
                    }
                    if (arg2 & TARGET_PR_PAC_APIAKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apia,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (arg2 & TARGET_PR_PAC_APIBKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apib,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (arg2 & TARGET_PR_PAC_APDAKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apda,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (arg2 & TARGET_PR_PAC_APDBKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apdb,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (arg2 & TARGET_PR_PAC_APGAKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apga,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (ret != 0) {
                        /*
                         * Some unknown failure in the crypto.  The best
                         * we can do is log it and fail the syscall.
                         * The real syscall cannot fail this way.
                         */
                        qemu_log_mask(LOG_UNIMP,
                                      "PR_PAC_RESET_KEYS: Crypto failure: %s",
                                      error_get_pretty(err));
                        error_free(err);
                        return -TARGET_EIO;
                    }
                    return 0;
                }
                return -TARGET_EINVAL;
            }
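        /*
         * The tagged-address prctls below cover plain top-byte tagging
         * (TARGET_PR_TAGGED_ADDR_ENABLE) and, only when the CPU
         * implements MTE, the tag-check-fault mode and the set of
         * allowed allocation tags; the extra mask bits are accepted
         * solely when aa64_mte is present.
         */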
        case TARGET_PR_SET_TAGGED_ADDR_CTRL:
            {
                abi_ulong valid_mask = TARGET_PR_TAGGED_ADDR_ENABLE;
                CPUARMState *env = cpu_env;
                ARMCPU *cpu = env_archcpu(env);

                if (cpu_isar_feature(aa64_mte, cpu)) {
                    valid_mask |= TARGET_PR_MTE_TCF_MASK;
                    valid_mask |= TARGET_PR_MTE_TAG_MASK;
                }

                if ((arg2 & ~valid_mask) || arg3 || arg4 || arg5) {
                    return -TARGET_EINVAL;
                }
                env->tagged_addr_enable = arg2 & TARGET_PR_TAGGED_ADDR_ENABLE;

                if (cpu_isar_feature(aa64_mte, cpu)) {
                    switch (arg2 & TARGET_PR_MTE_TCF_MASK) {
                    case TARGET_PR_MTE_TCF_NONE:
                    case TARGET_PR_MTE_TCF_SYNC:
                    case TARGET_PR_MTE_TCF_ASYNC:
                        break;
                    default:
                        return -EINVAL;
                    }

                    /*
                     * Write PR_MTE_TCF to SCTLR_EL1[TCF0].
                     * Note that the syscall values are consistent with hw.
                     */
                    env->cp15.sctlr_el[1] =
                        deposit64(env->cp15.sctlr_el[1], 38, 2,
                                  arg2 >> TARGET_PR_MTE_TCF_SHIFT);

                    /*
                     * Write PR_MTE_TAG to GCR_EL1[Exclude].
                     * Note that the syscall uses an include mask,
                     * and hardware uses an exclude mask -- invert.
                     */
                    env->cp15.gcr_el1 =
                        deposit64(env->cp15.gcr_el1, 0, 16,
                                  ~arg2 >> TARGET_PR_MTE_TAG_SHIFT);
                    arm_rebuild_hflags(env);
                }
                return 0;
            }
        case TARGET_PR_GET_TAGGED_ADDR_CTRL:
            {
                abi_long ret = 0;
                CPUARMState *env = cpu_env;
                ARMCPU *cpu = env_archcpu(env);

                if (arg2 || arg3 || arg4 || arg5) {
                    return -TARGET_EINVAL;
                }
                if (env->tagged_addr_enable) {
                    ret |= TARGET_PR_TAGGED_ADDR_ENABLE;
                }
                if (cpu_isar_feature(aa64_mte, cpu)) {
                    /* See above. */
                    ret |= (extract64(env->cp15.sctlr_el[1], 38, 2)
                            << TARGET_PR_MTE_TCF_SHIFT);
                    ret = deposit64(ret, TARGET_PR_MTE_TAG_SHIFT, 16,
                                    ~env->cp15.gcr_el1);
                }
                return ret;
            }
#endif /* AARCH64 */
        case PR_GET_SECCOMP:
        case PR_SET_SECCOMP:
            /* Disable seccomp to prevent the target disabling syscalls we
             * need. */
            return -TARGET_EINVAL;
        default:
            /* Most prctl options have no pointer arguments */
            return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
        }
        break;
#ifdef TARGET_NR_arch_prctl
    case TARGET_NR_arch_prctl:
        return do_arch_prctl(cpu_env, arg1, arg2);
#endif
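    /*
     * On 32-bit ABIs the 64-bit file offsets used by several of the
     * syscalls below arrive as a pair of 32-bit argument registers, and
     * some ABIs require that pair to be register-aligned, inserting a
     * padding register first; regpairs_aligned() detects this so the
     * handlers can shuffle arguments before target_offset64() combines
     * the two halves.
     */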
#ifdef TARGET_NR_pread64
    case TARGET_NR_pread64:
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, ret);
        return ret;
    case TARGET_NR_pwrite64:
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_getcwd:
        if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
            return -TARGET_EFAULT;
        ret = get_errno(sys_getcwd1(p, arg2));
        unlock_user(p, arg1, ret);
        return ret;
    case TARGET_NR_capget:
    case TARGET_NR_capset:
    {
        struct target_user_cap_header *target_header;
        struct target_user_cap_data *target_data = NULL;
        struct __user_cap_header_struct header;
        struct __user_cap_data_struct data[2];
        struct __user_cap_data_struct *dataptr = NULL;
        int i, target_datalen;
        int data_items = 1;

        if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
            return -TARGET_EFAULT;
        }
        header.version = tswap32(target_header->version);
        header.pid = tswap32(target_header->pid);

        if (header.version != _LINUX_CAPABILITY_VERSION) {
            /* Version 2 and up takes pointer to two user_data structs */
            data_items = 2;
        }

        target_datalen = sizeof(*target_data) * data_items;

        if (arg2) {
            if (num == TARGET_NR_capget) {
                target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
            } else {
                target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
            }
            if (!target_data) {
                unlock_user_struct(target_header, arg1, 0);
                return -TARGET_EFAULT;
            }

            if (num == TARGET_NR_capset) {
                for (i = 0; i < data_items; i++) {
                    data[i].effective = tswap32(target_data[i].effective);
                    data[i].permitted = tswap32(target_data[i].permitted);
                    data[i].inheritable = tswap32(target_data[i].inheritable);
                }
            }

            dataptr = data;
        }

        if (num == TARGET_NR_capget) {
            ret = get_errno(capget(&header, dataptr));
        } else {
            ret = get_errno(capset(&header, dataptr));
        }

        /* The kernel always updates version for both capget and capset */
        target_header->version = tswap32(header.version);
        unlock_user_struct(target_header, arg1, 1);

        if (arg2) {
            if (num == TARGET_NR_capget) {
                for (i = 0; i < data_items; i++) {
                    target_data[i].effective = tswap32(data[i].effective);
                    target_data[i].permitted = tswap32(data[i].permitted);
                    target_data[i].inheritable = tswap32(data[i].inheritable);
                }
                unlock_user(target_data, arg2, target_datalen);
            } else {
                unlock_user(target_data, arg2, 0);
            }
        }
        return ret;
    }
    case TARGET_NR_sigaltstack:
        return do_sigaltstack(arg1, arg2,
                              get_sp_from_cpustate((CPUArchState *)cpu_env));

#ifdef CONFIG_SENDFILE
#ifdef TARGET_NR_sendfile
    case TARGET_NR_sendfile:
    {
        off_t *offp = NULL;
        off_t off;
        if (arg3) {
            ret = get_user_sal(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            abi_long ret2 = put_user_sal(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_sendfile64
    case TARGET_NR_sendfile64:
    {
        off_t *offp = NULL;
        off_t off;
        if (arg3) {
            ret = get_user_s64(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            abi_long ret2 = put_user_s64(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#endif
#ifdef TARGET_NR_vfork
    case TARGET_NR_vfork:
        return get_errno(do_fork(cpu_env,
                         CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
                         0, 0, 0, 0));
#endif
#ifdef TARGET_NR_ugetrlimit
    case TARGET_NR_ugetrlimit:
    {
        struct rlimit rlim;
        int resource = target_to_host_resource(arg1);
        ret = get_errno(getrlimit(resource, &rlim));
        if (!is_error(ret)) {
            struct target_rlimit *target_rlim;
            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                return -TARGET_EFAULT;
            target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
            target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_truncate64
    case TARGET_NR_truncate64:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate64
    case TARGET_NR_ftruncate64:
        return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_stat64
    case TARGET_NR_stat64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_lstat64
    case TARGET_NR_lstat64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_fstat64
    case TARGET_NR_fstat64:
        ret = get_errno(fstat(arg1, &st));
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
#ifdef TARGET_NR_fstatat64
    case TARGET_NR_fstatat64:
#endif
#ifdef TARGET_NR_newfstatat
    case TARGET_NR_newfstatat:
#endif
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(fstatat(arg1, path(p), &st, arg4));
        unlock_user(p, arg2, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg3, &st);
        return ret;
#endif
#if defined(TARGET_NR_statx)
    case TARGET_NR_statx:
        {
            struct target_statx *target_stx;
            int dirfd = arg1;
            int flags = arg3;

            p = lock_user_string(arg2);
            if (p == NULL) {
                return -TARGET_EFAULT;
            }
#if defined(__NR_statx)
            {
                /*
                 * It is assumed that struct statx is architecture independent.
                 */
                struct target_statx host_stx;
                int mask = arg4;

                ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
                if (!is_error(ret)) {
                    if (host_to_target_statx(&host_stx, arg5) != 0) {
                        unlock_user(p, arg2, 0);
                        return -TARGET_EFAULT;
                    }
                }

                if (ret != -TARGET_ENOSYS) {
                    unlock_user(p, arg2, 0);
                    return ret;
                }
            }
#endif
= get_errno(fstatat(dirfd
, path(p
), &st
, flags
));
11354 unlock_user(p
, arg2
, 0);
11356 if (!is_error(ret
)) {
11357 if (!lock_user_struct(VERIFY_WRITE
, target_stx
, arg5
, 0)) {
11358 return -TARGET_EFAULT
;
11360 memset(target_stx
, 0, sizeof(*target_stx
));
11361 __put_user(major(st
.st_dev
), &target_stx
->stx_dev_major
);
11362 __put_user(minor(st
.st_dev
), &target_stx
->stx_dev_minor
);
11363 __put_user(st
.st_ino
, &target_stx
->stx_ino
);
11364 __put_user(st
.st_mode
, &target_stx
->stx_mode
);
11365 __put_user(st
.st_uid
, &target_stx
->stx_uid
);
11366 __put_user(st
.st_gid
, &target_stx
->stx_gid
);
11367 __put_user(st
.st_nlink
, &target_stx
->stx_nlink
);
11368 __put_user(major(st
.st_rdev
), &target_stx
->stx_rdev_major
);
11369 __put_user(minor(st
.st_rdev
), &target_stx
->stx_rdev_minor
);
11370 __put_user(st
.st_size
, &target_stx
->stx_size
);
11371 __put_user(st
.st_blksize
, &target_stx
->stx_blksize
);
11372 __put_user(st
.st_blocks
, &target_stx
->stx_blocks
);
11373 __put_user(st
.st_atime
, &target_stx
->stx_atime
.tv_sec
);
11374 __put_user(st
.st_mtime
, &target_stx
->stx_mtime
.tv_sec
);
11375 __put_user(st
.st_ctime
, &target_stx
->stx_ctime
.tv_sec
);
11376 unlock_user_struct(target_stx
, arg5
, 1);
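    /*
     * The syscalls below are the legacy 16-bit uid/gid variants:
     * low2high*() and high2low*() translate between the guest's 16-bit
     * ids and the host's full-width ones, mapping values that do not
     * fit to the 16-bit overflow id.
     */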
#ifdef TARGET_NR_lchown
    case TARGET_NR_lchown:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid
    case TARGET_NR_getuid:
        return get_errno(high2lowuid(getuid()));
#endif
#ifdef TARGET_NR_getgid
    case TARGET_NR_getgid:
        return get_errno(high2lowgid(getgid()));
#endif
#ifdef TARGET_NR_geteuid
    case TARGET_NR_geteuid:
        return get_errno(high2lowuid(geteuid()));
#endif
#ifdef TARGET_NR_getegid
    case TARGET_NR_getegid:
        return get_errno(high2lowgid(getegid()));
#endif
    case TARGET_NR_setreuid:
        return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
    case TARGET_NR_setregid:
        return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
    case TARGET_NR_getgroups:
        {
            int gidsetsize = arg1;
            target_id *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0)
                return ret;
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2,
                                             gidsetsize * sizeof(target_id),
                                             0);
                if (!target_grouplist)
                    return -TARGET_EFAULT;
                for (i = 0; i < ret; i++)
                    target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
                unlock_user(target_grouplist, arg2,
                            gidsetsize * sizeof(target_id));
            }
        }
        return ret;
    case TARGET_NR_setgroups:
        {
            int gidsetsize = arg1;
            target_id *target_grouplist;
            gid_t *grouplist = NULL;
            int i;
            if (gidsetsize) {
                grouplist = alloca(gidsetsize * sizeof(gid_t));
                target_grouplist = lock_user(VERIFY_READ, arg2,
                                             gidsetsize * sizeof(target_id),
                                             1);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < gidsetsize; i++) {
                    grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
                }
                unlock_user(target_grouplist, arg2, 0);
            }
            return get_errno(setgroups(gidsetsize, grouplist));
        }
    case TARGET_NR_fchown:
        return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
#if defined(TARGET_NR_fchownat)
    case TARGET_NR_fchownat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
                                 low2highgid(arg4), arg5));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_setresuid
    case TARGET_NR_setresuid:
        return get_errno(sys_setresuid(low2highuid(arg1),
                                       low2highuid(arg2),
                                       low2highuid(arg3)));
#endif
#ifdef TARGET_NR_getresuid
    case TARGET_NR_getresuid:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_id(high2lowuid(ruid), arg1)
                    || put_user_id(high2lowuid(euid), arg2)
                    || put_user_id(high2lowuid(suid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_getresgid
    case TARGET_NR_setresgid:
        return get_errno(sys_setresgid(low2highgid(arg1),
                                       low2highgid(arg2),
                                       low2highgid(arg3)));
#endif
#ifdef TARGET_NR_getresgid
    case TARGET_NR_getresgid:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_id(high2lowgid(rgid), arg1)
                    || put_user_id(high2lowgid(egid), arg2)
                    || put_user_id(high2lowgid(sgid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown
    case TARGET_NR_chown:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_setuid:
        return get_errno(sys_setuid(low2highuid(arg1)));
    case TARGET_NR_setgid:
        return get_errno(sys_setgid(low2highgid(arg1)));
    case TARGET_NR_setfsuid:
        return get_errno(setfsuid(arg1));
    case TARGET_NR_setfsgid:
        return get_errno(setfsgid(arg1));
#ifdef TARGET_NR_lchown32
    case TARGET_NR_lchown32:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid32
    case TARGET_NR_getuid32:
        return get_errno(getuid());
#endif
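    /*
     * The Alpha getx{uid,gid} syscalls return two values: the real id
     * through the normal return register and the effective id in a4,
     * which is why the handlers below store into ir[IR_A4] before
     * returning.
     */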
#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxuid:
        {
            uid_t euid;
            euid = geteuid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
        }
        return get_errno(getuid());
#endif
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxgid:
        {
            uid_t egid;
            egid = getegid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
        }
        return get_errno(getgid());
#endif
#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_getsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
        case TARGET_GSI_IEEE_FP_CONTROL:
            {
                uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
                uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;

                swcr &= ~SWCR_STATUS_MASK;
                swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;

                if (put_user_u64(swcr, arg2)) {
                    return -TARGET_EFAULT;
                }
                ret = 0;
            }
            break;

        /* case GSI_IEEE_STATE_AT_SIGNAL:
           -- Not implemented in linux kernel.
           case GSI_UACPROC:
           -- Retrieves current unaligned access state; not much used.
           case GSI_PROC_TYPE:
           -- Retrieves implver information; surely not used.
           case GSI_GET_HWRPB:
           -- Grabs a copy of the HWRPB; surely not used.
        */
        }
        return ret;
#endif
#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_setsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
        case TARGET_SSI_IEEE_FP_CONTROL:
            {
                uint64_t swcr, fpcr;

                if (get_user_u64(swcr, arg2)) {
                    return -TARGET_EFAULT;
                }

                /*
                 * The kernel calls swcr_update_status to update the
                 * status bits from the fpcr at every point that it
                 * could be queried.  Therefore, we store the status
                 * bits only in FPCR.
                 */
                ((CPUAlphaState *)cpu_env)->swcr
                    = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);

                fpcr = cpu_alpha_load_fpcr(cpu_env);
                fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
                fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
                cpu_alpha_store_fpcr(cpu_env, fpcr);
                ret = 0;
            }
            break;

        case TARGET_SSI_IEEE_RAISE_EXCEPTION:
            {
                uint64_t exc, fpcr, fex;

                if (get_user_u64(exc, arg2)) {
                    return -TARGET_EFAULT;
                }
                exc &= SWCR_STATUS_MASK;
                fpcr = cpu_alpha_load_fpcr(cpu_env);

                /* Old exceptions are not signaled. */
                fex = alpha_ieee_fpcr_to_swcr(fpcr);
                fex = exc & ~fex;
                fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
                fex &= ((CPUArchState *)cpu_env)->swcr;

                /* Update the hardware fpcr. */
                fpcr |= alpha_ieee_swcr_to_fpcr(exc);
                cpu_alpha_store_fpcr(cpu_env, fpcr);

                if (fex) {
                    int si_code = TARGET_FPE_FLTUNK;
                    target_siginfo_t info;

                    if (fex & SWCR_TRAP_ENABLE_DNO) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INE) {
                        si_code = TARGET_FPE_FLTRES;
                    }
                    if (fex & SWCR_TRAP_ENABLE_UNF) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_OVF) {
                        si_code = TARGET_FPE_FLTOVF;
                    }
                    if (fex & SWCR_TRAP_ENABLE_DZE) {
                        si_code = TARGET_FPE_FLTDIV;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INV) {
                        si_code = TARGET_FPE_FLTINV;
                    }

                    info.si_signo = SIGFPE;
                    info.si_errno = 0;
                    info.si_code = si_code;
                    info._sifields._sigfault._addr
                        = ((CPUArchState *)cpu_env)->pc;
                    queue_signal((CPUArchState *)cpu_env, info.si_signo,
                                 QEMU_SI_FAULT, &info);
                }
                ret = 0;
            }
            break;

        /* case SSI_NVPAIRS:
           -- Used with SSIN_UACPROC to enable unaligned accesses.
           case SSI_IEEE_STATE_AT_SIGNAL:
           case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
           -- Not implemented in linux kernel
        */
        }
        return ret;
#endif
#ifdef TARGET_NR_osf_sigprocmask
    /* Alpha specific. */
    case TARGET_NR_osf_sigprocmask:
        {
            abi_ulong mask;
            int how;
            sigset_t set, oldset;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);
            ret = do_sigprocmask(how, &set, &oldset);
            if (!ret) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
            }
        }
        return ret;
#endif
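    /*
     * The *32 syscalls below are the full-width uid/gid variants used
     * by targets whose original syscalls carried 16-bit ids, so they
     * deliberately skip the low2high/high2low conversions applied
     * above.
     */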
#ifdef TARGET_NR_getgid32
    case TARGET_NR_getgid32:
        return get_errno(getgid());
#endif
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        return get_errno(geteuid());
#endif
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        return get_errno(getegid());
#endif
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        return get_errno(setreuid(arg1, arg2));
#endif
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        return get_errno(setregid(arg1, arg2));
#endif
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        {
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0)
                return ret;
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2,
                                             gidsetsize * 4, 0);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < ret; i++)
                    target_grouplist[i] = tswap32(grouplist[i]);
                unlock_user(target_grouplist, arg2, gidsetsize * 4);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
        {
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            target_grouplist = lock_user(VERIFY_READ, arg2,
                                         gidsetsize * 4, 1);
            if (!target_grouplist) {
                return -TARGET_EFAULT;
            }
            for (i = 0; i < gidsetsize; i++)
                grouplist[i] = tswap32(target_grouplist[i]);
            unlock_user(target_grouplist, arg2, 0);
            return get_errno(setgroups(gidsetsize, grouplist));
        }
#endif
#ifdef TARGET_NR_fchown32
    case TARGET_NR_fchown32:
        return get_errno(fchown(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_setresuid32
    case TARGET_NR_setresuid32:
        return get_errno(sys_setresuid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresuid32
    case TARGET_NR_getresuid32:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_u32(ruid, arg1)
                    || put_user_u32(euid, arg2)
                    || put_user_u32(suid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setresgid32
    case TARGET_NR_setresgid32:
        return get_errno(sys_setresgid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresgid32
    case TARGET_NR_getresgid32:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_u32(rgid, arg1)
                    || put_user_u32(egid, arg2)
                    || put_user_u32(sgid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        return get_errno(sys_setuid(arg1));
#endif
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        return get_errno(sys_setgid(arg1));
#endif
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        return get_errno(setfsuid(arg1));
#endif
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        return get_errno(setfsgid(arg1));
#endif
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
            if (!a) {
                return -TARGET_ENOMEM;
            }
            p = lock_user_string(arg3);
            if (!p) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(mincore(a, arg2, p));
                unlock_user(p, arg3, ret);
            }
            unlock_user(a, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        /* arm_fadvise64_64 looks like fadvise64_64 but
         * with different argument order: fd, advice, offset, len
         * rather than the usual fd, offset, len, advice.
         * Note that offset and len are both 64-bit so appear as
         * pairs of 32-bit registers.
         */
        ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
                            target_offset64(arg5, arg6), arg2);
        return -host_to_target_errno(ret);
#endif

#if TARGET_ABI_BITS == 32

#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#if defined(TARGET_PPC) || defined(TARGET_XTENSA)
        /* 6 args: fd, advice, offset (high, low), len (high, low) */
        ret = arg2;
        arg2 = arg3;
        arg3 = arg4;
        arg4 = arg5;
        arg5 = arg6;
        arg6 = ret;
#else
        /* 6 args: fd, offset (high, low), len (high, low), advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in (5,6) and advice in 7 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
            arg6 = arg7;
        }
#endif
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
                            target_offset64(arg4, arg5), arg6);
        return -host_to_target_errno(ret);
#endif

#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
        /* 5 args: fd, offset (high, low), len, advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in 5 and advice in 6 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
        }
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
        return -host_to_target_errno(ret);
#endif

#else /* not a 32-bit ABI */
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
#endif
#ifdef TARGET_S390X
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        }
#endif
        return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
#endif
#endif /* end of 64-bit ABI fadvise handling */

#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        /* A straight passthrough may not be safe because qemu sometimes
           turns private file-backed mappings into anonymous mappings.
           This will break MADV_DONTNEED.
           This is a hint, so ignoring and returning success is ok. */
        return 0;
#endif
#ifdef TARGET_NR_fcntl64
    case TARGET_NR_fcntl64:
    {
        int cmd;
        struct flock64 fl;
        from_flock64_fn *copyfrom = copy_from_user_flock64;
        to_flock64_fn *copyto = copy_to_user_flock64;

#ifdef TARGET_ARM
        if (!((CPUARMState *)cpu_env)->eabi) {
            copyfrom = copy_from_user_oabi_flock64;
            copyto = copy_to_user_oabi_flock64;
        }
#endif

        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL) {
            return cmd;
        }

        switch (arg2) {
        case TARGET_F_GETLK64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            if (ret == 0) {
                ret = copyto(arg3, &fl);
            }
            break;

        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            break;
        default:
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        return 0;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        return TARGET_PAGE_SIZE;
#endif
    case TARGET_NR_gettid:
        return get_errno(sys_gettid());
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32
        if (regpairs_aligned(cpu_env, num)) {
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
        ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        return ret;
#endif
#ifdef CONFIG_ATTR
#ifdef TARGET_NR_setxattr
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    {
        void *p, *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        p = lock_user_string(arg1);
        if (p) {
            if (num == TARGET_NR_listxattr) {
                ret = get_errno(listxattr(p, b, arg3));
            } else {
                ret = get_errno(llistxattr(p, b, arg3));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_flistxattr:
    {
        void *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(flistxattr(arg1, b, arg3));
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_setxattr) {
                    ret = get_errno(setxattr(p, n, v, arg4, arg5));
                } else {
                    ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_fsetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_getxattr) {
                    ret = get_errno(getxattr(p, n, v, arg4));
                } else {
                    ret = get_errno(lgetxattr(p, n, v, arg4));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_fgetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fgetxattr(arg1, n, v, arg4));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
        {
            void *p, *n;
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_removexattr) {
                    ret = get_errno(removexattr(p, n));
                } else {
                    ret = get_errno(lremovexattr(p, n));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
        }
        return ret;
    case TARGET_NR_fremovexattr:
        {
            void *n;
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fremovexattr(arg1, n));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
        }
        return ret;
#endif
#endif /* CONFIG_ATTR */
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
        ((CPUMIPSState *)cpu_env)->active_tc.CP0_UserLocal = arg1;
        return 0;
#elif defined(TARGET_CRIS)
        if (arg1 & 0xff) {
            ret = -TARGET_EINVAL;
        } else {
            ((CPUCRISState *)cpu_env)->pregs[PR_PID] = arg1;
            ret = 0;
        }
        return ret;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_set_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            ts->tp_value = arg1;
            return 0;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_get_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            return ts->tp_value;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        return -TARGET_ENOSYS;
#endif
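    /*
     * Each clock_* syscall may have a _time64 twin on 32-bit guests;
     * the pairs below differ only in which target timespec conversion
     * helper they use.
     */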
#ifdef TARGET_NR_clock_settime
    case TARGET_NR_clock_settime:
    {
        struct timespec ts;

        ret = target_to_host_timespec(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_settime64
    case TARGET_NR_clock_settime64:
    {
        struct timespec ts;

        ret = target_to_host_timespec64(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime64
    case TARGET_NR_clock_gettime64:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec64(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres_time64
    case TARGET_NR_clock_getres_time64:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec64(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;
        if (target_to_host_timespec(&ts, arg3)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));
        /*
         * if the call is interrupted by a signal handler, it fails
         * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
         * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
         */
        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec(arg4, &ts)) {
            return -TARGET_EFAULT;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep_time64
    case TARGET_NR_clock_nanosleep_time64:
    {
        struct timespec ts;

        if (target_to_host_timespec64(&ts, arg3)) {
            return -TARGET_EFAULT;
        }

        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));

        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec64(arg4, &ts)) {
            return -TARGET_EFAULT;
        }
        return ret;
    }
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
    case TARGET_NR_set_tid_address:
        return get_errno(set_tid_address((int *)g2h(cpu, arg1)));
#endif

    case TARGET_NR_tkill:
        return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));

    case TARGET_NR_tgkill:
        return get_errno(safe_tgkill((int)arg1, (int)arg2,
                         target_to_host_signal(arg3)));

#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
    case TARGET_NR_get_robust_list:
        /* The ABI for supporting robust futexes has userspace pass
         * the kernel a pointer to a linked list which is updated by
         * userspace after the syscall; the list is walked by the kernel
         * when the thread exits. Since the linked list in QEMU guest
         * memory isn't a valid linked list for the host and we have
         * no way to reliably intercept the thread-death event, we can't
         * support these. Silently return ENOSYS so that guest userspace
         * falls back to a non-robust futex implementation (which should
         * be OK except in the corner case of the guest crashing while
         * holding a mutex that is shared with another process via
         * shared memory).
         */
        return -TARGET_ENOSYS;
#endif
#if defined(TARGET_NR_utimensat)
    case TARGET_NR_utimensat:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec(ts + 1, arg3 +
                                            sizeof(struct target_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                if (!(p = lock_user_string(arg2))) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_utimensat_time64
    case TARGET_NR_utimensat_time64:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec64(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec64(ts + 1, arg3 +
                                     sizeof(struct target__kernel_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                p = lock_user_string(arg2);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_futex
    case TARGET_NR_futex:
        return do_futex(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_futex_time64
    case TARGET_NR_futex_time64:
        return do_futex_time64(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(sys_inotify_init());
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
    case TARGET_NR_inotify_init1:
        ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
                                          fcntl_flags_tbl)));
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        return get_errno(sys_inotify_rm_watch(arg1, arg2));
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            struct mq_attr posix_mq_attr;
            struct mq_attr *pposix_mq_attr;
            int host_flags;

            host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
            pposix_mq_attr = NULL;
            if (arg4) {
                if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
                    return -TARGET_EFAULT;
                }
                pposix_mq_attr = &posix_mq_attr;
            }
            p = lock_user_string(arg1 - 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
            unlock_user(p, arg1, 0);
        }
        return ret;

    case TARGET_NR_mq_unlink:
        p = lock_user_string(arg1 - 1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(mq_unlink(p));
        unlock_user(p, arg1, 0);
        return ret;
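    /*
     * For the mq_timed{send,receive} handlers below, a zero arg5 means
     * "no timeout": NULL is passed to the host call so it blocks
     * indefinitely, and the timespec is only converted and written
     * back when the guest actually supplied one.
     */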
#ifdef TARGET_NR_mq_timedsend
    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user(p, arg2, arg3);
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedsend_time64
    case TARGET_NR_mq_timedsend_time64:
        {
            struct timespec ts;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user(p, arg2, arg3);
        }
        return ret;
#endif

#ifdef TARGET_NR_mq_timedreceive
    case TARGET_NR_mq_timedreceive:
        {
            struct timespec ts;
            unsigned int prio;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user(p, arg2, arg3);
            if (arg4 != 0) {
                put_user_u32(prio, arg4);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedreceive_time64
    case TARGET_NR_mq_timedreceive_time64:
        {
            struct timespec ts;
            unsigned int prio;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user(p, arg2, arg3);
            if (arg4 != 0) {
                put_user_u32(prio, arg4);
            }
        }
        return ret;
#endif

    /* Not implemented for now... */
    /* case TARGET_NR_mq_notify: */

    case TARGET_NR_mq_getsetattr:
        {
            struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
            ret = 0;
            if (arg2 != 0) {
                copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
                ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
                                           &posix_mq_attr_out));
            } else if (arg3 != 0) {
                ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
            }
            if (ret == 0 && arg3 != 0) {
                copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
            }
        }
        return ret;
#endif
#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            ret = get_errno(tee(arg1, arg2, arg3, arg4));
        }
        return ret;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                if (get_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
                ploff_in = &loff_in;
            }
            if (arg4) {
                if (get_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out,
                                   arg5, arg6));
            if (arg2) {
                if (put_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
            }
            if (arg4) {
                if (put_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
    {
        int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
        if (arg2 & TARGET_O_NONBLOCK) {
            host_flags |= O_NONBLOCK;
        }
        if (arg2 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;
        }
        ret = get_errno(eventfd(arg1, host_flags));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
    }
#endif
#endif /* CONFIG_EVENTFD */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(TARGET_NR_sync_file_range2) || \
    defined(TARGET_NR_arm_sync_file_range)
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
#endif
#if defined(TARGET_NR_arm_sync_file_range)
    case TARGET_NR_arm_sync_file_range:
#endif
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        return ret;
#endif
#endif
#if defined(TARGET_NR_signalfd4)
    case TARGET_NR_signalfd4:
        return do_signalfd4(arg1, arg2, arg4);
#endif
#if defined(TARGET_NR_signalfd)
    case TARGET_NR_signalfd:
        return do_signalfd4(arg1, arg2, 0);
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        return get_errno(epoll_create(arg1));
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        return get_errno(epoll_create1(target_to_host_bitmask(arg1,
                                                              fcntl_flags_tbl)));
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            if (arg2 != EPOLL_CTL_DEL) {
                struct target_epoll_event *target_ep;
                if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                    return -TARGET_EFAULT;
                }
                ep.events = tswap32(target_ep->events);
                /*
                 * The epoll_data_t union is just opaque data to the kernel,
                 * so we transfer all 64 bits across and need not worry what
                 * actual data type it is.
                 */
                ep.data.u64 = tswap64(target_ep->data.u64);
                unlock_user_struct(target_ep, arg4, 0);
            }
            /*
             * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
             * non-null pointer, even though this argument is ignored.
             */
            epp = &ep;
        }
        return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
    }
#endif
#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(TARGET_NR_epoll_pwait)
    case TARGET_NR_epoll_pwait:
#endif
    {
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
            return -TARGET_EINVAL;
        }

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event),
                              1);
        if (!target_ep) {
            return -TARGET_EFAULT;
        }

        ep = g_try_new(struct epoll_event, maxevents);
        if (!ep) {
            unlock_user(target_ep, arg2, 0);
            return -TARGET_ENOMEM;
        }

        switch (num) {
#if defined(TARGET_NR_epoll_pwait)
        case TARGET_NR_epoll_pwait:
        {
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg5) {
                if (arg6 != sizeof(target_sigset_t)) {
                    ret = -TARGET_EINVAL;
                    break;
                }

                target_set = lock_user(VERIFY_READ, arg5,
                                       sizeof(target_sigset_t), 1);
                if (!target_set) {
                    ret = -TARGET_EFAULT;
                    break;
                }
                target_to_host_sigset(set, target_set);
                unlock_user(target_set, arg5, 0);
            } else {
                set = NULL;
            }

            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             set, SIGSET_T_SIZE));
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             NULL, 0));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
            unlock_user(target_ep, arg2,
                        ret * sizeof(struct target_epoll_event));
        } else {
            unlock_user(target_ep, arg2, 0);
        }
        g_free(ep);
        return ret;
    }
#endif
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        int resource = target_to_host_resource(arg2);

        if (arg3 && (resource != RLIMIT_AS &&
                     resource != RLIMIT_DATA &&
                     resource != RLIMIT_STACK)) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                return -TARGET_EFAULT;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, resource, rnewp,
                                      arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                return -TARGET_EFAULT;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (name) {
            ret = get_errno(gethostname(name, arg2));
            unlock_user(name, arg1, arg2);
        } else {
            ret = -TARGET_EFAULT;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* should use start_exclusive from main.c */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal((CPUArchState *)cpu_env, info.si_signo,
                         QEMU_SI_FAULT, &info);
            ret = 0xdeadbeef;
        }
        if (mem_value == arg2)
            put_user_u32(arg1, arg6);
        return mem_value;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
        /* Like the kernel implementation and the
           qemu arm barrier, no-op this? */
        return 0;
#endif
#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers + timer_index;

            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    return ret;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                phtimer = NULL;
            } else {
                if (put_user(TIMER_MAGIC | timer_index, arg3,
                             target_timer_t)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    }
#endif
12968 case TARGET_NR_timer_settime
:
12970 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12971 * struct itimerspec * old_value */
12972 target_timer_t timerid
= get_timer_id(arg1
);
12976 } else if (arg3
== 0) {
12977 ret
= -TARGET_EINVAL
;
12979 timer_t htimer
= g_posix_timers
[timerid
];
12980 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
12982 if (target_to_host_itimerspec(&hspec_new
, arg3
)) {
12983 return -TARGET_EFAULT
;
12986 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
12987 if (arg4
&& host_to_target_itimerspec(arg4
, &hspec_old
)) {
12988 return -TARGET_EFAULT
;
12995 #ifdef TARGET_NR_timer_settime64
12996 case TARGET_NR_timer_settime64
:
12998 target_timer_t timerid
= get_timer_id(arg1
);
13002 } else if (arg3
== 0) {
13003 ret
= -TARGET_EINVAL
;
13005 timer_t htimer
= g_posix_timers
[timerid
];
13006 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
13008 if (target_to_host_itimerspec64(&hspec_new
, arg3
)) {
13009 return -TARGET_EFAULT
;
13012 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
13013 if (arg4
&& host_to_target_itimerspec64(arg4
, &hspec_old
)) {
13014 return -TARGET_EFAULT
;
13021 #ifdef TARGET_NR_timer_gettime
13022 case TARGET_NR_timer_gettime
:
13024 /* args: timer_t timerid, struct itimerspec *curr_value */
13025 target_timer_t timerid
= get_timer_id(arg1
);
13029 } else if (!arg2
) {
13030 ret
= -TARGET_EFAULT
;
13032 timer_t htimer
= g_posix_timers
[timerid
];
13033 struct itimerspec hspec
;
13034 ret
= get_errno(timer_gettime(htimer
, &hspec
));
13036 if (host_to_target_itimerspec(arg2
, &hspec
)) {
13037 ret
= -TARGET_EFAULT
;
13044 #ifdef TARGET_NR_timer_gettime64
13045 case TARGET_NR_timer_gettime64
:
13047 /* args: timer_t timerid, struct itimerspec64 *curr_value */
13048 target_timer_t timerid
= get_timer_id(arg1
);
13052 } else if (!arg2
) {
13053 ret
= -TARGET_EFAULT
;
13055 timer_t htimer
= g_posix_timers
[timerid
];
13056 struct itimerspec hspec
;
13057 ret
= get_errno(timer_gettime(htimer
, &hspec
));
13059 if (host_to_target_itimerspec64(arg2
, &hspec
)) {
13060 ret
= -TARGET_EFAULT
;
13067 #ifdef TARGET_NR_timer_getoverrun
13068 case TARGET_NR_timer_getoverrun
:
13070 /* args: timer_t timerid */
13071 target_timer_t timerid
= get_timer_id(arg1
);
13076 timer_t htimer
= g_posix_timers
[timerid
];
13077 ret
= get_errno(timer_getoverrun(htimer
));
13083 #ifdef TARGET_NR_timer_delete
13084 case TARGET_NR_timer_delete
:
13086 /* args: timer_t timerid */
13087 target_timer_t timerid
= get_timer_id(arg1
);
13092 timer_t htimer
= g_posix_timers
[timerid
];
13093 ret
= get_errno(timer_delete(htimer
));
13094 g_posix_timers
[timerid
] = 0;
#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        return get_errno(timerfd_create(arg1,
                         target_to_host_bitmask(arg2, fcntl_flags_tbl)));
#endif

#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime64:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
        {
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime64:
        {
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec64(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    case TARGET_NR_ioprio_get:
        return get_errno(ioprio_get(arg1, arg2));
#endif

#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    case TARGET_NR_ioprio_set:
        return get_errno(ioprio_set(arg1, arg2, arg3));
#endif

#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        return get_errno(setns(arg1, arg2));
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        return get_errno(unshare(arg1));
#endif
#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
    case TARGET_NR_kcmp:
        return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
#endif
#ifdef TARGET_NR_swapcontext
    case TARGET_NR_swapcontext:
        /* PowerPC specific. */
        return do_swapcontext(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_memfd_create
    case TARGET_NR_memfd_create:
        p = lock_user_string(arg1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(memfd_create(p, arg2));
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined TARGET_NR_membarrier && defined __NR_membarrier
    case TARGET_NR_membarrier:
        return get_errno(membarrier(arg1, arg2));
#endif

#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
    case TARGET_NR_copy_file_range:
        {
            loff_t inoff, outoff;
            loff_t *pinoff = NULL, *poutoff = NULL;

            if (arg2) {
                if (get_user_u64(inoff, arg2)) {
                    return -TARGET_EFAULT;
                }
                pinoff = &inoff;
            }
            if (arg4) {
                if (get_user_u64(outoff, arg4)) {
                    return -TARGET_EFAULT;
                }
                poutoff = &outoff;
            }
            ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
                                                 arg5, arg6));
            if (!is_error(ret) && ret > 0) {
                if (arg2) {
                    if (put_user_u64(inoff, arg2)) {
                        return -TARGET_EFAULT;
                    }
                }
                if (arg4) {
                    if (put_user_u64(outoff, arg4)) {
                        return -TARGET_EFAULT;
                    }
                }
            }
        }
        return ret;
#endif

    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
        return -TARGET_ENOSYS;
    }
    return ret;
}
abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
                    abi_long arg2, abi_long arg3, abi_long arg4,
                    abi_long arg5, abi_long arg6, abi_long arg7,
                    abi_long arg8)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long ret;

#ifdef DEBUG_ERESTARTSYS
    /* Debug-only code for exercising the syscall-restart code paths
     * in the per-architecture cpu main loops: restart every syscall
     * the guest makes once before letting it through.
     */
    {
        static bool flag;
        flag = !flag;
        if (flag) {
            return -TARGET_ERESTARTSYS;
        }
    }
#endif

    record_syscall_start(cpu, num, arg1,
                         arg2, arg3, arg4, arg5, arg6, arg7, arg8);

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
    }

    ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
                      arg5, arg6, arg7, arg8);

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_syscall_ret(cpu_env, num, ret, arg1, arg2,
                          arg3, arg4, arg5, arg6);
    }

    record_syscall_return(cpu, num, ret);
    return ret;
}