4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
31 #include <sys/mount.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
38 #include <linux/capability.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
46 #include <sys/times.h>
49 #include <sys/statfs.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
66 #include <sys/timerfd.h>
69 #include <sys/eventfd.h>
72 #include <sys/epoll.h>
75 #include "qemu/xattr.h"
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
80 #ifdef HAVE_SYS_KCOV_H
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
97 #include <linux/mtio.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
103 #include <linux/fb.h>
104 #if defined(CONFIG_USBFS)
105 #include <linux/usbdevice_fs.h>
106 #include <linux/usb/ch9.h>
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include <netpacket/packet.h>
115 #include <linux/netlink.h>
116 #include <linux/if_alg.h>
117 #include <linux/rtc.h>
118 #include <sound/asound.h>
120 #include <linux/btrfs.h>
123 #include <libdrm/drm.h>
124 #include <libdrm/i915_drm.h>
126 #include "linux_loop.h"
130 #include "qemu/guest-random.h"
131 #include "qemu/selfmap.h"
132 #include "user/syscall-trace.h"
133 #include "qapi/error.h"
134 #include "fd-trans.h"
138 #define CLONE_IO 0x80000000 /* Clone io context */
141 /* We can't directly call the host clone syscall, because this will
142 * badly confuse libc (breaking mutexes, for example). So we must
143 * divide clone flags into:
144 * * flag combinations that look like pthread_create()
145 * * flag combinations that look like fork()
146 * * flags we can implement within QEMU itself
147 * * flags we can't support and will return an error for
149 /* For thread creation, all these flags must be present; for
150 * fork, none must be present.
152 #define CLONE_THREAD_FLAGS \
153 (CLONE_VM | CLONE_FS | CLONE_FILES | \
154 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
156 /* These flags are ignored:
157 * CLONE_DETACHED is now ignored by the kernel;
158 * CLONE_IO is just an optimisation hint to the I/O scheduler
160 #define CLONE_IGNORED_FLAGS \
161 (CLONE_DETACHED | CLONE_IO)
163 /* Flags for fork which we can implement within QEMU itself */
164 #define CLONE_OPTIONAL_FORK_FLAGS \
165 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
166 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
168 /* Flags for thread creation which we can implement within QEMU itself */
169 #define CLONE_OPTIONAL_THREAD_FLAGS \
170 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
171 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
173 #define CLONE_INVALID_FORK_FLAGS \
174 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
176 #define CLONE_INVALID_THREAD_FLAGS \
177 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
178 CLONE_IGNORED_FLAGS))
180 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
181 * have almost all been allocated. We cannot support any of
182 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
183 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
184 * The checks against the invalid thread masks above will catch these.
185 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
188 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
189 * once. This exercises the codepaths for restart.
191 //#define DEBUG_ERESTARTSYS
193 //#include <linux/msdos_fs.h>
194 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
195 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
/*
 * _syscallN(rettype, name, argtype1, argname1, ...) defines a static
 * wrapper function "name" that traps directly to the host kernel via
 * syscall(2), bypassing any libc wrapper.  Used for syscalls glibc does
 * not expose, or where the libc wrapper's behaviour would interfere
 * with emulation.
 */
#define _syscall0(type,name)            \
static type name (void)                 \
{                                       \
    return syscall(__NR_##name);        \
}

#define _syscall1(type,name,type1,arg1)         \
static type name (type1 arg1)                   \
{                                               \
    return syscall(__NR_##name, arg1);          \
}

#define _syscall2(type,name,type1,arg1,type2,arg2)      \
static type name (type1 arg1,type2 arg2)                \
{                                                       \
    return syscall(__NR_##name, arg1, arg2);            \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)   \
static type name (type1 arg1,type2 arg2,type3 arg3)             \
{                                                               \
    return syscall(__NR_##name, arg1, arg2, arg3);              \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)   \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)             \
{                                                                          \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4);                   \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,   \
                  type5,arg5)                                              \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)  \
{                                                                          \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);             \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,   \
                  type5,arg5,type6,arg6)                                   \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,  \
                  type6 arg6)                                              \
{                                                                          \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);       \
}
252 #define __NR_sys_uname __NR_uname
253 #define __NR_sys_getcwd1 __NR_getcwd
254 #define __NR_sys_getdents __NR_getdents
255 #define __NR_sys_getdents64 __NR_getdents64
256 #define __NR_sys_getpriority __NR_getpriority
257 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
258 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
259 #define __NR_sys_syslog __NR_syslog
260 #if defined(__NR_futex)
261 # define __NR_sys_futex __NR_futex
263 #if defined(__NR_futex_time64)
264 # define __NR_sys_futex_time64 __NR_futex_time64
266 #define __NR_sys_inotify_init __NR_inotify_init
267 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
268 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
269 #define __NR_sys_statx __NR_statx
271 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
272 #define __NR__llseek __NR_lseek
275 /* Newer kernel ports have llseek() instead of _llseek() */
276 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
277 #define TARGET_NR__llseek TARGET_NR_llseek
280 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
281 #ifndef TARGET_O_NONBLOCK_MASK
282 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
285 #define __NR_sys_gettid __NR_gettid
286 _syscall0(int, sys_gettid
)
288 /* For the 64-bit guest on 32-bit host case we must emulate
289 * getdents using getdents64, because otherwise the host
290 * might hand us back more dirent records than we can fit
291 * into the guest buffer after structure format conversion.
292 * Otherwise we emulate getdents with getdents if the host has it.
294 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
295 #define EMULATE_GETDENTS_WITH_GETDENTS
298 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
299 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
301 #if (defined(TARGET_NR_getdents) && \
302 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
303 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
304 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
306 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
307 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
308 loff_t
*, res
, uint
, wh
);
310 _syscall3(int, sys_rt_sigqueueinfo
, pid_t
, pid
, int, sig
, siginfo_t
*, uinfo
)
311 _syscall4(int, sys_rt_tgsigqueueinfo
, pid_t
, pid
, pid_t
, tid
, int, sig
,
313 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
314 #ifdef __NR_exit_group
315 _syscall1(int,exit_group
,int,error_code
)
317 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
318 _syscall1(int,set_tid_address
,int *,tidptr
)
320 #if defined(__NR_futex)
321 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
322 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
324 #if defined(__NR_futex_time64)
325 _syscall6(int,sys_futex_time64
,int *,uaddr
,int,op
,int,val
,
326 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
328 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
329 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
330 unsigned long *, user_mask_ptr
);
331 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
332 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
333 unsigned long *, user_mask_ptr
);
334 #define __NR_sys_getcpu __NR_getcpu
335 _syscall3(int, sys_getcpu
, unsigned *, cpu
, unsigned *, node
, void *, tcache
);
336 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
338 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
339 struct __user_cap_data_struct
*, data
);
340 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
341 struct __user_cap_data_struct
*, data
);
342 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
343 _syscall2(int, ioprio_get
, int, which
, int, who
)
345 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
346 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
348 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
349 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
352 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
353 _syscall5(int, kcmp
, pid_t
, pid1
, pid_t
, pid2
, int, type
,
354 unsigned long, idx1
, unsigned long, idx2
)
358 * It is assumed that struct statx is architecture independent.
360 #if defined(TARGET_NR_statx) && defined(__NR_statx)
361 _syscall5(int, sys_statx
, int, dirfd
, const char *, pathname
, int, flags
,
362 unsigned int, mask
, struct target_statx
*, statxbuf
)
364 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
365 _syscall2(int, membarrier
, int, cmd
, int, flags
)
368 static bitmask_transtbl fcntl_flags_tbl
[] = {
369 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
370 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
371 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
372 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
373 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
374 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
375 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
376 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
377 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
378 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
379 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
380 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
381 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
382 #if defined(O_DIRECT)
383 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
385 #if defined(O_NOATIME)
386 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
388 #if defined(O_CLOEXEC)
389 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
392 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
394 #if defined(O_TMPFILE)
395 { TARGET_O_TMPFILE
, TARGET_O_TMPFILE
, O_TMPFILE
, O_TMPFILE
},
397 /* Don't terminate the list prematurely on 64-bit host+guest. */
398 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
399 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
404 _syscall2(int, sys_getcwd1
, char *, buf
, size_t, size
)
406 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
407 #if defined(__NR_utimensat)
408 #define __NR_sys_utimensat __NR_utimensat
409 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
410 const struct timespec
*,tsp
,int,flags
)
412 static int sys_utimensat(int dirfd
, const char *pathname
,
413 const struct timespec times
[2], int flags
)
419 #endif /* TARGET_NR_utimensat */
421 #ifdef TARGET_NR_renameat2
422 #if defined(__NR_renameat2)
423 #define __NR_sys_renameat2 __NR_renameat2
424 _syscall5(int, sys_renameat2
, int, oldfd
, const char *, old
, int, newfd
,
425 const char *, new, unsigned int, flags
)
427 static int sys_renameat2(int oldfd
, const char *old
,
428 int newfd
, const char *new, int flags
)
431 return renameat(oldfd
, old
, newfd
, new);
437 #endif /* TARGET_NR_renameat2 */
439 #ifdef CONFIG_INOTIFY
440 #include <sys/inotify.h>
442 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
443 static int sys_inotify_init(void)
445 return (inotify_init());
448 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
449 static int sys_inotify_add_watch(int fd
,const char *pathname
, int32_t mask
)
451 return (inotify_add_watch(fd
, pathname
, mask
));
454 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
455 static int sys_inotify_rm_watch(int fd
, int32_t wd
)
457 return (inotify_rm_watch(fd
, wd
));
460 #ifdef CONFIG_INOTIFY1
461 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
462 static int sys_inotify_init1(int flags
)
464 return (inotify_init1(flags
));
469 /* Userspace can usually survive runtime without inotify */
470 #undef TARGET_NR_inotify_init
471 #undef TARGET_NR_inotify_init1
472 #undef TARGET_NR_inotify_add_watch
473 #undef TARGET_NR_inotify_rm_watch
474 #endif /* CONFIG_INOTIFY */
476 #if defined(TARGET_NR_prlimit64)
477 #ifndef __NR_prlimit64
478 # define __NR_prlimit64 -1
480 #define __NR_sys_prlimit64 __NR_prlimit64
481 /* The glibc rlimit structure may not be that used by the underlying syscall */
482 struct host_rlimit64
{
486 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
487 const struct host_rlimit64
*, new_limit
,
488 struct host_rlimit64
*, old_limit
)
492 #if defined(TARGET_NR_timer_create)
493 /* Maximum of 32 active POSIX timers allowed at any one time. */
494 static timer_t g_posix_timers
[32] = { 0, } ;
496 static inline int next_free_host_timer(void)
499 /* FIXME: Does finding the next free slot require a lock? */
500 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
501 if (g_posix_timers
[k
] == 0) {
502 g_posix_timers
[k
] = (timer_t
) 1;
510 #define ERRNO_TABLE_SIZE 1200
512 /* target_to_host_errno_table[] is initialized from
513 * host_to_target_errno_table[] in syscall_init(). */
514 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
518 * This list is the union of errno values overridden in asm-<arch>/errno.h
519 * minus the errnos that are not actually generic to all archs.
521 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
522 [EAGAIN
] = TARGET_EAGAIN
,
523 [EIDRM
] = TARGET_EIDRM
,
524 [ECHRNG
] = TARGET_ECHRNG
,
525 [EL2NSYNC
] = TARGET_EL2NSYNC
,
526 [EL3HLT
] = TARGET_EL3HLT
,
527 [EL3RST
] = TARGET_EL3RST
,
528 [ELNRNG
] = TARGET_ELNRNG
,
529 [EUNATCH
] = TARGET_EUNATCH
,
530 [ENOCSI
] = TARGET_ENOCSI
,
531 [EL2HLT
] = TARGET_EL2HLT
,
532 [EDEADLK
] = TARGET_EDEADLK
,
533 [ENOLCK
] = TARGET_ENOLCK
,
534 [EBADE
] = TARGET_EBADE
,
535 [EBADR
] = TARGET_EBADR
,
536 [EXFULL
] = TARGET_EXFULL
,
537 [ENOANO
] = TARGET_ENOANO
,
538 [EBADRQC
] = TARGET_EBADRQC
,
539 [EBADSLT
] = TARGET_EBADSLT
,
540 [EBFONT
] = TARGET_EBFONT
,
541 [ENOSTR
] = TARGET_ENOSTR
,
542 [ENODATA
] = TARGET_ENODATA
,
543 [ETIME
] = TARGET_ETIME
,
544 [ENOSR
] = TARGET_ENOSR
,
545 [ENONET
] = TARGET_ENONET
,
546 [ENOPKG
] = TARGET_ENOPKG
,
547 [EREMOTE
] = TARGET_EREMOTE
,
548 [ENOLINK
] = TARGET_ENOLINK
,
549 [EADV
] = TARGET_EADV
,
550 [ESRMNT
] = TARGET_ESRMNT
,
551 [ECOMM
] = TARGET_ECOMM
,
552 [EPROTO
] = TARGET_EPROTO
,
553 [EDOTDOT
] = TARGET_EDOTDOT
,
554 [EMULTIHOP
] = TARGET_EMULTIHOP
,
555 [EBADMSG
] = TARGET_EBADMSG
,
556 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
557 [EOVERFLOW
] = TARGET_EOVERFLOW
,
558 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
559 [EBADFD
] = TARGET_EBADFD
,
560 [EREMCHG
] = TARGET_EREMCHG
,
561 [ELIBACC
] = TARGET_ELIBACC
,
562 [ELIBBAD
] = TARGET_ELIBBAD
,
563 [ELIBSCN
] = TARGET_ELIBSCN
,
564 [ELIBMAX
] = TARGET_ELIBMAX
,
565 [ELIBEXEC
] = TARGET_ELIBEXEC
,
566 [EILSEQ
] = TARGET_EILSEQ
,
567 [ENOSYS
] = TARGET_ENOSYS
,
568 [ELOOP
] = TARGET_ELOOP
,
569 [ERESTART
] = TARGET_ERESTART
,
570 [ESTRPIPE
] = TARGET_ESTRPIPE
,
571 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
572 [EUSERS
] = TARGET_EUSERS
,
573 [ENOTSOCK
] = TARGET_ENOTSOCK
,
574 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
575 [EMSGSIZE
] = TARGET_EMSGSIZE
,
576 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
577 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
578 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
579 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
580 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
581 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
582 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
583 [EADDRINUSE
] = TARGET_EADDRINUSE
,
584 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
585 [ENETDOWN
] = TARGET_ENETDOWN
,
586 [ENETUNREACH
] = TARGET_ENETUNREACH
,
587 [ENETRESET
] = TARGET_ENETRESET
,
588 [ECONNABORTED
] = TARGET_ECONNABORTED
,
589 [ECONNRESET
] = TARGET_ECONNRESET
,
590 [ENOBUFS
] = TARGET_ENOBUFS
,
591 [EISCONN
] = TARGET_EISCONN
,
592 [ENOTCONN
] = TARGET_ENOTCONN
,
593 [EUCLEAN
] = TARGET_EUCLEAN
,
594 [ENOTNAM
] = TARGET_ENOTNAM
,
595 [ENAVAIL
] = TARGET_ENAVAIL
,
596 [EISNAM
] = TARGET_EISNAM
,
597 [EREMOTEIO
] = TARGET_EREMOTEIO
,
598 [EDQUOT
] = TARGET_EDQUOT
,
599 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
600 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
601 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
602 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
603 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
604 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
605 [EALREADY
] = TARGET_EALREADY
,
606 [EINPROGRESS
] = TARGET_EINPROGRESS
,
607 [ESTALE
] = TARGET_ESTALE
,
608 [ECANCELED
] = TARGET_ECANCELED
,
609 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
610 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
612 [ENOKEY
] = TARGET_ENOKEY
,
615 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
618 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
621 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
624 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
626 #ifdef ENOTRECOVERABLE
627 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
630 [ENOMSG
] = TARGET_ENOMSG
,
633 [ERFKILL
] = TARGET_ERFKILL
,
636 [EHWPOISON
] = TARGET_EHWPOISON
,
640 static inline int host_to_target_errno(int err
)
642 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
643 host_to_target_errno_table
[err
]) {
644 return host_to_target_errno_table
[err
];
649 static inline int target_to_host_errno(int err
)
651 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
652 target_to_host_errno_table
[err
]) {
653 return target_to_host_errno_table
[err
];
658 static inline abi_long
get_errno(abi_long ret
)
661 return -host_to_target_errno(errno
);
666 const char *target_strerror(int err
)
668 if (err
== TARGET_ERESTARTSYS
) {
669 return "To be restarted";
671 if (err
== TARGET_QEMU_ESIGRETURN
) {
672 return "Successful exit from sigreturn";
675 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
678 return strerror(target_to_host_errno(err
));
/*
 * safe_syscallN() wrappers: like _syscallN() but routed through
 * safe_syscall(), which cooperates with guest signal handling so a
 * blocking syscall is interrupted/restarted atomically with respect to
 * pending guest signals.
 */
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
728 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
729 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
730 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
731 int, flags
, mode_t
, mode
)
732 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
733 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
734 struct rusage
*, rusage
)
736 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
737 int, options
, struct rusage
*, rusage
)
738 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
739 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
740 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
741 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
742 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
744 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
745 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
746 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
749 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
750 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
752 #if defined(__NR_futex)
753 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
754 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
756 #if defined(__NR_futex_time64)
757 safe_syscall6(int,futex_time64
,int *,uaddr
,int,op
,int,val
, \
758 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
760 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
761 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
762 safe_syscall2(int, tkill
, int, tid
, int, sig
)
763 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
764 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
765 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
766 safe_syscall5(ssize_t
, preadv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
767 unsigned long, pos_l
, unsigned long, pos_h
)
768 safe_syscall5(ssize_t
, pwritev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
769 unsigned long, pos_l
, unsigned long, pos_h
)
770 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
772 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
773 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
774 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
775 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
776 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
777 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
778 safe_syscall2(int, flock
, int, fd
, int, operation
)
779 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
780 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
781 const struct timespec
*, uts
, size_t, sigsetsize
)
783 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
785 #if defined(TARGET_NR_nanosleep)
786 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
787 struct timespec
*, rem
)
789 #if defined(TARGET_NR_clock_nanosleep) || \
790 defined(TARGET_NR_clock_nanosleep_time64)
791 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
792 const struct timespec
*, req
, struct timespec
*, rem
)
796 safe_syscall5(int, ipc
, int, call
, long, first
, long, second
, long, third
,
799 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
800 void *, ptr
, long, fifth
)
804 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
808 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
809 long, msgtype
, int, flags
)
811 #ifdef __NR_semtimedop
812 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
813 unsigned, nsops
, const struct timespec
*, timeout
)
815 #if defined(TARGET_NR_mq_timedsend) || \
816 defined(TARGET_NR_mq_timedsend_time64)
817 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
818 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
820 #if defined(TARGET_NR_mq_timedreceive) || \
821 defined(TARGET_NR_mq_timedreceive_time64)
822 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
823 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
825 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
826 safe_syscall6(ssize_t
, copy_file_range
, int, infd
, loff_t
*, pinoff
,
827 int, outfd
, loff_t
*, poutoff
, size_t, length
,
831 /* We do ioctl like this rather than via safe_syscall3 to preserve the
832 * "third argument might be integer or pointer or not present" behaviour of
835 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
836 /* Similarly for fcntl. Note that callers must always:
837 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
838 * use the flock64 struct rather than unsuffixed flock
839 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
842 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
844 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
847 static inline int host_to_target_sock_type(int host_type
)
851 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
853 target_type
= TARGET_SOCK_DGRAM
;
856 target_type
= TARGET_SOCK_STREAM
;
859 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
863 #if defined(SOCK_CLOEXEC)
864 if (host_type
& SOCK_CLOEXEC
) {
865 target_type
|= TARGET_SOCK_CLOEXEC
;
869 #if defined(SOCK_NONBLOCK)
870 if (host_type
& SOCK_NONBLOCK
) {
871 target_type
|= TARGET_SOCK_NONBLOCK
;
878 static abi_ulong target_brk
;
879 static abi_ulong target_original_brk
;
880 static abi_ulong brk_page
;
882 void target_set_brk(abi_ulong new_brk
)
884 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
885 brk_page
= HOST_PAGE_ALIGN(target_brk
);
888 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
889 #define DEBUGF_BRK(message, args...)
891 /* do_brk() must return target values and target errnos. */
892 abi_long
do_brk(abi_ulong new_brk
)
894 abi_long mapped_addr
;
895 abi_ulong new_alloc_size
;
897 /* brk pointers are always untagged */
899 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
902 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
905 if (new_brk
< target_original_brk
) {
906 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
911 /* If the new brk is less than the highest page reserved to the
912 * target heap allocation, set it and we're almost done... */
913 if (new_brk
<= brk_page
) {
914 /* Heap contents are initialized to zero, as for anonymous
916 if (new_brk
> target_brk
) {
917 memset(g2h_untagged(target_brk
), 0, new_brk
- target_brk
);
919 target_brk
= new_brk
;
920 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
924 /* We need to allocate more memory after the brk... Note that
925 * we don't use MAP_FIXED because that will map over the top of
926 * any existing mapping (like the one with the host libc or qemu
927 * itself); instead we treat "mapped but at wrong address" as
928 * a failure and unmap again.
930 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
931 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
932 PROT_READ
|PROT_WRITE
,
933 MAP_ANON
|MAP_PRIVATE
, 0, 0));
935 if (mapped_addr
== brk_page
) {
936 /* Heap contents are initialized to zero, as for anonymous
937 * mapped pages. Technically the new pages are already
938 * initialized to zero since they *are* anonymous mapped
939 * pages, however we have to take care with the contents that
940 * come from the remaining part of the previous page: it may
941 * contains garbage data due to a previous heap usage (grown
943 memset(g2h_untagged(target_brk
), 0, brk_page
- target_brk
);
945 target_brk
= new_brk
;
946 brk_page
= HOST_PAGE_ALIGN(target_brk
);
947 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
950 } else if (mapped_addr
!= -1) {
951 /* Mapped but at wrong address, meaning there wasn't actually
952 * enough space for this brk.
954 target_munmap(mapped_addr
, new_alloc_size
);
956 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
959 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
962 #if defined(TARGET_ALPHA)
963 /* We (partially) emulate OSF/1 on Alpha, which requires we
964 return a proper errno, not an unchanged brk value. */
965 return -TARGET_ENOMEM
;
967 /* For everything else, return the previous break. */
971 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
972 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
973 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
974 abi_ulong target_fds_addr
,
978 abi_ulong b
, *target_fds
;
980 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
981 if (!(target_fds
= lock_user(VERIFY_READ
,
983 sizeof(abi_ulong
) * nw
,
985 return -TARGET_EFAULT
;
989 for (i
= 0; i
< nw
; i
++) {
990 /* grab the abi_ulong */
991 __get_user(b
, &target_fds
[i
]);
992 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
993 /* check the bit inside the abi_ulong */
1000 unlock_user(target_fds
, target_fds_addr
, 0);
1005 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
1006 abi_ulong target_fds_addr
,
1009 if (target_fds_addr
) {
1010 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
1011 return -TARGET_EFAULT
;
1019 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
1025 abi_ulong
*target_fds
;
1027 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1028 if (!(target_fds
= lock_user(VERIFY_WRITE
,
1030 sizeof(abi_ulong
) * nw
,
1032 return -TARGET_EFAULT
;
1035 for (i
= 0; i
< nw
; i
++) {
1037 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1038 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
1041 __put_user(v
, &target_fds
[i
]);
1044 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
1050 #if defined(__alpha__)
1051 #define HOST_HZ 1024
1056 static inline abi_long
host_to_target_clock_t(long ticks
)
1058 #if HOST_HZ == TARGET_HZ
1061 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
1065 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
1066 const struct rusage
*rusage
)
1068 struct target_rusage
*target_rusage
;
1070 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
1071 return -TARGET_EFAULT
;
1072 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
1073 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
1074 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
1075 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
1076 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
1077 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
1078 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
1079 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
1080 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
1081 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
1082 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1083 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1084 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1085 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1086 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1087 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1088 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1089 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1090 unlock_user_struct(target_rusage
, target_addr
, 1);
1095 #ifdef TARGET_NR_setrlimit
1096 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1098 abi_ulong target_rlim_swap
;
1101 target_rlim_swap
= tswapal(target_rlim
);
1102 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1103 return RLIM_INFINITY
;
1105 result
= target_rlim_swap
;
1106 if (target_rlim_swap
!= (rlim_t
)result
)
1107 return RLIM_INFINITY
;
1113 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1114 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1116 abi_ulong target_rlim_swap
;
1119 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1120 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1122 target_rlim_swap
= rlim
;
1123 result
= tswapal(target_rlim_swap
);
1129 static inline int target_to_host_resource(int code
)
1132 case TARGET_RLIMIT_AS
:
1134 case TARGET_RLIMIT_CORE
:
1136 case TARGET_RLIMIT_CPU
:
1138 case TARGET_RLIMIT_DATA
:
1140 case TARGET_RLIMIT_FSIZE
:
1141 return RLIMIT_FSIZE
;
1142 case TARGET_RLIMIT_LOCKS
:
1143 return RLIMIT_LOCKS
;
1144 case TARGET_RLIMIT_MEMLOCK
:
1145 return RLIMIT_MEMLOCK
;
1146 case TARGET_RLIMIT_MSGQUEUE
:
1147 return RLIMIT_MSGQUEUE
;
1148 case TARGET_RLIMIT_NICE
:
1150 case TARGET_RLIMIT_NOFILE
:
1151 return RLIMIT_NOFILE
;
1152 case TARGET_RLIMIT_NPROC
:
1153 return RLIMIT_NPROC
;
1154 case TARGET_RLIMIT_RSS
:
1156 case TARGET_RLIMIT_RTPRIO
:
1157 return RLIMIT_RTPRIO
;
1158 case TARGET_RLIMIT_SIGPENDING
:
1159 return RLIMIT_SIGPENDING
;
1160 case TARGET_RLIMIT_STACK
:
1161 return RLIMIT_STACK
;
1167 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1168 abi_ulong target_tv_addr
)
1170 struct target_timeval
*target_tv
;
1172 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1)) {
1173 return -TARGET_EFAULT
;
1176 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1177 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1179 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1184 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1185 const struct timeval
*tv
)
1187 struct target_timeval
*target_tv
;
1189 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1190 return -TARGET_EFAULT
;
1193 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1194 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1196 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1201 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1202 static inline abi_long
copy_from_user_timeval64(struct timeval
*tv
,
1203 abi_ulong target_tv_addr
)
1205 struct target__kernel_sock_timeval
*target_tv
;
1207 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1)) {
1208 return -TARGET_EFAULT
;
1211 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1212 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1214 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1220 static inline abi_long
copy_to_user_timeval64(abi_ulong target_tv_addr
,
1221 const struct timeval
*tv
)
1223 struct target__kernel_sock_timeval
*target_tv
;
1225 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1226 return -TARGET_EFAULT
;
1229 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1230 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1232 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1237 #if defined(TARGET_NR_futex) || \
1238 defined(TARGET_NR_rt_sigtimedwait) || \
1239 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1240 defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1241 defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1242 defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1243 defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1244 defined(TARGET_NR_timer_settime) || \
1245 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1246 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
1247 abi_ulong target_addr
)
1249 struct target_timespec
*target_ts
;
1251 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1)) {
1252 return -TARGET_EFAULT
;
1254 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1255 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1256 unlock_user_struct(target_ts
, target_addr
, 0);
1261 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1262 defined(TARGET_NR_timer_settime64) || \
1263 defined(TARGET_NR_mq_timedsend_time64) || \
1264 defined(TARGET_NR_mq_timedreceive_time64) || \
1265 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1266 defined(TARGET_NR_clock_nanosleep_time64) || \
1267 defined(TARGET_NR_rt_sigtimedwait_time64) || \
1268 defined(TARGET_NR_utimensat) || \
1269 defined(TARGET_NR_utimensat_time64) || \
1270 defined(TARGET_NR_semtimedop_time64) || \
1271 defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1272 static inline abi_long
target_to_host_timespec64(struct timespec
*host_ts
,
1273 abi_ulong target_addr
)
1275 struct target__kernel_timespec
*target_ts
;
1277 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1)) {
1278 return -TARGET_EFAULT
;
1280 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1281 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1282 /* in 32bit mode, this drops the padding */
1283 host_ts
->tv_nsec
= (long)(abi_long
)host_ts
->tv_nsec
;
1284 unlock_user_struct(target_ts
, target_addr
, 0);
1289 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
1290 struct timespec
*host_ts
)
1292 struct target_timespec
*target_ts
;
1294 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1295 return -TARGET_EFAULT
;
1297 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1298 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1299 unlock_user_struct(target_ts
, target_addr
, 1);
1303 static inline abi_long
host_to_target_timespec64(abi_ulong target_addr
,
1304 struct timespec
*host_ts
)
1306 struct target__kernel_timespec
*target_ts
;
1308 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1309 return -TARGET_EFAULT
;
1311 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1312 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1313 unlock_user_struct(target_ts
, target_addr
, 1);
1317 #if defined(TARGET_NR_gettimeofday)
1318 static inline abi_long
copy_to_user_timezone(abi_ulong target_tz_addr
,
1319 struct timezone
*tz
)
1321 struct target_timezone
*target_tz
;
1323 if (!lock_user_struct(VERIFY_WRITE
, target_tz
, target_tz_addr
, 1)) {
1324 return -TARGET_EFAULT
;
1327 __put_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1328 __put_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1330 unlock_user_struct(target_tz
, target_tz_addr
, 1);
1336 #if defined(TARGET_NR_settimeofday)
1337 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1338 abi_ulong target_tz_addr
)
1340 struct target_timezone
*target_tz
;
1342 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1343 return -TARGET_EFAULT
;
1346 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1347 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1349 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1355 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1358 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1359 abi_ulong target_mq_attr_addr
)
1361 struct target_mq_attr
*target_mq_attr
;
1363 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1364 target_mq_attr_addr
, 1))
1365 return -TARGET_EFAULT
;
1367 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1368 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1369 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1370 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1372 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1377 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1378 const struct mq_attr
*attr
)
1380 struct target_mq_attr
*target_mq_attr
;
1382 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1383 target_mq_attr_addr
, 0))
1384 return -TARGET_EFAULT
;
1386 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1387 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1388 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1389 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1391 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1397 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1398 /* do_select() must return target values and target errnos. */
1399 static abi_long
do_select(int n
,
1400 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1401 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1403 fd_set rfds
, wfds
, efds
;
1404 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1406 struct timespec ts
, *ts_ptr
;
1409 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1413 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1417 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1422 if (target_tv_addr
) {
1423 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1424 return -TARGET_EFAULT
;
1425 ts
.tv_sec
= tv
.tv_sec
;
1426 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1432 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1435 if (!is_error(ret
)) {
1436 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1437 return -TARGET_EFAULT
;
1438 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1439 return -TARGET_EFAULT
;
1440 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1441 return -TARGET_EFAULT
;
1443 if (target_tv_addr
) {
1444 tv
.tv_sec
= ts
.tv_sec
;
1445 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1446 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1447 return -TARGET_EFAULT
;
1455 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1456 static abi_long
do_old_select(abi_ulong arg1
)
1458 struct target_sel_arg_struct
*sel
;
1459 abi_ulong inp
, outp
, exp
, tvp
;
1462 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1)) {
1463 return -TARGET_EFAULT
;
1466 nsel
= tswapal(sel
->n
);
1467 inp
= tswapal(sel
->inp
);
1468 outp
= tswapal(sel
->outp
);
1469 exp
= tswapal(sel
->exp
);
1470 tvp
= tswapal(sel
->tvp
);
1472 unlock_user_struct(sel
, arg1
, 0);
1474 return do_select(nsel
, inp
, outp
, exp
, tvp
);
1479 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1480 static abi_long
do_pselect6(abi_long arg1
, abi_long arg2
, abi_long arg3
,
1481 abi_long arg4
, abi_long arg5
, abi_long arg6
,
1484 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
1485 fd_set rfds
, wfds
, efds
;
1486 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1487 struct timespec ts
, *ts_ptr
;
1491 * The 6th arg is actually two args smashed together,
1492 * so we cannot use the C library.
1500 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
1501 target_sigset_t
*target_sigset
;
1509 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1513 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1517 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1523 * This takes a timespec, and not a timeval, so we cannot
1524 * use the do_select() helper ...
1528 if (target_to_host_timespec64(&ts
, ts_addr
)) {
1529 return -TARGET_EFAULT
;
1532 if (target_to_host_timespec(&ts
, ts_addr
)) {
1533 return -TARGET_EFAULT
;
1541 /* Extract the two packed args for the sigset */
1544 sig
.size
= SIGSET_T_SIZE
;
1546 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
1548 return -TARGET_EFAULT
;
1550 arg_sigset
= tswapal(arg7
[0]);
1551 arg_sigsize
= tswapal(arg7
[1]);
1552 unlock_user(arg7
, arg6
, 0);
1556 if (arg_sigsize
!= sizeof(*target_sigset
)) {
1557 /* Like the kernel, we enforce correct size sigsets */
1558 return -TARGET_EINVAL
;
1560 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
1561 sizeof(*target_sigset
), 1);
1562 if (!target_sigset
) {
1563 return -TARGET_EFAULT
;
1565 target_to_host_sigset(&set
, target_sigset
);
1566 unlock_user(target_sigset
, arg_sigset
, 0);
1574 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1577 if (!is_error(ret
)) {
1578 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
)) {
1579 return -TARGET_EFAULT
;
1581 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
)) {
1582 return -TARGET_EFAULT
;
1584 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
)) {
1585 return -TARGET_EFAULT
;
1588 if (ts_addr
&& host_to_target_timespec64(ts_addr
, &ts
)) {
1589 return -TARGET_EFAULT
;
1592 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
)) {
1593 return -TARGET_EFAULT
;
1601 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1602 defined(TARGET_NR_ppoll_time64)
1603 static abi_long
do_ppoll(abi_long arg1
, abi_long arg2
, abi_long arg3
,
1604 abi_long arg4
, abi_long arg5
, bool ppoll
, bool time64
)
1606 struct target_pollfd
*target_pfd
;
1607 unsigned int nfds
= arg2
;
1615 if (nfds
> (INT_MAX
/ sizeof(struct target_pollfd
))) {
1616 return -TARGET_EINVAL
;
1618 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
1619 sizeof(struct target_pollfd
) * nfds
, 1);
1621 return -TARGET_EFAULT
;
1624 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
1625 for (i
= 0; i
< nfds
; i
++) {
1626 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
1627 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
1631 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
1632 target_sigset_t
*target_set
;
1633 sigset_t _set
, *set
= &_set
;
1637 if (target_to_host_timespec64(timeout_ts
, arg3
)) {
1638 unlock_user(target_pfd
, arg1
, 0);
1639 return -TARGET_EFAULT
;
1642 if (target_to_host_timespec(timeout_ts
, arg3
)) {
1643 unlock_user(target_pfd
, arg1
, 0);
1644 return -TARGET_EFAULT
;
1652 if (arg5
!= sizeof(target_sigset_t
)) {
1653 unlock_user(target_pfd
, arg1
, 0);
1654 return -TARGET_EINVAL
;
1657 target_set
= lock_user(VERIFY_READ
, arg4
,
1658 sizeof(target_sigset_t
), 1);
1660 unlock_user(target_pfd
, arg1
, 0);
1661 return -TARGET_EFAULT
;
1663 target_to_host_sigset(set
, target_set
);
1668 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
1669 set
, SIGSET_T_SIZE
));
1671 if (!is_error(ret
) && arg3
) {
1673 if (host_to_target_timespec64(arg3
, timeout_ts
)) {
1674 return -TARGET_EFAULT
;
1677 if (host_to_target_timespec(arg3
, timeout_ts
)) {
1678 return -TARGET_EFAULT
;
1683 unlock_user(target_set
, arg4
, 0);
1686 struct timespec ts
, *pts
;
1689 /* Convert ms to secs, ns */
1690 ts
.tv_sec
= arg3
/ 1000;
1691 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
1694 /* -ve poll() timeout means "infinite" */
1697 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
1700 if (!is_error(ret
)) {
1701 for (i
= 0; i
< nfds
; i
++) {
1702 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
1705 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
1710 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1713 return pipe2(host_pipe
, flags
);
1719 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1720 int flags
, int is_pipe2
)
1724 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1727 return get_errno(ret
);
1729 /* Several targets have special calling conventions for the original
1730 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1732 #if defined(TARGET_ALPHA)
1733 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1734 return host_pipe
[0];
1735 #elif defined(TARGET_MIPS)
1736 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1737 return host_pipe
[0];
1738 #elif defined(TARGET_SH4)
1739 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1740 return host_pipe
[0];
1741 #elif defined(TARGET_SPARC)
1742 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1743 return host_pipe
[0];
1747 if (put_user_s32(host_pipe
[0], pipedes
)
1748 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1749 return -TARGET_EFAULT
;
1750 return get_errno(ret
);
1753 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1754 abi_ulong target_addr
,
1757 struct target_ip_mreqn
*target_smreqn
;
1759 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1761 return -TARGET_EFAULT
;
1762 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1763 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1764 if (len
== sizeof(struct target_ip_mreqn
))
1765 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1766 unlock_user(target_smreqn
, target_addr
, 0);
1771 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1772 abi_ulong target_addr
,
1775 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1776 sa_family_t sa_family
;
1777 struct target_sockaddr
*target_saddr
;
1779 if (fd_trans_target_to_host_addr(fd
)) {
1780 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1783 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1785 return -TARGET_EFAULT
;
1787 sa_family
= tswap16(target_saddr
->sa_family
);
1789 /* Oops. The caller might send a incomplete sun_path; sun_path
1790 * must be terminated by \0 (see the manual page), but
1791 * unfortunately it is quite common to specify sockaddr_un
1792 * length as "strlen(x->sun_path)" while it should be
1793 * "strlen(...) + 1". We'll fix that here if needed.
1794 * Linux kernel has a similar feature.
1797 if (sa_family
== AF_UNIX
) {
1798 if (len
< unix_maxlen
&& len
> 0) {
1799 char *cp
= (char*)target_saddr
;
1801 if ( cp
[len
-1] && !cp
[len
] )
1804 if (len
> unix_maxlen
)
1808 memcpy(addr
, target_saddr
, len
);
1809 addr
->sa_family
= sa_family
;
1810 if (sa_family
== AF_NETLINK
) {
1811 struct sockaddr_nl
*nladdr
;
1813 nladdr
= (struct sockaddr_nl
*)addr
;
1814 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1815 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1816 } else if (sa_family
== AF_PACKET
) {
1817 struct target_sockaddr_ll
*lladdr
;
1819 lladdr
= (struct target_sockaddr_ll
*)addr
;
1820 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1821 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1823 unlock_user(target_saddr
, target_addr
, 0);
1828 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1829 struct sockaddr
*addr
,
1832 struct target_sockaddr
*target_saddr
;
1839 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1841 return -TARGET_EFAULT
;
1842 memcpy(target_saddr
, addr
, len
);
1843 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1844 sizeof(target_saddr
->sa_family
)) {
1845 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1847 if (addr
->sa_family
== AF_NETLINK
&&
1848 len
>= sizeof(struct target_sockaddr_nl
)) {
1849 struct target_sockaddr_nl
*target_nl
=
1850 (struct target_sockaddr_nl
*)target_saddr
;
1851 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1852 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1853 } else if (addr
->sa_family
== AF_PACKET
) {
1854 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1855 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1856 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1857 } else if (addr
->sa_family
== AF_INET6
&&
1858 len
>= sizeof(struct target_sockaddr_in6
)) {
1859 struct target_sockaddr_in6
*target_in6
=
1860 (struct target_sockaddr_in6
*)target_saddr
;
1861 target_in6
->sin6_scope_id
= tswap16(target_in6
->sin6_scope_id
);
1863 unlock_user(target_saddr
, target_addr
, len
);
1868 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1869 struct target_msghdr
*target_msgh
)
1871 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1872 abi_long msg_controllen
;
1873 abi_ulong target_cmsg_addr
;
1874 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1875 socklen_t space
= 0;
1877 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1878 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1880 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1881 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1882 target_cmsg_start
= target_cmsg
;
1884 return -TARGET_EFAULT
;
1886 while (cmsg
&& target_cmsg
) {
1887 void *data
= CMSG_DATA(cmsg
);
1888 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1890 int len
= tswapal(target_cmsg
->cmsg_len
)
1891 - sizeof(struct target_cmsghdr
);
1893 space
+= CMSG_SPACE(len
);
1894 if (space
> msgh
->msg_controllen
) {
1895 space
-= CMSG_SPACE(len
);
1896 /* This is a QEMU bug, since we allocated the payload
1897 * area ourselves (unlike overflow in host-to-target
1898 * conversion, which is just the guest giving us a buffer
1899 * that's too small). It can't happen for the payload types
1900 * we currently support; if it becomes an issue in future
1901 * we would need to improve our allocation strategy to
1902 * something more intelligent than "twice the size of the
1903 * target buffer we're reading from".
1905 qemu_log_mask(LOG_UNIMP
,
1906 ("Unsupported ancillary data %d/%d: "
1907 "unhandled msg size\n"),
1908 tswap32(target_cmsg
->cmsg_level
),
1909 tswap32(target_cmsg
->cmsg_type
));
1913 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1914 cmsg
->cmsg_level
= SOL_SOCKET
;
1916 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1918 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1919 cmsg
->cmsg_len
= CMSG_LEN(len
);
1921 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1922 int *fd
= (int *)data
;
1923 int *target_fd
= (int *)target_data
;
1924 int i
, numfds
= len
/ sizeof(int);
1926 for (i
= 0; i
< numfds
; i
++) {
1927 __get_user(fd
[i
], target_fd
+ i
);
1929 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1930 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1931 struct ucred
*cred
= (struct ucred
*)data
;
1932 struct target_ucred
*target_cred
=
1933 (struct target_ucred
*)target_data
;
1935 __get_user(cred
->pid
, &target_cred
->pid
);
1936 __get_user(cred
->uid
, &target_cred
->uid
);
1937 __get_user(cred
->gid
, &target_cred
->gid
);
1939 qemu_log_mask(LOG_UNIMP
, "Unsupported ancillary data: %d/%d\n",
1940 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1941 memcpy(data
, target_data
, len
);
1944 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1945 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1948 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1950 msgh
->msg_controllen
= space
;
1954 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1955 struct msghdr
*msgh
)
1957 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1958 abi_long msg_controllen
;
1959 abi_ulong target_cmsg_addr
;
1960 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1961 socklen_t space
= 0;
1963 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1964 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1966 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1967 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1968 target_cmsg_start
= target_cmsg
;
1970 return -TARGET_EFAULT
;
1972 while (cmsg
&& target_cmsg
) {
1973 void *data
= CMSG_DATA(cmsg
);
1974 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1976 int len
= cmsg
->cmsg_len
- sizeof(struct cmsghdr
);
1977 int tgt_len
, tgt_space
;
1979 /* We never copy a half-header but may copy half-data;
1980 * this is Linux's behaviour in put_cmsg(). Note that
1981 * truncation here is a guest problem (which we report
1982 * to the guest via the CTRUNC bit), unlike truncation
1983 * in target_to_host_cmsg, which is a QEMU bug.
1985 if (msg_controllen
< sizeof(struct target_cmsghdr
)) {
1986 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1990 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1991 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1993 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1995 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1997 /* Payload types which need a different size of payload on
1998 * the target must adjust tgt_len here.
2001 switch (cmsg
->cmsg_level
) {
2003 switch (cmsg
->cmsg_type
) {
2005 tgt_len
= sizeof(struct target_timeval
);
2015 if (msg_controllen
< TARGET_CMSG_LEN(tgt_len
)) {
2016 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
2017 tgt_len
= msg_controllen
- sizeof(struct target_cmsghdr
);
2020 /* We must now copy-and-convert len bytes of payload
2021 * into tgt_len bytes of destination space. Bear in mind
2022 * that in both source and destination we may be dealing
2023 * with a truncated value!
2025 switch (cmsg
->cmsg_level
) {
2027 switch (cmsg
->cmsg_type
) {
2030 int *fd
= (int *)data
;
2031 int *target_fd
= (int *)target_data
;
2032 int i
, numfds
= tgt_len
/ sizeof(int);
2034 for (i
= 0; i
< numfds
; i
++) {
2035 __put_user(fd
[i
], target_fd
+ i
);
2041 struct timeval
*tv
= (struct timeval
*)data
;
2042 struct target_timeval
*target_tv
=
2043 (struct target_timeval
*)target_data
;
2045 if (len
!= sizeof(struct timeval
) ||
2046 tgt_len
!= sizeof(struct target_timeval
)) {
2050 /* copy struct timeval to target */
2051 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
2052 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
2055 case SCM_CREDENTIALS
:
2057 struct ucred
*cred
= (struct ucred
*)data
;
2058 struct target_ucred
*target_cred
=
2059 (struct target_ucred
*)target_data
;
2061 __put_user(cred
->pid
, &target_cred
->pid
);
2062 __put_user(cred
->uid
, &target_cred
->uid
);
2063 __put_user(cred
->gid
, &target_cred
->gid
);
2072 switch (cmsg
->cmsg_type
) {
2075 uint32_t *v
= (uint32_t *)data
;
2076 uint32_t *t_int
= (uint32_t *)target_data
;
2078 if (len
!= sizeof(uint32_t) ||
2079 tgt_len
!= sizeof(uint32_t)) {
2082 __put_user(*v
, t_int
);
2088 struct sock_extended_err ee
;
2089 struct sockaddr_in offender
;
2091 struct errhdr_t
*errh
= (struct errhdr_t
*)data
;
2092 struct errhdr_t
*target_errh
=
2093 (struct errhdr_t
*)target_data
;
2095 if (len
!= sizeof(struct errhdr_t
) ||
2096 tgt_len
!= sizeof(struct errhdr_t
)) {
2099 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
2100 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
2101 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
2102 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
2103 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
2104 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
2105 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
2106 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
2107 (void *) &errh
->offender
, sizeof(errh
->offender
));
2116 switch (cmsg
->cmsg_type
) {
2119 uint32_t *v
= (uint32_t *)data
;
2120 uint32_t *t_int
= (uint32_t *)target_data
;
2122 if (len
!= sizeof(uint32_t) ||
2123 tgt_len
!= sizeof(uint32_t)) {
2126 __put_user(*v
, t_int
);
2132 struct sock_extended_err ee
;
2133 struct sockaddr_in6 offender
;
2135 struct errhdr6_t
*errh
= (struct errhdr6_t
*)data
;
2136 struct errhdr6_t
*target_errh
=
2137 (struct errhdr6_t
*)target_data
;
2139 if (len
!= sizeof(struct errhdr6_t
) ||
2140 tgt_len
!= sizeof(struct errhdr6_t
)) {
2143 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
2144 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
2145 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
2146 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
2147 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
2148 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
2149 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
2150 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
2151 (void *) &errh
->offender
, sizeof(errh
->offender
));
2161 qemu_log_mask(LOG_UNIMP
, "Unsupported ancillary data: %d/%d\n",
2162 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
2163 memcpy(target_data
, data
, MIN(len
, tgt_len
));
2164 if (tgt_len
> len
) {
2165 memset(target_data
+ len
, 0, tgt_len
- len
);
2169 target_cmsg
->cmsg_len
= tswapal(TARGET_CMSG_LEN(tgt_len
));
2170 tgt_space
= TARGET_CMSG_SPACE(tgt_len
);
2171 if (msg_controllen
< tgt_space
) {
2172 tgt_space
= msg_controllen
;
2174 msg_controllen
-= tgt_space
;
2176 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
2177 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
2180 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
2182 target_msgh
->msg_controllen
= tswapal(space
);
2186 /* do_setsockopt() Must return target values and target errnos. */
2187 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
2188 abi_ulong optval_addr
, socklen_t optlen
)
2192 struct ip_mreqn
*ip_mreq
;
2193 struct ip_mreq_source
*ip_mreq_source
;
2198 /* TCP and UDP options all take an 'int' value. */
2199 if (optlen
< sizeof(uint32_t))
2200 return -TARGET_EINVAL
;
2202 if (get_user_u32(val
, optval_addr
))
2203 return -TARGET_EFAULT
;
2204 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2211 case IP_ROUTER_ALERT
:
2215 case IP_MTU_DISCOVER
:
2222 case IP_MULTICAST_TTL
:
2223 case IP_MULTICAST_LOOP
:
2225 if (optlen
>= sizeof(uint32_t)) {
2226 if (get_user_u32(val
, optval_addr
))
2227 return -TARGET_EFAULT
;
2228 } else if (optlen
>= 1) {
2229 if (get_user_u8(val
, optval_addr
))
2230 return -TARGET_EFAULT
;
2232 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2234 case IP_ADD_MEMBERSHIP
:
2235 case IP_DROP_MEMBERSHIP
:
2236 if (optlen
< sizeof (struct target_ip_mreq
) ||
2237 optlen
> sizeof (struct target_ip_mreqn
))
2238 return -TARGET_EINVAL
;
2240 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
2241 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
2242 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
2245 case IP_BLOCK_SOURCE
:
2246 case IP_UNBLOCK_SOURCE
:
2247 case IP_ADD_SOURCE_MEMBERSHIP
:
2248 case IP_DROP_SOURCE_MEMBERSHIP
:
2249 if (optlen
!= sizeof (struct target_ip_mreq_source
))
2250 return -TARGET_EINVAL
;
2252 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2253 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
2254 unlock_user (ip_mreq_source
, optval_addr
, 0);
2263 case IPV6_MTU_DISCOVER
:
2266 case IPV6_RECVPKTINFO
:
2267 case IPV6_UNICAST_HOPS
:
2268 case IPV6_MULTICAST_HOPS
:
2269 case IPV6_MULTICAST_LOOP
:
2271 case IPV6_RECVHOPLIMIT
:
2272 case IPV6_2292HOPLIMIT
:
2275 case IPV6_2292PKTINFO
:
2276 case IPV6_RECVTCLASS
:
2277 case IPV6_RECVRTHDR
:
2278 case IPV6_2292RTHDR
:
2279 case IPV6_RECVHOPOPTS
:
2280 case IPV6_2292HOPOPTS
:
2281 case IPV6_RECVDSTOPTS
:
2282 case IPV6_2292DSTOPTS
:
2284 case IPV6_ADDR_PREFERENCES
:
2285 #ifdef IPV6_RECVPATHMTU
2286 case IPV6_RECVPATHMTU
:
2288 #ifdef IPV6_TRANSPARENT
2289 case IPV6_TRANSPARENT
:
2291 #ifdef IPV6_FREEBIND
2294 #ifdef IPV6_RECVORIGDSTADDR
2295 case IPV6_RECVORIGDSTADDR
:
2298 if (optlen
< sizeof(uint32_t)) {
2299 return -TARGET_EINVAL
;
2301 if (get_user_u32(val
, optval_addr
)) {
2302 return -TARGET_EFAULT
;
2304 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2305 &val
, sizeof(val
)));
2309 struct in6_pktinfo pki
;
2311 if (optlen
< sizeof(pki
)) {
2312 return -TARGET_EINVAL
;
2315 if (copy_from_user(&pki
, optval_addr
, sizeof(pki
))) {
2316 return -TARGET_EFAULT
;
2319 pki
.ipi6_ifindex
= tswap32(pki
.ipi6_ifindex
);
2321 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2322 &pki
, sizeof(pki
)));
2325 case IPV6_ADD_MEMBERSHIP
:
2326 case IPV6_DROP_MEMBERSHIP
:
2328 struct ipv6_mreq ipv6mreq
;
2330 if (optlen
< sizeof(ipv6mreq
)) {
2331 return -TARGET_EINVAL
;
2334 if (copy_from_user(&ipv6mreq
, optval_addr
, sizeof(ipv6mreq
))) {
2335 return -TARGET_EFAULT
;
2338 ipv6mreq
.ipv6mr_interface
= tswap32(ipv6mreq
.ipv6mr_interface
);
2340 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2341 &ipv6mreq
, sizeof(ipv6mreq
)));
2352 struct icmp6_filter icmp6f
;
2354 if (optlen
> sizeof(icmp6f
)) {
2355 optlen
= sizeof(icmp6f
);
2358 if (copy_from_user(&icmp6f
, optval_addr
, optlen
)) {
2359 return -TARGET_EFAULT
;
2362 for (val
= 0; val
< 8; val
++) {
2363 icmp6f
.data
[val
] = tswap32(icmp6f
.data
[val
]);
2366 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2378 /* those take an u32 value */
2379 if (optlen
< sizeof(uint32_t)) {
2380 return -TARGET_EINVAL
;
2383 if (get_user_u32(val
, optval_addr
)) {
2384 return -TARGET_EFAULT
;
2386 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2387 &val
, sizeof(val
)));
2394 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2399 char *alg_key
= g_malloc(optlen
);
2402 return -TARGET_ENOMEM
;
2404 if (copy_from_user(alg_key
, optval_addr
, optlen
)) {
2406 return -TARGET_EFAULT
;
2408 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2413 case ALG_SET_AEAD_AUTHSIZE
:
2415 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2424 case TARGET_SOL_SOCKET
:
2426 case TARGET_SO_RCVTIMEO
:
2430 optname
= SO_RCVTIMEO
;
2433 if (optlen
!= sizeof(struct target_timeval
)) {
2434 return -TARGET_EINVAL
;
2437 if (copy_from_user_timeval(&tv
, optval_addr
)) {
2438 return -TARGET_EFAULT
;
2441 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2445 case TARGET_SO_SNDTIMEO
:
2446 optname
= SO_SNDTIMEO
;
2448 case TARGET_SO_ATTACH_FILTER
:
2450 struct target_sock_fprog
*tfprog
;
2451 struct target_sock_filter
*tfilter
;
2452 struct sock_fprog fprog
;
2453 struct sock_filter
*filter
;
2456 if (optlen
!= sizeof(*tfprog
)) {
2457 return -TARGET_EINVAL
;
2459 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
2460 return -TARGET_EFAULT
;
2462 if (!lock_user_struct(VERIFY_READ
, tfilter
,
2463 tswapal(tfprog
->filter
), 0)) {
2464 unlock_user_struct(tfprog
, optval_addr
, 1);
2465 return -TARGET_EFAULT
;
2468 fprog
.len
= tswap16(tfprog
->len
);
2469 filter
= g_try_new(struct sock_filter
, fprog
.len
);
2470 if (filter
== NULL
) {
2471 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2472 unlock_user_struct(tfprog
, optval_addr
, 1);
2473 return -TARGET_ENOMEM
;
2475 for (i
= 0; i
< fprog
.len
; i
++) {
2476 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2477 filter
[i
].jt
= tfilter
[i
].jt
;
2478 filter
[i
].jf
= tfilter
[i
].jf
;
2479 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2481 fprog
.filter
= filter
;
2483 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2484 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2487 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2488 unlock_user_struct(tfprog
, optval_addr
, 1);
2491 case TARGET_SO_BINDTODEVICE
:
2493 char *dev_ifname
, *addr_ifname
;
2495 if (optlen
> IFNAMSIZ
- 1) {
2496 optlen
= IFNAMSIZ
- 1;
2498 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2500 return -TARGET_EFAULT
;
2502 optname
= SO_BINDTODEVICE
;
2503 addr_ifname
= alloca(IFNAMSIZ
);
2504 memcpy(addr_ifname
, dev_ifname
, optlen
);
2505 addr_ifname
[optlen
] = 0;
2506 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2507 addr_ifname
, optlen
));
2508 unlock_user (dev_ifname
, optval_addr
, 0);
2511 case TARGET_SO_LINGER
:
2514 struct target_linger
*tlg
;
2516 if (optlen
!= sizeof(struct target_linger
)) {
2517 return -TARGET_EINVAL
;
2519 if (!lock_user_struct(VERIFY_READ
, tlg
, optval_addr
, 1)) {
2520 return -TARGET_EFAULT
;
2522 __get_user(lg
.l_onoff
, &tlg
->l_onoff
);
2523 __get_user(lg
.l_linger
, &tlg
->l_linger
);
2524 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, SO_LINGER
,
2526 unlock_user_struct(tlg
, optval_addr
, 0);
2529 /* Options with 'int' argument. */
2530 case TARGET_SO_DEBUG
:
2533 case TARGET_SO_REUSEADDR
:
2534 optname
= SO_REUSEADDR
;
2537 case TARGET_SO_REUSEPORT
:
2538 optname
= SO_REUSEPORT
;
2541 case TARGET_SO_TYPE
:
2544 case TARGET_SO_ERROR
:
2547 case TARGET_SO_DONTROUTE
:
2548 optname
= SO_DONTROUTE
;
2550 case TARGET_SO_BROADCAST
:
2551 optname
= SO_BROADCAST
;
2553 case TARGET_SO_SNDBUF
:
2554 optname
= SO_SNDBUF
;
2556 case TARGET_SO_SNDBUFFORCE
:
2557 optname
= SO_SNDBUFFORCE
;
2559 case TARGET_SO_RCVBUF
:
2560 optname
= SO_RCVBUF
;
2562 case TARGET_SO_RCVBUFFORCE
:
2563 optname
= SO_RCVBUFFORCE
;
2565 case TARGET_SO_KEEPALIVE
:
2566 optname
= SO_KEEPALIVE
;
2568 case TARGET_SO_OOBINLINE
:
2569 optname
= SO_OOBINLINE
;
2571 case TARGET_SO_NO_CHECK
:
2572 optname
= SO_NO_CHECK
;
2574 case TARGET_SO_PRIORITY
:
2575 optname
= SO_PRIORITY
;
2578 case TARGET_SO_BSDCOMPAT
:
2579 optname
= SO_BSDCOMPAT
;
2582 case TARGET_SO_PASSCRED
:
2583 optname
= SO_PASSCRED
;
2585 case TARGET_SO_PASSSEC
:
2586 optname
= SO_PASSSEC
;
2588 case TARGET_SO_TIMESTAMP
:
2589 optname
= SO_TIMESTAMP
;
2591 case TARGET_SO_RCVLOWAT
:
2592 optname
= SO_RCVLOWAT
;
2597 if (optlen
< sizeof(uint32_t))
2598 return -TARGET_EINVAL
;
2600 if (get_user_u32(val
, optval_addr
))
2601 return -TARGET_EFAULT
;
2602 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
2607 case NETLINK_PKTINFO
:
2608 case NETLINK_ADD_MEMBERSHIP
:
2609 case NETLINK_DROP_MEMBERSHIP
:
2610 case NETLINK_BROADCAST_ERROR
:
2611 case NETLINK_NO_ENOBUFS
:
2612 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2613 case NETLINK_LISTEN_ALL_NSID
:
2614 case NETLINK_CAP_ACK
:
2615 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2616 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2617 case NETLINK_EXT_ACK
:
2618 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2619 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2620 case NETLINK_GET_STRICT_CHK
:
2621 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2627 if (optlen
< sizeof(uint32_t)) {
2628 return -TARGET_EINVAL
;
2630 if (get_user_u32(val
, optval_addr
)) {
2631 return -TARGET_EFAULT
;
2633 ret
= get_errno(setsockopt(sockfd
, SOL_NETLINK
, optname
, &val
,
2636 #endif /* SOL_NETLINK */
2639 qemu_log_mask(LOG_UNIMP
, "Unsupported setsockopt level=%d optname=%d\n",
2641 ret
= -TARGET_ENOPROTOOPT
;
2646 /* do_getsockopt() Must return target values and target errnos. */
2647 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
2648 abi_ulong optval_addr
, abi_ulong optlen
)
2655 case TARGET_SOL_SOCKET
:
2658 /* These don't just return a single integer */
2659 case TARGET_SO_PEERNAME
:
2661 case TARGET_SO_RCVTIMEO
: {
2665 optname
= SO_RCVTIMEO
;
2668 if (get_user_u32(len
, optlen
)) {
2669 return -TARGET_EFAULT
;
2672 return -TARGET_EINVAL
;
2676 ret
= get_errno(getsockopt(sockfd
, level
, optname
,
2681 if (len
> sizeof(struct target_timeval
)) {
2682 len
= sizeof(struct target_timeval
);
2684 if (copy_to_user_timeval(optval_addr
, &tv
)) {
2685 return -TARGET_EFAULT
;
2687 if (put_user_u32(len
, optlen
)) {
2688 return -TARGET_EFAULT
;
2692 case TARGET_SO_SNDTIMEO
:
2693 optname
= SO_SNDTIMEO
;
2695 case TARGET_SO_PEERCRED
: {
2698 struct target_ucred
*tcr
;
2700 if (get_user_u32(len
, optlen
)) {
2701 return -TARGET_EFAULT
;
2704 return -TARGET_EINVAL
;
2708 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
2716 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
2717 return -TARGET_EFAULT
;
2719 __put_user(cr
.pid
, &tcr
->pid
);
2720 __put_user(cr
.uid
, &tcr
->uid
);
2721 __put_user(cr
.gid
, &tcr
->gid
);
2722 unlock_user_struct(tcr
, optval_addr
, 1);
2723 if (put_user_u32(len
, optlen
)) {
2724 return -TARGET_EFAULT
;
2728 case TARGET_SO_PEERSEC
: {
2731 if (get_user_u32(len
, optlen
)) {
2732 return -TARGET_EFAULT
;
2735 return -TARGET_EINVAL
;
2737 name
= lock_user(VERIFY_WRITE
, optval_addr
, len
, 0);
2739 return -TARGET_EFAULT
;
2742 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERSEC
,
2744 if (put_user_u32(lv
, optlen
)) {
2745 ret
= -TARGET_EFAULT
;
2747 unlock_user(name
, optval_addr
, lv
);
2750 case TARGET_SO_LINGER
:
2754 struct target_linger
*tlg
;
2756 if (get_user_u32(len
, optlen
)) {
2757 return -TARGET_EFAULT
;
2760 return -TARGET_EINVAL
;
2764 ret
= get_errno(getsockopt(sockfd
, level
, SO_LINGER
,
2772 if (!lock_user_struct(VERIFY_WRITE
, tlg
, optval_addr
, 0)) {
2773 return -TARGET_EFAULT
;
2775 __put_user(lg
.l_onoff
, &tlg
->l_onoff
);
2776 __put_user(lg
.l_linger
, &tlg
->l_linger
);
2777 unlock_user_struct(tlg
, optval_addr
, 1);
2778 if (put_user_u32(len
, optlen
)) {
2779 return -TARGET_EFAULT
;
2783 /* Options with 'int' argument. */
2784 case TARGET_SO_DEBUG
:
2787 case TARGET_SO_REUSEADDR
:
2788 optname
= SO_REUSEADDR
;
2791 case TARGET_SO_REUSEPORT
:
2792 optname
= SO_REUSEPORT
;
2795 case TARGET_SO_TYPE
:
2798 case TARGET_SO_ERROR
:
2801 case TARGET_SO_DONTROUTE
:
2802 optname
= SO_DONTROUTE
;
2804 case TARGET_SO_BROADCAST
:
2805 optname
= SO_BROADCAST
;
2807 case TARGET_SO_SNDBUF
:
2808 optname
= SO_SNDBUF
;
2810 case TARGET_SO_RCVBUF
:
2811 optname
= SO_RCVBUF
;
2813 case TARGET_SO_KEEPALIVE
:
2814 optname
= SO_KEEPALIVE
;
2816 case TARGET_SO_OOBINLINE
:
2817 optname
= SO_OOBINLINE
;
2819 case TARGET_SO_NO_CHECK
:
2820 optname
= SO_NO_CHECK
;
2822 case TARGET_SO_PRIORITY
:
2823 optname
= SO_PRIORITY
;
2826 case TARGET_SO_BSDCOMPAT
:
2827 optname
= SO_BSDCOMPAT
;
2830 case TARGET_SO_PASSCRED
:
2831 optname
= SO_PASSCRED
;
2833 case TARGET_SO_TIMESTAMP
:
2834 optname
= SO_TIMESTAMP
;
2836 case TARGET_SO_RCVLOWAT
:
2837 optname
= SO_RCVLOWAT
;
2839 case TARGET_SO_ACCEPTCONN
:
2840 optname
= SO_ACCEPTCONN
;
2842 case TARGET_SO_PROTOCOL
:
2843 optname
= SO_PROTOCOL
;
2845 case TARGET_SO_DOMAIN
:
2846 optname
= SO_DOMAIN
;
2854 /* TCP and UDP options all take an 'int' value. */
2856 if (get_user_u32(len
, optlen
))
2857 return -TARGET_EFAULT
;
2859 return -TARGET_EINVAL
;
2861 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2864 if (optname
== SO_TYPE
) {
2865 val
= host_to_target_sock_type(val
);
2870 if (put_user_u32(val
, optval_addr
))
2871 return -TARGET_EFAULT
;
2873 if (put_user_u8(val
, optval_addr
))
2874 return -TARGET_EFAULT
;
2876 if (put_user_u32(len
, optlen
))
2877 return -TARGET_EFAULT
;
2884 case IP_ROUTER_ALERT
:
2888 case IP_MTU_DISCOVER
:
2894 case IP_MULTICAST_TTL
:
2895 case IP_MULTICAST_LOOP
:
2896 if (get_user_u32(len
, optlen
))
2897 return -TARGET_EFAULT
;
2899 return -TARGET_EINVAL
;
2901 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2904 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2906 if (put_user_u32(len
, optlen
)
2907 || put_user_u8(val
, optval_addr
))
2908 return -TARGET_EFAULT
;
2910 if (len
> sizeof(int))
2912 if (put_user_u32(len
, optlen
)
2913 || put_user_u32(val
, optval_addr
))
2914 return -TARGET_EFAULT
;
2918 ret
= -TARGET_ENOPROTOOPT
;
2924 case IPV6_MTU_DISCOVER
:
2927 case IPV6_RECVPKTINFO
:
2928 case IPV6_UNICAST_HOPS
:
2929 case IPV6_MULTICAST_HOPS
:
2930 case IPV6_MULTICAST_LOOP
:
2932 case IPV6_RECVHOPLIMIT
:
2933 case IPV6_2292HOPLIMIT
:
2936 case IPV6_2292PKTINFO
:
2937 case IPV6_RECVTCLASS
:
2938 case IPV6_RECVRTHDR
:
2939 case IPV6_2292RTHDR
:
2940 case IPV6_RECVHOPOPTS
:
2941 case IPV6_2292HOPOPTS
:
2942 case IPV6_RECVDSTOPTS
:
2943 case IPV6_2292DSTOPTS
:
2945 case IPV6_ADDR_PREFERENCES
:
2946 #ifdef IPV6_RECVPATHMTU
2947 case IPV6_RECVPATHMTU
:
2949 #ifdef IPV6_TRANSPARENT
2950 case IPV6_TRANSPARENT
:
2952 #ifdef IPV6_FREEBIND
2955 #ifdef IPV6_RECVORIGDSTADDR
2956 case IPV6_RECVORIGDSTADDR
:
2958 if (get_user_u32(len
, optlen
))
2959 return -TARGET_EFAULT
;
2961 return -TARGET_EINVAL
;
2963 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2966 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2968 if (put_user_u32(len
, optlen
)
2969 || put_user_u8(val
, optval_addr
))
2970 return -TARGET_EFAULT
;
2972 if (len
> sizeof(int))
2974 if (put_user_u32(len
, optlen
)
2975 || put_user_u32(val
, optval_addr
))
2976 return -TARGET_EFAULT
;
2980 ret
= -TARGET_ENOPROTOOPT
;
2987 case NETLINK_PKTINFO
:
2988 case NETLINK_BROADCAST_ERROR
:
2989 case NETLINK_NO_ENOBUFS
:
2990 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2991 case NETLINK_LISTEN_ALL_NSID
:
2992 case NETLINK_CAP_ACK
:
2993 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2994 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2995 case NETLINK_EXT_ACK
:
2996 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2997 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2998 case NETLINK_GET_STRICT_CHK
:
2999 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
3000 if (get_user_u32(len
, optlen
)) {
3001 return -TARGET_EFAULT
;
3003 if (len
!= sizeof(val
)) {
3004 return -TARGET_EINVAL
;
3007 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
3011 if (put_user_u32(lv
, optlen
)
3012 || put_user_u32(val
, optval_addr
)) {
3013 return -TARGET_EFAULT
;
3016 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
3017 case NETLINK_LIST_MEMBERSHIPS
:
3021 if (get_user_u32(len
, optlen
)) {
3022 return -TARGET_EFAULT
;
3025 return -TARGET_EINVAL
;
3027 results
= lock_user(VERIFY_WRITE
, optval_addr
, len
, 1);
3028 if (!results
&& len
> 0) {
3029 return -TARGET_EFAULT
;
3032 ret
= get_errno(getsockopt(sockfd
, level
, optname
, results
, &lv
));
3034 unlock_user(results
, optval_addr
, 0);
3037 /* swap host endianess to target endianess. */
3038 for (i
= 0; i
< (len
/ sizeof(uint32_t)); i
++) {
3039 results
[i
] = tswap32(results
[i
]);
3041 if (put_user_u32(lv
, optlen
)) {
3042 return -TARGET_EFAULT
;
3044 unlock_user(results
, optval_addr
, 0);
3047 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
3052 #endif /* SOL_NETLINK */
3055 qemu_log_mask(LOG_UNIMP
,
3056 "getsockopt level=%d optname=%d not yet supported\n",
3058 ret
= -TARGET_EOPNOTSUPP
;
3064 /* Convert target low/high pair representing file offset into the host
3065 * low/high pair. This function doesn't handle offsets bigger than 64 bits
3066 * as the kernel doesn't handle them either.
3068 static void target_to_host_low_high(abi_ulong tlow
,
3070 unsigned long *hlow
,
3071 unsigned long *hhigh
)
3073 uint64_t off
= tlow
|
3074 ((unsigned long long)thigh
<< TARGET_LONG_BITS
/ 2) <<
3075 TARGET_LONG_BITS
/ 2;
3078 *hhigh
= (off
>> HOST_LONG_BITS
/ 2) >> HOST_LONG_BITS
/ 2;
3081 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
3082 abi_ulong count
, int copy
)
3084 struct target_iovec
*target_vec
;
3086 abi_ulong total_len
, max_len
;
3089 bool bad_address
= false;
3095 if (count
> IOV_MAX
) {
3100 vec
= g_try_new0(struct iovec
, count
);
3106 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3107 count
* sizeof(struct target_iovec
), 1);
3108 if (target_vec
== NULL
) {
3113 /* ??? If host page size > target page size, this will result in a
3114 value larger than what we can actually support. */
3115 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
3118 for (i
= 0; i
< count
; i
++) {
3119 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3120 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3125 } else if (len
== 0) {
3126 /* Zero length pointer is ignored. */
3127 vec
[i
].iov_base
= 0;
3129 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
3130 /* If the first buffer pointer is bad, this is a fault. But
3131 * subsequent bad buffers will result in a partial write; this
3132 * is realized by filling the vector with null pointers and
3134 if (!vec
[i
].iov_base
) {
3145 if (len
> max_len
- total_len
) {
3146 len
= max_len
- total_len
;
3149 vec
[i
].iov_len
= len
;
3153 unlock_user(target_vec
, target_addr
, 0);
3158 if (tswapal(target_vec
[i
].iov_len
) > 0) {
3159 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
3162 unlock_user(target_vec
, target_addr
, 0);
3169 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
3170 abi_ulong count
, int copy
)
3172 struct target_iovec
*target_vec
;
3175 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3176 count
* sizeof(struct target_iovec
), 1);
3178 for (i
= 0; i
< count
; i
++) {
3179 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3180 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3184 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
3186 unlock_user(target_vec
, target_addr
, 0);
3192 static inline int target_to_host_sock_type(int *type
)
3195 int target_type
= *type
;
3197 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
3198 case TARGET_SOCK_DGRAM
:
3199 host_type
= SOCK_DGRAM
;
3201 case TARGET_SOCK_STREAM
:
3202 host_type
= SOCK_STREAM
;
3205 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
3208 if (target_type
& TARGET_SOCK_CLOEXEC
) {
3209 #if defined(SOCK_CLOEXEC)
3210 host_type
|= SOCK_CLOEXEC
;
3212 return -TARGET_EINVAL
;
3215 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3216 #if defined(SOCK_NONBLOCK)
3217 host_type
|= SOCK_NONBLOCK
;
3218 #elif !defined(O_NONBLOCK)
3219 return -TARGET_EINVAL
;
3226 /* Try to emulate socket type flags after socket creation. */
3227 static int sock_flags_fixup(int fd
, int target_type
)
3229 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3230 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3231 int flags
= fcntl(fd
, F_GETFL
);
3232 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
3234 return -TARGET_EINVAL
;
3241 /* do_socket() Must return target values and target errnos. */
3242 static abi_long
do_socket(int domain
, int type
, int protocol
)
3244 int target_type
= type
;
3247 ret
= target_to_host_sock_type(&type
);
3252 if (domain
== PF_NETLINK
&& !(
3253 #ifdef CONFIG_RTNETLINK
3254 protocol
== NETLINK_ROUTE
||
3256 protocol
== NETLINK_KOBJECT_UEVENT
||
3257 protocol
== NETLINK_AUDIT
)) {
3258 return -TARGET_EPROTONOSUPPORT
;
3261 if (domain
== AF_PACKET
||
3262 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
3263 protocol
= tswap16(protocol
);
3266 ret
= get_errno(socket(domain
, type
, protocol
));
3268 ret
= sock_flags_fixup(ret
, target_type
);
3269 if (type
== SOCK_PACKET
) {
3270 /* Manage an obsolete case :
3271 * if socket type is SOCK_PACKET, bind by name
3273 fd_trans_register(ret
, &target_packet_trans
);
3274 } else if (domain
== PF_NETLINK
) {
3276 #ifdef CONFIG_RTNETLINK
3278 fd_trans_register(ret
, &target_netlink_route_trans
);
3281 case NETLINK_KOBJECT_UEVENT
:
3282 /* nothing to do: messages are strings */
3285 fd_trans_register(ret
, &target_netlink_audit_trans
);
3288 g_assert_not_reached();
3295 /* do_bind() Must return target values and target errnos. */
3296 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
3302 if ((int)addrlen
< 0) {
3303 return -TARGET_EINVAL
;
3306 addr
= alloca(addrlen
+1);
3308 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3312 return get_errno(bind(sockfd
, addr
, addrlen
));
3315 /* do_connect() Must return target values and target errnos. */
3316 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
3322 if ((int)addrlen
< 0) {
3323 return -TARGET_EINVAL
;
3326 addr
= alloca(addrlen
+1);
3328 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3332 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
3335 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3336 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
3337 int flags
, int send
)
3343 abi_ulong target_vec
;
3345 if (msgp
->msg_name
) {
3346 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
3347 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
3348 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
3349 tswapal(msgp
->msg_name
),
3351 if (ret
== -TARGET_EFAULT
) {
3352 /* For connected sockets msg_name and msg_namelen must
3353 * be ignored, so returning EFAULT immediately is wrong.
3354 * Instead, pass a bad msg_name to the host kernel, and
3355 * let it decide whether to return EFAULT or not.
3357 msg
.msg_name
= (void *)-1;
3362 msg
.msg_name
= NULL
;
3363 msg
.msg_namelen
= 0;
3365 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
3366 msg
.msg_control
= alloca(msg
.msg_controllen
);
3367 memset(msg
.msg_control
, 0, msg
.msg_controllen
);
3369 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
3371 count
= tswapal(msgp
->msg_iovlen
);
3372 target_vec
= tswapal(msgp
->msg_iov
);
3374 if (count
> IOV_MAX
) {
3375 /* sendrcvmsg returns a different errno for this condition than
3376 * readv/writev, so we must catch it here before lock_iovec() does.
3378 ret
= -TARGET_EMSGSIZE
;
3382 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
3383 target_vec
, count
, send
);
3385 ret
= -host_to_target_errno(errno
);
3388 msg
.msg_iovlen
= count
;
3392 if (fd_trans_target_to_host_data(fd
)) {
3395 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
3396 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
3397 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
3398 msg
.msg_iov
->iov_len
);
3400 msg
.msg_iov
->iov_base
= host_msg
;
3401 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3405 ret
= target_to_host_cmsg(&msg
, msgp
);
3407 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3411 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
3412 if (!is_error(ret
)) {
3414 if (fd_trans_host_to_target_data(fd
)) {
3415 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
3416 MIN(msg
.msg_iov
->iov_len
, len
));
3418 ret
= host_to_target_cmsg(msgp
, &msg
);
3420 if (!is_error(ret
)) {
3421 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
3422 msgp
->msg_flags
= tswap32(msg
.msg_flags
);
3423 if (msg
.msg_name
!= NULL
&& msg
.msg_name
!= (void *)-1) {
3424 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
3425 msg
.msg_name
, msg
.msg_namelen
);
3437 unlock_iovec(vec
, target_vec
, count
, !send
);
3442 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
3443 int flags
, int send
)
3446 struct target_msghdr
*msgp
;
3448 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
3452 return -TARGET_EFAULT
;
3454 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
3455 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
3459 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3460 * so it might not have this *mmsg-specific flag either.
3462 #ifndef MSG_WAITFORONE
3463 #define MSG_WAITFORONE 0x10000
3466 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
3467 unsigned int vlen
, unsigned int flags
,
3470 struct target_mmsghdr
*mmsgp
;
3474 if (vlen
> UIO_MAXIOV
) {
3478 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
3480 return -TARGET_EFAULT
;
3483 for (i
= 0; i
< vlen
; i
++) {
3484 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
3485 if (is_error(ret
)) {
3488 mmsgp
[i
].msg_len
= tswap32(ret
);
3489 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3490 if (flags
& MSG_WAITFORONE
) {
3491 flags
|= MSG_DONTWAIT
;
3495 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3497 /* Return number of datagrams sent if we sent any at all;
3498 * otherwise return the error.
3506 /* do_accept4() Must return target values and target errnos. */
3507 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3508 abi_ulong target_addrlen_addr
, int flags
)
3510 socklen_t addrlen
, ret_addrlen
;
3515 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3517 if (target_addr
== 0) {
3518 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
3521 /* linux returns EFAULT if addrlen pointer is invalid */
3522 if (get_user_u32(addrlen
, target_addrlen_addr
))
3523 return -TARGET_EFAULT
;
3525 if ((int)addrlen
< 0) {
3526 return -TARGET_EINVAL
;
3529 if (!access_ok(thread_cpu
, VERIFY_WRITE
, target_addr
, addrlen
)) {
3530 return -TARGET_EFAULT
;
3533 addr
= alloca(addrlen
);
3535 ret_addrlen
= addrlen
;
3536 ret
= get_errno(safe_accept4(fd
, addr
, &ret_addrlen
, host_flags
));
3537 if (!is_error(ret
)) {
3538 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3539 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3540 ret
= -TARGET_EFAULT
;
3546 /* do_getpeername() Must return target values and target errnos. */
3547 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3548 abi_ulong target_addrlen_addr
)
3550 socklen_t addrlen
, ret_addrlen
;
3554 if (get_user_u32(addrlen
, target_addrlen_addr
))
3555 return -TARGET_EFAULT
;
3557 if ((int)addrlen
< 0) {
3558 return -TARGET_EINVAL
;
3561 if (!access_ok(thread_cpu
, VERIFY_WRITE
, target_addr
, addrlen
)) {
3562 return -TARGET_EFAULT
;
3565 addr
= alloca(addrlen
);
3567 ret_addrlen
= addrlen
;
3568 ret
= get_errno(getpeername(fd
, addr
, &ret_addrlen
));
3569 if (!is_error(ret
)) {
3570 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3571 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3572 ret
= -TARGET_EFAULT
;
3578 /* do_getsockname() Must return target values and target errnos. */
3579 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3580 abi_ulong target_addrlen_addr
)
3582 socklen_t addrlen
, ret_addrlen
;
3586 if (get_user_u32(addrlen
, target_addrlen_addr
))
3587 return -TARGET_EFAULT
;
3589 if ((int)addrlen
< 0) {
3590 return -TARGET_EINVAL
;
3593 if (!access_ok(thread_cpu
, VERIFY_WRITE
, target_addr
, addrlen
)) {
3594 return -TARGET_EFAULT
;
3597 addr
= alloca(addrlen
);
3599 ret_addrlen
= addrlen
;
3600 ret
= get_errno(getsockname(fd
, addr
, &ret_addrlen
));
3601 if (!is_error(ret
)) {
3602 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3603 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3604 ret
= -TARGET_EFAULT
;
3610 /* do_socketpair() Must return target values and target errnos. */
3611 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3612 abi_ulong target_tab_addr
)
3617 target_to_host_sock_type(&type
);
3619 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3620 if (!is_error(ret
)) {
3621 if (put_user_s32(tab
[0], target_tab_addr
)
3622 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3623 ret
= -TARGET_EFAULT
;
3628 /* do_sendto() Must return target values and target errnos. */
3629 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3630 abi_ulong target_addr
, socklen_t addrlen
)
3634 void *copy_msg
= NULL
;
3637 if ((int)addrlen
< 0) {
3638 return -TARGET_EINVAL
;
3641 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3643 return -TARGET_EFAULT
;
3644 if (fd_trans_target_to_host_data(fd
)) {
3645 copy_msg
= host_msg
;
3646 host_msg
= g_malloc(len
);
3647 memcpy(host_msg
, copy_msg
, len
);
3648 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
3654 addr
= alloca(addrlen
+1);
3655 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
3659 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
3661 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
3666 host_msg
= copy_msg
;
3668 unlock_user(host_msg
, msg
, 0);
3672 /* do_recvfrom() Must return target values and target errnos. */
3673 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
3674 abi_ulong target_addr
,
3675 abi_ulong target_addrlen
)
3677 socklen_t addrlen
, ret_addrlen
;
3685 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
3687 return -TARGET_EFAULT
;
3691 if (get_user_u32(addrlen
, target_addrlen
)) {
3692 ret
= -TARGET_EFAULT
;
3695 if ((int)addrlen
< 0) {
3696 ret
= -TARGET_EINVAL
;
3699 addr
= alloca(addrlen
);
3700 ret_addrlen
= addrlen
;
3701 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
3702 addr
, &ret_addrlen
));
3704 addr
= NULL
; /* To keep compiler quiet. */
3705 addrlen
= 0; /* To keep compiler quiet. */
3706 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
3708 if (!is_error(ret
)) {
3709 if (fd_trans_host_to_target_data(fd
)) {
3711 trans
= fd_trans_host_to_target_data(fd
)(host_msg
, MIN(ret
, len
));
3712 if (is_error(trans
)) {
3718 host_to_target_sockaddr(target_addr
, addr
,
3719 MIN(addrlen
, ret_addrlen
));
3720 if (put_user_u32(ret_addrlen
, target_addrlen
)) {
3721 ret
= -TARGET_EFAULT
;
3725 unlock_user(host_msg
, msg
, len
);
3728 unlock_user(host_msg
, msg
, 0);
3733 #ifdef TARGET_NR_socketcall
3734 /* do_socketcall() must return target values and target errnos. */
3735 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
3737 static const unsigned nargs
[] = { /* number of arguments per operation */
3738 [TARGET_SYS_SOCKET
] = 3, /* domain, type, protocol */
3739 [TARGET_SYS_BIND
] = 3, /* fd, addr, addrlen */
3740 [TARGET_SYS_CONNECT
] = 3, /* fd, addr, addrlen */
3741 [TARGET_SYS_LISTEN
] = 2, /* fd, backlog */
3742 [TARGET_SYS_ACCEPT
] = 3, /* fd, addr, addrlen */
3743 [TARGET_SYS_GETSOCKNAME
] = 3, /* fd, addr, addrlen */
3744 [TARGET_SYS_GETPEERNAME
] = 3, /* fd, addr, addrlen */
3745 [TARGET_SYS_SOCKETPAIR
] = 4, /* domain, type, protocol, tab */
3746 [TARGET_SYS_SEND
] = 4, /* fd, msg, len, flags */
3747 [TARGET_SYS_RECV
] = 4, /* fd, msg, len, flags */
3748 [TARGET_SYS_SENDTO
] = 6, /* fd, msg, len, flags, addr, addrlen */
3749 [TARGET_SYS_RECVFROM
] = 6, /* fd, msg, len, flags, addr, addrlen */
3750 [TARGET_SYS_SHUTDOWN
] = 2, /* fd, how */
3751 [TARGET_SYS_SETSOCKOPT
] = 5, /* fd, level, optname, optval, optlen */
3752 [TARGET_SYS_GETSOCKOPT
] = 5, /* fd, level, optname, optval, optlen */
3753 [TARGET_SYS_SENDMSG
] = 3, /* fd, msg, flags */
3754 [TARGET_SYS_RECVMSG
] = 3, /* fd, msg, flags */
3755 [TARGET_SYS_ACCEPT4
] = 4, /* fd, addr, addrlen, flags */
3756 [TARGET_SYS_RECVMMSG
] = 4, /* fd, msgvec, vlen, flags */
3757 [TARGET_SYS_SENDMMSG
] = 4, /* fd, msgvec, vlen, flags */
3759 abi_long a
[6]; /* max 6 args */
3762 /* check the range of the first argument num */
3763 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3764 if (num
< 1 || num
> TARGET_SYS_SENDMMSG
) {
3765 return -TARGET_EINVAL
;
3767 /* ensure we have space for args */
3768 if (nargs
[num
] > ARRAY_SIZE(a
)) {
3769 return -TARGET_EINVAL
;
3771 /* collect the arguments in a[] according to nargs[] */
3772 for (i
= 0; i
< nargs
[num
]; ++i
) {
3773 if (get_user_ual(a
[i
], vptr
+ i
* sizeof(abi_long
)) != 0) {
3774 return -TARGET_EFAULT
;
3777 /* now when we have the args, invoke the appropriate underlying function */
3779 case TARGET_SYS_SOCKET
: /* domain, type, protocol */
3780 return do_socket(a
[0], a
[1], a
[2]);
3781 case TARGET_SYS_BIND
: /* sockfd, addr, addrlen */
3782 return do_bind(a
[0], a
[1], a
[2]);
3783 case TARGET_SYS_CONNECT
: /* sockfd, addr, addrlen */
3784 return do_connect(a
[0], a
[1], a
[2]);
3785 case TARGET_SYS_LISTEN
: /* sockfd, backlog */
3786 return get_errno(listen(a
[0], a
[1]));
3787 case TARGET_SYS_ACCEPT
: /* sockfd, addr, addrlen */
3788 return do_accept4(a
[0], a
[1], a
[2], 0);
3789 case TARGET_SYS_GETSOCKNAME
: /* sockfd, addr, addrlen */
3790 return do_getsockname(a
[0], a
[1], a
[2]);
3791 case TARGET_SYS_GETPEERNAME
: /* sockfd, addr, addrlen */
3792 return do_getpeername(a
[0], a
[1], a
[2]);
3793 case TARGET_SYS_SOCKETPAIR
: /* domain, type, protocol, tab */
3794 return do_socketpair(a
[0], a
[1], a
[2], a
[3]);
3795 case TARGET_SYS_SEND
: /* sockfd, msg, len, flags */
3796 return do_sendto(a
[0], a
[1], a
[2], a
[3], 0, 0);
3797 case TARGET_SYS_RECV
: /* sockfd, msg, len, flags */
3798 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], 0, 0);
3799 case TARGET_SYS_SENDTO
: /* sockfd, msg, len, flags, addr, addrlen */
3800 return do_sendto(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3801 case TARGET_SYS_RECVFROM
: /* sockfd, msg, len, flags, addr, addrlen */
3802 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3803 case TARGET_SYS_SHUTDOWN
: /* sockfd, how */
3804 return get_errno(shutdown(a
[0], a
[1]));
3805 case TARGET_SYS_SETSOCKOPT
: /* sockfd, level, optname, optval, optlen */
3806 return do_setsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3807 case TARGET_SYS_GETSOCKOPT
: /* sockfd, level, optname, optval, optlen */
3808 return do_getsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3809 case TARGET_SYS_SENDMSG
: /* sockfd, msg, flags */
3810 return do_sendrecvmsg(a
[0], a
[1], a
[2], 1);
3811 case TARGET_SYS_RECVMSG
: /* sockfd, msg, flags */
3812 return do_sendrecvmsg(a
[0], a
[1], a
[2], 0);
3813 case TARGET_SYS_ACCEPT4
: /* sockfd, addr, addrlen, flags */
3814 return do_accept4(a
[0], a
[1], a
[2], a
[3]);
3815 case TARGET_SYS_RECVMMSG
: /* sockfd, msgvec, vlen, flags */
3816 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 0);
3817 case TARGET_SYS_SENDMMSG
: /* sockfd, msgvec, vlen, flags */
3818 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 1);
3820 qemu_log_mask(LOG_UNIMP
, "Unsupported socketcall: %d\n", num
);
3821 return -TARGET_EINVAL
;
3826 #define N_SHM_REGIONS 32
3828 static struct shm_region
{
3832 } shm_regions
[N_SHM_REGIONS
];
3834 #ifndef TARGET_SEMID64_DS
3835 /* asm-generic version of this struct */
3836 struct target_semid64_ds
3838 struct target_ipc_perm sem_perm
;
3839 abi_ulong sem_otime
;
3840 #if TARGET_ABI_BITS == 32
3841 abi_ulong __unused1
;
3843 abi_ulong sem_ctime
;
3844 #if TARGET_ABI_BITS == 32
3845 abi_ulong __unused2
;
3847 abi_ulong sem_nsems
;
3848 abi_ulong __unused3
;
3849 abi_ulong __unused4
;
3853 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
3854 abi_ulong target_addr
)
3856 struct target_ipc_perm
*target_ip
;
3857 struct target_semid64_ds
*target_sd
;
3859 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3860 return -TARGET_EFAULT
;
3861 target_ip
= &(target_sd
->sem_perm
);
3862 host_ip
->__key
= tswap32(target_ip
->__key
);
3863 host_ip
->uid
= tswap32(target_ip
->uid
);
3864 host_ip
->gid
= tswap32(target_ip
->gid
);
3865 host_ip
->cuid
= tswap32(target_ip
->cuid
);
3866 host_ip
->cgid
= tswap32(target_ip
->cgid
);
3867 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3868 host_ip
->mode
= tswap32(target_ip
->mode
);
3870 host_ip
->mode
= tswap16(target_ip
->mode
);
3872 #if defined(TARGET_PPC)
3873 host_ip
->__seq
= tswap32(target_ip
->__seq
);
3875 host_ip
->__seq
= tswap16(target_ip
->__seq
);
3877 unlock_user_struct(target_sd
, target_addr
, 0);
3881 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
3882 struct ipc_perm
*host_ip
)
3884 struct target_ipc_perm
*target_ip
;
3885 struct target_semid64_ds
*target_sd
;
3887 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3888 return -TARGET_EFAULT
;
3889 target_ip
= &(target_sd
->sem_perm
);
3890 target_ip
->__key
= tswap32(host_ip
->__key
);
3891 target_ip
->uid
= tswap32(host_ip
->uid
);
3892 target_ip
->gid
= tswap32(host_ip
->gid
);
3893 target_ip
->cuid
= tswap32(host_ip
->cuid
);
3894 target_ip
->cgid
= tswap32(host_ip
->cgid
);
3895 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3896 target_ip
->mode
= tswap32(host_ip
->mode
);
3898 target_ip
->mode
= tswap16(host_ip
->mode
);
3900 #if defined(TARGET_PPC)
3901 target_ip
->__seq
= tswap32(host_ip
->__seq
);
3903 target_ip
->__seq
= tswap16(host_ip
->__seq
);
3905 unlock_user_struct(target_sd
, target_addr
, 1);
3909 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
3910 abi_ulong target_addr
)
3912 struct target_semid64_ds
*target_sd
;
3914 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3915 return -TARGET_EFAULT
;
3916 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
3917 return -TARGET_EFAULT
;
3918 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
3919 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
3920 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
3921 unlock_user_struct(target_sd
, target_addr
, 0);
3925 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
3926 struct semid_ds
*host_sd
)
3928 struct target_semid64_ds
*target_sd
;
3930 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3931 return -TARGET_EFAULT
;
3932 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
3933 return -TARGET_EFAULT
;
3934 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
3935 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
3936 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
3937 unlock_user_struct(target_sd
, target_addr
, 1);
/* Guest-layout mirror of the kernel's struct seminfo (IPC_INFO/SEM_INFO). */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
3954 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
3955 struct seminfo
*host_seminfo
)
3957 struct target_seminfo
*target_seminfo
;
3958 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
3959 return -TARGET_EFAULT
;
3960 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
3961 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
3962 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
3963 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
3964 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
3965 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
3966 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
3967 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
3968 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
3969 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
3970 unlock_user_struct(target_seminfo
, target_addr
, 1);
3976 struct semid_ds
*buf
;
3977 unsigned short *array
;
3978 struct seminfo
*__buf
;
3981 union target_semun
{
3988 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
3989 abi_ulong target_addr
)
3992 unsigned short *array
;
3994 struct semid_ds semid_ds
;
3997 semun
.buf
= &semid_ds
;
3999 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4001 return get_errno(ret
);
4003 nsems
= semid_ds
.sem_nsems
;
4005 *host_array
= g_try_new(unsigned short, nsems
);
4007 return -TARGET_ENOMEM
;
4009 array
= lock_user(VERIFY_READ
, target_addr
,
4010 nsems
*sizeof(unsigned short), 1);
4012 g_free(*host_array
);
4013 return -TARGET_EFAULT
;
4016 for(i
=0; i
<nsems
; i
++) {
4017 __get_user((*host_array
)[i
], &array
[i
]);
4019 unlock_user(array
, target_addr
, 0);
4024 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
4025 unsigned short **host_array
)
4028 unsigned short *array
;
4030 struct semid_ds semid_ds
;
4033 semun
.buf
= &semid_ds
;
4035 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4037 return get_errno(ret
);
4039 nsems
= semid_ds
.sem_nsems
;
4041 array
= lock_user(VERIFY_WRITE
, target_addr
,
4042 nsems
*sizeof(unsigned short), 0);
4044 return -TARGET_EFAULT
;
4046 for(i
=0; i
<nsems
; i
++) {
4047 __put_user((*host_array
)[i
], &array
[i
]);
4049 g_free(*host_array
);
4050 unlock_user(array
, target_addr
, 1);
4055 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
4056 abi_ulong target_arg
)
4058 union target_semun target_su
= { .buf
= target_arg
};
4060 struct semid_ds dsarg
;
4061 unsigned short *array
= NULL
;
4062 struct seminfo seminfo
;
4063 abi_long ret
= -TARGET_EINVAL
;
4070 /* In 64 bit cross-endian situations, we will erroneously pick up
4071 * the wrong half of the union for the "val" element. To rectify
4072 * this, the entire 8-byte structure is byteswapped, followed by
4073 * a swap of the 4 byte val field. In other cases, the data is
4074 * already in proper host byte order. */
4075 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
4076 target_su
.buf
= tswapal(target_su
.buf
);
4077 arg
.val
= tswap32(target_su
.val
);
4079 arg
.val
= target_su
.val
;
4081 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4085 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
4089 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4090 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
4097 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
4101 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4102 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
4108 arg
.__buf
= &seminfo
;
4109 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4110 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
4118 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
/* Guest-layout mirror of struct sembuf (one semop operation). */
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};
4131 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
4132 abi_ulong target_addr
,
4135 struct target_sembuf
*target_sembuf
;
4138 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
4139 nsops
*sizeof(struct target_sembuf
), 1);
4141 return -TARGET_EFAULT
;
4143 for(i
=0; i
<nsops
; i
++) {
4144 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
4145 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
4146 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
4149 unlock_user(target_sembuf
, target_addr
, 0);
4154 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4155 defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4158 * This macro is required to handle the s390 variants, which passes the
4159 * arguments in a different order than default.
4162 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4163 (__nsops), (__timeout), (__sops)
4165 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4166 (__nsops), 0, (__sops), (__timeout)
4169 static inline abi_long
do_semtimedop(int semid
,
4172 abi_long timeout
, bool time64
)
4174 struct sembuf
*sops
;
4175 struct timespec ts
, *pts
= NULL
;
4181 if (target_to_host_timespec64(pts
, timeout
)) {
4182 return -TARGET_EFAULT
;
4185 if (target_to_host_timespec(pts
, timeout
)) {
4186 return -TARGET_EFAULT
;
4191 if (nsops
> TARGET_SEMOPM
) {
4192 return -TARGET_E2BIG
;
4195 sops
= g_new(struct sembuf
, nsops
);
4197 if (target_to_host_sembuf(sops
, ptr
, nsops
)) {
4199 return -TARGET_EFAULT
;
4202 ret
= -TARGET_ENOSYS
;
4203 #ifdef __NR_semtimedop
4204 ret
= get_errno(safe_semtimedop(semid
, sops
, nsops
, pts
));
4207 if (ret
== -TARGET_ENOSYS
) {
4208 ret
= get_errno(safe_ipc(IPCOP_semtimedop
, semid
,
4209 SEMTIMEDOP_IPC_ARGS(nsops
, sops
, (long)pts
)));
4217 struct target_msqid_ds
4219 struct target_ipc_perm msg_perm
;
4220 abi_ulong msg_stime
;
4221 #if TARGET_ABI_BITS == 32
4222 abi_ulong __unused1
;
4224 abi_ulong msg_rtime
;
4225 #if TARGET_ABI_BITS == 32
4226 abi_ulong __unused2
;
4228 abi_ulong msg_ctime
;
4229 #if TARGET_ABI_BITS == 32
4230 abi_ulong __unused3
;
4232 abi_ulong __msg_cbytes
;
4234 abi_ulong msg_qbytes
;
4235 abi_ulong msg_lspid
;
4236 abi_ulong msg_lrpid
;
4237 abi_ulong __unused4
;
4238 abi_ulong __unused5
;
4241 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
4242 abi_ulong target_addr
)
4244 struct target_msqid_ds
*target_md
;
4246 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
4247 return -TARGET_EFAULT
;
4248 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
4249 return -TARGET_EFAULT
;
4250 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
4251 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
4252 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
4253 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
4254 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
4255 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
4256 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
4257 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
4258 unlock_user_struct(target_md
, target_addr
, 0);
4262 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
4263 struct msqid_ds
*host_md
)
4265 struct target_msqid_ds
*target_md
;
4267 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
4268 return -TARGET_EFAULT
;
4269 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
4270 return -TARGET_EFAULT
;
4271 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
4272 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
4273 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
4274 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
4275 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
4276 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
4277 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
4278 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
4279 unlock_user_struct(target_md
, target_addr
, 1);
/* Guest-layout mirror of struct msginfo (IPC_INFO/MSG_INFO). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
4294 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
4295 struct msginfo
*host_msginfo
)
4297 struct target_msginfo
*target_msginfo
;
4298 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
4299 return -TARGET_EFAULT
;
4300 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
4301 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
4302 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
4303 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
4304 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
4305 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
4306 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
4307 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
4308 unlock_user_struct(target_msginfo
, target_addr
, 1);
4312 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
4314 struct msqid_ds dsarg
;
4315 struct msginfo msginfo
;
4316 abi_long ret
= -TARGET_EINVAL
;
4324 if (target_to_host_msqid_ds(&dsarg
,ptr
))
4325 return -TARGET_EFAULT
;
4326 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
4327 if (host_to_target_msqid_ds(ptr
,&dsarg
))
4328 return -TARGET_EFAULT
;
4331 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
4335 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
4336 if (host_to_target_msginfo(ptr
, &msginfo
))
4337 return -TARGET_EFAULT
;
4344 struct target_msgbuf
{
4349 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
4350 ssize_t msgsz
, int msgflg
)
4352 struct target_msgbuf
*target_mb
;
4353 struct msgbuf
*host_mb
;
4357 return -TARGET_EINVAL
;
4360 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
4361 return -TARGET_EFAULT
;
4362 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4364 unlock_user_struct(target_mb
, msgp
, 0);
4365 return -TARGET_ENOMEM
;
4367 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
4368 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
4369 ret
= -TARGET_ENOSYS
;
4371 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
4374 if (ret
== -TARGET_ENOSYS
) {
4376 ret
= get_errno(safe_ipc(IPCOP_msgsnd
, msqid
, msgsz
, msgflg
,
4379 ret
= get_errno(safe_ipc(IPCOP_msgsnd
, msqid
, msgsz
, msgflg
,
4385 unlock_user_struct(target_mb
, msgp
, 0);
#if defined(__sparc__)
/* SPARC for msgrcv it does not use the kludge on final 2 arguments.  */
#define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
#elif defined(__s390x__)
/* The s390 sys_ipc variant has only five parameters.  */
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp})
#else
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp}), 0
#endif
4404 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
4405 ssize_t msgsz
, abi_long msgtyp
,
4408 struct target_msgbuf
*target_mb
;
4410 struct msgbuf
*host_mb
;
4414 return -TARGET_EINVAL
;
4417 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
4418 return -TARGET_EFAULT
;
4420 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4422 ret
= -TARGET_ENOMEM
;
4425 ret
= -TARGET_ENOSYS
;
4427 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
4430 if (ret
== -TARGET_ENOSYS
) {
4431 ret
= get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv
), msqid
, msgsz
,
4432 msgflg
, MSGRCV_ARGS(host_mb
, msgtyp
)));
4437 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
4438 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
4439 if (!target_mtext
) {
4440 ret
= -TARGET_EFAULT
;
4443 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
4444 unlock_user(target_mtext
, target_mtext_addr
, ret
);
4447 target_mb
->mtype
= tswapal(host_mb
->mtype
);
4451 unlock_user_struct(target_mb
, msgp
, 1);
4456 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
4457 abi_ulong target_addr
)
4459 struct target_shmid_ds
*target_sd
;
4461 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4462 return -TARGET_EFAULT
;
4463 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
4464 return -TARGET_EFAULT
;
4465 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4466 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4467 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4468 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4469 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4470 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4471 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4472 unlock_user_struct(target_sd
, target_addr
, 0);
4476 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
4477 struct shmid_ds
*host_sd
)
4479 struct target_shmid_ds
*target_sd
;
4481 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4482 return -TARGET_EFAULT
;
4483 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
4484 return -TARGET_EFAULT
;
4485 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4486 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4487 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4488 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4489 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4490 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4491 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4492 unlock_user_struct(target_sd
, target_addr
, 1);
4496 struct target_shminfo
{
4504 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
4505 struct shminfo
*host_shminfo
)
4507 struct target_shminfo
*target_shminfo
;
4508 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
4509 return -TARGET_EFAULT
;
4510 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
4511 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
4512 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
4513 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
4514 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
4515 unlock_user_struct(target_shminfo
, target_addr
, 1);
4519 struct target_shm_info
{
4524 abi_ulong swap_attempts
;
4525 abi_ulong swap_successes
;
4528 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
4529 struct shm_info
*host_shm_info
)
4531 struct target_shm_info
*target_shm_info
;
4532 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
4533 return -TARGET_EFAULT
;
4534 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
4535 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
4536 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
4537 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
4538 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
4539 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
4540 unlock_user_struct(target_shm_info
, target_addr
, 1);
4544 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
4546 struct shmid_ds dsarg
;
4547 struct shminfo shminfo
;
4548 struct shm_info shm_info
;
4549 abi_long ret
= -TARGET_EINVAL
;
4557 if (target_to_host_shmid_ds(&dsarg
, buf
))
4558 return -TARGET_EFAULT
;
4559 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
4560 if (host_to_target_shmid_ds(buf
, &dsarg
))
4561 return -TARGET_EFAULT
;
4564 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
4565 if (host_to_target_shminfo(buf
, &shminfo
))
4566 return -TARGET_EFAULT
;
4569 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
4570 if (host_to_target_shm_info(buf
, &shm_info
))
4571 return -TARGET_EFAULT
;
4576 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
4583 #ifndef TARGET_FORCE_SHMLBA
4584 /* For most architectures, SHMLBA is the same as the page size;
4585 * some architectures have larger values, in which case they should
4586 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4587 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4588 * and defining its own value for SHMLBA.
4590 * The kernel also permits SHMLBA to be set by the architecture to a
4591 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4592 * this means that addresses are rounded to the large size if
4593 * SHM_RND is set but addresses not aligned to that size are not rejected
4594 * as long as they are at least page-aligned. Since the only architecture
4595 * which uses this is ia64 this code doesn't provide for that oddity.
4597 static inline abi_ulong
target_shmlba(CPUArchState
*cpu_env
)
4599 return TARGET_PAGE_SIZE
;
4603 static inline abi_ulong
do_shmat(CPUArchState
*cpu_env
,
4604 int shmid
, abi_ulong shmaddr
, int shmflg
)
4608 struct shmid_ds shm_info
;
4612 /* shmat pointers are always untagged */
4614 /* find out the length of the shared memory segment */
4615 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
4616 if (is_error(ret
)) {
4617 /* can't get length, bail out */
4621 shmlba
= target_shmlba(cpu_env
);
4623 if (shmaddr
& (shmlba
- 1)) {
4624 if (shmflg
& SHM_RND
) {
4625 shmaddr
&= ~(shmlba
- 1);
4627 return -TARGET_EINVAL
;
4630 if (!guest_range_valid_untagged(shmaddr
, shm_info
.shm_segsz
)) {
4631 return -TARGET_EINVAL
;
4637 host_raddr
= shmat(shmid
, (void *)g2h_untagged(shmaddr
), shmflg
);
4639 abi_ulong mmap_start
;
4641 /* In order to use the host shmat, we need to honor host SHMLBA. */
4642 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
, MAX(SHMLBA
, shmlba
));
4644 if (mmap_start
== -1) {
4646 host_raddr
= (void *)-1;
4648 host_raddr
= shmat(shmid
, g2h_untagged(mmap_start
),
4649 shmflg
| SHM_REMAP
);
4652 if (host_raddr
== (void *)-1) {
4654 return get_errno((long)host_raddr
);
4656 raddr
=h2g((unsigned long)host_raddr
);
4658 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
4659 PAGE_VALID
| PAGE_RESET
| PAGE_READ
|
4660 (shmflg
& SHM_RDONLY
? 0 : PAGE_WRITE
));
4662 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
4663 if (!shm_regions
[i
].in_use
) {
4664 shm_regions
[i
].in_use
= true;
4665 shm_regions
[i
].start
= raddr
;
4666 shm_regions
[i
].size
= shm_info
.shm_segsz
;
4676 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
4681 /* shmdt pointers are always untagged */
4685 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
4686 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
4687 shm_regions
[i
].in_use
= false;
4688 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
4692 rv
= get_errno(shmdt(g2h_untagged(shmaddr
)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semtimedop(first, ptr, second, 0, false);
        break;
    case IPCOP_semtimedop:
    /*
     * The s390 sys_ipc variant has only five parameters instead of six
     * (as for default variant) and the only difference is the handling of
     * SEMTIMEDOP where on s390 the third parameter is used as a pointer
     * to a struct timespec where the generic variant uses fifth parameter.
     */
#if defined(TARGET_S390X)
        ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
#else
        ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
#endif
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old-style msgrcv passes msgp/msgtyp via a kludge struct. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                                tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
                      call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
4820 /* kernel structure types definitions */
4822 #define STRUCT(name, ...) STRUCT_ ## name,
4823 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4825 #include "syscall_types.h"
4829 #undef STRUCT_SPECIAL
4831 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4832 #define STRUCT_SPECIAL(name)
4833 #include "syscall_types.h"
4835 #undef STRUCT_SPECIAL
4837 #define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

/*
 * Thunk for FS_IOC_FIEMAP.  The parameter is a struct fiemap followed
 * by an array of struct fiemap_extent sized by fm_extent_count; the
 * kernel fills in the extents, which we convert back one by one.
 */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
4928 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4929 int fd
, int cmd
, abi_long arg
)
4931 const argtype
*arg_type
= ie
->arg_type
;
4935 struct ifconf
*host_ifconf
;
4937 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
4938 const argtype ifreq_max_type
[] = { MK_STRUCT(STRUCT_ifmap_ifreq
) };
4939 int target_ifreq_size
;
4944 abi_long target_ifc_buf
;
4948 assert(arg_type
[0] == TYPE_PTR
);
4949 assert(ie
->access
== IOC_RW
);
4952 target_size
= thunk_type_size(arg_type
, 0);
4954 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4956 return -TARGET_EFAULT
;
4957 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4958 unlock_user(argptr
, arg
, 0);
4960 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
4961 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
4962 target_ifreq_size
= thunk_type_size(ifreq_max_type
, 0);
4964 if (target_ifc_buf
!= 0) {
4965 target_ifc_len
= host_ifconf
->ifc_len
;
4966 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
4967 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
4969 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
4970 if (outbufsz
> MAX_STRUCT_SIZE
) {
4972 * We can't fit all the extents into the fixed size buffer.
4973 * Allocate one that is large enough and use it instead.
4975 host_ifconf
= malloc(outbufsz
);
4977 return -TARGET_ENOMEM
;
4979 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
4982 host_ifc_buf
= (char *)host_ifconf
+ sizeof(*host_ifconf
);
4984 host_ifconf
->ifc_len
= host_ifc_len
;
4986 host_ifc_buf
= NULL
;
4988 host_ifconf
->ifc_buf
= host_ifc_buf
;
4990 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
4991 if (!is_error(ret
)) {
4992 /* convert host ifc_len to target ifc_len */
4994 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
4995 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
4996 host_ifconf
->ifc_len
= target_ifc_len
;
4998 /* restore target ifc_buf */
5000 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
5002 /* copy struct ifconf to target user */
5004 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5006 return -TARGET_EFAULT
;
5007 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
5008 unlock_user(argptr
, arg
, target_size
);
5010 if (target_ifc_buf
!= 0) {
5011 /* copy ifreq[] to target user */
5012 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
5013 for (i
= 0; i
< nb_ifreq
; i
++) {
5014 thunk_convert(argptr
+ i
* target_ifreq_size
,
5015 host_ifc_buf
+ i
* sizeof(struct ifreq
),
5016 ifreq_arg_type
, THUNK_TARGET
);
5018 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
5029 #if defined(CONFIG_USBFS)
5030 #if HOST_LONG_BITS > 64
5031 #error USBDEVFS thunks do not support >64 bit hosts yet.
5034 uint64_t target_urb_adr
;
5035 uint64_t target_buf_adr
;
5036 char *target_buf_ptr
;
5037 struct usbdevfs_urb host_urb
;
5040 static GHashTable
*usbdevfs_urb_hashtable(void)
5042 static GHashTable
*urb_hashtable
;
5044 if (!urb_hashtable
) {
5045 urb_hashtable
= g_hash_table_new(g_int64_hash
, g_int64_equal
);
5047 return urb_hashtable
;
/*
 * Start tracking a submitted URB.  target_urb_adr is the first field of
 * struct live_urb, so the urb pointer itself doubles as the gint64 key.
 */
static void urb_hashtable_insert(struct live_urb *urb)
{
    g_hash_table_insert(usbdevfs_urb_hashtable(), urb, urb);
}
/*
 * Map a guest urb address back to its live_urb record,
 * or NULL if nothing was submitted at that address.
 */
static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
{
    return g_hash_table_lookup(usbdevfs_urb_hashtable(), &target_urb_adr);
}
/* Stop tracking an URB once it has been reaped or discarded. */
static void urb_hashtable_remove(struct live_urb *urb)
{
    g_hash_table_remove(usbdevfs_urb_hashtable(), urb);
}
5069 do_ioctl_usbdevfs_reapurb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5070 int fd
, int cmd
, abi_long arg
)
5072 const argtype usbfsurb_arg_type
[] = { MK_STRUCT(STRUCT_usbdevfs_urb
) };
5073 const argtype ptrvoid_arg_type
[] = { TYPE_PTRVOID
, 0, 0 };
5074 struct live_urb
*lurb
;
5078 uintptr_t target_urb_adr
;
5081 target_size
= thunk_type_size(usbfsurb_arg_type
, THUNK_TARGET
);
5083 memset(buf_temp
, 0, sizeof(uint64_t));
5084 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5085 if (is_error(ret
)) {
5089 memcpy(&hurb
, buf_temp
, sizeof(uint64_t));
5090 lurb
= (void *)((uintptr_t)hurb
- offsetof(struct live_urb
, host_urb
));
5091 if (!lurb
->target_urb_adr
) {
5092 return -TARGET_EFAULT
;
5094 urb_hashtable_remove(lurb
);
5095 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
,
5096 lurb
->host_urb
.buffer_length
);
5097 lurb
->target_buf_ptr
= NULL
;
5099 /* restore the guest buffer pointer */
5100 lurb
->host_urb
.buffer
= (void *)(uintptr_t)lurb
->target_buf_adr
;
5102 /* update the guest urb struct */
5103 argptr
= lock_user(VERIFY_WRITE
, lurb
->target_urb_adr
, target_size
, 0);
5106 return -TARGET_EFAULT
;
5108 thunk_convert(argptr
, &lurb
->host_urb
, usbfsurb_arg_type
, THUNK_TARGET
);
5109 unlock_user(argptr
, lurb
->target_urb_adr
, target_size
);
5111 target_size
= thunk_type_size(ptrvoid_arg_type
, THUNK_TARGET
);
5112 /* write back the urb handle */
5113 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5116 return -TARGET_EFAULT
;
5119 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5120 target_urb_adr
= lurb
->target_urb_adr
;
5121 thunk_convert(argptr
, &target_urb_adr
, ptrvoid_arg_type
, THUNK_TARGET
);
5122 unlock_user(argptr
, arg
, target_size
);
5129 do_ioctl_usbdevfs_discardurb(const IOCTLEntry
*ie
,
5130 uint8_t *buf_temp
__attribute__((unused
)),
5131 int fd
, int cmd
, abi_long arg
)
5133 struct live_urb
*lurb
;
5135 /* map target address back to host URB with metadata. */
5136 lurb
= urb_hashtable_lookup(arg
);
5138 return -TARGET_EFAULT
;
5140 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
5144 do_ioctl_usbdevfs_submiturb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5145 int fd
, int cmd
, abi_long arg
)
5147 const argtype
*arg_type
= ie
->arg_type
;
5152 struct live_urb
*lurb
;
5155 * each submitted URB needs to map to a unique ID for the
5156 * kernel, and that unique ID needs to be a pointer to
5157 * host memory. hence, we need to malloc for each URB.
5158 * isochronous transfers have a variable length struct.
5161 target_size
= thunk_type_size(arg_type
, THUNK_TARGET
);
5163 /* construct host copy of urb and metadata */
5164 lurb
= g_try_malloc0(sizeof(struct live_urb
));
5166 return -TARGET_ENOMEM
;
5169 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5172 return -TARGET_EFAULT
;
5174 thunk_convert(&lurb
->host_urb
, argptr
, arg_type
, THUNK_HOST
);
5175 unlock_user(argptr
, arg
, 0);
5177 lurb
->target_urb_adr
= arg
;
5178 lurb
->target_buf_adr
= (uintptr_t)lurb
->host_urb
.buffer
;
5180 /* buffer space used depends on endpoint type so lock the entire buffer */
5181 /* control type urbs should check the buffer contents for true direction */
5182 rw_dir
= lurb
->host_urb
.endpoint
& USB_DIR_IN
? VERIFY_WRITE
: VERIFY_READ
;
5183 lurb
->target_buf_ptr
= lock_user(rw_dir
, lurb
->target_buf_adr
,
5184 lurb
->host_urb
.buffer_length
, 1);
5185 if (lurb
->target_buf_ptr
== NULL
) {
5187 return -TARGET_EFAULT
;
5190 /* update buffer pointer in host copy */
5191 lurb
->host_urb
.buffer
= lurb
->target_buf_ptr
;
5193 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
5194 if (is_error(ret
)) {
5195 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
, 0);
5198 urb_hashtable_insert(lurb
);
5203 #endif /* CONFIG_USBFS */
5205 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5206 int cmd
, abi_long arg
)
5209 struct dm_ioctl
*host_dm
;
5210 abi_long guest_data
;
5211 uint32_t guest_data_size
;
5213 const argtype
*arg_type
= ie
->arg_type
;
5215 void *big_buf
= NULL
;
5219 target_size
= thunk_type_size(arg_type
, 0);
5220 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5222 ret
= -TARGET_EFAULT
;
5225 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5226 unlock_user(argptr
, arg
, 0);
5228 /* buf_temp is too small, so fetch things into a bigger buffer */
5229 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
5230 memcpy(big_buf
, buf_temp
, target_size
);
5234 guest_data
= arg
+ host_dm
->data_start
;
5235 if ((guest_data
- arg
) < 0) {
5236 ret
= -TARGET_EINVAL
;
5239 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5240 host_data
= (char*)host_dm
+ host_dm
->data_start
;
5242 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
5244 ret
= -TARGET_EFAULT
;
5248 switch (ie
->host_cmd
) {
5250 case DM_LIST_DEVICES
:
5253 case DM_DEV_SUSPEND
:
5256 case DM_TABLE_STATUS
:
5257 case DM_TABLE_CLEAR
:
5259 case DM_LIST_VERSIONS
:
5263 case DM_DEV_SET_GEOMETRY
:
5264 /* data contains only strings */
5265 memcpy(host_data
, argptr
, guest_data_size
);
5268 memcpy(host_data
, argptr
, guest_data_size
);
5269 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
5273 void *gspec
= argptr
;
5274 void *cur_data
= host_data
;
5275 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5276 int spec_size
= thunk_type_size(arg_type
, 0);
5279 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5280 struct dm_target_spec
*spec
= cur_data
;
5284 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
5285 slen
= strlen((char*)gspec
+ spec_size
) + 1;
5287 spec
->next
= sizeof(*spec
) + slen
;
5288 strcpy((char*)&spec
[1], gspec
+ spec_size
);
5290 cur_data
+= spec
->next
;
5295 ret
= -TARGET_EINVAL
;
5296 unlock_user(argptr
, guest_data
, 0);
5299 unlock_user(argptr
, guest_data
, 0);
5301 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5302 if (!is_error(ret
)) {
5303 guest_data
= arg
+ host_dm
->data_start
;
5304 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5305 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
5306 switch (ie
->host_cmd
) {
5311 case DM_DEV_SUSPEND
:
5314 case DM_TABLE_CLEAR
:
5316 case DM_DEV_SET_GEOMETRY
:
5317 /* no return data */
5319 case DM_LIST_DEVICES
:
5321 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
5322 uint32_t remaining_data
= guest_data_size
;
5323 void *cur_data
= argptr
;
5324 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
5325 int nl_size
= 12; /* can't use thunk_size due to alignment */
5328 uint32_t next
= nl
->next
;
5330 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
5332 if (remaining_data
< nl
->next
) {
5333 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5336 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
5337 strcpy(cur_data
+ nl_size
, nl
->name
);
5338 cur_data
+= nl
->next
;
5339 remaining_data
-= nl
->next
;
5343 nl
= (void*)nl
+ next
;
5348 case DM_TABLE_STATUS
:
5350 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
5351 void *cur_data
= argptr
;
5352 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5353 int spec_size
= thunk_type_size(arg_type
, 0);
5356 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5357 uint32_t next
= spec
->next
;
5358 int slen
= strlen((char*)&spec
[1]) + 1;
5359 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
5360 if (guest_data_size
< spec
->next
) {
5361 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5364 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
5365 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
5366 cur_data
= argptr
+ spec
->next
;
5367 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
5373 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
5374 int count
= *(uint32_t*)hdata
;
5375 uint64_t *hdev
= hdata
+ 8;
5376 uint64_t *gdev
= argptr
+ 8;
5379 *(uint32_t*)argptr
= tswap32(count
);
5380 for (i
= 0; i
< count
; i
++) {
5381 *gdev
= tswap64(*hdev
);
5387 case DM_LIST_VERSIONS
:
5389 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
5390 uint32_t remaining_data
= guest_data_size
;
5391 void *cur_data
= argptr
;
5392 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
5393 int vers_size
= thunk_type_size(arg_type
, 0);
5396 uint32_t next
= vers
->next
;
5398 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
5400 if (remaining_data
< vers
->next
) {
5401 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5404 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
5405 strcpy(cur_data
+ vers_size
, vers
->name
);
5406 cur_data
+= vers
->next
;
5407 remaining_data
-= vers
->next
;
5411 vers
= (void*)vers
+ next
;
5416 unlock_user(argptr
, guest_data
, 0);
5417 ret
= -TARGET_EINVAL
;
5420 unlock_user(argptr
, guest_data
, guest_data_size
);
5422 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5424 ret
= -TARGET_EFAULT
;
5427 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5428 unlock_user(argptr
, arg
, target_size
);
5435 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5436 int cmd
, abi_long arg
)
5440 const argtype
*arg_type
= ie
->arg_type
;
5441 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
5444 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
5445 struct blkpg_partition host_part
;
5447 /* Read and convert blkpg */
5449 target_size
= thunk_type_size(arg_type
, 0);
5450 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5452 ret
= -TARGET_EFAULT
;
5455 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5456 unlock_user(argptr
, arg
, 0);
5458 switch (host_blkpg
->op
) {
5459 case BLKPG_ADD_PARTITION
:
5460 case BLKPG_DEL_PARTITION
:
5461 /* payload is struct blkpg_partition */
5464 /* Unknown opcode */
5465 ret
= -TARGET_EINVAL
;
5469 /* Read and convert blkpg->data */
5470 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
5471 target_size
= thunk_type_size(part_arg_type
, 0);
5472 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5474 ret
= -TARGET_EFAULT
;
5477 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
5478 unlock_user(argptr
, arg
, 0);
5480 /* Swizzle the data pointer to our local copy and call! */
5481 host_blkpg
->data
= &host_part
;
5482 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
5488 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5489 int fd
, int cmd
, abi_long arg
)
5491 const argtype
*arg_type
= ie
->arg_type
;
5492 const StructEntry
*se
;
5493 const argtype
*field_types
;
5494 const int *dst_offsets
, *src_offsets
;
5497 abi_ulong
*target_rt_dev_ptr
= NULL
;
5498 unsigned long *host_rt_dev_ptr
= NULL
;
5502 assert(ie
->access
== IOC_W
);
5503 assert(*arg_type
== TYPE_PTR
);
5505 assert(*arg_type
== TYPE_STRUCT
);
5506 target_size
= thunk_type_size(arg_type
, 0);
5507 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5509 return -TARGET_EFAULT
;
5512 assert(*arg_type
== (int)STRUCT_rtentry
);
5513 se
= struct_entries
+ *arg_type
++;
5514 assert(se
->convert
[0] == NULL
);
5515 /* convert struct here to be able to catch rt_dev string */
5516 field_types
= se
->field_types
;
5517 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
5518 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
5519 for (i
= 0; i
< se
->nb_fields
; i
++) {
5520 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
5521 assert(*field_types
== TYPE_PTRVOID
);
5522 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
5523 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
5524 if (*target_rt_dev_ptr
!= 0) {
5525 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
5526 tswapal(*target_rt_dev_ptr
));
5527 if (!*host_rt_dev_ptr
) {
5528 unlock_user(argptr
, arg
, 0);
5529 return -TARGET_EFAULT
;
5532 *host_rt_dev_ptr
= 0;
5537 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
5538 argptr
+ src_offsets
[i
],
5539 field_types
, THUNK_HOST
);
5541 unlock_user(argptr
, arg
, 0);
5543 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5545 assert(host_rt_dev_ptr
!= NULL
);
5546 assert(target_rt_dev_ptr
!= NULL
);
5547 if (*host_rt_dev_ptr
!= 0) {
5548 unlock_user((void *)*host_rt_dev_ptr
,
5549 *target_rt_dev_ptr
, 0);
5554 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5555 int fd
, int cmd
, abi_long arg
)
5557 int sig
= target_to_host_signal(arg
);
5558 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
5561 static abi_long
do_ioctl_SIOCGSTAMP(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5562 int fd
, int cmd
, abi_long arg
)
5567 ret
= get_errno(safe_ioctl(fd
, SIOCGSTAMP
, &tv
));
5568 if (is_error(ret
)) {
5572 if (cmd
== (int)TARGET_SIOCGSTAMP_OLD
) {
5573 if (copy_to_user_timeval(arg
, &tv
)) {
5574 return -TARGET_EFAULT
;
5577 if (copy_to_user_timeval64(arg
, &tv
)) {
5578 return -TARGET_EFAULT
;
5585 static abi_long
do_ioctl_SIOCGSTAMPNS(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5586 int fd
, int cmd
, abi_long arg
)
5591 ret
= get_errno(safe_ioctl(fd
, SIOCGSTAMPNS
, &ts
));
5592 if (is_error(ret
)) {
5596 if (cmd
== (int)TARGET_SIOCGSTAMPNS_OLD
) {
5597 if (host_to_target_timespec(arg
, &ts
)) {
5598 return -TARGET_EFAULT
;
5601 if (host_to_target_timespec64(arg
, &ts
)) {
5602 return -TARGET_EFAULT
;
5610 static abi_long
do_ioctl_tiocgptpeer(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5611 int fd
, int cmd
, abi_long arg
)
5613 int flags
= target_to_host_bitmask(arg
, fcntl_flags_tbl
);
5614 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, flags
));
5620 static void unlock_drm_version(struct drm_version
*host_ver
,
5621 struct target_drm_version
*target_ver
,
5624 unlock_user(host_ver
->name
, target_ver
->name
,
5625 copy
? host_ver
->name_len
: 0);
5626 unlock_user(host_ver
->date
, target_ver
->date
,
5627 copy
? host_ver
->date_len
: 0);
5628 unlock_user(host_ver
->desc
, target_ver
->desc
,
5629 copy
? host_ver
->desc_len
: 0);
5632 static inline abi_long
target_to_host_drmversion(struct drm_version
*host_ver
,
5633 struct target_drm_version
*target_ver
)
5635 memset(host_ver
, 0, sizeof(*host_ver
));
5637 __get_user(host_ver
->name_len
, &target_ver
->name_len
);
5638 if (host_ver
->name_len
) {
5639 host_ver
->name
= lock_user(VERIFY_WRITE
, target_ver
->name
,
5640 target_ver
->name_len
, 0);
5641 if (!host_ver
->name
) {
5646 __get_user(host_ver
->date_len
, &target_ver
->date_len
);
5647 if (host_ver
->date_len
) {
5648 host_ver
->date
= lock_user(VERIFY_WRITE
, target_ver
->date
,
5649 target_ver
->date_len
, 0);
5650 if (!host_ver
->date
) {
5655 __get_user(host_ver
->desc_len
, &target_ver
->desc_len
);
5656 if (host_ver
->desc_len
) {
5657 host_ver
->desc
= lock_user(VERIFY_WRITE
, target_ver
->desc
,
5658 target_ver
->desc_len
, 0);
5659 if (!host_ver
->desc
) {
5666 unlock_drm_version(host_ver
, target_ver
, false);
5670 static inline void host_to_target_drmversion(
5671 struct target_drm_version
*target_ver
,
5672 struct drm_version
*host_ver
)
5674 __put_user(host_ver
->version_major
, &target_ver
->version_major
);
5675 __put_user(host_ver
->version_minor
, &target_ver
->version_minor
);
5676 __put_user(host_ver
->version_patchlevel
, &target_ver
->version_patchlevel
);
5677 __put_user(host_ver
->name_len
, &target_ver
->name_len
);
5678 __put_user(host_ver
->date_len
, &target_ver
->date_len
);
5679 __put_user(host_ver
->desc_len
, &target_ver
->desc_len
);
5680 unlock_drm_version(host_ver
, target_ver
, true);
5683 static abi_long
do_ioctl_drm(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5684 int fd
, int cmd
, abi_long arg
)
5686 struct drm_version
*ver
;
5687 struct target_drm_version
*target_ver
;
5690 switch (ie
->host_cmd
) {
5691 case DRM_IOCTL_VERSION
:
5692 if (!lock_user_struct(VERIFY_WRITE
, target_ver
, arg
, 0)) {
5693 return -TARGET_EFAULT
;
5695 ver
= (struct drm_version
*)buf_temp
;
5696 ret
= target_to_host_drmversion(ver
, target_ver
);
5697 if (!is_error(ret
)) {
5698 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, ver
));
5699 if (is_error(ret
)) {
5700 unlock_drm_version(ver
, target_ver
, false);
5702 host_to_target_drmversion(target_ver
, ver
);
5705 unlock_user_struct(target_ver
, arg
, 0);
5708 return -TARGET_ENOSYS
;
5711 static abi_long
do_ioctl_drm_i915_getparam(const IOCTLEntry
*ie
,
5712 struct drm_i915_getparam
*gparam
,
5713 int fd
, abi_long arg
)
5717 struct target_drm_i915_getparam
*target_gparam
;
5719 if (!lock_user_struct(VERIFY_READ
, target_gparam
, arg
, 0)) {
5720 return -TARGET_EFAULT
;
5723 __get_user(gparam
->param
, &target_gparam
->param
);
5724 gparam
->value
= &value
;
5725 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, gparam
));
5726 put_user_s32(value
, target_gparam
->value
);
5728 unlock_user_struct(target_gparam
, arg
, 0);
5732 static abi_long
do_ioctl_drm_i915(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5733 int fd
, int cmd
, abi_long arg
)
5735 switch (ie
->host_cmd
) {
5736 case DRM_IOCTL_I915_GETPARAM
:
5737 return do_ioctl_drm_i915_getparam(ie
,
5738 (struct drm_i915_getparam
*)buf_temp
,
5741 return -TARGET_ENOSYS
;
5747 static abi_long
do_ioctl_TUNSETTXFILTER(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5748 int fd
, int cmd
, abi_long arg
)
5750 struct tun_filter
*filter
= (struct tun_filter
*)buf_temp
;
5751 struct tun_filter
*target_filter
;
5754 assert(ie
->access
== IOC_W
);
5756 target_filter
= lock_user(VERIFY_READ
, arg
, sizeof(*target_filter
), 1);
5757 if (!target_filter
) {
5758 return -TARGET_EFAULT
;
5760 filter
->flags
= tswap16(target_filter
->flags
);
5761 filter
->count
= tswap16(target_filter
->count
);
5762 unlock_user(target_filter
, arg
, 0);
5764 if (filter
->count
) {
5765 if (offsetof(struct tun_filter
, addr
) + filter
->count
* ETH_ALEN
>
5767 return -TARGET_EFAULT
;
5770 target_addr
= lock_user(VERIFY_READ
,
5771 arg
+ offsetof(struct tun_filter
, addr
),
5772 filter
->count
* ETH_ALEN
, 1);
5774 return -TARGET_EFAULT
;
5776 memcpy(filter
->addr
, target_addr
, filter
->count
* ETH_ALEN
);
5777 unlock_user(target_addr
, arg
+ offsetof(struct tun_filter
, addr
), 0);
5780 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, filter
));
5783 IOCTLEntry ioctl_entries
[] = {
5784 #define IOCTL(cmd, access, ...) \
5785 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5786 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5787 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5788 #define IOCTL_IGNORE(cmd) \
5789 { TARGET_ ## cmd, 0, #cmd },
5794 /* ??? Implement proper locking for ioctls. */
5795 /* do_ioctl() Must return target values and target errnos. */
5796 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
5798 const IOCTLEntry
*ie
;
5799 const argtype
*arg_type
;
5801 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
5807 if (ie
->target_cmd
== 0) {
5809 LOG_UNIMP
, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
5810 return -TARGET_ENOSYS
;
5812 if (ie
->target_cmd
== cmd
)
5816 arg_type
= ie
->arg_type
;
5818 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
5819 } else if (!ie
->host_cmd
) {
5820 /* Some architectures define BSD ioctls in their headers
5821 that are not implemented in Linux. */
5822 return -TARGET_ENOSYS
;
5825 switch(arg_type
[0]) {
5828 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
5834 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
5838 target_size
= thunk_type_size(arg_type
, 0);
5839 switch(ie
->access
) {
5841 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5842 if (!is_error(ret
)) {
5843 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5845 return -TARGET_EFAULT
;
5846 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5847 unlock_user(argptr
, arg
, target_size
);
5851 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5853 return -TARGET_EFAULT
;
5854 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5855 unlock_user(argptr
, arg
, 0);
5856 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5860 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5862 return -TARGET_EFAULT
;
5863 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5864 unlock_user(argptr
, arg
, 0);
5865 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5866 if (!is_error(ret
)) {
5867 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5869 return -TARGET_EFAULT
;
5870 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5871 unlock_user(argptr
, arg
, target_size
);
5877 qemu_log_mask(LOG_UNIMP
,
5878 "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5879 (long)cmd
, arg_type
[0]);
5880 ret
= -TARGET_ENOSYS
;
5886 static const bitmask_transtbl iflag_tbl
[] = {
5887 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
5888 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
5889 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
5890 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
5891 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
5892 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
5893 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
5894 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
5895 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
5896 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
5897 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
5898 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
5899 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
5900 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
5901 { TARGET_IUTF8
, TARGET_IUTF8
, IUTF8
, IUTF8
},
5905 static const bitmask_transtbl oflag_tbl
[] = {
5906 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
5907 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
5908 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
5909 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
5910 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
5911 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
5912 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
5913 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
5914 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
5915 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
5916 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
5917 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
5918 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
5919 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
5920 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
5921 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
5922 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
5923 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
5924 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
5925 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
5926 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
5927 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
5928 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
5929 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
5933 static const bitmask_transtbl cflag_tbl
[] = {
5934 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
5935 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
5936 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
5937 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
5938 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
5939 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
5940 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
5941 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
5942 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
5943 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
5944 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
5945 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
5946 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
5947 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
5948 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
5949 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
5950 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
5951 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
5952 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
5953 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
5954 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
5955 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
5956 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
5957 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
5958 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
5959 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
5960 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
5961 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
5962 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
5963 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
5964 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
5968 static const bitmask_transtbl lflag_tbl
[] = {
5969 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
5970 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
5971 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
5972 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
5973 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
5974 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
5975 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
5976 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
5977 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
5978 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
5979 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
5980 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
5981 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
5982 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
5983 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
5984 { TARGET_EXTPROC
, TARGET_EXTPROC
, EXTPROC
, EXTPROC
},
5988 static void target_to_host_termios (void *dst
, const void *src
)
5990 struct host_termios
*host
= dst
;
5991 const struct target_termios
*target
= src
;
5994 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
5996 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
5998 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
6000 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
6001 host
->c_line
= target
->c_line
;
6003 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
6004 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
6005 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
6006 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
6007 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
6008 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
6009 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
6010 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
6011 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
6012 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
6013 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
6014 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
6015 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
6016 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
6017 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
6018 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
6019 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
6020 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
6023 static void host_to_target_termios (void *dst
, const void *src
)
6025 struct target_termios
*target
= dst
;
6026 const struct host_termios
*host
= src
;
6029 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
6031 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
6033 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
6035 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
6036 target
->c_line
= host
->c_line
;
6038 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
6039 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
6040 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
6041 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
6042 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
6043 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
6044 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
6045 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
6046 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
6047 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
6048 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
6049 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
6050 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
6051 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
6052 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
6053 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
6054 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
6055 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
6058 static const StructEntry struct_termios_def
= {
6059 .convert
= { host_to_target_termios
, target_to_host_termios
},
6060 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
6061 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
6062 .print
= print_termios
,
6065 static bitmask_transtbl mmap_flags_tbl
[] = {
6066 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
6067 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
6068 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
6069 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
,
6070 MAP_ANONYMOUS
, MAP_ANONYMOUS
},
6071 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
,
6072 MAP_GROWSDOWN
, MAP_GROWSDOWN
},
6073 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
,
6074 MAP_DENYWRITE
, MAP_DENYWRITE
},
6075 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
,
6076 MAP_EXECUTABLE
, MAP_EXECUTABLE
},
6077 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
6078 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
,
6079 MAP_NORESERVE
, MAP_NORESERVE
},
6080 { TARGET_MAP_HUGETLB
, TARGET_MAP_HUGETLB
, MAP_HUGETLB
, MAP_HUGETLB
},
6081 /* MAP_STACK had been ignored by the kernel for quite some time.
6082 Recognize it for the target insofar as we do not want to pass
6083 it through to the host. */
6084 { TARGET_MAP_STACK
, TARGET_MAP_STACK
, 0, 0 },
6089 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
6090 * TARGET_I386 is defined if TARGET_X86_64 is defined
6092 #if defined(TARGET_I386)
6094 /* NOTE: there is really one LDT for all the threads */
6095 static uint8_t *ldt_table
;
6097 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
6104 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
6105 if (size
> bytecount
)
6107 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
6109 return -TARGET_EFAULT
;
6110 /* ??? Should this by byteswapped? */
6111 memcpy(p
, ldt_table
, size
);
6112 unlock_user(p
, ptr
, size
);
6116 /* XXX: add locking support */
6117 static abi_long
write_ldt(CPUX86State
*env
,
6118 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
6120 struct target_modify_ldt_ldt_s ldt_info
;
6121 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6122 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
6123 int seg_not_present
, useable
, lm
;
6124 uint32_t *lp
, entry_1
, entry_2
;
6126 if (bytecount
!= sizeof(ldt_info
))
6127 return -TARGET_EINVAL
;
6128 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
6129 return -TARGET_EFAULT
;
6130 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
6131 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
6132 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
6133 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
6134 unlock_user_struct(target_ldt_info
, ptr
, 0);
6136 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
6137 return -TARGET_EINVAL
;
6138 seg_32bit
= ldt_info
.flags
& 1;
6139 contents
= (ldt_info
.flags
>> 1) & 3;
6140 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
6141 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
6142 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
6143 useable
= (ldt_info
.flags
>> 6) & 1;
6147 lm
= (ldt_info
.flags
>> 7) & 1;
6149 if (contents
== 3) {
6151 return -TARGET_EINVAL
;
6152 if (seg_not_present
== 0)
6153 return -TARGET_EINVAL
;
6155 /* allocate the LDT */
6157 env
->ldt
.base
= target_mmap(0,
6158 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
6159 PROT_READ
|PROT_WRITE
,
6160 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
6161 if (env
->ldt
.base
== -1)
6162 return -TARGET_ENOMEM
;
6163 memset(g2h_untagged(env
->ldt
.base
), 0,
6164 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
6165 env
->ldt
.limit
= 0xffff;
6166 ldt_table
= g2h_untagged(env
->ldt
.base
);
6169 /* NOTE: same code as Linux kernel */
6170 /* Allow LDTs to be cleared by the user. */
6171 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
6174 read_exec_only
== 1 &&
6176 limit_in_pages
== 0 &&
6177 seg_not_present
== 1 &&
6185 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
6186 (ldt_info
.limit
& 0x0ffff);
6187 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
6188 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
6189 (ldt_info
.limit
& 0xf0000) |
6190 ((read_exec_only
^ 1) << 9) |
6192 ((seg_not_present
^ 1) << 15) |
6194 (limit_in_pages
<< 23) |
6198 entry_2
|= (useable
<< 20);
6200 /* Install the new entry ... */
6202 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
6203 lp
[0] = tswap32(entry_1
);
6204 lp
[1] = tswap32(entry_2
);
6208 /* specific and weird i386 syscalls */
6209 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
6210 unsigned long bytecount
)
6216 ret
= read_ldt(ptr
, bytecount
);
6219 ret
= write_ldt(env
, ptr
, bytecount
, 1);
6222 ret
= write_ldt(env
, ptr
, bytecount
, 0);
6225 ret
= -TARGET_ENOSYS
;
6231 #if defined(TARGET_ABI32)
6232 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
6234 uint64_t *gdt_table
= g2h_untagged(env
->gdt
.base
);
6235 struct target_modify_ldt_ldt_s ldt_info
;
6236 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6237 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
6238 int seg_not_present
, useable
, lm
;
6239 uint32_t *lp
, entry_1
, entry_2
;
6242 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
6243 if (!target_ldt_info
)
6244 return -TARGET_EFAULT
;
6245 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
6246 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
6247 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
6248 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
6249 if (ldt_info
.entry_number
== -1) {
6250 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
6251 if (gdt_table
[i
] == 0) {
6252 ldt_info
.entry_number
= i
;
6253 target_ldt_info
->entry_number
= tswap32(i
);
6258 unlock_user_struct(target_ldt_info
, ptr
, 1);
6260 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
6261 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
6262 return -TARGET_EINVAL
;
6263 seg_32bit
= ldt_info
.flags
& 1;
6264 contents
= (ldt_info
.flags
>> 1) & 3;
6265 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
6266 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
6267 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
6268 useable
= (ldt_info
.flags
>> 6) & 1;
6272 lm
= (ldt_info
.flags
>> 7) & 1;
6275 if (contents
== 3) {
6276 if (seg_not_present
== 0)
6277 return -TARGET_EINVAL
;
6280 /* NOTE: same code as Linux kernel */
6281 /* Allow LDTs to be cleared by the user. */
6282 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
6283 if ((contents
== 0 &&
6284 read_exec_only
== 1 &&
6286 limit_in_pages
== 0 &&
6287 seg_not_present
== 1 &&
6295 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
6296 (ldt_info
.limit
& 0x0ffff);
6297 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
6298 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
6299 (ldt_info
.limit
& 0xf0000) |
6300 ((read_exec_only
^ 1) << 9) |
6302 ((seg_not_present
^ 1) << 15) |
6304 (limit_in_pages
<< 23) |
6309 /* Install the new entry ... */
6311 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
6312 lp
[0] = tswap32(entry_1
);
6313 lp
[1] = tswap32(entry_2
);
6317 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
6319 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6320 uint64_t *gdt_table
= g2h_untagged(env
->gdt
.base
);
6321 uint32_t base_addr
, limit
, flags
;
6322 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
6323 int seg_not_present
, useable
, lm
;
6324 uint32_t *lp
, entry_1
, entry_2
;
6326 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
6327 if (!target_ldt_info
)
6328 return -TARGET_EFAULT
;
6329 idx
= tswap32(target_ldt_info
->entry_number
);
6330 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
6331 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
6332 unlock_user_struct(target_ldt_info
, ptr
, 1);
6333 return -TARGET_EINVAL
;
6335 lp
= (uint32_t *)(gdt_table
+ idx
);
6336 entry_1
= tswap32(lp
[0]);
6337 entry_2
= tswap32(lp
[1]);
6339 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
6340 contents
= (entry_2
>> 10) & 3;
6341 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
6342 seg_32bit
= (entry_2
>> 22) & 1;
6343 limit_in_pages
= (entry_2
>> 23) & 1;
6344 useable
= (entry_2
>> 20) & 1;
6348 lm
= (entry_2
>> 21) & 1;
6350 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
6351 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
6352 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
6353 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
6354 base_addr
= (entry_1
>> 16) |
6355 (entry_2
& 0xff000000) |
6356 ((entry_2
& 0xff) << 16);
6357 target_ldt_info
->base_addr
= tswapal(base_addr
);
6358 target_ldt_info
->limit
= tswap32(limit
);
6359 target_ldt_info
->flags
= tswap32(flags
);
6360 unlock_user_struct(target_ldt_info
, ptr
, 1);
6364 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
6366 return -TARGET_ENOSYS
;
6369 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
6376 case TARGET_ARCH_SET_GS
:
6377 case TARGET_ARCH_SET_FS
:
6378 if (code
== TARGET_ARCH_SET_GS
)
6382 cpu_x86_load_seg(env
, idx
, 0);
6383 env
->segs
[idx
].base
= addr
;
6385 case TARGET_ARCH_GET_GS
:
6386 case TARGET_ARCH_GET_FS
:
6387 if (code
== TARGET_ARCH_GET_GS
)
6391 val
= env
->segs
[idx
].base
;
6392 if (put_user(val
, addr
, abi_ulong
))
6393 ret
= -TARGET_EFAULT
;
6396 ret
= -TARGET_EINVAL
;
6401 #endif /* defined(TARGET_ABI32 */
6403 #endif /* defined(TARGET_I386) */
6405 #define NEW_STACK_SIZE 0x40000
6408 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
6411 pthread_mutex_t mutex
;
6412 pthread_cond_t cond
;
6415 abi_ulong child_tidptr
;
6416 abi_ulong parent_tidptr
;
6420 static void *clone_func(void *arg
)
6422 new_thread_info
*info
= arg
;
6427 rcu_register_thread();
6428 tcg_register_thread();
6432 ts
= (TaskState
*)cpu
->opaque
;
6433 info
->tid
= sys_gettid();
6435 if (info
->child_tidptr
)
6436 put_user_u32(info
->tid
, info
->child_tidptr
);
6437 if (info
->parent_tidptr
)
6438 put_user_u32(info
->tid
, info
->parent_tidptr
);
6439 qemu_guest_random_seed_thread_part2(cpu
->random_seed
);
6440 /* Enable signals. */
6441 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
6442 /* Signal to the parent that we're ready. */
6443 pthread_mutex_lock(&info
->mutex
);
6444 pthread_cond_broadcast(&info
->cond
);
6445 pthread_mutex_unlock(&info
->mutex
);
6446 /* Wait until the parent has finished initializing the tls state. */
6447 pthread_mutex_lock(&clone_lock
);
6448 pthread_mutex_unlock(&clone_lock
);
6454 /* do_fork() Must return host values and target errnos (unlike most
6455 do_*() functions). */
6456 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
6457 abi_ulong parent_tidptr
, target_ulong newtls
,
6458 abi_ulong child_tidptr
)
6460 CPUState
*cpu
= env_cpu(env
);
6464 CPUArchState
*new_env
;
6467 flags
&= ~CLONE_IGNORED_FLAGS
;
6469 /* Emulate vfork() with fork() */
6470 if (flags
& CLONE_VFORK
)
6471 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
6473 if (flags
& CLONE_VM
) {
6474 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
6475 new_thread_info info
;
6476 pthread_attr_t attr
;
6478 if (((flags
& CLONE_THREAD_FLAGS
) != CLONE_THREAD_FLAGS
) ||
6479 (flags
& CLONE_INVALID_THREAD_FLAGS
)) {
6480 return -TARGET_EINVAL
;
6483 ts
= g_new0(TaskState
, 1);
6484 init_task_state(ts
);
6486 /* Grab a mutex so that thread setup appears atomic. */
6487 pthread_mutex_lock(&clone_lock
);
6490 * If this is our first additional thread, we need to ensure we
6491 * generate code for parallel execution and flush old translations.
6492 * Do this now so that the copy gets CF_PARALLEL too.
6494 if (!(cpu
->tcg_cflags
& CF_PARALLEL
)) {
6495 cpu
->tcg_cflags
|= CF_PARALLEL
;
6499 /* we create a new CPU instance. */
6500 new_env
= cpu_copy(env
);
6501 /* Init regs that differ from the parent. */
6502 cpu_clone_regs_child(new_env
, newsp
, flags
);
6503 cpu_clone_regs_parent(env
, flags
);
6504 new_cpu
= env_cpu(new_env
);
6505 new_cpu
->opaque
= ts
;
6506 ts
->bprm
= parent_ts
->bprm
;
6507 ts
->info
= parent_ts
->info
;
6508 ts
->signal_mask
= parent_ts
->signal_mask
;
6510 if (flags
& CLONE_CHILD_CLEARTID
) {
6511 ts
->child_tidptr
= child_tidptr
;
6514 if (flags
& CLONE_SETTLS
) {
6515 cpu_set_tls (new_env
, newtls
);
6518 memset(&info
, 0, sizeof(info
));
6519 pthread_mutex_init(&info
.mutex
, NULL
);
6520 pthread_mutex_lock(&info
.mutex
);
6521 pthread_cond_init(&info
.cond
, NULL
);
6523 if (flags
& CLONE_CHILD_SETTID
) {
6524 info
.child_tidptr
= child_tidptr
;
6526 if (flags
& CLONE_PARENT_SETTID
) {
6527 info
.parent_tidptr
= parent_tidptr
;
6530 ret
= pthread_attr_init(&attr
);
6531 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
6532 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
6533 /* It is not safe to deliver signals until the child has finished
6534 initializing, so temporarily block all signals. */
6535 sigfillset(&sigmask
);
6536 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
6537 cpu
->random_seed
= qemu_guest_random_seed_thread_part1();
6539 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
6540 /* TODO: Free new CPU state if thread creation failed. */
6542 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
6543 pthread_attr_destroy(&attr
);
6545 /* Wait for the child to initialize. */
6546 pthread_cond_wait(&info
.cond
, &info
.mutex
);
6551 pthread_mutex_unlock(&info
.mutex
);
6552 pthread_cond_destroy(&info
.cond
);
6553 pthread_mutex_destroy(&info
.mutex
);
6554 pthread_mutex_unlock(&clone_lock
);
6556 /* if no CLONE_VM, we consider it is a fork */
6557 if (flags
& CLONE_INVALID_FORK_FLAGS
) {
6558 return -TARGET_EINVAL
;
6561 /* We can't support custom termination signals */
6562 if ((flags
& CSIGNAL
) != TARGET_SIGCHLD
) {
6563 return -TARGET_EINVAL
;
6566 if (block_signals()) {
6567 return -TARGET_ERESTARTSYS
;
6573 /* Child Process. */
6574 cpu_clone_regs_child(env
, newsp
, flags
);
6576 /* There is a race condition here. The parent process could
6577 theoretically read the TID in the child process before the child
6578 tid is set. This would require using either ptrace
6579 (not implemented) or having *_tidptr to point at a shared memory
6580 mapping. We can't repeat the spinlock hack used above because
6581 the child process gets its own copy of the lock. */
6582 if (flags
& CLONE_CHILD_SETTID
)
6583 put_user_u32(sys_gettid(), child_tidptr
);
6584 if (flags
& CLONE_PARENT_SETTID
)
6585 put_user_u32(sys_gettid(), parent_tidptr
);
6586 ts
= (TaskState
*)cpu
->opaque
;
6587 if (flags
& CLONE_SETTLS
)
6588 cpu_set_tls (env
, newtls
);
6589 if (flags
& CLONE_CHILD_CLEARTID
)
6590 ts
->child_tidptr
= child_tidptr
;
6592 cpu_clone_regs_parent(env
, flags
);
6599 /* warning : doesn't handle linux specific flags... */
6600 static int target_to_host_fcntl_cmd(int cmd
)
6605 case TARGET_F_DUPFD
:
6606 case TARGET_F_GETFD
:
6607 case TARGET_F_SETFD
:
6608 case TARGET_F_GETFL
:
6609 case TARGET_F_SETFL
:
6610 case TARGET_F_OFD_GETLK
:
6611 case TARGET_F_OFD_SETLK
:
6612 case TARGET_F_OFD_SETLKW
:
6615 case TARGET_F_GETLK
:
6618 case TARGET_F_SETLK
:
6621 case TARGET_F_SETLKW
:
6624 case TARGET_F_GETOWN
:
6627 case TARGET_F_SETOWN
:
6630 case TARGET_F_GETSIG
:
6633 case TARGET_F_SETSIG
:
6636 #if TARGET_ABI_BITS == 32
6637 case TARGET_F_GETLK64
:
6640 case TARGET_F_SETLK64
:
6643 case TARGET_F_SETLKW64
:
6647 case TARGET_F_SETLEASE
:
6650 case TARGET_F_GETLEASE
:
6653 #ifdef F_DUPFD_CLOEXEC
6654 case TARGET_F_DUPFD_CLOEXEC
:
6655 ret
= F_DUPFD_CLOEXEC
;
6658 case TARGET_F_NOTIFY
:
6662 case TARGET_F_GETOWN_EX
:
6667 case TARGET_F_SETOWN_EX
:
6672 case TARGET_F_SETPIPE_SZ
:
6675 case TARGET_F_GETPIPE_SZ
:
6680 case TARGET_F_ADD_SEALS
:
6683 case TARGET_F_GET_SEALS
:
6688 ret
= -TARGET_EINVAL
;
6692 #if defined(__powerpc64__)
6693 /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
6694 * is not supported by kernel. The glibc fcntl call actually adjusts
6695 * them to 5, 6 and 7 before making the syscall(). Since we make the
6696 * syscall directly, adjust to what is supported by the kernel.
6698 if (ret
>= F_GETLK64
&& ret
<= F_SETLKW64
) {
6699 ret
-= F_GETLK64
- 5;
6706 #define FLOCK_TRANSTBL \
6708 TRANSTBL_CONVERT(F_RDLCK); \
6709 TRANSTBL_CONVERT(F_WRLCK); \
6710 TRANSTBL_CONVERT(F_UNLCK); \
6713 static int target_to_host_flock(int type
)
6715 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6717 #undef TRANSTBL_CONVERT
6718 return -TARGET_EINVAL
;
6721 static int host_to_target_flock(int type
)
6723 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6725 #undef TRANSTBL_CONVERT
6726 /* if we don't know how to convert the value coming
6727 * from the host we copy to the target field as-is
6732 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
6733 abi_ulong target_flock_addr
)
6735 struct target_flock
*target_fl
;
6738 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6739 return -TARGET_EFAULT
;
6742 __get_user(l_type
, &target_fl
->l_type
);
6743 l_type
= target_to_host_flock(l_type
);
6747 fl
->l_type
= l_type
;
6748 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6749 __get_user(fl
->l_start
, &target_fl
->l_start
);
6750 __get_user(fl
->l_len
, &target_fl
->l_len
);
6751 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6752 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6756 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
6757 const struct flock64
*fl
)
6759 struct target_flock
*target_fl
;
6762 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6763 return -TARGET_EFAULT
;
6766 l_type
= host_to_target_flock(fl
->l_type
);
6767 __put_user(l_type
, &target_fl
->l_type
);
6768 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6769 __put_user(fl
->l_start
, &target_fl
->l_start
);
6770 __put_user(fl
->l_len
, &target_fl
->l_len
);
6771 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6772 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6776 typedef abi_long
from_flock64_fn(struct flock64
*fl
, abi_ulong target_addr
);
6777 typedef abi_long
to_flock64_fn(abi_ulong target_addr
, const struct flock64
*fl
);
6779 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6780 static inline abi_long
copy_from_user_oabi_flock64(struct flock64
*fl
,
6781 abi_ulong target_flock_addr
)
6783 struct target_oabi_flock64
*target_fl
;
6786 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6787 return -TARGET_EFAULT
;
6790 __get_user(l_type
, &target_fl
->l_type
);
6791 l_type
= target_to_host_flock(l_type
);
6795 fl
->l_type
= l_type
;
6796 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6797 __get_user(fl
->l_start
, &target_fl
->l_start
);
6798 __get_user(fl
->l_len
, &target_fl
->l_len
);
6799 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6800 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6804 static inline abi_long
copy_to_user_oabi_flock64(abi_ulong target_flock_addr
,
6805 const struct flock64
*fl
)
6807 struct target_oabi_flock64
*target_fl
;
6810 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6811 return -TARGET_EFAULT
;
6814 l_type
= host_to_target_flock(fl
->l_type
);
6815 __put_user(l_type
, &target_fl
->l_type
);
6816 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6817 __put_user(fl
->l_start
, &target_fl
->l_start
);
6818 __put_user(fl
->l_len
, &target_fl
->l_len
);
6819 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6820 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6825 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
6826 abi_ulong target_flock_addr
)
6828 struct target_flock64
*target_fl
;
6831 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6832 return -TARGET_EFAULT
;
6835 __get_user(l_type
, &target_fl
->l_type
);
6836 l_type
= target_to_host_flock(l_type
);
6840 fl
->l_type
= l_type
;
6841 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6842 __get_user(fl
->l_start
, &target_fl
->l_start
);
6843 __get_user(fl
->l_len
, &target_fl
->l_len
);
6844 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6845 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6849 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
6850 const struct flock64
*fl
)
6852 struct target_flock64
*target_fl
;
6855 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6856 return -TARGET_EFAULT
;
6859 l_type
= host_to_target_flock(fl
->l_type
);
6860 __put_user(l_type
, &target_fl
->l_type
);
6861 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6862 __put_user(fl
->l_start
, &target_fl
->l_start
);
6863 __put_user(fl
->l_len
, &target_fl
->l_len
);
6864 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6865 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6869 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
6871 struct flock64 fl64
;
6873 struct f_owner_ex fox
;
6874 struct target_f_owner_ex
*target_fox
;
6877 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
6879 if (host_cmd
== -TARGET_EINVAL
)
6883 case TARGET_F_GETLK
:
6884 ret
= copy_from_user_flock(&fl64
, arg
);
6888 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6890 ret
= copy_to_user_flock(arg
, &fl64
);
6894 case TARGET_F_SETLK
:
6895 case TARGET_F_SETLKW
:
6896 ret
= copy_from_user_flock(&fl64
, arg
);
6900 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6903 case TARGET_F_GETLK64
:
6904 case TARGET_F_OFD_GETLK
:
6905 ret
= copy_from_user_flock64(&fl64
, arg
);
6909 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6911 ret
= copy_to_user_flock64(arg
, &fl64
);
6914 case TARGET_F_SETLK64
:
6915 case TARGET_F_SETLKW64
:
6916 case TARGET_F_OFD_SETLK
:
6917 case TARGET_F_OFD_SETLKW
:
6918 ret
= copy_from_user_flock64(&fl64
, arg
);
6922 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6925 case TARGET_F_GETFL
:
6926 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6928 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
6932 case TARGET_F_SETFL
:
6933 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
6934 target_to_host_bitmask(arg
,
6939 case TARGET_F_GETOWN_EX
:
6940 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6942 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
6943 return -TARGET_EFAULT
;
6944 target_fox
->type
= tswap32(fox
.type
);
6945 target_fox
->pid
= tswap32(fox
.pid
);
6946 unlock_user_struct(target_fox
, arg
, 1);
6952 case TARGET_F_SETOWN_EX
:
6953 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
6954 return -TARGET_EFAULT
;
6955 fox
.type
= tswap32(target_fox
->type
);
6956 fox
.pid
= tswap32(target_fox
->pid
);
6957 unlock_user_struct(target_fox
, arg
, 0);
6958 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6962 case TARGET_F_SETSIG
:
6963 ret
= get_errno(safe_fcntl(fd
, host_cmd
, target_to_host_signal(arg
)));
6966 case TARGET_F_GETSIG
:
6967 ret
= host_to_target_signal(get_errno(safe_fcntl(fd
, host_cmd
, arg
)));
6970 case TARGET_F_SETOWN
:
6971 case TARGET_F_GETOWN
:
6972 case TARGET_F_SETLEASE
:
6973 case TARGET_F_GETLEASE
:
6974 case TARGET_F_SETPIPE_SZ
:
6975 case TARGET_F_GETPIPE_SZ
:
6976 case TARGET_F_ADD_SEALS
:
6977 case TARGET_F_GET_SEALS
:
6978 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6982 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
/* 16-bit UID targets: clamp 32-bit host IDs into the legacy 16-bit range. */
static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}
static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}
/* Widen a 16-bit guest ID, preserving the -1 "no change" sentinel. */
static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}
static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}
static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7028 #else /* !USE_UID16 */
/* 32-bit UID targets: IDs pass through unchanged. */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7052 #endif /* USE_UID16 */
7054 /* We must do direct syscalls for setting UID/GID, because we want to
7055 * implement the Linux system call semantics of "change only for this thread",
7056 * not the libc/POSIX semantics of "change for all threads in process".
7057 * (See http://ewontfix.com/17/ for more details.)
7058 * We use the 32-bit version of the syscalls if present; if it is not
7059 * then either the host architecture supports 32-bit UIDs natively with
7060 * the standard syscall, or the 16-bit UID is the best we can do.
7062 #ifdef __NR_setuid32
7063 #define __NR_sys_setuid __NR_setuid32
7065 #define __NR_sys_setuid __NR_setuid
7067 #ifdef __NR_setgid32
7068 #define __NR_sys_setgid __NR_setgid32
7070 #define __NR_sys_setgid __NR_setgid
7072 #ifdef __NR_setresuid32
7073 #define __NR_sys_setresuid __NR_setresuid32
7075 #define __NR_sys_setresuid __NR_setresuid
7077 #ifdef __NR_setresgid32
7078 #define __NR_sys_setresgid __NR_setresgid32
7080 #define __NR_sys_setresgid __NR_setresgid
7083 _syscall1(int, sys_setuid
, uid_t
, uid
)
7084 _syscall1(int, sys_setgid
, gid_t
, gid
)
7085 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
7086 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
7088 void syscall_init(void)
7091 const argtype
*arg_type
;
7095 thunk_init(STRUCT_MAX
);
7097 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7098 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7099 #include "syscall_types.h"
7101 #undef STRUCT_SPECIAL
7103 /* Build target_to_host_errno_table[] table from
7104 * host_to_target_errno_table[]. */
7105 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
7106 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
7109 /* we patch the ioctl size if necessary. We rely on the fact that
7110 no ioctl has all the bits at '1' in the size field */
7112 while (ie
->target_cmd
!= 0) {
7113 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
7114 TARGET_IOC_SIZEMASK
) {
7115 arg_type
= ie
->arg_type
;
7116 if (arg_type
[0] != TYPE_PTR
) {
7117 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
7122 size
= thunk_type_size(arg_type
, 0);
7123 ie
->target_cmd
= (ie
->target_cmd
&
7124 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
7125 (size
<< TARGET_IOC_SIZESHIFT
);
7128 /* automatic consistency check if same arch */
7129 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7130 (defined(__x86_64__) && defined(TARGET_X86_64))
7131 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
7132 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7133 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
7140 #ifdef TARGET_NR_truncate64
7141 static inline abi_long
target_truncate64(void *cpu_env
, const char *arg1
,
7146 if (regpairs_aligned(cpu_env
, TARGET_NR_truncate64
)) {
7150 return get_errno(truncate64(arg1
, target_offset64(arg2
, arg3
)));
7154 #ifdef TARGET_NR_ftruncate64
7155 static inline abi_long
target_ftruncate64(void *cpu_env
, abi_long arg1
,
7160 if (regpairs_aligned(cpu_env
, TARGET_NR_ftruncate64
)) {
7164 return get_errno(ftruncate64(arg1
, target_offset64(arg2
, arg3
)));
7168 #if defined(TARGET_NR_timer_settime) || \
7169 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7170 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_its
,
7171 abi_ulong target_addr
)
7173 if (target_to_host_timespec(&host_its
->it_interval
, target_addr
+
7174 offsetof(struct target_itimerspec
,
7176 target_to_host_timespec(&host_its
->it_value
, target_addr
+
7177 offsetof(struct target_itimerspec
,
7179 return -TARGET_EFAULT
;
7186 #if defined(TARGET_NR_timer_settime64) || \
7187 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7188 static inline abi_long
target_to_host_itimerspec64(struct itimerspec
*host_its
,
7189 abi_ulong target_addr
)
7191 if (target_to_host_timespec64(&host_its
->it_interval
, target_addr
+
7192 offsetof(struct target__kernel_itimerspec
,
7194 target_to_host_timespec64(&host_its
->it_value
, target_addr
+
7195 offsetof(struct target__kernel_itimerspec
,
7197 return -TARGET_EFAULT
;
7204 #if ((defined(TARGET_NR_timerfd_gettime) || \
7205 defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7206 defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7207 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
7208 struct itimerspec
*host_its
)
7210 if (host_to_target_timespec(target_addr
+ offsetof(struct target_itimerspec
,
7212 &host_its
->it_interval
) ||
7213 host_to_target_timespec(target_addr
+ offsetof(struct target_itimerspec
,
7215 &host_its
->it_value
)) {
7216 return -TARGET_EFAULT
;
7222 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7223 defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7224 defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7225 static inline abi_long
host_to_target_itimerspec64(abi_ulong target_addr
,
7226 struct itimerspec
*host_its
)
7228 if (host_to_target_timespec64(target_addr
+
7229 offsetof(struct target__kernel_itimerspec
,
7231 &host_its
->it_interval
) ||
7232 host_to_target_timespec64(target_addr
+
7233 offsetof(struct target__kernel_itimerspec
,
7235 &host_its
->it_value
)) {
7236 return -TARGET_EFAULT
;
7242 #if defined(TARGET_NR_adjtimex) || \
7243 (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7244 static inline abi_long
target_to_host_timex(struct timex
*host_tx
,
7245 abi_long target_addr
)
7247 struct target_timex
*target_tx
;
7249 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
7250 return -TARGET_EFAULT
;
7253 __get_user(host_tx
->modes
, &target_tx
->modes
);
7254 __get_user(host_tx
->offset
, &target_tx
->offset
);
7255 __get_user(host_tx
->freq
, &target_tx
->freq
);
7256 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7257 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
7258 __get_user(host_tx
->status
, &target_tx
->status
);
7259 __get_user(host_tx
->constant
, &target_tx
->constant
);
7260 __get_user(host_tx
->precision
, &target_tx
->precision
);
7261 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7262 __get_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
7263 __get_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
7264 __get_user(host_tx
->tick
, &target_tx
->tick
);
7265 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7266 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
7267 __get_user(host_tx
->shift
, &target_tx
->shift
);
7268 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
7269 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7270 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7271 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7272 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7273 __get_user(host_tx
->tai
, &target_tx
->tai
);
7275 unlock_user_struct(target_tx
, target_addr
, 0);
7279 static inline abi_long
host_to_target_timex(abi_long target_addr
,
7280 struct timex
*host_tx
)
7282 struct target_timex
*target_tx
;
7284 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
7285 return -TARGET_EFAULT
;
7288 __put_user(host_tx
->modes
, &target_tx
->modes
);
7289 __put_user(host_tx
->offset
, &target_tx
->offset
);
7290 __put_user(host_tx
->freq
, &target_tx
->freq
);
7291 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7292 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
7293 __put_user(host_tx
->status
, &target_tx
->status
);
7294 __put_user(host_tx
->constant
, &target_tx
->constant
);
7295 __put_user(host_tx
->precision
, &target_tx
->precision
);
7296 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7297 __put_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
7298 __put_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
7299 __put_user(host_tx
->tick
, &target_tx
->tick
);
7300 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7301 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
7302 __put_user(host_tx
->shift
, &target_tx
->shift
);
7303 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
7304 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7305 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7306 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7307 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7308 __put_user(host_tx
->tai
, &target_tx
->tai
);
7310 unlock_user_struct(target_tx
, target_addr
, 1);
7316 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7317 static inline abi_long
target_to_host_timex64(struct timex
*host_tx
,
7318 abi_long target_addr
)
7320 struct target__kernel_timex
*target_tx
;
7322 if (copy_from_user_timeval64(&host_tx
->time
, target_addr
+
7323 offsetof(struct target__kernel_timex
,
7325 return -TARGET_EFAULT
;
7328 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
7329 return -TARGET_EFAULT
;
7332 __get_user(host_tx
->modes
, &target_tx
->modes
);
7333 __get_user(host_tx
->offset
, &target_tx
->offset
);
7334 __get_user(host_tx
->freq
, &target_tx
->freq
);
7335 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7336 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
7337 __get_user(host_tx
->status
, &target_tx
->status
);
7338 __get_user(host_tx
->constant
, &target_tx
->constant
);
7339 __get_user(host_tx
->precision
, &target_tx
->precision
);
7340 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7341 __get_user(host_tx
->tick
, &target_tx
->tick
);
7342 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7343 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
7344 __get_user(host_tx
->shift
, &target_tx
->shift
);
7345 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
7346 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7347 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7348 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7349 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7350 __get_user(host_tx
->tai
, &target_tx
->tai
);
7352 unlock_user_struct(target_tx
, target_addr
, 0);
7356 static inline abi_long
host_to_target_timex64(abi_long target_addr
,
7357 struct timex
*host_tx
)
7359 struct target__kernel_timex
*target_tx
;
7361 if (copy_to_user_timeval64(target_addr
+
7362 offsetof(struct target__kernel_timex
, time
),
7364 return -TARGET_EFAULT
;
7367 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
7368 return -TARGET_EFAULT
;
7371 __put_user(host_tx
->modes
, &target_tx
->modes
);
7372 __put_user(host_tx
->offset
, &target_tx
->offset
);
7373 __put_user(host_tx
->freq
, &target_tx
->freq
);
7374 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7375 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
7376 __put_user(host_tx
->status
, &target_tx
->status
);
7377 __put_user(host_tx
->constant
, &target_tx
->constant
);
7378 __put_user(host_tx
->precision
, &target_tx
->precision
);
7379 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7380 __put_user(host_tx
->tick
, &target_tx
->tick
);
7381 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7382 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
7383 __put_user(host_tx
->shift
, &target_tx
->shift
);
7384 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
7385 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7386 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7387 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7388 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7389 __put_user(host_tx
->tai
, &target_tx
->tai
);
7391 unlock_user_struct(target_tx
, target_addr
, 1);
7396 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
7397 abi_ulong target_addr
)
7399 struct target_sigevent
*target_sevp
;
7401 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
7402 return -TARGET_EFAULT
;
7405 /* This union is awkward on 64 bit systems because it has a 32 bit
7406 * integer and a pointer in it; we follow the conversion approach
7407 * used for handling sigval types in signal.c so the guest should get
7408 * the correct value back even if we did a 64 bit byteswap and it's
7409 * using the 32 bit integer.
7411 host_sevp
->sigev_value
.sival_ptr
=
7412 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
7413 host_sevp
->sigev_signo
=
7414 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
7415 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
7416 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
7418 unlock_user_struct(target_sevp
, target_addr
, 1);
7422 #if defined(TARGET_NR_mlockall)
7423 static inline int target_to_host_mlockall_arg(int arg
)
7427 if (arg
& TARGET_MCL_CURRENT
) {
7428 result
|= MCL_CURRENT
;
7430 if (arg
& TARGET_MCL_FUTURE
) {
7431 result
|= MCL_FUTURE
;
7434 if (arg
& TARGET_MCL_ONFAULT
) {
7435 result
|= MCL_ONFAULT
;
7443 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
7444 defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
7445 defined(TARGET_NR_newfstatat))
7446 static inline abi_long
host_to_target_stat64(void *cpu_env
,
7447 abi_ulong target_addr
,
7448 struct stat
*host_st
)
7450 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7451 if (((CPUARMState
*)cpu_env
)->eabi
) {
7452 struct target_eabi_stat64
*target_st
;
7454 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
7455 return -TARGET_EFAULT
;
7456 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
7457 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
7458 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
7459 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7460 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
7462 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
7463 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
7464 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
7465 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
7466 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
7467 __put_user(host_st
->st_size
, &target_st
->st_size
);
7468 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
7469 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
7470 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
7471 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
7472 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
7473 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7474 __put_user(host_st
->st_atim
.tv_nsec
, &target_st
->target_st_atime_nsec
);
7475 __put_user(host_st
->st_mtim
.tv_nsec
, &target_st
->target_st_mtime_nsec
);
7476 __put_user(host_st
->st_ctim
.tv_nsec
, &target_st
->target_st_ctime_nsec
);
7478 unlock_user_struct(target_st
, target_addr
, 1);
7482 #if defined(TARGET_HAS_STRUCT_STAT64)
7483 struct target_stat64
*target_st
;
7485 struct target_stat
*target_st
;
7488 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
7489 return -TARGET_EFAULT
;
7490 memset(target_st
, 0, sizeof(*target_st
));
7491 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
7492 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
7493 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7494 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
7496 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
7497 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
7498 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
7499 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
7500 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
7501 /* XXX: better use of kernel struct */
7502 __put_user(host_st
->st_size
, &target_st
->st_size
);
7503 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
7504 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
7505 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
7506 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
7507 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
7508 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7509 __put_user(host_st
->st_atim
.tv_nsec
, &target_st
->target_st_atime_nsec
);
7510 __put_user(host_st
->st_mtim
.tv_nsec
, &target_st
->target_st_mtime_nsec
);
7511 __put_user(host_st
->st_ctim
.tv_nsec
, &target_st
->target_st_ctime_nsec
);
7513 unlock_user_struct(target_st
, target_addr
, 1);
7520 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7521 static inline abi_long
host_to_target_statx(struct target_statx
*host_stx
,
7522 abi_ulong target_addr
)
7524 struct target_statx
*target_stx
;
7526 if (!lock_user_struct(VERIFY_WRITE
, target_stx
, target_addr
, 0)) {
7527 return -TARGET_EFAULT
;
7529 memset(target_stx
, 0, sizeof(*target_stx
));
7531 __put_user(host_stx
->stx_mask
, &target_stx
->stx_mask
);
7532 __put_user(host_stx
->stx_blksize
, &target_stx
->stx_blksize
);
7533 __put_user(host_stx
->stx_attributes
, &target_stx
->stx_attributes
);
7534 __put_user(host_stx
->stx_nlink
, &target_stx
->stx_nlink
);
7535 __put_user(host_stx
->stx_uid
, &target_stx
->stx_uid
);
7536 __put_user(host_stx
->stx_gid
, &target_stx
->stx_gid
);
7537 __put_user(host_stx
->stx_mode
, &target_stx
->stx_mode
);
7538 __put_user(host_stx
->stx_ino
, &target_stx
->stx_ino
);
7539 __put_user(host_stx
->stx_size
, &target_stx
->stx_size
);
7540 __put_user(host_stx
->stx_blocks
, &target_stx
->stx_blocks
);
7541 __put_user(host_stx
->stx_attributes_mask
, &target_stx
->stx_attributes_mask
);
7542 __put_user(host_stx
->stx_atime
.tv_sec
, &target_stx
->stx_atime
.tv_sec
);
7543 __put_user(host_stx
->stx_atime
.tv_nsec
, &target_stx
->stx_atime
.tv_nsec
);
7544 __put_user(host_stx
->stx_btime
.tv_sec
, &target_stx
->stx_btime
.tv_sec
);
7545 __put_user(host_stx
->stx_btime
.tv_nsec
, &target_stx
->stx_btime
.tv_nsec
);
7546 __put_user(host_stx
->stx_ctime
.tv_sec
, &target_stx
->stx_ctime
.tv_sec
);
7547 __put_user(host_stx
->stx_ctime
.tv_nsec
, &target_stx
->stx_ctime
.tv_nsec
);
7548 __put_user(host_stx
->stx_mtime
.tv_sec
, &target_stx
->stx_mtime
.tv_sec
);
7549 __put_user(host_stx
->stx_mtime
.tv_nsec
, &target_stx
->stx_mtime
.tv_nsec
);
7550 __put_user(host_stx
->stx_rdev_major
, &target_stx
->stx_rdev_major
);
7551 __put_user(host_stx
->stx_rdev_minor
, &target_stx
->stx_rdev_minor
);
7552 __put_user(host_stx
->stx_dev_major
, &target_stx
->stx_dev_major
);
7553 __put_user(host_stx
->stx_dev_minor
, &target_stx
->stx_dev_minor
);
7555 unlock_user_struct(target_stx
, target_addr
, 1);
/*
 * Raw futex syscall dispatcher: on 64-bit hosts __NR_futex already takes a
 * 64-bit time_t; on 32-bit hosts prefer __NR_futex_time64 when the libc
 * timespec carries a 64-bit tv_sec, else fall back to the legacy call.
 * NOTE(review): reconstructed from a mangled extraction; verify against
 * upstream qemu linux-user/syscall.c.
 */
static int do_sys_futex(int *uaddr, int op, int val,
                        const struct timespec *timeout, int *uaddr2,
                        int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    g_assert_not_reached();
}
7586 static int do_safe_futex(int *uaddr
, int op
, int val
,
7587 const struct timespec
*timeout
, int *uaddr2
,
7590 #if HOST_LONG_BITS == 64
7591 #if defined(__NR_futex)
7592 /* always a 64-bit time_t, it doesn't define _time64 version */
7593 return get_errno(safe_futex(uaddr
, op
, val
, timeout
, uaddr2
, val3
));
7595 #else /* HOST_LONG_BITS == 64 */
7596 #if defined(__NR_futex_time64)
7597 if (sizeof(timeout
->tv_sec
) == 8) {
7598 /* _time64 function on 32bit arch */
7599 return get_errno(safe_futex_time64(uaddr
, op
, val
, timeout
, uaddr2
,
7603 #if defined(__NR_futex)
7604 /* old function on 32bit arch */
7605 return get_errno(safe_futex(uaddr
, op
, val
, timeout
, uaddr2
, val3
));
7607 #endif /* HOST_LONG_BITS == 64 */
7608 return -TARGET_ENOSYS
;
7611 /* ??? Using host futex calls even when target atomic operations
7612 are not really atomic probably breaks things. However implementing
7613 futexes locally would make futexes shared between multiple processes
7614 tricky. However they're probably useless because guest atomic
7615 operations won't work either. */
7616 #if defined(TARGET_NR_futex)
7617 static int do_futex(CPUState
*cpu
, target_ulong uaddr
, int op
, int val
,
7618 target_ulong timeout
, target_ulong uaddr2
, int val3
)
7620 struct timespec ts
, *pts
;
7623 /* ??? We assume FUTEX_* constants are the same on both host
7625 #ifdef FUTEX_CMD_MASK
7626 base_op
= op
& FUTEX_CMD_MASK
;
7632 case FUTEX_WAIT_BITSET
:
7635 target_to_host_timespec(pts
, timeout
);
7639 return do_safe_futex(g2h(cpu
, uaddr
),
7640 op
, tswap32(val
), pts
, NULL
, val3
);
7642 return do_safe_futex(g2h(cpu
, uaddr
),
7643 op
, val
, NULL
, NULL
, 0);
7645 return do_safe_futex(g2h(cpu
, uaddr
),
7646 op
, val
, NULL
, NULL
, 0);
7648 case FUTEX_CMP_REQUEUE
:
7650 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7651 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7652 But the prototype takes a `struct timespec *'; insert casts
7653 to satisfy the compiler. We do not need to tswap TIMEOUT
7654 since it's not compared to guest memory. */
7655 pts
= (struct timespec
*)(uintptr_t) timeout
;
7656 return do_safe_futex(g2h(cpu
, uaddr
), op
, val
, pts
, g2h(cpu
, uaddr2
),
7657 (base_op
== FUTEX_CMP_REQUEUE
7658 ? tswap32(val3
) : val3
));
7660 return -TARGET_ENOSYS
;
7665 #if defined(TARGET_NR_futex_time64)
7666 static int do_futex_time64(CPUState
*cpu
, target_ulong uaddr
, int op
,
7667 int val
, target_ulong timeout
,
7668 target_ulong uaddr2
, int val3
)
7670 struct timespec ts
, *pts
;
7673 /* ??? We assume FUTEX_* constants are the same on both host
7675 #ifdef FUTEX_CMD_MASK
7676 base_op
= op
& FUTEX_CMD_MASK
;
7682 case FUTEX_WAIT_BITSET
:
7685 if (target_to_host_timespec64(pts
, timeout
)) {
7686 return -TARGET_EFAULT
;
7691 return do_safe_futex(g2h(cpu
, uaddr
), op
,
7692 tswap32(val
), pts
, NULL
, val3
);
7694 return do_safe_futex(g2h(cpu
, uaddr
), op
, val
, NULL
, NULL
, 0);
7696 return do_safe_futex(g2h(cpu
, uaddr
), op
, val
, NULL
, NULL
, 0);
7698 case FUTEX_CMP_REQUEUE
:
7700 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7701 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7702 But the prototype takes a `struct timespec *'; insert casts
7703 to satisfy the compiler. We do not need to tswap TIMEOUT
7704 since it's not compared to guest memory. */
7705 pts
= (struct timespec
*)(uintptr_t) timeout
;
7706 return do_safe_futex(g2h(cpu
, uaddr
), op
, val
, pts
, g2h(cpu
, uaddr2
),
7707 (base_op
== FUTEX_CMP_REQUEUE
7708 ? tswap32(val3
) : val3
));
7710 return -TARGET_ENOSYS
;
7715 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7716 static abi_long
do_name_to_handle_at(abi_long dirfd
, abi_long pathname
,
7717 abi_long handle
, abi_long mount_id
,
7720 struct file_handle
*target_fh
;
7721 struct file_handle
*fh
;
7725 unsigned int size
, total_size
;
7727 if (get_user_s32(size
, handle
)) {
7728 return -TARGET_EFAULT
;
7731 name
= lock_user_string(pathname
);
7733 return -TARGET_EFAULT
;
7736 total_size
= sizeof(struct file_handle
) + size
;
7737 target_fh
= lock_user(VERIFY_WRITE
, handle
, total_size
, 0);
7739 unlock_user(name
, pathname
, 0);
7740 return -TARGET_EFAULT
;
7743 fh
= g_malloc0(total_size
);
7744 fh
->handle_bytes
= size
;
7746 ret
= get_errno(name_to_handle_at(dirfd
, path(name
), fh
, &mid
, flags
));
7747 unlock_user(name
, pathname
, 0);
7749 /* man name_to_handle_at(2):
7750 * Other than the use of the handle_bytes field, the caller should treat
7751 * the file_handle structure as an opaque data type
7754 memcpy(target_fh
, fh
, total_size
);
7755 target_fh
->handle_bytes
= tswap32(fh
->handle_bytes
);
7756 target_fh
->handle_type
= tswap32(fh
->handle_type
);
7758 unlock_user(target_fh
, handle
, total_size
);
7760 if (put_user_s32(mid
, mount_id
)) {
7761 return -TARGET_EFAULT
;
7769 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7770 static abi_long
do_open_by_handle_at(abi_long mount_fd
, abi_long handle
,
7773 struct file_handle
*target_fh
;
7774 struct file_handle
*fh
;
7775 unsigned int size
, total_size
;
7778 if (get_user_s32(size
, handle
)) {
7779 return -TARGET_EFAULT
;
7782 total_size
= sizeof(struct file_handle
) + size
;
7783 target_fh
= lock_user(VERIFY_READ
, handle
, total_size
, 1);
7785 return -TARGET_EFAULT
;
7788 fh
= g_memdup(target_fh
, total_size
);
7789 fh
->handle_bytes
= size
;
7790 fh
->handle_type
= tswap32(target_fh
->handle_type
);
7792 ret
= get_errno(open_by_handle_at(mount_fd
, fh
,
7793 target_to_host_bitmask(flags
, fcntl_flags_tbl
)));
7797 unlock_user(target_fh
, handle
, total_size
);
7803 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7805 static abi_long
do_signalfd4(int fd
, abi_long mask
, int flags
)
7808 target_sigset_t
*target_mask
;
7812 if (flags
& ~(TARGET_O_NONBLOCK_MASK
| TARGET_O_CLOEXEC
)) {
7813 return -TARGET_EINVAL
;
7815 if (!lock_user_struct(VERIFY_READ
, target_mask
, mask
, 1)) {
7816 return -TARGET_EFAULT
;
7819 target_to_host_sigset(&host_mask
, target_mask
);
7821 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
7823 ret
= get_errno(signalfd(fd
, &host_mask
, host_flags
));
7825 fd_trans_register(ret
, &target_signalfd_trans
);
7828 unlock_user_struct(target_mask
, mask
, 0);
7834 /* Map host to target signal numbers for the wait family of syscalls.
7835 Assume all other status bits are the same. */
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.
   NOTE(review): reconstructed from a mangled extraction — the WIFSTOPPED
   tail and final passthrough were restored; verify against upstream. */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
7848 static int open_self_cmdline(void *cpu_env
, int fd
)
7850 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7851 struct linux_binprm
*bprm
= ((TaskState
*)cpu
->opaque
)->bprm
;
7854 for (i
= 0; i
< bprm
->argc
; i
++) {
7855 size_t len
= strlen(bprm
->argv
[i
]) + 1;
7857 if (write(fd
, bprm
->argv
[i
], len
) != len
) {
7865 static int open_self_maps(void *cpu_env
, int fd
)
7867 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7868 TaskState
*ts
= cpu
->opaque
;
7869 GSList
*map_info
= read_self_maps();
7873 for (s
= map_info
; s
; s
= g_slist_next(s
)) {
7874 MapInfo
*e
= (MapInfo
*) s
->data
;
7876 if (h2g_valid(e
->start
)) {
7877 unsigned long min
= e
->start
;
7878 unsigned long max
= e
->end
;
7879 int flags
= page_get_flags(h2g(min
));
7882 max
= h2g_valid(max
- 1) ?
7883 max
: (uintptr_t) g2h_untagged(GUEST_ADDR_MAX
) + 1;
7885 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
7889 if (h2g(min
) == ts
->info
->stack_limit
) {
7895 count
= dprintf(fd
, TARGET_ABI_FMT_ptr
"-" TARGET_ABI_FMT_ptr
7896 " %c%c%c%c %08" PRIx64
" %s %"PRId64
,
7897 h2g(min
), h2g(max
- 1) + 1,
7898 (flags
& PAGE_READ
) ? 'r' : '-',
7899 (flags
& PAGE_WRITE_ORG
) ? 'w' : '-',
7900 (flags
& PAGE_EXEC
) ? 'x' : '-',
7901 e
->is_priv
? 'p' : '-',
7902 (uint64_t) e
->offset
, e
->dev
, e
->inode
);
7904 dprintf(fd
, "%*s%s\n", 73 - count
, "", path
);
7911 free_self_maps(map_info
);
7913 #ifdef TARGET_VSYSCALL_PAGE
7915 * We only support execution from the vsyscall page.
7916 * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
7918 count
= dprintf(fd
, TARGET_FMT_lx
"-" TARGET_FMT_lx
7919 " --xp 00000000 00:00 0",
7920 TARGET_VSYSCALL_PAGE
, TARGET_VSYSCALL_PAGE
+ TARGET_PAGE_SIZE
);
7921 dprintf(fd
, "%*s%s\n", 73 - count
, "", "[vsyscall]");
7927 static int open_self_stat(void *cpu_env
, int fd
)
7929 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7930 TaskState
*ts
= cpu
->opaque
;
7931 g_autoptr(GString
) buf
= g_string_new(NULL
);
7934 for (i
= 0; i
< 44; i
++) {
7937 g_string_printf(buf
, FMT_pid
" ", getpid());
7938 } else if (i
== 1) {
7940 gchar
*bin
= g_strrstr(ts
->bprm
->argv
[0], "/");
7941 bin
= bin
? bin
+ 1 : ts
->bprm
->argv
[0];
7942 g_string_printf(buf
, "(%.15s) ", bin
);
7943 } else if (i
== 27) {
7945 g_string_printf(buf
, TARGET_ABI_FMT_ld
" ", ts
->info
->start_stack
);
7947 /* for the rest, there is MasterCard */
7948 g_string_printf(buf
, "0%c", i
== 43 ? '\n' : ' ');
7951 if (write(fd
, buf
->str
, buf
->len
) != buf
->len
) {
7959 static int open_self_auxv(void *cpu_env
, int fd
)
7961 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7962 TaskState
*ts
= cpu
->opaque
;
7963 abi_ulong auxv
= ts
->info
->saved_auxv
;
7964 abi_ulong len
= ts
->info
->auxv_len
;
7968 * Auxiliary vector is stored in target process stack.
7969 * read in whole auxv vector and copy it to file
7971 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
7975 r
= write(fd
, ptr
, len
);
7982 lseek(fd
, 0, SEEK_SET
);
7983 unlock_user(ptr
, auxv
, len
);
/*
 * Return 1 when 'filename' names the given 'entry' of the emulated
 * process's own /proc directory — i.e. "/proc/self/<entry>" or
 * "/proc/<our pid>/<entry>" — and 0 otherwise.
 * NOTE(review): reconstructed from a mangled extraction — the early
 * return-0 branches were restored; verify against upstream.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}
8013 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
8014 defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
/* Exact-match comparator used by the fake-/proc open table: returns 1
 * when 'filename' equals 'entry' byte-for-byte, 0 otherwise. */
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}
8021 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
8022 static int open_net_route(void *cpu_env
, int fd
)
8029 fp
= fopen("/proc/net/route", "r");
8036 read
= getline(&line
, &len
, fp
);
8037 dprintf(fd
, "%s", line
);
8041 while ((read
= getline(&line
, &len
, fp
)) != -1) {
8043 uint32_t dest
, gw
, mask
;
8044 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
8047 fields
= sscanf(line
,
8048 "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8049 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
8050 &mask
, &mtu
, &window
, &irtt
);
8054 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8055 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
8056 metric
, tswap32(mask
), mtu
, window
, irtt
);
8066 #if defined(TARGET_SPARC)
/* Back /proc/cpuinfo for SPARC guests with a minimal fixed response. */
static int open_cpuinfo(void *cpu_env, int fd)
{
    dprintf(fd, "type\t\t: sun4u\n");
    return 0;
}
8074 #if defined(TARGET_HPPA)
/* Back /proc/cpuinfo for HPPA guests with a fixed PA7300LC description. */
static int open_cpuinfo(void *cpu_env, int fd)
{
    dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
    dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
    dprintf(fd, "capabilities\t: os32\n");
    dprintf(fd, "model\t\t: 9000/778/B160L\n");
    dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
    return 0;
}
8086 #if defined(TARGET_M68K)
/* Back /proc/hardware for M68K guests with a fixed model line. */
static int open_hardware(void *cpu_env, int fd)
{
    dprintf(fd, "Model:\t\tqemu-m68k\n");
    return 0;
}
8094 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
8097 const char *filename
;
8098 int (*fill
)(void *cpu_env
, int fd
);
8099 int (*cmp
)(const char *s1
, const char *s2
);
8101 const struct fake_open
*fake_open
;
8102 static const struct fake_open fakes
[] = {
8103 { "maps", open_self_maps
, is_proc_myself
},
8104 { "stat", open_self_stat
, is_proc_myself
},
8105 { "auxv", open_self_auxv
, is_proc_myself
},
8106 { "cmdline", open_self_cmdline
, is_proc_myself
},
8107 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
8108 { "/proc/net/route", open_net_route
, is_proc
},
8110 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8111 { "/proc/cpuinfo", open_cpuinfo
, is_proc
},
8113 #if defined(TARGET_M68K)
8114 { "/proc/hardware", open_hardware
, is_proc
},
8116 { NULL
, NULL
, NULL
}
8119 if (is_proc_myself(pathname
, "exe")) {
8120 int execfd
= qemu_getauxval(AT_EXECFD
);
8121 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
8124 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
8125 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
8130 if (fake_open
->filename
) {
8132 char filename
[PATH_MAX
];
8135 /* create temporary file to map stat to */
8136 tmpdir
= getenv("TMPDIR");
8139 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
8140 fd
= mkstemp(filename
);
8146 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
8152 lseek(fd
, 0, SEEK_SET
);
8157 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
8160 #define TIMER_MAGIC 0x0caf0000
8161 #define TIMER_MAGIC_MASK 0xffff0000
8163 /* Convert QEMU provided timer ID back to internal 16bit index format */
8164 static target_timer_t
get_timer_id(abi_long arg
)
8166 target_timer_t timerid
= arg
;
8168 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
8169 return -TARGET_EINVAL
;
8174 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
8175 return -TARGET_EINVAL
;
8181 static int target_to_host_cpu_mask(unsigned long *host_mask
,
8183 abi_ulong target_addr
,
8186 unsigned target_bits
= sizeof(abi_ulong
) * 8;
8187 unsigned host_bits
= sizeof(*host_mask
) * 8;
8188 abi_ulong
*target_mask
;
8191 assert(host_size
>= target_size
);
8193 target_mask
= lock_user(VERIFY_READ
, target_addr
, target_size
, 1);
8195 return -TARGET_EFAULT
;
8197 memset(host_mask
, 0, host_size
);
8199 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
8200 unsigned bit
= i
* target_bits
;
8203 __get_user(val
, &target_mask
[i
]);
8204 for (j
= 0; j
< target_bits
; j
++, bit
++) {
8205 if (val
& (1UL << j
)) {
8206 host_mask
[bit
/ host_bits
] |= 1UL << (bit
% host_bits
);
8211 unlock_user(target_mask
, target_addr
, 0);
8215 static int host_to_target_cpu_mask(const unsigned long *host_mask
,
8217 abi_ulong target_addr
,
8220 unsigned target_bits
= sizeof(abi_ulong
) * 8;
8221 unsigned host_bits
= sizeof(*host_mask
) * 8;
8222 abi_ulong
*target_mask
;
8225 assert(host_size
>= target_size
);
8227 target_mask
= lock_user(VERIFY_WRITE
, target_addr
, target_size
, 0);
8229 return -TARGET_EFAULT
;
8232 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
8233 unsigned bit
= i
* target_bits
;
8236 for (j
= 0; j
< target_bits
; j
++, bit
++) {
8237 if (host_mask
[bit
/ host_bits
] & (1UL << (bit
% host_bits
))) {
8241 __put_user(val
, &target_mask
[i
]);
8244 unlock_user(target_mask
, target_addr
, target_size
);
8248 /* This is an internal helper for do_syscall so that it is easier
8249 * to have a single return point, so that actions, such as logging
8250 * of syscall results, can be performed.
8251 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8253 static abi_long
do_syscall1(void *cpu_env
, int num
, abi_long arg1
,
8254 abi_long arg2
, abi_long arg3
, abi_long arg4
,
8255 abi_long arg5
, abi_long arg6
, abi_long arg7
,
8258 CPUState
*cpu
= env_cpu(cpu_env
);
8260 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8261 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8262 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8263 || defined(TARGET_NR_statx)
8266 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8267 || defined(TARGET_NR_fstatfs)
8273 case TARGET_NR_exit
:
8274 /* In old applications this may be used to implement _exit(2).
8275 However in threaded applications it is used for thread termination,
8276 and _exit_group is used for application termination.
8277 Do thread termination if we have more then one thread. */
8279 if (block_signals()) {
8280 return -TARGET_ERESTARTSYS
;
8283 pthread_mutex_lock(&clone_lock
);
8285 if (CPU_NEXT(first_cpu
)) {
8286 TaskState
*ts
= cpu
->opaque
;
8288 object_property_set_bool(OBJECT(cpu
), "realized", false, NULL
);
8289 object_unref(OBJECT(cpu
));
8291 * At this point the CPU should be unrealized and removed
8292 * from cpu lists. We can clean-up the rest of the thread
8293 * data without the lock held.
8296 pthread_mutex_unlock(&clone_lock
);
8298 if (ts
->child_tidptr
) {
8299 put_user_u32(0, ts
->child_tidptr
);
8300 do_sys_futex(g2h(cpu
, ts
->child_tidptr
),
8301 FUTEX_WAKE
, INT_MAX
, NULL
, NULL
, 0);
8305 rcu_unregister_thread();
8309 pthread_mutex_unlock(&clone_lock
);
8310 preexit_cleanup(cpu_env
, arg1
);
8312 return 0; /* avoid warning */
8313 case TARGET_NR_read
:
8314 if (arg2
== 0 && arg3
== 0) {
8315 return get_errno(safe_read(arg1
, 0, 0));
8317 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
8318 return -TARGET_EFAULT
;
8319 ret
= get_errno(safe_read(arg1
, p
, arg3
));
8321 fd_trans_host_to_target_data(arg1
)) {
8322 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
8324 unlock_user(p
, arg2
, ret
);
8327 case TARGET_NR_write
:
8328 if (arg2
== 0 && arg3
== 0) {
8329 return get_errno(safe_write(arg1
, 0, 0));
8331 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
8332 return -TARGET_EFAULT
;
8333 if (fd_trans_target_to_host_data(arg1
)) {
8334 void *copy
= g_malloc(arg3
);
8335 memcpy(copy
, p
, arg3
);
8336 ret
= fd_trans_target_to_host_data(arg1
)(copy
, arg3
);
8338 ret
= get_errno(safe_write(arg1
, copy
, ret
));
8342 ret
= get_errno(safe_write(arg1
, p
, arg3
));
8344 unlock_user(p
, arg2
, 0);
8347 #ifdef TARGET_NR_open
8348 case TARGET_NR_open
:
8349 if (!(p
= lock_user_string(arg1
)))
8350 return -TARGET_EFAULT
;
8351 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
8352 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
8354 fd_trans_unregister(ret
);
8355 unlock_user(p
, arg1
, 0);
8358 case TARGET_NR_openat
:
8359 if (!(p
= lock_user_string(arg2
)))
8360 return -TARGET_EFAULT
;
8361 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
8362 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
8364 fd_trans_unregister(ret
);
8365 unlock_user(p
, arg2
, 0);
8367 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8368 case TARGET_NR_name_to_handle_at
:
8369 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
8372 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8373 case TARGET_NR_open_by_handle_at
:
8374 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
8375 fd_trans_unregister(ret
);
8378 case TARGET_NR_close
:
8379 fd_trans_unregister(arg1
);
8380 return get_errno(close(arg1
));
8383 return do_brk(arg1
);
8384 #ifdef TARGET_NR_fork
8385 case TARGET_NR_fork
:
8386 return get_errno(do_fork(cpu_env
, TARGET_SIGCHLD
, 0, 0, 0, 0));
8388 #ifdef TARGET_NR_waitpid
8389 case TARGET_NR_waitpid
:
8392 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
8393 if (!is_error(ret
) && arg2
&& ret
8394 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
8395 return -TARGET_EFAULT
;
8399 #ifdef TARGET_NR_waitid
8400 case TARGET_NR_waitid
:
8404 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
8405 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
8406 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
8407 return -TARGET_EFAULT
;
8408 host_to_target_siginfo(p
, &info
);
8409 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
8414 #ifdef TARGET_NR_creat /* not on alpha */
8415 case TARGET_NR_creat
:
8416 if (!(p
= lock_user_string(arg1
)))
8417 return -TARGET_EFAULT
;
8418 ret
= get_errno(creat(p
, arg2
));
8419 fd_trans_unregister(ret
);
8420 unlock_user(p
, arg1
, 0);
8423 #ifdef TARGET_NR_link
8424 case TARGET_NR_link
:
8427 p
= lock_user_string(arg1
);
8428 p2
= lock_user_string(arg2
);
8430 ret
= -TARGET_EFAULT
;
8432 ret
= get_errno(link(p
, p2
));
8433 unlock_user(p2
, arg2
, 0);
8434 unlock_user(p
, arg1
, 0);
8438 #if defined(TARGET_NR_linkat)
8439 case TARGET_NR_linkat
:
8443 return -TARGET_EFAULT
;
8444 p
= lock_user_string(arg2
);
8445 p2
= lock_user_string(arg4
);
8447 ret
= -TARGET_EFAULT
;
8449 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
8450 unlock_user(p
, arg2
, 0);
8451 unlock_user(p2
, arg4
, 0);
8455 #ifdef TARGET_NR_unlink
8456 case TARGET_NR_unlink
:
8457 if (!(p
= lock_user_string(arg1
)))
8458 return -TARGET_EFAULT
;
8459 ret
= get_errno(unlink(p
));
8460 unlock_user(p
, arg1
, 0);
8463 #if defined(TARGET_NR_unlinkat)
8464 case TARGET_NR_unlinkat
:
8465 if (!(p
= lock_user_string(arg2
)))
8466 return -TARGET_EFAULT
;
8467 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
8468 unlock_user(p
, arg2
, 0);
8471 case TARGET_NR_execve
:
8473 char **argp
, **envp
;
8476 abi_ulong guest_argp
;
8477 abi_ulong guest_envp
;
8484 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
8485 if (get_user_ual(addr
, gp
))
8486 return -TARGET_EFAULT
;
8493 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
8494 if (get_user_ual(addr
, gp
))
8495 return -TARGET_EFAULT
;
8501 argp
= g_new0(char *, argc
+ 1);
8502 envp
= g_new0(char *, envc
+ 1);
8504 for (gp
= guest_argp
, q
= argp
; gp
;
8505 gp
+= sizeof(abi_ulong
), q
++) {
8506 if (get_user_ual(addr
, gp
))
8510 if (!(*q
= lock_user_string(addr
)))
8512 total_size
+= strlen(*q
) + 1;
8516 for (gp
= guest_envp
, q
= envp
; gp
;
8517 gp
+= sizeof(abi_ulong
), q
++) {
8518 if (get_user_ual(addr
, gp
))
8522 if (!(*q
= lock_user_string(addr
)))
8524 total_size
+= strlen(*q
) + 1;
8528 if (!(p
= lock_user_string(arg1
)))
8530 /* Although execve() is not an interruptible syscall it is
8531 * a special case where we must use the safe_syscall wrapper:
8532 * if we allow a signal to happen before we make the host
8533 * syscall then we will 'lose' it, because at the point of
8534 * execve the process leaves QEMU's control. So we use the
8535 * safe syscall wrapper to ensure that we either take the
8536 * signal as a guest signal, or else it does not happen
8537 * before the execve completes and makes it the other
8538 * program's problem.
8540 ret
= get_errno(safe_execve(p
, argp
, envp
));
8541 unlock_user(p
, arg1
, 0);
8546 ret
= -TARGET_EFAULT
;
8549 for (gp
= guest_argp
, q
= argp
; *q
;
8550 gp
+= sizeof(abi_ulong
), q
++) {
8551 if (get_user_ual(addr
, gp
)
8554 unlock_user(*q
, addr
, 0);
8556 for (gp
= guest_envp
, q
= envp
; *q
;
8557 gp
+= sizeof(abi_ulong
), q
++) {
8558 if (get_user_ual(addr
, gp
)
8561 unlock_user(*q
, addr
, 0);
8568 case TARGET_NR_chdir
:
8569 if (!(p
= lock_user_string(arg1
)))
8570 return -TARGET_EFAULT
;
8571 ret
= get_errno(chdir(p
));
8572 unlock_user(p
, arg1
, 0);
8574 #ifdef TARGET_NR_time
8575 case TARGET_NR_time
:
8578 ret
= get_errno(time(&host_time
));
8581 && put_user_sal(host_time
, arg1
))
8582 return -TARGET_EFAULT
;
8586 #ifdef TARGET_NR_mknod
8587 case TARGET_NR_mknod
:
8588 if (!(p
= lock_user_string(arg1
)))
8589 return -TARGET_EFAULT
;
8590 ret
= get_errno(mknod(p
, arg2
, arg3
));
8591 unlock_user(p
, arg1
, 0);
8594 #if defined(TARGET_NR_mknodat)
8595 case TARGET_NR_mknodat
:
8596 if (!(p
= lock_user_string(arg2
)))
8597 return -TARGET_EFAULT
;
8598 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
8599 unlock_user(p
, arg2
, 0);
8602 #ifdef TARGET_NR_chmod
8603 case TARGET_NR_chmod
:
8604 if (!(p
= lock_user_string(arg1
)))
8605 return -TARGET_EFAULT
;
8606 ret
= get_errno(chmod(p
, arg2
));
8607 unlock_user(p
, arg1
, 0);
8610 #ifdef TARGET_NR_lseek
8611 case TARGET_NR_lseek
:
8612 return get_errno(lseek(arg1
, arg2
, arg3
));
8614 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8615 /* Alpha specific */
8616 case TARGET_NR_getxpid
:
8617 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
8618 return get_errno(getpid());
8620 #ifdef TARGET_NR_getpid
8621 case TARGET_NR_getpid
:
8622 return get_errno(getpid());
8624 case TARGET_NR_mount
:
8626 /* need to look at the data field */
8630 p
= lock_user_string(arg1
);
8632 return -TARGET_EFAULT
;
8638 p2
= lock_user_string(arg2
);
8641 unlock_user(p
, arg1
, 0);
8643 return -TARGET_EFAULT
;
8647 p3
= lock_user_string(arg3
);
8650 unlock_user(p
, arg1
, 0);
8652 unlock_user(p2
, arg2
, 0);
8653 return -TARGET_EFAULT
;
8659 /* FIXME - arg5 should be locked, but it isn't clear how to
8660 * do that since it's not guaranteed to be a NULL-terminated
8664 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
8666 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(cpu
, arg5
));
8668 ret
= get_errno(ret
);
8671 unlock_user(p
, arg1
, 0);
8673 unlock_user(p2
, arg2
, 0);
8675 unlock_user(p3
, arg3
, 0);
8679 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8680 #if defined(TARGET_NR_umount)
8681 case TARGET_NR_umount
:
8683 #if defined(TARGET_NR_oldumount)
8684 case TARGET_NR_oldumount
:
8686 if (!(p
= lock_user_string(arg1
)))
8687 return -TARGET_EFAULT
;
8688 ret
= get_errno(umount(p
));
8689 unlock_user(p
, arg1
, 0);
8692 #ifdef TARGET_NR_stime /* not on alpha */
8693 case TARGET_NR_stime
:
8697 if (get_user_sal(ts
.tv_sec
, arg1
)) {
8698 return -TARGET_EFAULT
;
8700 return get_errno(clock_settime(CLOCK_REALTIME
, &ts
));
8703 #ifdef TARGET_NR_alarm /* not on alpha */
8704 case TARGET_NR_alarm
:
8707 #ifdef TARGET_NR_pause /* not on alpha */
8708 case TARGET_NR_pause
:
8709 if (!block_signals()) {
8710 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
8712 return -TARGET_EINTR
;
8714 #ifdef TARGET_NR_utime
8715 case TARGET_NR_utime
:
8717 struct utimbuf tbuf
, *host_tbuf
;
8718 struct target_utimbuf
*target_tbuf
;
8720 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
8721 return -TARGET_EFAULT
;
8722 tbuf
.actime
= tswapal(target_tbuf
->actime
);
8723 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
8724 unlock_user_struct(target_tbuf
, arg2
, 0);
8729 if (!(p
= lock_user_string(arg1
)))
8730 return -TARGET_EFAULT
;
8731 ret
= get_errno(utime(p
, host_tbuf
));
8732 unlock_user(p
, arg1
, 0);
8736 #ifdef TARGET_NR_utimes
8737 case TARGET_NR_utimes
:
8739 struct timeval
*tvp
, tv
[2];
8741 if (copy_from_user_timeval(&tv
[0], arg2
)
8742 || copy_from_user_timeval(&tv
[1],
8743 arg2
+ sizeof(struct target_timeval
)))
8744 return -TARGET_EFAULT
;
8749 if (!(p
= lock_user_string(arg1
)))
8750 return -TARGET_EFAULT
;
8751 ret
= get_errno(utimes(p
, tvp
));
8752 unlock_user(p
, arg1
, 0);
8756 #if defined(TARGET_NR_futimesat)
8757 case TARGET_NR_futimesat
:
8759 struct timeval
*tvp
, tv
[2];
8761 if (copy_from_user_timeval(&tv
[0], arg3
)
8762 || copy_from_user_timeval(&tv
[1],
8763 arg3
+ sizeof(struct target_timeval
)))
8764 return -TARGET_EFAULT
;
8769 if (!(p
= lock_user_string(arg2
))) {
8770 return -TARGET_EFAULT
;
8772 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
8773 unlock_user(p
, arg2
, 0);
8777 #ifdef TARGET_NR_access
8778 case TARGET_NR_access
:
8779 if (!(p
= lock_user_string(arg1
))) {
8780 return -TARGET_EFAULT
;
8782 ret
= get_errno(access(path(p
), arg2
));
8783 unlock_user(p
, arg1
, 0);
8786 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8787 case TARGET_NR_faccessat
:
8788 if (!(p
= lock_user_string(arg2
))) {
8789 return -TARGET_EFAULT
;
8791 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
8792 unlock_user(p
, arg2
, 0);
8795 #ifdef TARGET_NR_nice /* not on alpha */
8796 case TARGET_NR_nice
:
8797 return get_errno(nice(arg1
));
8799 case TARGET_NR_sync
:
8802 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8803 case TARGET_NR_syncfs
:
8804 return get_errno(syncfs(arg1
));
8806 case TARGET_NR_kill
:
8807 return get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
8808 #ifdef TARGET_NR_rename
8809 case TARGET_NR_rename
:
8812 p
= lock_user_string(arg1
);
8813 p2
= lock_user_string(arg2
);
8815 ret
= -TARGET_EFAULT
;
8817 ret
= get_errno(rename(p
, p2
));
8818 unlock_user(p2
, arg2
, 0);
8819 unlock_user(p
, arg1
, 0);
8823 #if defined(TARGET_NR_renameat)
8824 case TARGET_NR_renameat
:
8827 p
= lock_user_string(arg2
);
8828 p2
= lock_user_string(arg4
);
8830 ret
= -TARGET_EFAULT
;
8832 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
8833 unlock_user(p2
, arg4
, 0);
8834 unlock_user(p
, arg2
, 0);
8838 #if defined(TARGET_NR_renameat2)
8839 case TARGET_NR_renameat2
:
8842 p
= lock_user_string(arg2
);
8843 p2
= lock_user_string(arg4
);
8845 ret
= -TARGET_EFAULT
;
8847 ret
= get_errno(sys_renameat2(arg1
, p
, arg3
, p2
, arg5
));
8849 unlock_user(p2
, arg4
, 0);
8850 unlock_user(p
, arg2
, 0);
8854 #ifdef TARGET_NR_mkdir
8855 case TARGET_NR_mkdir
:
8856 if (!(p
= lock_user_string(arg1
)))
8857 return -TARGET_EFAULT
;
8858 ret
= get_errno(mkdir(p
, arg2
));
8859 unlock_user(p
, arg1
, 0);
8862 #if defined(TARGET_NR_mkdirat)
8863 case TARGET_NR_mkdirat
:
8864 if (!(p
= lock_user_string(arg2
)))
8865 return -TARGET_EFAULT
;
8866 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
8867 unlock_user(p
, arg2
, 0);
8870 #ifdef TARGET_NR_rmdir
8871 case TARGET_NR_rmdir
:
8872 if (!(p
= lock_user_string(arg1
)))
8873 return -TARGET_EFAULT
;
8874 ret
= get_errno(rmdir(p
));
8875 unlock_user(p
, arg1
, 0);
8879 ret
= get_errno(dup(arg1
));
8881 fd_trans_dup(arg1
, ret
);
8884 #ifdef TARGET_NR_pipe
8885 case TARGET_NR_pipe
:
8886 return do_pipe(cpu_env
, arg1
, 0, 0);
8888 #ifdef TARGET_NR_pipe2
8889 case TARGET_NR_pipe2
:
8890 return do_pipe(cpu_env
, arg1
,
8891 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
8893 case TARGET_NR_times
:
8895 struct target_tms
*tmsp
;
8897 ret
= get_errno(times(&tms
));
8899 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
8901 return -TARGET_EFAULT
;
8902 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
8903 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
8904 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
8905 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
8908 ret
= host_to_target_clock_t(ret
);
8911 case TARGET_NR_acct
:
8913 ret
= get_errno(acct(NULL
));
8915 if (!(p
= lock_user_string(arg1
))) {
8916 return -TARGET_EFAULT
;
8918 ret
= get_errno(acct(path(p
)));
8919 unlock_user(p
, arg1
, 0);
8922 #ifdef TARGET_NR_umount2
8923 case TARGET_NR_umount2
:
8924 if (!(p
= lock_user_string(arg1
)))
8925 return -TARGET_EFAULT
;
8926 ret
= get_errno(umount2(p
, arg2
));
8927 unlock_user(p
, arg1
, 0);
8930 case TARGET_NR_ioctl
:
8931 return do_ioctl(arg1
, arg2
, arg3
);
8932 #ifdef TARGET_NR_fcntl
8933 case TARGET_NR_fcntl
:
8934 return do_fcntl(arg1
, arg2
, arg3
);
8936 case TARGET_NR_setpgid
:
8937 return get_errno(setpgid(arg1
, arg2
));
8938 case TARGET_NR_umask
:
8939 return get_errno(umask(arg1
));
8940 case TARGET_NR_chroot
:
8941 if (!(p
= lock_user_string(arg1
)))
8942 return -TARGET_EFAULT
;
8943 ret
= get_errno(chroot(p
));
8944 unlock_user(p
, arg1
, 0);
8946 #ifdef TARGET_NR_dup2
8947 case TARGET_NR_dup2
:
8948 ret
= get_errno(dup2(arg1
, arg2
));
8950 fd_trans_dup(arg1
, arg2
);
8954 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8955 case TARGET_NR_dup3
:
8959 if ((arg3
& ~TARGET_O_CLOEXEC
) != 0) {
8962 host_flags
= target_to_host_bitmask(arg3
, fcntl_flags_tbl
);
8963 ret
= get_errno(dup3(arg1
, arg2
, host_flags
));
8965 fd_trans_dup(arg1
, arg2
);
8970 #ifdef TARGET_NR_getppid /* not on alpha */
8971 case TARGET_NR_getppid
:
8972 return get_errno(getppid());
8974 #ifdef TARGET_NR_getpgrp
8975 case TARGET_NR_getpgrp
:
8976 return get_errno(getpgrp());
8978 case TARGET_NR_setsid
:
8979 return get_errno(setsid());
8980 #ifdef TARGET_NR_sigaction
8981 case TARGET_NR_sigaction
:
8983 #if defined(TARGET_MIPS)
8984 struct target_sigaction act
, oact
, *pact
, *old_act
;
8987 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8988 return -TARGET_EFAULT
;
8989 act
._sa_handler
= old_act
->_sa_handler
;
8990 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
8991 act
.sa_flags
= old_act
->sa_flags
;
8992 unlock_user_struct(old_act
, arg2
, 0);
8998 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
, 0));
9000 if (!is_error(ret
) && arg3
) {
9001 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
9002 return -TARGET_EFAULT
;
9003 old_act
->_sa_handler
= oact
._sa_handler
;
9004 old_act
->sa_flags
= oact
.sa_flags
;
9005 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
9006 old_act
->sa_mask
.sig
[1] = 0;
9007 old_act
->sa_mask
.sig
[2] = 0;
9008 old_act
->sa_mask
.sig
[3] = 0;
9009 unlock_user_struct(old_act
, arg3
, 1);
9012 struct target_old_sigaction
*old_act
;
9013 struct target_sigaction act
, oact
, *pact
;
9015 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
9016 return -TARGET_EFAULT
;
9017 act
._sa_handler
= old_act
->_sa_handler
;
9018 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
9019 act
.sa_flags
= old_act
->sa_flags
;
9020 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9021 act
.sa_restorer
= old_act
->sa_restorer
;
9023 unlock_user_struct(old_act
, arg2
, 0);
9028 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
, 0));
9029 if (!is_error(ret
) && arg3
) {
9030 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
9031 return -TARGET_EFAULT
;
9032 old_act
->_sa_handler
= oact
._sa_handler
;
9033 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
9034 old_act
->sa_flags
= oact
.sa_flags
;
9035 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9036 old_act
->sa_restorer
= oact
.sa_restorer
;
9038 unlock_user_struct(old_act
, arg3
, 1);
9044 case TARGET_NR_rt_sigaction
:
9047 * For Alpha and SPARC this is a 5 argument syscall, with
9048 * a 'restorer' parameter which must be copied into the
9049 * sa_restorer field of the sigaction struct.
9050 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9051 * and arg5 is the sigsetsize.
9053 #if defined(TARGET_ALPHA)
9054 target_ulong sigsetsize
= arg4
;
9055 target_ulong restorer
= arg5
;
9056 #elif defined(TARGET_SPARC)
9057 target_ulong restorer
= arg4
;
9058 target_ulong sigsetsize
= arg5
;
9060 target_ulong sigsetsize
= arg4
;
9061 target_ulong restorer
= 0;
9063 struct target_sigaction
*act
= NULL
;
9064 struct target_sigaction
*oact
= NULL
;
9066 if (sigsetsize
!= sizeof(target_sigset_t
)) {
9067 return -TARGET_EINVAL
;
9069 if (arg2
&& !lock_user_struct(VERIFY_READ
, act
, arg2
, 1)) {
9070 return -TARGET_EFAULT
;
9072 if (arg3
&& !lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
9073 ret
= -TARGET_EFAULT
;
9075 ret
= get_errno(do_sigaction(arg1
, act
, oact
, restorer
));
9077 unlock_user_struct(oact
, arg3
, 1);
9081 unlock_user_struct(act
, arg2
, 0);
9085 #ifdef TARGET_NR_sgetmask /* not on alpha */
9086 case TARGET_NR_sgetmask
:
9089 abi_ulong target_set
;
9090 ret
= do_sigprocmask(0, NULL
, &cur_set
);
9092 host_to_target_old_sigset(&target_set
, &cur_set
);
9098 #ifdef TARGET_NR_ssetmask /* not on alpha */
9099 case TARGET_NR_ssetmask
:
9102 abi_ulong target_set
= arg1
;
9103 target_to_host_old_sigset(&set
, &target_set
);
9104 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
9106 host_to_target_old_sigset(&target_set
, &oset
);
9112 #ifdef TARGET_NR_sigprocmask
9113 case TARGET_NR_sigprocmask
:
9115 #if defined(TARGET_ALPHA)
9116 sigset_t set
, oldset
;
9121 case TARGET_SIG_BLOCK
:
9124 case TARGET_SIG_UNBLOCK
:
9127 case TARGET_SIG_SETMASK
:
9131 return -TARGET_EINVAL
;
9134 target_to_host_old_sigset(&set
, &mask
);
9136 ret
= do_sigprocmask(how
, &set
, &oldset
);
9137 if (!is_error(ret
)) {
9138 host_to_target_old_sigset(&mask
, &oldset
);
9140 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
9143 sigset_t set
, oldset
, *set_ptr
;
9148 case TARGET_SIG_BLOCK
:
9151 case TARGET_SIG_UNBLOCK
:
9154 case TARGET_SIG_SETMASK
:
9158 return -TARGET_EINVAL
;
9160 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
9161 return -TARGET_EFAULT
;
9162 target_to_host_old_sigset(&set
, p
);
9163 unlock_user(p
, arg2
, 0);
9169 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
9170 if (!is_error(ret
) && arg3
) {
9171 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
9172 return -TARGET_EFAULT
;
9173 host_to_target_old_sigset(p
, &oldset
);
9174 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
9180 case TARGET_NR_rt_sigprocmask
:
9183 sigset_t set
, oldset
, *set_ptr
;
9185 if (arg4
!= sizeof(target_sigset_t
)) {
9186 return -TARGET_EINVAL
;
9191 case TARGET_SIG_BLOCK
:
9194 case TARGET_SIG_UNBLOCK
:
9197 case TARGET_SIG_SETMASK
:
9201 return -TARGET_EINVAL
;
9203 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
9204 return -TARGET_EFAULT
;
9205 target_to_host_sigset(&set
, p
);
9206 unlock_user(p
, arg2
, 0);
9212 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
9213 if (!is_error(ret
) && arg3
) {
9214 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
9215 return -TARGET_EFAULT
;
9216 host_to_target_sigset(p
, &oldset
);
9217 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
9221 #ifdef TARGET_NR_sigpending
9222 case TARGET_NR_sigpending
:
9225 ret
= get_errno(sigpending(&set
));
9226 if (!is_error(ret
)) {
9227 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
9228 return -TARGET_EFAULT
;
9229 host_to_target_old_sigset(p
, &set
);
9230 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
9235 case TARGET_NR_rt_sigpending
:
9239 /* Yes, this check is >, not != like most. We follow the kernel's
9240 * logic and it does it like this because it implements
9241 * NR_sigpending through the same code path, and in that case
9242 * the old_sigset_t is smaller in size.
9244 if (arg2
> sizeof(target_sigset_t
)) {
9245 return -TARGET_EINVAL
;
9248 ret
= get_errno(sigpending(&set
));
9249 if (!is_error(ret
)) {
9250 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
9251 return -TARGET_EFAULT
;
9252 host_to_target_sigset(p
, &set
);
9253 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
9257 #ifdef TARGET_NR_sigsuspend
9258 case TARGET_NR_sigsuspend
:
9260 TaskState
*ts
= cpu
->opaque
;
9261 #if defined(TARGET_ALPHA)
9262 abi_ulong mask
= arg1
;
9263 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
9265 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
9266 return -TARGET_EFAULT
;
9267 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
9268 unlock_user(p
, arg1
, 0);
9270 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
9272 if (ret
!= -TARGET_ERESTARTSYS
) {
9273 ts
->in_sigsuspend
= 1;
9278 case TARGET_NR_rt_sigsuspend
:
9280 TaskState
*ts
= cpu
->opaque
;
9282 if (arg2
!= sizeof(target_sigset_t
)) {
9283 return -TARGET_EINVAL
;
9285 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
9286 return -TARGET_EFAULT
;
9287 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
9288 unlock_user(p
, arg1
, 0);
9289 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
9291 if (ret
!= -TARGET_ERESTARTSYS
) {
9292 ts
->in_sigsuspend
= 1;
9296 #ifdef TARGET_NR_rt_sigtimedwait
9297 case TARGET_NR_rt_sigtimedwait
:
9300 struct timespec uts
, *puts
;
9303 if (arg4
!= sizeof(target_sigset_t
)) {
9304 return -TARGET_EINVAL
;
9307 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
9308 return -TARGET_EFAULT
;
9309 target_to_host_sigset(&set
, p
);
9310 unlock_user(p
, arg1
, 0);
9313 if (target_to_host_timespec(puts
, arg3
)) {
9314 return -TARGET_EFAULT
;
9319 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
9321 if (!is_error(ret
)) {
9323 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
9326 return -TARGET_EFAULT
;
9328 host_to_target_siginfo(p
, &uinfo
);
9329 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
9331 ret
= host_to_target_signal(ret
);
9336 #ifdef TARGET_NR_rt_sigtimedwait_time64
9337 case TARGET_NR_rt_sigtimedwait_time64
:
9340 struct timespec uts
, *puts
;
9343 if (arg4
!= sizeof(target_sigset_t
)) {
9344 return -TARGET_EINVAL
;
9347 p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1);
9349 return -TARGET_EFAULT
;
9351 target_to_host_sigset(&set
, p
);
9352 unlock_user(p
, arg1
, 0);
9355 if (target_to_host_timespec64(puts
, arg3
)) {
9356 return -TARGET_EFAULT
;
9361 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
9363 if (!is_error(ret
)) {
9365 p
= lock_user(VERIFY_WRITE
, arg2
,
9366 sizeof(target_siginfo_t
), 0);
9368 return -TARGET_EFAULT
;
9370 host_to_target_siginfo(p
, &uinfo
);
9371 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
9373 ret
= host_to_target_signal(ret
);
9378 case TARGET_NR_rt_sigqueueinfo
:
9382 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
9384 return -TARGET_EFAULT
;
9386 target_to_host_siginfo(&uinfo
, p
);
9387 unlock_user(p
, arg3
, 0);
9388 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
9391 case TARGET_NR_rt_tgsigqueueinfo
:
9395 p
= lock_user(VERIFY_READ
, arg4
, sizeof(target_siginfo_t
), 1);
9397 return -TARGET_EFAULT
;
9399 target_to_host_siginfo(&uinfo
, p
);
9400 unlock_user(p
, arg4
, 0);
9401 ret
= get_errno(sys_rt_tgsigqueueinfo(arg1
, arg2
, arg3
, &uinfo
));
9404 #ifdef TARGET_NR_sigreturn
9405 case TARGET_NR_sigreturn
:
9406 if (block_signals()) {
9407 return -TARGET_ERESTARTSYS
;
9409 return do_sigreturn(cpu_env
);
9411 case TARGET_NR_rt_sigreturn
:
9412 if (block_signals()) {
9413 return -TARGET_ERESTARTSYS
;
9415 return do_rt_sigreturn(cpu_env
);
9416 case TARGET_NR_sethostname
:
9417 if (!(p
= lock_user_string(arg1
)))
9418 return -TARGET_EFAULT
;
9419 ret
= get_errno(sethostname(p
, arg2
));
9420 unlock_user(p
, arg1
, 0);
9422 #ifdef TARGET_NR_setrlimit
9423 case TARGET_NR_setrlimit
:
9425 int resource
= target_to_host_resource(arg1
);
9426 struct target_rlimit
*target_rlim
;
9428 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
9429 return -TARGET_EFAULT
;
9430 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
9431 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
9432 unlock_user_struct(target_rlim
, arg2
, 0);
9434 * If we just passed through resource limit settings for memory then
9435 * they would also apply to QEMU's own allocations, and QEMU will
9436 * crash or hang or die if its allocations fail. Ideally we would
9437 * track the guest allocations in QEMU and apply the limits ourselves.
9438 * For now, just tell the guest the call succeeded but don't actually
9441 if (resource
!= RLIMIT_AS
&&
9442 resource
!= RLIMIT_DATA
&&
9443 resource
!= RLIMIT_STACK
) {
9444 return get_errno(setrlimit(resource
, &rlim
));
9450 #ifdef TARGET_NR_getrlimit
9451 case TARGET_NR_getrlimit
:
9453 int resource
= target_to_host_resource(arg1
);
9454 struct target_rlimit
*target_rlim
;
9457 ret
= get_errno(getrlimit(resource
, &rlim
));
9458 if (!is_error(ret
)) {
9459 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
9460 return -TARGET_EFAULT
;
9461 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
9462 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
9463 unlock_user_struct(target_rlim
, arg2
, 1);
9468 case TARGET_NR_getrusage
:
9470 struct rusage rusage
;
9471 ret
= get_errno(getrusage(arg1
, &rusage
));
9472 if (!is_error(ret
)) {
9473 ret
= host_to_target_rusage(arg2
, &rusage
);
9477 #if defined(TARGET_NR_gettimeofday)
9478 case TARGET_NR_gettimeofday
:
9483 ret
= get_errno(gettimeofday(&tv
, &tz
));
9484 if (!is_error(ret
)) {
9485 if (arg1
&& copy_to_user_timeval(arg1
, &tv
)) {
9486 return -TARGET_EFAULT
;
9488 if (arg2
&& copy_to_user_timezone(arg2
, &tz
)) {
9489 return -TARGET_EFAULT
;
9495 #if defined(TARGET_NR_settimeofday)
9496 case TARGET_NR_settimeofday
:
9498 struct timeval tv
, *ptv
= NULL
;
9499 struct timezone tz
, *ptz
= NULL
;
9502 if (copy_from_user_timeval(&tv
, arg1
)) {
9503 return -TARGET_EFAULT
;
9509 if (copy_from_user_timezone(&tz
, arg2
)) {
9510 return -TARGET_EFAULT
;
9515 return get_errno(settimeofday(ptv
, ptz
));
9518 #if defined(TARGET_NR_select)
9519 case TARGET_NR_select
:
9520 #if defined(TARGET_WANT_NI_OLD_SELECT)
9521 /* some architectures used to have old_select here
9522 * but now ENOSYS it.
9524 ret
= -TARGET_ENOSYS
;
9525 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9526 ret
= do_old_select(arg1
);
9528 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9532 #ifdef TARGET_NR_pselect6
9533 case TARGET_NR_pselect6
:
9534 return do_pselect6(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, false);
9536 #ifdef TARGET_NR_pselect6_time64
9537 case TARGET_NR_pselect6_time64
:
9538 return do_pselect6(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, true);
9540 #ifdef TARGET_NR_symlink
9541 case TARGET_NR_symlink
:
9544 p
= lock_user_string(arg1
);
9545 p2
= lock_user_string(arg2
);
9547 ret
= -TARGET_EFAULT
;
9549 ret
= get_errno(symlink(p
, p2
));
9550 unlock_user(p2
, arg2
, 0);
9551 unlock_user(p
, arg1
, 0);
9555 #if defined(TARGET_NR_symlinkat)
9556 case TARGET_NR_symlinkat
:
9559 p
= lock_user_string(arg1
);
9560 p2
= lock_user_string(arg3
);
9562 ret
= -TARGET_EFAULT
;
9564 ret
= get_errno(symlinkat(p
, arg2
, p2
));
9565 unlock_user(p2
, arg3
, 0);
9566 unlock_user(p
, arg1
, 0);
9570 #ifdef TARGET_NR_readlink
9571 case TARGET_NR_readlink
:
9574 p
= lock_user_string(arg1
);
9575 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9577 ret
= -TARGET_EFAULT
;
9579 /* Short circuit this for the magic exe check. */
9580 ret
= -TARGET_EINVAL
;
9581 } else if (is_proc_myself((const char *)p
, "exe")) {
9582 char real
[PATH_MAX
], *temp
;
9583 temp
= realpath(exec_path
, real
);
9584 /* Return value is # of bytes that we wrote to the buffer. */
9586 ret
= get_errno(-1);
9588 /* Don't worry about sign mismatch as earlier mapping
9589 * logic would have thrown a bad address error. */
9590 ret
= MIN(strlen(real
), arg3
);
9591 /* We cannot NUL terminate the string. */
9592 memcpy(p2
, real
, ret
);
9595 ret
= get_errno(readlink(path(p
), p2
, arg3
));
9597 unlock_user(p2
, arg2
, ret
);
9598 unlock_user(p
, arg1
, 0);
9602 #if defined(TARGET_NR_readlinkat)
9603 case TARGET_NR_readlinkat
:
9606 p
= lock_user_string(arg2
);
9607 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
9609 ret
= -TARGET_EFAULT
;
9610 } else if (is_proc_myself((const char *)p
, "exe")) {
9611 char real
[PATH_MAX
], *temp
;
9612 temp
= realpath(exec_path
, real
);
9613 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
9614 snprintf((char *)p2
, arg4
, "%s", real
);
9616 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
9618 unlock_user(p2
, arg3
, ret
);
9619 unlock_user(p
, arg2
, 0);
9623 #ifdef TARGET_NR_swapon
9624 case TARGET_NR_swapon
:
9625 if (!(p
= lock_user_string(arg1
)))
9626 return -TARGET_EFAULT
;
9627 ret
= get_errno(swapon(p
, arg2
));
9628 unlock_user(p
, arg1
, 0);
9631 case TARGET_NR_reboot
:
9632 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
9633 /* arg4 must be ignored in all other cases */
9634 p
= lock_user_string(arg4
);
9636 return -TARGET_EFAULT
;
9638 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
9639 unlock_user(p
, arg4
, 0);
9641 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
9644 #ifdef TARGET_NR_mmap
9645 case TARGET_NR_mmap
:
9646 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9647 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9648 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9649 || defined(TARGET_S390X)
9652 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
9653 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
9654 return -TARGET_EFAULT
;
9661 unlock_user(v
, arg1
, 0);
9662 ret
= get_errno(target_mmap(v1
, v2
, v3
,
9663 target_to_host_bitmask(v4
, mmap_flags_tbl
),
9667 /* mmap pointers are always untagged */
9668 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
9669 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9675 #ifdef TARGET_NR_mmap2
9676 case TARGET_NR_mmap2
:
9678 #define MMAP_SHIFT 12
9680 ret
= target_mmap(arg1
, arg2
, arg3
,
9681 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9682 arg5
, arg6
<< MMAP_SHIFT
);
9683 return get_errno(ret
);
9685 case TARGET_NR_munmap
:
9686 arg1
= cpu_untagged_addr(cpu
, arg1
);
9687 return get_errno(target_munmap(arg1
, arg2
));
9688 case TARGET_NR_mprotect
:
9689 arg1
= cpu_untagged_addr(cpu
, arg1
);
9691 TaskState
*ts
= cpu
->opaque
;
9692 /* Special hack to detect libc making the stack executable. */
9693 if ((arg3
& PROT_GROWSDOWN
)
9694 && arg1
>= ts
->info
->stack_limit
9695 && arg1
<= ts
->info
->start_stack
) {
9696 arg3
&= ~PROT_GROWSDOWN
;
9697 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
9698 arg1
= ts
->info
->stack_limit
;
9701 return get_errno(target_mprotect(arg1
, arg2
, arg3
));
9702 #ifdef TARGET_NR_mremap
9703 case TARGET_NR_mremap
:
9704 arg1
= cpu_untagged_addr(cpu
, arg1
);
9705 /* mremap new_addr (arg5) is always untagged */
9706 return get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
9708 /* ??? msync/mlock/munlock are broken for softmmu. */
9709 #ifdef TARGET_NR_msync
9710 case TARGET_NR_msync
:
9711 return get_errno(msync(g2h(cpu
, arg1
), arg2
, arg3
));
9713 #ifdef TARGET_NR_mlock
9714 case TARGET_NR_mlock
:
9715 return get_errno(mlock(g2h(cpu
, arg1
), arg2
));
9717 #ifdef TARGET_NR_munlock
9718 case TARGET_NR_munlock
:
9719 return get_errno(munlock(g2h(cpu
, arg1
), arg2
));
9721 #ifdef TARGET_NR_mlockall
9722 case TARGET_NR_mlockall
:
9723 return get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
9725 #ifdef TARGET_NR_munlockall
9726 case TARGET_NR_munlockall
:
9727 return get_errno(munlockall());
9729 #ifdef TARGET_NR_truncate
9730 case TARGET_NR_truncate
:
9731 if (!(p
= lock_user_string(arg1
)))
9732 return -TARGET_EFAULT
;
9733 ret
= get_errno(truncate(p
, arg2
));
9734 unlock_user(p
, arg1
, 0);
9737 #ifdef TARGET_NR_ftruncate
9738 case TARGET_NR_ftruncate
:
9739 return get_errno(ftruncate(arg1
, arg2
));
9741 case TARGET_NR_fchmod
:
9742 return get_errno(fchmod(arg1
, arg2
));
9743 #if defined(TARGET_NR_fchmodat)
9744 case TARGET_NR_fchmodat
:
9745 if (!(p
= lock_user_string(arg2
)))
9746 return -TARGET_EFAULT
;
9747 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
9748 unlock_user(p
, arg2
, 0);
9751 case TARGET_NR_getpriority
:
9752 /* Note that negative values are valid for getpriority, so we must
9753 differentiate based on errno settings. */
9755 ret
= getpriority(arg1
, arg2
);
9756 if (ret
== -1 && errno
!= 0) {
9757 return -host_to_target_errno(errno
);
9760 /* Return value is the unbiased priority. Signal no error. */
9761 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
9763 /* Return value is a biased priority to avoid negative numbers. */
9767 case TARGET_NR_setpriority
:
9768 return get_errno(setpriority(arg1
, arg2
, arg3
));
9769 #ifdef TARGET_NR_statfs
9770 case TARGET_NR_statfs
:
9771 if (!(p
= lock_user_string(arg1
))) {
9772 return -TARGET_EFAULT
;
9774 ret
= get_errno(statfs(path(p
), &stfs
));
9775 unlock_user(p
, arg1
, 0);
9777 if (!is_error(ret
)) {
9778 struct target_statfs
*target_stfs
;
9780 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
9781 return -TARGET_EFAULT
;
9782 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9783 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9784 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9785 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9786 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9787 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9788 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9789 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9790 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9791 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9792 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9793 #ifdef _STATFS_F_FLAGS
9794 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
9796 __put_user(0, &target_stfs
->f_flags
);
9798 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9799 unlock_user_struct(target_stfs
, arg2
, 1);
9803 #ifdef TARGET_NR_fstatfs
9804 case TARGET_NR_fstatfs
:
9805 ret
= get_errno(fstatfs(arg1
, &stfs
));
9806 goto convert_statfs
;
9808 #ifdef TARGET_NR_statfs64
9809 case TARGET_NR_statfs64
:
9810 if (!(p
= lock_user_string(arg1
))) {
9811 return -TARGET_EFAULT
;
9813 ret
= get_errno(statfs(path(p
), &stfs
));
9814 unlock_user(p
, arg1
, 0);
9816 if (!is_error(ret
)) {
9817 struct target_statfs64
*target_stfs
;
9819 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
9820 return -TARGET_EFAULT
;
9821 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9822 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9823 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9824 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9825 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9826 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9827 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9828 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9829 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9830 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9831 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9832 #ifdef _STATFS_F_FLAGS
9833 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
9835 __put_user(0, &target_stfs
->f_flags
);
9837 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9838 unlock_user_struct(target_stfs
, arg3
, 1);
9841 case TARGET_NR_fstatfs64
:
9842 ret
= get_errno(fstatfs(arg1
, &stfs
));
9843 goto convert_statfs64
;
9845 #ifdef TARGET_NR_socketcall
9846 case TARGET_NR_socketcall
:
9847 return do_socketcall(arg1
, arg2
);
9849 #ifdef TARGET_NR_accept
9850 case TARGET_NR_accept
:
9851 return do_accept4(arg1
, arg2
, arg3
, 0);
9853 #ifdef TARGET_NR_accept4
9854 case TARGET_NR_accept4
:
9855 return do_accept4(arg1
, arg2
, arg3
, arg4
);
9857 #ifdef TARGET_NR_bind
9858 case TARGET_NR_bind
:
9859 return do_bind(arg1
, arg2
, arg3
);
9861 #ifdef TARGET_NR_connect
9862 case TARGET_NR_connect
:
9863 return do_connect(arg1
, arg2
, arg3
);
9865 #ifdef TARGET_NR_getpeername
9866 case TARGET_NR_getpeername
:
9867 return do_getpeername(arg1
, arg2
, arg3
);
9869 #ifdef TARGET_NR_getsockname
9870 case TARGET_NR_getsockname
:
9871 return do_getsockname(arg1
, arg2
, arg3
);
9873 #ifdef TARGET_NR_getsockopt
9874 case TARGET_NR_getsockopt
:
9875 return do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
9877 #ifdef TARGET_NR_listen
9878 case TARGET_NR_listen
:
9879 return get_errno(listen(arg1
, arg2
));
9881 #ifdef TARGET_NR_recv
9882 case TARGET_NR_recv
:
9883 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
9885 #ifdef TARGET_NR_recvfrom
9886 case TARGET_NR_recvfrom
:
9887 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9889 #ifdef TARGET_NR_recvmsg
9890 case TARGET_NR_recvmsg
:
9891 return do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
9893 #ifdef TARGET_NR_send
9894 case TARGET_NR_send
:
9895 return do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
9897 #ifdef TARGET_NR_sendmsg
9898 case TARGET_NR_sendmsg
:
9899 return do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
9901 #ifdef TARGET_NR_sendmmsg
9902 case TARGET_NR_sendmmsg
:
9903 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
9905 #ifdef TARGET_NR_recvmmsg
9906 case TARGET_NR_recvmmsg
:
9907 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
9909 #ifdef TARGET_NR_sendto
9910 case TARGET_NR_sendto
:
9911 return do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9913 #ifdef TARGET_NR_shutdown
9914 case TARGET_NR_shutdown
:
9915 return get_errno(shutdown(arg1
, arg2
));
9917 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9918 case TARGET_NR_getrandom
:
9919 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
9921 return -TARGET_EFAULT
;
9923 ret
= get_errno(getrandom(p
, arg2
, arg3
));
9924 unlock_user(p
, arg1
, ret
);
9927 #ifdef TARGET_NR_socket
9928 case TARGET_NR_socket
:
9929 return do_socket(arg1
, arg2
, arg3
);
9931 #ifdef TARGET_NR_socketpair
9932 case TARGET_NR_socketpair
:
9933 return do_socketpair(arg1
, arg2
, arg3
, arg4
);
9935 #ifdef TARGET_NR_setsockopt
9936 case TARGET_NR_setsockopt
:
9937 return do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
9939 #if defined(TARGET_NR_syslog)
9940 case TARGET_NR_syslog
:
9945 case TARGET_SYSLOG_ACTION_CLOSE
: /* Close log */
9946 case TARGET_SYSLOG_ACTION_OPEN
: /* Open log */
9947 case TARGET_SYSLOG_ACTION_CLEAR
: /* Clear ring buffer */
9948 case TARGET_SYSLOG_ACTION_CONSOLE_OFF
: /* Disable logging */
9949 case TARGET_SYSLOG_ACTION_CONSOLE_ON
: /* Enable logging */
9950 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL
: /* Set messages level */
9951 case TARGET_SYSLOG_ACTION_SIZE_UNREAD
: /* Number of chars */
9952 case TARGET_SYSLOG_ACTION_SIZE_BUFFER
: /* Size of the buffer */
9953 return get_errno(sys_syslog((int)arg1
, NULL
, (int)arg3
));
9954 case TARGET_SYSLOG_ACTION_READ
: /* Read from log */
9955 case TARGET_SYSLOG_ACTION_READ_CLEAR
: /* Read/clear msgs */
9956 case TARGET_SYSLOG_ACTION_READ_ALL
: /* Read last messages */
9959 return -TARGET_EINVAL
;
9964 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9966 return -TARGET_EFAULT
;
9968 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
9969 unlock_user(p
, arg2
, arg3
);
9973 return -TARGET_EINVAL
;
9978 case TARGET_NR_setitimer
:
9980 struct itimerval value
, ovalue
, *pvalue
;
9984 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
9985 || copy_from_user_timeval(&pvalue
->it_value
,
9986 arg2
+ sizeof(struct target_timeval
)))
9987 return -TARGET_EFAULT
;
9991 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
9992 if (!is_error(ret
) && arg3
) {
9993 if (copy_to_user_timeval(arg3
,
9994 &ovalue
.it_interval
)
9995 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
9997 return -TARGET_EFAULT
;
10001 case TARGET_NR_getitimer
:
10003 struct itimerval value
;
10005 ret
= get_errno(getitimer(arg1
, &value
));
10006 if (!is_error(ret
) && arg2
) {
10007 if (copy_to_user_timeval(arg2
,
10008 &value
.it_interval
)
10009 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
10011 return -TARGET_EFAULT
;
10015 #ifdef TARGET_NR_stat
10016 case TARGET_NR_stat
:
10017 if (!(p
= lock_user_string(arg1
))) {
10018 return -TARGET_EFAULT
;
10020 ret
= get_errno(stat(path(p
), &st
));
10021 unlock_user(p
, arg1
, 0);
10024 #ifdef TARGET_NR_lstat
10025 case TARGET_NR_lstat
:
10026 if (!(p
= lock_user_string(arg1
))) {
10027 return -TARGET_EFAULT
;
10029 ret
= get_errno(lstat(path(p
), &st
));
10030 unlock_user(p
, arg1
, 0);
10033 #ifdef TARGET_NR_fstat
10034 case TARGET_NR_fstat
:
10036 ret
= get_errno(fstat(arg1
, &st
));
10037 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10040 if (!is_error(ret
)) {
10041 struct target_stat
*target_st
;
10043 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
10044 return -TARGET_EFAULT
;
10045 memset(target_st
, 0, sizeof(*target_st
));
10046 __put_user(st
.st_dev
, &target_st
->st_dev
);
10047 __put_user(st
.st_ino
, &target_st
->st_ino
);
10048 __put_user(st
.st_mode
, &target_st
->st_mode
);
10049 __put_user(st
.st_uid
, &target_st
->st_uid
);
10050 __put_user(st
.st_gid
, &target_st
->st_gid
);
10051 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
10052 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
10053 __put_user(st
.st_size
, &target_st
->st_size
);
10054 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
10055 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
10056 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
10057 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
10058 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
10059 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
10060 defined(TARGET_STAT_HAVE_NSEC)
10061 __put_user(st
.st_atim
.tv_nsec
,
10062 &target_st
->target_st_atime_nsec
);
10063 __put_user(st
.st_mtim
.tv_nsec
,
10064 &target_st
->target_st_mtime_nsec
);
10065 __put_user(st
.st_ctim
.tv_nsec
,
10066 &target_st
->target_st_ctime_nsec
);
10068 unlock_user_struct(target_st
, arg2
, 1);
10073 case TARGET_NR_vhangup
:
10074 return get_errno(vhangup());
10075 #ifdef TARGET_NR_syscall
10076 case TARGET_NR_syscall
:
10077 return do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
10078 arg6
, arg7
, arg8
, 0);
10080 #if defined(TARGET_NR_wait4)
10081 case TARGET_NR_wait4
:
10084 abi_long status_ptr
= arg2
;
10085 struct rusage rusage
, *rusage_ptr
;
10086 abi_ulong target_rusage
= arg4
;
10087 abi_long rusage_err
;
10089 rusage_ptr
= &rusage
;
10092 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
10093 if (!is_error(ret
)) {
10094 if (status_ptr
&& ret
) {
10095 status
= host_to_target_waitstatus(status
);
10096 if (put_user_s32(status
, status_ptr
))
10097 return -TARGET_EFAULT
;
10099 if (target_rusage
) {
10100 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
10109 #ifdef TARGET_NR_swapoff
10110 case TARGET_NR_swapoff
:
10111 if (!(p
= lock_user_string(arg1
)))
10112 return -TARGET_EFAULT
;
10113 ret
= get_errno(swapoff(p
));
10114 unlock_user(p
, arg1
, 0);
10117 case TARGET_NR_sysinfo
:
10119 struct target_sysinfo
*target_value
;
10120 struct sysinfo value
;
10121 ret
= get_errno(sysinfo(&value
));
10122 if (!is_error(ret
) && arg1
)
10124 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
10125 return -TARGET_EFAULT
;
10126 __put_user(value
.uptime
, &target_value
->uptime
);
10127 __put_user(value
.loads
[0], &target_value
->loads
[0]);
10128 __put_user(value
.loads
[1], &target_value
->loads
[1]);
10129 __put_user(value
.loads
[2], &target_value
->loads
[2]);
10130 __put_user(value
.totalram
, &target_value
->totalram
);
10131 __put_user(value
.freeram
, &target_value
->freeram
);
10132 __put_user(value
.sharedram
, &target_value
->sharedram
);
10133 __put_user(value
.bufferram
, &target_value
->bufferram
);
10134 __put_user(value
.totalswap
, &target_value
->totalswap
);
10135 __put_user(value
.freeswap
, &target_value
->freeswap
);
10136 __put_user(value
.procs
, &target_value
->procs
);
10137 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
10138 __put_user(value
.freehigh
, &target_value
->freehigh
);
10139 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
10140 unlock_user_struct(target_value
, arg1
, 1);
10144 #ifdef TARGET_NR_ipc
10145 case TARGET_NR_ipc
:
10146 return do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
10148 #ifdef TARGET_NR_semget
10149 case TARGET_NR_semget
:
10150 return get_errno(semget(arg1
, arg2
, arg3
));
10152 #ifdef TARGET_NR_semop
10153 case TARGET_NR_semop
:
10154 return do_semtimedop(arg1
, arg2
, arg3
, 0, false);
10156 #ifdef TARGET_NR_semtimedop
10157 case TARGET_NR_semtimedop
:
10158 return do_semtimedop(arg1
, arg2
, arg3
, arg4
, false);
10160 #ifdef TARGET_NR_semtimedop_time64
10161 case TARGET_NR_semtimedop_time64
:
10162 return do_semtimedop(arg1
, arg2
, arg3
, arg4
, true);
10164 #ifdef TARGET_NR_semctl
10165 case TARGET_NR_semctl
:
10166 return do_semctl(arg1
, arg2
, arg3
, arg4
);
10168 #ifdef TARGET_NR_msgctl
10169 case TARGET_NR_msgctl
:
10170 return do_msgctl(arg1
, arg2
, arg3
);
10172 #ifdef TARGET_NR_msgget
10173 case TARGET_NR_msgget
:
10174 return get_errno(msgget(arg1
, arg2
));
10176 #ifdef TARGET_NR_msgrcv
10177 case TARGET_NR_msgrcv
:
10178 return do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
10180 #ifdef TARGET_NR_msgsnd
10181 case TARGET_NR_msgsnd
:
10182 return do_msgsnd(arg1
, arg2
, arg3
, arg4
);
10184 #ifdef TARGET_NR_shmget
10185 case TARGET_NR_shmget
:
10186 return get_errno(shmget(arg1
, arg2
, arg3
));
10188 #ifdef TARGET_NR_shmctl
10189 case TARGET_NR_shmctl
:
10190 return do_shmctl(arg1
, arg2
, arg3
);
10192 #ifdef TARGET_NR_shmat
10193 case TARGET_NR_shmat
:
10194 return do_shmat(cpu_env
, arg1
, arg2
, arg3
);
10196 #ifdef TARGET_NR_shmdt
10197 case TARGET_NR_shmdt
:
10198 return do_shmdt(arg1
);
10200 case TARGET_NR_fsync
:
10201 return get_errno(fsync(arg1
));
10202 case TARGET_NR_clone
:
10203 /* Linux manages to have three different orderings for its
10204 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10205 * match the kernel's CONFIG_CLONE_* settings.
10206 * Microblaze is further special in that it uses a sixth
10207 * implicit argument to clone for the TLS pointer.
10209 #if defined(TARGET_MICROBLAZE)
10210 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
10211 #elif defined(TARGET_CLONE_BACKWARDS)
10212 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
10213 #elif defined(TARGET_CLONE_BACKWARDS2)
10214 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
10216 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
10219 #ifdef __NR_exit_group
10220 /* new thread calls */
10221 case TARGET_NR_exit_group
:
10222 preexit_cleanup(cpu_env
, arg1
);
10223 return get_errno(exit_group(arg1
));
10225 case TARGET_NR_setdomainname
:
10226 if (!(p
= lock_user_string(arg1
)))
10227 return -TARGET_EFAULT
;
10228 ret
= get_errno(setdomainname(p
, arg2
));
10229 unlock_user(p
, arg1
, 0);
10231 case TARGET_NR_uname
:
10232 /* no need to transcode because we use the linux syscall */
10234 struct new_utsname
* buf
;
10236 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
10237 return -TARGET_EFAULT
;
10238 ret
= get_errno(sys_uname(buf
));
10239 if (!is_error(ret
)) {
10240 /* Overwrite the native machine name with whatever is being
10242 g_strlcpy(buf
->machine
, cpu_to_uname_machine(cpu_env
),
10243 sizeof(buf
->machine
));
10244 /* Allow the user to override the reported release. */
10245 if (qemu_uname_release
&& *qemu_uname_release
) {
10246 g_strlcpy(buf
->release
, qemu_uname_release
,
10247 sizeof(buf
->release
));
10250 unlock_user_struct(buf
, arg1
, 1);
10254 case TARGET_NR_modify_ldt
:
10255 return do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
10256 #if !defined(TARGET_X86_64)
10257 case TARGET_NR_vm86
:
10258 return do_vm86(cpu_env
, arg1
, arg2
);
10261 #if defined(TARGET_NR_adjtimex)
10262 case TARGET_NR_adjtimex
:
10264 struct timex host_buf
;
10266 if (target_to_host_timex(&host_buf
, arg1
) != 0) {
10267 return -TARGET_EFAULT
;
10269 ret
= get_errno(adjtimex(&host_buf
));
10270 if (!is_error(ret
)) {
10271 if (host_to_target_timex(arg1
, &host_buf
) != 0) {
10272 return -TARGET_EFAULT
;
10278 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10279 case TARGET_NR_clock_adjtime
:
10281 struct timex htx
, *phtx
= &htx
;
10283 if (target_to_host_timex(phtx
, arg2
) != 0) {
10284 return -TARGET_EFAULT
;
10286 ret
= get_errno(clock_adjtime(arg1
, phtx
));
10287 if (!is_error(ret
) && phtx
) {
10288 if (host_to_target_timex(arg2
, phtx
) != 0) {
10289 return -TARGET_EFAULT
;
10295 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10296 case TARGET_NR_clock_adjtime64
:
10300 if (target_to_host_timex64(&htx
, arg2
) != 0) {
10301 return -TARGET_EFAULT
;
10303 ret
= get_errno(clock_adjtime(arg1
, &htx
));
10304 if (!is_error(ret
) && host_to_target_timex64(arg2
, &htx
)) {
10305 return -TARGET_EFAULT
;
10310 case TARGET_NR_getpgid
:
10311 return get_errno(getpgid(arg1
));
10312 case TARGET_NR_fchdir
:
10313 return get_errno(fchdir(arg1
));
10314 case TARGET_NR_personality
:
10315 return get_errno(personality(arg1
));
10316 #ifdef TARGET_NR__llseek /* Not on alpha */
10317 case TARGET_NR__llseek
:
10320 #if !defined(__NR_llseek)
10321 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
10323 ret
= get_errno(res
);
10328 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
10330 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
10331 return -TARGET_EFAULT
;
10336 #ifdef TARGET_NR_getdents
10337 case TARGET_NR_getdents
:
10338 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10339 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
10341 struct target_dirent
*target_dirp
;
10342 struct linux_dirent
*dirp
;
10343 abi_long count
= arg3
;
10345 dirp
= g_try_malloc(count
);
10347 return -TARGET_ENOMEM
;
10350 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
10351 if (!is_error(ret
)) {
10352 struct linux_dirent
*de
;
10353 struct target_dirent
*tde
;
10355 int reclen
, treclen
;
10356 int count1
, tnamelen
;
10360 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10361 return -TARGET_EFAULT
;
10364 reclen
= de
->d_reclen
;
10365 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
10366 assert(tnamelen
>= 0);
10367 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
10368 assert(count1
+ treclen
<= count
);
10369 tde
->d_reclen
= tswap16(treclen
);
10370 tde
->d_ino
= tswapal(de
->d_ino
);
10371 tde
->d_off
= tswapal(de
->d_off
);
10372 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
10373 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
10375 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
10379 unlock_user(target_dirp
, arg2
, ret
);
10385 struct linux_dirent
*dirp
;
10386 abi_long count
= arg3
;
10388 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10389 return -TARGET_EFAULT
;
10390 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
10391 if (!is_error(ret
)) {
10392 struct linux_dirent
*de
;
10397 reclen
= de
->d_reclen
;
10400 de
->d_reclen
= tswap16(reclen
);
10401 tswapls(&de
->d_ino
);
10402 tswapls(&de
->d_off
);
10403 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
10407 unlock_user(dirp
, arg2
, ret
);
10411 /* Implement getdents in terms of getdents64 */
10413 struct linux_dirent64
*dirp
;
10414 abi_long count
= arg3
;
10416 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
10418 return -TARGET_EFAULT
;
10420 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
10421 if (!is_error(ret
)) {
10422 /* Convert the dirent64 structs to target dirent. We do this
10423 * in-place, since we can guarantee that a target_dirent is no
10424 * larger than a dirent64; however this means we have to be
10425 * careful to read everything before writing in the new format.
10427 struct linux_dirent64
*de
;
10428 struct target_dirent
*tde
;
10433 tde
= (struct target_dirent
*)dirp
;
10435 int namelen
, treclen
;
10436 int reclen
= de
->d_reclen
;
10437 uint64_t ino
= de
->d_ino
;
10438 int64_t off
= de
->d_off
;
10439 uint8_t type
= de
->d_type
;
10441 namelen
= strlen(de
->d_name
);
10442 treclen
= offsetof(struct target_dirent
, d_name
)
10444 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
10446 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
10447 tde
->d_ino
= tswapal(ino
);
10448 tde
->d_off
= tswapal(off
);
10449 tde
->d_reclen
= tswap16(treclen
);
10450 /* The target_dirent type is in what was formerly a padding
10451 * byte at the end of the structure:
10453 *(((char *)tde
) + treclen
- 1) = type
;
10455 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
10456 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
10462 unlock_user(dirp
, arg2
, ret
);
10466 #endif /* TARGET_NR_getdents */
10467 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10468 case TARGET_NR_getdents64
:
10470 struct linux_dirent64
*dirp
;
10471 abi_long count
= arg3
;
10472 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10473 return -TARGET_EFAULT
;
10474 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
10475 if (!is_error(ret
)) {
10476 struct linux_dirent64
*de
;
10481 reclen
= de
->d_reclen
;
10484 de
->d_reclen
= tswap16(reclen
);
10485 tswap64s((uint64_t *)&de
->d_ino
);
10486 tswap64s((uint64_t *)&de
->d_off
);
10487 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
10491 unlock_user(dirp
, arg2
, ret
);
10494 #endif /* TARGET_NR_getdents64 */
10495 #if defined(TARGET_NR__newselect)
10496 case TARGET_NR__newselect
:
10497 return do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
10499 #ifdef TARGET_NR_poll
10500 case TARGET_NR_poll
:
10501 return do_ppoll(arg1
, arg2
, arg3
, arg4
, arg5
, false, false);
10503 #ifdef TARGET_NR_ppoll
10504 case TARGET_NR_ppoll
:
10505 return do_ppoll(arg1
, arg2
, arg3
, arg4
, arg5
, true, false);
10507 #ifdef TARGET_NR_ppoll_time64
10508 case TARGET_NR_ppoll_time64
:
10509 return do_ppoll(arg1
, arg2
, arg3
, arg4
, arg5
, true, true);
10511 case TARGET_NR_flock
:
10512 /* NOTE: the flock constant seems to be the same for every
10514 return get_errno(safe_flock(arg1
, arg2
));
10515 case TARGET_NR_readv
:
10517 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10519 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
10520 unlock_iovec(vec
, arg2
, arg3
, 1);
10522 ret
= -host_to_target_errno(errno
);
10526 case TARGET_NR_writev
:
10528 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10530 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
10531 unlock_iovec(vec
, arg2
, arg3
, 0);
10533 ret
= -host_to_target_errno(errno
);
10537 #if defined(TARGET_NR_preadv)
10538 case TARGET_NR_preadv
:
10540 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10542 unsigned long low
, high
;
10544 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
10545 ret
= get_errno(safe_preadv(arg1
, vec
, arg3
, low
, high
));
10546 unlock_iovec(vec
, arg2
, arg3
, 1);
10548 ret
= -host_to_target_errno(errno
);
10553 #if defined(TARGET_NR_pwritev)
10554 case TARGET_NR_pwritev
:
10556 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10558 unsigned long low
, high
;
10560 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
10561 ret
= get_errno(safe_pwritev(arg1
, vec
, arg3
, low
, high
));
10562 unlock_iovec(vec
, arg2
, arg3
, 0);
10564 ret
= -host_to_target_errno(errno
);
10569 case TARGET_NR_getsid
:
10570 return get_errno(getsid(arg1
));
10571 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10572 case TARGET_NR_fdatasync
:
10573 return get_errno(fdatasync(arg1
));
10575 case TARGET_NR_sched_getaffinity
:
10577 unsigned int mask_size
;
10578 unsigned long *mask
;
10581 * sched_getaffinity needs multiples of ulong, so need to take
10582 * care of mismatches between target ulong and host ulong sizes.
10584 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10585 return -TARGET_EINVAL
;
10587 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10589 mask
= alloca(mask_size
);
10590 memset(mask
, 0, mask_size
);
10591 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
10593 if (!is_error(ret
)) {
10595 /* More data returned than the caller's buffer will fit.
10596 * This only happens if sizeof(abi_long) < sizeof(long)
10597 * and the caller passed us a buffer holding an odd number
10598 * of abi_longs. If the host kernel is actually using the
10599 * extra 4 bytes then fail EINVAL; otherwise we can just
10600 * ignore them and only copy the interesting part.
10602 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
10603 if (numcpus
> arg2
* 8) {
10604 return -TARGET_EINVAL
;
10609 if (host_to_target_cpu_mask(mask
, mask_size
, arg3
, ret
)) {
10610 return -TARGET_EFAULT
;
10615 case TARGET_NR_sched_setaffinity
:
10617 unsigned int mask_size
;
10618 unsigned long *mask
;
10621 * sched_setaffinity needs multiples of ulong, so need to take
10622 * care of mismatches between target ulong and host ulong sizes.
10624 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10625 return -TARGET_EINVAL
;
10627 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10628 mask
= alloca(mask_size
);
10630 ret
= target_to_host_cpu_mask(mask
, mask_size
, arg3
, arg2
);
10635 return get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
10637 case TARGET_NR_getcpu
:
10639 unsigned cpu
, node
;
10640 ret
= get_errno(sys_getcpu(arg1
? &cpu
: NULL
,
10641 arg2
? &node
: NULL
,
10643 if (is_error(ret
)) {
10646 if (arg1
&& put_user_u32(cpu
, arg1
)) {
10647 return -TARGET_EFAULT
;
10649 if (arg2
&& put_user_u32(node
, arg2
)) {
10650 return -TARGET_EFAULT
;
10654 case TARGET_NR_sched_setparam
:
10656 struct sched_param
*target_schp
;
10657 struct sched_param schp
;
10660 return -TARGET_EINVAL
;
10662 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
10663 return -TARGET_EFAULT
;
10664 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10665 unlock_user_struct(target_schp
, arg2
, 0);
10666 return get_errno(sched_setparam(arg1
, &schp
));
10668 case TARGET_NR_sched_getparam
:
10670 struct sched_param
*target_schp
;
10671 struct sched_param schp
;
10674 return -TARGET_EINVAL
;
10676 ret
= get_errno(sched_getparam(arg1
, &schp
));
10677 if (!is_error(ret
)) {
10678 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
10679 return -TARGET_EFAULT
;
10680 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
10681 unlock_user_struct(target_schp
, arg2
, 1);
10685 case TARGET_NR_sched_setscheduler
:
10687 struct sched_param
*target_schp
;
10688 struct sched_param schp
;
10690 return -TARGET_EINVAL
;
10692 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
10693 return -TARGET_EFAULT
;
10694 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10695 unlock_user_struct(target_schp
, arg3
, 0);
10696 return get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
10698 case TARGET_NR_sched_getscheduler
:
10699 return get_errno(sched_getscheduler(arg1
));
10700 case TARGET_NR_sched_yield
:
10701 return get_errno(sched_yield());
10702 case TARGET_NR_sched_get_priority_max
:
10703 return get_errno(sched_get_priority_max(arg1
));
10704 case TARGET_NR_sched_get_priority_min
:
10705 return get_errno(sched_get_priority_min(arg1
));
10706 #ifdef TARGET_NR_sched_rr_get_interval
10707 case TARGET_NR_sched_rr_get_interval
:
10709 struct timespec ts
;
10710 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
10711 if (!is_error(ret
)) {
10712 ret
= host_to_target_timespec(arg2
, &ts
);
10717 #ifdef TARGET_NR_sched_rr_get_interval_time64
10718 case TARGET_NR_sched_rr_get_interval_time64
:
10720 struct timespec ts
;
10721 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
10722 if (!is_error(ret
)) {
10723 ret
= host_to_target_timespec64(arg2
, &ts
);
10728 #if defined(TARGET_NR_nanosleep)
10729 case TARGET_NR_nanosleep
:
10731 struct timespec req
, rem
;
10732 target_to_host_timespec(&req
, arg1
);
10733 ret
= get_errno(safe_nanosleep(&req
, &rem
));
10734 if (is_error(ret
) && arg2
) {
10735 host_to_target_timespec(arg2
, &rem
);
10740 case TARGET_NR_prctl
:
10742 case PR_GET_PDEATHSIG
:
10745 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
10746 if (!is_error(ret
) && arg2
10747 && put_user_s32(deathsig
, arg2
)) {
10748 return -TARGET_EFAULT
;
10755 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
10757 return -TARGET_EFAULT
;
10759 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10760 arg3
, arg4
, arg5
));
10761 unlock_user(name
, arg2
, 16);
10766 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
10768 return -TARGET_EFAULT
;
10770 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10771 arg3
, arg4
, arg5
));
10772 unlock_user(name
, arg2
, 0);
10777 case TARGET_PR_GET_FP_MODE
:
10779 CPUMIPSState
*env
= ((CPUMIPSState
*)cpu_env
);
10781 if (env
->CP0_Status
& (1 << CP0St_FR
)) {
10782 ret
|= TARGET_PR_FP_MODE_FR
;
10784 if (env
->CP0_Config5
& (1 << CP0C5_FRE
)) {
10785 ret
|= TARGET_PR_FP_MODE_FRE
;
10789 case TARGET_PR_SET_FP_MODE
:
10791 CPUMIPSState
*env
= ((CPUMIPSState
*)cpu_env
);
10792 bool old_fr
= env
->CP0_Status
& (1 << CP0St_FR
);
10793 bool old_fre
= env
->CP0_Config5
& (1 << CP0C5_FRE
);
10794 bool new_fr
= arg2
& TARGET_PR_FP_MODE_FR
;
10795 bool new_fre
= arg2
& TARGET_PR_FP_MODE_FRE
;
10797 const unsigned int known_bits
= TARGET_PR_FP_MODE_FR
|
10798 TARGET_PR_FP_MODE_FRE
;
10800 /* If nothing to change, return right away, successfully. */
10801 if (old_fr
== new_fr
&& old_fre
== new_fre
) {
10804 /* Check the value is valid */
10805 if (arg2
& ~known_bits
) {
10806 return -TARGET_EOPNOTSUPP
;
10808 /* Setting FRE without FR is not supported. */
10809 if (new_fre
&& !new_fr
) {
10810 return -TARGET_EOPNOTSUPP
;
10812 if (new_fr
&& !(env
->active_fpu
.fcr0
& (1 << FCR0_F64
))) {
10813 /* FR1 is not supported */
10814 return -TARGET_EOPNOTSUPP
;
10816 if (!new_fr
&& (env
->active_fpu
.fcr0
& (1 << FCR0_F64
))
10817 && !(env
->CP0_Status_rw_bitmask
& (1 << CP0St_FR
))) {
10818 /* cannot set FR=0 */
10819 return -TARGET_EOPNOTSUPP
;
10821 if (new_fre
&& !(env
->active_fpu
.fcr0
& (1 << FCR0_FREP
))) {
10822 /* Cannot set FRE=1 */
10823 return -TARGET_EOPNOTSUPP
;
10827 fpr_t
*fpr
= env
->active_fpu
.fpr
;
10828 for (i
= 0; i
< 32 ; i
+= 2) {
10829 if (!old_fr
&& new_fr
) {
10830 fpr
[i
].w
[!FP_ENDIAN_IDX
] = fpr
[i
+ 1].w
[FP_ENDIAN_IDX
];
10831 } else if (old_fr
&& !new_fr
) {
10832 fpr
[i
+ 1].w
[FP_ENDIAN_IDX
] = fpr
[i
].w
[!FP_ENDIAN_IDX
];
10837 env
->CP0_Status
|= (1 << CP0St_FR
);
10838 env
->hflags
|= MIPS_HFLAG_F64
;
10840 env
->CP0_Status
&= ~(1 << CP0St_FR
);
10841 env
->hflags
&= ~MIPS_HFLAG_F64
;
10844 env
->CP0_Config5
|= (1 << CP0C5_FRE
);
10845 if (env
->active_fpu
.fcr0
& (1 << FCR0_FREP
)) {
10846 env
->hflags
|= MIPS_HFLAG_FRE
;
10849 env
->CP0_Config5
&= ~(1 << CP0C5_FRE
);
10850 env
->hflags
&= ~MIPS_HFLAG_FRE
;
10856 #ifdef TARGET_AARCH64
10857 case TARGET_PR_SVE_SET_VL
:
10859 * We cannot support either PR_SVE_SET_VL_ONEXEC or
10860 * PR_SVE_VL_INHERIT. Note the kernel definition
10861 * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10862 * even though the current architectural maximum is VQ=16.
10864 ret
= -TARGET_EINVAL
;
10865 if (cpu_isar_feature(aa64_sve
, env_archcpu(cpu_env
))
10866 && arg2
>= 0 && arg2
<= 512 * 16 && !(arg2
& 15)) {
10867 CPUARMState
*env
= cpu_env
;
10868 ARMCPU
*cpu
= env_archcpu(env
);
10869 uint32_t vq
, old_vq
;
10871 old_vq
= (env
->vfp
.zcr_el
[1] & 0xf) + 1;
10872 vq
= MAX(arg2
/ 16, 1);
10873 vq
= MIN(vq
, cpu
->sve_max_vq
);
10876 aarch64_sve_narrow_vq(env
, vq
);
10878 env
->vfp
.zcr_el
[1] = vq
- 1;
10879 arm_rebuild_hflags(env
);
10883 case TARGET_PR_SVE_GET_VL
:
10884 ret
= -TARGET_EINVAL
;
10886 ARMCPU
*cpu
= env_archcpu(cpu_env
);
10887 if (cpu_isar_feature(aa64_sve
, cpu
)) {
10888 ret
= ((cpu
->env
.vfp
.zcr_el
[1] & 0xf) + 1) * 16;
10892 case TARGET_PR_PAC_RESET_KEYS
:
10894 CPUARMState
*env
= cpu_env
;
10895 ARMCPU
*cpu
= env_archcpu(env
);
10897 if (arg3
|| arg4
|| arg5
) {
10898 return -TARGET_EINVAL
;
10900 if (cpu_isar_feature(aa64_pauth
, cpu
)) {
10901 int all
= (TARGET_PR_PAC_APIAKEY
| TARGET_PR_PAC_APIBKEY
|
10902 TARGET_PR_PAC_APDAKEY
| TARGET_PR_PAC_APDBKEY
|
10903 TARGET_PR_PAC_APGAKEY
);
10909 } else if (arg2
& ~all
) {
10910 return -TARGET_EINVAL
;
10912 if (arg2
& TARGET_PR_PAC_APIAKEY
) {
10913 ret
|= qemu_guest_getrandom(&env
->keys
.apia
,
10914 sizeof(ARMPACKey
), &err
);
10916 if (arg2
& TARGET_PR_PAC_APIBKEY
) {
10917 ret
|= qemu_guest_getrandom(&env
->keys
.apib
,
10918 sizeof(ARMPACKey
), &err
);
10920 if (arg2
& TARGET_PR_PAC_APDAKEY
) {
10921 ret
|= qemu_guest_getrandom(&env
->keys
.apda
,
10922 sizeof(ARMPACKey
), &err
);
10924 if (arg2
& TARGET_PR_PAC_APDBKEY
) {
10925 ret
|= qemu_guest_getrandom(&env
->keys
.apdb
,
10926 sizeof(ARMPACKey
), &err
);
10928 if (arg2
& TARGET_PR_PAC_APGAKEY
) {
10929 ret
|= qemu_guest_getrandom(&env
->keys
.apga
,
10930 sizeof(ARMPACKey
), &err
);
10934 * Some unknown failure in the crypto. The best
10935 * we can do is log it and fail the syscall.
10936 * The real syscall cannot fail this way.
10938 qemu_log_mask(LOG_UNIMP
,
10939 "PR_PAC_RESET_KEYS: Crypto failure: %s",
10940 error_get_pretty(err
));
10942 return -TARGET_EIO
;
10947 return -TARGET_EINVAL
;
10948 case TARGET_PR_SET_TAGGED_ADDR_CTRL
:
10950 abi_ulong valid_mask
= TARGET_PR_TAGGED_ADDR_ENABLE
;
10951 CPUARMState
*env
= cpu_env
;
10952 ARMCPU
*cpu
= env_archcpu(env
);
10954 if (cpu_isar_feature(aa64_mte
, cpu
)) {
10955 valid_mask
|= TARGET_PR_MTE_TCF_MASK
;
10956 valid_mask
|= TARGET_PR_MTE_TAG_MASK
;
10959 if ((arg2
& ~valid_mask
) || arg3
|| arg4
|| arg5
) {
10960 return -TARGET_EINVAL
;
10962 env
->tagged_addr_enable
= arg2
& TARGET_PR_TAGGED_ADDR_ENABLE
;
10964 if (cpu_isar_feature(aa64_mte
, cpu
)) {
10965 switch (arg2
& TARGET_PR_MTE_TCF_MASK
) {
10966 case TARGET_PR_MTE_TCF_NONE
:
10967 case TARGET_PR_MTE_TCF_SYNC
:
10968 case TARGET_PR_MTE_TCF_ASYNC
:
10975 * Write PR_MTE_TCF to SCTLR_EL1[TCF0].
10976 * Note that the syscall values are consistent with hw.
10978 env
->cp15
.sctlr_el
[1] =
10979 deposit64(env
->cp15
.sctlr_el
[1], 38, 2,
10980 arg2
>> TARGET_PR_MTE_TCF_SHIFT
);
10983 * Write PR_MTE_TAG to GCR_EL1[Exclude].
10984 * Note that the syscall uses an include mask,
10985 * and hardware uses an exclude mask -- invert.
10987 env
->cp15
.gcr_el1
=
10988 deposit64(env
->cp15
.gcr_el1
, 0, 16,
10989 ~arg2
>> TARGET_PR_MTE_TAG_SHIFT
);
10990 arm_rebuild_hflags(env
);
10994 case TARGET_PR_GET_TAGGED_ADDR_CTRL
:
10997 CPUARMState
*env
= cpu_env
;
10998 ARMCPU
*cpu
= env_archcpu(env
);
11000 if (arg2
|| arg3
|| arg4
|| arg5
) {
11001 return -TARGET_EINVAL
;
11003 if (env
->tagged_addr_enable
) {
11004 ret
|= TARGET_PR_TAGGED_ADDR_ENABLE
;
11006 if (cpu_isar_feature(aa64_mte
, cpu
)) {
11008 ret
|= (extract64(env
->cp15
.sctlr_el
[1], 38, 2)
11009 << TARGET_PR_MTE_TCF_SHIFT
);
11010 ret
= deposit64(ret
, TARGET_PR_MTE_TAG_SHIFT
, 16,
11011 ~env
->cp15
.gcr_el1
);
11015 #endif /* AARCH64 */
11016 case PR_GET_SECCOMP
:
11017 case PR_SET_SECCOMP
:
11018 /* Disable seccomp to prevent the target disabling syscalls we
11020 return -TARGET_EINVAL
;
11022 /* Most prctl options have no pointer arguments */
11023 return get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
11026 #ifdef TARGET_NR_arch_prctl
11027 case TARGET_NR_arch_prctl
:
11028 return do_arch_prctl(cpu_env
, arg1
, arg2
);
11030 #ifdef TARGET_NR_pread64
11031 case TARGET_NR_pread64
:
11032 if (regpairs_aligned(cpu_env
, num
)) {
11036 if (arg2
== 0 && arg3
== 0) {
11037 /* Special-case NULL buffer and zero length, which should succeed */
11040 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11042 return -TARGET_EFAULT
;
11045 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
11046 unlock_user(p
, arg2
, ret
);
11048 case TARGET_NR_pwrite64
:
11049 if (regpairs_aligned(cpu_env
, num
)) {
11053 if (arg2
== 0 && arg3
== 0) {
11054 /* Special-case NULL buffer and zero length, which should succeed */
11057 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
11059 return -TARGET_EFAULT
;
11062 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
11063 unlock_user(p
, arg2
, 0);
11066 case TARGET_NR_getcwd
:
11067 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
11068 return -TARGET_EFAULT
;
11069 ret
= get_errno(sys_getcwd1(p
, arg2
));
11070 unlock_user(p
, arg1
, ret
);
11072 case TARGET_NR_capget
:
11073 case TARGET_NR_capset
:
11075 struct target_user_cap_header
*target_header
;
11076 struct target_user_cap_data
*target_data
= NULL
;
11077 struct __user_cap_header_struct header
;
11078 struct __user_cap_data_struct data
[2];
11079 struct __user_cap_data_struct
*dataptr
= NULL
;
11080 int i
, target_datalen
;
11081 int data_items
= 1;
11083 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
11084 return -TARGET_EFAULT
;
11086 header
.version
= tswap32(target_header
->version
);
11087 header
.pid
= tswap32(target_header
->pid
);
11089 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
11090 /* Version 2 and up takes pointer to two user_data structs */
11094 target_datalen
= sizeof(*target_data
) * data_items
;
11097 if (num
== TARGET_NR_capget
) {
11098 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
11100 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
11102 if (!target_data
) {
11103 unlock_user_struct(target_header
, arg1
, 0);
11104 return -TARGET_EFAULT
;
11107 if (num
== TARGET_NR_capset
) {
11108 for (i
= 0; i
< data_items
; i
++) {
11109 data
[i
].effective
= tswap32(target_data
[i
].effective
);
11110 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
11111 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
11118 if (num
== TARGET_NR_capget
) {
11119 ret
= get_errno(capget(&header
, dataptr
));
11121 ret
= get_errno(capset(&header
, dataptr
));
11124 /* The kernel always updates version for both capget and capset */
11125 target_header
->version
= tswap32(header
.version
);
11126 unlock_user_struct(target_header
, arg1
, 1);
11129 if (num
== TARGET_NR_capget
) {
11130 for (i
= 0; i
< data_items
; i
++) {
11131 target_data
[i
].effective
= tswap32(data
[i
].effective
);
11132 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
11133 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
11135 unlock_user(target_data
, arg2
, target_datalen
);
11137 unlock_user(target_data
, arg2
, 0);
11142 case TARGET_NR_sigaltstack
:
11143 return do_sigaltstack(arg1
, arg2
, cpu_env
);
11145 #ifdef CONFIG_SENDFILE
11146 #ifdef TARGET_NR_sendfile
11147 case TARGET_NR_sendfile
:
11149 off_t
*offp
= NULL
;
11152 ret
= get_user_sal(off
, arg3
);
11153 if (is_error(ret
)) {
11158 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
11159 if (!is_error(ret
) && arg3
) {
11160 abi_long ret2
= put_user_sal(off
, arg3
);
11161 if (is_error(ret2
)) {
11168 #ifdef TARGET_NR_sendfile64
11169 case TARGET_NR_sendfile64
:
11171 off_t
*offp
= NULL
;
11174 ret
= get_user_s64(off
, arg3
);
11175 if (is_error(ret
)) {
11180 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
11181 if (!is_error(ret
) && arg3
) {
11182 abi_long ret2
= put_user_s64(off
, arg3
);
11183 if (is_error(ret2
)) {
11191 #ifdef TARGET_NR_vfork
11192 case TARGET_NR_vfork
:
11193 return get_errno(do_fork(cpu_env
,
11194 CLONE_VFORK
| CLONE_VM
| TARGET_SIGCHLD
,
11197 #ifdef TARGET_NR_ugetrlimit
11198 case TARGET_NR_ugetrlimit
:
11200 struct rlimit rlim
;
11201 int resource
= target_to_host_resource(arg1
);
11202 ret
= get_errno(getrlimit(resource
, &rlim
));
11203 if (!is_error(ret
)) {
11204 struct target_rlimit
*target_rlim
;
11205 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
11206 return -TARGET_EFAULT
;
11207 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
11208 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
11209 unlock_user_struct(target_rlim
, arg2
, 1);
11214 #ifdef TARGET_NR_truncate64
11215 case TARGET_NR_truncate64
:
11216 if (!(p
= lock_user_string(arg1
)))
11217 return -TARGET_EFAULT
;
11218 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
11219 unlock_user(p
, arg1
, 0);
11222 #ifdef TARGET_NR_ftruncate64
11223 case TARGET_NR_ftruncate64
:
11224 return target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
11226 #ifdef TARGET_NR_stat64
11227 case TARGET_NR_stat64
:
11228 if (!(p
= lock_user_string(arg1
))) {
11229 return -TARGET_EFAULT
;
11231 ret
= get_errno(stat(path(p
), &st
));
11232 unlock_user(p
, arg1
, 0);
11233 if (!is_error(ret
))
11234 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11237 #ifdef TARGET_NR_lstat64
11238 case TARGET_NR_lstat64
:
11239 if (!(p
= lock_user_string(arg1
))) {
11240 return -TARGET_EFAULT
;
11242 ret
= get_errno(lstat(path(p
), &st
));
11243 unlock_user(p
, arg1
, 0);
11244 if (!is_error(ret
))
11245 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11248 #ifdef TARGET_NR_fstat64
11249 case TARGET_NR_fstat64
:
11250 ret
= get_errno(fstat(arg1
, &st
));
11251 if (!is_error(ret
))
11252 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11255 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11256 #ifdef TARGET_NR_fstatat64
11257 case TARGET_NR_fstatat64
:
11259 #ifdef TARGET_NR_newfstatat
11260 case TARGET_NR_newfstatat
:
11262 if (!(p
= lock_user_string(arg2
))) {
11263 return -TARGET_EFAULT
;
11265 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
11266 unlock_user(p
, arg2
, 0);
11267 if (!is_error(ret
))
11268 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
11271 #if defined(TARGET_NR_statx)
11272 case TARGET_NR_statx
:
11274 struct target_statx
*target_stx
;
11278 p
= lock_user_string(arg2
);
11280 return -TARGET_EFAULT
;
11282 #if defined(__NR_statx)
11285 * It is assumed that struct statx is architecture independent.
11287 struct target_statx host_stx
;
11290 ret
= get_errno(sys_statx(dirfd
, p
, flags
, mask
, &host_stx
));
11291 if (!is_error(ret
)) {
11292 if (host_to_target_statx(&host_stx
, arg5
) != 0) {
11293 unlock_user(p
, arg2
, 0);
11294 return -TARGET_EFAULT
;
11298 if (ret
!= -TARGET_ENOSYS
) {
11299 unlock_user(p
, arg2
, 0);
11304 ret
= get_errno(fstatat(dirfd
, path(p
), &st
, flags
));
11305 unlock_user(p
, arg2
, 0);
11307 if (!is_error(ret
)) {
11308 if (!lock_user_struct(VERIFY_WRITE
, target_stx
, arg5
, 0)) {
11309 return -TARGET_EFAULT
;
11311 memset(target_stx
, 0, sizeof(*target_stx
));
11312 __put_user(major(st
.st_dev
), &target_stx
->stx_dev_major
);
11313 __put_user(minor(st
.st_dev
), &target_stx
->stx_dev_minor
);
11314 __put_user(st
.st_ino
, &target_stx
->stx_ino
);
11315 __put_user(st
.st_mode
, &target_stx
->stx_mode
);
11316 __put_user(st
.st_uid
, &target_stx
->stx_uid
);
11317 __put_user(st
.st_gid
, &target_stx
->stx_gid
);
11318 __put_user(st
.st_nlink
, &target_stx
->stx_nlink
);
11319 __put_user(major(st
.st_rdev
), &target_stx
->stx_rdev_major
);
11320 __put_user(minor(st
.st_rdev
), &target_stx
->stx_rdev_minor
);
11321 __put_user(st
.st_size
, &target_stx
->stx_size
);
11322 __put_user(st
.st_blksize
, &target_stx
->stx_blksize
);
11323 __put_user(st
.st_blocks
, &target_stx
->stx_blocks
);
11324 __put_user(st
.st_atime
, &target_stx
->stx_atime
.tv_sec
);
11325 __put_user(st
.st_mtime
, &target_stx
->stx_mtime
.tv_sec
);
11326 __put_user(st
.st_ctime
, &target_stx
->stx_ctime
.tv_sec
);
11327 unlock_user_struct(target_stx
, arg5
, 1);
11332 #ifdef TARGET_NR_lchown
11333 case TARGET_NR_lchown
:
11334 if (!(p
= lock_user_string(arg1
)))
11335 return -TARGET_EFAULT
;
11336 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
11337 unlock_user(p
, arg1
, 0);
11340 #ifdef TARGET_NR_getuid
11341 case TARGET_NR_getuid
:
11342 return get_errno(high2lowuid(getuid()));
11344 #ifdef TARGET_NR_getgid
11345 case TARGET_NR_getgid
:
11346 return get_errno(high2lowgid(getgid()));
11348 #ifdef TARGET_NR_geteuid
11349 case TARGET_NR_geteuid
:
11350 return get_errno(high2lowuid(geteuid()));
11352 #ifdef TARGET_NR_getegid
11353 case TARGET_NR_getegid
:
11354 return get_errno(high2lowgid(getegid()));
11356 case TARGET_NR_setreuid
:
11357 return get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
11358 case TARGET_NR_setregid
:
11359 return get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
11360 case TARGET_NR_getgroups
:
11362 int gidsetsize
= arg1
;
11363 target_id
*target_grouplist
;
11367 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11368 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11369 if (gidsetsize
== 0)
11371 if (!is_error(ret
)) {
11372 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
11373 if (!target_grouplist
)
11374 return -TARGET_EFAULT
;
11375 for(i
= 0;i
< ret
; i
++)
11376 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
11377 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
11381 case TARGET_NR_setgroups
:
11383 int gidsetsize
= arg1
;
11384 target_id
*target_grouplist
;
11385 gid_t
*grouplist
= NULL
;
11388 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11389 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
11390 if (!target_grouplist
) {
11391 return -TARGET_EFAULT
;
11393 for (i
= 0; i
< gidsetsize
; i
++) {
11394 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
11396 unlock_user(target_grouplist
, arg2
, 0);
11398 return get_errno(setgroups(gidsetsize
, grouplist
));
11400 case TARGET_NR_fchown
:
11401 return get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
11402 #if defined(TARGET_NR_fchownat)
11403 case TARGET_NR_fchownat
:
11404 if (!(p
= lock_user_string(arg2
)))
11405 return -TARGET_EFAULT
;
11406 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
11407 low2highgid(arg4
), arg5
));
11408 unlock_user(p
, arg2
, 0);
11411 #ifdef TARGET_NR_setresuid
11412 case TARGET_NR_setresuid
:
11413 return get_errno(sys_setresuid(low2highuid(arg1
),
11415 low2highuid(arg3
)));
11417 #ifdef TARGET_NR_getresuid
11418 case TARGET_NR_getresuid
:
11420 uid_t ruid
, euid
, suid
;
11421 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11422 if (!is_error(ret
)) {
11423 if (put_user_id(high2lowuid(ruid
), arg1
)
11424 || put_user_id(high2lowuid(euid
), arg2
)
11425 || put_user_id(high2lowuid(suid
), arg3
))
11426 return -TARGET_EFAULT
;
11431 #ifdef TARGET_NR_getresgid
11432 case TARGET_NR_setresgid
:
11433 return get_errno(sys_setresgid(low2highgid(arg1
),
11435 low2highgid(arg3
)));
11437 #ifdef TARGET_NR_getresgid
11438 case TARGET_NR_getresgid
:
11440 gid_t rgid
, egid
, sgid
;
11441 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11442 if (!is_error(ret
)) {
11443 if (put_user_id(high2lowgid(rgid
), arg1
)
11444 || put_user_id(high2lowgid(egid
), arg2
)
11445 || put_user_id(high2lowgid(sgid
), arg3
))
11446 return -TARGET_EFAULT
;
11451 #ifdef TARGET_NR_chown
11452 case TARGET_NR_chown
:
11453 if (!(p
= lock_user_string(arg1
)))
11454 return -TARGET_EFAULT
;
11455 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
11456 unlock_user(p
, arg1
, 0);
11459 case TARGET_NR_setuid
:
11460 return get_errno(sys_setuid(low2highuid(arg1
)));
11461 case TARGET_NR_setgid
:
11462 return get_errno(sys_setgid(low2highgid(arg1
)));
11463 case TARGET_NR_setfsuid
:
11464 return get_errno(setfsuid(arg1
));
11465 case TARGET_NR_setfsgid
:
11466 return get_errno(setfsgid(arg1
));
11468 #ifdef TARGET_NR_lchown32
11469 case TARGET_NR_lchown32
:
11470 if (!(p
= lock_user_string(arg1
)))
11471 return -TARGET_EFAULT
;
11472 ret
= get_errno(lchown(p
, arg2
, arg3
));
11473 unlock_user(p
, arg1
, 0);
11476 #ifdef TARGET_NR_getuid32
11477 case TARGET_NR_getuid32
:
11478 return get_errno(getuid());
11481 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11482 /* Alpha specific */
11483 case TARGET_NR_getxuid
:
11487 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
11489 return get_errno(getuid());
11491 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11492 /* Alpha specific */
11493 case TARGET_NR_getxgid
:
11497 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
11499 return get_errno(getgid());
11501 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11502 /* Alpha specific */
11503 case TARGET_NR_osf_getsysinfo
:
11504 ret
= -TARGET_EOPNOTSUPP
;
11506 case TARGET_GSI_IEEE_FP_CONTROL
:
11508 uint64_t fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11509 uint64_t swcr
= ((CPUAlphaState
*)cpu_env
)->swcr
;
11511 swcr
&= ~SWCR_STATUS_MASK
;
11512 swcr
|= (fpcr
>> 35) & SWCR_STATUS_MASK
;
11514 if (put_user_u64 (swcr
, arg2
))
11515 return -TARGET_EFAULT
;
11520 /* case GSI_IEEE_STATE_AT_SIGNAL:
11521 -- Not implemented in linux kernel.
11523 -- Retrieves current unaligned access state; not much used.
11524 case GSI_PROC_TYPE:
11525 -- Retrieves implver information; surely not used.
11526 case GSI_GET_HWRPB:
11527 -- Grabs a copy of the HWRPB; surely not used.
11532 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11533 /* Alpha specific */
11534 case TARGET_NR_osf_setsysinfo
:
11535 ret
= -TARGET_EOPNOTSUPP
;
11537 case TARGET_SSI_IEEE_FP_CONTROL
:
11539 uint64_t swcr
, fpcr
;
11541 if (get_user_u64 (swcr
, arg2
)) {
11542 return -TARGET_EFAULT
;
11546 * The kernel calls swcr_update_status to update the
11547 * status bits from the fpcr at every point that it
11548 * could be queried. Therefore, we store the status
11549 * bits only in FPCR.
11551 ((CPUAlphaState
*)cpu_env
)->swcr
11552 = swcr
& (SWCR_TRAP_ENABLE_MASK
| SWCR_MAP_MASK
);
11554 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11555 fpcr
&= ((uint64_t)FPCR_DYN_MASK
<< 32);
11556 fpcr
|= alpha_ieee_swcr_to_fpcr(swcr
);
11557 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11562 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
11564 uint64_t exc
, fpcr
, fex
;
11566 if (get_user_u64(exc
, arg2
)) {
11567 return -TARGET_EFAULT
;
11569 exc
&= SWCR_STATUS_MASK
;
11570 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11572 /* Old exceptions are not signaled. */
11573 fex
= alpha_ieee_fpcr_to_swcr(fpcr
);
11575 fex
>>= SWCR_STATUS_TO_EXCSUM_SHIFT
;
11576 fex
&= ((CPUArchState
*)cpu_env
)->swcr
;
11578 /* Update the hardware fpcr. */
11579 fpcr
|= alpha_ieee_swcr_to_fpcr(exc
);
11580 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11583 int si_code
= TARGET_FPE_FLTUNK
;
11584 target_siginfo_t info
;
11586 if (fex
& SWCR_TRAP_ENABLE_DNO
) {
11587 si_code
= TARGET_FPE_FLTUND
;
11589 if (fex
& SWCR_TRAP_ENABLE_INE
) {
11590 si_code
= TARGET_FPE_FLTRES
;
11592 if (fex
& SWCR_TRAP_ENABLE_UNF
) {
11593 si_code
= TARGET_FPE_FLTUND
;
11595 if (fex
& SWCR_TRAP_ENABLE_OVF
) {
11596 si_code
= TARGET_FPE_FLTOVF
;
11598 if (fex
& SWCR_TRAP_ENABLE_DZE
) {
11599 si_code
= TARGET_FPE_FLTDIV
;
11601 if (fex
& SWCR_TRAP_ENABLE_INV
) {
11602 si_code
= TARGET_FPE_FLTINV
;
11605 info
.si_signo
= SIGFPE
;
11607 info
.si_code
= si_code
;
11608 info
._sifields
._sigfault
._addr
11609 = ((CPUArchState
*)cpu_env
)->pc
;
11610 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
11611 QEMU_SI_FAULT
, &info
);
11617 /* case SSI_NVPAIRS:
11618 -- Used with SSIN_UACPROC to enable unaligned accesses.
11619 case SSI_IEEE_STATE_AT_SIGNAL:
11620 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11621 -- Not implemented in linux kernel
11626 #ifdef TARGET_NR_osf_sigprocmask
11627 /* Alpha specific. */
11628 case TARGET_NR_osf_sigprocmask
:
11632 sigset_t set
, oldset
;
11635 case TARGET_SIG_BLOCK
:
11638 case TARGET_SIG_UNBLOCK
:
11641 case TARGET_SIG_SETMASK
:
11645 return -TARGET_EINVAL
;
11648 target_to_host_old_sigset(&set
, &mask
);
11649 ret
= do_sigprocmask(how
, &set
, &oldset
);
11651 host_to_target_old_sigset(&mask
, &oldset
);
11658 #ifdef TARGET_NR_getgid32
11659 case TARGET_NR_getgid32
:
11660 return get_errno(getgid());
11662 #ifdef TARGET_NR_geteuid32
11663 case TARGET_NR_geteuid32
:
11664 return get_errno(geteuid());
11666 #ifdef TARGET_NR_getegid32
11667 case TARGET_NR_getegid32
:
11668 return get_errno(getegid());
11670 #ifdef TARGET_NR_setreuid32
11671 case TARGET_NR_setreuid32
:
11672 return get_errno(setreuid(arg1
, arg2
));
11674 #ifdef TARGET_NR_setregid32
11675 case TARGET_NR_setregid32
:
11676 return get_errno(setregid(arg1
, arg2
));
11678 #ifdef TARGET_NR_getgroups32
11679 case TARGET_NR_getgroups32
:
11681 int gidsetsize
= arg1
;
11682 uint32_t *target_grouplist
;
11686 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11687 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11688 if (gidsetsize
== 0)
11690 if (!is_error(ret
)) {
11691 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
11692 if (!target_grouplist
) {
11693 return -TARGET_EFAULT
;
11695 for(i
= 0;i
< ret
; i
++)
11696 target_grouplist
[i
] = tswap32(grouplist
[i
]);
11697 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
11702 #ifdef TARGET_NR_setgroups32
11703 case TARGET_NR_setgroups32
:
11705 int gidsetsize
= arg1
;
11706 uint32_t *target_grouplist
;
11710 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11711 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
11712 if (!target_grouplist
) {
11713 return -TARGET_EFAULT
;
11715 for(i
= 0;i
< gidsetsize
; i
++)
11716 grouplist
[i
] = tswap32(target_grouplist
[i
]);
11717 unlock_user(target_grouplist
, arg2
, 0);
11718 return get_errno(setgroups(gidsetsize
, grouplist
));
11721 #ifdef TARGET_NR_fchown32
11722 case TARGET_NR_fchown32
:
11723 return get_errno(fchown(arg1
, arg2
, arg3
));
11725 #ifdef TARGET_NR_setresuid32
11726 case TARGET_NR_setresuid32
:
11727 return get_errno(sys_setresuid(arg1
, arg2
, arg3
));
11729 #ifdef TARGET_NR_getresuid32
11730 case TARGET_NR_getresuid32
:
11732 uid_t ruid
, euid
, suid
;
11733 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11734 if (!is_error(ret
)) {
11735 if (put_user_u32(ruid
, arg1
)
11736 || put_user_u32(euid
, arg2
)
11737 || put_user_u32(suid
, arg3
))
11738 return -TARGET_EFAULT
;
11743 #ifdef TARGET_NR_setresgid32
11744 case TARGET_NR_setresgid32
:
11745 return get_errno(sys_setresgid(arg1
, arg2
, arg3
));
11747 #ifdef TARGET_NR_getresgid32
11748 case TARGET_NR_getresgid32
:
11750 gid_t rgid
, egid
, sgid
;
11751 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11752 if (!is_error(ret
)) {
11753 if (put_user_u32(rgid
, arg1
)
11754 || put_user_u32(egid
, arg2
)
11755 || put_user_u32(sgid
, arg3
))
11756 return -TARGET_EFAULT
;
11761 #ifdef TARGET_NR_chown32
11762 case TARGET_NR_chown32
:
11763 if (!(p
= lock_user_string(arg1
)))
11764 return -TARGET_EFAULT
;
11765 ret
= get_errno(chown(p
, arg2
, arg3
));
11766 unlock_user(p
, arg1
, 0);
11769 #ifdef TARGET_NR_setuid32
11770 case TARGET_NR_setuid32
:
11771 return get_errno(sys_setuid(arg1
));
11773 #ifdef TARGET_NR_setgid32
11774 case TARGET_NR_setgid32
:
11775 return get_errno(sys_setgid(arg1
));
11777 #ifdef TARGET_NR_setfsuid32
11778 case TARGET_NR_setfsuid32
:
11779 return get_errno(setfsuid(arg1
));
11781 #ifdef TARGET_NR_setfsgid32
11782 case TARGET_NR_setfsgid32
:
11783 return get_errno(setfsgid(arg1
));
11785 #ifdef TARGET_NR_mincore
11786 case TARGET_NR_mincore
:
11788 void *a
= lock_user(VERIFY_READ
, arg1
, arg2
, 0);
11790 return -TARGET_ENOMEM
;
11792 p
= lock_user_string(arg3
);
11794 ret
= -TARGET_EFAULT
;
11796 ret
= get_errno(mincore(a
, arg2
, p
));
11797 unlock_user(p
, arg3
, ret
);
11799 unlock_user(a
, arg1
, 0);
11803 #ifdef TARGET_NR_arm_fadvise64_64
11804 case TARGET_NR_arm_fadvise64_64
:
11805 /* arm_fadvise64_64 looks like fadvise64_64 but
11806 * with different argument order: fd, advice, offset, len
11807 * rather than the usual fd, offset, len, advice.
11808 * Note that offset and len are both 64-bit so appear as
11809 * pairs of 32-bit registers.
11811 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
11812 target_offset64(arg5
, arg6
), arg2
);
11813 return -host_to_target_errno(ret
);
11816 #if TARGET_ABI_BITS == 32
11818 #ifdef TARGET_NR_fadvise64_64
11819 case TARGET_NR_fadvise64_64
:
11820 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11821 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11829 /* 6 args: fd, offset (high, low), len (high, low), advice */
11830 if (regpairs_aligned(cpu_env
, num
)) {
11831 /* offset is in (3,4), len in (5,6) and advice in 7 */
11839 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
),
11840 target_offset64(arg4
, arg5
), arg6
);
11841 return -host_to_target_errno(ret
);
11844 #ifdef TARGET_NR_fadvise64
11845 case TARGET_NR_fadvise64
:
11846 /* 5 args: fd, offset (high, low), len, advice */
11847 if (regpairs_aligned(cpu_env
, num
)) {
11848 /* offset is in (3,4), len in 5 and advice in 6 */
11854 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
), arg4
, arg5
);
11855 return -host_to_target_errno(ret
);
11858 #else /* not a 32-bit ABI */
11859 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11860 #ifdef TARGET_NR_fadvise64_64
11861 case TARGET_NR_fadvise64_64
:
11863 #ifdef TARGET_NR_fadvise64
11864 case TARGET_NR_fadvise64
:
11866 #ifdef TARGET_S390X
11868 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
11869 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
11870 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
11871 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
11875 return -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
11877 #endif /* end of 64-bit ABI fadvise handling */
11879 #ifdef TARGET_NR_madvise
11880 case TARGET_NR_madvise
:
11881 /* A straight passthrough may not be safe because qemu sometimes
11882 turns private file-backed mappings into anonymous mappings.
11883 This will break MADV_DONTNEED.
11884 This is a hint, so ignoring and returning success is ok. */
11887 #ifdef TARGET_NR_fcntl64
11888 case TARGET_NR_fcntl64
:
11892 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
11893 to_flock64_fn
*copyto
= copy_to_user_flock64
;
11896 if (!((CPUARMState
*)cpu_env
)->eabi
) {
11897 copyfrom
= copy_from_user_oabi_flock64
;
11898 copyto
= copy_to_user_oabi_flock64
;
11902 cmd
= target_to_host_fcntl_cmd(arg2
);
11903 if (cmd
== -TARGET_EINVAL
) {
11908 case TARGET_F_GETLK64
:
11909 ret
= copyfrom(&fl
, arg3
);
11913 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11915 ret
= copyto(arg3
, &fl
);
11919 case TARGET_F_SETLK64
:
11920 case TARGET_F_SETLKW64
:
11921 ret
= copyfrom(&fl
, arg3
);
11925 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11928 ret
= do_fcntl(arg1
, arg2
, arg3
);
11934 #ifdef TARGET_NR_cacheflush
11935 case TARGET_NR_cacheflush
:
11936 /* self-modifying code is handled automatically, so nothing needed */
11939 #ifdef TARGET_NR_getpagesize
11940 case TARGET_NR_getpagesize
:
11941 return TARGET_PAGE_SIZE
;
11943 case TARGET_NR_gettid
:
11944 return get_errno(sys_gettid());
11945 #ifdef TARGET_NR_readahead
11946 case TARGET_NR_readahead
:
11947 #if TARGET_ABI_BITS == 32
11948 if (regpairs_aligned(cpu_env
, num
)) {
11953 ret
= get_errno(readahead(arg1
, target_offset64(arg2
, arg3
) , arg4
));
11955 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
11960 #ifdef TARGET_NR_setxattr
11961 case TARGET_NR_listxattr
:
11962 case TARGET_NR_llistxattr
:
11966 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11968 return -TARGET_EFAULT
;
11971 p
= lock_user_string(arg1
);
11973 if (num
== TARGET_NR_listxattr
) {
11974 ret
= get_errno(listxattr(p
, b
, arg3
));
11976 ret
= get_errno(llistxattr(p
, b
, arg3
));
11979 ret
= -TARGET_EFAULT
;
11981 unlock_user(p
, arg1
, 0);
11982 unlock_user(b
, arg2
, arg3
);
11985 case TARGET_NR_flistxattr
:
11989 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11991 return -TARGET_EFAULT
;
11994 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
11995 unlock_user(b
, arg2
, arg3
);
11998 case TARGET_NR_setxattr
:
11999 case TARGET_NR_lsetxattr
:
12001 void *p
, *n
, *v
= 0;
12003 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
12005 return -TARGET_EFAULT
;
12008 p
= lock_user_string(arg1
);
12009 n
= lock_user_string(arg2
);
12011 if (num
== TARGET_NR_setxattr
) {
12012 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
12014 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
12017 ret
= -TARGET_EFAULT
;
12019 unlock_user(p
, arg1
, 0);
12020 unlock_user(n
, arg2
, 0);
12021 unlock_user(v
, arg3
, 0);
12024 case TARGET_NR_fsetxattr
:
12028 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
12030 return -TARGET_EFAULT
;
12033 n
= lock_user_string(arg2
);
12035 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
12037 ret
= -TARGET_EFAULT
;
12039 unlock_user(n
, arg2
, 0);
12040 unlock_user(v
, arg3
, 0);
12043 case TARGET_NR_getxattr
:
12044 case TARGET_NR_lgetxattr
:
12046 void *p
, *n
, *v
= 0;
12048 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
12050 return -TARGET_EFAULT
;
12053 p
= lock_user_string(arg1
);
12054 n
= lock_user_string(arg2
);
12056 if (num
== TARGET_NR_getxattr
) {
12057 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
12059 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
12062 ret
= -TARGET_EFAULT
;
12064 unlock_user(p
, arg1
, 0);
12065 unlock_user(n
, arg2
, 0);
12066 unlock_user(v
, arg3
, arg4
);
12069 case TARGET_NR_fgetxattr
:
12073 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
12075 return -TARGET_EFAULT
;
12078 n
= lock_user_string(arg2
);
12080 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
12082 ret
= -TARGET_EFAULT
;
12084 unlock_user(n
, arg2
, 0);
12085 unlock_user(v
, arg3
, arg4
);
12088 case TARGET_NR_removexattr
:
12089 case TARGET_NR_lremovexattr
:
12092 p
= lock_user_string(arg1
);
12093 n
= lock_user_string(arg2
);
12095 if (num
== TARGET_NR_removexattr
) {
12096 ret
= get_errno(removexattr(p
, n
));
12098 ret
= get_errno(lremovexattr(p
, n
));
12101 ret
= -TARGET_EFAULT
;
12103 unlock_user(p
, arg1
, 0);
12104 unlock_user(n
, arg2
, 0);
12107 case TARGET_NR_fremovexattr
:
12110 n
= lock_user_string(arg2
);
12112 ret
= get_errno(fremovexattr(arg1
, n
));
12114 ret
= -TARGET_EFAULT
;
12116 unlock_user(n
, arg2
, 0);
12120 #endif /* CONFIG_ATTR */
12121 #ifdef TARGET_NR_set_thread_area
12122 case TARGET_NR_set_thread_area
:
12123 #if defined(TARGET_MIPS)
12124 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
12126 #elif defined(TARGET_CRIS)
12128 ret
= -TARGET_EINVAL
;
12130 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
12134 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12135 return do_set_thread_area(cpu_env
, arg1
);
12136 #elif defined(TARGET_M68K)
12138 TaskState
*ts
= cpu
->opaque
;
12139 ts
->tp_value
= arg1
;
12143 return -TARGET_ENOSYS
;
12146 #ifdef TARGET_NR_get_thread_area
12147 case TARGET_NR_get_thread_area
:
12148 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12149 return do_get_thread_area(cpu_env
, arg1
);
12150 #elif defined(TARGET_M68K)
12152 TaskState
*ts
= cpu
->opaque
;
12153 return ts
->tp_value
;
12156 return -TARGET_ENOSYS
;
12159 #ifdef TARGET_NR_getdomainname
12160 case TARGET_NR_getdomainname
:
12161 return -TARGET_ENOSYS
;
12164 #ifdef TARGET_NR_clock_settime
12165 case TARGET_NR_clock_settime
:
12167 struct timespec ts
;
12169 ret
= target_to_host_timespec(&ts
, arg2
);
12170 if (!is_error(ret
)) {
12171 ret
= get_errno(clock_settime(arg1
, &ts
));
12176 #ifdef TARGET_NR_clock_settime64
12177 case TARGET_NR_clock_settime64
:
12179 struct timespec ts
;
12181 ret
= target_to_host_timespec64(&ts
, arg2
);
12182 if (!is_error(ret
)) {
12183 ret
= get_errno(clock_settime(arg1
, &ts
));
12188 #ifdef TARGET_NR_clock_gettime
12189 case TARGET_NR_clock_gettime
:
12191 struct timespec ts
;
12192 ret
= get_errno(clock_gettime(arg1
, &ts
));
12193 if (!is_error(ret
)) {
12194 ret
= host_to_target_timespec(arg2
, &ts
);
12199 #ifdef TARGET_NR_clock_gettime64
12200 case TARGET_NR_clock_gettime64
:
12202 struct timespec ts
;
12203 ret
= get_errno(clock_gettime(arg1
, &ts
));
12204 if (!is_error(ret
)) {
12205 ret
= host_to_target_timespec64(arg2
, &ts
);
12210 #ifdef TARGET_NR_clock_getres
12211 case TARGET_NR_clock_getres
:
12213 struct timespec ts
;
12214 ret
= get_errno(clock_getres(arg1
, &ts
));
12215 if (!is_error(ret
)) {
12216 host_to_target_timespec(arg2
, &ts
);
12221 #ifdef TARGET_NR_clock_getres_time64
12222 case TARGET_NR_clock_getres_time64
:
12224 struct timespec ts
;
12225 ret
= get_errno(clock_getres(arg1
, &ts
));
12226 if (!is_error(ret
)) {
12227 host_to_target_timespec64(arg2
, &ts
);
12232 #ifdef TARGET_NR_clock_nanosleep
12233 case TARGET_NR_clock_nanosleep
:
12235 struct timespec ts
;
12236 if (target_to_host_timespec(&ts
, arg3
)) {
12237 return -TARGET_EFAULT
;
12239 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
12240 &ts
, arg4
? &ts
: NULL
));
12242 * if the call is interrupted by a signal handler, it fails
12243 * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
12244 * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
12246 if (ret
== -TARGET_EINTR
&& arg4
&& arg2
!= TIMER_ABSTIME
&&
12247 host_to_target_timespec(arg4
, &ts
)) {
12248 return -TARGET_EFAULT
;
12254 #ifdef TARGET_NR_clock_nanosleep_time64
12255 case TARGET_NR_clock_nanosleep_time64
:
12257 struct timespec ts
;
12259 if (target_to_host_timespec64(&ts
, arg3
)) {
12260 return -TARGET_EFAULT
;
12263 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
12264 &ts
, arg4
? &ts
: NULL
));
12266 if (ret
== -TARGET_EINTR
&& arg4
&& arg2
!= TIMER_ABSTIME
&&
12267 host_to_target_timespec64(arg4
, &ts
)) {
12268 return -TARGET_EFAULT
;
12274 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12275 case TARGET_NR_set_tid_address
:
12276 return get_errno(set_tid_address((int *)g2h(cpu
, arg1
)));
12279 case TARGET_NR_tkill
:
12280 return get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
12282 case TARGET_NR_tgkill
:
12283 return get_errno(safe_tgkill((int)arg1
, (int)arg2
,
12284 target_to_host_signal(arg3
)));
12286 #ifdef TARGET_NR_set_robust_list
12287 case TARGET_NR_set_robust_list
:
12288 case TARGET_NR_get_robust_list
:
12289 /* The ABI for supporting robust futexes has userspace pass
12290 * the kernel a pointer to a linked list which is updated by
12291 * userspace after the syscall; the list is walked by the kernel
12292 * when the thread exits. Since the linked list in QEMU guest
12293 * memory isn't a valid linked list for the host and we have
12294 * no way to reliably intercept the thread-death event, we can't
12295 * support these. Silently return ENOSYS so that guest userspace
12296 * falls back to a non-robust futex implementation (which should
12297 * be OK except in the corner case of the guest crashing while
12298 * holding a mutex that is shared with another process via
12301 return -TARGET_ENOSYS
;
12304 #if defined(TARGET_NR_utimensat)
12305 case TARGET_NR_utimensat
:
12307 struct timespec
*tsp
, ts
[2];
12311 if (target_to_host_timespec(ts
, arg3
)) {
12312 return -TARGET_EFAULT
;
12314 if (target_to_host_timespec(ts
+ 1, arg3
+
12315 sizeof(struct target_timespec
))) {
12316 return -TARGET_EFAULT
;
12321 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
12323 if (!(p
= lock_user_string(arg2
))) {
12324 return -TARGET_EFAULT
;
12326 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
12327 unlock_user(p
, arg2
, 0);
12332 #ifdef TARGET_NR_utimensat_time64
12333 case TARGET_NR_utimensat_time64
:
12335 struct timespec
*tsp
, ts
[2];
12339 if (target_to_host_timespec64(ts
, arg3
)) {
12340 return -TARGET_EFAULT
;
12342 if (target_to_host_timespec64(ts
+ 1, arg3
+
12343 sizeof(struct target__kernel_timespec
))) {
12344 return -TARGET_EFAULT
;
12349 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
12351 p
= lock_user_string(arg2
);
12353 return -TARGET_EFAULT
;
12355 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
12356 unlock_user(p
, arg2
, 0);
12361 #ifdef TARGET_NR_futex
12362 case TARGET_NR_futex
:
12363 return do_futex(cpu
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
12365 #ifdef TARGET_NR_futex_time64
12366 case TARGET_NR_futex_time64
:
12367 return do_futex_time64(cpu
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
12369 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12370 case TARGET_NR_inotify_init
:
12371 ret
= get_errno(sys_inotify_init());
12373 fd_trans_register(ret
, &target_inotify_trans
);
12377 #ifdef CONFIG_INOTIFY1
12378 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12379 case TARGET_NR_inotify_init1
:
12380 ret
= get_errno(sys_inotify_init1(target_to_host_bitmask(arg1
,
12381 fcntl_flags_tbl
)));
12383 fd_trans_register(ret
, &target_inotify_trans
);
12388 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12389 case TARGET_NR_inotify_add_watch
:
12390 p
= lock_user_string(arg2
);
12391 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
12392 unlock_user(p
, arg2
, 0);
12395 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12396 case TARGET_NR_inotify_rm_watch
:
12397 return get_errno(sys_inotify_rm_watch(arg1
, arg2
));
12400 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12401 case TARGET_NR_mq_open
:
12403 struct mq_attr posix_mq_attr
;
12404 struct mq_attr
*pposix_mq_attr
;
12407 host_flags
= target_to_host_bitmask(arg2
, fcntl_flags_tbl
);
12408 pposix_mq_attr
= NULL
;
12410 if (copy_from_user_mq_attr(&posix_mq_attr
, arg4
) != 0) {
12411 return -TARGET_EFAULT
;
12413 pposix_mq_attr
= &posix_mq_attr
;
12415 p
= lock_user_string(arg1
- 1);
12417 return -TARGET_EFAULT
;
12419 ret
= get_errno(mq_open(p
, host_flags
, arg3
, pposix_mq_attr
));
12420 unlock_user (p
, arg1
, 0);
12424 case TARGET_NR_mq_unlink
:
12425 p
= lock_user_string(arg1
- 1);
12427 return -TARGET_EFAULT
;
12429 ret
= get_errno(mq_unlink(p
));
12430 unlock_user (p
, arg1
, 0);
12433 #ifdef TARGET_NR_mq_timedsend
12434 case TARGET_NR_mq_timedsend
:
12436 struct timespec ts
;
12438 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
12440 if (target_to_host_timespec(&ts
, arg5
)) {
12441 return -TARGET_EFAULT
;
12443 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
12444 if (!is_error(ret
) && host_to_target_timespec(arg5
, &ts
)) {
12445 return -TARGET_EFAULT
;
12448 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
12450 unlock_user (p
, arg2
, arg3
);
12454 #ifdef TARGET_NR_mq_timedsend_time64
12455 case TARGET_NR_mq_timedsend_time64
:
12457 struct timespec ts
;
12459 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
12461 if (target_to_host_timespec64(&ts
, arg5
)) {
12462 return -TARGET_EFAULT
;
12464 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
12465 if (!is_error(ret
) && host_to_target_timespec64(arg5
, &ts
)) {
12466 return -TARGET_EFAULT
;
12469 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
12471 unlock_user(p
, arg2
, arg3
);
12476 #ifdef TARGET_NR_mq_timedreceive
12477 case TARGET_NR_mq_timedreceive
:
12479 struct timespec ts
;
12482 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
12484 if (target_to_host_timespec(&ts
, arg5
)) {
12485 return -TARGET_EFAULT
;
12487 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12489 if (!is_error(ret
) && host_to_target_timespec(arg5
, &ts
)) {
12490 return -TARGET_EFAULT
;
12493 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12496 unlock_user (p
, arg2
, arg3
);
12498 put_user_u32(prio
, arg4
);
12502 #ifdef TARGET_NR_mq_timedreceive_time64
12503 case TARGET_NR_mq_timedreceive_time64
:
12505 struct timespec ts
;
12508 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
12510 if (target_to_host_timespec64(&ts
, arg5
)) {
12511 return -TARGET_EFAULT
;
12513 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12515 if (!is_error(ret
) && host_to_target_timespec64(arg5
, &ts
)) {
12516 return -TARGET_EFAULT
;
12519 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12522 unlock_user(p
, arg2
, arg3
);
12524 put_user_u32(prio
, arg4
);
12530 /* Not implemented for now... */
12531 /* case TARGET_NR_mq_notify: */
12534 case TARGET_NR_mq_getsetattr
:
12536 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
12539 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
12540 ret
= get_errno(mq_setattr(arg1
, &posix_mq_attr_in
,
12541 &posix_mq_attr_out
));
12542 } else if (arg3
!= 0) {
12543 ret
= get_errno(mq_getattr(arg1
, &posix_mq_attr_out
));
12545 if (ret
== 0 && arg3
!= 0) {
12546 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
12552 #ifdef CONFIG_SPLICE
12553 #ifdef TARGET_NR_tee
12554 case TARGET_NR_tee
:
12556 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
12560 #ifdef TARGET_NR_splice
12561 case TARGET_NR_splice
:
12563 loff_t loff_in
, loff_out
;
12564 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
12566 if (get_user_u64(loff_in
, arg2
)) {
12567 return -TARGET_EFAULT
;
12569 ploff_in
= &loff_in
;
12572 if (get_user_u64(loff_out
, arg4
)) {
12573 return -TARGET_EFAULT
;
12575 ploff_out
= &loff_out
;
12577 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
12579 if (put_user_u64(loff_in
, arg2
)) {
12580 return -TARGET_EFAULT
;
12584 if (put_user_u64(loff_out
, arg4
)) {
12585 return -TARGET_EFAULT
;
12591 #ifdef TARGET_NR_vmsplice
12592 case TARGET_NR_vmsplice
:
12594 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
12596 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
12597 unlock_iovec(vec
, arg2
, arg3
, 0);
12599 ret
= -host_to_target_errno(errno
);
12604 #endif /* CONFIG_SPLICE */
12605 #ifdef CONFIG_EVENTFD
12606 #if defined(TARGET_NR_eventfd)
12607 case TARGET_NR_eventfd
:
12608 ret
= get_errno(eventfd(arg1
, 0));
12610 fd_trans_register(ret
, &target_eventfd_trans
);
12614 #if defined(TARGET_NR_eventfd2)
12615 case TARGET_NR_eventfd2
:
12617 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK_MASK
| TARGET_O_CLOEXEC
));
12618 if (arg2
& TARGET_O_NONBLOCK
) {
12619 host_flags
|= O_NONBLOCK
;
12621 if (arg2
& TARGET_O_CLOEXEC
) {
12622 host_flags
|= O_CLOEXEC
;
12624 ret
= get_errno(eventfd(arg1
, host_flags
));
12626 fd_trans_register(ret
, &target_eventfd_trans
);
12631 #endif /* CONFIG_EVENTFD */
12632 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12633 case TARGET_NR_fallocate
:
12634 #if TARGET_ABI_BITS == 32
12635 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
12636 target_offset64(arg5
, arg6
)));
12638 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
12642 #if defined(CONFIG_SYNC_FILE_RANGE)
12643 #if defined(TARGET_NR_sync_file_range)
12644 case TARGET_NR_sync_file_range
:
12645 #if TARGET_ABI_BITS == 32
12646 #if defined(TARGET_MIPS)
12647 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12648 target_offset64(arg5
, arg6
), arg7
));
12650 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
12651 target_offset64(arg4
, arg5
), arg6
));
12652 #endif /* !TARGET_MIPS */
12654 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
12658 #if defined(TARGET_NR_sync_file_range2) || \
12659 defined(TARGET_NR_arm_sync_file_range)
12660 #if defined(TARGET_NR_sync_file_range2)
12661 case TARGET_NR_sync_file_range2
:
12663 #if defined(TARGET_NR_arm_sync_file_range)
12664 case TARGET_NR_arm_sync_file_range
:
12666 /* This is like sync_file_range but the arguments are reordered */
12667 #if TARGET_ABI_BITS == 32
12668 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12669 target_offset64(arg5
, arg6
), arg2
));
12671 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
12676 #if defined(TARGET_NR_signalfd4)
12677 case TARGET_NR_signalfd4
:
12678 return do_signalfd4(arg1
, arg2
, arg4
);
12680 #if defined(TARGET_NR_signalfd)
12681 case TARGET_NR_signalfd
:
12682 return do_signalfd4(arg1
, arg2
, 0);
12684 #if defined(CONFIG_EPOLL)
12685 #if defined(TARGET_NR_epoll_create)
12686 case TARGET_NR_epoll_create
:
12687 return get_errno(epoll_create(arg1
));
12689 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12690 case TARGET_NR_epoll_create1
:
12691 return get_errno(epoll_create1(target_to_host_bitmask(arg1
, fcntl_flags_tbl
)));
12693 #if defined(TARGET_NR_epoll_ctl)
12694 case TARGET_NR_epoll_ctl
:
12696 struct epoll_event ep
;
12697 struct epoll_event
*epp
= 0;
12699 if (arg2
!= EPOLL_CTL_DEL
) {
12700 struct target_epoll_event
*target_ep
;
12701 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
12702 return -TARGET_EFAULT
;
12704 ep
.events
= tswap32(target_ep
->events
);
12706 * The epoll_data_t union is just opaque data to the kernel,
12707 * so we transfer all 64 bits across and need not worry what
12708 * actual data type it is.
12710 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
12711 unlock_user_struct(target_ep
, arg4
, 0);
12714 * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
12715 * non-null pointer, even though this argument is ignored.
12720 return get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
12724 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12725 #if defined(TARGET_NR_epoll_wait)
12726 case TARGET_NR_epoll_wait
:
12728 #if defined(TARGET_NR_epoll_pwait)
12729 case TARGET_NR_epoll_pwait
:
12732 struct target_epoll_event
*target_ep
;
12733 struct epoll_event
*ep
;
12735 int maxevents
= arg3
;
12736 int timeout
= arg4
;
12738 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
12739 return -TARGET_EINVAL
;
12742 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
12743 maxevents
* sizeof(struct target_epoll_event
), 1);
12745 return -TARGET_EFAULT
;
12748 ep
= g_try_new(struct epoll_event
, maxevents
);
12750 unlock_user(target_ep
, arg2
, 0);
12751 return -TARGET_ENOMEM
;
12755 #if defined(TARGET_NR_epoll_pwait)
12756 case TARGET_NR_epoll_pwait
:
12758 target_sigset_t
*target_set
;
12759 sigset_t _set
, *set
= &_set
;
12762 if (arg6
!= sizeof(target_sigset_t
)) {
12763 ret
= -TARGET_EINVAL
;
12767 target_set
= lock_user(VERIFY_READ
, arg5
,
12768 sizeof(target_sigset_t
), 1);
12770 ret
= -TARGET_EFAULT
;
12773 target_to_host_sigset(set
, target_set
);
12774 unlock_user(target_set
, arg5
, 0);
12779 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12780 set
, SIGSET_T_SIZE
));
12784 #if defined(TARGET_NR_epoll_wait)
12785 case TARGET_NR_epoll_wait
:
12786 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12791 ret
= -TARGET_ENOSYS
;
12793 if (!is_error(ret
)) {
12795 for (i
= 0; i
< ret
; i
++) {
12796 target_ep
[i
].events
= tswap32(ep
[i
].events
);
12797 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
12799 unlock_user(target_ep
, arg2
,
12800 ret
* sizeof(struct target_epoll_event
));
12802 unlock_user(target_ep
, arg2
, 0);
12809 #ifdef TARGET_NR_prlimit64
12810 case TARGET_NR_prlimit64
:
12812 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12813 struct target_rlimit64
*target_rnew
, *target_rold
;
12814 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
12815 int resource
= target_to_host_resource(arg2
);
12817 if (arg3
&& (resource
!= RLIMIT_AS
&&
12818 resource
!= RLIMIT_DATA
&&
12819 resource
!= RLIMIT_STACK
)) {
12820 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
12821 return -TARGET_EFAULT
;
12823 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
12824 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
12825 unlock_user_struct(target_rnew
, arg3
, 0);
12829 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
12830 if (!is_error(ret
) && arg4
) {
12831 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
12832 return -TARGET_EFAULT
;
12834 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
12835 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
12836 unlock_user_struct(target_rold
, arg4
, 1);
12841 #ifdef TARGET_NR_gethostname
12842 case TARGET_NR_gethostname
:
12844 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
12846 ret
= get_errno(gethostname(name
, arg2
));
12847 unlock_user(name
, arg1
, arg2
);
12849 ret
= -TARGET_EFAULT
;
12854 #ifdef TARGET_NR_atomic_cmpxchg_32
12855 case TARGET_NR_atomic_cmpxchg_32
:
12857 /* should use start_exclusive from main.c */
12858 abi_ulong mem_value
;
12859 if (get_user_u32(mem_value
, arg6
)) {
12860 target_siginfo_t info
;
12861 info
.si_signo
= SIGSEGV
;
12863 info
.si_code
= TARGET_SEGV_MAPERR
;
12864 info
._sifields
._sigfault
._addr
= arg6
;
12865 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
12866 QEMU_SI_FAULT
, &info
);
12870 if (mem_value
== arg2
)
12871 put_user_u32(arg1
, arg6
);
12875 #ifdef TARGET_NR_atomic_barrier
12876 case TARGET_NR_atomic_barrier
:
12877 /* Like the kernel implementation and the
12878 qemu arm barrier, no-op this? */
12882 #ifdef TARGET_NR_timer_create
12883 case TARGET_NR_timer_create
:
12885 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12887 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
12890 int timer_index
= next_free_host_timer();
12892 if (timer_index
< 0) {
12893 ret
= -TARGET_EAGAIN
;
12895 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
12898 phost_sevp
= &host_sevp
;
12899 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
12905 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
12909 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
12910 return -TARGET_EFAULT
;
12918 #ifdef TARGET_NR_timer_settime
12919 case TARGET_NR_timer_settime
:
12921 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12922 * struct itimerspec * old_value */
12923 target_timer_t timerid
= get_timer_id(arg1
);
12927 } else if (arg3
== 0) {
12928 ret
= -TARGET_EINVAL
;
12930 timer_t htimer
= g_posix_timers
[timerid
];
12931 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
12933 if (target_to_host_itimerspec(&hspec_new
, arg3
)) {
12934 return -TARGET_EFAULT
;
12937 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
12938 if (arg4
&& host_to_target_itimerspec(arg4
, &hspec_old
)) {
12939 return -TARGET_EFAULT
;
12946 #ifdef TARGET_NR_timer_settime64
12947 case TARGET_NR_timer_settime64
:
12949 target_timer_t timerid
= get_timer_id(arg1
);
12953 } else if (arg3
== 0) {
12954 ret
= -TARGET_EINVAL
;
12956 timer_t htimer
= g_posix_timers
[timerid
];
12957 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
12959 if (target_to_host_itimerspec64(&hspec_new
, arg3
)) {
12960 return -TARGET_EFAULT
;
12963 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
12964 if (arg4
&& host_to_target_itimerspec64(arg4
, &hspec_old
)) {
12965 return -TARGET_EFAULT
;
12972 #ifdef TARGET_NR_timer_gettime
12973 case TARGET_NR_timer_gettime
:
12975 /* args: timer_t timerid, struct itimerspec *curr_value */
12976 target_timer_t timerid
= get_timer_id(arg1
);
12980 } else if (!arg2
) {
12981 ret
= -TARGET_EFAULT
;
12983 timer_t htimer
= g_posix_timers
[timerid
];
12984 struct itimerspec hspec
;
12985 ret
= get_errno(timer_gettime(htimer
, &hspec
));
12987 if (host_to_target_itimerspec(arg2
, &hspec
)) {
12988 ret
= -TARGET_EFAULT
;
12995 #ifdef TARGET_NR_timer_gettime64
12996 case TARGET_NR_timer_gettime64
:
12998 /* args: timer_t timerid, struct itimerspec64 *curr_value */
12999 target_timer_t timerid
= get_timer_id(arg1
);
13003 } else if (!arg2
) {
13004 ret
= -TARGET_EFAULT
;
13006 timer_t htimer
= g_posix_timers
[timerid
];
13007 struct itimerspec hspec
;
13008 ret
= get_errno(timer_gettime(htimer
, &hspec
));
13010 if (host_to_target_itimerspec64(arg2
, &hspec
)) {
13011 ret
= -TARGET_EFAULT
;
13018 #ifdef TARGET_NR_timer_getoverrun
13019 case TARGET_NR_timer_getoverrun
:
13021 /* args: timer_t timerid */
13022 target_timer_t timerid
= get_timer_id(arg1
);
13027 timer_t htimer
= g_posix_timers
[timerid
];
13028 ret
= get_errno(timer_getoverrun(htimer
));
13034 #ifdef TARGET_NR_timer_delete
13035 case TARGET_NR_timer_delete
:
13037 /* args: timer_t timerid */
13038 target_timer_t timerid
= get_timer_id(arg1
);
13043 timer_t htimer
= g_posix_timers
[timerid
];
13044 ret
= get_errno(timer_delete(htimer
));
13045 g_posix_timers
[timerid
] = 0;
13051 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13052 case TARGET_NR_timerfd_create
:
13053 return get_errno(timerfd_create(arg1
,
13054 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
13057 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13058 case TARGET_NR_timerfd_gettime
:
13060 struct itimerspec its_curr
;
13062 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
13064 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
13065 return -TARGET_EFAULT
;
13071 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13072 case TARGET_NR_timerfd_gettime64
:
13074 struct itimerspec its_curr
;
13076 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
13078 if (arg2
&& host_to_target_itimerspec64(arg2
, &its_curr
)) {
13079 return -TARGET_EFAULT
;
13085 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13086 case TARGET_NR_timerfd_settime
:
13088 struct itimerspec its_new
, its_old
, *p_new
;
13091 if (target_to_host_itimerspec(&its_new
, arg3
)) {
13092 return -TARGET_EFAULT
;
13099 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
13101 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
13102 return -TARGET_EFAULT
;
13108 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13109 case TARGET_NR_timerfd_settime64
:
13111 struct itimerspec its_new
, its_old
, *p_new
;
13114 if (target_to_host_itimerspec64(&its_new
, arg3
)) {
13115 return -TARGET_EFAULT
;
13122 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
13124 if (arg4
&& host_to_target_itimerspec64(arg4
, &its_old
)) {
13125 return -TARGET_EFAULT
;
13131 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13132 case TARGET_NR_ioprio_get
:
13133 return get_errno(ioprio_get(arg1
, arg2
));
13136 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13137 case TARGET_NR_ioprio_set
:
13138 return get_errno(ioprio_set(arg1
, arg2
, arg3
));
13141 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13142 case TARGET_NR_setns
:
13143 return get_errno(setns(arg1
, arg2
));
13145 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13146 case TARGET_NR_unshare
:
13147 return get_errno(unshare(arg1
));
13149 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13150 case TARGET_NR_kcmp
:
13151 return get_errno(kcmp(arg1
, arg2
, arg3
, arg4
, arg5
));
13153 #ifdef TARGET_NR_swapcontext
13154 case TARGET_NR_swapcontext
:
13155 /* PowerPC specific. */
13156 return do_swapcontext(cpu_env
, arg1
, arg2
, arg3
);
13158 #ifdef TARGET_NR_memfd_create
13159 case TARGET_NR_memfd_create
:
13160 p
= lock_user_string(arg1
);
13162 return -TARGET_EFAULT
;
13164 ret
= get_errno(memfd_create(p
, arg2
));
13165 fd_trans_unregister(ret
);
13166 unlock_user(p
, arg1
, 0);
13169 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13170 case TARGET_NR_membarrier
:
13171 return get_errno(membarrier(arg1
, arg2
));
13174 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13175 case TARGET_NR_copy_file_range
:
13177 loff_t inoff
, outoff
;
13178 loff_t
*pinoff
= NULL
, *poutoff
= NULL
;
13181 if (get_user_u64(inoff
, arg2
)) {
13182 return -TARGET_EFAULT
;
13187 if (get_user_u64(outoff
, arg4
)) {
13188 return -TARGET_EFAULT
;
13192 /* Do not sign-extend the count parameter. */
13193 ret
= get_errno(safe_copy_file_range(arg1
, pinoff
, arg3
, poutoff
,
13194 (abi_ulong
)arg5
, arg6
));
13195 if (!is_error(ret
) && ret
> 0) {
13197 if (put_user_u64(inoff
, arg2
)) {
13198 return -TARGET_EFAULT
;
13202 if (put_user_u64(outoff
, arg4
)) {
13203 return -TARGET_EFAULT
;
13212 qemu_log_mask(LOG_UNIMP
, "Unsupported syscall: %d\n", num
);
13213 return -TARGET_ENOSYS
;
13218 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
13219 abi_long arg2
, abi_long arg3
, abi_long arg4
,
13220 abi_long arg5
, abi_long arg6
, abi_long arg7
,
13223 CPUState
*cpu
= env_cpu(cpu_env
);
13226 #ifdef DEBUG_ERESTARTSYS
13227 /* Debug-only code for exercising the syscall-restart code paths
13228 * in the per-architecture cpu main loops: restart every syscall
13229 * the guest makes once before letting it through.
13235 return -TARGET_ERESTARTSYS
;
13240 record_syscall_start(cpu
, num
, arg1
,
13241 arg2
, arg3
, arg4
, arg5
, arg6
, arg7
, arg8
);
13243 if (unlikely(qemu_loglevel_mask(LOG_STRACE
))) {
13244 print_syscall(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
13247 ret
= do_syscall1(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
,
13248 arg5
, arg6
, arg7
, arg8
);
13250 if (unlikely(qemu_loglevel_mask(LOG_STRACE
))) {
13251 print_syscall_ret(cpu_env
, num
, ret
, arg1
, arg2
,
13252 arg3
, arg4
, arg5
, arg6
);
13255 record_syscall_return(cpu
, num
, ret
);