/*
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/path.h"
#include "qemu/memfd.h"
#include "qemu/queue.h"
#include <sys/mount.h>
#include <sys/fsuid.h>
#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <linux/capability.h>
#include <sys/timex.h>
#include <sys/socket.h>
#include <linux/sockios.h>
#include <sys/times.h>
#include <sys/statfs.h>
#include <sys/sysinfo.h>
#include <sys/signalfd.h>
//#include <sys/user.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/errqueue.h>
#include <linux/random.h>
#include <sys/timerfd.h>
#include <sys/eventfd.h>
#include <sys/epoll.h>
#include "qemu/xattr.h"
#ifdef CONFIG_SENDFILE
#include <sys/sendfile.h>
#endif
#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/mtio.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#if defined(CONFIG_USBFS)
#include <linux/usbdevice_fs.h>
#include <linux/usb/ch9.h>
#endif
#include <linux/vt.h>
#include <linux/dm-ioctl.h>
#include <linux/reboot.h>
#include <linux/route.h>
#include <linux/filter.h>
#include <linux/blkpg.h>
#include <netpacket/packet.h>
#include <linux/netlink.h>
#include <linux/if_alg.h>
#include <linux/rtc.h>
#include "linux_loop.h"
#include "qemu/guest-random.h"
#include "user/syscall-trace.h"
#include "qapi/error.h"
#include "fd-trans.h"
#ifndef CLONE_IO
#define CLONE_IO 0x80000000 /* Clone io context */
#endif
/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */
/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                     \
    (CLONE_DETACHED | CLONE_IO)

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS               \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS                             \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

#define CLONE_INVALID_FORK_FLAGS                                        \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

#define CLONE_INVALID_THREAD_FLAGS                                      \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
       CLONE_IGNORED_FLAGS))

/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */
/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH  _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
#define _syscall0(type,name)            \
static type name (void)                 \
{                                       \
    return syscall(__NR_##name);        \
}

#define _syscall1(type,name,type1,arg1)         \
static type name (type1 arg1)                   \
{                                               \
    return syscall(__NR_##name, arg1);          \
}

#define _syscall2(type,name,type1,arg1,type2,arg2)      \
static type name (type1 arg1,type2 arg2)                \
{                                                       \
    return syscall(__NR_##name, arg1, arg2);            \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)   \
static type name (type1 arg1,type2 arg2,type3 arg3)             \
{                                                               \
    return syscall(__NR_##name, arg1, arg2, arg3);              \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)  \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)            \
{                                                                         \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4);                  \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,  \
                  type5,arg5)                                             \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{                                                                         \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);            \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,  \
                  type5,arg5,type6,arg6)                                  \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6)                                             \
{                                                                         \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);      \
}
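/* For example, the (hypothetical) invocation
 *
 *     _syscall2(int, sys_dummy, int, a, int, b)
 *
 * expands to
 *
 *     static int sys_dummy (int a,int b)
 *     {
 *         return syscall(__NR_sys_dummy, a, b);
 *     }
 *
 * i.e. each wrapper forwards its arguments straight to the numbered host
 * syscall, bypassing any libc wrapper of the same name.  Because the macro
 * pastes __NR_ onto the wrapper name, the aliases below map the sys_-prefixed
 * names back onto the real syscall numbers.
 */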
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
#define __NR_sys_statx __NR_statx
#if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif

#define __NR_sys_gettid __NR_gettid
_syscall0(int, sys_gettid)
/* For the 64-bit guest on 32-bit host case we must emulate
 * getdents using getdents64, because otherwise the host
 * might hand us back more dirent records than we can fit
 * into the guest buffer after structure format conversion.
 * Otherwise we emulate getdents with getdents if the host has it.
 */
#if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
#define EMULATE_GETDENTS_WITH_GETDENTS
#endif
#if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
#if (defined(TARGET_NR_getdents) && \
      !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
_syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
          siginfo_t *, uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(TARGET_NR_futex) && defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_getcpu __NR_getcpu
_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
_syscall2(int, ioprio_get, int, which, int, who)
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
#endif

#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
_syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
          unsigned long, idx1, unsigned long, idx2)
#endif
/*
 * It is assumed that struct statx is architecture independent.
 */
#if defined(TARGET_NR_statx) && defined(__NR_statx)
_syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
          unsigned int, mask, struct target_statx *, statxbuf)
#endif
static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
static int sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }
  return strlen(buf)+1;
}
#ifdef TARGET_NR_utimensat
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY  */
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

static inline int next_free_host_timer(void)
{
    int k;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env, int num)
{
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_SH4)
/* SH4 doesn't align register pairs, except for p{read,write}64 */
static inline int regpairs_aligned(void *cpu_env, int num)
{
    switch (num) {
    case TARGET_NR_pread64:
    case TARGET_NR_pwrite64:
        return 1;

    default:
        return 0;
    }
}
#elif defined(TARGET_XTENSA)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
#endif
#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};

/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EAGAIN]            = TARGET_EAGAIN,
    [EIDRM]             = TARGET_EIDRM,
    [ECHRNG]            = TARGET_ECHRNG,
    [EL2NSYNC]          = TARGET_EL2NSYNC,
    [EL3HLT]            = TARGET_EL3HLT,
    [EL3RST]            = TARGET_EL3RST,
    [ELNRNG]            = TARGET_ELNRNG,
    [EUNATCH]           = TARGET_EUNATCH,
    [ENOCSI]            = TARGET_ENOCSI,
    [EL2HLT]            = TARGET_EL2HLT,
    [EDEADLK]           = TARGET_EDEADLK,
    [ENOLCK]            = TARGET_ENOLCK,
    [EBADE]             = TARGET_EBADE,
    [EBADR]             = TARGET_EBADR,
    [EXFULL]            = TARGET_EXFULL,
    [ENOANO]            = TARGET_ENOANO,
    [EBADRQC]           = TARGET_EBADRQC,
    [EBADSLT]           = TARGET_EBADSLT,
    [EBFONT]            = TARGET_EBFONT,
    [ENOSTR]            = TARGET_ENOSTR,
    [ENODATA]           = TARGET_ENODATA,
    [ETIME]             = TARGET_ETIME,
    [ENOSR]             = TARGET_ENOSR,
    [ENONET]            = TARGET_ENONET,
    [ENOPKG]            = TARGET_ENOPKG,
    [EREMOTE]           = TARGET_EREMOTE,
    [ENOLINK]           = TARGET_ENOLINK,
    [EADV]              = TARGET_EADV,
    [ESRMNT]            = TARGET_ESRMNT,
    [ECOMM]             = TARGET_ECOMM,
    [EPROTO]            = TARGET_EPROTO,
    [EDOTDOT]           = TARGET_EDOTDOT,
    [EMULTIHOP]         = TARGET_EMULTIHOP,
    [EBADMSG]           = TARGET_EBADMSG,
    [ENAMETOOLONG]      = TARGET_ENAMETOOLONG,
    [EOVERFLOW]         = TARGET_EOVERFLOW,
    [ENOTUNIQ]          = TARGET_ENOTUNIQ,
    [EBADFD]            = TARGET_EBADFD,
    [EREMCHG]           = TARGET_EREMCHG,
    [ELIBACC]           = TARGET_ELIBACC,
    [ELIBBAD]           = TARGET_ELIBBAD,
    [ELIBSCN]           = TARGET_ELIBSCN,
    [ELIBMAX]           = TARGET_ELIBMAX,
    [ELIBEXEC]          = TARGET_ELIBEXEC,
    [EILSEQ]            = TARGET_EILSEQ,
    [ENOSYS]            = TARGET_ENOSYS,
    [ELOOP]             = TARGET_ELOOP,
    [ERESTART]          = TARGET_ERESTART,
    [ESTRPIPE]          = TARGET_ESTRPIPE,
    [ENOTEMPTY]         = TARGET_ENOTEMPTY,
    [EUSERS]            = TARGET_EUSERS,
    [ENOTSOCK]          = TARGET_ENOTSOCK,
    [EDESTADDRREQ]      = TARGET_EDESTADDRREQ,
    [EMSGSIZE]          = TARGET_EMSGSIZE,
    [EPROTOTYPE]        = TARGET_EPROTOTYPE,
    [ENOPROTOOPT]       = TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT]   = TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT]   = TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP]        = TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT]      = TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT]      = TARGET_EAFNOSUPPORT,
    [EADDRINUSE]        = TARGET_EADDRINUSE,
    [EADDRNOTAVAIL]     = TARGET_EADDRNOTAVAIL,
    [ENETDOWN]          = TARGET_ENETDOWN,
    [ENETUNREACH]       = TARGET_ENETUNREACH,
    [ENETRESET]         = TARGET_ENETRESET,
    [ECONNABORTED]      = TARGET_ECONNABORTED,
    [ECONNRESET]        = TARGET_ECONNRESET,
    [ENOBUFS]           = TARGET_ENOBUFS,
    [EISCONN]           = TARGET_EISCONN,
    [ENOTCONN]          = TARGET_ENOTCONN,
    [EUCLEAN]           = TARGET_EUCLEAN,
    [ENOTNAM]           = TARGET_ENOTNAM,
    [ENAVAIL]           = TARGET_ENAVAIL,
    [EISNAM]            = TARGET_EISNAM,
    [EREMOTEIO]         = TARGET_EREMOTEIO,
    [EDQUOT]            = TARGET_EDQUOT,
    [ESHUTDOWN]         = TARGET_ESHUTDOWN,
    [ETOOMANYREFS]      = TARGET_ETOOMANYREFS,
    [ETIMEDOUT]         = TARGET_ETIMEDOUT,
    [ECONNREFUSED]      = TARGET_ECONNREFUSED,
    [EHOSTDOWN]         = TARGET_EHOSTDOWN,
    [EHOSTUNREACH]      = TARGET_EHOSTUNREACH,
    [EALREADY]          = TARGET_EALREADY,
    [EINPROGRESS]       = TARGET_EINPROGRESS,
    [ESTALE]            = TARGET_ESTALE,
    [ECANCELED]         = TARGET_ECANCELED,
    [ENOMEDIUM]         = TARGET_ENOMEDIUM,
    [EMEDIUMTYPE]       = TARGET_EMEDIUMTYPE,
#ifdef ENOKEY
    [ENOKEY]            = TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED]       = TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED]       = TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED]      = TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD]        = TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE]   = TARGET_ENOTRECOVERABLE,
#endif
#ifdef ENOMSG
    [ENOMSG]            = TARGET_ENOMSG,
#endif
#ifdef ERFKILL
    [ERFKILL]           = TARGET_ERFKILL,
#endif
#ifdef EHWPOISON
    [EHWPOISON]         = TARGET_EHWPOISON,
#endif
};
static inline int host_to_target_errno(int err)
{
    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
        host_to_target_errno_table[err]) {
        return host_to_target_errno_table[err];
    }
    return err;
}
static inline int target_to_host_errno(int err)
{
    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
        target_to_host_errno_table[err]) {
        return target_to_host_errno_table[err];
    }
    return err;
}
static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1) {
        return -host_to_target_errno(errno);
    } else {
        return ret;
    }
}
const char *target_strerror(int err)
{
    if (err == TARGET_ERESTARTSYS) {
        return "To be restarted";
    }
    if (err == TARGET_QEMU_ESIGRETURN) {
        return "Successful exit from sigreturn";
    }

    if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
        return NULL;
    }
    return strerror(target_to_host_errno(err));
}
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
                      type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
                      type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
                        type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
                      type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
                        type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
              int, flags, mode_t, mode)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
              struct rusage *, rusage)
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
              int, options, struct rusage *, rusage)
safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
              size_t, sigsetsize)
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
              size_t, sigsetsize)
safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
              socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#ifdef TARGET_NR_clock_nanosleep
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
#ifdef __NR_ipc
safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
#endif
#ifdef __NR_msgsnd
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
#endif
#ifdef __NR_msgrcv
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
#endif
#ifdef __NR_semtimedop
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
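/* Illustrative call (not from the original code), following the rules in
 * the comment above -- always the 64-bit constants and struct:
 *
 *     struct flock64 fl64;
 *     ...
 *     ret = get_errno(safe_fcntl(fd, F_GETLK64, &fl64));
 */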
static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}
static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}
//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)

/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done...  */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    } else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
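/* Worked example: if target_brk == brk_page and the guest asks for one more
 * byte, new_brk > brk_page, so new_alloc_size rounds up to one host page and
 * target_mmap() is asked to place it exactly at brk_page; any other placement
 * is treated as a failure and unmapped again, per the comment above.
 */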
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}
static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
                                              const struct timeval *tv)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
                                                 struct timespec *host_ts)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}
static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
#if defined(TARGET_WANT_OLD_SYS_SELECT)
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
#endif
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}

static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);
    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK &&
        len >= sizeof(struct target_sockaddr_nl)) {
        struct target_sockaddr_nl *target_nl =
            (struct target_sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
            (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            gemu_log("Host cmsg overflow\n");
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct target_cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        tgt_len = len;
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IP:
            switch (cmsg->cmsg_type) {
            case IP_TTL:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IP_RECVERR:
            {
                struct errhdr_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in offender;
                };
                struct errhdr_t *errh = (struct errhdr_t *)data;
                struct errhdr_t *target_errh =
                    (struct errhdr_t *)target_data;

                if (len != sizeof(struct errhdr_t) ||
                    tgt_len != sizeof(struct errhdr_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                                        (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IPV6:
            switch (cmsg->cmsg_type) {
            case IPV6_HOPLIMIT:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IPV6_RECVERR:
            {
                struct errhdr6_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in6 offender;
                };
                struct errhdr6_t *errh = (struct errhdr6_t *)data;
                struct errhdr6_t *target_errh =
                    (struct errhdr6_t *)target_data;

                if (len != sizeof(struct errhdr6_t) ||
                    tgt_len != sizeof(struct errhdr6_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                                        (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
        tgt_space = TARGET_CMSG_SPACE(tgt_len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
/* do_setsockopt() must return target values and target errnos. */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTTL:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
        case IPV6_ADDRFORM:
        case IPV6_2292PKTINFO:
        case IPV6_RECVTCLASS:
        case IPV6_RECVRTHDR:
        case IPV6_2292RTHDR:
        case IPV6_RECVHOPOPTS:
        case IPV6_2292HOPOPTS:
        case IPV6_RECVDSTOPTS:
        case IPV6_2292DSTOPTS:
        case IPV6_TCLASS:
#ifdef IPV6_RECVPATHMTU
        case IPV6_RECVPATHMTU:
#endif
#ifdef IPV6_TRANSPARENT
        case IPV6_TRANSPARENT:
#endif
#ifdef IPV6_FREEBIND
        case IPV6_FREEBIND:
#endif
#ifdef IPV6_RECVORIGDSTADDR
        case IPV6_RECVORIGDSTADDR:
#endif
            val = 0;
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        case IPV6_PKTINFO:
        {
            struct in6_pktinfo pki;

            if (optlen < sizeof(pki)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
                return -TARGET_EFAULT;
            }

            pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &pki, sizeof(pki)));
            break;
        }
        case IPV6_ADD_MEMBERSHIP:
        case IPV6_DROP_MEMBERSHIP:
        {
            struct ipv6_mreq ipv6mreq;

            if (optlen < sizeof(ipv6mreq)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
                return -TARGET_EFAULT;
            }

            ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &ipv6mreq, sizeof(ipv6mreq)));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_ICMPV6:
        switch (optname) {
        case ICMPV6_FILTER:
        {
            struct icmp6_filter icmp6f;

            if (optlen > sizeof(icmp6f)) {
                optlen = sizeof(icmp6f);
            }

            if (copy_from_user(&icmp6f, optval_addr, optlen)) {
                return -TARGET_EFAULT;
            }

            for (val = 0; val < 8; val++) {
                icmp6f.data[val] = tswap32(icmp6f.data[val]);
            }

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &icmp6f, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
2030 /* those take an u32 value */
2031 if (optlen
< sizeof(uint32_t)) {
2032 return -TARGET_EINVAL
;
2035 if (get_user_u32(val
, optval_addr
)) {
2036 return -TARGET_EFAULT
;
2038 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2039 &val
, sizeof(val
)));
#if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
    case SOL_ALG:
        switch (optname) {
        case ALG_SET_KEY:
        {
            char *alg_key = g_malloc(optlen);

            if (!alg_key) {
                return -TARGET_ENOMEM;
            }
            if (copy_from_user(alg_key, optval_addr, optlen)) {
                g_free(alg_key);
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       alg_key, optlen));
            g_free(alg_key);
            break;
        }
        case ALG_SET_AEAD_AUTHSIZE:
        {
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       NULL, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
#endif
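    /*
     * Note added for clarity: unlike the integer options above, the
     * ALG_SET_KEY payload for an AF_ALG socket is an opaque byte string
     * (raw key material), so it is handed to the host verbatim with no
     * byte-swapping; only the buffer itself has to be copied across the
     * guest/host boundary.
     */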
    case TARGET_SOL_SOCKET:
        switch (optname) {
        case TARGET_SO_RCVTIMEO:
        {
            struct timeval tv;

            optname = SO_RCVTIMEO;

set_timeout:
            if (optlen != sizeof(struct target_timeval)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user_timeval(&tv, optval_addr)) {
                return -TARGET_EFAULT;
            }

            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                       &tv, sizeof(tv)));
            return ret;
        }
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            goto set_timeout;
        case TARGET_SO_ATTACH_FILTER:
        {
            struct target_sock_fprog *tfprog;
            struct target_sock_filter *tfilter;
            struct sock_fprog fprog;
            struct sock_filter *filter;
            int i;

            if (optlen != sizeof(*tfprog)) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            if (!lock_user_struct(VERIFY_READ, tfilter,
                                  tswapal(tfprog->filter), 0)) {
                unlock_user_struct(tfprog, optval_addr, 1);
                return -TARGET_EFAULT;
            }

            fprog.len = tswap16(tfprog->len);
            filter = g_try_new(struct sock_filter, fprog.len);
            if (filter == NULL) {
                unlock_user_struct(tfilter, tfprog->filter, 1);
                unlock_user_struct(tfprog, optval_addr, 1);
                return -TARGET_ENOMEM;
            }
            for (i = 0; i < fprog.len; i++) {
                filter[i].code = tswap16(tfilter[i].code);
                filter[i].jt = tfilter[i].jt;
                filter[i].jf = tfilter[i].jf;
                filter[i].k = tswap32(tfilter[i].k);
            }
            fprog.filter = filter;

            ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
                            SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
            g_free(filter);

            unlock_user_struct(tfilter, tfprog->filter, 1);
            unlock_user_struct(tfprog, optval_addr, 1);
            return ret;
        }
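        /*
         * Note added for clarity: in the classic-BPF conversion above only
         * the multi-byte fields need swapping -- 'len' and 'code' are
         * 16-bit (tswap16) and the operand 'k' is 32-bit (tswap32) --
         * while the jump offsets 'jt'/'jf' are single bytes and can be
         * copied as-is.
         */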
        case TARGET_SO_BINDTODEVICE:
        {
            char *dev_ifname, *addr_ifname;

            if (optlen > IFNAMSIZ - 1) {
                optlen = IFNAMSIZ - 1;
            }
            dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!dev_ifname) {
                return -TARGET_EFAULT;
            }
            optname = SO_BINDTODEVICE;
            addr_ifname = alloca(IFNAMSIZ);
            memcpy(addr_ifname, dev_ifname, optlen);
            addr_ifname[optlen] = 0;
            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                       addr_ifname, optlen));
            unlock_user(dev_ifname, optval_addr, 0);
            return ret;
        }
        case TARGET_SO_LINGER:
        {
            struct linger lg;
            struct target_linger *tlg;

            if (optlen != sizeof(struct target_linger)) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
                return -TARGET_EFAULT;
            }
            __get_user(lg.l_onoff, &tlg->l_onoff);
            __get_user(lg.l_linger, &tlg->l_linger);
            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
                                       &lg, sizeof(lg)));
            unlock_user_struct(tlg, optval_addr, 0);
            return ret;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            break;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            break;
#ifdef SO_REUSEPORT
        case TARGET_SO_REUSEPORT:
            optname = SO_REUSEPORT;
            break;
#endif
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            break;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            break;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            break;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            break;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            break;
        case TARGET_SO_SNDBUFFORCE:
            optname = SO_SNDBUFFORCE;
            break;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            break;
        case TARGET_SO_RCVBUFFORCE:
            optname = SO_RCVBUFFORCE;
            break;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            break;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            break;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            break;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            break;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            break;
        case TARGET_SO_PASSSEC:
            optname = SO_PASSSEC;
            break;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            break;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            break;
        default:
            goto unimplemented;
        }
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                   &val, sizeof(val)));
        break;
#ifdef SOL_NETLINK
    case SOL_NETLINK:
        switch (optname) {
        case NETLINK_PKTINFO:
        case NETLINK_ADD_MEMBERSHIP:
        case NETLINK_DROP_MEMBERSHIP:
        case NETLINK_BROADCAST_ERROR:
        case NETLINK_NO_ENOBUFS:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LISTEN_ALL_NSID:
        case NETLINK_CAP_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
        case NETLINK_EXT_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
        case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
            break;
        default:
            goto unimplemented;
        }
        val = 0;
        if (optlen < sizeof(uint32_t)) {
            return -TARGET_EINVAL;
        }
        if (get_user_u32(val, optval_addr)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
                                   sizeof(val)));
        break;
#endif /* SOL_NETLINK */
    default:
    unimplemented:
        gemu_log("Unsupported setsockopt level=%d optname=%d\n",
                 level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}
/* do_getsockopt() Must return target values and target errnos. */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case TARGET_SO_LINGER:
        {
            struct linger lg;
            socklen_t lglen;
            struct target_linger *tlg;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            lglen = sizeof(lg);
            ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
                                       &lg, &lglen));
            if (ret < 0) {
                return ret;
            }
            if (len > lglen) {
                len = lglen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(lg.l_onoff, &tlg->l_onoff);
            __put_user(lg.l_linger, &tlg->l_linger);
            unlock_user_struct(tlg, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
#ifdef SO_REUSEPORT
        case TARGET_SO_REUSEPORT:
            optname = SO_REUSEPORT;
            goto int_case;
#endif
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            val = host_to_target_sock_type(val);
        }
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTTL:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
        case IPV6_ADDRFORM:
        case IPV6_2292PKTINFO:
        case IPV6_RECVTCLASS:
        case IPV6_RECVRTHDR:
        case IPV6_2292RTHDR:
        case IPV6_RECVHOPOPTS:
        case IPV6_2292HOPOPTS:
        case IPV6_RECVDSTOPTS:
        case IPV6_2292DSTOPTS:
        case IPV6_TCLASS:
#ifdef IPV6_RECVPATHMTU
        case IPV6_RECVPATHMTU:
#endif
#ifdef IPV6_TRANSPARENT
        case IPV6_TRANSPARENT:
#endif
#ifdef IPV6_FREEBIND
        case IPV6_FREEBIND:
#endif
#ifdef IPV6_RECVORIGDSTADDR
        case IPV6_RECVORIGDSTADDR:
#endif
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
#ifdef SOL_NETLINK
    case SOL_NETLINK:
        switch (optname) {
        case NETLINK_PKTINFO:
        case NETLINK_BROADCAST_ERROR:
        case NETLINK_NO_ENOBUFS:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LISTEN_ALL_NSID:
        case NETLINK_CAP_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
        case NETLINK_EXT_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
        case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len != sizeof(val)) {
                return -TARGET_EINVAL;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0) {
                return ret;
            }
            if (put_user_u32(lv, optlen)
                || put_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            break;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LIST_MEMBERSHIPS:
        {
            uint32_t *results;
            int i;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }
            results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
            if (!results) {
                return -TARGET_EFAULT;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, optname,
                                       results, &lv));
            if (ret < 0) {
                unlock_user(results, optval_addr, 0);
                return ret;
            }
            /* swap host endianness to target endianness. */
            for (i = 0; i < (len / sizeof(uint32_t)); i++) {
                results[i] = tswap32(results[i]);
            }
            if (put_user_u32(lv, optlen)) {
                return -TARGET_EFAULT;
            }
            unlock_user(results, optval_addr, 0);
            break;
        }
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
        default:
            goto unimplemented;
        }
        break;
#endif /* SOL_NETLINK */
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
/* Convert target low/high pair representing file offset into the host
 * low/high pair. This function doesn't handle offsets bigger than 64 bits
 * as the kernel doesn't handle them either.
 */
static void target_to_host_low_high(abi_ulong tlow,
                                    abi_ulong thigh,
                                    unsigned long *hlow,
                                    unsigned long *hhigh)
{
    uint64_t off = tlow |
        ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
        TARGET_LONG_BITS / 2;

    *hlow = off;
    *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
}
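/*
 * Worked example (added note): with a 32-bit guest on a 64-bit host,
 * tlow = 0x89abcdef and thigh = 0x1 combine into off = 0x1_89abcdef;
 * *hlow then receives the whole value and *hhigh gets 0, since
 * (off >> 32) >> 32 == 0 on a 64-bit host.  Shifting twice by half the
 * width keeps both shifts legal C even when the type is exactly that
 * wide (a single full-width shift would be undefined behaviour).
 */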
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                         abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec) {
        for (i = 0; i < count; i++) {
            abi_ulong base = tswapal(target_vec[i].iov_base);
            abi_long len = tswapal(target_vec[i].iov_len);
            if (len < 0) {
                break;
            }
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
        unlock_user(target_vec, target_addr, 0);
    }

    g_free(vec);
}
static inline int target_to_host_sock_type(int *type)
{
    int host_type = 0;
    int target_type = *type;

    switch (target_type & TARGET_SOCK_TYPE_MASK) {
    case TARGET_SOCK_DGRAM:
        host_type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        host_type = SOCK_STREAM;
        break;
    default:
        host_type = target_type & TARGET_SOCK_TYPE_MASK;
        break;
    }
    if (target_type & TARGET_SOCK_CLOEXEC) {
#if defined(SOCK_CLOEXEC)
        host_type |= SOCK_CLOEXEC;
#else
        return -TARGET_EINVAL;
#endif
    }
    if (target_type & TARGET_SOCK_NONBLOCK) {
#if defined(SOCK_NONBLOCK)
        host_type |= SOCK_NONBLOCK;
#elif !defined(O_NONBLOCK)
        return -TARGET_EINVAL;
#endif
    }
    *type = host_type;
    return 0;
}
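/*
 * Illustrative note (the numeric values are per-target, so treat the
 * constants as an assumption): for a guest whose TARGET_SOCK_NONBLOCK
 * is 04000, socket(AF_INET, SOCK_STREAM | 04000, 0) arrives here with
 * both pieces in target_type; the switch maps the type proper onto the
 * host SOCK_STREAM and the flag becomes the host's SOCK_NONBLOCK, or is
 * left for sock_flags_fixup() below when the host header lacks it.
 */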
/* Try to emulate socket type flags after socket creation.  */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
/* do_socket() Must return target values and target errnos. */
static abi_long do_socket(int domain, int type, int protocol)
{
    int target_type = type;
    int ret;

    ret = target_to_host_sock_type(&type);
    if (ret) {
        return ret;
    }

    if (domain == PF_NETLINK && !(
#ifdef CONFIG_RTNETLINK
         protocol == NETLINK_ROUTE ||
#endif
         protocol == NETLINK_KOBJECT_UEVENT ||
         protocol == NETLINK_AUDIT)) {
        return -EPFNOSUPPORT;
    }

    if (domain == AF_PACKET ||
        (domain == AF_INET && type == SOCK_PACKET)) {
        protocol = tswap16(protocol);
    }

    ret = get_errno(socket(domain, type, protocol));
    if (ret >= 0) {
        ret = sock_flags_fixup(ret, target_type);
        if (type == SOCK_PACKET) {
            /* Manage an obsolete case:
             * if socket type is SOCK_PACKET, bind by name
             */
            fd_trans_register(ret, &target_packet_trans);
        } else if (domain == PF_NETLINK) {
            switch (protocol) {
#ifdef CONFIG_RTNETLINK
            case NETLINK_ROUTE:
                fd_trans_register(ret, &target_netlink_route_trans);
                break;
#endif
            case NETLINK_KOBJECT_UEVENT:
                /* nothing to do: messages are strings */
                break;
            case NETLINK_AUDIT:
                fd_trans_register(ret, &target_netlink_audit_trans);
                break;
            default:
                g_assert_not_reached();
            }
        }
    }
    return ret;
}
/* do_bind() Must return target values and target errnos. */
static abi_long do_bind(int sockfd, abi_ulong target_addr,
                        socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(bind(sockfd, addr, addrlen));
}
/* do_connect() Must return target values and target errnos. */
static abi_long do_connect(int sockfd, abi_ulong target_addr,
                           socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(safe_connect(sockfd, addr, addrlen));
}
/* do_sendrecvmsg_locked() Must return target values and target errnos. */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    abi_ulong count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
             */
            msg.msg_name = (void *)-1;
        } else if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    memset(msg.msg_control, 0, msg.msg_controllen);
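    /*
     * Note added for clarity: the host control buffer is sized at twice
     * the guest's msg_controllen because host cmsg headers can be larger
     * than the target's (e.g. 64-bit lengths and alignment on the host
     * vs. a 32-bit guest), so the converted messages may need extra room.
     */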
    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);

    if (count > IOV_MAX) {
        /* sendmsg/recvmsg return a different errno for this condition than
         * readv/writev, so we must catch it here before lock_iovec() does.
         */
        ret = -TARGET_EMSGSIZE;
        goto out2;
    }

    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            void *host_msg;

            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                      MIN(msg.msg_iov->iov_len, len));
            } else {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                msgp->msg_flags = tswap32(msg.msg_flags);
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                    msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }
                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}
static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
                               int flags, int send)
{
    abi_long ret;
    struct target_msghdr *msgp;

    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
                          msgp,
                          target_msg,
                          send ? 1 : 0)) {
        return -TARGET_EFAULT;
    }
    ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
    unlock_user_struct(msgp, target_msg, send ? 0 : 1);
    return ret;
}
/* We don't rely on the C library to have sendmmsg/recvmmsg support,
 * so it might not have this *mmsg-specific flag either.
 */
#ifndef MSG_WAITFORONE
#define MSG_WAITFORONE 0x10000
#endif
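/*
 * Note added for clarity: 0x10000 is the value the Linux kernel itself
 * uses for MSG_WAITFORONE, so defining it here lets the flag be passed
 * straight through to the host syscall even when the C library headers
 * predate recvmmsg().
 */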
static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
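/*
 * Worked example of the return convention above (added note): with
 * vlen == 4, if two datagrams transfer and the third fails with EAGAIN,
 * the loop breaks with i == 2 and the guest sees 2; the guest only sees
 * the error itself when not even the first datagram went through.
 */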
/* do_accept4() Must return target values and target errnos. */
static abi_long do_accept4(int fd, abi_ulong target_addr,
                           abi_ulong target_addrlen_addr, int flags)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;
    int host_flags;

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    if (target_addr == 0) {
        return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
    }

    /* linux returns EINVAL if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EINVAL;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EINVAL;

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}
/* do_getpeername() Must return target values and target errnos. */
static abi_long do_getpeername(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(getpeername(fd, addr, &ret_addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}
/* do_getsockname() Must return target values and target errnos. */
static abi_long do_getsockname(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(getsockname(fd, addr, &ret_addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}
/* do_socketpair() Must return target values and target errnos. */
static abi_long do_socketpair(int domain, int type, int protocol,
                              abi_ulong target_tab_addr)
{
    int tab[2];
    abi_long ret;

    target_to_host_sock_type(&type);

    ret = get_errno(socketpair(domain, type, protocol, tab));
    if (!is_error(ret)) {
        if (put_user_s32(tab[0], target_tab_addr)
            || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
/* do_sendto() Must return target values and target errnos. */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    void *copy_msg = NULL;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (fd_trans_target_to_host_data(fd)) {
        copy_msg = host_msg;
        host_msg = g_malloc(len);
        memcpy(host_msg, copy_msg, len);
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            goto fail;
        }
    }
    if (target_addr) {
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            goto fail;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
fail:
    if (copy_msg) {
        g_free(host_msg);
        host_msg = copy_msg;
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
/* do_recvfrom() Must return target values and target errnos. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret_addrlen = addrlen;
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &ret_addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet.  */
        addrlen = 0; /* To keep compiler quiet.  */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        if (fd_trans_host_to_target_data(fd)) {
            abi_long trans;
            trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
            if (is_error(trans)) {
                ret = trans;
                goto fail;
            }
        }
        if (target_addr) {
            host_to_target_sockaddr(target_addr, addr,
                                    MIN(addrlen, ret_addrlen));
            if (put_user_u32(ret_addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        unlock_user(host_msg, msg, len);
    } else {
fail:
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;
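    /*
     * Note added for clarity: socketcall multiplexes the whole socket API
     * through a single syscall; 'vptr' is a guest pointer to an array of
     * abi_longs holding the real arguments.  E.g. a guest
     * socketcall(TARGET_SYS_BIND, args) stores {fd, addr, addrlen} at
     * args[0..2], which the loop below fetches into a[] via get_user_ual().
     */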
    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now that we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
#define N_SHM_REGIONS	32

static struct shm_region {
    abi_ulong start;
    abi_ulong size;
    bool in_use;
} shm_regions[N_SHM_REGIONS];
#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct */
struct target_semid64_ds
{
  struct target_ipc_perm sem_perm;
  abi_ulong sem_otime;
#if TARGET_ABI_BITS == 32
  abi_ulong __unused1;
#endif
  abi_ulong sem_ctime;
#if TARGET_ABI_BITS == 32
  abi_ulong __unused2;
#endif
  abi_ulong sem_nsems;
  abi_ulong __unused3;
  abi_ulong __unused4;
};
#endif
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
    host_sd->sem_otime = tswapal(target_sd->sem_otime);
    host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
    target_sd->sem_otime = tswapal(host_sd->sem_otime);
    target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};

static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}
union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};

union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};
static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = g_try_new(unsigned short, nsems);
    if (!*host_array) {
        return -TARGET_ENOMEM;
    }
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
    if (!array) {
        g_free(*host_array);
        return -TARGET_EFAULT;
    }

    for(i=0; i<nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}
static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
                                               unsigned short **host_array)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    array = lock_user(VERIFY_WRITE, target_addr,
                      nsems*sizeof(unsigned short), 0);
    if (!array)
        return -TARGET_EFAULT;

    for(i=0; i<nsems; i++) {
        __put_user((*host_array)[i], &array[i]);
    }
    g_free(*host_array);
    unlock_user(array, target_addr, 1);

    return 0;
}
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;
    cmd &= 0xff;

    switch( cmd ) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element.  To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
         * a swap of the 4 byte val field. In other cases, the data is
         * already in proper host byte order. */
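        /*
         * Worked example (added note): a big-endian 64-bit guest on a
         * little-endian host passes val == 1 in the first four bytes of
         * its 8-byte semun, which get_user_ual() has already turned into
         * target_arg == 0x0000000100000000.  Reading .val directly would
         * yield 0; byteswapping the whole 8 bytes and then tswap32()ing
         * the 4-byte val field recovers the intended 1.
         */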
        if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};

static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,
                                             unsigned nsops)
{
    struct target_sembuf *target_sembuf;
    int i;

    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops*sizeof(struct target_sembuf), 1);
    if (!target_sembuf)
        return -TARGET_EFAULT;

    for(i=0; i<nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
    }

    unlock_user(target_sembuf, target_addr, 0);

    return 0;
}
static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
{
    struct sembuf sops[nsops];
    abi_long ret;

    if (target_to_host_sembuf(sops, ptr, nsops))
        return -TARGET_EFAULT;

    ret = -TARGET_ENOSYS;
#ifdef __NR_semtimedop
    ret = get_errno(safe_semtimedop(semid, sops, nsops, NULL));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, 0));
    }
#endif
    return ret;
}
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qnum;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
};
static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapal(target_md->msg_stime);
    host_md->msg_rtime = tswapal(target_md->msg_rtime);
    host_md->msg_ctime = tswapal(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapal(target_md->msg_qnum);
    host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
    host_md->msg_lspid = tswapal(target_md->msg_lspid);
    host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);
    return 0;
}
static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapal(host_md->msg_stime);
    target_md->msg_rtime = tswapal(host_md->msg_rtime);
    target_md->msg_ctime = tswapal(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapal(host_md->msg_qnum);
    target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
    target_md->msg_lspid = tswapal(host_md->msg_lspid);
    target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
    return 0;
}
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};

static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};

static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgsnd
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb, 0));
    }
#endif
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgrcv
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
                        msgflg, host_mb, msgtyp));
    }
#endif

    if (ret > 0) {
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};

static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
{
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
    return 0;
}
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};

static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
{
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
    return 0;
}
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return TARGET_PAGE_SIZE;
}
#endif
static inline abi_ulong do_shmat(CPUArchState *cpu_env,
                                 int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i, ret;
    abi_ulong shmlba;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }
    if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        /* In order to use the host shmat, we need to honor host SHMLBA.  */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;
}
static inline abi_long do_shmdt(abi_ulong shmaddr)
{
    int i;
    abi_long rv;

    mmap_lock();

    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
            shm_regions[i].in_use = false;
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
            break;
        }
    }
    rv = get_errno(shmdt(g2h(shmaddr)));

    mmap_unlock();

    return rv;
}
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos.  */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                                tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
/* kernel structure types definitions */

#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL
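/*
 * Note added for clarity: each STRUCT(name, ...) line in syscall_types.h
 * is expanded twice by the two passes above -- once into an enum
 * constant STRUCT_name usable with MK_STRUCT(), and once into a
 * NULL-terminated argtype array struct_name_def[] that drives
 * thunk_convert() for that structure.
 */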
typedef struct IOCTLEntry IOCTLEntry;

typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg);

struct IOCTLEntry {
    int target_cmd;
    unsigned int host_cmd;
    const char *name;
    int access;
    do_ioctl_fn *do_ioctl;
    const argtype arg_type[5];
};

#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

#define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))
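/*
 * Note added for clarity: capping fm_extent_count at
 * (UINT_MAX - sizeof(struct fiemap)) / sizeof(struct fiemap_extent)
 * guarantees that the later computation
 * sizeof(*fm) + fm_extent_count * sizeof(struct fiemap_extent)
 * cannot wrap a 32-bit size.
 */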
4364 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4365 int fd
, int cmd
, abi_long arg
)
4367 /* The parameter for this ioctl is a struct fiemap followed
4368 * by an array of struct fiemap_extent whose size is set
4369 * in fiemap->fm_extent_count. The array is filled in by the
4372 int target_size_in
, target_size_out
;
4374 const argtype
*arg_type
= ie
->arg_type
;
4375 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
4378 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
4382 assert(arg_type
[0] == TYPE_PTR
);
4383 assert(ie
->access
== IOC_RW
);
4385 target_size_in
= thunk_type_size(arg_type
, 0);
4386 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
4388 return -TARGET_EFAULT
;
4390 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4391 unlock_user(argptr
, arg
, 0);
4392 fm
= (struct fiemap
*)buf_temp
;
4393 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
4394 return -TARGET_EINVAL
;
4397 outbufsz
= sizeof (*fm
) +
4398 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
4400 if (outbufsz
> MAX_STRUCT_SIZE
) {
4401 /* We can't fit all the extents into the fixed size buffer.
4402 * Allocate one that is large enough and use it instead.
4404 fm
= g_try_malloc(outbufsz
);
4406 return -TARGET_ENOMEM
;
4408 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
4411 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, fm
));
4412 if (!is_error(ret
)) {
4413 target_size_out
= target_size_in
;
4414 /* An extent_count of 0 means we were only counting the extents
4415 * so there are no structs to copy
4417 if (fm
->fm_extent_count
!= 0) {
4418 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
4420 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
4422 ret
= -TARGET_EFAULT
;
4424 /* Convert the struct fiemap */
4425 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
4426 if (fm
->fm_extent_count
!= 0) {
4427 p
= argptr
+ target_size_in
;
4428 /* ...and then all the struct fiemap_extents */
4429 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
4430 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
4435 unlock_user(argptr
, arg
, target_size_out
);
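
/*
 * Illustrative guest-side use of FS_IOC_FIEMAP (a sketch, not part of this
 * file; it assumes a Linux guest with <linux/fs.h> and <linux/fiemap.h>).
 * Setting fm_extent_count to 0 asks the kernel only to count extents,
 * which is exactly the case above where no extent structs are copied back:
 *
 *   #include <linux/fiemap.h>
 *   #include <linux/fs.h>
 *   #include <string.h>
 *   #include <sys/ioctl.h>
 *
 *   int count_extents(int fd)
 *   {
 *       struct fiemap fm;
 *       memset(&fm, 0, sizeof(fm));
 *       fm.fm_length = ~0ULL;       // map the whole file
 *       fm.fm_extent_count = 0;     // count only, no extent array follows
 *       if (ioctl(fd, FS_IOC_FIEMAP, &fm) < 0) {
 *           return -1;
 *       }
 *       return fm.fm_mapped_extents;
 *   }
 */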
static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
                                int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    void *argptr;
    int ret;
    struct ifconf *host_ifconf;
    uint32_t outbufsz;
    const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
    int target_ifreq_size;
    int nb_ifreq;
    int free_buf = 0;
    int i;
    int target_ifc_len;
    abi_long target_ifc_buf;
    int host_ifc_len;
    char *host_ifc_buf;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr)
        return -TARGET_EFAULT;
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
    target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
    target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);

    if (target_ifc_buf != 0) {
        target_ifc_len = host_ifconf->ifc_len;
        nb_ifreq = target_ifc_len / target_ifreq_size;
        host_ifc_len = nb_ifreq * sizeof(struct ifreq);

        outbufsz = sizeof(*host_ifconf) + host_ifc_len;
        if (outbufsz > MAX_STRUCT_SIZE) {
            /*
             * We can't fit all the ifreq entries into the fixed size buffer.
             * Allocate one that is large enough and use it instead.
             */
            host_ifconf = malloc(outbufsz);
            if (!host_ifconf) {
                return -TARGET_ENOMEM;
            }
            memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
            free_buf = 1;
        }
        host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);

        host_ifconf->ifc_len = host_ifc_len;
    } else {
        host_ifc_buf = NULL;
    }
    host_ifconf->ifc_buf = host_ifc_buf;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
    if (!is_error(ret)) {
        /* convert host ifc_len to target ifc_len */

        nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
        target_ifc_len = nb_ifreq * target_ifreq_size;
        host_ifconf->ifc_len = target_ifc_len;

        /* restore target ifc_buf */

        host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;

        /* copy struct ifconf to target user */

        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr)
            return -TARGET_EFAULT;
        thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);

        if (target_ifc_buf != 0) {
            /* copy ifreq[] to target user */
            argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
            for (i = 0; i < nb_ifreq; i++) {
                thunk_convert(argptr + i * target_ifreq_size,
                              host_ifc_buf + i * sizeof(struct ifreq),
                              ifreq_arg_type, THUNK_TARGET);
            }
            unlock_user(argptr, target_ifc_buf, target_ifc_len);
        }
    }

    if (free_buf) {
        free(host_ifconf);
    }

    return ret;
}
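
/*
 * Illustrative guest-side SIOCGIFCONF call (a sketch, not part of this
 * file):
 *
 *   #include <net/if.h>
 *   #include <sys/ioctl.h>
 *
 *   int count_interfaces(int sock)
 *   {
 *       struct ifreq reqs[16];
 *       struct ifconf ifc;
 *       ifc.ifc_len = sizeof(reqs);
 *       ifc.ifc_req = reqs;
 *       if (ioctl(sock, SIOCGIFCONF, &ifc) < 0) {
 *           return -1;
 *       }
 *       return ifc.ifc_len / sizeof(struct ifreq);
 *   }
 *
 * ifc_len has to be translated in both directions above because the
 * target's struct ifreq can differ in size from the host's.
 */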
#if defined(CONFIG_USBFS)
#if HOST_LONG_BITS > 64
#error USBDEVFS thunks do not support >64 bit hosts yet.
#endif
struct live_urb {
    uint64_t target_urb_adr;
    uint64_t target_buf_adr;
    char *target_buf_ptr;
    struct usbdevfs_urb host_urb;
};

static GHashTable *usbdevfs_urb_hashtable(void)
{
    static GHashTable *urb_hashtable;

    if (!urb_hashtable) {
        urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
    }
    return urb_hashtable;
}

static void urb_hashtable_insert(struct live_urb *urb)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    g_hash_table_insert(urb_hashtable, urb, urb);
}

static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
}

static void urb_hashtable_remove(struct live_urb *urb)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    g_hash_table_remove(urb_hashtable, urb);
}

static abi_long
do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
                          int fd, int cmd, abi_long arg)
{
    const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
    const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
    struct live_urb *lurb;
    void *argptr;
    uint64_t hurb;
    int target_size;
    uintptr_t target_urb_adr;
    abi_long ret;

    target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);

    memset(buf_temp, 0, sizeof(uint64_t));
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (is_error(ret)) {
        return ret;
    }

    memcpy(&hurb, buf_temp, sizeof(uint64_t));
    lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
    if (!lurb->target_urb_adr) {
        return -TARGET_EFAULT;
    }
    urb_hashtable_remove(lurb);
    unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
        lurb->host_urb.buffer_length);
    lurb->target_buf_ptr = NULL;

    /* restore the guest buffer pointer */
    lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;

    /* update the guest urb struct */
    argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
    unlock_user(argptr, lurb->target_urb_adr, target_size);

    target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
    /* write back the urb handle */
    argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
    target_urb_adr = lurb->target_urb_adr;
    thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
    unlock_user(argptr, arg, target_size);

    g_free(lurb);
    return ret;
}

static abi_long
do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
                             uint8_t *buf_temp __attribute__((unused)),
                             int fd, int cmd, abi_long arg)
{
    struct live_urb *lurb;

    /* map target address back to host URB with metadata. */
    lurb = urb_hashtable_lookup(arg);
    if (!lurb) {
        return -TARGET_EFAULT;
    }
    return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
}

static abi_long
do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    abi_long ret;
    void *argptr;
    int rw_dir;
    struct live_urb *lurb;

    /*
     * each submitted URB needs to map to a unique ID for the
     * kernel, and that unique ID needs to be a pointer to
     * host memory. hence, we need to malloc for each URB.
     * isochronous transfers have a variable length struct.
     */
    arg_type++;
    target_size = thunk_type_size(arg_type, THUNK_TARGET);

    /* construct host copy of urb and metadata */
    lurb = g_try_malloc0(sizeof(struct live_urb));
    if (!lurb) {
        return -TARGET_ENOMEM;
    }

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    lurb->target_urb_adr = arg;
    lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;

    /* buffer space used depends on endpoint type so lock the entire buffer */
    /* control type urbs should check the buffer contents for true direction */
    rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
    lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
        lurb->host_urb.buffer_length, 1);
    if (lurb->target_buf_ptr == NULL) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* update buffer pointer in host copy */
    lurb->host_urb.buffer = lurb->target_buf_ptr;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
    if (is_error(ret)) {
        unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
        g_free(lurb);
    } else {
        urb_hashtable_insert(lurb);
    }

    return ret;
}
#endif /* CONFIG_USBFS */
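
/*
 * How the URB bookkeeping above fits together (summary comment):
 * SUBMITURB hands the kernel the address of lurb->host_urb, so REAPURB can
 * recover the whole wrapper with the usual container-of step,
 *
 *   lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
 *
 * while the hash table keyed on the 64-bit guest URB address is only
 * needed by DISCARDURB, which is handed the guest pointer rather than the
 * host one.
 */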
static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                            int cmd, abi_long arg)
{
    void *argptr;
    struct dm_ioctl *host_dm;
    abi_long guest_data;
    uint32_t guest_data_size;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    abi_long ret;
    void *big_buf = NULL;
    char *host_data;

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* buf_temp is too small, so fetch things into a bigger buffer */
    big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
    memcpy(big_buf, buf_temp, target_size);
    buf_temp = big_buf;
    host_dm = big_buf;

    guest_data = arg + host_dm->data_start;
    if ((guest_data - arg) < 0) {
        ret = -TARGET_EINVAL;
        goto out;
    }
    guest_data_size = host_dm->data_size - host_dm->data_start;
    host_data = (char*)host_dm + host_dm->data_start;

    argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }

    switch (ie->host_cmd) {
    case DM_REMOVE_ALL:
    case DM_LIST_DEVICES:
    case DM_DEV_CREATE:
    case DM_DEV_REMOVE:
    case DM_DEV_SUSPEND:
    case DM_DEV_STATUS:
    case DM_DEV_WAIT:
    case DM_TABLE_STATUS:
    case DM_TABLE_CLEAR:
    case DM_TABLE_DEPS:
    case DM_LIST_VERSIONS:
        /* no input data */
        break;
    case DM_DEV_RENAME:
    case DM_DEV_SET_GEOMETRY:
        /* data contains only strings */
        memcpy(host_data, argptr, guest_data_size);
        break;
    case DM_TARGET_MSG:
        memcpy(host_data, argptr, guest_data_size);
        *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
        break;
    case DM_TABLE_LOAD:
    {
        void *gspec = argptr;
        void *cur_data = host_data;
        const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
        int spec_size = thunk_type_size(arg_type, 0);
        int i;

        for (i = 0; i < host_dm->target_count; i++) {
            struct dm_target_spec *spec = cur_data;
            uint32_t next;
            int slen;

            thunk_convert(spec, gspec, arg_type, THUNK_HOST);
            slen = strlen((char*)gspec + spec_size) + 1;
            next = spec->next;
            spec->next = sizeof(*spec) + slen;
            strcpy((char*)&spec[1], gspec + spec_size);
            gspec += next;
            cur_data += spec->next;
        }
        break;
    }
    default:
        ret = -TARGET_EINVAL;
        unlock_user(argptr, guest_data, 0);
        goto out;
    }
    unlock_user(argptr, guest_data, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (!is_error(ret)) {
        guest_data = arg + host_dm->data_start;
        guest_data_size = host_dm->data_size - host_dm->data_start;
        argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        switch (ie->host_cmd) {
        case DM_REMOVE_ALL:
        case DM_DEV_CREATE:
        case DM_DEV_REMOVE:
        case DM_DEV_RENAME:
        case DM_DEV_SUSPEND:
        case DM_DEV_STATUS:
        case DM_TABLE_LOAD:
        case DM_TABLE_CLEAR:
        case DM_TARGET_MSG:
        case DM_DEV_SET_GEOMETRY:
            /* no return data */
            break;
        case DM_LIST_DEVICES:
        {
            struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
            int nl_size = 12; /* can't use thunk_size due to alignment */

            while (1) {
                uint32_t next = nl->next;
                if (next) {
                    nl->next = nl_size + (strlen(nl->name) + 1);
                }
                if (remaining_data < nl->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
                strcpy(cur_data + nl_size, nl->name);
                cur_data += nl->next;
                remaining_data -= nl->next;
                if (!next) {
                    break;
                }
                nl = (void*)nl + next;
            }
            break;
        }
        case DM_DEV_WAIT:
        case DM_TABLE_STATUS:
        {
            struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
            int spec_size = thunk_type_size(arg_type, 0);
            int i;

            for (i = 0; i < host_dm->target_count; i++) {
                uint32_t next = spec->next;
                int slen = strlen((char*)&spec[1]) + 1;
                spec->next = (cur_data - argptr) + spec_size + slen;
                if (guest_data_size < spec->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
                strcpy(cur_data + spec_size, (char*)&spec[1]);
                cur_data = argptr + spec->next;
                spec = (void*)host_dm + host_dm->data_start + next;
            }
            break;
        }
        case DM_TABLE_DEPS:
        {
            void *hdata = (void*)host_dm + host_dm->data_start;
            int count = *(uint32_t*)hdata;
            uint64_t *hdev = hdata + 8;
            uint64_t *gdev = argptr + 8;
            int i;

            *(uint32_t*)argptr = tswap32(count);
            for (i = 0; i < count; i++) {
                *gdev = tswap64(*hdev);
                gdev++;
                hdev++;
            }
            break;
        }
        case DM_LIST_VERSIONS:
        {
            struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
            int vers_size = thunk_type_size(arg_type, 0);

            while (1) {
                uint32_t next = vers->next;
                if (next) {
                    vers->next = vers_size + (strlen(vers->name) + 1);
                }
                if (remaining_data < vers->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
                strcpy(cur_data + vers_size, vers->name);
                cur_data += vers->next;
                remaining_data -= vers->next;
                if (!next) {
                    break;
                }
                vers = (void*)vers + next;
            }
            break;
        }
        default:
            unlock_user(argptr, guest_data, 0);
            ret = -TARGET_EINVAL;
            goto out;
        }
        unlock_user(argptr, guest_data, guest_data_size);

        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
            goto out;
        }
        thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);
    }
out:
    g_free(big_buf);
    return ret;
}
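
/*
 * Payload layout assumed by do_ioctl_dm() above (summary comment): a
 * dm_ioctl request is a fixed header followed by a variable payload,
 *
 *   | struct dm_ioctl ... | payload ...           |
 *   0                data_start              data_size
 *
 * so guest_data_size == data_size - data_start and host_data points just
 * past the header in the host copy.  Allocating data_size * 2 for big_buf
 * is presumably a conservative margin for entries that grow during
 * target-to-host conversion.
 */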
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    void *argptr;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;

    struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert blkpg */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert blkpg->data */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));

out:
    return ret;
}
static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    const StructEntry *se;
    const argtype *field_types;
    const int *dst_offsets, *src_offsets;
    int target_size;
    void *argptr;
    abi_ulong *target_rt_dev_ptr = NULL;
    unsigned long *host_rt_dev_ptr = NULL;
    abi_long ret;
    int i;

    assert(ie->access == IOC_W);
    assert(*arg_type == TYPE_PTR);
    arg_type++;
    assert(*arg_type == TYPE_STRUCT);
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    arg_type++;
    assert(*arg_type == (int)STRUCT_rtentry);
    se = struct_entries + *arg_type++;
    assert(se->convert[0] == NULL);
    /* convert struct here to be able to catch rt_dev string */
    field_types = se->field_types;
    dst_offsets = se->field_offsets[THUNK_HOST];
    src_offsets = se->field_offsets[THUNK_TARGET];
    for (i = 0; i < se->nb_fields; i++) {
        if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
            assert(*field_types == TYPE_PTRVOID);
            target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
            host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
            if (*target_rt_dev_ptr != 0) {
                *host_rt_dev_ptr = (unsigned long)lock_user_string(
                                                  tswapal(*target_rt_dev_ptr));
                if (!*host_rt_dev_ptr) {
                    unlock_user(argptr, arg, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                *host_rt_dev_ptr = 0;
            }
            field_types++;
            continue;
        }
        field_types = thunk_convert(buf_temp + dst_offsets[i],
                                    argptr + src_offsets[i],
                                    field_types, THUNK_HOST);
    }
    unlock_user(argptr, arg, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));

    assert(host_rt_dev_ptr != NULL);
    assert(target_rt_dev_ptr != NULL);
    if (*host_rt_dev_ptr != 0) {
        unlock_user((void *)*host_rt_dev_ptr,
                    *target_rt_dev_ptr, 0);
    }
    return ret;
}
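
/*
 * Illustrative guest-side route ioctl (a sketch, not part of this file).
 * The special handling above exists because rt_dev is a pointer to a
 * device-name string embedded in the guest's struct rtentry, which a flat
 * thunk conversion cannot follow:
 *
 *   #include <net/route.h>
 *   #include <string.h>
 *   #include <sys/ioctl.h>
 *
 *   int add_route(int sock, const struct sockaddr *dst)
 *   {
 *       struct rtentry rt;
 *       memset(&rt, 0, sizeof(rt));
 *       rt.rt_dst = *dst;
 *       rt.rt_flags = RTF_UP;
 *       rt.rt_dev = (char *)"eth0";  // guest pointer needing translation
 *       return ioctl(sock, SIOCADDRT, &rt);
 *   }
 */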
static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int sig = target_to_host_signal(arg);
    return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
}
static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
                                    int fd, int cmd, abi_long arg)
{
    struct timeval tv;
    abi_long ret;

    ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
    if (is_error(ret)) {
        return ret;
    }

    if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
        if (copy_to_user_timeval(arg, &tv)) {
            return -TARGET_EFAULT;
        }
    } else {
        if (copy_to_user_timeval64(arg, &tv)) {
            return -TARGET_EFAULT;
        }
    }

    return ret;
}
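
/*
 * Illustrative guest-side use (a sketch, not part of this file):
 * SIOCGSTAMP reports the receive timestamp of the last packet on a socket.
 *
 *   #include <sys/ioctl.h>
 *   #include <sys/time.h>
 *   #include <linux/sockios.h>
 *
 *   int last_rx_time(int sock, struct timeval *tv)
 *   {
 *       return ioctl(sock, SIOCGSTAMP, tv);
 *   }
 *
 * The _OLD/_NEW split above exists because y2038-ready guests issue the
 * same request with a 64-bit time layout, which has to be written back
 * with copy_to_user_timeval64() instead.
 */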
static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
                                      int fd, int cmd, abi_long arg)
{
    struct timespec ts;
    abi_long ret;

    ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
    if (is_error(ret)) {
        return ret;
    }

    if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
        if (host_to_target_timespec(arg, &ts)) {
            return -TARGET_EFAULT;
        }
    } else {
        if (host_to_target_timespec64(arg, &ts)) {
            return -TARGET_EFAULT;
        }
    }

    return ret;
}
static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
    return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
}
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
#define IOCTL_IGNORE(cmd) \
    { TARGET_ ## cmd, 0, #cmd },
#include "ioctls.h"
    { 0, 0, },
};
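
/*
 * For reference, a plain IOCTL() entry such as
 *
 *   IOCTL(BLKGETSIZE, IOC_R, MK_PTR(TYPE_ULONG))
 *
 * expands to
 *
 *   { TARGET_BLKGETSIZE, BLKGETSIZE, "BLKGETSIZE", IOC_R, 0,
 *     { MK_PTR(TYPE_ULONG) } },
 *
 * i.e. its do_ioctl hook is NULL, so do_ioctl() below handles it through
 * the generic thunk-based conversion path.
 */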
/* ??? Implement proper locking for ioctls. */
/* do_ioctl() must return target values and target errnos. */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
    if (ie->do_ioctl) {
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    } else if (!ie->host_cmd) {
        /* Some architectures define BSD ioctls in their headers
           that are not implemented in Linux. */
        return -TARGET_ENOSYS;
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
    case TYPE_LONG:
    case TYPE_ULONG:
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
static const bitmask_transtbl iflag_tbl[] = {
    { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
    { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
    { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
    { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
    { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
    { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
    { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
    { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
    { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
    { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
    { TARGET_IXON, TARGET_IXON, IXON, IXON },
    { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
    { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
    { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
    { 0, 0, 0, 0 }
};

static const bitmask_transtbl oflag_tbl[] = {
    { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
    { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
    { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
    { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
    { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
    { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
    { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
    { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
    { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
    { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
    { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
    { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
    { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
    { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
    { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
    { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
    { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
    { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
    { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
    { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
    { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
    { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
    { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
    { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
    { 0, 0, 0, 0 }
};

static const bitmask_transtbl cflag_tbl[] = {
    { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
    { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
    { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
    { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
    { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
    { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
    { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
    { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
    { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
    { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
    { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
    { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
    { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
    { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
    { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
    { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
    { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
    { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
    { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
    { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
    { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
    { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
    { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
    { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
    { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
    { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
    { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
    { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
    { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
    { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
    { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
    { 0, 0, 0, 0 }
};

static const bitmask_transtbl lflag_tbl[] = {
    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
    { 0, 0, 0, 0 }
};
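
/*
 * How these tables are read: each row is
 * { target_mask, target_bits, host_mask, host_bits }.  For a single-bit
 * flag such as ONLCR the mask equals the bits, while a multi-bit field
 * such as CRDLY gets one row per value.  Translating a target c_oflag of
 * (TARGET_OPOST | TARGET_CR2), for instance, matches the OPOST row and
 * the CR2 row and yields OPOST | CR2 on the host.
 */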
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios),
               __alignof__(struct host_termios) },
};
static bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
      MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
      MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
      MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
      MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
      MAP_NORESERVE, MAP_NORESERVE },
    { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
    /* MAP_STACK had been ignored by the kernel for quite some time.
       Recognize it for the target insofar as we do not want to pass
       it through to the host. */
    { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
    { 0, 0, 0, 0 }
};
#if defined(TARGET_I386)

/* NOTE: there is really one LDT for all the threads */
static uint8_t *ldt_table;

static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
{
    int size;
    void *p;

    if (!ldt_table)
        return 0;
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
        size = bytecount;
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
    if (!p)
        return -TARGET_EFAULT;
    /* ??? Should this be byteswapped? */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);
    return size;
}

/* XXX: add locking support */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0              &&
             read_exec_only == 1        &&
             seg_32bit == 0             &&
             limit_in_pages == 0        &&
             seg_not_present == 1       &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ... */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}

/* specific and weird i386 syscalls */
static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
                              unsigned long bytecount)
{
    abi_long ret;

    switch (func) {
    case 0:
        ret = read_ldt(ptr, bytecount);
        break;
    case 1:
        ret = write_ldt(env, ptr, bytecount, 1);
        break;
    case 0x11:
        ret = write_ldt(env, ptr, bytecount, 0);
        break;
    default:
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
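
/*
 * Illustrative guest-side call (a sketch, not part of this file):
 * modify_ldt() has no glibc wrapper, so 32-bit x86 guests reach it via
 * syscall(2).  func == 1 is the old write mode that write_ldt() above
 * receives with oldmode == 1:
 *
 *   #include <asm/ldt.h>
 *   #include <sys/syscall.h>
 *   #include <unistd.h>
 *
 *   int install_ldt_entry(struct user_desc *desc)
 *   {
 *       return syscall(SYS_modify_ldt, 1, desc, sizeof(*desc));
 *   }
 */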
#if defined(TARGET_I386) && defined(TARGET_ABI32)
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0             &&
             read_exec_only == 1       &&
             seg_32bit == 0            &&
             limit_in_pages == 0       &&
             seg_not_present == 1      &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ... */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}

static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
#endif /* TARGET_I386 && TARGET_ABI32 */
#ifndef TARGET_ABI32
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    abi_long ret = 0;
    abi_ulong val;
    int idx;

    switch(code) {
    case TARGET_ARCH_SET_GS:
    case TARGET_ARCH_SET_FS:
        if (code == TARGET_ARCH_SET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        cpu_x86_load_seg(env, idx, 0);
        env->segs[idx].base = addr;
        break;
    case TARGET_ARCH_GET_GS:
    case TARGET_ARCH_GET_FS:
        if (code == TARGET_ARCH_GET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        val = env->segs[idx].base;
        if (put_user(val, addr, abi_ulong))
            ret = -TARGET_EFAULT;
        break;
    default:
        ret = -TARGET_EINVAL;
        break;
    }
    return ret;
}
#endif /* !TARGET_ABI32 */

#endif /* defined(TARGET_I386) */
#define NEW_STACK_SIZE 0x40000


static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
typedef struct {
    CPUArchState *env;
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    pthread_t thread;
    uint32_t tid;
    abi_ulong child_tidptr;
    abi_ulong parent_tidptr;
    sigset_t sigmask;
} new_thread_info;

static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = env_cpu(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = sys_gettid();
    task_settid(ts);
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);
    /* Enable signals. */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready. */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state. */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = env_cpu(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    sigset_t sigmask;

    flags &= ~CLONE_IGNORED_FLAGS;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;
        }

        ts = g_new0(TaskState, 1);
        init_task_state(ts);

        /* Grab a mutex so that thread setup appears atomic. */
        pthread_mutex_lock(&clone_lock);

        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent. */
        cpu_clone_regs_child(new_env, newsp, flags);
        cpu_clone_regs_parent(env, flags);
        new_cpu = env_cpu(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;

        if (flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (flags & CLONE_SETTLS) {
            cpu_set_tls (new_env, newtls);
        }

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (flags & CLONE_CHILD_SETTID) {
            info.child_tidptr = child_tidptr;
        }
        if (flags & CLONE_PARENT_SETTID) {
            info.parent_tidptr = parent_tidptr;
        }

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals. */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
        cpu->random_seed = qemu_guest_random_seed_thread_part1();

        /* If this is our first additional thread, we need to ensure we
         * generate code for parallel execution and flush old translations.
         */
        if (!parallel_cpus) {
            parallel_cpus = true;
            tb_flush(cpu);
        }

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed. */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize. */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;
        }

        /* We can't support custom termination signals */
        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process. */
            cpu_clone_regs_child(env, newsp, flags);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock. */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(sys_gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(sys_gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            cpu_clone_regs_parent(env, flags);
            fork_end(0);
        }
    }
    return ret;
}
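
/*
 * For orientation: glibc's pthread_create() issues clone() with flags like
 *
 *   CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *   CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID |
 *   CLONE_CHILD_CLEARTID
 *
 * which contains all of CLONE_THREAD_FLAGS and therefore takes the
 * pthread_create() path above, while a plain fork()/vfork() passes none
 * of those flags and takes the fork() path.
 */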
/* warning: doesn't handle Linux-specific flags... */
static int target_to_host_fcntl_cmd(int cmd)
{
    int ret;

    switch(cmd) {
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
        ret = cmd;
        break;
    case TARGET_F_GETLK:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW:
        ret = F_SETLKW64;
        break;
    case TARGET_F_GETOWN:
        ret = F_GETOWN;
        break;
    case TARGET_F_SETOWN:
        ret = F_SETOWN;
        break;
    case TARGET_F_GETSIG:
        ret = F_GETSIG;
        break;
    case TARGET_F_SETSIG:
        ret = F_SETSIG;
        break;
#if TARGET_ABI_BITS == 32
    case TARGET_F_GETLK64:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK64:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW64:
        ret = F_SETLKW64;
        break;
#endif
    case TARGET_F_SETLEASE:
        ret = F_SETLEASE;
        break;
    case TARGET_F_GETLEASE:
        ret = F_GETLEASE;
        break;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        ret = F_DUPFD_CLOEXEC;
        break;
#endif
    case TARGET_F_NOTIFY:
        ret = F_NOTIFY;
        break;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = F_GETOWN_EX;
        break;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        ret = F_SETOWN_EX;
        break;
#endif
#ifdef F_SETPIPE_SZ
    case TARGET_F_SETPIPE_SZ:
        ret = F_SETPIPE_SZ;
        break;
    case TARGET_F_GETPIPE_SZ:
        ret = F_GETPIPE_SZ;
        break;
#endif
    default:
        ret = -TARGET_EINVAL;
        break;
    }

#if defined(__powerpc64__)
    /* On PPC64, glibc headers have the F_*LK* values defined to 12, 13 and 14,
     * which is not supported by the kernel. The glibc fcntl call actually
     * adjusts them to 5, 6 and 7 before making the syscall(). Since we make
     * the syscall directly, adjust to what is supported by the kernel.
     */
    if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
        ret -= F_GETLK64 - 5;
    }
#endif

    return ret;
}
#define FLOCK_TRANSTBL \
    switch (type) { \
    TRANSTBL_CONVERT(F_RDLCK); \
    TRANSTBL_CONVERT(F_WRLCK); \
    TRANSTBL_CONVERT(F_UNLCK); \
    TRANSTBL_CONVERT(F_EXLCK); \
    TRANSTBL_CONVERT(F_SHLCK); \
    }

static int target_to_host_flock(int type)
{
#define TRANSTBL_CONVERT(a) case TARGET_##a: return a
    FLOCK_TRANSTBL
#undef TRANSTBL_CONVERT
    return -TARGET_EINVAL;
}

static int host_to_target_flock(int type)
{
#define TRANSTBL_CONVERT(a) case a: return TARGET_##a
    FLOCK_TRANSTBL
#undef TRANSTBL_CONVERT
    /* if we don't know how to convert the value coming
     * from the host we copy to the target field as-is
     */
    return type;
}
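
/*
 * Expansion sketch: inside target_to_host_flock() the FLOCK_TRANSTBL block
 * becomes
 *
 *   switch (type) {
 *   case TARGET_F_RDLCK: return F_RDLCK;
 *   case TARGET_F_WRLCK: return F_WRLCK;
 *   case TARGET_F_UNLCK: return F_UNLCK;
 *   case TARGET_F_EXLCK: return F_EXLCK;
 *   case TARGET_F_SHLCK: return F_SHLCK;
 *   }
 *
 * and host_to_target_flock() redefines TRANSTBL_CONVERT to swap the two
 * sides of each case.
 */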
static inline abi_long copy_from_user_flock(struct flock64 *fl,
                                            abi_ulong target_flock_addr)
{
    struct target_flock *target_fl;
    int l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
                                          const struct flock64 *fl)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);

#if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
                                                   abi_ulong target_flock_addr)
{
    struct target_oabi_flock64 *target_fl;
    int l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_oabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
#endif
static inline abi_long copy_from_user_flock64(struct flock64 *fl,
                                              abi_ulong target_flock_addr)
{
    struct target_flock64 *target_fl;
    int l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
                                            const struct flock64 *fl)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock64 fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock(arg, &fl64);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETLK64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl64);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
#ifdef USE_UID16

static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}

static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}

static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}

static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}

static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */
static inline int high2lowuid(int uid)
{
    return uid;
}

static inline int high2lowgid(int gid)
{
    return gid;
}

static inline int low2highuid(int uid)
{
    return uid;
}

static inline int low2highgid(int gid)
{
    return gid;
}

static inline int tswapid(int id)
{
    return id;
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
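
/*
 * Quick sanity examples for the 16-bit mappings above:
 * high2lowuid(70000) == 65534 (the kernel's default "overflowuid"), and
 * low2highuid(0xffff) == -1, so the 16-bit "unchanged" argument of
 * setreuid16 and friends survives as -1 instead of becoming uid 65535.
 */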
/* We must do direct syscalls for setting UID/GID, because we want to
 * implement the Linux system call semantics of "change only for this thread",
 * not the libc/POSIX semantics of "change for all threads in process".
 * (See http://ewontfix.com/17/ for more details.)
 * We use the 32-bit version of the syscalls if present; if it is not
 * then either the host architecture supports 32-bit UIDs natively with
 * the standard syscall, or the 16-bit UID is the best we can do.
 */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif

_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

    thunk_init(STRUCT_MAX);

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
#if TARGET_ABI_BITS == 32
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */

#ifdef TARGET_NR_truncate64
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif

#ifdef TARGET_NR_ftruncate64
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
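
/*
 * Worked example (a sketch; assumes a little-endian 32-bit guest ABI that
 * aligns 64-bit syscall arguments to register pairs, as EABI ARM does):
 * ftruncate64(fd, 0x100000000ULL) splits the offset across two registers,
 * regpairs_aligned() shifts the pair into (arg2, arg3) = (0x0, 0x1), and
 * target_offset64(0x0, 0x1) reassembles
 * ((uint64_t)0x1 << 32) | 0x0 == 0x100000000.
 */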
static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
                                                 abi_ulong target_addr)
{
    struct target_itimerspec *target_itspec;

    if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    host_itspec->it_interval.tv_sec =
                            tswapal(target_itspec->it_interval.tv_sec);
    host_itspec->it_interval.tv_nsec =
                            tswapal(target_itspec->it_interval.tv_nsec);
    host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
    host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);

    unlock_user_struct(target_itspec, target_addr, 1);
    return 0;
}

static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
                                                 struct itimerspec *host_its)
{
    struct target_itimerspec *target_itspec;

    if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
    target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);

    target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
    target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);

    unlock_user_struct(target_itspec, target_addr, 0);
    return 0;
}
static inline abi_long target_to_host_timex(struct timex *host_tx,
                                            abi_long target_addr)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}
static inline abi_long host_to_target_timex(abi_long target_addr,
                                            struct timex *host_tx)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
                                               abi_ulong target_addr)
{
    struct target_sigevent *target_sevp;

    if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    /* This union is awkward on 64 bit systems because it has a 32 bit
     * integer and a pointer in it; we follow the conversion approach
     * used for handling sigval types in signal.c so the guest should get
     * the correct value back even if we did a 64 bit byteswap and it's
     * using the 32 bit integer.
     */
    host_sevp->sigev_value.sival_ptr =
        (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
    host_sevp->sigev_signo =
        target_to_host_signal(tswap32(target_sevp->sigev_signo));
    host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
    host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);

    unlock_user_struct(target_sevp, target_addr, 1);
    return 0;
}
#if defined(TARGET_NR_mlockall)
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }

    return result;
}
#endif
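
/*
 * Copy a host struct stat into the guest's 64-bit stat buffer.  The
 * guest layout varies: ARM EABI has its own variant, while other
 * targets use either target_stat64 or plain target_stat when no
 * separate 64-bit structure exists.
 */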
#if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
     defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
     defined(TARGET_NR_newfstatat))
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif
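
/*
 * statx(2) conversion.  struct target_statx mirrors the kernel's
 * struct statx member for member, so the copy-out is a straight
 * field-by-field byteswap.
 */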
#if defined(TARGET_NR_statx) && defined(__NR_statx)
static inline abi_long host_to_target_statx(struct target_statx *host_stx,
                                            abi_ulong target_addr)
{
    struct target_statx *target_stx;

    if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    memset(target_stx, 0, sizeof(*target_stx));

    __put_user(host_stx->stx_mask, &target_stx->stx_mask);
    __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
    __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
    __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
    __put_user(host_stx->stx_uid, &target_stx->stx_uid);
    __put_user(host_stx->stx_gid, &target_stx->stx_gid);
    __put_user(host_stx->stx_mode, &target_stx->stx_mode);
    __put_user(host_stx->stx_ino, &target_stx->stx_ino);
    __put_user(host_stx->stx_size, &target_stx->stx_size);
    __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
    __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
    __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
    __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
    __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
    __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
    __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
    __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
    __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
    __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
    __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
    __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
    __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
    __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);

    unlock_user_struct(target_stx, target_addr, 1);

    return 0;
}
#endif
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  However they're probably useless because guest atomic
   operations won't work either.  */
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            target_to_host_timespec(pts, timeout);
        } else {
            pts = NULL;
        }
        return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
                         pts, NULL, val3));
    case FUTEX_WAKE:
        return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_FD:
        return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return get_errno(safe_futex(g2h(uaddr), op, val, pts,
                                    g2h(uaddr2),
                                    (base_op == FUTEX_CMP_REQUEUE
                                     ? tswap32(val3)
                                     : val3)));
    default:
        return -TARGET_ENOSYS;
    }
}
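
/*
 * name_to_handle_at(2) uses a variable-length struct file_handle, so
 * the caller-supplied handle_bytes value must be read first to learn
 * how large the guest buffer really is before locking it.
 */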
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
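
/*
 * signalfd is emulated with a real host signalfd; the returned
 * descriptor is registered with fd_trans so that the siginfo records
 * the guest reads back are converted to its byte order.
 */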
#if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)

static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
#endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
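
/*
 * Emulation of /proc/self entries.  Files such as maps, stat, auxv
 * and cmdline must describe the guest process rather than QEMU
 * itself, so opens of those paths are redirected to the generators
 * below, which write the guest's view into a temporary file.
 */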
static int open_self_cmdline(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
    int i;

    for (i = 0; i < bprm->argc; i++) {
        size_t len = strlen(bprm->argv[i]) + 1;

        if (write(fd, bprm->argv[i], len) != len) {
            return -1;
        }
    }

    return 0;
}
static int open_self_maps(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/self/maps", "r");
    if (fp == NULL) {
        return -1;
    }

    while ((read = getline(&line, &len, fp)) != -1) {
        int fields, dev_maj, dev_min, inode;
        uint64_t min, max, offset;
        char flag_r, flag_w, flag_x, flag_p;
        char path[512] = "";
        fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
                        " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
                        &flag_p, &offset, &dev_maj, &dev_min, &inode, path);

        if ((fields < 10) || (fields > 11)) {
            continue;
        }
        if (h2g_valid(min)) {
            int flags = page_get_flags(h2g(min));
            max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
            if (page_check_range(h2g(min), max - min, flags) == -1) {
                continue;
            }
            if (h2g(min) == ts->info->stack_limit) {
                pstrcpy(path, sizeof(path), " [stack]");
            }
            dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
                    " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
                    h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
                    flag_x, flag_p, offset, dev_maj, dev_min, inode,
                    path[0] ? " " : "", path);
        }
    }

    free(line);
    fclose(fp);

    return 0;
}
static int open_self_stat(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong start_stack = ts->info->start_stack;
    int i;

    for (i = 0; i < 44; i++) {
        char buf[128];
        int len;
        uint64_t val = 0;

        if (i == 0) {
            /* pid */
            val = getpid();
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else if (i == 1) {
            /* app name */
            snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
        } else if (i == 27) {
            /* stack bottom */
            val = start_stack;
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else {
            /* for the rest, there is MasterCard */
            snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
        }

        len = strlen(buf);
        if (write(fd, buf, len) != len) {
            return -1;
        }
    }

    return 0;
}
static int open_self_auxv(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
    defined(TARGET_SPARC) || defined(TARGET_M68K)
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}
#endif
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
static int open_net_route(void *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -1;
    }

    /* read header */

    read = getline(&line, &len, fp);
    dprintf(fd, "%s", line);

    /* read routes */

    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        int fields;

        fields = sscanf(line,
                        "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                        iface, &dest, &gw, &flags, &refcnt, &use, &metric,
                        &mask, &mtu, &window, &irtt);
        if (fields != 11) {
            continue;
        }
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
#endif
#if defined(TARGET_SPARC)
static int open_cpuinfo(void *cpu_env, int fd)
{
    dprintf(fd, "type\t\t: sun4u\n");
    return 0;
}
#endif

#if defined(TARGET_M68K)
static int open_hardware(void *cpu_env, int fd)
{
    dprintf(fd, "Model:\t\tqemu-m68k\n");
    return 0;
}
#endif
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;
        int (*fill)(void *cpu_env, int fd);
        int (*cmp)(const char *s1, const char *s2);
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
#if defined(TARGET_SPARC)
        { "/proc/cpuinfo", open_cpuinfo, is_proc },
#endif
#if defined(TARGET_M68K)
        { "/proc/hardware", open_hardware, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    if (is_proc_myself(pathname, "exe")) {
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
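
/*
 * POSIX timer ids handed out to the guest are the internal table
 * index OR'ed with TIMER_MAGIC (index 3 becomes 0x0caf0003), so that
 * get_timer_id() can reject ids the guest did not obtain from us.
 */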
#define TIMER_MAGIC 0x0caf0000
#define TIMER_MAGIC_MASK 0xffff0000

/* Convert QEMU provided timer ID back to internal 16bit index format */
static target_timer_t get_timer_id(abi_long arg)
{
    target_timer_t timerid = arg;

    if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
        return -TARGET_EINVAL;
    }

    timerid &= 0xffff;

    if (timerid >= ARRAY_SIZE(g_posix_timers)) {
        return -TARGET_EINVAL;
    }

    return timerid;
}
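
/*
 * CPU affinity masks are copied bit by bit because the guest's
 * abi_ulong words and the host's unsigned long words can differ in
 * both width and byte order.
 */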
static int target_to_host_cpu_mask(unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }
    memset(host_mask, 0, host_size);

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val;

        __get_user(val, &target_mask[i]);
        for (j = 0; j < target_bits; j++, bit++) {
            if (val & (1UL << j)) {
                host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
            }
        }
    }

    unlock_user(target_mask, target_addr, 0);
    return 0;
}
static int host_to_target_cpu_mask(const unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val = 0;

        for (j = 0; j < target_bits; j++, bit++) {
            if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
                val |= 1UL << j;
            }
        }
        __put_user(val, &target_mask[i]);
    }

    unlock_user(target_mask, target_addr, target_size);
    return 0;
}
/* This is an internal helper for do_syscall so that it is easier
 * to have a single return point, so that actions, such as logging
 * of syscall results, can be performed.
 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
 */
static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
                            abi_long arg2, abi_long arg3, abi_long arg4,
                            abi_long arg5, abi_long arg6, abi_long arg7,
                            abi_long arg8)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long ret;
#if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
    || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
    || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
    || defined(TARGET_NR_statx)
    struct stat st;
#endif
#if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
    || defined(TARGET_NR_fstatfs)
    struct statfs stfs;
#endif
    void *p;

    switch(num) {
    case TARGET_NR_exit:
        /* In old applications this may be used to implement _exit(2).
           However in threaded applications it is used for thread termination,
           and _exit_group is used for application termination.
           Do thread termination if we have more than one thread.  */

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        cpu_list_lock();

        if (CPU_NEXT(first_cpu)) {
            TaskState *ts;

            /* Remove the CPU from the list.  */
            QTAILQ_REMOVE_RCU(&cpus, cpu, node);

            cpu_list_unlock();

            ts = cpu->opaque;
            if (ts->child_tidptr) {
                put_user_u32(0, ts->child_tidptr);
                sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
                          NULL, NULL, 0);
            }
            thread_cpu = NULL;
            object_unref(OBJECT(cpu));
            g_free(ts);
            rcu_unregister_thread();
            pthread_exit(NULL);
        }

        cpu_list_unlock();
        preexit_cleanup(cpu_env, arg1);
        _exit(arg1);
        return 0; /* avoid warning */
    case TARGET_NR_read:
        if (arg2 == 0 && arg3 == 0) {
            return get_errno(safe_read(arg1, 0, 0));
        } else {
            if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
                return -TARGET_EFAULT;
            ret = get_errno(safe_read(arg1, p, arg3));
            if (ret >= 0 &&
                fd_trans_host_to_target_data(arg1)) {
                ret = fd_trans_host_to_target_data(arg1)(p, ret);
            }
            unlock_user(p, arg2, ret);
        }
        return ret;
    case TARGET_NR_write:
        if (arg2 == 0 && arg3 == 0) {
            return get_errno(safe_write(arg1, 0, 0));
        }
        if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
            return -TARGET_EFAULT;
        if (fd_trans_target_to_host_data(arg1)) {
            void *copy = g_malloc(arg3);
            memcpy(copy, p, arg3);
            ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
            if (ret >= 0) {
                ret = get_errno(safe_write(arg1, copy, ret));
            }
            g_free(copy);
        } else {
            ret = get_errno(safe_write(arg1, p, arg3));
        }
        unlock_user(p, arg2, 0);
        return ret;
#ifdef TARGET_NR_open
    case TARGET_NR_open:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
                                  target_to_host_bitmask(arg2, fcntl_flags_tbl),
                                  arg3));
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_openat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(do_openat(cpu_env, arg1, p,
                                  target_to_host_bitmask(arg3, fcntl_flags_tbl),
                                  arg4));
        fd_trans_unregister(ret);
        unlock_user(p, arg2, 0);
        return ret;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
    case TARGET_NR_name_to_handle_at:
        ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
        return ret;
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
    case TARGET_NR_open_by_handle_at:
        ret = do_open_by_handle_at(arg1, arg2, arg3);
        fd_trans_unregister(ret);
        return ret;
#endif
    case TARGET_NR_close:
        fd_trans_unregister(arg1);
        return get_errno(close(arg1));
    case TARGET_NR_brk:
        return do_brk(arg1);
#ifdef TARGET_NR_fork
    case TARGET_NR_fork:
        return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
#endif
#ifdef TARGET_NR_waitpid
    case TARGET_NR_waitpid:
        {
            int status;
            ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
            if (!is_error(ret) && arg2 && ret
                && put_user_s32(host_to_target_waitstatus(status), arg2))
                return -TARGET_EFAULT;
        }
        return ret;
#endif
#ifdef TARGET_NR_waitid
    case TARGET_NR_waitid:
        {
            siginfo_t info;
            info.si_pid = 0;
            ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
            if (!is_error(ret) && arg3 && info.si_pid != 0) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_siginfo(p, &info);
                unlock_user(p, arg3, sizeof(target_siginfo_t));
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_creat /* not on alpha */
    case TARGET_NR_creat:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(creat(p, arg2));
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_link
    case TARGET_NR_link:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(link(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_linkat)
    case TARGET_NR_linkat:
        {
            void *p2 = NULL;
            if (!arg2 || !arg4)
                return -TARGET_EFAULT;
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
            unlock_user(p, arg2, 0);
            unlock_user(p2, arg4, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_unlink
    case TARGET_NR_unlink:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(unlink(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_unlinkat)
    case TARGET_NR_unlinkat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(unlinkat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_execve:
        {
            char **argp, **envp;
            int argc, envc;
            abi_ulong gp;
            abi_ulong guest_argp;
            abi_ulong guest_envp;
            abi_ulong addr;
            char **q;
            int total_size = 0;

            argc = 0;
            guest_argp = arg2;
            for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
                if (get_user_ual(addr, gp))
                    return -TARGET_EFAULT;
                if (!addr)
                    break;
                argc++;
            }
            envc = 0;
            guest_envp = arg3;
            for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
                if (get_user_ual(addr, gp))
                    return -TARGET_EFAULT;
                if (!addr)
                    break;
                envc++;
            }

            argp = g_new0(char *, argc + 1);
            envp = g_new0(char *, envc + 1);

            for (gp = guest_argp, q = argp; gp;
                  gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp))
                    goto execve_efault;
                if (!addr)
                    break;
                if (!(*q = lock_user_string(addr)))
                    goto execve_efault;
                total_size += strlen(*q) + 1;
            }
            *q = NULL;

            for (gp = guest_envp, q = envp; gp;
                  gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp))
                    goto execve_efault;
                if (!addr)
                    break;
                if (!(*q = lock_user_string(addr)))
                    goto execve_efault;
                total_size += strlen(*q) + 1;
            }
            *q = NULL;

            if (!(p = lock_user_string(arg1)))
                goto execve_efault;
            /* Although execve() is not an interruptible syscall it is
             * a special case where we must use the safe_syscall wrapper:
             * if we allow a signal to happen before we make the host
             * syscall then we will 'lose' it, because at the point of
             * execve the process leaves QEMU's control. So we use the
             * safe syscall wrapper to ensure that we either take the
             * signal as a guest signal, or else it does not happen
             * before the execve completes and makes it the other
             * program's problem.
             */
            ret = get_errno(safe_execve(p, argp, envp));
            unlock_user(p, arg1, 0);

            goto execve_end;

        execve_efault:
            ret = -TARGET_EFAULT;

        execve_end:
            for (gp = guest_argp, q = argp; *q;
                  gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp)
                    || !addr)
                    break;
                unlock_user(*q, addr, 0);
            }
            for (gp = guest_envp, q = envp; *q;
                  gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp)
                    || !addr)
                    break;
                unlock_user(*q, addr, 0);
            }

            g_free(argp);
            g_free(envp);
        }
        return ret;
    case TARGET_NR_chdir:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chdir(p));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_time
    case TARGET_NR_time:
        {
            time_t host_time;
            ret = get_errno(time(&host_time));
            if (!is_error(ret)
                && arg1
                && put_user_sal(host_time, arg1))
                return -TARGET_EFAULT;
        }
        return ret;
#endif
#ifdef TARGET_NR_mknod
    case TARGET_NR_mknod:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(mknod(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_mknodat)
    case TARGET_NR_mknodat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(mknodat(arg1, p, arg3, arg4));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_chmod
    case TARGET_NR_chmod:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chmod(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_lseek
    case TARGET_NR_lseek:
        return get_errno(lseek(arg1, arg2, arg3));
#endif
#if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxpid:
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
        return get_errno(getpid());
#endif
#ifdef TARGET_NR_getpid
    case TARGET_NR_getpid:
        return get_errno(getpid());
#endif
    case TARGET_NR_mount:
        {
            /* need to look at the data field */
            void *p2, *p3;

            if (arg1) {
                p = lock_user_string(arg1);
                if (!p) {
                    return -TARGET_EFAULT;
                }
            } else {
                p = NULL;
            }

            p2 = lock_user_string(arg2);
            if (!p2) {
                if (arg1) {
                    unlock_user(p, arg1, 0);
                }
                return -TARGET_EFAULT;
            }

            if (arg3) {
                p3 = lock_user_string(arg3);
                if (!p3) {
                    if (arg1) {
                        unlock_user(p, arg1, 0);
                    }
                    unlock_user(p2, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                p3 = NULL;
            }

            /* FIXME - arg5 should be locked, but it isn't clear how to
             * do that since it's not guaranteed to be a NULL-terminated
             * string.
             */
            if (!arg5) {
                ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
            } else {
                ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
            }
            ret = get_errno(ret);

            if (arg1) {
                unlock_user(p, arg1, 0);
            }
            unlock_user(p2, arg2, 0);
            if (arg3) {
                unlock_user(p3, arg3, 0);
            }
        }
        return ret;
#ifdef TARGET_NR_umount
    case TARGET_NR_umount:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(umount(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_stime /* not on alpha */
    case TARGET_NR_stime:
        {
            struct timespec ts;
            ts.tv_nsec = 0;
            if (get_user_sal(ts.tv_sec, arg1)) {
                return -TARGET_EFAULT;
            }
            return get_errno(clock_settime(CLOCK_REALTIME, &ts));
        }
#endif
#ifdef TARGET_NR_alarm /* not on alpha */
    case TARGET_NR_alarm:
        return alarm(arg1);
#endif
#ifdef TARGET_NR_pause /* not on alpha */
    case TARGET_NR_pause:
        if (!block_signals()) {
            sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
        }
        return -TARGET_EINTR;
#endif
#ifdef TARGET_NR_utime
    case TARGET_NR_utime:
        {
            struct utimbuf tbuf, *host_tbuf;
            struct target_utimbuf *target_tbuf;
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
                    return -TARGET_EFAULT;
                tbuf.actime = tswapal(target_tbuf->actime);
                tbuf.modtime = tswapal(target_tbuf->modtime);
                unlock_user_struct(target_tbuf, arg2, 0);
                host_tbuf = &tbuf;
            } else {
                host_tbuf = NULL;
            }
            if (!(p = lock_user_string(arg1)))
                return -TARGET_EFAULT;
            ret = get_errno(utime(p, host_tbuf));
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_utimes
    case TARGET_NR_utimes:
        {
            struct timeval *tvp, tv[2];
            if (arg2) {
                if (copy_from_user_timeval(&tv[0], arg2)
                    || copy_from_user_timeval(&tv[1],
                                              arg2 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
                tvp = tv;
            } else {
                tvp = NULL;
            }
            if (!(p = lock_user_string(arg1)))
                return -TARGET_EFAULT;
            ret = get_errno(utimes(p, tvp));
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_futimesat)
    case TARGET_NR_futimesat:
        {
            struct timeval *tvp, tv[2];
            if (arg3) {
                if (copy_from_user_timeval(&tv[0], arg3)
                    || copy_from_user_timeval(&tv[1],
                                              arg3 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
                tvp = tv;
            } else {
                tvp = NULL;
            }
            if (!(p = lock_user_string(arg2))) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(futimesat(arg1, path(p), tvp));
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_access
    case TARGET_NR_access:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(access(path(p), arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
    case TARGET_NR_faccessat:
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(faccessat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_nice /* not on alpha */
    case TARGET_NR_nice:
        return get_errno(nice(arg1));
#endif
    case TARGET_NR_sync:
        sync();
        return 0;
#if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
    case TARGET_NR_syncfs:
        return get_errno(syncfs(arg1));
#endif
    case TARGET_NR_kill:
        return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
#ifdef TARGET_NR_rename
    case TARGET_NR_rename:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(rename(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_renameat)
    case TARGET_NR_renameat:
        {
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(renameat(arg1, p, arg3, p2));
            unlock_user(p2, arg4, 0);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_renameat2)
    case TARGET_NR_renameat2:
        {
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
            }
            unlock_user(p2, arg4, 0);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_mkdir
    case TARGET_NR_mkdir:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(mkdir(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_mkdirat)
    case TARGET_NR_mkdirat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(mkdirat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_rmdir
    case TARGET_NR_rmdir:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(rmdir(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_dup:
        ret = get_errno(dup(arg1));
        if (ret >= 0) {
            fd_trans_dup(arg1, ret);
        }
        return ret;
#ifdef TARGET_NR_pipe
    case TARGET_NR_pipe:
        return do_pipe(cpu_env, arg1, 0, 0);
#endif
#ifdef TARGET_NR_pipe2
    case TARGET_NR_pipe2:
        return do_pipe(cpu_env, arg1,
                       target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
#endif
    case TARGET_NR_times:
        {
            struct target_tms *tmsp;
            struct tms tms;
            ret = get_errno(times(&tms));
            if (arg1) {
                tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
                if (!tmsp)
                    return -TARGET_EFAULT;
                tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
                tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
                tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
                tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
            }
            if (!is_error(ret))
                ret = host_to_target_clock_t(ret);
        }
        return ret;
    case TARGET_NR_acct:
        if (arg1 == 0) {
            ret = get_errno(acct(NULL));
        } else {
            if (!(p = lock_user_string(arg1))) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(acct(path(p)));
            unlock_user(p, arg1, 0);
        }
        return ret;
#ifdef TARGET_NR_umount2
    case TARGET_NR_umount2:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(umount2(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_ioctl:
        return do_ioctl(arg1, arg2, arg3);
#ifdef TARGET_NR_fcntl
    case TARGET_NR_fcntl:
        return do_fcntl(arg1, arg2, arg3);
#endif
    case TARGET_NR_setpgid:
        return get_errno(setpgid(arg1, arg2));
    case TARGET_NR_umask:
        return get_errno(umask(arg1));
    case TARGET_NR_chroot:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chroot(p));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_dup2
    case TARGET_NR_dup2:
        ret = get_errno(dup2(arg1, arg2));
        if (ret >= 0) {
            fd_trans_dup(arg1, arg2);
        }
        return ret;
#endif
#if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
    case TARGET_NR_dup3:
        {
            int host_flags;

            if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
                return -TARGET_EINVAL;
            }
            host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
            ret = get_errno(dup3(arg1, arg2, host_flags));
            if (ret >= 0) {
                fd_trans_dup(arg1, arg2);
            }
            return ret;
        }
#endif
#ifdef TARGET_NR_getppid /* not on alpha */
    case TARGET_NR_getppid:
        return get_errno(getppid());
#endif
#ifdef TARGET_NR_getpgrp
    case TARGET_NR_getpgrp:
        return get_errno(getpgrp());
#endif
    case TARGET_NR_setsid:
        return get_errno(setsid());
#ifdef TARGET_NR_sigaction
    case TARGET_NR_sigaction:
        {
#if defined(TARGET_ALPHA)
            struct target_sigaction act, oact, *pact = 0;
            struct target_old_sigaction *old_act;
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask);
                act.sa_flags = old_act->sa_flags;
                act.sa_restorer = 0;
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            }
            ret = get_errno(do_sigaction(arg1, pact, &oact));
            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_mask = oact.sa_mask.sig[0];
                old_act->sa_flags = oact.sa_flags;
                unlock_user_struct(old_act, arg3, 1);
            }
#elif defined(TARGET_MIPS)
            struct target_sigaction act, oact, *pact, *old_act;

            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
                act.sa_flags = old_act->sa_flags;
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            } else {
                pact = NULL;
            }

            ret = get_errno(do_sigaction(arg1, pact, &oact));

            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_flags = oact.sa_flags;
                old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
                old_act->sa_mask.sig[1] = 0;
                old_act->sa_mask.sig[2] = 0;
                old_act->sa_mask.sig[3] = 0;
                unlock_user_struct(old_act, arg3, 1);
            }
#else
            struct target_old_sigaction *old_act;
            struct target_sigaction act, oact, *pact;
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask);
                act.sa_flags = old_act->sa_flags;
                act.sa_restorer = old_act->sa_restorer;
#ifdef TARGET_ARCH_HAS_KA_RESTORER
                act.ka_restorer = 0;
#endif
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            } else {
                pact = NULL;
            }
            ret = get_errno(do_sigaction(arg1, pact, &oact));
            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_mask = oact.sa_mask.sig[0];
                old_act->sa_flags = oact.sa_flags;
                old_act->sa_restorer = oact.sa_restorer;
                unlock_user_struct(old_act, arg3, 1);
            }
#endif
        }
        return ret;
#endif
    case TARGET_NR_rt_sigaction:
        {
#if defined(TARGET_ALPHA)
            /* For Alpha and SPARC this is a 5 argument syscall, with
             * a 'restorer' parameter which must be copied into the
             * sa_restorer field of the sigaction struct.
             * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
             * and arg5 is the sigsetsize.
             * Alpha also has a separate rt_sigaction struct that it uses
             * here; SPARC uses the usual sigaction struct.
             */
            struct target_rt_sigaction *rt_act;
            struct target_sigaction act, oact, *pact = 0;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = rt_act->_sa_handler;
                act.sa_mask = rt_act->sa_mask;
                act.sa_flags = rt_act->sa_flags;
                act.sa_restorer = arg5;
                unlock_user_struct(rt_act, arg2, 0);
                pact = &act;
            }
            ret = get_errno(do_sigaction(arg1, pact, &oact));
            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
                    return -TARGET_EFAULT;
                rt_act->_sa_handler = oact._sa_handler;
                rt_act->sa_mask = oact.sa_mask;
                rt_act->sa_flags = oact.sa_flags;
                unlock_user_struct(rt_act, arg3, 1);
            }
#else
#ifdef TARGET_SPARC
            target_ulong restorer = arg4;
            target_ulong sigsetsize = arg5;
#else
            target_ulong sigsetsize = arg4;
#endif
            struct target_sigaction *act;
            struct target_sigaction *oact;

            if (sigsetsize != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
                    return -TARGET_EFAULT;
                }
#ifdef TARGET_ARCH_HAS_KA_RESTORER
                act->ka_restorer = restorer;
#endif
            } else {
                act = NULL;
            }
            if (arg3) {
                if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
                    ret = -TARGET_EFAULT;
                    goto rt_sigaction_fail;
                }
            } else {
                oact = NULL;
            }
            ret = get_errno(do_sigaction(arg1, act, oact));
        rt_sigaction_fail:
            if (act)
                unlock_user_struct(act, arg2, 0);
            if (oact)
                unlock_user_struct(oact, arg3, 1);
#endif
        }
        return ret;
#ifdef TARGET_NR_sgetmask /* not on alpha */
    case TARGET_NR_sgetmask:
        {
            sigset_t cur_set;
            abi_ulong target_set;
            ret = do_sigprocmask(0, NULL, &cur_set);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &cur_set);
                ret = target_set;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_ssetmask /* not on alpha */
    case TARGET_NR_ssetmask:
        {
            sigset_t set, oset;
            abi_ulong target_set = arg1;
            target_to_host_old_sigset(&set, &target_set);
            ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &oset);
                ret = target_set;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_sigprocmask
    case TARGET_NR_sigprocmask:
        {
#if defined(TARGET_ALPHA)
            sigset_t set, oldset;
            abi_ulong mask;
            int how;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);

            ret = do_sigprocmask(how, &set, &oldset);
            if (!is_error(ret)) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
                ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
            }
#else
            sigset_t set, oldset, *set_ptr;
            int how;

            if (arg2) {
                switch (arg1) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
                if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
                    return -TARGET_EFAULT;
                target_to_host_old_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
            } else {
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
#endif
        }
        return ret;
#endif
    case TARGET_NR_rt_sigprocmask:
        {
            int how = arg1;
            sigset_t set, oldset, *set_ptr;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            if (arg2) {
                switch (how) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
                if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
                    return -TARGET_EFAULT;
                target_to_host_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
            } else {
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigpending
    case TARGET_NR_sigpending:
        {
            sigset_t set;
            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigpending:
        {
            sigset_t set;

            /* Yes, this check is >, not != like most. We follow the kernel's
             * logic and it does it like this because it implements
             * NR_sigpending through the same code path, and in that case
             * the old_sigset_t is smaller in size.
             */
            if (arg2 > sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigsuspend
    case TARGET_NR_sigsuspend:
        {
            TaskState *ts = cpu->opaque;
#if defined(TARGET_ALPHA)
            abi_ulong mask = arg1;
            target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
#else
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_old_sigset(&ts->sigsuspend_mask, p);
            unlock_user(p, arg1, 0);
#endif
            ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
                                               SIGSET_T_SIZE));
            if (ret != -TARGET_ERESTARTSYS) {
                ts->in_sigsuspend = 1;
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigsuspend:
        {
            TaskState *ts = cpu->opaque;

            if (arg2 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_sigset(&ts->sigsuspend_mask, p);
            unlock_user(p, arg1, 0);
            ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
                                               SIGSET_T_SIZE));
            if (ret != -TARGET_ERESTARTSYS) {
                ts->in_sigsuspend = 1;
            }
        }
        return ret;
    case TARGET_NR_rt_sigtimedwait:
        {
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            if (arg3) {
                puts = &uts;
                target_to_host_timespec(puts, arg3);
            } else {
                puts = NULL;
            }
            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
                                  0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
    case TARGET_NR_rt_sigqueueinfo:
        {
            siginfo_t uinfo;

            p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg3, 0);
            ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
        }
        return ret;
    case TARGET_NR_rt_tgsigqueueinfo:
        {
            siginfo_t uinfo;

            p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg4, 0);
            ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
        }
        return ret;
#ifdef TARGET_NR_sigreturn
    case TARGET_NR_sigreturn:
        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }
        return do_sigreturn(cpu_env);
#endif
    case TARGET_NR_rt_sigreturn:
        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }
        return do_rt_sigreturn(cpu_env);
    case TARGET_NR_sethostname:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(sethostname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_setrlimit
    case TARGET_NR_setrlimit:
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;
            if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
                return -TARGET_EFAULT;
            rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
            rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
            unlock_user_struct(target_rlim, arg2, 0);
            /*
             * If we just passed through resource limit settings for memory then
             * they would also apply to QEMU's own allocations, and QEMU will
             * crash or hang or die if its allocations fail. Ideally we would
             * track the guest allocations in QEMU and apply the limits ourselves.
             * For now, just tell the guest the call succeeded but don't actually
             * limit anything.
             */
            if (resource != RLIMIT_AS &&
                resource != RLIMIT_DATA &&
                resource != RLIMIT_STACK) {
                return get_errno(setrlimit(resource, &rlim));
            } else {
                return 0;
            }
        }
#endif
#ifdef TARGET_NR_getrlimit
    case TARGET_NR_getrlimit:
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;

            ret = get_errno(getrlimit(resource, &rlim));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                    return -TARGET_EFAULT;
                target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
                target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
                unlock_user_struct(target_rlim, arg2, 1);
            }
        }
        return ret;
#endif
    case TARGET_NR_getrusage:
        {
            struct rusage rusage;
            ret = get_errno(getrusage(arg1, &rusage));
            if (!is_error(ret)) {
                ret = host_to_target_rusage(arg2, &rusage);
            }
        }
        return ret;
    case TARGET_NR_gettimeofday:
        {
            struct timeval tv;
            ret = get_errno(gettimeofday(&tv, NULL));
            if (!is_error(ret)) {
                if (copy_to_user_timeval(arg1, &tv))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_settimeofday:
        {
            struct timeval tv, *ptv = NULL;
            struct timezone tz, *ptz = NULL;

            if (arg1) {
                if (copy_from_user_timeval(&tv, arg1)) {
                    return -TARGET_EFAULT;
                }
                ptv = &tv;
            }

            if (arg2) {
                if (copy_from_user_timezone(&tz, arg2)) {
                    return -TARGET_EFAULT;
                }
                ptz = &tz;
            }

            return get_errno(settimeofday(ptv, ptz));
        }
#if defined(TARGET_NR_select)
    case TARGET_NR_select:
#if defined(TARGET_WANT_NI_OLD_SELECT)
        /* some architectures used to have old_select here
         * but now ENOSYS it.
         */
        ret = -TARGET_ENOSYS;
#elif defined(TARGET_WANT_OLD_SYS_SELECT)
        ret = do_old_select(arg1);
#else
        ret = do_select(arg1, arg2, arg3, arg4, arg5);
#endif
        return ret;
#endif
#ifdef TARGET_NR_pselect6
    case TARGET_NR_pselect6:
        {
            abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
            fd_set rfds, wfds, efds;
            fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
            struct timespec ts, *ts_ptr;

            /*
             * The 6th arg is actually two args smashed together,
             * so we cannot use the C library.
             */
            sigset_t set;
            struct {
                sigset_t *set;
                size_t size;
            } sig, *sig_ptr;

            abi_ulong arg_sigset, arg_sigsize, *arg7;
            target_sigset_t *target_sigset;

            n = arg1;
            rfd_addr = arg2;
            wfd_addr = arg3;
            efd_addr = arg4;
            ts_addr = arg5;

            ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
            if (ret) {
                return ret;
            }
            ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
            if (ret) {
                return ret;
            }
            ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
            if (ret) {
                return ret;
            }

            /*
             * This takes a timespec, and not a timeval, so we cannot
             * use the do_select() helper ...
             */
            if (ts_addr) {
                if (target_to_host_timespec(&ts, ts_addr)) {
                    return -TARGET_EFAULT;
                }
                ts_ptr = &ts;
            } else {
                ts_ptr = NULL;
            }

            /* Extract the two packed args for the sigset */
            if (arg6) {
                sig_ptr = &sig;
                sig.size = SIGSET_T_SIZE;

                arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
                if (!arg7) {
                    return -TARGET_EFAULT;
                }
                arg_sigset = tswapal(arg7[0]);
                arg_sigsize = tswapal(arg7[1]);
                unlock_user(arg7, arg6, 0);

                if (arg_sigset) {
                    sig.set = &set;
                    if (arg_sigsize != sizeof(*target_sigset)) {
                        /* Like the kernel, we enforce correct size sigsets */
                        return -TARGET_EINVAL;
                    }
                    target_sigset = lock_user(VERIFY_READ, arg_sigset,
                                              sizeof(*target_sigset), 1);
                    if (!target_sigset) {
                        return -TARGET_EFAULT;
                    }
                    target_to_host_sigset(&set, target_sigset);
                    unlock_user(target_sigset, arg_sigset, 0);
                } else {
                    sig.set = NULL;
                }
            } else {
                sig_ptr = NULL;
            }

            ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                          ts_ptr, sig_ptr));

            if (!is_error(ret)) {
                if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
                    return -TARGET_EFAULT;
                if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
                    return -TARGET_EFAULT;
                if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
                    return -TARGET_EFAULT;

                if (ts_addr && host_to_target_timespec(ts_addr, &ts))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_symlink
    case TARGET_NR_symlink:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlink(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_symlinkat)
    case TARGET_NR_symlinkat:
        {
            void *p2;
            p  = lock_user_string(arg1);
            p2 = lock_user_string(arg3);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlinkat(p, arg2, p2));
            unlock_user(p2, arg3, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_readlink
    case TARGET_NR_readlink:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (!arg3) {
                /* Short circuit this for the magic exe check. */
                ret = -TARGET_EINVAL;
            } else if (is_proc_myself((const char *)p, "exe")) {
                char real[PATH_MAX], *temp;
                temp = realpath(exec_path, real);
                /* Return value is # of bytes that we wrote to the buffer. */
                if (temp == NULL) {
                    ret = get_errno(-1);
                } else {
                    /* Don't worry about sign mismatch as earlier mapping
                     * logic would have thrown a bad address error. */
                    ret = MIN(strlen(real), arg3);
                    /* We cannot NUL terminate the string. */
                    memcpy(p2, real, ret);
                }
            } else {
                ret = get_errno(readlink(path(p), p2, arg3));
            }
            unlock_user(p2, arg2, ret);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_readlinkat)
    case TARGET_NR_readlinkat:
        {
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (is_proc_myself((const char *)p, "exe")) {
                char real[PATH_MAX], *temp;
                temp = realpath(exec_path, real);
                ret = temp == NULL ? get_errno(-1) : strlen(real) ;
                snprintf((char *)p2, arg4, "%s", real);
            } else {
                ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
            }
            unlock_user(p2, arg3, ret);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_swapon
    case TARGET_NR_swapon:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapon(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_reboot:
        if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
            /* arg4 must be ignored in all other cases */
            p = lock_user_string(arg4);
            if (!p) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(reboot(arg1, arg2, arg3, p));
            unlock_user(p, arg4, 0);
        } else {
            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
        }
        return ret;
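/*
 * On several older 32-bit targets mmap takes a single pointer to a
 * block of six arguments in guest memory instead of passing them in
 * registers, so they must be fetched and byteswapped first.
 */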
#ifdef TARGET_NR_mmap
    case TARGET_NR_mmap:
#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
    defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
    || defined(TARGET_S390X)
        {
            abi_ulong *v;
            abi_ulong v1, v2, v3, v4, v5, v6;
            if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
                return -TARGET_EFAULT;
            v1 = tswapal(v[0]);
            v2 = tswapal(v[1]);
            v3 = tswapal(v[2]);
            v4 = tswapal(v[3]);
            v5 = tswapal(v[4]);
            v6 = tswapal(v[5]);
            unlock_user(v, arg1, 0);
            ret = get_errno(target_mmap(v1, v2, v3,
                                        target_to_host_bitmask(v4, mmap_flags_tbl),
                                        v5, v6));
        }
#else
        ret = get_errno(target_mmap(arg1, arg2, arg3,
                                    target_to_host_bitmask(arg4, mmap_flags_tbl),
                                    arg5,
                                    arg6));
#endif
        return ret;
#endif
#ifdef TARGET_NR_mmap2
    case TARGET_NR_mmap2:
#ifndef MMAP_SHIFT
#define MMAP_SHIFT 12
#endif
        ret = target_mmap(arg1, arg2, arg3,
                          target_to_host_bitmask(arg4, mmap_flags_tbl),
                          arg5, arg6 << MMAP_SHIFT);
        return get_errno(ret);
#endif
    case TARGET_NR_munmap:
        return get_errno(target_munmap(arg1, arg2));
    case TARGET_NR_mprotect:
        {
            TaskState *ts = cpu->opaque;
            /* Special hack to detect libc making the stack executable. */
            if ((arg3 & PROT_GROWSDOWN)
                && arg1 >= ts->info->stack_limit
                && arg1 <= ts->info->start_stack) {
                arg3 &= ~PROT_GROWSDOWN;
                arg2 = arg2 + arg1 - ts->info->stack_limit;
                arg1 = ts->info->stack_limit;
            }
        }
        return get_errno(target_mprotect(arg1, arg2, arg3));
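    /*
     * Note on the PROT_GROWSDOWN rewrite above: glibc uses
     * mprotect(addr, len, prot | PROT_GROWSDOWN) to change protection on
     * the whole stack (e.g. when making it executable).  Since qemu knows
     * the guest stack bounds from ts->info, it expands the request to cover
     * [stack_limit, arg1 + arg2) itself and strips the flag before calling
     * target_mprotect().
     */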
#ifdef TARGET_NR_mremap
    case TARGET_NR_mremap:
        return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
#endif
    /* ??? msync/mlock/munlock are broken for softmmu.  */
#ifdef TARGET_NR_msync
    case TARGET_NR_msync:
        return get_errno(msync(g2h(arg1), arg2, arg3));
#endif
#ifdef TARGET_NR_mlock
    case TARGET_NR_mlock:
        return get_errno(mlock(g2h(arg1), arg2));
#endif
#ifdef TARGET_NR_munlock
    case TARGET_NR_munlock:
        return get_errno(munlock(g2h(arg1), arg2));
#endif
#ifdef TARGET_NR_mlockall
    case TARGET_NR_mlockall:
        return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
#endif
#ifdef TARGET_NR_munlockall
    case TARGET_NR_munlockall:
        return get_errno(munlockall());
#endif
#ifdef TARGET_NR_truncate
    case TARGET_NR_truncate:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(truncate(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate
    case TARGET_NR_ftruncate:
        return get_errno(ftruncate(arg1, arg2));
#endif
    case TARGET_NR_fchmod:
        return get_errno(fchmod(arg1, arg2));
#if defined(TARGET_NR_fchmodat)
    case TARGET_NR_fchmodat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(fchmodat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_getpriority:
        /* Note that negative values are valid for getpriority, so we must
           differentiate based on errno settings. */
        errno = 0;
        ret = getpriority(arg1, arg2);
        if (ret == -1 && errno != 0) {
            return -host_to_target_errno(errno);
        }
#ifdef TARGET_ALPHA
        /* Return value is the unbiased priority.  Signal no error. */
        ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
#else
        /* Return value is a biased priority to avoid negative numbers. */
        ret = 20 - ret;
#endif
        return ret;
    case TARGET_NR_setpriority:
        return get_errno(setpriority(arg1, arg2, arg3));
#ifdef TARGET_NR_statfs
    case TARGET_NR_statfs:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs:
        if (!is_error(ret)) {
            struct target_statfs *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg2, 1);
        }
        return ret;
#endif
#ifdef TARGET_NR_fstatfs
    case TARGET_NR_fstatfs:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs;
#endif
#ifdef TARGET_NR_statfs64
    case TARGET_NR_statfs64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs64:
        if (!is_error(ret)) {
            struct target_statfs64 *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg3, 1);
        }
        return ret;
    case TARGET_NR_fstatfs64:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs64;
#endif
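    /*
     * Note: TARGET_NR_fstatfs and TARGET_NR_fstatfs64 only perform the host
     * call and then jump to convert_statfs / convert_statfs64 above, so the
     * host-to-target field conversion (including the __val -> val f_fsid
     * spelling difference between glibc and the target headers) is written
     * exactly once per structure layout.
     */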
#ifdef TARGET_NR_socketcall
    case TARGET_NR_socketcall:
        return do_socketcall(arg1, arg2);
#endif
#ifdef TARGET_NR_accept
    case TARGET_NR_accept:
        return do_accept4(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_accept4
    case TARGET_NR_accept4:
        return do_accept4(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_bind
    case TARGET_NR_bind:
        return do_bind(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_connect
    case TARGET_NR_connect:
        return do_connect(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getpeername
    case TARGET_NR_getpeername:
        return do_getpeername(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockname
    case TARGET_NR_getsockname:
        return do_getsockname(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockopt
    case TARGET_NR_getsockopt:
        return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_listen
    case TARGET_NR_listen:
        return get_errno(listen(arg1, arg2));
#endif
#ifdef TARGET_NR_recv
    case TARGET_NR_recv:
        return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_recvfrom
    case TARGET_NR_recvfrom:
        return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_recvmsg
    case TARGET_NR_recvmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_send
    case TARGET_NR_send:
        return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_sendmsg
    case TARGET_NR_sendmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 1);
#endif
#ifdef TARGET_NR_sendmmsg
    case TARGET_NR_sendmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
    case TARGET_NR_recvmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
#endif
#ifdef TARGET_NR_sendto
    case TARGET_NR_sendto:
        return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_shutdown
    case TARGET_NR_shutdown:
        return get_errno(shutdown(arg1, arg2));
#endif
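    /*
     * Note: targets such as i386 multiplex all socket operations through a
     * single socketcall(int call, unsigned long *args) syscall, while newer
     * ABIs provide the direct syscalls handled above.  do_socketcall()
     * fetches the argument block from guest memory and redispatches to the
     * same do_bind()/do_connect()/... helpers, so both entry points share
     * one implementation.
     */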
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
    case TARGET_NR_getrandom:
        p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(getrandom(p, arg2, arg3));
        unlock_user(p, arg1, ret);
        return ret;
#endif
#ifdef TARGET_NR_socket
    case TARGET_NR_socket:
        return do_socket(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_socketpair
    case TARGET_NR_socketpair:
        return do_socketpair(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_setsockopt
    case TARGET_NR_setsockopt:
        return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
#endif
#if defined(TARGET_NR_syslog)
    case TARGET_NR_syslog:
        {
            int len = arg2;

            switch (arg1) {
            case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
            case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
            case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
            case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
            case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
            case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
                return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
            case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
            case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
            case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
                {
                    if (len < 0) {
                        return -TARGET_EINVAL;
                    }
                    if (len == 0) {
                        return 0;
                    }
                    p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
                    unlock_user(p, arg2, arg3);
                    return ret;
                }
            default:
                return -TARGET_EINVAL;
            }
        }
        break;
#endif
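    /*
     * Note: only the three READ* syslog actions transfer data back to the
     * guest, which is why they are the only ones that need lock_user; every
     * other action is forwarded with a NULL buffer.  The len < 0 and
     * len == 0 checks replicate the kernel's argument validation before the
     * buffer is touched.
     */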
    case TARGET_NR_setitimer:
        {
            struct itimerval value, ovalue, *pvalue;

            if (arg2) {
                pvalue = &value;
                if (copy_from_user_timeval(&pvalue->it_interval, arg2)
                    || copy_from_user_timeval(&pvalue->it_value,
                                              arg2 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
            } else {
                pvalue = NULL;
            }
            ret = get_errno(setitimer(arg1, pvalue, &ovalue));
            if (!is_error(ret) && arg3) {
                if (copy_to_user_timeval(arg3,
                                         &ovalue.it_interval)
                    || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
                                            &ovalue.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_getitimer:
        {
            struct itimerval value;

            ret = get_errno(getitimer(arg1, &value));
            if (!is_error(ret) && arg2) {
                if (copy_to_user_timeval(arg2,
                                         &value.it_interval)
                    || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
                                            &value.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#ifdef TARGET_NR_stat
    case TARGET_NR_stat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_lstat
    case TARGET_NR_lstat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_fstat
    case TARGET_NR_fstat:
        {
            ret = get_errno(fstat(arg1, &st));
#if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
        do_stat:
#endif
            if (!is_error(ret)) {
                struct target_stat *target_st;

                if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
                    return -TARGET_EFAULT;
                memset(target_st, 0, sizeof(*target_st));
                __put_user(st.st_dev, &target_st->st_dev);
                __put_user(st.st_ino, &target_st->st_ino);
                __put_user(st.st_mode, &target_st->st_mode);
                __put_user(st.st_uid, &target_st->st_uid);
                __put_user(st.st_gid, &target_st->st_gid);
                __put_user(st.st_nlink, &target_st->st_nlink);
                __put_user(st.st_rdev, &target_st->st_rdev);
                __put_user(st.st_size, &target_st->st_size);
                __put_user(st.st_blksize, &target_st->st_blksize);
                __put_user(st.st_blocks, &target_st->st_blocks);
                __put_user(st.st_atime, &target_st->target_st_atime);
                __put_user(st.st_mtime, &target_st->target_st_mtime);
                __put_user(st.st_ctime, &target_st->target_st_ctime);
#if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
    defined(TARGET_STAT_HAVE_NSEC)
                __put_user(st.st_atim.tv_nsec,
                           &target_st->target_st_atime_nsec);
                __put_user(st.st_mtim.tv_nsec,
                           &target_st->target_st_mtime_nsec);
                __put_user(st.st_ctim.tv_nsec,
                           &target_st->target_st_ctime_nsec);
#endif
                unlock_user_struct(target_st, arg2, 1);
            }
        }
        return ret;
#endif
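    /*
     * Note: stat, lstat and fstat all funnel through the do_stat label
     * above so the target_stat conversion exists only once.  The nanosecond
     * fields are copied only when the host exposes st_atim/st_mtim/st_ctim
     * (POSIX.1-2008) and the target_stat layout reserves room for them
     * (TARGET_STAT_HAVE_NSEC).
     */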
    case TARGET_NR_vhangup:
        return get_errno(vhangup());
#ifdef TARGET_NR_syscall
    case TARGET_NR_syscall:
        return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
                          arg6, arg7, arg8, 0);
#endif
    case TARGET_NR_wait4:
        {
            int status;
            abi_long status_ptr = arg2;
            struct rusage rusage, *rusage_ptr;
            abi_ulong target_rusage = arg4;
            abi_long rusage_err;
            if (target_rusage) {
                rusage_ptr = &rusage;
            } else {
                rusage_ptr = NULL;
            }
            ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
            if (!is_error(ret)) {
                if (status_ptr && ret) {
                    status = host_to_target_waitstatus(status);
                    if (put_user_s32(status, status_ptr))
                        return -TARGET_EFAULT;
                }
                if (target_rusage) {
                    rusage_err = host_to_target_rusage(target_rusage, &rusage);
                    if (rusage_err) {
                        ret = rusage_err;
                    }
                }
            }
        }
        return ret;
#ifdef TARGET_NR_swapoff
    case TARGET_NR_swapoff:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapoff(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_sysinfo:
        {
            struct target_sysinfo *target_value;
            struct sysinfo value;
            ret = get_errno(sysinfo(&value));
            if (!is_error(ret) && arg1)
            {
                if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
                    return -TARGET_EFAULT;
                __put_user(value.uptime, &target_value->uptime);
                __put_user(value.loads[0], &target_value->loads[0]);
                __put_user(value.loads[1], &target_value->loads[1]);
                __put_user(value.loads[2], &target_value->loads[2]);
                __put_user(value.totalram, &target_value->totalram);
                __put_user(value.freeram, &target_value->freeram);
                __put_user(value.sharedram, &target_value->sharedram);
                __put_user(value.bufferram, &target_value->bufferram);
                __put_user(value.totalswap, &target_value->totalswap);
                __put_user(value.freeswap, &target_value->freeswap);
                __put_user(value.procs, &target_value->procs);
                __put_user(value.totalhigh, &target_value->totalhigh);
                __put_user(value.freehigh, &target_value->freehigh);
                __put_user(value.mem_unit, &target_value->mem_unit);
                unlock_user_struct(target_value, arg1, 1);
            }
        }
        return ret;
#ifdef TARGET_NR_ipc
    case TARGET_NR_ipc:
        return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_semget
    case TARGET_NR_semget:
        return get_errno(semget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_semop
    case TARGET_NR_semop:
        return do_semop(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_semctl
    case TARGET_NR_semctl:
        return do_semctl(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_msgctl
    case TARGET_NR_msgctl:
        return do_msgctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_msgget
    case TARGET_NR_msgget:
        return get_errno(msgget(arg1, arg2));
#endif
#ifdef TARGET_NR_msgrcv
    case TARGET_NR_msgrcv:
        return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_msgsnd
    case TARGET_NR_msgsnd:
        return do_msgsnd(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_shmget
    case TARGET_NR_shmget:
        return get_errno(shmget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_shmctl
    case TARGET_NR_shmctl:
        return do_shmctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmat
    case TARGET_NR_shmat:
        return do_shmat(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmdt
    case TARGET_NR_shmdt:
        return do_shmdt(arg1);
#endif
    case TARGET_NR_fsync:
        return get_errno(fsync(arg1));
    case TARGET_NR_clone:
        /* Linux manages to have three different orderings for its
         * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
         * match the kernel's CONFIG_CLONE_* settings.
         * Microblaze is further special in that it uses a sixth
         * implicit argument to clone for the TLS pointer.
         */
#if defined(TARGET_MICROBLAZE)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
#elif defined(TARGET_CLONE_BACKWARDS)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
#elif defined(TARGET_CLONE_BACKWARDS2)
        ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
#else
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
#endif
        return ret;
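    /*
     * Note on the clone variants above (see do_fork() for the authoritative
     * parameter order, roughly (env, flags, newsp, parent_tidptr, newtls,
     * child_tidptr)):
     *   default:          tidptrs in arg3/arg4, TLS in arg5
     *   CLONE_BACKWARDS:  TLS in arg4, child tidptr in arg5
     *   CLONE_BACKWARDS2: flags and new stack pointer swapped (arg2/arg1)
     *   Microblaze:       TLS carried in a sixth implicit register (arg6)
     */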
#ifdef __NR_exit_group
        /* new thread calls */
    case TARGET_NR_exit_group:
        preexit_cleanup(cpu_env, arg1);
        return get_errno(exit_group(arg1));
#endif
    case TARGET_NR_setdomainname:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(setdomainname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
    case TARGET_NR_uname:
        /* no need to transcode because we use the linux syscall */
        {
            struct new_utsname * buf;

            if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
                return -TARGET_EFAULT;
            ret = get_errno(sys_uname(buf));
            if (!is_error(ret)) {
                /* Overwrite the native machine name with whatever is being
                   emulated. */
                g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
                          sizeof(buf->machine));
                /* Allow the user to override the reported release. */
                if (qemu_uname_release && *qemu_uname_release) {
                    g_strlcpy(buf->release, qemu_uname_release,
                              sizeof(buf->release));
                }
            }
            unlock_user_struct(buf, arg1, 1);
        }
        return ret;
#ifdef TARGET_I386
    case TARGET_NR_modify_ldt:
        return do_modify_ldt(cpu_env, arg1, arg2, arg3);
#if !defined(TARGET_X86_64)
    case TARGET_NR_vm86:
        return do_vm86(cpu_env, arg1, arg2);
#endif
#endif
    case TARGET_NR_adjtimex:
        {
            struct timex host_buf;

            if (target_to_host_timex(&host_buf, arg1) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(adjtimex(&host_buf));
            if (!is_error(ret)) {
                if (host_to_target_timex(arg1, &host_buf) != 0) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime:
        {
            struct timex htx, *phtx = &htx;

            if (target_to_host_timex(phtx, arg2) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(clock_adjtime(arg1, phtx));
            if (!is_error(ret) && phtx) {
                if (host_to_target_timex(arg2, phtx) != 0) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
    case TARGET_NR_getpgid:
        return get_errno(getpgid(arg1));
    case TARGET_NR_fchdir:
        return get_errno(fchdir(arg1));
    case TARGET_NR_personality:
        return get_errno(personality(arg1));
#ifdef TARGET_NR__llseek /* Not on alpha */
    case TARGET_NR__llseek:
        {
            int64_t res;
#if !defined(__NR_llseek)
            res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
            if (res == -1) {
                ret = get_errno(res);
            } else {
                ret = 0;
            }
#else
            ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
#endif
            if ((ret == 0) && put_user_s64(res, arg4)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
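    /*
     * Note: _llseek exists so 32-bit guests can seek past 2GB; the offset
     * arrives split across two registers (arg2 = high word, arg3 = low
     * word) and the 64-bit result is written back through the arg4 pointer.
     * On hosts without __NR_llseek the same effect is obtained with a plain
     * 64-bit lseek(), assembling the offset as
     * ((uint64_t)arg2 << 32) | (abi_ulong)arg3.
     */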
#ifdef TARGET_NR_getdents
    case TARGET_NR_getdents:
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
#if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
        {
            struct target_dirent *target_dirp;
            struct linux_dirent *dirp;
            abi_long count = arg3;

            dirp = g_try_malloc(count);
            if (!dirp) {
                return -TARGET_ENOMEM;
            }

            ret = get_errno(sys_getdents(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent *de;
                struct target_dirent *tde;
                int len = ret;
                int reclen, treclen;
                int count1, tnamelen;

                count1 = 0;
                de = dirp;
                if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                    return -TARGET_EFAULT;
                tde = target_dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    tnamelen = reclen - offsetof(struct linux_dirent, d_name);
                    assert(tnamelen >= 0);
                    treclen = tnamelen + offsetof(struct target_dirent, d_name);
                    assert(count1 + treclen <= count);
                    tde->d_reclen = tswap16(treclen);
                    tde->d_ino = tswapal(de->d_ino);
                    tde->d_off = tswapal(de->d_off);
                    memcpy(tde->d_name, de->d_name, tnamelen);
                    de = (struct linux_dirent *)((char *)de + reclen);
                    len -= reclen;
                    tde = (struct target_dirent *)((char *)tde + treclen);
                    count1 += treclen;
                }
                ret = count1;
                unlock_user(target_dirp, arg2, ret);
            }
            g_free(dirp);
        }
#else
        {
            struct linux_dirent *dirp;
            abi_long count = arg3;

            if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                return -TARGET_EFAULT;
            ret = get_errno(sys_getdents(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent *de;
                int len = ret;
                int reclen;
                de = dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    if (reclen > len)
                        break;
                    de->d_reclen = tswap16(reclen);
                    tswapls(&de->d_ino);
                    tswapls(&de->d_off);
                    de = (struct linux_dirent *)((char *)de + reclen);
                    len -= reclen;
                }
            }
            unlock_user(dirp, arg2, ret);
        }
#endif
#else
        /* Implement getdents in terms of getdents64 */
        {
            struct linux_dirent64 *dirp;
            abi_long count = arg3;

            dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
            if (!dirp) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(sys_getdents64(arg1, dirp, count));
            if (!is_error(ret)) {
                /* Convert the dirent64 structs to target dirent.  We do this
                 * in-place, since we can guarantee that a target_dirent is no
                 * larger than a dirent64; however this means we have to be
                 * careful to read everything before writing in the new format.
                 */
                struct linux_dirent64 *de;
                struct target_dirent *tde;
                int len = ret;
                int tlen = 0;

                de = dirp;
                tde = (struct target_dirent *)dirp;
                while (len > 0) {
                    int namelen, treclen;
                    int reclen = de->d_reclen;
                    uint64_t ino = de->d_ino;
                    int64_t off = de->d_off;
                    uint8_t type = de->d_type;

                    namelen = strlen(de->d_name);
                    treclen = offsetof(struct target_dirent, d_name)
                        + namelen + 2;
                    treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));

                    memmove(tde->d_name, de->d_name, namelen + 1);
                    tde->d_ino = tswapal(ino);
                    tde->d_off = tswapal(off);
                    tde->d_reclen = tswap16(treclen);
                    /* The target_dirent type is in what was formerly a padding
                     * byte at the end of the structure:
                     */
                    *(((char *)tde) + treclen - 1) = type;

                    de = (struct linux_dirent64 *)((char *)de + reclen);
                    tde = (struct target_dirent *)((char *)tde + treclen);
                    len -= reclen;
                    tlen += treclen;
                }
                ret = tlen;
            }
            unlock_user(dirp, arg2, ret);
        }
#endif
        return ret;
#endif /* TARGET_NR_getdents */
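    /*
     * Note: three getdents strategies above, selected at build time:
     * (1) host getdents with 64-to-32-bit record narrowing through a bounce
     * buffer, (2) host getdents with in-place byteswapping when the layouts
     * already match, and (3) emulation via getdents64 that rewrites records
     * in place.  Strategy (3) is safe because a target_dirent never exceeds
     * the linux_dirent64 it replaces, but every field must be read before
     * the record is overwritten, and d_type moves to the final padding byte
     * of each rewritten record.
     */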
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
    case TARGET_NR_getdents64:
        {
            struct linux_dirent64 *dirp;
            abi_long count = arg3;
            if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                return -TARGET_EFAULT;
            ret = get_errno(sys_getdents64(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent64 *de;
                int len = ret;
                int reclen;
                de = dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    if (reclen > len)
                        break;
                    de->d_reclen = tswap16(reclen);
                    tswap64s((uint64_t *)&de->d_ino);
                    tswap64s((uint64_t *)&de->d_off);
                    de = (struct linux_dirent64 *)((char *)de + reclen);
                    len -= reclen;
                }
            }
            unlock_user(dirp, arg2, ret);
        }
        return ret;
#endif /* TARGET_NR_getdents64 */
#if defined(TARGET_NR__newselect)
    case TARGET_NR__newselect:
        return do_select(arg1, arg2, arg3, arg4, arg5);
#endif
#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
# ifdef TARGET_NR_poll
    case TARGET_NR_poll:
# endif
# ifdef TARGET_NR_ppoll
    case TARGET_NR_ppoll:
# endif
    {
        struct target_pollfd *target_pfd;
        unsigned int nfds = arg2;
        struct pollfd *pfd;
        unsigned int i;

        pfd = NULL;
        target_pfd = NULL;
        if (nfds) {
            if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
                return -TARGET_EINVAL;
            }
            target_pfd = lock_user(VERIFY_WRITE, arg1,
                                   sizeof(struct target_pollfd) * nfds, 1);
            if (!target_pfd) {
                return -TARGET_EFAULT;
            }

            pfd = alloca(sizeof(struct pollfd) * nfds);
            for (i = 0; i < nfds; i++) {
                pfd[i].fd = tswap32(target_pfd[i].fd);
                pfd[i].events = tswap16(target_pfd[i].events);
            }
        }

        switch (num) {
# ifdef TARGET_NR_ppoll
        case TARGET_NR_ppoll:
        {
            struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg3) {
                if (target_to_host_timespec(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                timeout_ts = NULL;
            }

            if (arg4) {
                if (arg5 != sizeof(target_sigset_t)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EINVAL;
                }

                target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
                if (!target_set) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
                target_to_host_sigset(set, target_set);
            } else {
                set = NULL;
            }

            ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
                                       set, SIGSET_T_SIZE));

            if (!is_error(ret) && arg3) {
                host_to_target_timespec(arg3, timeout_ts);
            }
            if (arg4) {
                unlock_user(target_set, arg4, 0);
            }
            break;
        }
# endif
# ifdef TARGET_NR_poll
        case TARGET_NR_poll:
        {
            struct timespec ts, *pts;

            if (arg3 >= 0) {
                /* Convert ms to secs, ns */
                ts.tv_sec = arg3 / 1000;
                ts.tv_nsec = (arg3 % 1000) * 1000000LL;
                pts = &ts;
            } else {
                /* -ve poll() timeout means "infinite" */
                pts = NULL;
            }
            ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
            break;
        }
# endif
        default:
            g_assert_not_reached();
        }

        if (!is_error(ret)) {
            for(i = 0; i < nfds; i++) {
                target_pfd[i].revents = tswap16(pfd[i].revents);
            }
        }
        unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
    }
    return ret;
#endif
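    /*
     * Note: both poll and ppoll are emulated via safe_ppoll() so that a
     * guest signal arriving mid-wait can restart the syscall correctly.
     * For plain poll, the millisecond timeout is converted to a timespec
     * (negative means "wait forever" and is passed as NULL); for ppoll, the
     * guest sigset is converted and applied atomically by the host kernel,
     * and the remaining time is written back when arg3 is non-NULL.
     */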
    case TARGET_NR_flock:
        /* NOTE: the flock constant seems to be the same for every
           Linux platform */
        return get_errno(safe_flock(arg1, arg2));
    case TARGET_NR_readv:
        {
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                ret = get_errno(safe_readv(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
    case TARGET_NR_writev:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(safe_writev(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#if defined(TARGET_NR_preadv)
    case TARGET_NR_preadv:
        {
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                unsigned long low, high;

                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_pwritev)
    case TARGET_NR_pwritev:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                unsigned long low, high;

                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
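    /*
     * Note: target_to_host_low_high() converts the guest's 64-bit file
     * offset (which a 32-bit guest passes as a register pair in arg4/arg5)
     * into the low/high unsigned long halves that the host preadv/pwritev
     * syscalls expect, so the same code path serves 32- and 64-bit ABIs.
     */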
    case TARGET_NR_getsid:
        return get_errno(getsid(arg1));
#if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
    case TARGET_NR_fdatasync:
        return get_errno(fdatasync(arg1));
#endif
#ifdef TARGET_NR__sysctl
    case TARGET_NR__sysctl:
        /* We don't implement this, but ENOTDIR is always a safe
           return value. */
        return -TARGET_ENOTDIR;
#endif
    case TARGET_NR_sched_getaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_getaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                return -TARGET_EINVAL;
            }
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);

            mask = alloca(mask_size);
            memset(mask, 0, mask_size);
            ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));

            if (!is_error(ret)) {
                if (ret > arg2) {
                    /* More data returned than the caller's buffer will fit.
                     * This only happens if sizeof(abi_long) < sizeof(long)
                     * and the caller passed us a buffer holding an odd number
                     * of abi_longs. If the host kernel is actually using the
                     * extra 4 bytes then fail EINVAL; otherwise we can just
                     * ignore them and only copy the interesting part.
                     */
                    int numcpus = sysconf(_SC_NPROCESSORS_CONF);
                    if (numcpus > arg2 * 8) {
                        return -TARGET_EINVAL;
                    }
                    ret = arg2;
                }

                if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    case TARGET_NR_sched_setaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_setaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                return -TARGET_EINVAL;
            }
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
            mask = alloca(mask_size);

            ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
            if (ret) {
                return ret;
            }

            return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
        }
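    /*
     * Note: both affinity wrappers round the guest's byte count up to a
     * whole number of host 'unsigned long' words because the kernel ABI
     * only accepts mask sizes at that granularity; a guest size that is not
     * a multiple of sizeof(abi_ulong) is rejected with EINVAL first,
     * matching the kernel's own sched_{get,set}affinity validation.
     */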
    case TARGET_NR_getcpu:
        {
            unsigned cpu, node;
            ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
                                       arg2 ? &node : NULL,
                                       NULL));
            if (is_error(ret)) {
                return ret;
            }
            if (arg1 && put_user_u32(cpu, arg1)) {
                return -TARGET_EFAULT;
            }
            if (arg2 && put_user_u32(node, arg2)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_sched_setparam:
        {
            struct sched_param *target_schp;
            struct sched_param schp;

            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
                return -TARGET_EFAULT;
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg2, 0);
            return get_errno(sched_setparam(arg1, &schp));
        }
    case TARGET_NR_sched_getparam:
        {
            struct sched_param *target_schp;
            struct sched_param schp;

            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            ret = get_errno(sched_getparam(arg1, &schp));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
                    return -TARGET_EFAULT;
                target_schp->sched_priority = tswap32(schp.sched_priority);
                unlock_user_struct(target_schp, arg2, 1);
            }
        }
        return ret;
    case TARGET_NR_sched_setscheduler:
        {
            struct sched_param *target_schp;
            struct sched_param schp;
            if (arg3 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
                return -TARGET_EFAULT;
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg3, 0);
            return get_errno(sched_setscheduler(arg1, arg2, &schp));
        }
    case TARGET_NR_sched_getscheduler:
        return get_errno(sched_getscheduler(arg1));
    case TARGET_NR_sched_yield:
        return get_errno(sched_yield());
    case TARGET_NR_sched_get_priority_max:
        return get_errno(sched_get_priority_max(arg1));
    case TARGET_NR_sched_get_priority_min:
        return get_errno(sched_get_priority_min(arg1));
#ifdef TARGET_NR_sched_rr_get_interval
    case TARGET_NR_sched_rr_get_interval:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                ret = host_to_target_timespec(arg2, &ts);
            }
        }
        return ret;
#endif
    case TARGET_NR_nanosleep:
        {
            struct timespec req, rem;
            target_to_host_timespec(&req, arg1);
            ret = get_errno(safe_nanosleep(&req, &rem));
            if (is_error(ret) && arg2) {
                host_to_target_timespec(arg2, &rem);
            }
        }
        return ret;
    case TARGET_NR_prctl:
        switch (arg1) {
        case PR_GET_PDEATHSIG:
        {
            int deathsig;
            ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
            if (!is_error(ret) && arg2
                && put_user_ual(deathsig, arg2)) {
                return -TARGET_EFAULT;
            }
            return ret;
        }
        case PR_GET_NAME:
        {
            void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(arg1, (unsigned long)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 16);
            return ret;
        }
        case PR_SET_NAME:
        {
            void *name = lock_user(VERIFY_READ, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(arg1, (unsigned long)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 0);
            return ret;
        }
#ifdef TARGET_MIPS
        case TARGET_PR_GET_FP_MODE:
        {
            CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
            ret = 0;
            if (env->CP0_Status & (1 << CP0St_FR)) {
                ret |= TARGET_PR_FP_MODE_FR;
            }
            if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
                ret |= TARGET_PR_FP_MODE_FRE;
            }
            return ret;
        }
        case TARGET_PR_SET_FP_MODE:
        {
            CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
            bool old_fr = env->CP0_Status & (1 << CP0St_FR);
            bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
            bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
            bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;

            const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
                                            TARGET_PR_FP_MODE_FRE;

            /* If nothing to change, return right away, successfully. */
            if (old_fr == new_fr && old_fre == new_fre) {
                return 0;
            }
            /* Check the value is valid */
            if (arg2 & ~known_bits) {
                return -TARGET_EOPNOTSUPP;
            }
            /* Setting FRE without FR is not supported. */
            if (new_fre && !new_fr) {
                return -TARGET_EOPNOTSUPP;
            }
            if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
                /* FR1 is not supported */
                return -TARGET_EOPNOTSUPP;
            }
            if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
                && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
                /* cannot set FR=0 */
                return -TARGET_EOPNOTSUPP;
            }
            if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
                /* Cannot set FRE=1 */
                return -TARGET_EOPNOTSUPP;
            }

            int i;
            fpr_t *fpr = env->active_fpu.fpr;
            for (i = 0; i < 32 ; i += 2) {
                if (!old_fr && new_fr) {
                    fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
                } else if (old_fr && !new_fr) {
                    fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
                }
            }

            if (new_fr) {
                env->CP0_Status |= (1 << CP0St_FR);
                env->hflags |= MIPS_HFLAG_F64;
            } else {
                env->CP0_Status &= ~(1 << CP0St_FR);
                env->hflags &= ~MIPS_HFLAG_F64;
            }
            if (new_fre) {
                env->CP0_Config5 |= (1 << CP0C5_FRE);
                if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
                    env->hflags |= MIPS_HFLAG_FRE;
                }
            } else {
                env->CP0_Config5 &= ~(1 << CP0C5_FRE);
                env->hflags &= ~MIPS_HFLAG_FRE;
            }

            return 0;
        }
#endif /* MIPS */
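        /*
         * Note on TARGET_PR_SET_FP_MODE above: the register loop re-packs
         * the FPU register file when switching between FR=0 (doubles held
         * in even/odd 32-bit register pairs) and FR=1 (32 full 64-bit
         * registers).  The high word of each even register and the
         * following odd register exchange roles, so one word is copied per
         * pair in the direction of the switch before CP0_Status,
         * CP0_Config5 and the cached hflags are updated.
         */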
#ifdef TARGET_AARCH64
        case TARGET_PR_SVE_SET_VL:
            /*
             * We cannot support either PR_SVE_SET_VL_ONEXEC or
             * PR_SVE_VL_INHERIT.  Note the kernel definition
             * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
             * even though the current architectural maximum is VQ=16.
             */
            ret = -TARGET_EINVAL;
            if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
                && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
                CPUARMState *env = cpu_env;
                ARMCPU *cpu = env_archcpu(env);
                uint32_t vq, old_vq;

                old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
                vq = MAX(arg2 / 16, 1);
                vq = MIN(vq, cpu->sve_max_vq);

                if (vq < old_vq) {
                    aarch64_sve_narrow_vq(env, vq);
                }
                env->vfp.zcr_el[1] = vq - 1;
                arm_rebuild_hflags(env);
                ret = vq * 16;
            }
            return ret;
        case TARGET_PR_SVE_GET_VL:
            ret = -TARGET_EINVAL;
            {
                ARMCPU *cpu = env_archcpu(cpu_env);
                if (cpu_isar_feature(aa64_sve, cpu)) {
                    ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
                }
            }
            return ret;
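        /*
         * Note: the SVE vector length is stored in ZCR_EL1 as "number of
         * 128-bit quadwords minus one", hence the (zcr_el[1] & 0xf) + 1
         * reads and the vq - 1 write above.  PR_SVE_SET_VL therefore clamps
         * the requested byte length to [16, 16 * sve_max_vq], and narrowing
         * calls aarch64_sve_narrow_vq() first to squash live register state
         * down to the new width.
         */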
        case TARGET_PR_PAC_RESET_KEYS:
            {
                CPUARMState *env = cpu_env;
                ARMCPU *cpu = env_archcpu(env);

                if (arg3 || arg4 || arg5) {
                    return -TARGET_EINVAL;
                }
                if (cpu_isar_feature(aa64_pauth, cpu)) {
                    int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
                               TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
                               TARGET_PR_PAC_APGAKEY);
                    int ret = 0;
                    Error *err = NULL;

                    if (arg2 == 0) {
                        arg2 = all;
                    } else if (arg2 & ~all) {
                        return -TARGET_EINVAL;
                    }
                    if (arg2 & TARGET_PR_PAC_APIAKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apia,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (arg2 & TARGET_PR_PAC_APIBKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apib,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (arg2 & TARGET_PR_PAC_APDAKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apda,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (arg2 & TARGET_PR_PAC_APDBKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apdb,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (arg2 & TARGET_PR_PAC_APGAKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apga,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (ret != 0) {
                        /*
                         * Some unknown failure in the crypto.  The best
                         * we can do is log it and fail the syscall.
                         * The real syscall cannot fail this way.
                         */
                        qemu_log_mask(LOG_UNIMP,
                                      "PR_PAC_RESET_KEYS: Crypto failure: %s",
                                      error_get_pretty(err));
                        error_free(err);
                        return -TARGET_EIO;
                    }
                    return 0;
                }
            }
            return -TARGET_EINVAL;
#endif /* AARCH64 */
        case PR_GET_SECCOMP:
        case PR_SET_SECCOMP:
            /* Disable seccomp to prevent the target disabling syscalls we
             * need. */
            return -TARGET_EINVAL;
        default:
            /* Most prctl options have no pointer arguments */
            return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
        }
        break;
#ifdef TARGET_NR_arch_prctl
    case TARGET_NR_arch_prctl:
#if defined(TARGET_I386) && !defined(TARGET_ABI32)
        return do_arch_prctl(cpu_env, arg1, arg2);
#else
#error unreachable
#endif
#endif
#ifdef TARGET_NR_pread64
    case TARGET_NR_pread64:
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, ret);
        return ret;
    case TARGET_NR_pwrite64:
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_getcwd:
        if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
            return -TARGET_EFAULT;
        ret = get_errno(sys_getcwd1(p, arg2));
        unlock_user(p, arg1, ret);
        return ret;
    case TARGET_NR_capget:
    case TARGET_NR_capset:
        {
            struct target_user_cap_header *target_header;
            struct target_user_cap_data *target_data = NULL;
            struct __user_cap_header_struct header;
            struct __user_cap_data_struct data[2];
            struct __user_cap_data_struct *dataptr = NULL;
            int i, target_datalen;
            int data_items = 1;

            if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
                return -TARGET_EFAULT;
            }
            header.version = tswap32(target_header->version);
            header.pid = tswap32(target_header->pid);

            if (header.version != _LINUX_CAPABILITY_VERSION) {
                /* Version 2 and up takes pointer to two user_data structs */
                data_items = 2;
            }

            target_datalen = sizeof(*target_data) * data_items;

            if (arg2) {
                if (num == TARGET_NR_capget) {
                    target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
                } else {
                    target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
                }
                if (!target_data) {
                    unlock_user_struct(target_header, arg1, 0);
                    return -TARGET_EFAULT;
                }

                if (num == TARGET_NR_capset) {
                    for (i = 0; i < data_items; i++) {
                        data[i].effective = tswap32(target_data[i].effective);
                        data[i].permitted = tswap32(target_data[i].permitted);
                        data[i].inheritable = tswap32(target_data[i].inheritable);
                    }
                }

                dataptr = data;
            }

            if (num == TARGET_NR_capget) {
                ret = get_errno(capget(&header, dataptr));
            } else {
                ret = get_errno(capset(&header, dataptr));
            }

            /* The kernel always updates version for both capget and capset */
            target_header->version = tswap32(header.version);
            unlock_user_struct(target_header, arg1, 1);

            if (arg2) {
                if (num == TARGET_NR_capget) {
                    for (i = 0; i < data_items; i++) {
                        target_data[i].effective = tswap32(data[i].effective);
                        target_data[i].permitted = tswap32(data[i].permitted);
                        target_data[i].inheritable = tswap32(data[i].inheritable);
                    }
                    unlock_user(target_data, arg2, target_datalen);
                } else {
                    unlock_user(target_data, arg2, 0);
                }
            }
            return ret;
        }
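    /*
     * Note: capability version 1 uses a single __user_cap_data_struct,
     * while later versions take an array of two (to cover 64 capability
     * bits), which is what the data_items selection above encodes.  arg2
     * may legitimately be NULL, e.g. when a guest probes the kernel's
     * preferred version by calling capget with a NULL data pointer and
     * reading back header.version; the kernel updates the version field on
     * every call, so it is always copied back.
     */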
    case TARGET_NR_sigaltstack:
        return do_sigaltstack(arg1, arg2,
                              get_sp_from_cpustate((CPUArchState *)cpu_env));

#ifdef CONFIG_SENDFILE
#ifdef TARGET_NR_sendfile
    case TARGET_NR_sendfile:
    {
        off_t *offp = NULL;
        off_t off;
        if (arg3) {
            ret = get_user_sal(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            abi_long ret2 = put_user_sal(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_sendfile64
    case TARGET_NR_sendfile64:
    {
        off_t *offp = NULL;
        off_t off;
        if (arg3) {
            ret = get_user_s64(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            abi_long ret2 = put_user_s64(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#endif
#ifdef TARGET_NR_vfork
    case TARGET_NR_vfork:
        return get_errno(do_fork(cpu_env,
                         CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
                         0, 0, 0, 0));
#endif
#ifdef TARGET_NR_ugetrlimit
    case TARGET_NR_ugetrlimit:
    {
        struct rlimit rlim;
        int resource = target_to_host_resource(arg1);
        ret = get_errno(getrlimit(resource, &rlim));
        if (!is_error(ret)) {
            struct target_rlimit *target_rlim;
            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                return -TARGET_EFAULT;
            target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
            target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_truncate64
    case TARGET_NR_truncate64:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate64
    case TARGET_NR_ftruncate64:
        return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_stat64
    case TARGET_NR_stat64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_lstat64
    case TARGET_NR_lstat64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_fstat64
    case TARGET_NR_fstat64:
        ret = get_errno(fstat(arg1, &st));
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
#ifdef TARGET_NR_fstatat64
    case TARGET_NR_fstatat64:
#endif
#ifdef TARGET_NR_newfstatat
    case TARGET_NR_newfstatat:
#endif
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(fstatat(arg1, path(p), &st, arg4));
        unlock_user(p, arg2, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg3, &st);
        return ret;
#endif
#if defined(TARGET_NR_statx)
    case TARGET_NR_statx:
        {
            struct target_statx *target_stx;
            int dirfd = arg1;
            int flags = arg3;

            p = lock_user_string(arg2);
            if (p == NULL) {
                return -TARGET_EFAULT;
            }
#if defined(__NR_statx)
            {
                /*
                 * It is assumed that struct statx is architecture independent.
                 */
                struct target_statx host_stx;
                int mask = arg4;

                ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
                if (!is_error(ret)) {
                    if (host_to_target_statx(&host_stx, arg5) != 0) {
                        unlock_user(p, arg2, 0);
                        return -TARGET_EFAULT;
                    }
                }

                if (ret != -TARGET_ENOSYS) {
                    unlock_user(p, arg2, 0);
                    return ret;
                }
            }
#endif
            ret = get_errno(fstatat(dirfd, path(p), &st, flags));
            unlock_user(p, arg2, 0);

            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
                    return -TARGET_EFAULT;
                }
                memset(target_stx, 0, sizeof(*target_stx));
                __put_user(major(st.st_dev), &target_stx->stx_dev_major);
                __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
                __put_user(st.st_ino, &target_stx->stx_ino);
                __put_user(st.st_mode, &target_stx->stx_mode);
                __put_user(st.st_uid, &target_stx->stx_uid);
                __put_user(st.st_gid, &target_stx->stx_gid);
                __put_user(st.st_nlink, &target_stx->stx_nlink);
                __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
                __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
                __put_user(st.st_size, &target_stx->stx_size);
                __put_user(st.st_blksize, &target_stx->stx_blksize);
                __put_user(st.st_blocks, &target_stx->stx_blocks);
                __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
                __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
                __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
                unlock_user_struct(target_stx, arg5, 1);
            }
        }
        return ret;
#endif
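    /*
     * Note: when the host has __NR_statx the call is forwarded directly
     * (struct statx is assumed architecture-independent); if the host
     * kernel answers ENOSYS, or lacks the syscall entirely, the request is
     * satisfied with fstatat() and any target_statx fields that fstatat
     * cannot provide (e.g. stx_btime) are simply left zeroed by the memset
     * above.
     */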
#ifdef TARGET_NR_lchown
    case TARGET_NR_lchown:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid
    case TARGET_NR_getuid:
        return get_errno(high2lowuid(getuid()));
#endif
#ifdef TARGET_NR_getgid
    case TARGET_NR_getgid:
        return get_errno(high2lowgid(getgid()));
#endif
#ifdef TARGET_NR_geteuid
    case TARGET_NR_geteuid:
        return get_errno(high2lowuid(geteuid()));
#endif
#ifdef TARGET_NR_getegid
    case TARGET_NR_getegid:
        return get_errno(high2lowgid(getegid()));
#endif
    case TARGET_NR_setreuid:
        return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
    case TARGET_NR_setregid:
        return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
    case TARGET_NR_getgroups:
        {
            int gidsetsize = arg1;
            target_id *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0)
                return ret;
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
                if (!target_grouplist)
                    return -TARGET_EFAULT;
                for(i = 0;i < ret; i++)
                    target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
                unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
            }
        }
        return ret;
    case TARGET_NR_setgroups:
        {
            int gidsetsize = arg1;
            target_id *target_grouplist;
            gid_t *grouplist = NULL;
            int i;
            if (gidsetsize) {
                grouplist = alloca(gidsetsize * sizeof(gid_t));
                target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < gidsetsize; i++) {
                    grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
                }
                unlock_user(target_grouplist, arg2, 0);
            }
            return get_errno(setgroups(gidsetsize, grouplist));
        }
    case TARGET_NR_fchown:
        return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
#if defined(TARGET_NR_fchownat)
    case TARGET_NR_fchownat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
                                 low2highgid(arg4), arg5));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_setresuid
    case TARGET_NR_setresuid:
        return get_errno(sys_setresuid(low2highuid(arg1),
                                       low2highuid(arg2),
                                       low2highuid(arg3)));
#endif
#ifdef TARGET_NR_getresuid
    case TARGET_NR_getresuid:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_id(high2lowuid(ruid), arg1)
                    || put_user_id(high2lowuid(euid), arg2)
                    || put_user_id(high2lowuid(suid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_getresgid
    case TARGET_NR_setresgid:
        return get_errno(sys_setresgid(low2highgid(arg1),
                                       low2highgid(arg2),
                                       low2highgid(arg3)));
#endif
#ifdef TARGET_NR_getresgid
    case TARGET_NR_getresgid:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_id(high2lowgid(rgid), arg1)
                    || put_user_id(high2lowgid(egid), arg2)
                    || put_user_id(high2lowgid(sgid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown
    case TARGET_NR_chown:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_setuid:
        return get_errno(sys_setuid(low2highuid(arg1)));
    case TARGET_NR_setgid:
        return get_errno(sys_setgid(low2highgid(arg1)));
    case TARGET_NR_setfsuid:
        return get_errno(setfsuid(arg1));
    case TARGET_NR_setfsgid:
        return get_errno(setfsgid(arg1));

#ifdef TARGET_NR_lchown32
    case TARGET_NR_lchown32:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid32
    case TARGET_NR_getuid32:
        return get_errno(getuid());
#endif

#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
   /* Alpha specific */
    case TARGET_NR_getxuid:
         {
            uid_t euid;
            euid=geteuid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
         }
        return get_errno(getuid());
#endif
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
   /* Alpha specific */
    case TARGET_NR_getxgid:
         {
            uid_t egid;
            egid=getegid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
         }
        return get_errno(getgid());
#endif
#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_getsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_GSI_IEEE_FP_CONTROL:
            {
                uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
                uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;

                swcr &= ~SWCR_STATUS_MASK;
                swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;

                if (put_user_u64 (swcr, arg2))
                    return -TARGET_EFAULT;
                ret = 0;
            }
            break;

          /* case GSI_IEEE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel.
             case GSI_UACPROC:
             -- Retrieves current unaligned access state; not much used.
             case GSI_PROC_TYPE:
             -- Retrieves implver information; surely not used.
             case GSI_GET_HWRPB:
             -- Grabs a copy of the HWRPB; surely not used.
          */
        }
        return ret;
#endif
#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_setsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_SSI_IEEE_FP_CONTROL:
            {
                uint64_t swcr, fpcr;

                if (get_user_u64 (swcr, arg2)) {
                    return -TARGET_EFAULT;
                }

                /*
                 * The kernel calls swcr_update_status to update the
                 * status bits from the fpcr at every point that it
                 * could be queried.  Therefore, we store the status
                 * bits only in FPCR.
                 */
                ((CPUAlphaState *)cpu_env)->swcr
                    = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);

                fpcr = cpu_alpha_load_fpcr(cpu_env);
                fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
                fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
                cpu_alpha_store_fpcr(cpu_env, fpcr);
                ret = 0;
            }
            break;

          case TARGET_SSI_IEEE_RAISE_EXCEPTION:
            {
                uint64_t exc, fpcr, fex;

                if (get_user_u64(exc, arg2)) {
                    return -TARGET_EFAULT;
                }
                exc &= SWCR_STATUS_MASK;
                fpcr = cpu_alpha_load_fpcr(cpu_env);

                /* Old exceptions are not signaled.  */
                fex = alpha_ieee_fpcr_to_swcr(fpcr);
                fex = exc & ~fex;
                fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
                fex &= ((CPUArchState *)cpu_env)->swcr;

                /* Update the hardware fpcr.  */
                fpcr |= alpha_ieee_swcr_to_fpcr(exc);
                cpu_alpha_store_fpcr(cpu_env, fpcr);

                if (fex) {
                    int si_code = TARGET_FPE_FLTUNK;
                    target_siginfo_t info;

                    if (fex & SWCR_TRAP_ENABLE_DNO) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INE) {
                        si_code = TARGET_FPE_FLTRES;
                    }
                    if (fex & SWCR_TRAP_ENABLE_UNF) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_OVF) {
                        si_code = TARGET_FPE_FLTOVF;
                    }
                    if (fex & SWCR_TRAP_ENABLE_DZE) {
                        si_code = TARGET_FPE_FLTDIV;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INV) {
                        si_code = TARGET_FPE_FLTINV;
                    }

                    info.si_signo = SIGFPE;
                    info.si_errno = 0;
                    info.si_code = si_code;
                    info._sifields._sigfault._addr
                        = ((CPUArchState *)cpu_env)->pc;
                    queue_signal((CPUArchState *)cpu_env, info.si_signo,
                                 QEMU_SI_FAULT, &info);
                }
                ret = 0;
            }
            break;

          /* case SSI_NVPAIRS:
             -- Used with SSIN_UACPROC to enable unaligned accesses.
             case SSI_IEEE_STATE_AT_SIGNAL:
             case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel
          */
        }
        return ret;
#endif
#ifdef TARGET_NR_osf_sigprocmask
    /* Alpha specific.  */
    case TARGET_NR_osf_sigprocmask:
        {
            abi_ulong mask;
            int how;
            sigset_t set, oldset;

            switch(arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);
            ret = do_sigprocmask(how, &set, &oldset);
            if (!ret) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
            }
        }
        return ret;
#endif

#ifdef TARGET_NR_getgid32
    case TARGET_NR_getgid32:
        return get_errno(getgid());
#endif
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        return get_errno(geteuid());
#endif
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        return get_errno(getegid());
#endif
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        return get_errno(setreuid(arg1, arg2));
#endif
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        return get_errno(setregid(arg1, arg2));
#endif
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        {
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0)
                return ret;
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for(i = 0;i < ret; i++)
                    target_grouplist[i] = tswap32(grouplist[i]);
                unlock_user(target_grouplist, arg2, gidsetsize * 4);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
        {
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
            if (!target_grouplist) {
                return -TARGET_EFAULT;
            }
            for(i = 0;i < gidsetsize; i++)
                grouplist[i] = tswap32(target_grouplist[i]);
            unlock_user(target_grouplist, arg2, 0);
            return get_errno(setgroups(gidsetsize, grouplist));
        }
#endif
#ifdef TARGET_NR_fchown32
    case TARGET_NR_fchown32:
        return get_errno(fchown(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_setresuid32
    case TARGET_NR_setresuid32:
        return get_errno(sys_setresuid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresuid32
    case TARGET_NR_getresuid32:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_u32(ruid, arg1)
                    || put_user_u32(euid, arg2)
                    || put_user_u32(suid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setresgid32
    case TARGET_NR_setresgid32:
        return get_errno(sys_setresgid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresgid32
    case TARGET_NR_getresgid32:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_u32(rgid, arg1)
                    || put_user_u32(egid, arg2)
                    || put_user_u32(sgid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        return get_errno(sys_setuid(arg1));
#endif
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        return get_errno(sys_setgid(arg1));
#endif
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        return get_errno(setfsuid(arg1));
#endif
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        return get_errno(setfsgid(arg1));
#endif
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
            if (!a) {
                return -TARGET_ENOMEM;
            }
            p = lock_user_string(arg3);
            if (!p) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(mincore(a, arg2, p));
                unlock_user(p, arg3, ret);
            }
            unlock_user(a, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        /* arm_fadvise64_64 looks like fadvise64_64 but
         * with different argument order: fd, advice, offset, len
         * rather than the usual fd, offset, len, advice.
         * Note that offset and len are both 64-bit so appear as
         * pairs of 32-bit registers.
         */
        ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
                            target_offset64(arg5, arg6), arg2);
        return -host_to_target_errno(ret);
#endif

#if TARGET_ABI_BITS == 32

#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#if defined(TARGET_PPC) || defined(TARGET_XTENSA)
        /* 6 args: fd, advice, offset (high, low), len (high, low) */
        ret = arg2;
        arg2 = arg3;
        arg3 = arg4;
        arg4 = arg5;
        arg5 = arg6;
        arg6 = ret;
#else
        /* 6 args: fd, offset (high, low), len (high, low), advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in (5,6) and advice in 7 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
            arg6 = arg7;
        }
#endif
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
                            target_offset64(arg4, arg5), arg6);
        return -host_to_target_errno(ret);
#endif

#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
        /* 5 args: fd, offset (high, low), len, advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in 5 and advice in 6 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
        }
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
        return -host_to_target_errno(ret);
#endif

#else /* not a 32-bit ABI */
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
#endif
#ifdef TARGET_S390X
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        default: break;
        }
#endif
        return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
#endif
#endif /* end of 64-bit ABI fadvise handling */
#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        /* A straight passthrough may not be safe because qemu sometimes
           turns private file-backed mappings into anonymous mappings.
           This will break MADV_DONTNEED.
           This is a hint, so ignoring and returning success is ok.  */
        return 0;
#endif
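    /* Concretely: MADV_DONTNEED on a file-backed page re-reads the file
     * contents on the next access, but on an anonymous page it zero-fills
     * instead, so passing the hint through after QEMU has anonymised a
     * mapping could silently corrupt guest data.
     */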
#if TARGET_ABI_BITS == 32
    case TARGET_NR_fcntl64:
    {
        int cmd;
        struct flock64 fl;
        from_flock64_fn *copyfrom = copy_from_user_flock64;
        to_flock64_fn *copyto = copy_to_user_flock64;

#ifdef TARGET_ARM
        if (!((CPUARMState *)cpu_env)->eabi) {
            copyfrom = copy_from_user_oabi_flock64;
            copyto = copy_to_user_oabi_flock64;
        }
#endif

        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL) {
            return cmd;
        }

        switch (arg2) {
        case TARGET_F_GETLK64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            if (ret == 0) {
                ret = copyto(arg3, &fl);
            }
            break;

        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            break;
        default:
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        return ret;
    }
#endif
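    /* The copyfrom/copyto hooks matter because TARGET_F_GETLK64 and
     * TARGET_F_SETLK64 pass a struct flock64 through guest memory: the
     * guest and host layouts can differ in field widths, padding and byte
     * order, and ARM OABI guests use yet another padding scheme, hence the
     * separate copy_{from,to}_user_oabi_flock64 variants.
     */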
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        return 0;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        return TARGET_PAGE_SIZE;
#endif
    case TARGET_NR_gettid:
        return get_errno(sys_gettid());
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32
        if (regpairs_aligned(cpu_env, num)) {
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
        ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        return ret;
#endif
#ifdef CONFIG_ATTR
#ifdef TARGET_NR_setxattr
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    {
        void *p, *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        p = lock_user_string(arg1);
        if (p) {
            if (num == TARGET_NR_listxattr) {
                ret = get_errno(listxattr(p, b, arg3));
            } else {
                ret = get_errno(llistxattr(p, b, arg3));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(b, arg2, arg3);
        return ret;
    }
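    /* A note on the lock/unlock pattern used by all the xattr cases:
     * lock_user() validates and maps a guest buffer for host access, and
     * the last argument of unlock_user() is how many bytes get copied back
     * into guest memory (the buffer length for output buffers such as b
     * here, 0 for strings the host only read).
     */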
    case TARGET_NR_flistxattr:
    {
        void *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(flistxattr(arg1, b, arg3));
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_setxattr) {
                    ret = get_errno(setxattr(p, n, v, arg4, arg5));
                } else {
                    ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_fsetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_getxattr) {
                    ret = get_errno(getxattr(p, n, v, arg4));
                } else {
                    ret = get_errno(lgetxattr(p, n, v, arg4));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_fgetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fgetxattr(arg1, n, v, arg4));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
        {
            void *p, *n;
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_removexattr) {
                    ret = get_errno(removexattr(p, n));
                } else {
                    ret = get_errno(lremovexattr(p, n));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
        }
        return ret;
    case TARGET_NR_fremovexattr:
        {
            void *n;
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fremovexattr(arg1, n));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
        }
        return ret;
#endif
#endif /* CONFIG_ATTR */
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
        ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
        return 0;
#elif defined(TARGET_CRIS)
        if (arg1 & 0xff)
            ret = -TARGET_EINVAL;
        else {
            ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
            ret = 0;
        }
        return ret;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_set_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            ts->tp_value = arg1;
            return 0;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_get_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            return ts->tp_value;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        return -TARGET_ENOSYS;
#endif
#ifdef TARGET_NR_clock_settime
    case TARGET_NR_clock_settime:
    {
        struct timespec ts;

        ret = target_to_host_timespec(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;
        target_to_host_timespec(&ts, arg3);
        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));
        if (arg4)
            host_to_target_timespec(arg4, &ts);

#if defined(TARGET_PPC)
        /* clock_nanosleep is odd in that it returns positive errno values.
         * On PPC, CR0 bit 3 should be set in such a situation. */
        if (ret && ret != -TARGET_ERESTARTSYS) {
            ((CPUPPCState *)cpu_env)->crf[0] |= 1;
        }
#endif
        return ret;
    }
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
    case TARGET_NR_set_tid_address:
        return get_errno(set_tid_address((int *)g2h(arg1)));
#endif
    case TARGET_NR_tkill:
        return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));

    case TARGET_NR_tgkill:
        return get_errno(safe_tgkill((int)arg1, (int)arg2,
                         target_to_host_signal(arg3)));
#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
    case TARGET_NR_get_robust_list:
        /* The ABI for supporting robust futexes has userspace pass
         * the kernel a pointer to a linked list which is updated by
         * userspace after the syscall; the list is walked by the kernel
         * when the thread exits. Since the linked list in QEMU guest
         * memory isn't a valid linked list for the host and we have
         * no way to reliably intercept the thread-death event, we can't
         * support these. Silently return ENOSYS so that guest userspace
         * falls back to a non-robust futex implementation (which should
         * be OK except in the corner case of the guest crashing while
         * holding a mutex that is shared with another process via
         * shared memory).
         */
        return -TARGET_ENOSYS;
#endif
#if defined(TARGET_NR_utimensat)
    case TARGET_NR_utimensat:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                target_to_host_timespec(ts, arg3);
                target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                if (!(p = lock_user_string(arg2))) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
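    /* sys_utimensat() is a direct syscall wrapper here because the kernel
     * accepts a NULL pathname, meaning "operate on the file referenced by
     * the fd itself" (the futimens() behaviour the !arg2 branch relies on),
     * while the libc utimensat() rejects a NULL path.
     */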
    case TARGET_NR_futex:
        return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(sys_inotify_init());
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
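    /* fd_trans_register() attaches a translator to the new descriptor so
     * that the struct inotify_event records the guest later read()s from it
     * are converted (byte order, layout) to the guest ABI by the hooks in
     * fd-trans.c.
     */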
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
    case TARGET_NR_inotify_init1:
        ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
                                          fcntl_flags_tbl)));
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        return get_errno(sys_inotify_rm_watch(arg1, arg2));
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            struct mq_attr posix_mq_attr;
            struct mq_attr *pposix_mq_attr;
            int host_flags;

            host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
            pposix_mq_attr = NULL;
            if (arg4) {
                if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
                    return -TARGET_EFAULT;
                }
                pposix_mq_attr = &posix_mq_attr;
            }
            p = lock_user_string(arg1 - 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
            unlock_user(p, arg1, 0);
        }
        return ret;
    case TARGET_NR_mq_unlink:
        p = lock_user_string(arg1 - 1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(mq_unlink(p));
        unlock_user(p, arg1, 0);
        return ret;
    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                host_to_target_timespec(arg5, &ts);
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user(p, arg2, arg3);
        }
        return ret;
    case TARGET_NR_mq_timedreceive:
        {
            struct timespec ts;
            unsigned int prio;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                host_to_target_timespec(arg5, &ts);
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user(p, arg2, arg3);
            if (arg4 != 0)
                put_user_u32(prio, arg4);
        }
        return ret;
    /* Not implemented for now... */
/*     case TARGET_NR_mq_notify: */
/*         break; */

    case TARGET_NR_mq_getsetattr:
        {
            struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
            ret = 0;
            if (arg2 != 0) {
                copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
                ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
                                           &posix_mq_attr_out));
            } else if (arg3 != 0) {
                ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
            }
            if (ret == 0 && arg3 != 0) {
                copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
            }
        }
        return ret;
#endif
#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            ret = get_errno(tee(arg1, arg2, arg3, arg4));
        }
        return ret;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                if (get_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
                ploff_in = &loff_in;
            }
            if (arg4) {
                if (get_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
            if (arg2) {
                if (put_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
            }
            if (arg4) {
                if (put_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
    {
        int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
        if (arg2 & TARGET_O_NONBLOCK) {
            host_flags |= O_NONBLOCK;
        }
        if (arg2 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;
        }
        ret = get_errno(eventfd(arg1, host_flags));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
    }
#endif
#endif /* CONFIG_EVENTFD */
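    /* The masking above is needed because TARGET_O_NONBLOCK and
     * TARGET_O_CLOEXEC are the guest's numeric flag values, which need not
     * match the host's: the guest bits are stripped out and the host's own
     * O_NONBLOCK/O_CLOEXEC are ORed back in.
     */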
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        return ret;
#endif
#endif /* CONFIG_SYNC_FILE_RANGE */
#if defined(TARGET_NR_signalfd4)
    case TARGET_NR_signalfd4:
        return do_signalfd4(arg1, arg2, arg4);
#endif
#if defined(TARGET_NR_signalfd)
    case TARGET_NR_signalfd:
        return do_signalfd4(arg1, arg2, 0);
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        return get_errno(epoll_create(arg1));
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        return get_errno(epoll_create1(arg1));
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            struct target_epoll_event *target_ep;
            if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                return -TARGET_EFAULT;
            }
            ep.events = tswap32(target_ep->events);
            /* The epoll_data_t union is just opaque data to the kernel,
             * so we transfer all 64 bits across and need not worry what
             * actual data type it is.
             */
            ep.data.u64 = tswap64(target_ep->data.u64);
            unlock_user_struct(target_ep, arg4, 0);
            epp = &ep;
        }
        return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
    }
#endif
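    /* tswap32()/tswap64() swap byte order only when guest and host
     * endianness differ; the epoll_data_t payload is swapped as a single
     * opaque 64-bit unit precisely because the kernel never interprets it.
     */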
#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(TARGET_NR_epoll_pwait)
    case TARGET_NR_epoll_pwait:
#endif
    {
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
            return -TARGET_EINVAL;
        }

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            return -TARGET_EFAULT;
        }

        ep = g_try_new(struct epoll_event, maxevents);
        if (!ep) {
            unlock_user(target_ep, arg2, 0);
            return -TARGET_ENOMEM;
        }

        switch (num) {
#if defined(TARGET_NR_epoll_pwait)
        case TARGET_NR_epoll_pwait:
        {
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg5) {
                if (arg6 != sizeof(target_sigset_t)) {
                    ret = -TARGET_EINVAL;
                    break;
                }

                target_set = lock_user(VERIFY_READ, arg5,
                                       sizeof(target_sigset_t), 1);
                if (!target_set) {
                    ret = -TARGET_EFAULT;
                    break;
                }
                target_to_host_sigset(set, target_set);
                unlock_user(target_set, arg5, 0);
            } else {
                set = NULL;
            }

            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             set, SIGSET_T_SIZE));
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             NULL, 0));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
            unlock_user(target_ep, arg2,
                        ret * sizeof(struct target_epoll_event));
        } else {
            unlock_user(target_ep, arg2, 0);
        }
        g_free(ep);
        return ret;
    }
#endif
#endif /* CONFIG_EPOLL */
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        int resource = target_to_host_resource(arg2);
        if (arg3) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                return -TARGET_EFAULT;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                return -TARGET_EFAULT;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        return ret;
    }
#endif
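    /* struct host_rlimit64 mirrors the kernel's fixed-width struct rlimit64
     * and sys_prlimit64() issues the syscall directly, which avoids
     * depending on the width of the libc's rlim_t when shuttling the
     * guest's 64-bit limits through.
     */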
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (name) {
            ret = get_errno(gethostname(name, arg2));
            unlock_user(name, arg1, arg2);
        } else {
            ret = -TARGET_EFAULT;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* should use start_exclusive from main.c */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal((CPUArchState *)cpu_env, info.si_signo,
                         QEMU_SI_FAULT, &info);
            ret = 0xdeadbeef;
        }
        if (mem_value == arg2)
            put_user_u32(arg1, arg6);
        return mem_value;
    }
#endif
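    /* The intended semantics (an m68k kernel helper): compare the 32-bit
     * word at guest address arg6 with arg2 and, if they match, store arg1
     * there, returning the old memory value either way. As the comment
     * above concedes, doing this as a plain read/compare/write without
     * start_exclusive() leaves a race window against other guest threads.
     */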
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
        /* Like the kernel implementation and the
           qemu arm barrier, no-op this? */
        return 0;
#endif
#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers + timer_index;

            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    return ret;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                phtimer = NULL;
            } else {
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    }
#endif
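    /* The handle returned to the guest is not the host timer_t: it is
     * TIMER_MAGIC ORed with an index into the g_posix_timers[] table, and
     * get_timer_id() in the timer_settime/gettime/delete cases below
     * validates the magic and recovers the index.
     */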
#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_delete(htimer));
            g_posix_timers[timerid] = 0;
        }
        return ret;
    }
#endif
#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        return get_errno(timerfd_create(arg1,
                         target_to_host_bitmask(arg2, fcntl_flags_tbl)));
#endif
#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
        {
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    case TARGET_NR_ioprio_get:
        return get_errno(ioprio_get(arg1, arg2));
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    case TARGET_NR_ioprio_set:
        return get_errno(ioprio_set(arg1, arg2, arg3));
#endif
#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        return get_errno(setns(arg1, arg2));
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        return get_errno(unshare(arg1));
#endif
#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
    case TARGET_NR_kcmp:
        return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
#endif
#ifdef TARGET_NR_swapcontext
    case TARGET_NR_swapcontext:
        /* PowerPC specific.  */
        return do_swapcontext(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_memfd_create
    case TARGET_NR_memfd_create:
        p = lock_user_string(arg1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(memfd_create(p, arg2));
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
        return -TARGET_ENOSYS;
    }
    return ret;
}
abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
                    abi_long arg2, abi_long arg3, abi_long arg4,
                    abi_long arg5, abi_long arg6, abi_long arg7,
                    abi_long arg8)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long ret;

#ifdef DEBUG_ERESTARTSYS
    /* Debug-only code for exercising the syscall-restart code paths
     * in the per-architecture cpu main loops: restart every syscall
     * the guest makes once before letting it through.
     */
    {
        static bool flag;
        flag = !flag;
        if (flag) {
            return -TARGET_ERESTARTSYS;
        }
    }
#endif

    record_syscall_start(cpu, num, arg1,
                         arg2, arg3, arg4, arg5, arg6, arg7, arg8);

    if (unlikely(do_strace)) {
        print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
        ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
                          arg5, arg6, arg7, arg8);
        print_syscall_ret(num, ret);
    } else {
        ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
                          arg5, arg6, arg7, arg8);
    }

    record_syscall_return(cpu, num, ret);
    return ret;
}