4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
29 #include <sys/mount.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
36 #include <linux/capability.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
43 #include <sys/times.h>
46 #include <sys/statfs.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
58 #include "qemu-common.h"
60 #include <sys/timerfd.h>
66 #include <sys/eventfd.h>
69 #include <sys/epoll.h>
72 #include "qemu/xattr.h"
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
91 #include <linux/mtio.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
97 #if defined(CONFIG_USBFS)
98 #include <linux/usbdevice_fs.h>
99 #include <linux/usb/ch9.h>
101 #include <linux/vt.h>
102 #include <linux/dm-ioctl.h>
103 #include <linux/reboot.h>
104 #include <linux/route.h>
105 #include <linux/filter.h>
106 #include <linux/blkpg.h>
107 #include <netpacket/packet.h>
108 #include <linux/netlink.h>
109 #include "linux_loop.h"
113 #include "fd-trans.h"
116 #define CLONE_IO 0x80000000 /* Clone io context */
119 /* We can't directly call the host clone syscall, because this will
120 * badly confuse libc (breaking mutexes, for example). So we must
121 * divide clone flags into:
122 * * flag combinations that look like pthread_create()
123 * * flag combinations that look like fork()
124 * * flags we can implement within QEMU itself
125 * * flags we can't support and will return an error for
127 /* For thread creation, all these flags must be present; for
128 * fork, none must be present.
130 #define CLONE_THREAD_FLAGS \
131 (CLONE_VM | CLONE_FS | CLONE_FILES | \
132 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
134 /* These flags are ignored:
135 * CLONE_DETACHED is now ignored by the kernel;
136 * CLONE_IO is just an optimisation hint to the I/O scheduler
138 #define CLONE_IGNORED_FLAGS \
139 (CLONE_DETACHED | CLONE_IO)
141 /* Flags for fork which we can implement within QEMU itself */
142 #define CLONE_OPTIONAL_FORK_FLAGS \
143 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
144 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
146 /* Flags for thread creation which we can implement within QEMU itself */
147 #define CLONE_OPTIONAL_THREAD_FLAGS \
148 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
149 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
151 #define CLONE_INVALID_FORK_FLAGS \
152 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
154 #define CLONE_INVALID_THREAD_FLAGS \
155 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
156 CLONE_IGNORED_FLAGS))
158 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
159 * have almost all been allocated. We cannot support any of
160 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
161 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
162 * The checks against the invalid thread masks above will catch these.
163 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
166 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
167 * once. This exercises the codepaths for restart.
169 //#define DEBUG_ERESTARTSYS
171 //#include <linux/msdos_fs.h>
172 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
173 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
/* Helper macros that define a small static wrapper around a raw host
 * syscall.  _syscallN takes the return type, the syscall name, and N
 * (type, argument-name) pairs; the wrapper simply forwards to syscall().
 */
#define _syscall0(type,name)		\
static type name (void)			\
{					\
	return syscall(__NR_##name);	\
}

#define _syscall1(type,name,type1,arg1)		\
static type name (type1 arg1)			\
{						\
	return syscall(__NR_##name, arg1);	\
}

#define _syscall2(type,name,type1,arg1,type2,arg2)	\
static type name (type1 arg1,type2 arg2)		\
{							\
	return syscall(__NR_##name, arg1, arg2);	\
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
static type name (type1 arg1,type2 arg2,type3 arg3)		\
{								\
	return syscall(__NR_##name, arg1, arg2, arg3);		\
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
{										\
	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
		  type5,arg5)							\
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
{										\
	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
}


#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
		  type5,arg5,type6,arg6)					\
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6)							\
{										\
	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
}
230 #define __NR_sys_uname __NR_uname
231 #define __NR_sys_getcwd1 __NR_getcwd
232 #define __NR_sys_getdents __NR_getdents
233 #define __NR_sys_getdents64 __NR_getdents64
234 #define __NR_sys_getpriority __NR_getpriority
235 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
236 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
237 #define __NR_sys_syslog __NR_syslog
238 #define __NR_sys_futex __NR_futex
239 #define __NR_sys_inotify_init __NR_inotify_init
240 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
241 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
243 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
244 #define __NR__llseek __NR_lseek
247 /* Newer kernel ports have llseek() instead of _llseek() */
248 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
249 #define TARGET_NR__llseek TARGET_NR_llseek
#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
   errno. */
static int gettid(void) {
    return -ENOSYS;
}
#endif
262 /* For the 64-bit guest on 32-bit host case we must emulate
263 * getdents using getdents64, because otherwise the host
264 * might hand us back more dirent records than we can fit
265 * into the guest buffer after structure format conversion.
266 * Otherwise we emulate getdents with getdents if the host has it.
268 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
269 #define EMULATE_GETDENTS_WITH_GETDENTS
272 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
273 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
275 #if (defined(TARGET_NR_getdents) && \
276 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
277 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
278 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
280 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
281 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
282 loff_t
*, res
, uint
, wh
);
284 _syscall3(int, sys_rt_sigqueueinfo
, pid_t
, pid
, int, sig
, siginfo_t
*, uinfo
)
285 _syscall4(int, sys_rt_tgsigqueueinfo
, pid_t
, pid
, pid_t
, tid
, int, sig
,
287 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
288 #ifdef __NR_exit_group
289 _syscall1(int,exit_group
,int,error_code
)
291 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
292 _syscall1(int,set_tid_address
,int *,tidptr
)
294 #if defined(TARGET_NR_futex) && defined(__NR_futex)
295 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
296 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
298 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
299 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
300 unsigned long *, user_mask_ptr
);
301 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
302 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
303 unsigned long *, user_mask_ptr
);
304 #define __NR_sys_getcpu __NR_getcpu
305 _syscall3(int, sys_getcpu
, unsigned *, cpu
, unsigned *, node
, void *, tcache
);
306 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
308 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
309 struct __user_cap_data_struct
*, data
);
310 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
311 struct __user_cap_data_struct
*, data
);
312 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
313 _syscall2(int, ioprio_get
, int, which
, int, who
)
315 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
316 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
318 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
319 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
322 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
323 _syscall5(int, kcmp
, pid_t
, pid1
, pid_t
, pid2
, int, type
,
324 unsigned long, idx1
, unsigned long, idx2
)
327 static bitmask_transtbl fcntl_flags_tbl
[] = {
328 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
329 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
330 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
331 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
332 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
333 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
334 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
335 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
336 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
337 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
338 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
339 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
340 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
341 #if defined(O_DIRECT)
342 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
344 #if defined(O_NOATIME)
345 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
347 #if defined(O_CLOEXEC)
348 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
351 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
353 #if defined(O_TMPFILE)
354 { TARGET_O_TMPFILE
, TARGET_O_TMPFILE
, O_TMPFILE
, O_TMPFILE
},
356 /* Don't terminate the list prematurely on 64-bit host+guest. */
357 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
358 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
/* getcwd() variant matching the kernel syscall contract: on success it
 * returns the length of the path INCLUDING the trailing NUL byte, and on
 * failure returns -1 with errno set by getcwd().
 */
static int sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }
  return strlen(buf)+1;
}
372 #ifdef TARGET_NR_utimensat
373 #if defined(__NR_utimensat)
374 #define __NR_sys_utimensat __NR_utimensat
375 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
376 const struct timespec
*,tsp
,int,flags
)
/* Fallback for hosts whose kernel headers lack __NR_utimensat:
 * always fail with ENOSYS so the caller can report the syscall as
 * unimplemented.
 */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
385 #endif /* TARGET_NR_utimensat */
387 #ifdef TARGET_NR_renameat2
388 #if defined(__NR_renameat2)
389 #define __NR_sys_renameat2 __NR_renameat2
390 _syscall5(int, sys_renameat2
, int, oldfd
, const char *, old
, int, newfd
,
391 const char *, new, unsigned int, flags
)
/* Fallback for hosts without __NR_renameat2: a zero flags argument is
 * exactly plain renameat(); any non-zero flags cannot be emulated, so
 * fail with ENOSYS.
 */
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
405 #ifdef CONFIG_INOTIFY
406 #include <sys/inotify.h>
408 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin wrapper so the syscall dispatch code has a uniform sys_* name. */
static int sys_inotify_init(void)
{
  return (inotify_init());
}
414 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Thin wrapper over inotify_add_watch(); returns the watch descriptor
 * or -1 with errno set.
 */
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
420 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Thin wrapper over inotify_rm_watch(); returns 0 or -1 with errno set. */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
426 #ifdef CONFIG_INOTIFY1
427 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Thin wrapper over inotify_init1(); flags are IN_NONBLOCK/IN_CLOEXEC. */
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
435 /* Userspace can usually survive runtime without inotify */
436 #undef TARGET_NR_inotify_init
437 #undef TARGET_NR_inotify_init1
438 #undef TARGET_NR_inotify_add_watch
439 #undef TARGET_NR_inotify_rm_watch
440 #endif /* CONFIG_INOTIFY */
442 #if defined(TARGET_NR_prlimit64)
443 #ifndef __NR_prlimit64
444 # define __NR_prlimit64 -1
446 #define __NR_sys_prlimit64 __NR_prlimit64
447 /* The glibc rlimit structure may not be that used by the underlying syscall */
448 struct host_rlimit64
{
452 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
453 const struct host_rlimit64
*, new_limit
,
454 struct host_rlimit64
*, old_limit
)
458 #if defined(TARGET_NR_timer_create)
459 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
460 static timer_t g_posix_timers
[32] = { 0, } ;
462 static inline int next_free_host_timer(void)
465 /* FIXME: Does finding the next free slot require a lock? */
466 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
467 if (g_posix_timers
[k
] == 0) {
468 g_posix_timers
[k
] = (timer_t
) 1;
476 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
478 static inline int regpairs_aligned(void *cpu_env
, int num
)
480 return ((((CPUARMState
*)cpu_env
)->eabi
) == 1) ;
482 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
483 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
484 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
485 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
486 * of registers which translates to the same as ARM/MIPS, because we start with
488 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
489 #elif defined(TARGET_SH4)
490 /* SH4 doesn't align register pairs, except for p{read,write}64 */
491 static inline int regpairs_aligned(void *cpu_env
, int num
)
494 case TARGET_NR_pread64
:
495 case TARGET_NR_pwrite64
:
502 #elif defined(TARGET_XTENSA)
503 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
505 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 0; }
508 #define ERRNO_TABLE_SIZE 1200
510 /* target_to_host_errno_table[] is initialized from
511 * host_to_target_errno_table[] in syscall_init(). */
512 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
516 * This list is the union of errno values overridden in asm-<arch>/errno.h
517 * minus the errnos that are not actually generic to all archs.
519 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
520 [EAGAIN
] = TARGET_EAGAIN
,
521 [EIDRM
] = TARGET_EIDRM
,
522 [ECHRNG
] = TARGET_ECHRNG
,
523 [EL2NSYNC
] = TARGET_EL2NSYNC
,
524 [EL3HLT
] = TARGET_EL3HLT
,
525 [EL3RST
] = TARGET_EL3RST
,
526 [ELNRNG
] = TARGET_ELNRNG
,
527 [EUNATCH
] = TARGET_EUNATCH
,
528 [ENOCSI
] = TARGET_ENOCSI
,
529 [EL2HLT
] = TARGET_EL2HLT
,
530 [EDEADLK
] = TARGET_EDEADLK
,
531 [ENOLCK
] = TARGET_ENOLCK
,
532 [EBADE
] = TARGET_EBADE
,
533 [EBADR
] = TARGET_EBADR
,
534 [EXFULL
] = TARGET_EXFULL
,
535 [ENOANO
] = TARGET_ENOANO
,
536 [EBADRQC
] = TARGET_EBADRQC
,
537 [EBADSLT
] = TARGET_EBADSLT
,
538 [EBFONT
] = TARGET_EBFONT
,
539 [ENOSTR
] = TARGET_ENOSTR
,
540 [ENODATA
] = TARGET_ENODATA
,
541 [ETIME
] = TARGET_ETIME
,
542 [ENOSR
] = TARGET_ENOSR
,
543 [ENONET
] = TARGET_ENONET
,
544 [ENOPKG
] = TARGET_ENOPKG
,
545 [EREMOTE
] = TARGET_EREMOTE
,
546 [ENOLINK
] = TARGET_ENOLINK
,
547 [EADV
] = TARGET_EADV
,
548 [ESRMNT
] = TARGET_ESRMNT
,
549 [ECOMM
] = TARGET_ECOMM
,
550 [EPROTO
] = TARGET_EPROTO
,
551 [EDOTDOT
] = TARGET_EDOTDOT
,
552 [EMULTIHOP
] = TARGET_EMULTIHOP
,
553 [EBADMSG
] = TARGET_EBADMSG
,
554 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
555 [EOVERFLOW
] = TARGET_EOVERFLOW
,
556 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
557 [EBADFD
] = TARGET_EBADFD
,
558 [EREMCHG
] = TARGET_EREMCHG
,
559 [ELIBACC
] = TARGET_ELIBACC
,
560 [ELIBBAD
] = TARGET_ELIBBAD
,
561 [ELIBSCN
] = TARGET_ELIBSCN
,
562 [ELIBMAX
] = TARGET_ELIBMAX
,
563 [ELIBEXEC
] = TARGET_ELIBEXEC
,
564 [EILSEQ
] = TARGET_EILSEQ
,
565 [ENOSYS
] = TARGET_ENOSYS
,
566 [ELOOP
] = TARGET_ELOOP
,
567 [ERESTART
] = TARGET_ERESTART
,
568 [ESTRPIPE
] = TARGET_ESTRPIPE
,
569 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
570 [EUSERS
] = TARGET_EUSERS
,
571 [ENOTSOCK
] = TARGET_ENOTSOCK
,
572 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
573 [EMSGSIZE
] = TARGET_EMSGSIZE
,
574 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
575 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
576 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
577 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
578 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
579 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
580 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
581 [EADDRINUSE
] = TARGET_EADDRINUSE
,
582 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
583 [ENETDOWN
] = TARGET_ENETDOWN
,
584 [ENETUNREACH
] = TARGET_ENETUNREACH
,
585 [ENETRESET
] = TARGET_ENETRESET
,
586 [ECONNABORTED
] = TARGET_ECONNABORTED
,
587 [ECONNRESET
] = TARGET_ECONNRESET
,
588 [ENOBUFS
] = TARGET_ENOBUFS
,
589 [EISCONN
] = TARGET_EISCONN
,
590 [ENOTCONN
] = TARGET_ENOTCONN
,
591 [EUCLEAN
] = TARGET_EUCLEAN
,
592 [ENOTNAM
] = TARGET_ENOTNAM
,
593 [ENAVAIL
] = TARGET_ENAVAIL
,
594 [EISNAM
] = TARGET_EISNAM
,
595 [EREMOTEIO
] = TARGET_EREMOTEIO
,
596 [EDQUOT
] = TARGET_EDQUOT
,
597 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
598 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
599 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
600 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
601 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
602 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
603 [EALREADY
] = TARGET_EALREADY
,
604 [EINPROGRESS
] = TARGET_EINPROGRESS
,
605 [ESTALE
] = TARGET_ESTALE
,
606 [ECANCELED
] = TARGET_ECANCELED
,
607 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
608 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
610 [ENOKEY
] = TARGET_ENOKEY
,
613 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
616 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
619 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
622 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
624 #ifdef ENOTRECOVERABLE
625 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
628 [ENOMSG
] = TARGET_ENOMSG
,
631 [ERFKILL
] = TARGET_ERFKILL
,
634 [EHWPOISON
] = TARGET_EHWPOISON
,
638 static inline int host_to_target_errno(int err
)
640 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
641 host_to_target_errno_table
[err
]) {
642 return host_to_target_errno_table
[err
];
647 static inline int target_to_host_errno(int err
)
649 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
650 target_to_host_errno_table
[err
]) {
651 return target_to_host_errno_table
[err
];
656 static inline abi_long
get_errno(abi_long ret
)
659 return -host_to_target_errno(errno
);
664 const char *target_strerror(int err
)
666 if (err
== TARGET_ERESTARTSYS
) {
667 return "To be restarted";
669 if (err
== TARGET_QEMU_ESIGRETURN
) {
670 return "Successful exit from sigreturn";
673 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
676 return strerror(target_to_host_errno(err
));
679 #define safe_syscall0(type, name) \
680 static type safe_##name(void) \
682 return safe_syscall(__NR_##name); \
685 #define safe_syscall1(type, name, type1, arg1) \
686 static type safe_##name(type1 arg1) \
688 return safe_syscall(__NR_##name, arg1); \
691 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
692 static type safe_##name(type1 arg1, type2 arg2) \
694 return safe_syscall(__NR_##name, arg1, arg2); \
697 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
698 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
700 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
703 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
705 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
707 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
710 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
711 type4, arg4, type5, arg5) \
712 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
715 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
718 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
719 type4, arg4, type5, arg5, type6, arg6) \
720 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
721 type5 arg5, type6 arg6) \
723 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
726 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
727 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
728 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
729 int, flags
, mode_t
, mode
)
730 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
731 struct rusage
*, rusage
)
732 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
733 int, options
, struct rusage
*, rusage
)
734 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
735 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
736 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
737 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
738 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
740 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
741 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
743 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
744 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
745 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
746 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
747 safe_syscall2(int, tkill
, int, tid
, int, sig
)
748 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
749 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
750 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
751 safe_syscall5(ssize_t
, preadv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
752 unsigned long, pos_l
, unsigned long, pos_h
)
753 safe_syscall5(ssize_t
, pwritev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
754 unsigned long, pos_l
, unsigned long, pos_h
)
755 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
757 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
758 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
759 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
760 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
761 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
762 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
763 safe_syscall2(int, flock
, int, fd
, int, operation
)
764 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
765 const struct timespec
*, uts
, size_t, sigsetsize
)
766 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
768 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
769 struct timespec
*, rem
)
770 #ifdef TARGET_NR_clock_nanosleep
771 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
772 const struct timespec
*, req
, struct timespec
*, rem
)
775 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
777 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
778 long, msgtype
, int, flags
)
779 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
780 unsigned, nsops
, const struct timespec
*, timeout
)
782 /* This host kernel architecture uses a single ipc syscall; fake up
783 * wrappers for the sub-operations to hide this implementation detail.
784 * Annoyingly we can't include linux/ipc.h to get the constant definitions
785 * for the call parameter because some structs in there conflict with the
786 * sys/ipc.h ones. So we just define them here, and rely on them being
787 * the same for all host architectures.
789 #define Q_SEMTIMEDOP 4
792 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
794 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
795 void *, ptr
, long, fifth
)
796 static int safe_msgsnd(int msgid
, const void *msgp
, size_t sz
, int flags
)
798 return safe_ipc(Q_IPCCALL(0, Q_MSGSND
), msgid
, sz
, flags
, (void *)msgp
, 0);
800 static int safe_msgrcv(int msgid
, void *msgp
, size_t sz
, long type
, int flags
)
802 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV
), msgid
, sz
, flags
, msgp
, type
);
804 static int safe_semtimedop(int semid
, struct sembuf
*tsops
, unsigned nsops
,
805 const struct timespec
*timeout
)
807 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP
), semid
, nsops
, 0, tsops
,
811 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
812 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
813 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
814 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
815 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
817 /* We do ioctl like this rather than via safe_syscall3 to preserve the
818 * "third argument might be integer or pointer or not present" behaviour of
821 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
822 /* Similarly for fcntl. Note that callers must always:
823 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
824 * use the flock64 struct rather than unsuffixed flock
825 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
828 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
830 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
833 static inline int host_to_target_sock_type(int host_type
)
837 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
839 target_type
= TARGET_SOCK_DGRAM
;
842 target_type
= TARGET_SOCK_STREAM
;
845 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
849 #if defined(SOCK_CLOEXEC)
850 if (host_type
& SOCK_CLOEXEC
) {
851 target_type
|= TARGET_SOCK_CLOEXEC
;
855 #if defined(SOCK_NONBLOCK)
856 if (host_type
& SOCK_NONBLOCK
) {
857 target_type
|= TARGET_SOCK_NONBLOCK
;
864 static abi_ulong target_brk
;
865 static abi_ulong target_original_brk
;
866 static abi_ulong brk_page
;
868 void target_set_brk(abi_ulong new_brk
)
870 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
871 brk_page
= HOST_PAGE_ALIGN(target_brk
);
874 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
875 #define DEBUGF_BRK(message, args...)
877 /* do_brk() must return target values and target errnos. */
878 abi_long
do_brk(abi_ulong new_brk
)
880 abi_long mapped_addr
;
881 abi_ulong new_alloc_size
;
883 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
886 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
889 if (new_brk
< target_original_brk
) {
890 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
895 /* If the new brk is less than the highest page reserved to the
896 * target heap allocation, set it and we're almost done... */
897 if (new_brk
<= brk_page
) {
898 /* Heap contents are initialized to zero, as for anonymous
900 if (new_brk
> target_brk
) {
901 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
903 target_brk
= new_brk
;
904 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
908 /* We need to allocate more memory after the brk... Note that
909 * we don't use MAP_FIXED because that will map over the top of
910 * any existing mapping (like the one with the host libc or qemu
911 * itself); instead we treat "mapped but at wrong address" as
912 * a failure and unmap again.
914 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
915 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
916 PROT_READ
|PROT_WRITE
,
917 MAP_ANON
|MAP_PRIVATE
, 0, 0));
919 if (mapped_addr
== brk_page
) {
920 /* Heap contents are initialized to zero, as for anonymous
921 * mapped pages. Technically the new pages are already
922 * initialized to zero since they *are* anonymous mapped
923 * pages, however we have to take care with the contents that
924 * come from the remaining part of the previous page: it may
925 * contains garbage data due to a previous heap usage (grown
927 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
929 target_brk
= new_brk
;
930 brk_page
= HOST_PAGE_ALIGN(target_brk
);
931 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
934 } else if (mapped_addr
!= -1) {
935 /* Mapped but at wrong address, meaning there wasn't actually
936 * enough space for this brk.
938 target_munmap(mapped_addr
, new_alloc_size
);
940 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
943 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
946 #if defined(TARGET_ALPHA)
947 /* We (partially) emulate OSF/1 on Alpha, which requires we
948 return a proper errno, not an unchanged brk value. */
949 return -TARGET_ENOMEM
;
951 /* For everything else, return the previous break. */
955 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
956 abi_ulong target_fds_addr
,
960 abi_ulong b
, *target_fds
;
962 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
963 if (!(target_fds
= lock_user(VERIFY_READ
,
965 sizeof(abi_ulong
) * nw
,
967 return -TARGET_EFAULT
;
971 for (i
= 0; i
< nw
; i
++) {
972 /* grab the abi_ulong */
973 __get_user(b
, &target_fds
[i
]);
974 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
975 /* check the bit inside the abi_ulong */
982 unlock_user(target_fds
, target_fds_addr
, 0);
987 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
988 abi_ulong target_fds_addr
,
991 if (target_fds_addr
) {
992 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
993 return -TARGET_EFAULT
;
1001 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
1007 abi_ulong
*target_fds
;
1009 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1010 if (!(target_fds
= lock_user(VERIFY_WRITE
,
1012 sizeof(abi_ulong
) * nw
,
1014 return -TARGET_EFAULT
;
1017 for (i
= 0; i
< nw
; i
++) {
1019 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1020 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
1023 __put_user(v
, &target_fds
[i
]);
1026 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
1031 #if defined(__alpha__)
1032 #define HOST_HZ 1024
1037 static inline abi_long
host_to_target_clock_t(long ticks
)
1039 #if HOST_HZ == TARGET_HZ
1042 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
1046 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
1047 const struct rusage
*rusage
)
1049 struct target_rusage
*target_rusage
;
1051 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
1052 return -TARGET_EFAULT
;
1053 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
1054 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
1055 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
1056 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
1057 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
1058 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
1059 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
1060 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
1061 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
1062 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
1063 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1064 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1065 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1066 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1067 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1068 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1069 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1070 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1071 unlock_user_struct(target_rusage
, target_addr
, 1);
1076 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1078 abi_ulong target_rlim_swap
;
1081 target_rlim_swap
= tswapal(target_rlim
);
1082 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1083 return RLIM_INFINITY
;
1085 result
= target_rlim_swap
;
1086 if (target_rlim_swap
!= (rlim_t
)result
)
1087 return RLIM_INFINITY
;
1092 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1094 abi_ulong target_rlim_swap
;
1097 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1098 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1100 target_rlim_swap
= rlim
;
1101 result
= tswapal(target_rlim_swap
);
1106 static inline int target_to_host_resource(int code
)
1109 case TARGET_RLIMIT_AS
:
1111 case TARGET_RLIMIT_CORE
:
1113 case TARGET_RLIMIT_CPU
:
1115 case TARGET_RLIMIT_DATA
:
1117 case TARGET_RLIMIT_FSIZE
:
1118 return RLIMIT_FSIZE
;
1119 case TARGET_RLIMIT_LOCKS
:
1120 return RLIMIT_LOCKS
;
1121 case TARGET_RLIMIT_MEMLOCK
:
1122 return RLIMIT_MEMLOCK
;
1123 case TARGET_RLIMIT_MSGQUEUE
:
1124 return RLIMIT_MSGQUEUE
;
1125 case TARGET_RLIMIT_NICE
:
1127 case TARGET_RLIMIT_NOFILE
:
1128 return RLIMIT_NOFILE
;
1129 case TARGET_RLIMIT_NPROC
:
1130 return RLIMIT_NPROC
;
1131 case TARGET_RLIMIT_RSS
:
1133 case TARGET_RLIMIT_RTPRIO
:
1134 return RLIMIT_RTPRIO
;
1135 case TARGET_RLIMIT_SIGPENDING
:
1136 return RLIMIT_SIGPENDING
;
1137 case TARGET_RLIMIT_STACK
:
1138 return RLIMIT_STACK
;
1144 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1145 abi_ulong target_tv_addr
)
1147 struct target_timeval
*target_tv
;
1149 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
1150 return -TARGET_EFAULT
;
1152 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1153 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1155 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1160 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1161 const struct timeval
*tv
)
1163 struct target_timeval
*target_tv
;
1165 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
1166 return -TARGET_EFAULT
;
1168 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1169 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1171 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1176 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1177 abi_ulong target_tz_addr
)
1179 struct target_timezone
*target_tz
;
1181 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1182 return -TARGET_EFAULT
;
1185 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1186 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1188 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1193 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1196 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1197 abi_ulong target_mq_attr_addr
)
1199 struct target_mq_attr
*target_mq_attr
;
1201 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1202 target_mq_attr_addr
, 1))
1203 return -TARGET_EFAULT
;
1205 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1206 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1207 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1208 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1210 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1215 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1216 const struct mq_attr
*attr
)
1218 struct target_mq_attr
*target_mq_attr
;
1220 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1221 target_mq_attr_addr
, 0))
1222 return -TARGET_EFAULT
;
1224 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1225 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1226 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1227 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1229 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1235 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1236 /* do_select() must return target values and target errnos. */
1237 static abi_long
do_select(int n
,
1238 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1239 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1241 fd_set rfds
, wfds
, efds
;
1242 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1244 struct timespec ts
, *ts_ptr
;
1247 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1251 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1255 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1260 if (target_tv_addr
) {
1261 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1262 return -TARGET_EFAULT
;
1263 ts
.tv_sec
= tv
.tv_sec
;
1264 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1270 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1273 if (!is_error(ret
)) {
1274 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1275 return -TARGET_EFAULT
;
1276 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1277 return -TARGET_EFAULT
;
1278 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1279 return -TARGET_EFAULT
;
1281 if (target_tv_addr
) {
1282 tv
.tv_sec
= ts
.tv_sec
;
1283 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1284 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1285 return -TARGET_EFAULT
;
1293 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1294 static abi_long
do_old_select(abi_ulong arg1
)
1296 struct target_sel_arg_struct
*sel
;
1297 abi_ulong inp
, outp
, exp
, tvp
;
1300 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1)) {
1301 return -TARGET_EFAULT
;
1304 nsel
= tswapal(sel
->n
);
1305 inp
= tswapal(sel
->inp
);
1306 outp
= tswapal(sel
->outp
);
1307 exp
= tswapal(sel
->exp
);
1308 tvp
= tswapal(sel
->tvp
);
1310 unlock_user_struct(sel
, arg1
, 0);
1312 return do_select(nsel
, inp
, outp
, exp
, tvp
);
1317 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1320 return pipe2(host_pipe
, flags
);
1326 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1327 int flags
, int is_pipe2
)
1331 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1334 return get_errno(ret
);
1336 /* Several targets have special calling conventions for the original
1337 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1339 #if defined(TARGET_ALPHA)
1340 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1341 return host_pipe
[0];
1342 #elif defined(TARGET_MIPS)
1343 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1344 return host_pipe
[0];
1345 #elif defined(TARGET_SH4)
1346 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1347 return host_pipe
[0];
1348 #elif defined(TARGET_SPARC)
1349 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1350 return host_pipe
[0];
1354 if (put_user_s32(host_pipe
[0], pipedes
)
1355 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1356 return -TARGET_EFAULT
;
1357 return get_errno(ret
);
1360 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1361 abi_ulong target_addr
,
1364 struct target_ip_mreqn
*target_smreqn
;
1366 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1368 return -TARGET_EFAULT
;
1369 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1370 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1371 if (len
== sizeof(struct target_ip_mreqn
))
1372 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1373 unlock_user(target_smreqn
, target_addr
, 0);
1378 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1379 abi_ulong target_addr
,
1382 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1383 sa_family_t sa_family
;
1384 struct target_sockaddr
*target_saddr
;
1386 if (fd_trans_target_to_host_addr(fd
)) {
1387 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1390 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1392 return -TARGET_EFAULT
;
1394 sa_family
= tswap16(target_saddr
->sa_family
);
1396 /* Oops. The caller might send a incomplete sun_path; sun_path
1397 * must be terminated by \0 (see the manual page), but
1398 * unfortunately it is quite common to specify sockaddr_un
1399 * length as "strlen(x->sun_path)" while it should be
1400 * "strlen(...) + 1". We'll fix that here if needed.
1401 * Linux kernel has a similar feature.
1404 if (sa_family
== AF_UNIX
) {
1405 if (len
< unix_maxlen
&& len
> 0) {
1406 char *cp
= (char*)target_saddr
;
1408 if ( cp
[len
-1] && !cp
[len
] )
1411 if (len
> unix_maxlen
)
1415 memcpy(addr
, target_saddr
, len
);
1416 addr
->sa_family
= sa_family
;
1417 if (sa_family
== AF_NETLINK
) {
1418 struct sockaddr_nl
*nladdr
;
1420 nladdr
= (struct sockaddr_nl
*)addr
;
1421 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1422 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1423 } else if (sa_family
== AF_PACKET
) {
1424 struct target_sockaddr_ll
*lladdr
;
1426 lladdr
= (struct target_sockaddr_ll
*)addr
;
1427 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1428 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1430 unlock_user(target_saddr
, target_addr
, 0);
1435 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1436 struct sockaddr
*addr
,
1439 struct target_sockaddr
*target_saddr
;
1446 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1448 return -TARGET_EFAULT
;
1449 memcpy(target_saddr
, addr
, len
);
1450 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1451 sizeof(target_saddr
->sa_family
)) {
1452 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1454 if (addr
->sa_family
== AF_NETLINK
&& len
>= sizeof(struct sockaddr_nl
)) {
1455 struct sockaddr_nl
*target_nl
= (struct sockaddr_nl
*)target_saddr
;
1456 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1457 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1458 } else if (addr
->sa_family
== AF_PACKET
) {
1459 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1460 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1461 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1462 } else if (addr
->sa_family
== AF_INET6
&&
1463 len
>= sizeof(struct target_sockaddr_in6
)) {
1464 struct target_sockaddr_in6
*target_in6
=
1465 (struct target_sockaddr_in6
*)target_saddr
;
1466 target_in6
->sin6_scope_id
= tswap16(target_in6
->sin6_scope_id
);
1468 unlock_user(target_saddr
, target_addr
, len
);
1473 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1474 struct target_msghdr
*target_msgh
)
1476 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1477 abi_long msg_controllen
;
1478 abi_ulong target_cmsg_addr
;
1479 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1480 socklen_t space
= 0;
1482 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1483 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1485 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1486 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1487 target_cmsg_start
= target_cmsg
;
1489 return -TARGET_EFAULT
;
1491 while (cmsg
&& target_cmsg
) {
1492 void *data
= CMSG_DATA(cmsg
);
1493 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1495 int len
= tswapal(target_cmsg
->cmsg_len
)
1496 - sizeof(struct target_cmsghdr
);
1498 space
+= CMSG_SPACE(len
);
1499 if (space
> msgh
->msg_controllen
) {
1500 space
-= CMSG_SPACE(len
);
1501 /* This is a QEMU bug, since we allocated the payload
1502 * area ourselves (unlike overflow in host-to-target
1503 * conversion, which is just the guest giving us a buffer
1504 * that's too small). It can't happen for the payload types
1505 * we currently support; if it becomes an issue in future
1506 * we would need to improve our allocation strategy to
1507 * something more intelligent than "twice the size of the
1508 * target buffer we're reading from".
1510 gemu_log("Host cmsg overflow\n");
1514 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1515 cmsg
->cmsg_level
= SOL_SOCKET
;
1517 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1519 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1520 cmsg
->cmsg_len
= CMSG_LEN(len
);
1522 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1523 int *fd
= (int *)data
;
1524 int *target_fd
= (int *)target_data
;
1525 int i
, numfds
= len
/ sizeof(int);
1527 for (i
= 0; i
< numfds
; i
++) {
1528 __get_user(fd
[i
], target_fd
+ i
);
1530 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1531 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1532 struct ucred
*cred
= (struct ucred
*)data
;
1533 struct target_ucred
*target_cred
=
1534 (struct target_ucred
*)target_data
;
1536 __get_user(cred
->pid
, &target_cred
->pid
);
1537 __get_user(cred
->uid
, &target_cred
->uid
);
1538 __get_user(cred
->gid
, &target_cred
->gid
);
1540 gemu_log("Unsupported ancillary data: %d/%d\n",
1541 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1542 memcpy(data
, target_data
, len
);
1545 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1546 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1549 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1551 msgh
->msg_controllen
= space
;
1555 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1556 struct msghdr
*msgh
)
1558 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1559 abi_long msg_controllen
;
1560 abi_ulong target_cmsg_addr
;
1561 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1562 socklen_t space
= 0;
1564 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1565 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1567 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1568 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1569 target_cmsg_start
= target_cmsg
;
1571 return -TARGET_EFAULT
;
1573 while (cmsg
&& target_cmsg
) {
1574 void *data
= CMSG_DATA(cmsg
);
1575 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1577 int len
= cmsg
->cmsg_len
- sizeof(struct cmsghdr
);
1578 int tgt_len
, tgt_space
;
1580 /* We never copy a half-header but may copy half-data;
1581 * this is Linux's behaviour in put_cmsg(). Note that
1582 * truncation here is a guest problem (which we report
1583 * to the guest via the CTRUNC bit), unlike truncation
1584 * in target_to_host_cmsg, which is a QEMU bug.
1586 if (msg_controllen
< sizeof(struct target_cmsghdr
)) {
1587 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1591 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1592 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1594 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1596 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1598 /* Payload types which need a different size of payload on
1599 * the target must adjust tgt_len here.
1602 switch (cmsg
->cmsg_level
) {
1604 switch (cmsg
->cmsg_type
) {
1606 tgt_len
= sizeof(struct target_timeval
);
1616 if (msg_controllen
< TARGET_CMSG_LEN(tgt_len
)) {
1617 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1618 tgt_len
= msg_controllen
- sizeof(struct target_cmsghdr
);
1621 /* We must now copy-and-convert len bytes of payload
1622 * into tgt_len bytes of destination space. Bear in mind
1623 * that in both source and destination we may be dealing
1624 * with a truncated value!
1626 switch (cmsg
->cmsg_level
) {
1628 switch (cmsg
->cmsg_type
) {
1631 int *fd
= (int *)data
;
1632 int *target_fd
= (int *)target_data
;
1633 int i
, numfds
= tgt_len
/ sizeof(int);
1635 for (i
= 0; i
< numfds
; i
++) {
1636 __put_user(fd
[i
], target_fd
+ i
);
1642 struct timeval
*tv
= (struct timeval
*)data
;
1643 struct target_timeval
*target_tv
=
1644 (struct target_timeval
*)target_data
;
1646 if (len
!= sizeof(struct timeval
) ||
1647 tgt_len
!= sizeof(struct target_timeval
)) {
1651 /* copy struct timeval to target */
1652 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1653 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1656 case SCM_CREDENTIALS
:
1658 struct ucred
*cred
= (struct ucred
*)data
;
1659 struct target_ucred
*target_cred
=
1660 (struct target_ucred
*)target_data
;
1662 __put_user(cred
->pid
, &target_cred
->pid
);
1663 __put_user(cred
->uid
, &target_cred
->uid
);
1664 __put_user(cred
->gid
, &target_cred
->gid
);
1673 switch (cmsg
->cmsg_type
) {
1676 uint32_t *v
= (uint32_t *)data
;
1677 uint32_t *t_int
= (uint32_t *)target_data
;
1679 if (len
!= sizeof(uint32_t) ||
1680 tgt_len
!= sizeof(uint32_t)) {
1683 __put_user(*v
, t_int
);
1689 struct sock_extended_err ee
;
1690 struct sockaddr_in offender
;
1692 struct errhdr_t
*errh
= (struct errhdr_t
*)data
;
1693 struct errhdr_t
*target_errh
=
1694 (struct errhdr_t
*)target_data
;
1696 if (len
!= sizeof(struct errhdr_t
) ||
1697 tgt_len
!= sizeof(struct errhdr_t
)) {
1700 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1701 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1702 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1703 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1704 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1705 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1706 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1707 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1708 (void *) &errh
->offender
, sizeof(errh
->offender
));
1717 switch (cmsg
->cmsg_type
) {
1720 uint32_t *v
= (uint32_t *)data
;
1721 uint32_t *t_int
= (uint32_t *)target_data
;
1723 if (len
!= sizeof(uint32_t) ||
1724 tgt_len
!= sizeof(uint32_t)) {
1727 __put_user(*v
, t_int
);
1733 struct sock_extended_err ee
;
1734 struct sockaddr_in6 offender
;
1736 struct errhdr6_t
*errh
= (struct errhdr6_t
*)data
;
1737 struct errhdr6_t
*target_errh
=
1738 (struct errhdr6_t
*)target_data
;
1740 if (len
!= sizeof(struct errhdr6_t
) ||
1741 tgt_len
!= sizeof(struct errhdr6_t
)) {
1744 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1745 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1746 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1747 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1748 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1749 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1750 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1751 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1752 (void *) &errh
->offender
, sizeof(errh
->offender
));
1762 gemu_log("Unsupported ancillary data: %d/%d\n",
1763 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1764 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1765 if (tgt_len
> len
) {
1766 memset(target_data
+ len
, 0, tgt_len
- len
);
1770 target_cmsg
->cmsg_len
= tswapal(TARGET_CMSG_LEN(tgt_len
));
1771 tgt_space
= TARGET_CMSG_SPACE(tgt_len
);
1772 if (msg_controllen
< tgt_space
) {
1773 tgt_space
= msg_controllen
;
1775 msg_controllen
-= tgt_space
;
1777 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1778 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1781 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1783 target_msgh
->msg_controllen
= tswapal(space
);
1787 /* do_setsockopt() Must return target values and target errnos. */
1788 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
1789 abi_ulong optval_addr
, socklen_t optlen
)
1793 struct ip_mreqn
*ip_mreq
;
1794 struct ip_mreq_source
*ip_mreq_source
;
1798 /* TCP options all take an 'int' value. */
1799 if (optlen
< sizeof(uint32_t))
1800 return -TARGET_EINVAL
;
1802 if (get_user_u32(val
, optval_addr
))
1803 return -TARGET_EFAULT
;
1804 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1811 case IP_ROUTER_ALERT
:
1815 case IP_MTU_DISCOVER
:
1822 case IP_MULTICAST_TTL
:
1823 case IP_MULTICAST_LOOP
:
1825 if (optlen
>= sizeof(uint32_t)) {
1826 if (get_user_u32(val
, optval_addr
))
1827 return -TARGET_EFAULT
;
1828 } else if (optlen
>= 1) {
1829 if (get_user_u8(val
, optval_addr
))
1830 return -TARGET_EFAULT
;
1832 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1834 case IP_ADD_MEMBERSHIP
:
1835 case IP_DROP_MEMBERSHIP
:
1836 if (optlen
< sizeof (struct target_ip_mreq
) ||
1837 optlen
> sizeof (struct target_ip_mreqn
))
1838 return -TARGET_EINVAL
;
1840 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
1841 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
1842 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
1845 case IP_BLOCK_SOURCE
:
1846 case IP_UNBLOCK_SOURCE
:
1847 case IP_ADD_SOURCE_MEMBERSHIP
:
1848 case IP_DROP_SOURCE_MEMBERSHIP
:
1849 if (optlen
!= sizeof (struct target_ip_mreq_source
))
1850 return -TARGET_EINVAL
;
1852 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
1853 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
1854 unlock_user (ip_mreq_source
, optval_addr
, 0);
1863 case IPV6_MTU_DISCOVER
:
1866 case IPV6_RECVPKTINFO
:
1867 case IPV6_UNICAST_HOPS
:
1868 case IPV6_MULTICAST_HOPS
:
1869 case IPV6_MULTICAST_LOOP
:
1871 case IPV6_RECVHOPLIMIT
:
1872 case IPV6_2292HOPLIMIT
:
1875 if (optlen
< sizeof(uint32_t)) {
1876 return -TARGET_EINVAL
;
1878 if (get_user_u32(val
, optval_addr
)) {
1879 return -TARGET_EFAULT
;
1881 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1882 &val
, sizeof(val
)));
1886 struct in6_pktinfo pki
;
1888 if (optlen
< sizeof(pki
)) {
1889 return -TARGET_EINVAL
;
1892 if (copy_from_user(&pki
, optval_addr
, sizeof(pki
))) {
1893 return -TARGET_EFAULT
;
1896 pki
.ipi6_ifindex
= tswap32(pki
.ipi6_ifindex
);
1898 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1899 &pki
, sizeof(pki
)));
1910 struct icmp6_filter icmp6f
;
1912 if (optlen
> sizeof(icmp6f
)) {
1913 optlen
= sizeof(icmp6f
);
1916 if (copy_from_user(&icmp6f
, optval_addr
, optlen
)) {
1917 return -TARGET_EFAULT
;
1920 for (val
= 0; val
< 8; val
++) {
1921 icmp6f
.data
[val
] = tswap32(icmp6f
.data
[val
]);
1924 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1936 /* those take an u32 value */
1937 if (optlen
< sizeof(uint32_t)) {
1938 return -TARGET_EINVAL
;
1941 if (get_user_u32(val
, optval_addr
)) {
1942 return -TARGET_EFAULT
;
1944 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1945 &val
, sizeof(val
)));
1952 case TARGET_SOL_SOCKET
:
1954 case TARGET_SO_RCVTIMEO
:
1958 optname
= SO_RCVTIMEO
;
1961 if (optlen
!= sizeof(struct target_timeval
)) {
1962 return -TARGET_EINVAL
;
1965 if (copy_from_user_timeval(&tv
, optval_addr
)) {
1966 return -TARGET_EFAULT
;
1969 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
1973 case TARGET_SO_SNDTIMEO
:
1974 optname
= SO_SNDTIMEO
;
1976 case TARGET_SO_ATTACH_FILTER
:
1978 struct target_sock_fprog
*tfprog
;
1979 struct target_sock_filter
*tfilter
;
1980 struct sock_fprog fprog
;
1981 struct sock_filter
*filter
;
1984 if (optlen
!= sizeof(*tfprog
)) {
1985 return -TARGET_EINVAL
;
1987 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
1988 return -TARGET_EFAULT
;
1990 if (!lock_user_struct(VERIFY_READ
, tfilter
,
1991 tswapal(tfprog
->filter
), 0)) {
1992 unlock_user_struct(tfprog
, optval_addr
, 1);
1993 return -TARGET_EFAULT
;
1996 fprog
.len
= tswap16(tfprog
->len
);
1997 filter
= g_try_new(struct sock_filter
, fprog
.len
);
1998 if (filter
== NULL
) {
1999 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2000 unlock_user_struct(tfprog
, optval_addr
, 1);
2001 return -TARGET_ENOMEM
;
2003 for (i
= 0; i
< fprog
.len
; i
++) {
2004 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2005 filter
[i
].jt
= tfilter
[i
].jt
;
2006 filter
[i
].jf
= tfilter
[i
].jf
;
2007 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2009 fprog
.filter
= filter
;
2011 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2012 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2015 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2016 unlock_user_struct(tfprog
, optval_addr
, 1);
2019 case TARGET_SO_BINDTODEVICE
:
2021 char *dev_ifname
, *addr_ifname
;
2023 if (optlen
> IFNAMSIZ
- 1) {
2024 optlen
= IFNAMSIZ
- 1;
2026 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2028 return -TARGET_EFAULT
;
2030 optname
= SO_BINDTODEVICE
;
2031 addr_ifname
= alloca(IFNAMSIZ
);
2032 memcpy(addr_ifname
, dev_ifname
, optlen
);
2033 addr_ifname
[optlen
] = 0;
2034 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2035 addr_ifname
, optlen
));
2036 unlock_user (dev_ifname
, optval_addr
, 0);
2039 case TARGET_SO_LINGER
:
2042 struct target_linger
*tlg
;
2044 if (optlen
!= sizeof(struct target_linger
)) {
2045 return -TARGET_EINVAL
;
2047 if (!lock_user_struct(VERIFY_READ
, tlg
, optval_addr
, 1)) {
2048 return -TARGET_EFAULT
;
2050 __get_user(lg
.l_onoff
, &tlg
->l_onoff
);
2051 __get_user(lg
.l_linger
, &tlg
->l_linger
);
2052 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, SO_LINGER
,
2054 unlock_user_struct(tlg
, optval_addr
, 0);
2057 /* Options with 'int' argument. */
2058 case TARGET_SO_DEBUG
:
2061 case TARGET_SO_REUSEADDR
:
2062 optname
= SO_REUSEADDR
;
2064 case TARGET_SO_TYPE
:
2067 case TARGET_SO_ERROR
:
2070 case TARGET_SO_DONTROUTE
:
2071 optname
= SO_DONTROUTE
;
2073 case TARGET_SO_BROADCAST
:
2074 optname
= SO_BROADCAST
;
2076 case TARGET_SO_SNDBUF
:
2077 optname
= SO_SNDBUF
;
2079 case TARGET_SO_SNDBUFFORCE
:
2080 optname
= SO_SNDBUFFORCE
;
2082 case TARGET_SO_RCVBUF
:
2083 optname
= SO_RCVBUF
;
2085 case TARGET_SO_RCVBUFFORCE
:
2086 optname
= SO_RCVBUFFORCE
;
2088 case TARGET_SO_KEEPALIVE
:
2089 optname
= SO_KEEPALIVE
;
2091 case TARGET_SO_OOBINLINE
:
2092 optname
= SO_OOBINLINE
;
2094 case TARGET_SO_NO_CHECK
:
2095 optname
= SO_NO_CHECK
;
2097 case TARGET_SO_PRIORITY
:
2098 optname
= SO_PRIORITY
;
2101 case TARGET_SO_BSDCOMPAT
:
2102 optname
= SO_BSDCOMPAT
;
2105 case TARGET_SO_PASSCRED
:
2106 optname
= SO_PASSCRED
;
2108 case TARGET_SO_PASSSEC
:
2109 optname
= SO_PASSSEC
;
2111 case TARGET_SO_TIMESTAMP
:
2112 optname
= SO_TIMESTAMP
;
2114 case TARGET_SO_RCVLOWAT
:
2115 optname
= SO_RCVLOWAT
;
2120 if (optlen
< sizeof(uint32_t))
2121 return -TARGET_EINVAL
;
2123 if (get_user_u32(val
, optval_addr
))
2124 return -TARGET_EFAULT
;
2125 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
2129 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
2130 ret
= -TARGET_ENOPROTOOPT
;
2135 /* do_getsockopt() Must return target values and target errnos. */
2136 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
2137 abi_ulong optval_addr
, abi_ulong optlen
)
2144 case TARGET_SOL_SOCKET
:
2147 /* These don't just return a single integer */
2148 case TARGET_SO_RCVTIMEO
:
2149 case TARGET_SO_SNDTIMEO
:
2150 case TARGET_SO_PEERNAME
:
2152 case TARGET_SO_PEERCRED
: {
2155 struct target_ucred
*tcr
;
2157 if (get_user_u32(len
, optlen
)) {
2158 return -TARGET_EFAULT
;
2161 return -TARGET_EINVAL
;
2165 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
2173 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
2174 return -TARGET_EFAULT
;
2176 __put_user(cr
.pid
, &tcr
->pid
);
2177 __put_user(cr
.uid
, &tcr
->uid
);
2178 __put_user(cr
.gid
, &tcr
->gid
);
2179 unlock_user_struct(tcr
, optval_addr
, 1);
2180 if (put_user_u32(len
, optlen
)) {
2181 return -TARGET_EFAULT
;
2185 case TARGET_SO_LINGER
:
2189 struct target_linger
*tlg
;
2191 if (get_user_u32(len
, optlen
)) {
2192 return -TARGET_EFAULT
;
2195 return -TARGET_EINVAL
;
2199 ret
= get_errno(getsockopt(sockfd
, level
, SO_LINGER
,
2207 if (!lock_user_struct(VERIFY_WRITE
, tlg
, optval_addr
, 0)) {
2208 return -TARGET_EFAULT
;
2210 __put_user(lg
.l_onoff
, &tlg
->l_onoff
);
2211 __put_user(lg
.l_linger
, &tlg
->l_linger
);
2212 unlock_user_struct(tlg
, optval_addr
, 1);
2213 if (put_user_u32(len
, optlen
)) {
2214 return -TARGET_EFAULT
;
2218 /* Options with 'int' argument. */
2219 case TARGET_SO_DEBUG
:
2222 case TARGET_SO_REUSEADDR
:
2223 optname
= SO_REUSEADDR
;
2225 case TARGET_SO_TYPE
:
2228 case TARGET_SO_ERROR
:
2231 case TARGET_SO_DONTROUTE
:
2232 optname
= SO_DONTROUTE
;
2234 case TARGET_SO_BROADCAST
:
2235 optname
= SO_BROADCAST
;
2237 case TARGET_SO_SNDBUF
:
2238 optname
= SO_SNDBUF
;
2240 case TARGET_SO_RCVBUF
:
2241 optname
= SO_RCVBUF
;
2243 case TARGET_SO_KEEPALIVE
:
2244 optname
= SO_KEEPALIVE
;
2246 case TARGET_SO_OOBINLINE
:
2247 optname
= SO_OOBINLINE
;
2249 case TARGET_SO_NO_CHECK
:
2250 optname
= SO_NO_CHECK
;
2252 case TARGET_SO_PRIORITY
:
2253 optname
= SO_PRIORITY
;
2256 case TARGET_SO_BSDCOMPAT
:
2257 optname
= SO_BSDCOMPAT
;
2260 case TARGET_SO_PASSCRED
:
2261 optname
= SO_PASSCRED
;
2263 case TARGET_SO_TIMESTAMP
:
2264 optname
= SO_TIMESTAMP
;
2266 case TARGET_SO_RCVLOWAT
:
2267 optname
= SO_RCVLOWAT
;
2269 case TARGET_SO_ACCEPTCONN
:
2270 optname
= SO_ACCEPTCONN
;
2277 /* TCP options all take an 'int' value. */
2279 if (get_user_u32(len
, optlen
))
2280 return -TARGET_EFAULT
;
2282 return -TARGET_EINVAL
;
2284 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2287 if (optname
== SO_TYPE
) {
2288 val
= host_to_target_sock_type(val
);
2293 if (put_user_u32(val
, optval_addr
))
2294 return -TARGET_EFAULT
;
2296 if (put_user_u8(val
, optval_addr
))
2297 return -TARGET_EFAULT
;
2299 if (put_user_u32(len
, optlen
))
2300 return -TARGET_EFAULT
;
2307 case IP_ROUTER_ALERT
:
2311 case IP_MTU_DISCOVER
:
2317 case IP_MULTICAST_TTL
:
2318 case IP_MULTICAST_LOOP
:
2319 if (get_user_u32(len
, optlen
))
2320 return -TARGET_EFAULT
;
2322 return -TARGET_EINVAL
;
2324 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2327 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2329 if (put_user_u32(len
, optlen
)
2330 || put_user_u8(val
, optval_addr
))
2331 return -TARGET_EFAULT
;
2333 if (len
> sizeof(int))
2335 if (put_user_u32(len
, optlen
)
2336 || put_user_u32(val
, optval_addr
))
2337 return -TARGET_EFAULT
;
2341 ret
= -TARGET_ENOPROTOOPT
;
2347 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2349 ret
= -TARGET_EOPNOTSUPP
;
2355 /* Convert target low/high pair representing file offset into the host
2356 * low/high pair. This function doesn't handle offsets bigger than 64 bits
2357 * as the kernel doesn't handle them either.
2359 static void target_to_host_low_high(abi_ulong tlow
,
2361 unsigned long *hlow
,
2362 unsigned long *hhigh
)
2364 uint64_t off
= tlow
|
2365 ((unsigned long long)thigh
<< TARGET_LONG_BITS
/ 2) <<
2366 TARGET_LONG_BITS
/ 2;
2369 *hhigh
= (off
>> HOST_LONG_BITS
/ 2) >> HOST_LONG_BITS
/ 2;
2372 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
2373 abi_ulong count
, int copy
)
2375 struct target_iovec
*target_vec
;
2377 abi_ulong total_len
, max_len
;
2380 bool bad_address
= false;
2386 if (count
> IOV_MAX
) {
2391 vec
= g_try_new0(struct iovec
, count
);
2397 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2398 count
* sizeof(struct target_iovec
), 1);
2399 if (target_vec
== NULL
) {
2404 /* ??? If host page size > target page size, this will result in a
2405 value larger than what we can actually support. */
2406 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
2409 for (i
= 0; i
< count
; i
++) {
2410 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2411 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2416 } else if (len
== 0) {
2417 /* Zero length pointer is ignored. */
2418 vec
[i
].iov_base
= 0;
2420 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
2421 /* If the first buffer pointer is bad, this is a fault. But
2422 * subsequent bad buffers will result in a partial write; this
2423 * is realized by filling the vector with null pointers and
2425 if (!vec
[i
].iov_base
) {
2436 if (len
> max_len
- total_len
) {
2437 len
= max_len
- total_len
;
2440 vec
[i
].iov_len
= len
;
2444 unlock_user(target_vec
, target_addr
, 0);
2449 if (tswapal(target_vec
[i
].iov_len
) > 0) {
2450 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
2453 unlock_user(target_vec
, target_addr
, 0);
2460 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
2461 abi_ulong count
, int copy
)
2463 struct target_iovec
*target_vec
;
2466 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2467 count
* sizeof(struct target_iovec
), 1);
2469 for (i
= 0; i
< count
; i
++) {
2470 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2471 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2475 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
2477 unlock_user(target_vec
, target_addr
, 0);
2483 static inline int target_to_host_sock_type(int *type
)
2486 int target_type
= *type
;
2488 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
2489 case TARGET_SOCK_DGRAM
:
2490 host_type
= SOCK_DGRAM
;
2492 case TARGET_SOCK_STREAM
:
2493 host_type
= SOCK_STREAM
;
2496 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
2499 if (target_type
& TARGET_SOCK_CLOEXEC
) {
2500 #if defined(SOCK_CLOEXEC)
2501 host_type
|= SOCK_CLOEXEC
;
2503 return -TARGET_EINVAL
;
2506 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2507 #if defined(SOCK_NONBLOCK)
2508 host_type
|= SOCK_NONBLOCK
;
2509 #elif !defined(O_NONBLOCK)
2510 return -TARGET_EINVAL
;
2517 /* Try to emulate socket type flags after socket creation. */
2518 static int sock_flags_fixup(int fd
, int target_type
)
2520 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2521 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2522 int flags
= fcntl(fd
, F_GETFL
);
2523 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
2525 return -TARGET_EINVAL
;
2532 /* do_socket() Must return target values and target errnos. */
2533 static abi_long
do_socket(int domain
, int type
, int protocol
)
2535 int target_type
= type
;
2538 ret
= target_to_host_sock_type(&type
);
2543 if (domain
== PF_NETLINK
&& !(
2544 #ifdef CONFIG_RTNETLINK
2545 protocol
== NETLINK_ROUTE
||
2547 protocol
== NETLINK_KOBJECT_UEVENT
||
2548 protocol
== NETLINK_AUDIT
)) {
2549 return -EPFNOSUPPORT
;
2552 if (domain
== AF_PACKET
||
2553 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
2554 protocol
= tswap16(protocol
);
2557 ret
= get_errno(socket(domain
, type
, protocol
));
2559 ret
= sock_flags_fixup(ret
, target_type
);
2560 if (type
== SOCK_PACKET
) {
2561 /* Manage an obsolete case :
2562 * if socket type is SOCK_PACKET, bind by name
2564 fd_trans_register(ret
, &target_packet_trans
);
2565 } else if (domain
== PF_NETLINK
) {
2567 #ifdef CONFIG_RTNETLINK
2569 fd_trans_register(ret
, &target_netlink_route_trans
);
2572 case NETLINK_KOBJECT_UEVENT
:
2573 /* nothing to do: messages are strings */
2576 fd_trans_register(ret
, &target_netlink_audit_trans
);
2579 g_assert_not_reached();
2586 /* do_bind() Must return target values and target errnos. */
2587 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
2593 if ((int)addrlen
< 0) {
2594 return -TARGET_EINVAL
;
2597 addr
= alloca(addrlen
+1);
2599 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
2603 return get_errno(bind(sockfd
, addr
, addrlen
));
2606 /* do_connect() Must return target values and target errnos. */
2607 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
2613 if ((int)addrlen
< 0) {
2614 return -TARGET_EINVAL
;
2617 addr
= alloca(addrlen
+1);
2619 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
2623 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
2626 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2627 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
2628 int flags
, int send
)
2634 abi_ulong target_vec
;
2636 if (msgp
->msg_name
) {
2637 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
2638 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
2639 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
2640 tswapal(msgp
->msg_name
),
2642 if (ret
== -TARGET_EFAULT
) {
2643 /* For connected sockets msg_name and msg_namelen must
2644 * be ignored, so returning EFAULT immediately is wrong.
2645 * Instead, pass a bad msg_name to the host kernel, and
2646 * let it decide whether to return EFAULT or not.
2648 msg
.msg_name
= (void *)-1;
2653 msg
.msg_name
= NULL
;
2654 msg
.msg_namelen
= 0;
2656 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
2657 msg
.msg_control
= alloca(msg
.msg_controllen
);
2658 memset(msg
.msg_control
, 0, msg
.msg_controllen
);
2660 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
2662 count
= tswapal(msgp
->msg_iovlen
);
2663 target_vec
= tswapal(msgp
->msg_iov
);
2665 if (count
> IOV_MAX
) {
2666 /* sendrcvmsg returns a different errno for this condition than
2667 * readv/writev, so we must catch it here before lock_iovec() does.
2669 ret
= -TARGET_EMSGSIZE
;
2673 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
2674 target_vec
, count
, send
);
2676 ret
= -host_to_target_errno(errno
);
2679 msg
.msg_iovlen
= count
;
2683 if (fd_trans_target_to_host_data(fd
)) {
2686 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
2687 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
2688 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
2689 msg
.msg_iov
->iov_len
);
2691 msg
.msg_iov
->iov_base
= host_msg
;
2692 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
2696 ret
= target_to_host_cmsg(&msg
, msgp
);
2698 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
2702 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
2703 if (!is_error(ret
)) {
2705 if (fd_trans_host_to_target_data(fd
)) {
2706 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
2707 MIN(msg
.msg_iov
->iov_len
, len
));
2709 ret
= host_to_target_cmsg(msgp
, &msg
);
2711 if (!is_error(ret
)) {
2712 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
2713 if (msg
.msg_name
!= NULL
&& msg
.msg_name
!= (void *)-1) {
2714 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
2715 msg
.msg_name
, msg
.msg_namelen
);
2727 unlock_iovec(vec
, target_vec
, count
, !send
);
2732 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
2733 int flags
, int send
)
2736 struct target_msghdr
*msgp
;
2738 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
2742 return -TARGET_EFAULT
;
2744 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
2745 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
2749 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2750 * so it might not have this *mmsg-specific flag either.
2752 #ifndef MSG_WAITFORONE
2753 #define MSG_WAITFORONE 0x10000
2756 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
2757 unsigned int vlen
, unsigned int flags
,
2760 struct target_mmsghdr
*mmsgp
;
2764 if (vlen
> UIO_MAXIOV
) {
2768 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
2770 return -TARGET_EFAULT
;
2773 for (i
= 0; i
< vlen
; i
++) {
2774 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
2775 if (is_error(ret
)) {
2778 mmsgp
[i
].msg_len
= tswap32(ret
);
2779 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2780 if (flags
& MSG_WAITFORONE
) {
2781 flags
|= MSG_DONTWAIT
;
2785 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
2787 /* Return number of datagrams sent if we sent any at all;
2788 * otherwise return the error.
2796 /* do_accept4() Must return target values and target errnos. */
2797 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
2798 abi_ulong target_addrlen_addr
, int flags
)
2805 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
2807 if (target_addr
== 0) {
2808 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
2811 /* linux returns EINVAL if addrlen pointer is invalid */
2812 if (get_user_u32(addrlen
, target_addrlen_addr
))
2813 return -TARGET_EINVAL
;
2815 if ((int)addrlen
< 0) {
2816 return -TARGET_EINVAL
;
2819 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2820 return -TARGET_EINVAL
;
2822 addr
= alloca(addrlen
);
2824 ret
= get_errno(safe_accept4(fd
, addr
, &addrlen
, host_flags
));
2825 if (!is_error(ret
)) {
2826 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2827 if (put_user_u32(addrlen
, target_addrlen_addr
))
2828 ret
= -TARGET_EFAULT
;
2833 /* do_getpeername() Must return target values and target errnos. */
2834 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
2835 abi_ulong target_addrlen_addr
)
2841 if (get_user_u32(addrlen
, target_addrlen_addr
))
2842 return -TARGET_EFAULT
;
2844 if ((int)addrlen
< 0) {
2845 return -TARGET_EINVAL
;
2848 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2849 return -TARGET_EFAULT
;
2851 addr
= alloca(addrlen
);
2853 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
2854 if (!is_error(ret
)) {
2855 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2856 if (put_user_u32(addrlen
, target_addrlen_addr
))
2857 ret
= -TARGET_EFAULT
;
2862 /* do_getsockname() Must return target values and target errnos. */
2863 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
2864 abi_ulong target_addrlen_addr
)
2870 if (get_user_u32(addrlen
, target_addrlen_addr
))
2871 return -TARGET_EFAULT
;
2873 if ((int)addrlen
< 0) {
2874 return -TARGET_EINVAL
;
2877 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2878 return -TARGET_EFAULT
;
2880 addr
= alloca(addrlen
);
2882 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
2883 if (!is_error(ret
)) {
2884 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2885 if (put_user_u32(addrlen
, target_addrlen_addr
))
2886 ret
= -TARGET_EFAULT
;
2891 /* do_socketpair() Must return target values and target errnos. */
2892 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
2893 abi_ulong target_tab_addr
)
2898 target_to_host_sock_type(&type
);
2900 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
2901 if (!is_error(ret
)) {
2902 if (put_user_s32(tab
[0], target_tab_addr
)
2903 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
2904 ret
= -TARGET_EFAULT
;
2909 /* do_sendto() Must return target values and target errnos. */
2910 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
2911 abi_ulong target_addr
, socklen_t addrlen
)
2915 void *copy_msg
= NULL
;
2918 if ((int)addrlen
< 0) {
2919 return -TARGET_EINVAL
;
2922 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
2924 return -TARGET_EFAULT
;
2925 if (fd_trans_target_to_host_data(fd
)) {
2926 copy_msg
= host_msg
;
2927 host_msg
= g_malloc(len
);
2928 memcpy(host_msg
, copy_msg
, len
);
2929 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
2935 addr
= alloca(addrlen
+1);
2936 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
2940 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
2942 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
2947 host_msg
= copy_msg
;
2949 unlock_user(host_msg
, msg
, 0);
2953 /* do_recvfrom() Must return target values and target errnos. */
2954 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
2955 abi_ulong target_addr
,
2956 abi_ulong target_addrlen
)
2963 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
2965 return -TARGET_EFAULT
;
2967 if (get_user_u32(addrlen
, target_addrlen
)) {
2968 ret
= -TARGET_EFAULT
;
2971 if ((int)addrlen
< 0) {
2972 ret
= -TARGET_EINVAL
;
2975 addr
= alloca(addrlen
);
2976 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
2979 addr
= NULL
; /* To keep compiler quiet. */
2980 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
2982 if (!is_error(ret
)) {
2983 if (fd_trans_host_to_target_data(fd
)) {
2985 trans
= fd_trans_host_to_target_data(fd
)(host_msg
, MIN(ret
, len
));
2986 if (is_error(trans
)) {
2992 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2993 if (put_user_u32(addrlen
, target_addrlen
)) {
2994 ret
= -TARGET_EFAULT
;
2998 unlock_user(host_msg
, msg
, len
);
3001 unlock_user(host_msg
, msg
, 0);
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
3099 #define N_SHM_REGIONS 32
3101 static struct shm_region
{
3105 } shm_regions
[N_SHM_REGIONS
];
3107 #ifndef TARGET_SEMID64_DS
3108 /* asm-generic version of this struct */
3109 struct target_semid64_ds
3111 struct target_ipc_perm sem_perm
;
3112 abi_ulong sem_otime
;
3113 #if TARGET_ABI_BITS == 32
3114 abi_ulong __unused1
;
3116 abi_ulong sem_ctime
;
3117 #if TARGET_ABI_BITS == 32
3118 abi_ulong __unused2
;
3120 abi_ulong sem_nsems
;
3121 abi_ulong __unused3
;
3122 abi_ulong __unused4
;
3126 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
3127 abi_ulong target_addr
)
3129 struct target_ipc_perm
*target_ip
;
3130 struct target_semid64_ds
*target_sd
;
3132 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3133 return -TARGET_EFAULT
;
3134 target_ip
= &(target_sd
->sem_perm
);
3135 host_ip
->__key
= tswap32(target_ip
->__key
);
3136 host_ip
->uid
= tswap32(target_ip
->uid
);
3137 host_ip
->gid
= tswap32(target_ip
->gid
);
3138 host_ip
->cuid
= tswap32(target_ip
->cuid
);
3139 host_ip
->cgid
= tswap32(target_ip
->cgid
);
3140 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3141 host_ip
->mode
= tswap32(target_ip
->mode
);
3143 host_ip
->mode
= tswap16(target_ip
->mode
);
3145 #if defined(TARGET_PPC)
3146 host_ip
->__seq
= tswap32(target_ip
->__seq
);
3148 host_ip
->__seq
= tswap16(target_ip
->__seq
);
3150 unlock_user_struct(target_sd
, target_addr
, 0);
3154 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
3155 struct ipc_perm
*host_ip
)
3157 struct target_ipc_perm
*target_ip
;
3158 struct target_semid64_ds
*target_sd
;
3160 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3161 return -TARGET_EFAULT
;
3162 target_ip
= &(target_sd
->sem_perm
);
3163 target_ip
->__key
= tswap32(host_ip
->__key
);
3164 target_ip
->uid
= tswap32(host_ip
->uid
);
3165 target_ip
->gid
= tswap32(host_ip
->gid
);
3166 target_ip
->cuid
= tswap32(host_ip
->cuid
);
3167 target_ip
->cgid
= tswap32(host_ip
->cgid
);
3168 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3169 target_ip
->mode
= tswap32(host_ip
->mode
);
3171 target_ip
->mode
= tswap16(host_ip
->mode
);
3173 #if defined(TARGET_PPC)
3174 target_ip
->__seq
= tswap32(host_ip
->__seq
);
3176 target_ip
->__seq
= tswap16(host_ip
->__seq
);
3178 unlock_user_struct(target_sd
, target_addr
, 1);
3182 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
3183 abi_ulong target_addr
)
3185 struct target_semid64_ds
*target_sd
;
3187 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3188 return -TARGET_EFAULT
;
3189 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
3190 return -TARGET_EFAULT
;
3191 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
3192 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
3193 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
3194 unlock_user_struct(target_sd
, target_addr
, 0);
3198 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
3199 struct semid_ds
*host_sd
)
3201 struct target_semid64_ds
*target_sd
;
3203 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3204 return -TARGET_EFAULT
;
3205 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
3206 return -TARGET_EFAULT
;
3207 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
3208 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
3209 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
3210 unlock_user_struct(target_sd
, target_addr
, 1);
3214 struct target_seminfo
{
3227 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
3228 struct seminfo
*host_seminfo
)
3230 struct target_seminfo
*target_seminfo
;
3231 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
3232 return -TARGET_EFAULT
;
3233 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
3234 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
3235 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
3236 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
3237 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
3238 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
3239 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
3240 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
3241 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
3242 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
3243 unlock_user_struct(target_seminfo
, target_addr
, 1);
3249 struct semid_ds
*buf
;
3250 unsigned short *array
;
3251 struct seminfo
*__buf
;
3254 union target_semun
{
3261 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
3262 abi_ulong target_addr
)
3265 unsigned short *array
;
3267 struct semid_ds semid_ds
;
3270 semun
.buf
= &semid_ds
;
3272 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3274 return get_errno(ret
);
3276 nsems
= semid_ds
.sem_nsems
;
3278 *host_array
= g_try_new(unsigned short, nsems
);
3280 return -TARGET_ENOMEM
;
3282 array
= lock_user(VERIFY_READ
, target_addr
,
3283 nsems
*sizeof(unsigned short), 1);
3285 g_free(*host_array
);
3286 return -TARGET_EFAULT
;
3289 for(i
=0; i
<nsems
; i
++) {
3290 __get_user((*host_array
)[i
], &array
[i
]);
3292 unlock_user(array
, target_addr
, 0);
3297 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
3298 unsigned short **host_array
)
3301 unsigned short *array
;
3303 struct semid_ds semid_ds
;
3306 semun
.buf
= &semid_ds
;
3308 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3310 return get_errno(ret
);
3312 nsems
= semid_ds
.sem_nsems
;
3314 array
= lock_user(VERIFY_WRITE
, target_addr
,
3315 nsems
*sizeof(unsigned short), 0);
3317 return -TARGET_EFAULT
;
3319 for(i
=0; i
<nsems
; i
++) {
3320 __put_user((*host_array
)[i
], &array
[i
]);
3322 g_free(*host_array
);
3323 unlock_user(array
, target_addr
, 1);
3328 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
3329 abi_ulong target_arg
)
3331 union target_semun target_su
= { .buf
= target_arg
};
3333 struct semid_ds dsarg
;
3334 unsigned short *array
= NULL
;
3335 struct seminfo seminfo
;
3336 abi_long ret
= -TARGET_EINVAL
;
3343 /* In 64 bit cross-endian situations, we will erroneously pick up
3344 * the wrong half of the union for the "val" element. To rectify
3345 * this, the entire 8-byte structure is byteswapped, followed by
3346 * a swap of the 4 byte val field. In other cases, the data is
3347 * already in proper host byte order. */
3348 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
3349 target_su
.buf
= tswapal(target_su
.buf
);
3350 arg
.val
= tswap32(target_su
.val
);
3352 arg
.val
= target_su
.val
;
3354 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3358 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
3362 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3363 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
3370 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
3374 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3375 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
3381 arg
.__buf
= &seminfo
;
3382 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3383 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
3391 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
3398 struct target_sembuf
{
3399 unsigned short sem_num
;
3404 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
3405 abi_ulong target_addr
,
3408 struct target_sembuf
*target_sembuf
;
3411 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
3412 nsops
*sizeof(struct target_sembuf
), 1);
3414 return -TARGET_EFAULT
;
3416 for(i
=0; i
<nsops
; i
++) {
3417 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
3418 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
3419 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
3422 unlock_user(target_sembuf
, target_addr
, 0);
3427 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
3429 struct sembuf sops
[nsops
];
3431 if (target_to_host_sembuf(sops
, ptr
, nsops
))
3432 return -TARGET_EFAULT
;
3434 return get_errno(safe_semtimedop(semid
, sops
, nsops
, NULL
));
3437 struct target_msqid_ds
3439 struct target_ipc_perm msg_perm
;
3440 abi_ulong msg_stime
;
3441 #if TARGET_ABI_BITS == 32
3442 abi_ulong __unused1
;
3444 abi_ulong msg_rtime
;
3445 #if TARGET_ABI_BITS == 32
3446 abi_ulong __unused2
;
3448 abi_ulong msg_ctime
;
3449 #if TARGET_ABI_BITS == 32
3450 abi_ulong __unused3
;
3452 abi_ulong __msg_cbytes
;
3454 abi_ulong msg_qbytes
;
3455 abi_ulong msg_lspid
;
3456 abi_ulong msg_lrpid
;
3457 abi_ulong __unused4
;
3458 abi_ulong __unused5
;
3461 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
3462 abi_ulong target_addr
)
3464 struct target_msqid_ds
*target_md
;
3466 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
3467 return -TARGET_EFAULT
;
3468 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
3469 return -TARGET_EFAULT
;
3470 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
3471 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
3472 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
3473 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
3474 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
3475 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
3476 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
3477 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
3478 unlock_user_struct(target_md
, target_addr
, 0);
3482 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
3483 struct msqid_ds
*host_md
)
3485 struct target_msqid_ds
*target_md
;
3487 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
3488 return -TARGET_EFAULT
;
3489 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
3490 return -TARGET_EFAULT
;
3491 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
3492 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
3493 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
3494 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
3495 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
3496 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
3497 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
3498 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
3499 unlock_user_struct(target_md
, target_addr
, 1);
3503 struct target_msginfo
{
3511 unsigned short int msgseg
;
3514 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
3515 struct msginfo
*host_msginfo
)
3517 struct target_msginfo
*target_msginfo
;
3518 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
3519 return -TARGET_EFAULT
;
3520 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
3521 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
3522 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
3523 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
3524 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
3525 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
3526 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
3527 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
3528 unlock_user_struct(target_msginfo
, target_addr
, 1);
3532 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
3534 struct msqid_ds dsarg
;
3535 struct msginfo msginfo
;
3536 abi_long ret
= -TARGET_EINVAL
;
3544 if (target_to_host_msqid_ds(&dsarg
,ptr
))
3545 return -TARGET_EFAULT
;
3546 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
3547 if (host_to_target_msqid_ds(ptr
,&dsarg
))
3548 return -TARGET_EFAULT
;
3551 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
3555 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
3556 if (host_to_target_msginfo(ptr
, &msginfo
))
3557 return -TARGET_EFAULT
;
3564 struct target_msgbuf
{
3569 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
3570 ssize_t msgsz
, int msgflg
)
3572 struct target_msgbuf
*target_mb
;
3573 struct msgbuf
*host_mb
;
3577 return -TARGET_EINVAL
;
3580 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
3581 return -TARGET_EFAULT
;
3582 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
3584 unlock_user_struct(target_mb
, msgp
, 0);
3585 return -TARGET_ENOMEM
;
3587 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
3588 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
3589 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
3591 unlock_user_struct(target_mb
, msgp
, 0);
3596 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
3597 ssize_t msgsz
, abi_long msgtyp
,
3600 struct target_msgbuf
*target_mb
;
3602 struct msgbuf
*host_mb
;
3606 return -TARGET_EINVAL
;
3609 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
3610 return -TARGET_EFAULT
;
3612 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
3614 ret
= -TARGET_ENOMEM
;
3617 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
3620 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
3621 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
3622 if (!target_mtext
) {
3623 ret
= -TARGET_EFAULT
;
3626 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
3627 unlock_user(target_mtext
, target_mtext_addr
, ret
);
3630 target_mb
->mtype
= tswapal(host_mb
->mtype
);
3634 unlock_user_struct(target_mb
, msgp
, 1);
3639 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
3640 abi_ulong target_addr
)
3642 struct target_shmid_ds
*target_sd
;
3644 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3645 return -TARGET_EFAULT
;
3646 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
3647 return -TARGET_EFAULT
;
3648 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
3649 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
3650 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
3651 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
3652 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
3653 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
3654 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
3655 unlock_user_struct(target_sd
, target_addr
, 0);
3659 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
3660 struct shmid_ds
*host_sd
)
3662 struct target_shmid_ds
*target_sd
;
3664 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3665 return -TARGET_EFAULT
;
3666 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
3667 return -TARGET_EFAULT
;
3668 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
3669 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
3670 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
3671 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
3672 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
3673 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
3674 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
3675 unlock_user_struct(target_sd
, target_addr
, 1);
3679 struct target_shminfo
{
3687 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
3688 struct shminfo
*host_shminfo
)
3690 struct target_shminfo
*target_shminfo
;
3691 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
3692 return -TARGET_EFAULT
;
3693 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
3694 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
3695 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
3696 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
3697 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
3698 unlock_user_struct(target_shminfo
, target_addr
, 1);
3702 struct target_shm_info
{
3707 abi_ulong swap_attempts
;
3708 abi_ulong swap_successes
;
3711 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
3712 struct shm_info
*host_shm_info
)
3714 struct target_shm_info
*target_shm_info
;
3715 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
3716 return -TARGET_EFAULT
;
3717 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
3718 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
3719 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
3720 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
3721 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
3722 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
3723 unlock_user_struct(target_shm_info
, target_addr
, 1);
3727 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
3729 struct shmid_ds dsarg
;
3730 struct shminfo shminfo
;
3731 struct shm_info shm_info
;
3732 abi_long ret
= -TARGET_EINVAL
;
3740 if (target_to_host_shmid_ds(&dsarg
, buf
))
3741 return -TARGET_EFAULT
;
3742 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
3743 if (host_to_target_shmid_ds(buf
, &dsarg
))
3744 return -TARGET_EFAULT
;
3747 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
3748 if (host_to_target_shminfo(buf
, &shminfo
))
3749 return -TARGET_EFAULT
;
3752 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
3753 if (host_to_target_shm_info(buf
, &shm_info
))
3754 return -TARGET_EFAULT
;
3759 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
3766 #ifndef TARGET_FORCE_SHMLBA
3767 /* For most architectures, SHMLBA is the same as the page size;
3768 * some architectures have larger values, in which case they should
3769 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
3770 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
3771 * and defining its own value for SHMLBA.
3773 * The kernel also permits SHMLBA to be set by the architecture to a
3774 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
3775 * this means that addresses are rounded to the large size if
3776 * SHM_RND is set but addresses not aligned to that size are not rejected
3777 * as long as they are at least page-aligned. Since the only architecture
3778 * which uses this is ia64 this code doesn't provide for that oddity.
3780 static inline abi_ulong
target_shmlba(CPUArchState
*cpu_env
)
3782 return TARGET_PAGE_SIZE
;
3786 static inline abi_ulong
do_shmat(CPUArchState
*cpu_env
,
3787 int shmid
, abi_ulong shmaddr
, int shmflg
)
3791 struct shmid_ds shm_info
;
3795 /* find out the length of the shared memory segment */
3796 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
3797 if (is_error(ret
)) {
3798 /* can't get length, bail out */
3802 shmlba
= target_shmlba(cpu_env
);
3804 if (shmaddr
& (shmlba
- 1)) {
3805 if (shmflg
& SHM_RND
) {
3806 shmaddr
&= ~(shmlba
- 1);
3808 return -TARGET_EINVAL
;
3811 if (!guest_range_valid(shmaddr
, shm_info
.shm_segsz
)) {
3812 return -TARGET_EINVAL
;
3818 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
3820 abi_ulong mmap_start
;
3822 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
3824 if (mmap_start
== -1) {
3826 host_raddr
= (void *)-1;
3828 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
3831 if (host_raddr
== (void *)-1) {
3833 return get_errno((long)host_raddr
);
3835 raddr
=h2g((unsigned long)host_raddr
);
3837 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
3838 PAGE_VALID
| PAGE_READ
|
3839 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
3841 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
3842 if (!shm_regions
[i
].in_use
) {
3843 shm_regions
[i
].in_use
= true;
3844 shm_regions
[i
].start
= raddr
;
3845 shm_regions
[i
].size
= shm_info
.shm_segsz
;
3855 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
3862 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
3863 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
3864 shm_regions
[i
].in_use
= false;
3865 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
3869 rv
= get_errno(shmdt(g2h(shmaddr
)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings. */
/* do_ipc() must return target values and target errnos.
 * Demultiplexes the legacy ipc(2) syscall onto the individual SysV IPC
 * helpers; `call` carries the operation in the low 16 bits and the
 * version in the high 16 bits.
 * NOTE(review): case labels between the visible fragments were restored
 * from the standard IPCOP_* set — confirm against the full file.
 */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* old-style call packs msgp/msgtyp into a kludge struct */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                                tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* write the attach address back through `third` */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;

    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;

    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
3983 /* kernel structure types definitions */
3985 #define STRUCT(name, ...) STRUCT_ ## name,
3986 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3988 #include "syscall_types.h"
3992 #undef STRUCT_SPECIAL
3994 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3995 #define STRUCT_SPECIAL(name)
3996 #include "syscall_types.h"
3998 #undef STRUCT_SPECIAL
4000 typedef struct IOCTLEntry IOCTLEntry
;
4002 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4003 int fd
, int cmd
, abi_long arg
);
4007 unsigned int host_cmd
;
4010 do_ioctl_fn
*do_ioctl
;
4011 const argtype arg_type
[5];
4014 #define IOC_R 0x0001
4015 #define IOC_W 0x0002
4016 #define IOC_RW (IOC_R | IOC_W)
4018 #define MAX_STRUCT_SIZE 4096
4020 #ifdef CONFIG_FIEMAP
4021 /* So fiemap access checks don't overflow on 32 bit systems.
4022 * This is very slightly smaller than the limit imposed by
4023 * the underlying kernel.
4025 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4026 / sizeof(struct fiemap_extent))
4028 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4029 int fd
, int cmd
, abi_long arg
)
4031 /* The parameter for this ioctl is a struct fiemap followed
4032 * by an array of struct fiemap_extent whose size is set
4033 * in fiemap->fm_extent_count. The array is filled in by the
4036 int target_size_in
, target_size_out
;
4038 const argtype
*arg_type
= ie
->arg_type
;
4039 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
4042 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
4046 assert(arg_type
[0] == TYPE_PTR
);
4047 assert(ie
->access
== IOC_RW
);
4049 target_size_in
= thunk_type_size(arg_type
, 0);
4050 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
4052 return -TARGET_EFAULT
;
4054 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4055 unlock_user(argptr
, arg
, 0);
4056 fm
= (struct fiemap
*)buf_temp
;
4057 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
4058 return -TARGET_EINVAL
;
4061 outbufsz
= sizeof (*fm
) +
4062 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
4064 if (outbufsz
> MAX_STRUCT_SIZE
) {
4065 /* We can't fit all the extents into the fixed size buffer.
4066 * Allocate one that is large enough and use it instead.
4068 fm
= g_try_malloc(outbufsz
);
4070 return -TARGET_ENOMEM
;
4072 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
4075 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, fm
));
4076 if (!is_error(ret
)) {
4077 target_size_out
= target_size_in
;
4078 /* An extent_count of 0 means we were only counting the extents
4079 * so there are no structs to copy
4081 if (fm
->fm_extent_count
!= 0) {
4082 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
4084 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
4086 ret
= -TARGET_EFAULT
;
4088 /* Convert the struct fiemap */
4089 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
4090 if (fm
->fm_extent_count
!= 0) {
4091 p
= argptr
+ target_size_in
;
4092 /* ...and then all the struct fiemap_extents */
4093 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
4094 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
4099 unlock_user(argptr
, arg
, target_size_out
);
4109 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4110 int fd
, int cmd
, abi_long arg
)
4112 const argtype
*arg_type
= ie
->arg_type
;
4116 struct ifconf
*host_ifconf
;
4118 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
4119 int target_ifreq_size
;
4124 abi_long target_ifc_buf
;
4128 assert(arg_type
[0] == TYPE_PTR
);
4129 assert(ie
->access
== IOC_RW
);
4132 target_size
= thunk_type_size(arg_type
, 0);
4134 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4136 return -TARGET_EFAULT
;
4137 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4138 unlock_user(argptr
, arg
, 0);
4140 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
4141 target_ifc_len
= host_ifconf
->ifc_len
;
4142 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
4144 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
4145 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
4146 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
4148 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
4149 if (outbufsz
> MAX_STRUCT_SIZE
) {
4150 /* We can't fit all the extents into the fixed size buffer.
4151 * Allocate one that is large enough and use it instead.
4153 host_ifconf
= malloc(outbufsz
);
4155 return -TARGET_ENOMEM
;
4157 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
4160 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
4162 host_ifconf
->ifc_len
= host_ifc_len
;
4163 host_ifconf
->ifc_buf
= host_ifc_buf
;
4165 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
4166 if (!is_error(ret
)) {
4167 /* convert host ifc_len to target ifc_len */
4169 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
4170 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
4171 host_ifconf
->ifc_len
= target_ifc_len
;
4173 /* restore target ifc_buf */
4175 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
4177 /* copy struct ifconf to target user */
4179 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4181 return -TARGET_EFAULT
;
4182 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
4183 unlock_user(argptr
, arg
, target_size
);
4185 /* copy ifreq[] to target user */
4187 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
4188 for (i
= 0; i
< nb_ifreq
; i
++) {
4189 thunk_convert(argptr
+ i
* target_ifreq_size
,
4190 host_ifc_buf
+ i
* sizeof(struct ifreq
),
4191 ifreq_arg_type
, THUNK_TARGET
);
4193 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
4203 #if defined(CONFIG_USBFS)
4204 #if HOST_LONG_BITS > 64
4205 #error USBDEVFS thunks do not support >64 bit hosts yet.
4208 uint64_t target_urb_adr
;
4209 uint64_t target_buf_adr
;
4210 char *target_buf_ptr
;
4211 struct usbdevfs_urb host_urb
;
4214 static GHashTable
*usbdevfs_urb_hashtable(void)
4216 static GHashTable
*urb_hashtable
;
4218 if (!urb_hashtable
) {
4219 urb_hashtable
= g_hash_table_new(g_int64_hash
, g_int64_equal
);
4221 return urb_hashtable
;
4224 static void urb_hashtable_insert(struct live_urb
*urb
)
4226 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4227 g_hash_table_insert(urb_hashtable
, urb
, urb
);
4230 static struct live_urb
*urb_hashtable_lookup(uint64_t target_urb_adr
)
4232 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4233 return g_hash_table_lookup(urb_hashtable
, &target_urb_adr
);
4236 static void urb_hashtable_remove(struct live_urb
*urb
)
4238 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4239 g_hash_table_remove(urb_hashtable
, urb
);
4243 do_ioctl_usbdevfs_reapurb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4244 int fd
, int cmd
, abi_long arg
)
4246 const argtype usbfsurb_arg_type
[] = { MK_STRUCT(STRUCT_usbdevfs_urb
) };
4247 const argtype ptrvoid_arg_type
[] = { TYPE_PTRVOID
, 0, 0 };
4248 struct live_urb
*lurb
;
4252 uintptr_t target_urb_adr
;
4255 target_size
= thunk_type_size(usbfsurb_arg_type
, THUNK_TARGET
);
4257 memset(buf_temp
, 0, sizeof(uint64_t));
4258 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4259 if (is_error(ret
)) {
4263 memcpy(&hurb
, buf_temp
, sizeof(uint64_t));
4264 lurb
= (void *)((uintptr_t)hurb
- offsetof(struct live_urb
, host_urb
));
4265 if (!lurb
->target_urb_adr
) {
4266 return -TARGET_EFAULT
;
4268 urb_hashtable_remove(lurb
);
4269 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
,
4270 lurb
->host_urb
.buffer_length
);
4271 lurb
->target_buf_ptr
= NULL
;
4273 /* restore the guest buffer pointer */
4274 lurb
->host_urb
.buffer
= (void *)(uintptr_t)lurb
->target_buf_adr
;
4276 /* update the guest urb struct */
4277 argptr
= lock_user(VERIFY_WRITE
, lurb
->target_urb_adr
, target_size
, 0);
4280 return -TARGET_EFAULT
;
4282 thunk_convert(argptr
, &lurb
->host_urb
, usbfsurb_arg_type
, THUNK_TARGET
);
4283 unlock_user(argptr
, lurb
->target_urb_adr
, target_size
);
4285 target_size
= thunk_type_size(ptrvoid_arg_type
, THUNK_TARGET
);
4286 /* write back the urb handle */
4287 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4290 return -TARGET_EFAULT
;
4293 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4294 target_urb_adr
= lurb
->target_urb_adr
;
4295 thunk_convert(argptr
, &target_urb_adr
, ptrvoid_arg_type
, THUNK_TARGET
);
4296 unlock_user(argptr
, arg
, target_size
);
4303 do_ioctl_usbdevfs_discardurb(const IOCTLEntry
*ie
,
4304 uint8_t *buf_temp
__attribute__((unused
)),
4305 int fd
, int cmd
, abi_long arg
)
4307 struct live_urb
*lurb
;
4309 /* map target address back to host URB with metadata. */
4310 lurb
= urb_hashtable_lookup(arg
);
4312 return -TARGET_EFAULT
;
4314 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
4318 do_ioctl_usbdevfs_submiturb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4319 int fd
, int cmd
, abi_long arg
)
4321 const argtype
*arg_type
= ie
->arg_type
;
4326 struct live_urb
*lurb
;
4329 * each submitted URB needs to map to a unique ID for the
4330 * kernel, and that unique ID needs to be a pointer to
4331 * host memory. hence, we need to malloc for each URB.
4332 * isochronous transfers have a variable length struct.
4335 target_size
= thunk_type_size(arg_type
, THUNK_TARGET
);
4337 /* construct host copy of urb and metadata */
4338 lurb
= g_try_malloc0(sizeof(struct live_urb
));
4340 return -TARGET_ENOMEM
;
4343 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4346 return -TARGET_EFAULT
;
4348 thunk_convert(&lurb
->host_urb
, argptr
, arg_type
, THUNK_HOST
);
4349 unlock_user(argptr
, arg
, 0);
4351 lurb
->target_urb_adr
= arg
;
4352 lurb
->target_buf_adr
= (uintptr_t)lurb
->host_urb
.buffer
;
4354 /* buffer space used depends on endpoint type so lock the entire buffer */
4355 /* control type urbs should check the buffer contents for true direction */
4356 rw_dir
= lurb
->host_urb
.endpoint
& USB_DIR_IN
? VERIFY_WRITE
: VERIFY_READ
;
4357 lurb
->target_buf_ptr
= lock_user(rw_dir
, lurb
->target_buf_adr
,
4358 lurb
->host_urb
.buffer_length
, 1);
4359 if (lurb
->target_buf_ptr
== NULL
) {
4361 return -TARGET_EFAULT
;
4364 /* update buffer pointer in host copy */
4365 lurb
->host_urb
.buffer
= lurb
->target_buf_ptr
;
4367 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
4368 if (is_error(ret
)) {
4369 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
, 0);
4372 urb_hashtable_insert(lurb
);
4377 #endif /* CONFIG_USBFS */
4379 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
4380 int cmd
, abi_long arg
)
4383 struct dm_ioctl
*host_dm
;
4384 abi_long guest_data
;
4385 uint32_t guest_data_size
;
4387 const argtype
*arg_type
= ie
->arg_type
;
4389 void *big_buf
= NULL
;
4393 target_size
= thunk_type_size(arg_type
, 0);
4394 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4396 ret
= -TARGET_EFAULT
;
4399 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4400 unlock_user(argptr
, arg
, 0);
4402 /* buf_temp is too small, so fetch things into a bigger buffer */
4403 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
4404 memcpy(big_buf
, buf_temp
, target_size
);
4408 guest_data
= arg
+ host_dm
->data_start
;
4409 if ((guest_data
- arg
) < 0) {
4410 ret
= -TARGET_EINVAL
;
4413 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
4414 host_data
= (char*)host_dm
+ host_dm
->data_start
;
4416 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
4418 ret
= -TARGET_EFAULT
;
4422 switch (ie
->host_cmd
) {
4424 case DM_LIST_DEVICES
:
4427 case DM_DEV_SUSPEND
:
4430 case DM_TABLE_STATUS
:
4431 case DM_TABLE_CLEAR
:
4433 case DM_LIST_VERSIONS
:
4437 case DM_DEV_SET_GEOMETRY
:
4438 /* data contains only strings */
4439 memcpy(host_data
, argptr
, guest_data_size
);
4442 memcpy(host_data
, argptr
, guest_data_size
);
4443 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
4447 void *gspec
= argptr
;
4448 void *cur_data
= host_data
;
4449 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
4450 int spec_size
= thunk_type_size(arg_type
, 0);
4453 for (i
= 0; i
< host_dm
->target_count
; i
++) {
4454 struct dm_target_spec
*spec
= cur_data
;
4458 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
4459 slen
= strlen((char*)gspec
+ spec_size
) + 1;
4461 spec
->next
= sizeof(*spec
) + slen
;
4462 strcpy((char*)&spec
[1], gspec
+ spec_size
);
4464 cur_data
+= spec
->next
;
4469 ret
= -TARGET_EINVAL
;
4470 unlock_user(argptr
, guest_data
, 0);
4473 unlock_user(argptr
, guest_data
, 0);
4475 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4476 if (!is_error(ret
)) {
4477 guest_data
= arg
+ host_dm
->data_start
;
4478 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
4479 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
4480 switch (ie
->host_cmd
) {
4485 case DM_DEV_SUSPEND
:
4488 case DM_TABLE_CLEAR
:
4490 case DM_DEV_SET_GEOMETRY
:
4491 /* no return data */
4493 case DM_LIST_DEVICES
:
4495 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
4496 uint32_t remaining_data
= guest_data_size
;
4497 void *cur_data
= argptr
;
4498 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
4499 int nl_size
= 12; /* can't use thunk_size due to alignment */
4502 uint32_t next
= nl
->next
;
4504 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
4506 if (remaining_data
< nl
->next
) {
4507 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
4510 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
4511 strcpy(cur_data
+ nl_size
, nl
->name
);
4512 cur_data
+= nl
->next
;
4513 remaining_data
-= nl
->next
;
4517 nl
= (void*)nl
+ next
;
4522 case DM_TABLE_STATUS
:
4524 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
4525 void *cur_data
= argptr
;
4526 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
4527 int spec_size
= thunk_type_size(arg_type
, 0);
4530 for (i
= 0; i
< host_dm
->target_count
; i
++) {
4531 uint32_t next
= spec
->next
;
4532 int slen
= strlen((char*)&spec
[1]) + 1;
4533 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
4534 if (guest_data_size
< spec
->next
) {
4535 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
4538 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
4539 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
4540 cur_data
= argptr
+ spec
->next
;
4541 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
4547 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
4548 int count
= *(uint32_t*)hdata
;
4549 uint64_t *hdev
= hdata
+ 8;
4550 uint64_t *gdev
= argptr
+ 8;
4553 *(uint32_t*)argptr
= tswap32(count
);
4554 for (i
= 0; i
< count
; i
++) {
4555 *gdev
= tswap64(*hdev
);
4561 case DM_LIST_VERSIONS
:
4563 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
4564 uint32_t remaining_data
= guest_data_size
;
4565 void *cur_data
= argptr
;
4566 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
4567 int vers_size
= thunk_type_size(arg_type
, 0);
4570 uint32_t next
= vers
->next
;
4572 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
4574 if (remaining_data
< vers
->next
) {
4575 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
4578 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
4579 strcpy(cur_data
+ vers_size
, vers
->name
);
4580 cur_data
+= vers
->next
;
4581 remaining_data
-= vers
->next
;
4585 vers
= (void*)vers
+ next
;
4590 unlock_user(argptr
, guest_data
, 0);
4591 ret
= -TARGET_EINVAL
;
4594 unlock_user(argptr
, guest_data
, guest_data_size
);
4596 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4598 ret
= -TARGET_EFAULT
;
4601 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
4602 unlock_user(argptr
, arg
, target_size
);
4609 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
4610 int cmd
, abi_long arg
)
4614 const argtype
*arg_type
= ie
->arg_type
;
4615 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
4618 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
4619 struct blkpg_partition host_part
;
4621 /* Read and convert blkpg */
4623 target_size
= thunk_type_size(arg_type
, 0);
4624 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4626 ret
= -TARGET_EFAULT
;
4629 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4630 unlock_user(argptr
, arg
, 0);
4632 switch (host_blkpg
->op
) {
4633 case BLKPG_ADD_PARTITION
:
4634 case BLKPG_DEL_PARTITION
:
4635 /* payload is struct blkpg_partition */
4638 /* Unknown opcode */
4639 ret
= -TARGET_EINVAL
;
4643 /* Read and convert blkpg->data */
4644 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
4645 target_size
= thunk_type_size(part_arg_type
, 0);
4646 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4648 ret
= -TARGET_EFAULT
;
4651 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
4652 unlock_user(argptr
, arg
, 0);
4654 /* Swizzle the data pointer to our local copy and call! */
4655 host_blkpg
->data
= &host_part
;
4656 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
4662 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4663 int fd
, int cmd
, abi_long arg
)
4665 const argtype
*arg_type
= ie
->arg_type
;
4666 const StructEntry
*se
;
4667 const argtype
*field_types
;
4668 const int *dst_offsets
, *src_offsets
;
4671 abi_ulong
*target_rt_dev_ptr
;
4672 unsigned long *host_rt_dev_ptr
;
4676 assert(ie
->access
== IOC_W
);
4677 assert(*arg_type
== TYPE_PTR
);
4679 assert(*arg_type
== TYPE_STRUCT
);
4680 target_size
= thunk_type_size(arg_type
, 0);
4681 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4683 return -TARGET_EFAULT
;
4686 assert(*arg_type
== (int)STRUCT_rtentry
);
4687 se
= struct_entries
+ *arg_type
++;
4688 assert(se
->convert
[0] == NULL
);
4689 /* convert struct here to be able to catch rt_dev string */
4690 field_types
= se
->field_types
;
4691 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
4692 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
4693 for (i
= 0; i
< se
->nb_fields
; i
++) {
4694 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
4695 assert(*field_types
== TYPE_PTRVOID
);
4696 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
4697 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
4698 if (*target_rt_dev_ptr
!= 0) {
4699 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
4700 tswapal(*target_rt_dev_ptr
));
4701 if (!*host_rt_dev_ptr
) {
4702 unlock_user(argptr
, arg
, 0);
4703 return -TARGET_EFAULT
;
4706 *host_rt_dev_ptr
= 0;
4711 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
4712 argptr
+ src_offsets
[i
],
4713 field_types
, THUNK_HOST
);
4715 unlock_user(argptr
, arg
, 0);
4717 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4718 if (*host_rt_dev_ptr
!= 0) {
4719 unlock_user((void *)*host_rt_dev_ptr
,
4720 *target_rt_dev_ptr
, 0);
4725 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4726 int fd
, int cmd
, abi_long arg
)
4728 int sig
= target_to_host_signal(arg
);
4729 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
4733 static abi_long
do_ioctl_tiocgptpeer(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4734 int fd
, int cmd
, abi_long arg
)
4736 int flags
= target_to_host_bitmask(arg
, fcntl_flags_tbl
);
4737 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, flags
));
4741 static IOCTLEntry ioctl_entries
[] = {
4742 #define IOCTL(cmd, access, ...) \
4743 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
4744 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
4745 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
4746 #define IOCTL_IGNORE(cmd) \
4747 { TARGET_ ## cmd, 0, #cmd },
4752 /* ??? Implement proper locking for ioctls. */
4753 /* do_ioctl() Must return target values and target errnos. */
4754 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
4756 const IOCTLEntry
*ie
;
4757 const argtype
*arg_type
;
4759 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
4765 if (ie
->target_cmd
== 0) {
4766 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
4767 return -TARGET_ENOSYS
;
4769 if (ie
->target_cmd
== cmd
)
4773 arg_type
= ie
->arg_type
;
4775 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
4776 } else if (!ie
->host_cmd
) {
4777 /* Some architectures define BSD ioctls in their headers
4778 that are not implemented in Linux. */
4779 return -TARGET_ENOSYS
;
4782 switch(arg_type
[0]) {
4785 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
4789 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
4793 target_size
= thunk_type_size(arg_type
, 0);
4794 switch(ie
->access
) {
4796 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4797 if (!is_error(ret
)) {
4798 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4800 return -TARGET_EFAULT
;
4801 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
4802 unlock_user(argptr
, arg
, target_size
);
4806 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4808 return -TARGET_EFAULT
;
4809 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4810 unlock_user(argptr
, arg
, 0);
4811 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4815 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4817 return -TARGET_EFAULT
;
4818 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4819 unlock_user(argptr
, arg
, 0);
4820 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4821 if (!is_error(ret
)) {
4822 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4824 return -TARGET_EFAULT
;
4825 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
4826 unlock_user(argptr
, arg
, target_size
);
4832 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
4833 (long)cmd
, arg_type
[0]);
4834 ret
= -TARGET_ENOSYS
;
4840 static const bitmask_transtbl iflag_tbl
[] = {
4841 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
4842 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
4843 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
4844 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
4845 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
4846 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
4847 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
4848 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
4849 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
4850 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
4851 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
4852 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
4853 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
4854 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
4858 static const bitmask_transtbl oflag_tbl
[] = {
4859 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
4860 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
4861 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
4862 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
4863 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
4864 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
4865 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
4866 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
4867 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
4868 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
4869 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
4870 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
4871 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
4872 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
4873 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
4874 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
4875 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
4876 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
4877 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
4878 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
4879 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
4880 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
4881 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
4882 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
4886 static const bitmask_transtbl cflag_tbl
[] = {
4887 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
4888 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
4889 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
4890 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
4891 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
4892 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
4893 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
4894 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
4895 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
4896 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
4897 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
4898 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
4899 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
4900 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
4901 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
4902 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
4903 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
4904 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
4905 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
4906 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
4907 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
4908 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
4909 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
4910 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
4911 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
4912 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
4913 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
4914 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
4915 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
4916 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
4917 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
4921 static const bitmask_transtbl lflag_tbl
[] = {
4922 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
4923 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
4924 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
4925 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
4926 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
4927 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
4928 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
4929 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
4930 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
4931 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
4932 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
4933 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
4934 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
4935 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
4936 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
4940 static void target_to_host_termios (void *dst
, const void *src
)
4942 struct host_termios
*host
= dst
;
4943 const struct target_termios
*target
= src
;
4946 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
4948 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
4950 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
4952 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
4953 host
->c_line
= target
->c_line
;
4955 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
4956 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
4957 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
4958 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
4959 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
4960 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
4961 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
4962 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
4963 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
4964 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
4965 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
4966 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
4967 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
4968 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
4969 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
4970 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
4971 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
4972 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
4975 static void host_to_target_termios (void *dst
, const void *src
)
4977 struct target_termios
*target
= dst
;
4978 const struct host_termios
*host
= src
;
4981 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
4983 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
4985 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
4987 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
4988 target
->c_line
= host
->c_line
;
4990 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
4991 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
4992 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
4993 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
4994 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
4995 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
4996 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
4997 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
4998 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
4999 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
5000 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
5001 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
5002 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
5003 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
5004 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
5005 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
5006 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
5007 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
5010 static const StructEntry struct_termios_def
= {
5011 .convert
= { host_to_target_termios
, target_to_host_termios
},
5012 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
5013 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
5016 static bitmask_transtbl mmap_flags_tbl
[] = {
5017 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
5018 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
5019 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
5020 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
,
5021 MAP_ANONYMOUS
, MAP_ANONYMOUS
},
5022 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
,
5023 MAP_GROWSDOWN
, MAP_GROWSDOWN
},
5024 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
,
5025 MAP_DENYWRITE
, MAP_DENYWRITE
},
5026 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
,
5027 MAP_EXECUTABLE
, MAP_EXECUTABLE
},
5028 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
5029 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
,
5030 MAP_NORESERVE
, MAP_NORESERVE
},
5031 { TARGET_MAP_HUGETLB
, TARGET_MAP_HUGETLB
, MAP_HUGETLB
, MAP_HUGETLB
},
5032 /* MAP_STACK had been ignored by the kernel for quite some time.
5033 Recognize it for the target insofar as we do not want to pass
5034 it through to the host. */
5035 { TARGET_MAP_STACK
, TARGET_MAP_STACK
, 0, 0 },
5039 #if defined(TARGET_I386)
5041 /* NOTE: there is really one LDT for all the threads */
5042 static uint8_t *ldt_table
;
5044 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
5051 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
5052 if (size
> bytecount
)
5054 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
5056 return -TARGET_EFAULT
;
5057 /* ??? Should this by byteswapped? */
5058 memcpy(p
, ldt_table
, size
);
5059 unlock_user(p
, ptr
, size
);
5063 /* XXX: add locking support */
5064 static abi_long
write_ldt(CPUX86State
*env
,
5065 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
5067 struct target_modify_ldt_ldt_s ldt_info
;
5068 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5069 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5070 int seg_not_present
, useable
, lm
;
5071 uint32_t *lp
, entry_1
, entry_2
;
5073 if (bytecount
!= sizeof(ldt_info
))
5074 return -TARGET_EINVAL
;
5075 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
5076 return -TARGET_EFAULT
;
5077 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5078 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5079 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5080 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5081 unlock_user_struct(target_ldt_info
, ptr
, 0);
5083 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
5084 return -TARGET_EINVAL
;
5085 seg_32bit
= ldt_info
.flags
& 1;
5086 contents
= (ldt_info
.flags
>> 1) & 3;
5087 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5088 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5089 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5090 useable
= (ldt_info
.flags
>> 6) & 1;
5094 lm
= (ldt_info
.flags
>> 7) & 1;
5096 if (contents
== 3) {
5098 return -TARGET_EINVAL
;
5099 if (seg_not_present
== 0)
5100 return -TARGET_EINVAL
;
5102 /* allocate the LDT */
5104 env
->ldt
.base
= target_mmap(0,
5105 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
5106 PROT_READ
|PROT_WRITE
,
5107 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
5108 if (env
->ldt
.base
== -1)
5109 return -TARGET_ENOMEM
;
5110 memset(g2h(env
->ldt
.base
), 0,
5111 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
5112 env
->ldt
.limit
= 0xffff;
5113 ldt_table
= g2h(env
->ldt
.base
);
5116 /* NOTE: same code as Linux kernel */
5117 /* Allow LDTs to be cleared by the user. */
5118 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5121 read_exec_only
== 1 &&
5123 limit_in_pages
== 0 &&
5124 seg_not_present
== 1 &&
5132 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5133 (ldt_info
.limit
& 0x0ffff);
5134 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5135 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5136 (ldt_info
.limit
& 0xf0000) |
5137 ((read_exec_only
^ 1) << 9) |
5139 ((seg_not_present
^ 1) << 15) |
5141 (limit_in_pages
<< 23) |
5145 entry_2
|= (useable
<< 20);
5147 /* Install the new entry ... */
5149 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
5150 lp
[0] = tswap32(entry_1
);
5151 lp
[1] = tswap32(entry_2
);
5155 /* specific and weird i386 syscalls */
5156 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
5157 unsigned long bytecount
)
5163 ret
= read_ldt(ptr
, bytecount
);
5166 ret
= write_ldt(env
, ptr
, bytecount
, 1);
5169 ret
= write_ldt(env
, ptr
, bytecount
, 0);
5172 ret
= -TARGET_ENOSYS
;
5178 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5179 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5181 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5182 struct target_modify_ldt_ldt_s ldt_info
;
5183 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5184 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5185 int seg_not_present
, useable
, lm
;
5186 uint32_t *lp
, entry_1
, entry_2
;
5189 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5190 if (!target_ldt_info
)
5191 return -TARGET_EFAULT
;
5192 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5193 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5194 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5195 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5196 if (ldt_info
.entry_number
== -1) {
5197 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
5198 if (gdt_table
[i
] == 0) {
5199 ldt_info
.entry_number
= i
;
5200 target_ldt_info
->entry_number
= tswap32(i
);
5205 unlock_user_struct(target_ldt_info
, ptr
, 1);
5207 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
5208 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
5209 return -TARGET_EINVAL
;
5210 seg_32bit
= ldt_info
.flags
& 1;
5211 contents
= (ldt_info
.flags
>> 1) & 3;
5212 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5213 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5214 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5215 useable
= (ldt_info
.flags
>> 6) & 1;
5219 lm
= (ldt_info
.flags
>> 7) & 1;
5222 if (contents
== 3) {
5223 if (seg_not_present
== 0)
5224 return -TARGET_EINVAL
;
5227 /* NOTE: same code as Linux kernel */
5228 /* Allow LDTs to be cleared by the user. */
5229 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5230 if ((contents
== 0 &&
5231 read_exec_only
== 1 &&
5233 limit_in_pages
== 0 &&
5234 seg_not_present
== 1 &&
5242 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5243 (ldt_info
.limit
& 0x0ffff);
5244 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5245 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5246 (ldt_info
.limit
& 0xf0000) |
5247 ((read_exec_only
^ 1) << 9) |
5249 ((seg_not_present
^ 1) << 15) |
5251 (limit_in_pages
<< 23) |
5256 /* Install the new entry ... */
5258 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
5259 lp
[0] = tswap32(entry_1
);
5260 lp
[1] = tswap32(entry_2
);
5264 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5266 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5267 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5268 uint32_t base_addr
, limit
, flags
;
5269 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
5270 int seg_not_present
, useable
, lm
;
5271 uint32_t *lp
, entry_1
, entry_2
;
5273 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5274 if (!target_ldt_info
)
5275 return -TARGET_EFAULT
;
5276 idx
= tswap32(target_ldt_info
->entry_number
);
5277 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
5278 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
5279 unlock_user_struct(target_ldt_info
, ptr
, 1);
5280 return -TARGET_EINVAL
;
5282 lp
= (uint32_t *)(gdt_table
+ idx
);
5283 entry_1
= tswap32(lp
[0]);
5284 entry_2
= tswap32(lp
[1]);
5286 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
5287 contents
= (entry_2
>> 10) & 3;
5288 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
5289 seg_32bit
= (entry_2
>> 22) & 1;
5290 limit_in_pages
= (entry_2
>> 23) & 1;
5291 useable
= (entry_2
>> 20) & 1;
5295 lm
= (entry_2
>> 21) & 1;
5297 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
5298 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
5299 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
5300 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
5301 base_addr
= (entry_1
>> 16) |
5302 (entry_2
& 0xff000000) |
5303 ((entry_2
& 0xff) << 16);
5304 target_ldt_info
->base_addr
= tswapal(base_addr
);
5305 target_ldt_info
->limit
= tswap32(limit
);
5306 target_ldt_info
->flags
= tswap32(flags
);
5307 unlock_user_struct(target_ldt_info
, ptr
, 1);
5310 #endif /* TARGET_I386 && TARGET_ABI32 */
5312 #ifndef TARGET_ABI32
5313 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
5320 case TARGET_ARCH_SET_GS
:
5321 case TARGET_ARCH_SET_FS
:
5322 if (code
== TARGET_ARCH_SET_GS
)
5326 cpu_x86_load_seg(env
, idx
, 0);
5327 env
->segs
[idx
].base
= addr
;
5329 case TARGET_ARCH_GET_GS
:
5330 case TARGET_ARCH_GET_FS
:
5331 if (code
== TARGET_ARCH_GET_GS
)
5335 val
= env
->segs
[idx
].base
;
5336 if (put_user(val
, addr
, abi_ulong
))
5337 ret
= -TARGET_EFAULT
;
5340 ret
= -TARGET_EINVAL
;
5347 #endif /* defined(TARGET_I386) */
5349 #define NEW_STACK_SIZE 0x40000
5352 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
5355 pthread_mutex_t mutex
;
5356 pthread_cond_t cond
;
5359 abi_ulong child_tidptr
;
5360 abi_ulong parent_tidptr
;
5364 static void *clone_func(void *arg
)
5366 new_thread_info
*info
= arg
;
5371 rcu_register_thread();
5372 tcg_register_thread();
5374 cpu
= ENV_GET_CPU(env
);
5376 ts
= (TaskState
*)cpu
->opaque
;
5377 info
->tid
= gettid();
5379 if (info
->child_tidptr
)
5380 put_user_u32(info
->tid
, info
->child_tidptr
);
5381 if (info
->parent_tidptr
)
5382 put_user_u32(info
->tid
, info
->parent_tidptr
);
5383 /* Enable signals. */
5384 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
5385 /* Signal to the parent that we're ready. */
5386 pthread_mutex_lock(&info
->mutex
);
5387 pthread_cond_broadcast(&info
->cond
);
5388 pthread_mutex_unlock(&info
->mutex
);
5389 /* Wait until the parent has finished initializing the tls state. */
5390 pthread_mutex_lock(&clone_lock
);
5391 pthread_mutex_unlock(&clone_lock
);
5397 /* do_fork() Must return host values and target errnos (unlike most
5398 do_*() functions). */
5399 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
5400 abi_ulong parent_tidptr
, target_ulong newtls
,
5401 abi_ulong child_tidptr
)
5403 CPUState
*cpu
= ENV_GET_CPU(env
);
5407 CPUArchState
*new_env
;
5410 flags
&= ~CLONE_IGNORED_FLAGS
;
5412 /* Emulate vfork() with fork() */
5413 if (flags
& CLONE_VFORK
)
5414 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
5416 if (flags
& CLONE_VM
) {
5417 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
5418 new_thread_info info
;
5419 pthread_attr_t attr
;
5421 if (((flags
& CLONE_THREAD_FLAGS
) != CLONE_THREAD_FLAGS
) ||
5422 (flags
& CLONE_INVALID_THREAD_FLAGS
)) {
5423 return -TARGET_EINVAL
;
5426 ts
= g_new0(TaskState
, 1);
5427 init_task_state(ts
);
5429 /* Grab a mutex so that thread setup appears atomic. */
5430 pthread_mutex_lock(&clone_lock
);
5432 /* we create a new CPU instance. */
5433 new_env
= cpu_copy(env
);
5434 /* Init regs that differ from the parent. */
5435 cpu_clone_regs(new_env
, newsp
);
5436 new_cpu
= ENV_GET_CPU(new_env
);
5437 new_cpu
->opaque
= ts
;
5438 ts
->bprm
= parent_ts
->bprm
;
5439 ts
->info
= parent_ts
->info
;
5440 ts
->signal_mask
= parent_ts
->signal_mask
;
5442 if (flags
& CLONE_CHILD_CLEARTID
) {
5443 ts
->child_tidptr
= child_tidptr
;
5446 if (flags
& CLONE_SETTLS
) {
5447 cpu_set_tls (new_env
, newtls
);
5450 memset(&info
, 0, sizeof(info
));
5451 pthread_mutex_init(&info
.mutex
, NULL
);
5452 pthread_mutex_lock(&info
.mutex
);
5453 pthread_cond_init(&info
.cond
, NULL
);
5455 if (flags
& CLONE_CHILD_SETTID
) {
5456 info
.child_tidptr
= child_tidptr
;
5458 if (flags
& CLONE_PARENT_SETTID
) {
5459 info
.parent_tidptr
= parent_tidptr
;
5462 ret
= pthread_attr_init(&attr
);
5463 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
5464 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
5465 /* It is not safe to deliver signals until the child has finished
5466 initializing, so temporarily block all signals. */
5467 sigfillset(&sigmask
);
5468 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
5470 /* If this is our first additional thread, we need to ensure we
5471 * generate code for parallel execution and flush old translations.
5473 if (!parallel_cpus
) {
5474 parallel_cpus
= true;
5478 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
5479 /* TODO: Free new CPU state if thread creation failed. */
5481 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
5482 pthread_attr_destroy(&attr
);
5484 /* Wait for the child to initialize. */
5485 pthread_cond_wait(&info
.cond
, &info
.mutex
);
5490 pthread_mutex_unlock(&info
.mutex
);
5491 pthread_cond_destroy(&info
.cond
);
5492 pthread_mutex_destroy(&info
.mutex
);
5493 pthread_mutex_unlock(&clone_lock
);
5495 /* if no CLONE_VM, we consider it is a fork */
5496 if (flags
& CLONE_INVALID_FORK_FLAGS
) {
5497 return -TARGET_EINVAL
;
5500 /* We can't support custom termination signals */
5501 if ((flags
& CSIGNAL
) != TARGET_SIGCHLD
) {
5502 return -TARGET_EINVAL
;
5505 if (block_signals()) {
5506 return -TARGET_ERESTARTSYS
;
5512 /* Child Process. */
5513 cpu_clone_regs(env
, newsp
);
5515 /* There is a race condition here. The parent process could
5516 theoretically read the TID in the child process before the child
5517 tid is set. This would require using either ptrace
5518 (not implemented) or having *_tidptr to point at a shared memory
5519 mapping. We can't repeat the spinlock hack used above because
5520 the child process gets its own copy of the lock. */
5521 if (flags
& CLONE_CHILD_SETTID
)
5522 put_user_u32(gettid(), child_tidptr
);
5523 if (flags
& CLONE_PARENT_SETTID
)
5524 put_user_u32(gettid(), parent_tidptr
);
5525 ts
= (TaskState
*)cpu
->opaque
;
5526 if (flags
& CLONE_SETTLS
)
5527 cpu_set_tls (env
, newtls
);
5528 if (flags
& CLONE_CHILD_CLEARTID
)
5529 ts
->child_tidptr
= child_tidptr
;
5537 /* warning : doesn't handle linux specific flags... */
5538 static int target_to_host_fcntl_cmd(int cmd
)
5543 case TARGET_F_DUPFD
:
5544 case TARGET_F_GETFD
:
5545 case TARGET_F_SETFD
:
5546 case TARGET_F_GETFL
:
5547 case TARGET_F_SETFL
:
5550 case TARGET_F_GETLK
:
5553 case TARGET_F_SETLK
:
5556 case TARGET_F_SETLKW
:
5559 case TARGET_F_GETOWN
:
5562 case TARGET_F_SETOWN
:
5565 case TARGET_F_GETSIG
:
5568 case TARGET_F_SETSIG
:
5571 #if TARGET_ABI_BITS == 32
5572 case TARGET_F_GETLK64
:
5575 case TARGET_F_SETLK64
:
5578 case TARGET_F_SETLKW64
:
5582 case TARGET_F_SETLEASE
:
5585 case TARGET_F_GETLEASE
:
5588 #ifdef F_DUPFD_CLOEXEC
5589 case TARGET_F_DUPFD_CLOEXEC
:
5590 ret
= F_DUPFD_CLOEXEC
;
5593 case TARGET_F_NOTIFY
:
5597 case TARGET_F_GETOWN_EX
:
5602 case TARGET_F_SETOWN_EX
:
5607 case TARGET_F_SETPIPE_SZ
:
5610 case TARGET_F_GETPIPE_SZ
:
5615 ret
= -TARGET_EINVAL
;
5619 #if defined(__powerpc64__)
5620 /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
5621 * is not supported by kernel. The glibc fcntl call actually adjusts
5622 * them to 5, 6 and 7 before making the syscall(). Since we make the
5623 * syscall directly, adjust to what is supported by the kernel.
5625 if (ret
>= F_GETLK64
&& ret
<= F_SETLKW64
) {
5626 ret
-= F_GETLK64
- 5;
/* Shared case list for translating flock l_type values in both
 * directions; TRANSTBL_CONVERT is defined by each caller to expand a
 * case label appropriately before including this switch body. */
#define FLOCK_TRANSTBL \
    switch (type) { \
    TRANSTBL_CONVERT(F_RDLCK); \
    TRANSTBL_CONVERT(F_WRLCK); \
    TRANSTBL_CONVERT(F_UNLCK); \
    TRANSTBL_CONVERT(F_EXLCK); \
    TRANSTBL_CONVERT(F_SHLCK); \
    }
5642 static int target_to_host_flock(int type
)
5644 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
5646 #undef TRANSTBL_CONVERT
5647 return -TARGET_EINVAL
;
5650 static int host_to_target_flock(int type
)
5652 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
5654 #undef TRANSTBL_CONVERT
5655 /* if we don't know how to convert the value coming
5656 * from the host we copy to the target field as-is
5661 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
5662 abi_ulong target_flock_addr
)
5664 struct target_flock
*target_fl
;
5667 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
5668 return -TARGET_EFAULT
;
5671 __get_user(l_type
, &target_fl
->l_type
);
5672 l_type
= target_to_host_flock(l_type
);
5676 fl
->l_type
= l_type
;
5677 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
5678 __get_user(fl
->l_start
, &target_fl
->l_start
);
5679 __get_user(fl
->l_len
, &target_fl
->l_len
);
5680 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
5681 unlock_user_struct(target_fl
, target_flock_addr
, 0);
5685 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
5686 const struct flock64
*fl
)
5688 struct target_flock
*target_fl
;
5691 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
5692 return -TARGET_EFAULT
;
5695 l_type
= host_to_target_flock(fl
->l_type
);
5696 __put_user(l_type
, &target_fl
->l_type
);
5697 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
5698 __put_user(fl
->l_start
, &target_fl
->l_start
);
5699 __put_user(fl
->l_len
, &target_fl
->l_len
);
5700 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
5701 unlock_user_struct(target_fl
, target_flock_addr
, 1);
5705 typedef abi_long
from_flock64_fn(struct flock64
*fl
, abi_ulong target_addr
);
5706 typedef abi_long
to_flock64_fn(abi_ulong target_addr
, const struct flock64
*fl
);
5708 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
5709 static inline abi_long
copy_from_user_oabi_flock64(struct flock64
*fl
,
5710 abi_ulong target_flock_addr
)
5712 struct target_oabi_flock64
*target_fl
;
5715 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
5716 return -TARGET_EFAULT
;
5719 __get_user(l_type
, &target_fl
->l_type
);
5720 l_type
= target_to_host_flock(l_type
);
5724 fl
->l_type
= l_type
;
5725 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
5726 __get_user(fl
->l_start
, &target_fl
->l_start
);
5727 __get_user(fl
->l_len
, &target_fl
->l_len
);
5728 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
5729 unlock_user_struct(target_fl
, target_flock_addr
, 0);
5733 static inline abi_long
copy_to_user_oabi_flock64(abi_ulong target_flock_addr
,
5734 const struct flock64
*fl
)
5736 struct target_oabi_flock64
*target_fl
;
5739 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
5740 return -TARGET_EFAULT
;
5743 l_type
= host_to_target_flock(fl
->l_type
);
5744 __put_user(l_type
, &target_fl
->l_type
);
5745 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
5746 __put_user(fl
->l_start
, &target_fl
->l_start
);
5747 __put_user(fl
->l_len
, &target_fl
->l_len
);
5748 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
5749 unlock_user_struct(target_fl
, target_flock_addr
, 1);
5754 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
5755 abi_ulong target_flock_addr
)
5757 struct target_flock64
*target_fl
;
5760 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
5761 return -TARGET_EFAULT
;
5764 __get_user(l_type
, &target_fl
->l_type
);
5765 l_type
= target_to_host_flock(l_type
);
5769 fl
->l_type
= l_type
;
5770 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
5771 __get_user(fl
->l_start
, &target_fl
->l_start
);
5772 __get_user(fl
->l_len
, &target_fl
->l_len
);
5773 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
5774 unlock_user_struct(target_fl
, target_flock_addr
, 0);
5778 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
5779 const struct flock64
*fl
)
5781 struct target_flock64
*target_fl
;
5784 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
5785 return -TARGET_EFAULT
;
5788 l_type
= host_to_target_flock(fl
->l_type
);
5789 __put_user(l_type
, &target_fl
->l_type
);
5790 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
5791 __put_user(fl
->l_start
, &target_fl
->l_start
);
5792 __put_user(fl
->l_len
, &target_fl
->l_len
);
5793 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
5794 unlock_user_struct(target_fl
, target_flock_addr
, 1);
5798 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
5800 struct flock64 fl64
;
5802 struct f_owner_ex fox
;
5803 struct target_f_owner_ex
*target_fox
;
5806 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
5808 if (host_cmd
== -TARGET_EINVAL
)
5812 case TARGET_F_GETLK
:
5813 ret
= copy_from_user_flock(&fl64
, arg
);
5817 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
5819 ret
= copy_to_user_flock(arg
, &fl64
);
5823 case TARGET_F_SETLK
:
5824 case TARGET_F_SETLKW
:
5825 ret
= copy_from_user_flock(&fl64
, arg
);
5829 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
5832 case TARGET_F_GETLK64
:
5833 ret
= copy_from_user_flock64(&fl64
, arg
);
5837 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
5839 ret
= copy_to_user_flock64(arg
, &fl64
);
5842 case TARGET_F_SETLK64
:
5843 case TARGET_F_SETLKW64
:
5844 ret
= copy_from_user_flock64(&fl64
, arg
);
5848 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
5851 case TARGET_F_GETFL
:
5852 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
5854 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
5858 case TARGET_F_SETFL
:
5859 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
5860 target_to_host_bitmask(arg
,
5865 case TARGET_F_GETOWN_EX
:
5866 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
5868 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
5869 return -TARGET_EFAULT
;
5870 target_fox
->type
= tswap32(fox
.type
);
5871 target_fox
->pid
= tswap32(fox
.pid
);
5872 unlock_user_struct(target_fox
, arg
, 1);
5878 case TARGET_F_SETOWN_EX
:
5879 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
5880 return -TARGET_EFAULT
;
5881 fox
.type
= tswap32(target_fox
->type
);
5882 fox
.pid
= tswap32(target_fox
->pid
);
5883 unlock_user_struct(target_fox
, arg
, 0);
5884 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
5888 case TARGET_F_SETOWN
:
5889 case TARGET_F_GETOWN
:
5890 case TARGET_F_SETSIG
:
5891 case TARGET_F_GETSIG
:
5892 case TARGET_F_SETLEASE
:
5893 case TARGET_F_GETLEASE
:
5894 case TARGET_F_SETPIPE_SZ
:
5895 case TARGET_F_GETPIPE_SZ
:
5896 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
5900 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
#ifdef USE_UID16
/* 16-bit UID ABI helpers: clamp 32-bit host IDs into the 16-bit range
 * (overflow maps to 65534, matching the kernel's overflowuid), widen
 * 16-bit guest IDs preserving the -1 "no change" sentinel, and swap
 * 16-bit IDs for cross-endian guests. */
static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}
static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}
static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}
static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}
static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */
/* 32-bit UID ABI: the conversions are all identity. */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
5972 /* We must do direct syscalls for setting UID/GID, because we want to
5973 * implement the Linux system call semantics of "change only for this thread",
5974 * not the libc/POSIX semantics of "change for all threads in process".
5975 * (See http://ewontfix.com/17/ for more details.)
5976 * We use the 32-bit version of the syscalls if present; if it is not
5977 * then either the host architecture supports 32-bit UIDs natively with
5978 * the standard syscall, or the 16-bit UID is the best we can do.
5980 #ifdef __NR_setuid32
5981 #define __NR_sys_setuid __NR_setuid32
5983 #define __NR_sys_setuid __NR_setuid
5985 #ifdef __NR_setgid32
5986 #define __NR_sys_setgid __NR_setgid32
5988 #define __NR_sys_setgid __NR_setgid
5990 #ifdef __NR_setresuid32
5991 #define __NR_sys_setresuid __NR_setresuid32
5993 #define __NR_sys_setresuid __NR_setresuid
5995 #ifdef __NR_setresgid32
5996 #define __NR_sys_setresgid __NR_setresgid32
5998 #define __NR_sys_setresgid __NR_setresgid
6001 _syscall1(int, sys_setuid
, uid_t
, uid
)
6002 _syscall1(int, sys_setgid
, gid_t
, gid
)
6003 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
6004 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
6006 void syscall_init(void)
6009 const argtype
*arg_type
;
6013 thunk_init(STRUCT_MAX
);
6015 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6016 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6017 #include "syscall_types.h"
6019 #undef STRUCT_SPECIAL
6021 /* Build target_to_host_errno_table[] table from
6022 * host_to_target_errno_table[]. */
6023 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
6024 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
6027 /* we patch the ioctl size if necessary. We rely on the fact that
6028 no ioctl has all the bits at '1' in the size field */
6030 while (ie
->target_cmd
!= 0) {
6031 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
6032 TARGET_IOC_SIZEMASK
) {
6033 arg_type
= ie
->arg_type
;
6034 if (arg_type
[0] != TYPE_PTR
) {
6035 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
6040 size
= thunk_type_size(arg_type
, 0);
6041 ie
->target_cmd
= (ie
->target_cmd
&
6042 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
6043 (size
<< TARGET_IOC_SIZESHIFT
);
6046 /* automatic consistency check if same arch */
6047 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6048 (defined(__x86_64__) && defined(TARGET_X86_64))
6049 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
6050 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6051 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
6058 #if TARGET_ABI_BITS == 32
6059 static inline uint64_t target_offset64(uint32_t word0
, uint32_t word1
)
6061 #ifdef TARGET_WORDS_BIGENDIAN
6062 return ((uint64_t)word0
<< 32) | word1
;
6064 return ((uint64_t)word1
<< 32) | word0
;
6067 #else /* TARGET_ABI_BITS == 32 */
6068 static inline uint64_t target_offset64(uint64_t word0
, uint64_t word1
)
6072 #endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* Emulate truncate64(2) for 32-bit guests: some ABIs require the 64-bit
 * length register pair to be aligned, which shifts the argument slots. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* Emulate ftruncate64: same register-pair reassembly as
 * target_truncate64 but operating on a file descriptor. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
        /* arg2 is a pad register; the pair is in arg3/arg4 */
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
6102 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
6103 abi_ulong target_addr
)
6105 struct target_timespec
*target_ts
;
6107 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
6108 return -TARGET_EFAULT
;
6109 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6110 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6111 unlock_user_struct(target_ts
, target_addr
, 0);
6115 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
6116 struct timespec
*host_ts
)
6118 struct target_timespec
*target_ts
;
6120 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
6121 return -TARGET_EFAULT
;
6122 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6123 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6124 unlock_user_struct(target_ts
, target_addr
, 1);
6128 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
6129 abi_ulong target_addr
)
6131 struct target_itimerspec
*target_itspec
;
6133 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
6134 return -TARGET_EFAULT
;
6137 host_itspec
->it_interval
.tv_sec
=
6138 tswapal(target_itspec
->it_interval
.tv_sec
);
6139 host_itspec
->it_interval
.tv_nsec
=
6140 tswapal(target_itspec
->it_interval
.tv_nsec
);
6141 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
6142 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
6144 unlock_user_struct(target_itspec
, target_addr
, 1);
6148 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
6149 struct itimerspec
*host_its
)
6151 struct target_itimerspec
*target_itspec
;
6153 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
6154 return -TARGET_EFAULT
;
6157 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
6158 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
6160 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
6161 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
6163 unlock_user_struct(target_itspec
, target_addr
, 0);
6167 static inline abi_long
target_to_host_timex(struct timex
*host_tx
,
6168 abi_long target_addr
)
6170 struct target_timex
*target_tx
;
6172 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
6173 return -TARGET_EFAULT
;
6176 __get_user(host_tx
->modes
, &target_tx
->modes
);
6177 __get_user(host_tx
->offset
, &target_tx
->offset
);
6178 __get_user(host_tx
->freq
, &target_tx
->freq
);
6179 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6180 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
6181 __get_user(host_tx
->status
, &target_tx
->status
);
6182 __get_user(host_tx
->constant
, &target_tx
->constant
);
6183 __get_user(host_tx
->precision
, &target_tx
->precision
);
6184 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
6185 __get_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
6186 __get_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
6187 __get_user(host_tx
->tick
, &target_tx
->tick
);
6188 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
6189 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
6190 __get_user(host_tx
->shift
, &target_tx
->shift
);
6191 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
6192 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
6193 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
6194 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
6195 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
6196 __get_user(host_tx
->tai
, &target_tx
->tai
);
6198 unlock_user_struct(target_tx
, target_addr
, 0);
6202 static inline abi_long
host_to_target_timex(abi_long target_addr
,
6203 struct timex
*host_tx
)
6205 struct target_timex
*target_tx
;
6207 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
6208 return -TARGET_EFAULT
;
6211 __put_user(host_tx
->modes
, &target_tx
->modes
);
6212 __put_user(host_tx
->offset
, &target_tx
->offset
);
6213 __put_user(host_tx
->freq
, &target_tx
->freq
);
6214 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6215 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
6216 __put_user(host_tx
->status
, &target_tx
->status
);
6217 __put_user(host_tx
->constant
, &target_tx
->constant
);
6218 __put_user(host_tx
->precision
, &target_tx
->precision
);
6219 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
6220 __put_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
6221 __put_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
6222 __put_user(host_tx
->tick
, &target_tx
->tick
);
6223 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
6224 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
6225 __put_user(host_tx
->shift
, &target_tx
->shift
);
6226 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
6227 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
6228 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
6229 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
6230 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
6231 __put_user(host_tx
->tai
, &target_tx
->tai
);
6233 unlock_user_struct(target_tx
, target_addr
, 1);
6238 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
6239 abi_ulong target_addr
)
6241 struct target_sigevent
*target_sevp
;
6243 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
6244 return -TARGET_EFAULT
;
6247 /* This union is awkward on 64 bit systems because it has a 32 bit
6248 * integer and a pointer in it; we follow the conversion approach
6249 * used for handling sigval types in signal.c so the guest should get
6250 * the correct value back even if we did a 64 bit byteswap and it's
6251 * using the 32 bit integer.
6253 host_sevp
->sigev_value
.sival_ptr
=
6254 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
6255 host_sevp
->sigev_signo
=
6256 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
6257 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
6258 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
6260 unlock_user_struct(target_sevp
, target_addr
, 1);
#if defined(TARGET_NR_mlockall)
/* Translate the guest mlockall() flag bits to host MCL_* bits. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }

    return result;
}
#endif
#if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
     defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
     defined(TARGET_NR_newfstatat))
/* Write *host_st out as the guest's 64-bit stat structure at
 * target_addr. On 32-bit ARM the layout differs between EABI and
 * OABI processes, so the EABI case is handled separately.
 * Returns 0 on success, -TARGET_EFAULT if the guest address is bad. */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif
6346 /* ??? Using host futex calls even when target atomic operations
6347 are not really atomic probably breaks things. However implementing
6348 futexes locally would make futexes shared between multiple processes
6349 tricky. However they're probably useless because guest atomic
6350 operations won't work either. */
6351 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
6352 target_ulong uaddr2
, int val3
)
6354 struct timespec ts
, *pts
;
6357 /* ??? We assume FUTEX_* constants are the same on both host
6359 #ifdef FUTEX_CMD_MASK
6360 base_op
= op
& FUTEX_CMD_MASK
;
6366 case FUTEX_WAIT_BITSET
:
6369 target_to_host_timespec(pts
, timeout
);
6373 return get_errno(safe_futex(g2h(uaddr
), op
, tswap32(val
),
6376 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
6378 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
6380 case FUTEX_CMP_REQUEUE
:
6382 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6383 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6384 But the prototype takes a `struct timespec *'; insert casts
6385 to satisfy the compiler. We do not need to tswap TIMEOUT
6386 since it's not compared to guest memory. */
6387 pts
= (struct timespec
*)(uintptr_t) timeout
;
6388 return get_errno(safe_futex(g2h(uaddr
), op
, val
, pts
,
6390 (base_op
== FUTEX_CMP_REQUEUE
6394 return -TARGET_ENOSYS
;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate name_to_handle_at(2): read the guest-supplied handle size,
 * run the host syscall into a scratch handle, then copy the (opaque)
 * handle back to guest memory with the header fields byte-swapped.
 * Returns the syscall result, or -TARGET_EFAULT on bad guest pointers.
 *
 * Fix: free the scratch handle `fh` (g_malloc0'd) before returning —
 * the visible code leaked it on every path.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate open_by_handle_at(2): duplicate the guest handle into host
 * memory, fix up the byte-swapped header fields, and run the host
 * syscall with translated open flags.
 * Returns the new fd or a -TARGET_* errno.
 *
 * Fix: free the g_memdup'd copy `fh` before returning — the visible
 * code leaked it.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
#if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
/* Emulate signalfd4(2): validate the guest flags, convert the guest
 * signal mask and flags to host form, and register the resulting fd
 * so that siginfo read from it gets translated back to the guest. */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
#endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
6530 static int open_self_cmdline(void *cpu_env
, int fd
)
6532 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
6533 struct linux_binprm
*bprm
= ((TaskState
*)cpu
->opaque
)->bprm
;
6536 for (i
= 0; i
< bprm
->argc
; i
++) {
6537 size_t len
= strlen(bprm
->argv
[i
]) + 1;
6539 if (write(fd
, bprm
->argv
[i
], len
) != len
) {
6547 static int open_self_maps(void *cpu_env
, int fd
)
6549 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
6550 TaskState
*ts
= cpu
->opaque
;
6556 fp
= fopen("/proc/self/maps", "r");
6561 while ((read
= getline(&line
, &len
, fp
)) != -1) {
6562 int fields
, dev_maj
, dev_min
, inode
;
6563 uint64_t min
, max
, offset
;
6564 char flag_r
, flag_w
, flag_x
, flag_p
;
6565 char path
[512] = "";
6566 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
6567 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
6568 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
6570 if ((fields
< 10) || (fields
> 11)) {
6573 if (h2g_valid(min
)) {
6574 int flags
= page_get_flags(h2g(min
));
6575 max
= h2g_valid(max
- 1) ? max
: (uintptr_t)g2h(GUEST_ADDR_MAX
) + 1;
6576 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
6579 if (h2g(min
) == ts
->info
->stack_limit
) {
6580 pstrcpy(path
, sizeof(path
), " [stack]");
6582 dprintf(fd
, TARGET_ABI_FMT_ptr
"-" TARGET_ABI_FMT_ptr
6583 " %c%c%c%c %08" PRIx64
" %02x:%02x %d %s%s\n",
6584 h2g(min
), h2g(max
- 1) + 1, flag_r
, flag_w
,
6585 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
6586 path
[0] ? " " : "", path
);
6596 static int open_self_stat(void *cpu_env
, int fd
)
6598 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
6599 TaskState
*ts
= cpu
->opaque
;
6600 abi_ulong start_stack
= ts
->info
->start_stack
;
6603 for (i
= 0; i
< 44; i
++) {
6611 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
6612 } else if (i
== 1) {
6614 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
6615 } else if (i
== 27) {
6618 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
6620 /* for the rest, there is MasterCard */
6621 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
6625 if (write(fd
, buf
, len
) != len
) {
6633 static int open_self_auxv(void *cpu_env
, int fd
)
6635 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
6636 TaskState
*ts
= cpu
->opaque
;
6637 abi_ulong auxv
= ts
->info
->saved_auxv
;
6638 abi_ulong len
= ts
->info
->auxv_len
;
6642 * Auxiliary vector is stored in target process stack.
6643 * read in whole auxv vector and copy it to file
6645 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
6649 r
= write(fd
, ptr
, len
);
6656 lseek(fd
, 0, SEEK_SET
);
6657 unlock_user(ptr
, auxv
, len
);
/* Return non-zero if filename names the given entry of this process's
 * /proc directory, accepting both "/proc/self/<entry>" and
 * "/proc/<our-pid>/<entry>" spellings. */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}
6687 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-path matcher used by the fake-/proc table for entries that
 * are keyed on the full pathname rather than a per-process name. */
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}
6693 static int open_net_route(void *cpu_env
, int fd
)
6700 fp
= fopen("/proc/net/route", "r");
6707 read
= getline(&line
, &len
, fp
);
6708 dprintf(fd
, "%s", line
);
6712 while ((read
= getline(&line
, &len
, fp
)) != -1) {
6714 uint32_t dest
, gw
, mask
;
6715 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
6716 sscanf(line
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6717 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
6718 &mask
, &mtu
, &window
, &irtt
);
6719 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6720 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
6721 metric
, tswap32(mask
), mtu
, window
, irtt
);
6731 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
6734 const char *filename
;
6735 int (*fill
)(void *cpu_env
, int fd
);
6736 int (*cmp
)(const char *s1
, const char *s2
);
6738 const struct fake_open
*fake_open
;
6739 static const struct fake_open fakes
[] = {
6740 { "maps", open_self_maps
, is_proc_myself
},
6741 { "stat", open_self_stat
, is_proc_myself
},
6742 { "auxv", open_self_auxv
, is_proc_myself
},
6743 { "cmdline", open_self_cmdline
, is_proc_myself
},
6744 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6745 { "/proc/net/route", open_net_route
, is_proc
},
6747 { NULL
, NULL
, NULL
}
6750 if (is_proc_myself(pathname
, "exe")) {
6751 int execfd
= qemu_getauxval(AT_EXECFD
);
6752 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
6755 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
6756 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
6761 if (fake_open
->filename
) {
6763 char filename
[PATH_MAX
];
6766 /* create temporary file to map stat to */
6767 tmpdir
= getenv("TMPDIR");
6770 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
6771 fd
= mkstemp(filename
);
6777 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
6783 lseek(fd
, 0, SEEK_SET
);
6788 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
6791 #define TIMER_MAGIC 0x0caf0000
6792 #define TIMER_MAGIC_MASK 0xffff0000
6794 /* Convert QEMU provided timer ID back to internal 16bit index format */
6795 static target_timer_t
get_timer_id(abi_long arg
)
6797 target_timer_t timerid
= arg
;
6799 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
6800 return -TARGET_EINVAL
;
6805 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
6806 return -TARGET_EINVAL
;
6812 static int target_to_host_cpu_mask(unsigned long *host_mask
,
6814 abi_ulong target_addr
,
6817 unsigned target_bits
= sizeof(abi_ulong
) * 8;
6818 unsigned host_bits
= sizeof(*host_mask
) * 8;
6819 abi_ulong
*target_mask
;
6822 assert(host_size
>= target_size
);
6824 target_mask
= lock_user(VERIFY_READ
, target_addr
, target_size
, 1);
6826 return -TARGET_EFAULT
;
6828 memset(host_mask
, 0, host_size
);
6830 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
6831 unsigned bit
= i
* target_bits
;
6834 __get_user(val
, &target_mask
[i
]);
6835 for (j
= 0; j
< target_bits
; j
++, bit
++) {
6836 if (val
& (1UL << j
)) {
6837 host_mask
[bit
/ host_bits
] |= 1UL << (bit
% host_bits
);
6842 unlock_user(target_mask
, target_addr
, 0);
6846 static int host_to_target_cpu_mask(const unsigned long *host_mask
,
6848 abi_ulong target_addr
,
6851 unsigned target_bits
= sizeof(abi_ulong
) * 8;
6852 unsigned host_bits
= sizeof(*host_mask
) * 8;
6853 abi_ulong
*target_mask
;
6856 assert(host_size
>= target_size
);
6858 target_mask
= lock_user(VERIFY_WRITE
, target_addr
, target_size
, 0);
6860 return -TARGET_EFAULT
;
6863 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
6864 unsigned bit
= i
* target_bits
;
6867 for (j
= 0; j
< target_bits
; j
++, bit
++) {
6868 if (host_mask
[bit
/ host_bits
] & (1UL << (bit
% host_bits
))) {
6872 __put_user(val
, &target_mask
[i
]);
6875 unlock_user(target_mask
, target_addr
, target_size
);
6879 /* This is an internal helper for do_syscall so that it is easier
6880 * to have a single return point, so that actions, such as logging
6881 * of syscall results, can be performed.
6882 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
6884 static abi_long
do_syscall1(void *cpu_env
, int num
, abi_long arg1
,
6885 abi_long arg2
, abi_long arg3
, abi_long arg4
,
6886 abi_long arg5
, abi_long arg6
, abi_long arg7
,
6889 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
6891 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
6892 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
6893 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64)
6896 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
6897 || defined(TARGET_NR_fstatfs)
6903 case TARGET_NR_exit
:
6904 /* In old applications this may be used to implement _exit(2).
6905 However in threaded applictions it is used for thread termination,
6906 and _exit_group is used for application termination.
6907 Do thread termination if we have more then one thread. */
6909 if (block_signals()) {
6910 return -TARGET_ERESTARTSYS
;
6915 if (CPU_NEXT(first_cpu
)) {
6918 /* Remove the CPU from the list. */
6919 QTAILQ_REMOVE_RCU(&cpus
, cpu
, node
);
6924 if (ts
->child_tidptr
) {
6925 put_user_u32(0, ts
->child_tidptr
);
6926 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
6930 object_unref(OBJECT(cpu
));
6932 rcu_unregister_thread();
6937 preexit_cleanup(cpu_env
, arg1
);
6939 return 0; /* avoid warning */
6940 case TARGET_NR_read
:
6944 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
6945 return -TARGET_EFAULT
;
6946 ret
= get_errno(safe_read(arg1
, p
, arg3
));
6948 fd_trans_host_to_target_data(arg1
)) {
6949 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
6951 unlock_user(p
, arg2
, ret
);
6954 case TARGET_NR_write
:
6955 if (arg2
== 0 && arg3
== 0) {
6956 return get_errno(safe_write(arg1
, 0, 0));
6958 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
6959 return -TARGET_EFAULT
;
6960 if (fd_trans_target_to_host_data(arg1
)) {
6961 void *copy
= g_malloc(arg3
);
6962 memcpy(copy
, p
, arg3
);
6963 ret
= fd_trans_target_to_host_data(arg1
)(copy
, arg3
);
6965 ret
= get_errno(safe_write(arg1
, copy
, ret
));
6969 ret
= get_errno(safe_write(arg1
, p
, arg3
));
6971 unlock_user(p
, arg2
, 0);
6974 #ifdef TARGET_NR_open
6975 case TARGET_NR_open
:
6976 if (!(p
= lock_user_string(arg1
)))
6977 return -TARGET_EFAULT
;
6978 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
6979 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
6981 fd_trans_unregister(ret
);
6982 unlock_user(p
, arg1
, 0);
6985 case TARGET_NR_openat
:
6986 if (!(p
= lock_user_string(arg2
)))
6987 return -TARGET_EFAULT
;
6988 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
6989 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
6991 fd_trans_unregister(ret
);
6992 unlock_user(p
, arg2
, 0);
6994 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6995 case TARGET_NR_name_to_handle_at
:
6996 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
6999 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7000 case TARGET_NR_open_by_handle_at
:
7001 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
7002 fd_trans_unregister(ret
);
7005 case TARGET_NR_close
:
7006 fd_trans_unregister(arg1
);
7007 return get_errno(close(arg1
));
7010 return do_brk(arg1
);
7011 #ifdef TARGET_NR_fork
7012 case TARGET_NR_fork
:
7013 return get_errno(do_fork(cpu_env
, TARGET_SIGCHLD
, 0, 0, 0, 0));
7015 #ifdef TARGET_NR_waitpid
7016 case TARGET_NR_waitpid
:
7019 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
7020 if (!is_error(ret
) && arg2
&& ret
7021 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
7022 return -TARGET_EFAULT
;
7026 #ifdef TARGET_NR_waitid
7027 case TARGET_NR_waitid
:
7031 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
7032 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
7033 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
7034 return -TARGET_EFAULT
;
7035 host_to_target_siginfo(p
, &info
);
7036 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
7041 #ifdef TARGET_NR_creat /* not on alpha */
7042 case TARGET_NR_creat
:
7043 if (!(p
= lock_user_string(arg1
)))
7044 return -TARGET_EFAULT
;
7045 ret
= get_errno(creat(p
, arg2
));
7046 fd_trans_unregister(ret
);
7047 unlock_user(p
, arg1
, 0);
7050 #ifdef TARGET_NR_link
7051 case TARGET_NR_link
:
7054 p
= lock_user_string(arg1
);
7055 p2
= lock_user_string(arg2
);
7057 ret
= -TARGET_EFAULT
;
7059 ret
= get_errno(link(p
, p2
));
7060 unlock_user(p2
, arg2
, 0);
7061 unlock_user(p
, arg1
, 0);
7065 #if defined(TARGET_NR_linkat)
7066 case TARGET_NR_linkat
:
7070 return -TARGET_EFAULT
;
7071 p
= lock_user_string(arg2
);
7072 p2
= lock_user_string(arg4
);
7074 ret
= -TARGET_EFAULT
;
7076 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
7077 unlock_user(p
, arg2
, 0);
7078 unlock_user(p2
, arg4
, 0);
7082 #ifdef TARGET_NR_unlink
7083 case TARGET_NR_unlink
:
7084 if (!(p
= lock_user_string(arg1
)))
7085 return -TARGET_EFAULT
;
7086 ret
= get_errno(unlink(p
));
7087 unlock_user(p
, arg1
, 0);
7090 #if defined(TARGET_NR_unlinkat)
7091 case TARGET_NR_unlinkat
:
7092 if (!(p
= lock_user_string(arg2
)))
7093 return -TARGET_EFAULT
;
7094 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
7095 unlock_user(p
, arg2
, 0);
7098 case TARGET_NR_execve
:
7100 char **argp
, **envp
;
7103 abi_ulong guest_argp
;
7104 abi_ulong guest_envp
;
7111 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
7112 if (get_user_ual(addr
, gp
))
7113 return -TARGET_EFAULT
;
7120 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
7121 if (get_user_ual(addr
, gp
))
7122 return -TARGET_EFAULT
;
7128 argp
= g_new0(char *, argc
+ 1);
7129 envp
= g_new0(char *, envc
+ 1);
7131 for (gp
= guest_argp
, q
= argp
; gp
;
7132 gp
+= sizeof(abi_ulong
), q
++) {
7133 if (get_user_ual(addr
, gp
))
7137 if (!(*q
= lock_user_string(addr
)))
7139 total_size
+= strlen(*q
) + 1;
7143 for (gp
= guest_envp
, q
= envp
; gp
;
7144 gp
+= sizeof(abi_ulong
), q
++) {
7145 if (get_user_ual(addr
, gp
))
7149 if (!(*q
= lock_user_string(addr
)))
7151 total_size
+= strlen(*q
) + 1;
7155 if (!(p
= lock_user_string(arg1
)))
7157 /* Although execve() is not an interruptible syscall it is
7158 * a special case where we must use the safe_syscall wrapper:
7159 * if we allow a signal to happen before we make the host
7160 * syscall then we will 'lose' it, because at the point of
7161 * execve the process leaves QEMU's control. So we use the
7162 * safe syscall wrapper to ensure that we either take the
7163 * signal as a guest signal, or else it does not happen
7164 * before the execve completes and makes it the other
7165 * program's problem.
7167 ret
= get_errno(safe_execve(p
, argp
, envp
));
7168 unlock_user(p
, arg1
, 0);
7173 ret
= -TARGET_EFAULT
;
7176 for (gp
= guest_argp
, q
= argp
; *q
;
7177 gp
+= sizeof(abi_ulong
), q
++) {
7178 if (get_user_ual(addr
, gp
)
7181 unlock_user(*q
, addr
, 0);
7183 for (gp
= guest_envp
, q
= envp
; *q
;
7184 gp
+= sizeof(abi_ulong
), q
++) {
7185 if (get_user_ual(addr
, gp
)
7188 unlock_user(*q
, addr
, 0);
7195 case TARGET_NR_chdir
:
7196 if (!(p
= lock_user_string(arg1
)))
7197 return -TARGET_EFAULT
;
7198 ret
= get_errno(chdir(p
));
7199 unlock_user(p
, arg1
, 0);
7201 #ifdef TARGET_NR_time
7202 case TARGET_NR_time
:
7205 ret
= get_errno(time(&host_time
));
7208 && put_user_sal(host_time
, arg1
))
7209 return -TARGET_EFAULT
;
7213 #ifdef TARGET_NR_mknod
7214 case TARGET_NR_mknod
:
7215 if (!(p
= lock_user_string(arg1
)))
7216 return -TARGET_EFAULT
;
7217 ret
= get_errno(mknod(p
, arg2
, arg3
));
7218 unlock_user(p
, arg1
, 0);
7221 #if defined(TARGET_NR_mknodat)
7222 case TARGET_NR_mknodat
:
7223 if (!(p
= lock_user_string(arg2
)))
7224 return -TARGET_EFAULT
;
7225 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
7226 unlock_user(p
, arg2
, 0);
7229 #ifdef TARGET_NR_chmod
7230 case TARGET_NR_chmod
:
7231 if (!(p
= lock_user_string(arg1
)))
7232 return -TARGET_EFAULT
;
7233 ret
= get_errno(chmod(p
, arg2
));
7234 unlock_user(p
, arg1
, 0);
7237 #ifdef TARGET_NR_lseek
7238 case TARGET_NR_lseek
:
7239 return get_errno(lseek(arg1
, arg2
, arg3
));
7241 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7242 /* Alpha specific */
7243 case TARGET_NR_getxpid
:
7244 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
7245 return get_errno(getpid());
7247 #ifdef TARGET_NR_getpid
7248 case TARGET_NR_getpid
:
7249 return get_errno(getpid());
7251 case TARGET_NR_mount
:
7253 /* need to look at the data field */
7257 p
= lock_user_string(arg1
);
7259 return -TARGET_EFAULT
;
7265 p2
= lock_user_string(arg2
);
7268 unlock_user(p
, arg1
, 0);
7270 return -TARGET_EFAULT
;
7274 p3
= lock_user_string(arg3
);
7277 unlock_user(p
, arg1
, 0);
7279 unlock_user(p2
, arg2
, 0);
7280 return -TARGET_EFAULT
;
7286 /* FIXME - arg5 should be locked, but it isn't clear how to
7287 * do that since it's not guaranteed to be a NULL-terminated
7291 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
7293 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
7295 ret
= get_errno(ret
);
7298 unlock_user(p
, arg1
, 0);
7300 unlock_user(p2
, arg2
, 0);
7302 unlock_user(p3
, arg3
, 0);
7306 #ifdef TARGET_NR_umount
7307 case TARGET_NR_umount
:
7308 if (!(p
= lock_user_string(arg1
)))
7309 return -TARGET_EFAULT
;
7310 ret
= get_errno(umount(p
));
7311 unlock_user(p
, arg1
, 0);
7314 #ifdef TARGET_NR_stime /* not on alpha */
7315 case TARGET_NR_stime
:
7318 if (get_user_sal(host_time
, arg1
))
7319 return -TARGET_EFAULT
;
7320 return get_errno(stime(&host_time
));
7323 #ifdef TARGET_NR_alarm /* not on alpha */
7324 case TARGET_NR_alarm
:
7327 #ifdef TARGET_NR_pause /* not on alpha */
7328 case TARGET_NR_pause
:
7329 if (!block_signals()) {
7330 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
7332 return -TARGET_EINTR
;
7334 #ifdef TARGET_NR_utime
7335 case TARGET_NR_utime
:
7337 struct utimbuf tbuf
, *host_tbuf
;
7338 struct target_utimbuf
*target_tbuf
;
7340 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
7341 return -TARGET_EFAULT
;
7342 tbuf
.actime
= tswapal(target_tbuf
->actime
);
7343 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
7344 unlock_user_struct(target_tbuf
, arg2
, 0);
7349 if (!(p
= lock_user_string(arg1
)))
7350 return -TARGET_EFAULT
;
7351 ret
= get_errno(utime(p
, host_tbuf
));
7352 unlock_user(p
, arg1
, 0);
7356 #ifdef TARGET_NR_utimes
7357 case TARGET_NR_utimes
:
7359 struct timeval
*tvp
, tv
[2];
7361 if (copy_from_user_timeval(&tv
[0], arg2
)
7362 || copy_from_user_timeval(&tv
[1],
7363 arg2
+ sizeof(struct target_timeval
)))
7364 return -TARGET_EFAULT
;
7369 if (!(p
= lock_user_string(arg1
)))
7370 return -TARGET_EFAULT
;
7371 ret
= get_errno(utimes(p
, tvp
));
7372 unlock_user(p
, arg1
, 0);
7376 #if defined(TARGET_NR_futimesat)
7377 case TARGET_NR_futimesat
:
7379 struct timeval
*tvp
, tv
[2];
7381 if (copy_from_user_timeval(&tv
[0], arg3
)
7382 || copy_from_user_timeval(&tv
[1],
7383 arg3
+ sizeof(struct target_timeval
)))
7384 return -TARGET_EFAULT
;
7389 if (!(p
= lock_user_string(arg2
))) {
7390 return -TARGET_EFAULT
;
7392 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
7393 unlock_user(p
, arg2
, 0);
7397 #ifdef TARGET_NR_access
7398 case TARGET_NR_access
:
7399 if (!(p
= lock_user_string(arg1
))) {
7400 return -TARGET_EFAULT
;
7402 ret
= get_errno(access(path(p
), arg2
));
7403 unlock_user(p
, arg1
, 0);
7406 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7407 case TARGET_NR_faccessat
:
7408 if (!(p
= lock_user_string(arg2
))) {
7409 return -TARGET_EFAULT
;
7411 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
7412 unlock_user(p
, arg2
, 0);
7415 #ifdef TARGET_NR_nice /* not on alpha */
7416 case TARGET_NR_nice
:
7417 return get_errno(nice(arg1
));
7419 case TARGET_NR_sync
:
7422 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7423 case TARGET_NR_syncfs
:
7424 return get_errno(syncfs(arg1
));
7426 case TARGET_NR_kill
:
7427 return get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
7428 #ifdef TARGET_NR_rename
7429 case TARGET_NR_rename
:
7432 p
= lock_user_string(arg1
);
7433 p2
= lock_user_string(arg2
);
7435 ret
= -TARGET_EFAULT
;
7437 ret
= get_errno(rename(p
, p2
));
7438 unlock_user(p2
, arg2
, 0);
7439 unlock_user(p
, arg1
, 0);
7443 #if defined(TARGET_NR_renameat)
7444 case TARGET_NR_renameat
:
7447 p
= lock_user_string(arg2
);
7448 p2
= lock_user_string(arg4
);
7450 ret
= -TARGET_EFAULT
;
7452 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
7453 unlock_user(p2
, arg4
, 0);
7454 unlock_user(p
, arg2
, 0);
7458 #if defined(TARGET_NR_renameat2)
7459 case TARGET_NR_renameat2
:
7462 p
= lock_user_string(arg2
);
7463 p2
= lock_user_string(arg4
);
7465 ret
= -TARGET_EFAULT
;
7467 ret
= get_errno(sys_renameat2(arg1
, p
, arg3
, p2
, arg5
));
7469 unlock_user(p2
, arg4
, 0);
7470 unlock_user(p
, arg2
, 0);
7474 #ifdef TARGET_NR_mkdir
7475 case TARGET_NR_mkdir
:
7476 if (!(p
= lock_user_string(arg1
)))
7477 return -TARGET_EFAULT
;
7478 ret
= get_errno(mkdir(p
, arg2
));
7479 unlock_user(p
, arg1
, 0);
7482 #if defined(TARGET_NR_mkdirat)
7483 case TARGET_NR_mkdirat
:
7484 if (!(p
= lock_user_string(arg2
)))
7485 return -TARGET_EFAULT
;
7486 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
7487 unlock_user(p
, arg2
, 0);
7490 #ifdef TARGET_NR_rmdir
7491 case TARGET_NR_rmdir
:
7492 if (!(p
= lock_user_string(arg1
)))
7493 return -TARGET_EFAULT
;
7494 ret
= get_errno(rmdir(p
));
7495 unlock_user(p
, arg1
, 0);
7499 ret
= get_errno(dup(arg1
));
7501 fd_trans_dup(arg1
, ret
);
7504 #ifdef TARGET_NR_pipe
7505 case TARGET_NR_pipe
:
7506 return do_pipe(cpu_env
, arg1
, 0, 0);
7508 #ifdef TARGET_NR_pipe2
7509 case TARGET_NR_pipe2
:
7510 return do_pipe(cpu_env
, arg1
,
7511 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
7513 case TARGET_NR_times
:
7515 struct target_tms
*tmsp
;
7517 ret
= get_errno(times(&tms
));
7519 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
7521 return -TARGET_EFAULT
;
7522 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
7523 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
7524 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
7525 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
7528 ret
= host_to_target_clock_t(ret
);
7531 case TARGET_NR_acct
:
7533 ret
= get_errno(acct(NULL
));
7535 if (!(p
= lock_user_string(arg1
))) {
7536 return -TARGET_EFAULT
;
7538 ret
= get_errno(acct(path(p
)));
7539 unlock_user(p
, arg1
, 0);
7542 #ifdef TARGET_NR_umount2
7543 case TARGET_NR_umount2
:
7544 if (!(p
= lock_user_string(arg1
)))
7545 return -TARGET_EFAULT
;
7546 ret
= get_errno(umount2(p
, arg2
));
7547 unlock_user(p
, arg1
, 0);
7550 case TARGET_NR_ioctl
:
7551 return do_ioctl(arg1
, arg2
, arg3
);
7552 #ifdef TARGET_NR_fcntl
7553 case TARGET_NR_fcntl
:
7554 return do_fcntl(arg1
, arg2
, arg3
);
7556 case TARGET_NR_setpgid
:
7557 return get_errno(setpgid(arg1
, arg2
));
7558 case TARGET_NR_umask
:
7559 return get_errno(umask(arg1
));
7560 case TARGET_NR_chroot
:
7561 if (!(p
= lock_user_string(arg1
)))
7562 return -TARGET_EFAULT
;
7563 ret
= get_errno(chroot(p
));
7564 unlock_user(p
, arg1
, 0);
7566 #ifdef TARGET_NR_dup2
7567 case TARGET_NR_dup2
:
7568 ret
= get_errno(dup2(arg1
, arg2
));
7570 fd_trans_dup(arg1
, arg2
);
7574 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7575 case TARGET_NR_dup3
:
7579 if ((arg3
& ~TARGET_O_CLOEXEC
) != 0) {
7582 host_flags
= target_to_host_bitmask(arg3
, fcntl_flags_tbl
);
7583 ret
= get_errno(dup3(arg1
, arg2
, host_flags
));
7585 fd_trans_dup(arg1
, arg2
);
7590 #ifdef TARGET_NR_getppid /* not on alpha */
7591 case TARGET_NR_getppid
:
7592 return get_errno(getppid());
7594 #ifdef TARGET_NR_getpgrp
7595 case TARGET_NR_getpgrp
:
7596 return get_errno(getpgrp());
7598 case TARGET_NR_setsid
:
7599 return get_errno(setsid());
7600 #ifdef TARGET_NR_sigaction
7601 case TARGET_NR_sigaction
:
7603 #if defined(TARGET_ALPHA)
7604 struct target_sigaction act
, oact
, *pact
= 0;
7605 struct target_old_sigaction
*old_act
;
7607 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
7608 return -TARGET_EFAULT
;
7609 act
._sa_handler
= old_act
->_sa_handler
;
7610 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
7611 act
.sa_flags
= old_act
->sa_flags
;
7612 act
.sa_restorer
= 0;
7613 unlock_user_struct(old_act
, arg2
, 0);
7616 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7617 if (!is_error(ret
) && arg3
) {
7618 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
7619 return -TARGET_EFAULT
;
7620 old_act
->_sa_handler
= oact
._sa_handler
;
7621 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
7622 old_act
->sa_flags
= oact
.sa_flags
;
7623 unlock_user_struct(old_act
, arg3
, 1);
7625 #elif defined(TARGET_MIPS)
7626 struct target_sigaction act
, oact
, *pact
, *old_act
;
7629 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
7630 return -TARGET_EFAULT
;
7631 act
._sa_handler
= old_act
->_sa_handler
;
7632 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
7633 act
.sa_flags
= old_act
->sa_flags
;
7634 unlock_user_struct(old_act
, arg2
, 0);
7640 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7642 if (!is_error(ret
) && arg3
) {
7643 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
7644 return -TARGET_EFAULT
;
7645 old_act
->_sa_handler
= oact
._sa_handler
;
7646 old_act
->sa_flags
= oact
.sa_flags
;
7647 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
7648 old_act
->sa_mask
.sig
[1] = 0;
7649 old_act
->sa_mask
.sig
[2] = 0;
7650 old_act
->sa_mask
.sig
[3] = 0;
7651 unlock_user_struct(old_act
, arg3
, 1);
7654 struct target_old_sigaction
*old_act
;
7655 struct target_sigaction act
, oact
, *pact
;
7657 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
7658 return -TARGET_EFAULT
;
7659 act
._sa_handler
= old_act
->_sa_handler
;
7660 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
7661 act
.sa_flags
= old_act
->sa_flags
;
7662 act
.sa_restorer
= old_act
->sa_restorer
;
7663 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7664 act
.ka_restorer
= 0;
7666 unlock_user_struct(old_act
, arg2
, 0);
7671 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7672 if (!is_error(ret
) && arg3
) {
7673 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
7674 return -TARGET_EFAULT
;
7675 old_act
->_sa_handler
= oact
._sa_handler
;
7676 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
7677 old_act
->sa_flags
= oact
.sa_flags
;
7678 old_act
->sa_restorer
= oact
.sa_restorer
;
7679 unlock_user_struct(old_act
, arg3
, 1);
7685 case TARGET_NR_rt_sigaction
:
7687 #if defined(TARGET_ALPHA)
7688 /* For Alpha and SPARC this is a 5 argument syscall, with
7689 * a 'restorer' parameter which must be copied into the
7690 * sa_restorer field of the sigaction struct.
7691 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
7692 * and arg5 is the sigsetsize.
7693 * Alpha also has a separate rt_sigaction struct that it uses
7694 * here; SPARC uses the usual sigaction struct.
7696 struct target_rt_sigaction
*rt_act
;
7697 struct target_sigaction act
, oact
, *pact
= 0;
7699 if (arg4
!= sizeof(target_sigset_t
)) {
7700 return -TARGET_EINVAL
;
7703 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
7704 return -TARGET_EFAULT
;
7705 act
._sa_handler
= rt_act
->_sa_handler
;
7706 act
.sa_mask
= rt_act
->sa_mask
;
7707 act
.sa_flags
= rt_act
->sa_flags
;
7708 act
.sa_restorer
= arg5
;
7709 unlock_user_struct(rt_act
, arg2
, 0);
7712 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7713 if (!is_error(ret
) && arg3
) {
7714 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
7715 return -TARGET_EFAULT
;
7716 rt_act
->_sa_handler
= oact
._sa_handler
;
7717 rt_act
->sa_mask
= oact
.sa_mask
;
7718 rt_act
->sa_flags
= oact
.sa_flags
;
7719 unlock_user_struct(rt_act
, arg3
, 1);
7723 target_ulong restorer
= arg4
;
7724 target_ulong sigsetsize
= arg5
;
7726 target_ulong sigsetsize
= arg4
;
7728 struct target_sigaction
*act
;
7729 struct target_sigaction
*oact
;
7731 if (sigsetsize
!= sizeof(target_sigset_t
)) {
7732 return -TARGET_EINVAL
;
7735 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1)) {
7736 return -TARGET_EFAULT
;
7738 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7739 act
->ka_restorer
= restorer
;
7745 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
7746 ret
= -TARGET_EFAULT
;
7747 goto rt_sigaction_fail
;
7751 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
7754 unlock_user_struct(act
, arg2
, 0);
7756 unlock_user_struct(oact
, arg3
, 1);
7760 #ifdef TARGET_NR_sgetmask /* not on alpha */
7761 case TARGET_NR_sgetmask
:
7764 abi_ulong target_set
;
7765 ret
= do_sigprocmask(0, NULL
, &cur_set
);
7767 host_to_target_old_sigset(&target_set
, &cur_set
);
7773 #ifdef TARGET_NR_ssetmask /* not on alpha */
7774 case TARGET_NR_ssetmask
:
7777 abi_ulong target_set
= arg1
;
7778 target_to_host_old_sigset(&set
, &target_set
);
7779 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
7781 host_to_target_old_sigset(&target_set
, &oset
);
7787 #ifdef TARGET_NR_sigprocmask
7788 case TARGET_NR_sigprocmask
:
7790 #if defined(TARGET_ALPHA)
7791 sigset_t set
, oldset
;
7796 case TARGET_SIG_BLOCK
:
7799 case TARGET_SIG_UNBLOCK
:
7802 case TARGET_SIG_SETMASK
:
7806 return -TARGET_EINVAL
;
7809 target_to_host_old_sigset(&set
, &mask
);
7811 ret
= do_sigprocmask(how
, &set
, &oldset
);
7812 if (!is_error(ret
)) {
7813 host_to_target_old_sigset(&mask
, &oldset
);
7815 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
7818 sigset_t set
, oldset
, *set_ptr
;
7823 case TARGET_SIG_BLOCK
:
7826 case TARGET_SIG_UNBLOCK
:
7829 case TARGET_SIG_SETMASK
:
7833 return -TARGET_EINVAL
;
7835 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
7836 return -TARGET_EFAULT
;
7837 target_to_host_old_sigset(&set
, p
);
7838 unlock_user(p
, arg2
, 0);
7844 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
7845 if (!is_error(ret
) && arg3
) {
7846 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
7847 return -TARGET_EFAULT
;
7848 host_to_target_old_sigset(p
, &oldset
);
7849 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
7855 case TARGET_NR_rt_sigprocmask
:
7858 sigset_t set
, oldset
, *set_ptr
;
7860 if (arg4
!= sizeof(target_sigset_t
)) {
7861 return -TARGET_EINVAL
;
7866 case TARGET_SIG_BLOCK
:
7869 case TARGET_SIG_UNBLOCK
:
7872 case TARGET_SIG_SETMASK
:
7876 return -TARGET_EINVAL
;
7878 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
7879 return -TARGET_EFAULT
;
7880 target_to_host_sigset(&set
, p
);
7881 unlock_user(p
, arg2
, 0);
7887 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
7888 if (!is_error(ret
) && arg3
) {
7889 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
7890 return -TARGET_EFAULT
;
7891 host_to_target_sigset(p
, &oldset
);
7892 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
7896 #ifdef TARGET_NR_sigpending
7897 case TARGET_NR_sigpending
:
7900 ret
= get_errno(sigpending(&set
));
7901 if (!is_error(ret
)) {
7902 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
7903 return -TARGET_EFAULT
;
7904 host_to_target_old_sigset(p
, &set
);
7905 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
7910 case TARGET_NR_rt_sigpending
:
7914 /* Yes, this check is >, not != like most. We follow the kernel's
7915 * logic and it does it like this because it implements
7916 * NR_sigpending through the same code path, and in that case
7917 * the old_sigset_t is smaller in size.
7919 if (arg2
> sizeof(target_sigset_t
)) {
7920 return -TARGET_EINVAL
;
7923 ret
= get_errno(sigpending(&set
));
7924 if (!is_error(ret
)) {
7925 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
7926 return -TARGET_EFAULT
;
7927 host_to_target_sigset(p
, &set
);
7928 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
7932 #ifdef TARGET_NR_sigsuspend
7933 case TARGET_NR_sigsuspend
:
7935 TaskState
*ts
= cpu
->opaque
;
7936 #if defined(TARGET_ALPHA)
7937 abi_ulong mask
= arg1
;
7938 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
7940 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
7941 return -TARGET_EFAULT
;
7942 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
7943 unlock_user(p
, arg1
, 0);
7945 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
7947 if (ret
!= -TARGET_ERESTARTSYS
) {
7948 ts
->in_sigsuspend
= 1;
7953 case TARGET_NR_rt_sigsuspend
:
7955 TaskState
*ts
= cpu
->opaque
;
7957 if (arg2
!= sizeof(target_sigset_t
)) {
7958 return -TARGET_EINVAL
;
7960 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
7961 return -TARGET_EFAULT
;
7962 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
7963 unlock_user(p
, arg1
, 0);
7964 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
7966 if (ret
!= -TARGET_ERESTARTSYS
) {
7967 ts
->in_sigsuspend
= 1;
7971 case TARGET_NR_rt_sigtimedwait
:
7974 struct timespec uts
, *puts
;
7977 if (arg4
!= sizeof(target_sigset_t
)) {
7978 return -TARGET_EINVAL
;
7981 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
7982 return -TARGET_EFAULT
;
7983 target_to_host_sigset(&set
, p
);
7984 unlock_user(p
, arg1
, 0);
7987 target_to_host_timespec(puts
, arg3
);
7991 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
7993 if (!is_error(ret
)) {
7995 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
7998 return -TARGET_EFAULT
;
8000 host_to_target_siginfo(p
, &uinfo
);
8001 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
8003 ret
= host_to_target_signal(ret
);
8007 case TARGET_NR_rt_sigqueueinfo
:
8011 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
8013 return -TARGET_EFAULT
;
8015 target_to_host_siginfo(&uinfo
, p
);
8016 unlock_user(p
, arg3
, 0);
8017 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
8020 case TARGET_NR_rt_tgsigqueueinfo
:
8024 p
= lock_user(VERIFY_READ
, arg4
, sizeof(target_siginfo_t
), 1);
8026 return -TARGET_EFAULT
;
8028 target_to_host_siginfo(&uinfo
, p
);
8029 unlock_user(p
, arg4
, 0);
8030 ret
= get_errno(sys_rt_tgsigqueueinfo(arg1
, arg2
, arg3
, &uinfo
));
8033 #ifdef TARGET_NR_sigreturn
8034 case TARGET_NR_sigreturn
:
8035 if (block_signals()) {
8036 return -TARGET_ERESTARTSYS
;
8038 return do_sigreturn(cpu_env
);
8040 case TARGET_NR_rt_sigreturn
:
8041 if (block_signals()) {
8042 return -TARGET_ERESTARTSYS
;
8044 return do_rt_sigreturn(cpu_env
);
8045 case TARGET_NR_sethostname
:
8046 if (!(p
= lock_user_string(arg1
)))
8047 return -TARGET_EFAULT
;
8048 ret
= get_errno(sethostname(p
, arg2
));
8049 unlock_user(p
, arg1
, 0);
8051 #ifdef TARGET_NR_setrlimit
8052 case TARGET_NR_setrlimit
:
8054 int resource
= target_to_host_resource(arg1
);
8055 struct target_rlimit
*target_rlim
;
8057 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
8058 return -TARGET_EFAULT
;
8059 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
8060 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
8061 unlock_user_struct(target_rlim
, arg2
, 0);
8063 * If we just passed through resource limit settings for memory then
8064 * they would also apply to QEMU's own allocations, and QEMU will
8065 * crash or hang or die if its allocations fail. Ideally we would
8066 * track the guest allocations in QEMU and apply the limits ourselves.
8067 * For now, just tell the guest the call succeeded but don't actually
8070 if (resource
!= RLIMIT_AS
&&
8071 resource
!= RLIMIT_DATA
&&
8072 resource
!= RLIMIT_STACK
) {
8073 return get_errno(setrlimit(resource
, &rlim
));
8079 #ifdef TARGET_NR_getrlimit
8080 case TARGET_NR_getrlimit
:
8082 int resource
= target_to_host_resource(arg1
);
8083 struct target_rlimit
*target_rlim
;
8086 ret
= get_errno(getrlimit(resource
, &rlim
));
8087 if (!is_error(ret
)) {
8088 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
8089 return -TARGET_EFAULT
;
8090 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
8091 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
8092 unlock_user_struct(target_rlim
, arg2
, 1);
8097 case TARGET_NR_getrusage
:
8099 struct rusage rusage
;
8100 ret
= get_errno(getrusage(arg1
, &rusage
));
8101 if (!is_error(ret
)) {
8102 ret
= host_to_target_rusage(arg2
, &rusage
);
8106 case TARGET_NR_gettimeofday
:
8109 ret
= get_errno(gettimeofday(&tv
, NULL
));
8110 if (!is_error(ret
)) {
8111 if (copy_to_user_timeval(arg1
, &tv
))
8112 return -TARGET_EFAULT
;
8116 case TARGET_NR_settimeofday
:
8118 struct timeval tv
, *ptv
= NULL
;
8119 struct timezone tz
, *ptz
= NULL
;
8122 if (copy_from_user_timeval(&tv
, arg1
)) {
8123 return -TARGET_EFAULT
;
8129 if (copy_from_user_timezone(&tz
, arg2
)) {
8130 return -TARGET_EFAULT
;
8135 return get_errno(settimeofday(ptv
, ptz
));
8137 #if defined(TARGET_NR_select)
8138 case TARGET_NR_select
:
8139 #if defined(TARGET_WANT_NI_OLD_SELECT)
8140 /* some architectures used to have old_select here
8141 * but now ENOSYS it.
8143 ret
= -TARGET_ENOSYS
;
8144 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8145 ret
= do_old_select(arg1
);
8147 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
8151 #ifdef TARGET_NR_pselect6
8152 case TARGET_NR_pselect6
:
8154 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
8155 fd_set rfds
, wfds
, efds
;
8156 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
8157 struct timespec ts
, *ts_ptr
;
8160 * The 6th arg is actually two args smashed together,
8161 * so we cannot use the C library.
8169 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
8170 target_sigset_t
*target_sigset
;
8178 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
8182 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
8186 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
8192 * This takes a timespec, and not a timeval, so we cannot
8193 * use the do_select() helper ...
8196 if (target_to_host_timespec(&ts
, ts_addr
)) {
8197 return -TARGET_EFAULT
;
8204 /* Extract the two packed args for the sigset */
8207 sig
.size
= SIGSET_T_SIZE
;
8209 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
8211 return -TARGET_EFAULT
;
8213 arg_sigset
= tswapal(arg7
[0]);
8214 arg_sigsize
= tswapal(arg7
[1]);
8215 unlock_user(arg7
, arg6
, 0);
8219 if (arg_sigsize
!= sizeof(*target_sigset
)) {
8220 /* Like the kernel, we enforce correct size sigsets */
8221 return -TARGET_EINVAL
;
8223 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
8224 sizeof(*target_sigset
), 1);
8225 if (!target_sigset
) {
8226 return -TARGET_EFAULT
;
8228 target_to_host_sigset(&set
, target_sigset
);
8229 unlock_user(target_sigset
, arg_sigset
, 0);
8237 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
8240 if (!is_error(ret
)) {
8241 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
8242 return -TARGET_EFAULT
;
8243 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
8244 return -TARGET_EFAULT
;
8245 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
8246 return -TARGET_EFAULT
;
8248 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
8249 return -TARGET_EFAULT
;
8254 #ifdef TARGET_NR_symlink
8255 case TARGET_NR_symlink
:
8258 p
= lock_user_string(arg1
);
8259 p2
= lock_user_string(arg2
);
8261 ret
= -TARGET_EFAULT
;
8263 ret
= get_errno(symlink(p
, p2
));
8264 unlock_user(p2
, arg2
, 0);
8265 unlock_user(p
, arg1
, 0);
8269 #if defined(TARGET_NR_symlinkat)
8270 case TARGET_NR_symlinkat
:
8273 p
= lock_user_string(arg1
);
8274 p2
= lock_user_string(arg3
);
8276 ret
= -TARGET_EFAULT
;
8278 ret
= get_errno(symlinkat(p
, arg2
, p2
));
8279 unlock_user(p2
, arg3
, 0);
8280 unlock_user(p
, arg1
, 0);
8284 #ifdef TARGET_NR_readlink
8285 case TARGET_NR_readlink
:
8288 p
= lock_user_string(arg1
);
8289 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8291 ret
= -TARGET_EFAULT
;
8293 /* Short circuit this for the magic exe check. */
8294 ret
= -TARGET_EINVAL
;
8295 } else if (is_proc_myself((const char *)p
, "exe")) {
8296 char real
[PATH_MAX
], *temp
;
8297 temp
= realpath(exec_path
, real
);
8298 /* Return value is # of bytes that we wrote to the buffer. */
8300 ret
= get_errno(-1);
8302 /* Don't worry about sign mismatch as earlier mapping
8303 * logic would have thrown a bad address error. */
8304 ret
= MIN(strlen(real
), arg3
);
8305 /* We cannot NUL terminate the string. */
8306 memcpy(p2
, real
, ret
);
8309 ret
= get_errno(readlink(path(p
), p2
, arg3
));
8311 unlock_user(p2
, arg2
, ret
);
8312 unlock_user(p
, arg1
, 0);
8316 #if defined(TARGET_NR_readlinkat)
8317 case TARGET_NR_readlinkat
:
8320 p
= lock_user_string(arg2
);
8321 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
8323 ret
= -TARGET_EFAULT
;
8324 } else if (is_proc_myself((const char *)p
, "exe")) {
8325 char real
[PATH_MAX
], *temp
;
8326 temp
= realpath(exec_path
, real
);
8327 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
8328 snprintf((char *)p2
, arg4
, "%s", real
);
8330 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
8332 unlock_user(p2
, arg3
, ret
);
8333 unlock_user(p
, arg2
, 0);
8337 #ifdef TARGET_NR_swapon
8338 case TARGET_NR_swapon
:
8339 if (!(p
= lock_user_string(arg1
)))
8340 return -TARGET_EFAULT
;
8341 ret
= get_errno(swapon(p
, arg2
));
8342 unlock_user(p
, arg1
, 0);
8345 case TARGET_NR_reboot
:
8346 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
8347 /* arg4 must be ignored in all other cases */
8348 p
= lock_user_string(arg4
);
8350 return -TARGET_EFAULT
;
8352 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
8353 unlock_user(p
, arg4
, 0);
8355 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
8358 #ifdef TARGET_NR_mmap
8359 case TARGET_NR_mmap
:
8360 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8361 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8362 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8363 || defined(TARGET_S390X)
8366 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
8367 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
8368 return -TARGET_EFAULT
;
8375 unlock_user(v
, arg1
, 0);
8376 ret
= get_errno(target_mmap(v1
, v2
, v3
,
8377 target_to_host_bitmask(v4
, mmap_flags_tbl
),
8381 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
8382 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
8388 #ifdef TARGET_NR_mmap2
8389 case TARGET_NR_mmap2
:
8391 #define MMAP_SHIFT 12
8393 ret
= target_mmap(arg1
, arg2
, arg3
,
8394 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
8395 arg5
, arg6
<< MMAP_SHIFT
);
8396 return get_errno(ret
);
8398 case TARGET_NR_munmap
:
8399 return get_errno(target_munmap(arg1
, arg2
));
8400 case TARGET_NR_mprotect
:
8402 TaskState
*ts
= cpu
->opaque
;
8403 /* Special hack to detect libc making the stack executable. */
8404 if ((arg3
& PROT_GROWSDOWN
)
8405 && arg1
>= ts
->info
->stack_limit
8406 && arg1
<= ts
->info
->start_stack
) {
8407 arg3
&= ~PROT_GROWSDOWN
;
8408 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
8409 arg1
= ts
->info
->stack_limit
;
8412 return get_errno(target_mprotect(arg1
, arg2
, arg3
));
8413 #ifdef TARGET_NR_mremap
8414 case TARGET_NR_mremap
:
8415 return get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
8417 /* ??? msync/mlock/munlock are broken for softmmu. */
8418 #ifdef TARGET_NR_msync
8419 case TARGET_NR_msync
:
8420 return get_errno(msync(g2h(arg1
), arg2
, arg3
));
8422 #ifdef TARGET_NR_mlock
8423 case TARGET_NR_mlock
:
8424 return get_errno(mlock(g2h(arg1
), arg2
));
8426 #ifdef TARGET_NR_munlock
8427 case TARGET_NR_munlock
:
8428 return get_errno(munlock(g2h(arg1
), arg2
));
8430 #ifdef TARGET_NR_mlockall
8431 case TARGET_NR_mlockall
:
8432 return get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
8434 #ifdef TARGET_NR_munlockall
8435 case TARGET_NR_munlockall
:
8436 return get_errno(munlockall());
8438 #ifdef TARGET_NR_truncate
8439 case TARGET_NR_truncate
:
8440 if (!(p
= lock_user_string(arg1
)))
8441 return -TARGET_EFAULT
;
8442 ret
= get_errno(truncate(p
, arg2
));
8443 unlock_user(p
, arg1
, 0);
8446 #ifdef TARGET_NR_ftruncate
8447 case TARGET_NR_ftruncate
:
8448 return get_errno(ftruncate(arg1
, arg2
));
8450 case TARGET_NR_fchmod
:
8451 return get_errno(fchmod(arg1
, arg2
));
8452 #if defined(TARGET_NR_fchmodat)
8453 case TARGET_NR_fchmodat
:
8454 if (!(p
= lock_user_string(arg2
)))
8455 return -TARGET_EFAULT
;
8456 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
8457 unlock_user(p
, arg2
, 0);
8460 case TARGET_NR_getpriority
:
8461 /* Note that negative values are valid for getpriority, so we must
8462 differentiate based on errno settings. */
8464 ret
= getpriority(arg1
, arg2
);
8465 if (ret
== -1 && errno
!= 0) {
8466 return -host_to_target_errno(errno
);
8469 /* Return value is the unbiased priority. Signal no error. */
8470 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
8472 /* Return value is a biased priority to avoid negative numbers. */
8476 case TARGET_NR_setpriority
:
8477 return get_errno(setpriority(arg1
, arg2
, arg3
));
8478 #ifdef TARGET_NR_statfs
8479 case TARGET_NR_statfs
:
8480 if (!(p
= lock_user_string(arg1
))) {
8481 return -TARGET_EFAULT
;
8483 ret
= get_errno(statfs(path(p
), &stfs
));
8484 unlock_user(p
, arg1
, 0);
8486 if (!is_error(ret
)) {
8487 struct target_statfs
*target_stfs
;
8489 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
8490 return -TARGET_EFAULT
;
8491 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
8492 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
8493 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
8494 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
8495 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
8496 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
8497 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
8498 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
8499 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
8500 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
8501 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
8502 #ifdef _STATFS_F_FLAGS
8503 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
8505 __put_user(0, &target_stfs
->f_flags
);
8507 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
8508 unlock_user_struct(target_stfs
, arg2
, 1);
8512 #ifdef TARGET_NR_fstatfs
8513 case TARGET_NR_fstatfs
:
8514 ret
= get_errno(fstatfs(arg1
, &stfs
));
8515 goto convert_statfs
;
8517 #ifdef TARGET_NR_statfs64
8518 case TARGET_NR_statfs64
:
8519 if (!(p
= lock_user_string(arg1
))) {
8520 return -TARGET_EFAULT
;
8522 ret
= get_errno(statfs(path(p
), &stfs
));
8523 unlock_user(p
, arg1
, 0);
8525 if (!is_error(ret
)) {
8526 struct target_statfs64
*target_stfs
;
8528 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
8529 return -TARGET_EFAULT
;
8530 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
8531 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
8532 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
8533 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
8534 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
8535 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
8536 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
8537 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
8538 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
8539 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
8540 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
8541 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
8542 unlock_user_struct(target_stfs
, arg3
, 1);
8545 case TARGET_NR_fstatfs64
:
8546 ret
= get_errno(fstatfs(arg1
, &stfs
));
8547 goto convert_statfs64
;
8549 #ifdef TARGET_NR_socketcall
8550 case TARGET_NR_socketcall
:
8551 return do_socketcall(arg1
, arg2
);
8553 #ifdef TARGET_NR_accept
8554 case TARGET_NR_accept
:
8555 return do_accept4(arg1
, arg2
, arg3
, 0);
8557 #ifdef TARGET_NR_accept4
8558 case TARGET_NR_accept4
:
8559 return do_accept4(arg1
, arg2
, arg3
, arg4
);
8561 #ifdef TARGET_NR_bind
8562 case TARGET_NR_bind
:
8563 return do_bind(arg1
, arg2
, arg3
);
8565 #ifdef TARGET_NR_connect
8566 case TARGET_NR_connect
:
8567 return do_connect(arg1
, arg2
, arg3
);
8569 #ifdef TARGET_NR_getpeername
8570 case TARGET_NR_getpeername
:
8571 return do_getpeername(arg1
, arg2
, arg3
);
8573 #ifdef TARGET_NR_getsockname
8574 case TARGET_NR_getsockname
:
8575 return do_getsockname(arg1
, arg2
, arg3
);
8577 #ifdef TARGET_NR_getsockopt
8578 case TARGET_NR_getsockopt
:
8579 return do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
8581 #ifdef TARGET_NR_listen
8582 case TARGET_NR_listen
:
8583 return get_errno(listen(arg1
, arg2
));
8585 #ifdef TARGET_NR_recv
8586 case TARGET_NR_recv
:
8587 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
8589 #ifdef TARGET_NR_recvfrom
8590 case TARGET_NR_recvfrom
:
8591 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8593 #ifdef TARGET_NR_recvmsg
8594 case TARGET_NR_recvmsg
:
8595 return do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
8597 #ifdef TARGET_NR_send
8598 case TARGET_NR_send
:
8599 return do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
8601 #ifdef TARGET_NR_sendmsg
8602 case TARGET_NR_sendmsg
:
8603 return do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
8605 #ifdef TARGET_NR_sendmmsg
8606 case TARGET_NR_sendmmsg
:
8607 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
8608 case TARGET_NR_recvmmsg
:
8609 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
8611 #ifdef TARGET_NR_sendto
8612 case TARGET_NR_sendto
:
8613 return do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8615 #ifdef TARGET_NR_shutdown
8616 case TARGET_NR_shutdown
:
8617 return get_errno(shutdown(arg1
, arg2
));
8619 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8620 case TARGET_NR_getrandom
:
8621 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
8623 return -TARGET_EFAULT
;
8625 ret
= get_errno(getrandom(p
, arg2
, arg3
));
8626 unlock_user(p
, arg1
, ret
);
8629 #ifdef TARGET_NR_socket
8630 case TARGET_NR_socket
:
8631 return do_socket(arg1
, arg2
, arg3
);
8633 #ifdef TARGET_NR_socketpair
8634 case TARGET_NR_socketpair
:
8635 return do_socketpair(arg1
, arg2
, arg3
, arg4
);
8637 #ifdef TARGET_NR_setsockopt
8638 case TARGET_NR_setsockopt
:
8639 return do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
8641 #if defined(TARGET_NR_syslog)
8642 case TARGET_NR_syslog
:
8647 case TARGET_SYSLOG_ACTION_CLOSE
: /* Close log */
8648 case TARGET_SYSLOG_ACTION_OPEN
: /* Open log */
8649 case TARGET_SYSLOG_ACTION_CLEAR
: /* Clear ring buffer */
8650 case TARGET_SYSLOG_ACTION_CONSOLE_OFF
: /* Disable logging */
8651 case TARGET_SYSLOG_ACTION_CONSOLE_ON
: /* Enable logging */
8652 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL
: /* Set messages level */
8653 case TARGET_SYSLOG_ACTION_SIZE_UNREAD
: /* Number of chars */
8654 case TARGET_SYSLOG_ACTION_SIZE_BUFFER
: /* Size of the buffer */
8655 return get_errno(sys_syslog((int)arg1
, NULL
, (int)arg3
));
8656 case TARGET_SYSLOG_ACTION_READ
: /* Read from log */
8657 case TARGET_SYSLOG_ACTION_READ_CLEAR
: /* Read/clear msgs */
8658 case TARGET_SYSLOG_ACTION_READ_ALL
: /* Read last messages */
8661 return -TARGET_EINVAL
;
8666 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8668 return -TARGET_EFAULT
;
8670 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
8671 unlock_user(p
, arg2
, arg3
);
8675 return -TARGET_EINVAL
;
8680 case TARGET_NR_setitimer
:
8682 struct itimerval value
, ovalue
, *pvalue
;
8686 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
8687 || copy_from_user_timeval(&pvalue
->it_value
,
8688 arg2
+ sizeof(struct target_timeval
)))
8689 return -TARGET_EFAULT
;
8693 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
8694 if (!is_error(ret
) && arg3
) {
8695 if (copy_to_user_timeval(arg3
,
8696 &ovalue
.it_interval
)
8697 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
8699 return -TARGET_EFAULT
;
8703 case TARGET_NR_getitimer
:
8705 struct itimerval value
;
8707 ret
= get_errno(getitimer(arg1
, &value
));
8708 if (!is_error(ret
) && arg2
) {
8709 if (copy_to_user_timeval(arg2
,
8711 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
8713 return -TARGET_EFAULT
;
8717 #ifdef TARGET_NR_stat
8718 case TARGET_NR_stat
:
8719 if (!(p
= lock_user_string(arg1
))) {
8720 return -TARGET_EFAULT
;
8722 ret
= get_errno(stat(path(p
), &st
));
8723 unlock_user(p
, arg1
, 0);
8726 #ifdef TARGET_NR_lstat
8727 case TARGET_NR_lstat
:
8728 if (!(p
= lock_user_string(arg1
))) {
8729 return -TARGET_EFAULT
;
8731 ret
= get_errno(lstat(path(p
), &st
));
8732 unlock_user(p
, arg1
, 0);
8735 #ifdef TARGET_NR_fstat
8736 case TARGET_NR_fstat
:
8738 ret
= get_errno(fstat(arg1
, &st
));
8739 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
8742 if (!is_error(ret
)) {
8743 struct target_stat
*target_st
;
8745 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
8746 return -TARGET_EFAULT
;
8747 memset(target_st
, 0, sizeof(*target_st
));
8748 __put_user(st
.st_dev
, &target_st
->st_dev
);
8749 __put_user(st
.st_ino
, &target_st
->st_ino
);
8750 __put_user(st
.st_mode
, &target_st
->st_mode
);
8751 __put_user(st
.st_uid
, &target_st
->st_uid
);
8752 __put_user(st
.st_gid
, &target_st
->st_gid
);
8753 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
8754 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
8755 __put_user(st
.st_size
, &target_st
->st_size
);
8756 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
8757 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
8758 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
8759 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
8760 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
8761 unlock_user_struct(target_st
, arg2
, 1);
8766 case TARGET_NR_vhangup
:
8767 return get_errno(vhangup());
8768 #ifdef TARGET_NR_syscall
8769 case TARGET_NR_syscall
:
8770 return do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
8771 arg6
, arg7
, arg8
, 0);
8773 case TARGET_NR_wait4
:
8776 abi_long status_ptr
= arg2
;
8777 struct rusage rusage
, *rusage_ptr
;
8778 abi_ulong target_rusage
= arg4
;
8779 abi_long rusage_err
;
8781 rusage_ptr
= &rusage
;
8784 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
8785 if (!is_error(ret
)) {
8786 if (status_ptr
&& ret
) {
8787 status
= host_to_target_waitstatus(status
);
8788 if (put_user_s32(status
, status_ptr
))
8789 return -TARGET_EFAULT
;
8791 if (target_rusage
) {
8792 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
8800 #ifdef TARGET_NR_swapoff
8801 case TARGET_NR_swapoff
:
8802 if (!(p
= lock_user_string(arg1
)))
8803 return -TARGET_EFAULT
;
8804 ret
= get_errno(swapoff(p
));
8805 unlock_user(p
, arg1
, 0);
8808 case TARGET_NR_sysinfo
:
8810 struct target_sysinfo
*target_value
;
8811 struct sysinfo value
;
8812 ret
= get_errno(sysinfo(&value
));
8813 if (!is_error(ret
) && arg1
)
8815 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
8816 return -TARGET_EFAULT
;
8817 __put_user(value
.uptime
, &target_value
->uptime
);
8818 __put_user(value
.loads
[0], &target_value
->loads
[0]);
8819 __put_user(value
.loads
[1], &target_value
->loads
[1]);
8820 __put_user(value
.loads
[2], &target_value
->loads
[2]);
8821 __put_user(value
.totalram
, &target_value
->totalram
);
8822 __put_user(value
.freeram
, &target_value
->freeram
);
8823 __put_user(value
.sharedram
, &target_value
->sharedram
);
8824 __put_user(value
.bufferram
, &target_value
->bufferram
);
8825 __put_user(value
.totalswap
, &target_value
->totalswap
);
8826 __put_user(value
.freeswap
, &target_value
->freeswap
);
8827 __put_user(value
.procs
, &target_value
->procs
);
8828 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
8829 __put_user(value
.freehigh
, &target_value
->freehigh
);
8830 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
8831 unlock_user_struct(target_value
, arg1
, 1);
8835 #ifdef TARGET_NR_ipc
8837 return do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8839 #ifdef TARGET_NR_semget
8840 case TARGET_NR_semget
:
8841 return get_errno(semget(arg1
, arg2
, arg3
));
8843 #ifdef TARGET_NR_semop
8844 case TARGET_NR_semop
:
8845 return do_semop(arg1
, arg2
, arg3
);
8847 #ifdef TARGET_NR_semctl
8848 case TARGET_NR_semctl
:
8849 return do_semctl(arg1
, arg2
, arg3
, arg4
);
8851 #ifdef TARGET_NR_msgctl
8852 case TARGET_NR_msgctl
:
8853 return do_msgctl(arg1
, arg2
, arg3
);
8855 #ifdef TARGET_NR_msgget
8856 case TARGET_NR_msgget
:
8857 return get_errno(msgget(arg1
, arg2
));
8859 #ifdef TARGET_NR_msgrcv
8860 case TARGET_NR_msgrcv
:
8861 return do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
8863 #ifdef TARGET_NR_msgsnd
8864 case TARGET_NR_msgsnd
:
8865 return do_msgsnd(arg1
, arg2
, arg3
, arg4
);
8867 #ifdef TARGET_NR_shmget
8868 case TARGET_NR_shmget
:
8869 return get_errno(shmget(arg1
, arg2
, arg3
));
8871 #ifdef TARGET_NR_shmctl
8872 case TARGET_NR_shmctl
:
8873 return do_shmctl(arg1
, arg2
, arg3
);
8875 #ifdef TARGET_NR_shmat
8876 case TARGET_NR_shmat
:
8877 return do_shmat(cpu_env
, arg1
, arg2
, arg3
);
8879 #ifdef TARGET_NR_shmdt
8880 case TARGET_NR_shmdt
:
8881 return do_shmdt(arg1
);
8883 case TARGET_NR_fsync
:
8884 return get_errno(fsync(arg1
));
8885 case TARGET_NR_clone
:
8886 /* Linux manages to have three different orderings for its
8887 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
8888 * match the kernel's CONFIG_CLONE_* settings.
8889 * Microblaze is further special in that it uses a sixth
8890 * implicit argument to clone for the TLS pointer.
8892 #if defined(TARGET_MICROBLAZE)
8893 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
8894 #elif defined(TARGET_CLONE_BACKWARDS)
8895 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
8896 #elif defined(TARGET_CLONE_BACKWARDS2)
8897 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
8899 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
8902 #ifdef __NR_exit_group
8903 /* new thread calls */
8904 case TARGET_NR_exit_group
:
8905 preexit_cleanup(cpu_env
, arg1
);
8906 return get_errno(exit_group(arg1
));
8908 case TARGET_NR_setdomainname
:
8909 if (!(p
= lock_user_string(arg1
)))
8910 return -TARGET_EFAULT
;
8911 ret
= get_errno(setdomainname(p
, arg2
));
8912 unlock_user(p
, arg1
, 0);
8914 case TARGET_NR_uname
:
8915 /* no need to transcode because we use the linux syscall */
8917 struct new_utsname
* buf
;
8919 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
8920 return -TARGET_EFAULT
;
8921 ret
= get_errno(sys_uname(buf
));
8922 if (!is_error(ret
)) {
8923 /* Overwrite the native machine name with whatever is being
8925 g_strlcpy(buf
->machine
, cpu_to_uname_machine(cpu_env
),
8926 sizeof(buf
->machine
));
8927 /* Allow the user to override the reported release. */
8928 if (qemu_uname_release
&& *qemu_uname_release
) {
8929 g_strlcpy(buf
->release
, qemu_uname_release
,
8930 sizeof(buf
->release
));
8933 unlock_user_struct(buf
, arg1
, 1);
8937 case TARGET_NR_modify_ldt
:
8938 return do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
8939 #if !defined(TARGET_X86_64)
8940 case TARGET_NR_vm86
:
8941 return do_vm86(cpu_env
, arg1
, arg2
);
8944 case TARGET_NR_adjtimex
:
8946 struct timex host_buf
;
8948 if (target_to_host_timex(&host_buf
, arg1
) != 0) {
8949 return -TARGET_EFAULT
;
8951 ret
= get_errno(adjtimex(&host_buf
));
8952 if (!is_error(ret
)) {
8953 if (host_to_target_timex(arg1
, &host_buf
) != 0) {
8954 return -TARGET_EFAULT
;
8959 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
8960 case TARGET_NR_clock_adjtime
:
8962 struct timex htx
, *phtx
= &htx
;
8964 if (target_to_host_timex(phtx
, arg2
) != 0) {
8965 return -TARGET_EFAULT
;
8967 ret
= get_errno(clock_adjtime(arg1
, phtx
));
8968 if (!is_error(ret
) && phtx
) {
8969 if (host_to_target_timex(arg2
, phtx
) != 0) {
8970 return -TARGET_EFAULT
;
8976 case TARGET_NR_getpgid
:
8977 return get_errno(getpgid(arg1
));
8978 case TARGET_NR_fchdir
:
8979 return get_errno(fchdir(arg1
));
8980 case TARGET_NR_personality
:
8981 return get_errno(personality(arg1
));
8982 #ifdef TARGET_NR__llseek /* Not on alpha */
8983 case TARGET_NR__llseek
:
8986 #if !defined(__NR_llseek)
8987 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
8989 ret
= get_errno(res
);
8994 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
8996 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
8997 return -TARGET_EFAULT
;
9002 #ifdef TARGET_NR_getdents
9003 case TARGET_NR_getdents
:
9004 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9005 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9007 struct target_dirent
*target_dirp
;
9008 struct linux_dirent
*dirp
;
9009 abi_long count
= arg3
;
9011 dirp
= g_try_malloc(count
);
9013 return -TARGET_ENOMEM
;
9016 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9017 if (!is_error(ret
)) {
9018 struct linux_dirent
*de
;
9019 struct target_dirent
*tde
;
9021 int reclen
, treclen
;
9022 int count1
, tnamelen
;
9026 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9027 return -TARGET_EFAULT
;
9030 reclen
= de
->d_reclen
;
9031 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
9032 assert(tnamelen
>= 0);
9033 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
9034 assert(count1
+ treclen
<= count
);
9035 tde
->d_reclen
= tswap16(treclen
);
9036 tde
->d_ino
= tswapal(de
->d_ino
);
9037 tde
->d_off
= tswapal(de
->d_off
);
9038 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
9039 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9041 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9045 unlock_user(target_dirp
, arg2
, ret
);
9051 struct linux_dirent
*dirp
;
9052 abi_long count
= arg3
;
9054 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9055 return -TARGET_EFAULT
;
9056 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9057 if (!is_error(ret
)) {
9058 struct linux_dirent
*de
;
9063 reclen
= de
->d_reclen
;
9066 de
->d_reclen
= tswap16(reclen
);
9067 tswapls(&de
->d_ino
);
9068 tswapls(&de
->d_off
);
9069 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9073 unlock_user(dirp
, arg2
, ret
);
9077 /* Implement getdents in terms of getdents64 */
9079 struct linux_dirent64
*dirp
;
9080 abi_long count
= arg3
;
9082 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
9084 return -TARGET_EFAULT
;
9086 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9087 if (!is_error(ret
)) {
9088 /* Convert the dirent64 structs to target dirent. We do this
9089 * in-place, since we can guarantee that a target_dirent is no
9090 * larger than a dirent64; however this means we have to be
9091 * careful to read everything before writing in the new format.
9093 struct linux_dirent64
*de
;
9094 struct target_dirent
*tde
;
9099 tde
= (struct target_dirent
*)dirp
;
9101 int namelen
, treclen
;
9102 int reclen
= de
->d_reclen
;
9103 uint64_t ino
= de
->d_ino
;
9104 int64_t off
= de
->d_off
;
9105 uint8_t type
= de
->d_type
;
9107 namelen
= strlen(de
->d_name
);
9108 treclen
= offsetof(struct target_dirent
, d_name
)
9110 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
9112 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
9113 tde
->d_ino
= tswapal(ino
);
9114 tde
->d_off
= tswapal(off
);
9115 tde
->d_reclen
= tswap16(treclen
);
9116 /* The target_dirent type is in what was formerly a padding
9117 * byte at the end of the structure:
9119 *(((char *)tde
) + treclen
- 1) = type
;
9121 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9122 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9128 unlock_user(dirp
, arg2
, ret
);
9132 #endif /* TARGET_NR_getdents */
9133 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9134 case TARGET_NR_getdents64
:
9136 struct linux_dirent64
*dirp
;
9137 abi_long count
= arg3
;
9138 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9139 return -TARGET_EFAULT
;
9140 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9141 if (!is_error(ret
)) {
9142 struct linux_dirent64
*de
;
9147 reclen
= de
->d_reclen
;
9150 de
->d_reclen
= tswap16(reclen
);
9151 tswap64s((uint64_t *)&de
->d_ino
);
9152 tswap64s((uint64_t *)&de
->d_off
);
9153 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9157 unlock_user(dirp
, arg2
, ret
);
9160 #endif /* TARGET_NR_getdents64 */
9161 #if defined(TARGET_NR__newselect)
9162 case TARGET_NR__newselect
:
9163 return do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9165 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9166 # ifdef TARGET_NR_poll
9167 case TARGET_NR_poll
:
9169 # ifdef TARGET_NR_ppoll
9170 case TARGET_NR_ppoll
:
9173 struct target_pollfd
*target_pfd
;
9174 unsigned int nfds
= arg2
;
9181 if (nfds
> (INT_MAX
/ sizeof(struct target_pollfd
))) {
9182 return -TARGET_EINVAL
;
9185 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
9186 sizeof(struct target_pollfd
) * nfds
, 1);
9188 return -TARGET_EFAULT
;
9191 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
9192 for (i
= 0; i
< nfds
; i
++) {
9193 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
9194 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
9199 # ifdef TARGET_NR_ppoll
9200 case TARGET_NR_ppoll
:
9202 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
9203 target_sigset_t
*target_set
;
9204 sigset_t _set
, *set
= &_set
;
9207 if (target_to_host_timespec(timeout_ts
, arg3
)) {
9208 unlock_user(target_pfd
, arg1
, 0);
9209 return -TARGET_EFAULT
;
9216 if (arg5
!= sizeof(target_sigset_t
)) {
9217 unlock_user(target_pfd
, arg1
, 0);
9218 return -TARGET_EINVAL
;
9221 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
9223 unlock_user(target_pfd
, arg1
, 0);
9224 return -TARGET_EFAULT
;
9226 target_to_host_sigset(set
, target_set
);
9231 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
9232 set
, SIGSET_T_SIZE
));
9234 if (!is_error(ret
) && arg3
) {
9235 host_to_target_timespec(arg3
, timeout_ts
);
9238 unlock_user(target_set
, arg4
, 0);
9243 # ifdef TARGET_NR_poll
9244 case TARGET_NR_poll
:
9246 struct timespec ts
, *pts
;
9249 /* Convert ms to secs, ns */
9250 ts
.tv_sec
= arg3
/ 1000;
9251 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
9254 /* -ve poll() timeout means "infinite" */
9257 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
9262 g_assert_not_reached();
9265 if (!is_error(ret
)) {
9266 for(i
= 0; i
< nfds
; i
++) {
9267 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
9270 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
9274 case TARGET_NR_flock
:
9275 /* NOTE: the flock constant seems to be the same for every
9277 return get_errno(safe_flock(arg1
, arg2
));
9278 case TARGET_NR_readv
:
9280 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
9282 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
9283 unlock_iovec(vec
, arg2
, arg3
, 1);
9285 ret
= -host_to_target_errno(errno
);
9289 case TARGET_NR_writev
:
9291 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
9293 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
9294 unlock_iovec(vec
, arg2
, arg3
, 0);
9296 ret
= -host_to_target_errno(errno
);
9300 #if defined(TARGET_NR_preadv)
9301 case TARGET_NR_preadv
:
9303 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
9305 unsigned long low
, high
;
9307 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
9308 ret
= get_errno(safe_preadv(arg1
, vec
, arg3
, low
, high
));
9309 unlock_iovec(vec
, arg2
, arg3
, 1);
9311 ret
= -host_to_target_errno(errno
);
9316 #if defined(TARGET_NR_pwritev)
9317 case TARGET_NR_pwritev
:
9319 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
9321 unsigned long low
, high
;
9323 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
9324 ret
= get_errno(safe_pwritev(arg1
, vec
, arg3
, low
, high
));
9325 unlock_iovec(vec
, arg2
, arg3
, 0);
9327 ret
= -host_to_target_errno(errno
);
9332 case TARGET_NR_getsid
:
9333 return get_errno(getsid(arg1
));
9334 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9335 case TARGET_NR_fdatasync
:
9336 return get_errno(fdatasync(arg1
));
9338 #ifdef TARGET_NR__sysctl
9339 case TARGET_NR__sysctl
:
9340 /* We don't implement this, but ENOTDIR is always a safe
9342 return -TARGET_ENOTDIR
;
9344 case TARGET_NR_sched_getaffinity
:
9346 unsigned int mask_size
;
9347 unsigned long *mask
;
9350 * sched_getaffinity needs multiples of ulong, so need to take
9351 * care of mismatches between target ulong and host ulong sizes.
9353 if (arg2
& (sizeof(abi_ulong
) - 1)) {
9354 return -TARGET_EINVAL
;
9356 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
9358 mask
= alloca(mask_size
);
9359 memset(mask
, 0, mask_size
);
9360 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
9362 if (!is_error(ret
)) {
9364 /* More data returned than the caller's buffer will fit.
9365 * This only happens if sizeof(abi_long) < sizeof(long)
9366 * and the caller passed us a buffer holding an odd number
9367 * of abi_longs. If the host kernel is actually using the
9368 * extra 4 bytes then fail EINVAL; otherwise we can just
9369 * ignore them and only copy the interesting part.
9371 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
9372 if (numcpus
> arg2
* 8) {
9373 return -TARGET_EINVAL
;
9378 if (host_to_target_cpu_mask(mask
, mask_size
, arg3
, ret
)) {
9379 return -TARGET_EFAULT
;
9384 case TARGET_NR_sched_setaffinity
:
9386 unsigned int mask_size
;
9387 unsigned long *mask
;
9390 * sched_setaffinity needs multiples of ulong, so need to take
9391 * care of mismatches between target ulong and host ulong sizes.
9393 if (arg2
& (sizeof(abi_ulong
) - 1)) {
9394 return -TARGET_EINVAL
;
9396 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
9397 mask
= alloca(mask_size
);
9399 ret
= target_to_host_cpu_mask(mask
, mask_size
, arg3
, arg2
);
9404 return get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
9406 case TARGET_NR_getcpu
:
9409 ret
= get_errno(sys_getcpu(arg1
? &cpu
: NULL
,
9410 arg2
? &node
: NULL
,
9412 if (is_error(ret
)) {
9415 if (arg1
&& put_user_u32(cpu
, arg1
)) {
9416 return -TARGET_EFAULT
;
9418 if (arg2
&& put_user_u32(node
, arg2
)) {
9419 return -TARGET_EFAULT
;
9423 case TARGET_NR_sched_setparam
:
9425 struct sched_param
*target_schp
;
9426 struct sched_param schp
;
9429 return -TARGET_EINVAL
;
9431 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
9432 return -TARGET_EFAULT
;
9433 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
9434 unlock_user_struct(target_schp
, arg2
, 0);
9435 return get_errno(sched_setparam(arg1
, &schp
));
9437 case TARGET_NR_sched_getparam
:
9439 struct sched_param
*target_schp
;
9440 struct sched_param schp
;
9443 return -TARGET_EINVAL
;
9445 ret
= get_errno(sched_getparam(arg1
, &schp
));
9446 if (!is_error(ret
)) {
9447 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
9448 return -TARGET_EFAULT
;
9449 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
9450 unlock_user_struct(target_schp
, arg2
, 1);
9454 case TARGET_NR_sched_setscheduler
:
9456 struct sched_param
*target_schp
;
9457 struct sched_param schp
;
9459 return -TARGET_EINVAL
;
9461 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
9462 return -TARGET_EFAULT
;
9463 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
9464 unlock_user_struct(target_schp
, arg3
, 0);
9465 return get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
9467 case TARGET_NR_sched_getscheduler
:
9468 return get_errno(sched_getscheduler(arg1
));
9469 case TARGET_NR_sched_yield
:
9470 return get_errno(sched_yield());
9471 case TARGET_NR_sched_get_priority_max
:
9472 return get_errno(sched_get_priority_max(arg1
));
9473 case TARGET_NR_sched_get_priority_min
:
9474 return get_errno(sched_get_priority_min(arg1
));
9475 case TARGET_NR_sched_rr_get_interval
:
9478 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
9479 if (!is_error(ret
)) {
9480 ret
= host_to_target_timespec(arg2
, &ts
);
9484 case TARGET_NR_nanosleep
:
9486 struct timespec req
, rem
;
9487 target_to_host_timespec(&req
, arg1
);
9488 ret
= get_errno(safe_nanosleep(&req
, &rem
));
9489 if (is_error(ret
) && arg2
) {
9490 host_to_target_timespec(arg2
, &rem
);
9494 case TARGET_NR_prctl
:
9496 case PR_GET_PDEATHSIG
:
9499 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
9500 if (!is_error(ret
) && arg2
9501 && put_user_ual(deathsig
, arg2
)) {
9502 return -TARGET_EFAULT
;
9509 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
9511 return -TARGET_EFAULT
;
9513 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
9515 unlock_user(name
, arg2
, 16);
9520 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
9522 return -TARGET_EFAULT
;
9524 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
9526 unlock_user(name
, arg2
, 0);
9531 case TARGET_PR_GET_FP_MODE
:
9533 CPUMIPSState
*env
= ((CPUMIPSState
*)cpu_env
);
9535 if (env
->CP0_Status
& (1 << CP0St_FR
)) {
9536 ret
|= TARGET_PR_FP_MODE_FR
;
9538 if (env
->CP0_Config5
& (1 << CP0C5_FRE
)) {
9539 ret
|= TARGET_PR_FP_MODE_FRE
;
9543 case TARGET_PR_SET_FP_MODE
:
9545 CPUMIPSState
*env
= ((CPUMIPSState
*)cpu_env
);
9546 bool old_fr
= env
->CP0_Status
& (1 << CP0St_FR
);
9547 bool new_fr
= arg2
& TARGET_PR_FP_MODE_FR
;
9548 bool new_fre
= arg2
& TARGET_PR_FP_MODE_FRE
;
9550 if (new_fr
&& !(env
->active_fpu
.fcr0
& (1 << FCR0_F64
))) {
9551 /* FR1 is not supported */
9552 return -TARGET_EOPNOTSUPP
;
9554 if (!new_fr
&& (env
->active_fpu
.fcr0
& (1 << FCR0_F64
))
9555 && !(env
->CP0_Status_rw_bitmask
& (1 << CP0St_FR
))) {
9556 /* cannot set FR=0 */
9557 return -TARGET_EOPNOTSUPP
;
9559 if (new_fre
&& !(env
->active_fpu
.fcr0
& (1 << FCR0_FREP
))) {
9560 /* Cannot set FRE=1 */
9561 return -TARGET_EOPNOTSUPP
;
9565 fpr_t
*fpr
= env
->active_fpu
.fpr
;
9566 for (i
= 0; i
< 32 ; i
+= 2) {
9567 if (!old_fr
&& new_fr
) {
9568 fpr
[i
].w
[!FP_ENDIAN_IDX
] = fpr
[i
+ 1].w
[FP_ENDIAN_IDX
];
9569 } else if (old_fr
&& !new_fr
) {
9570 fpr
[i
+ 1].w
[FP_ENDIAN_IDX
] = fpr
[i
].w
[!FP_ENDIAN_IDX
];
9575 env
->CP0_Status
|= (1 << CP0St_FR
);
9576 env
->hflags
|= MIPS_HFLAG_F64
;
9578 env
->CP0_Status
&= ~(1 << CP0St_FR
);
9581 env
->CP0_Config5
|= (1 << CP0C5_FRE
);
9582 if (env
->active_fpu
.fcr0
& (1 << FCR0_FREP
)) {
9583 env
->hflags
|= MIPS_HFLAG_FRE
;
9586 env
->CP0_Config5
&= ~(1 << CP0C5_FRE
);
9592 #ifdef TARGET_AARCH64
9593 case TARGET_PR_SVE_SET_VL
:
9595 * We cannot support either PR_SVE_SET_VL_ONEXEC or
9596 * PR_SVE_VL_INHERIT. Note the kernel definition
9597 * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
9598 * even though the current architectural maximum is VQ=16.
9600 ret
= -TARGET_EINVAL
;
9601 if (cpu_isar_feature(aa64_sve
, arm_env_get_cpu(cpu_env
))
9602 && arg2
>= 0 && arg2
<= 512 * 16 && !(arg2
& 15)) {
9603 CPUARMState
*env
= cpu_env
;
9604 ARMCPU
*cpu
= arm_env_get_cpu(env
);
9605 uint32_t vq
, old_vq
;
9607 old_vq
= (env
->vfp
.zcr_el
[1] & 0xf) + 1;
9608 vq
= MAX(arg2
/ 16, 1);
9609 vq
= MIN(vq
, cpu
->sve_max_vq
);
9612 aarch64_sve_narrow_vq(env
, vq
);
9614 env
->vfp
.zcr_el
[1] = vq
- 1;
9618 case TARGET_PR_SVE_GET_VL
:
9619 ret
= -TARGET_EINVAL
;
9621 ARMCPU
*cpu
= arm_env_get_cpu(cpu_env
);
9622 if (cpu_isar_feature(aa64_sve
, cpu
)) {
9623 ret
= ((cpu
->env
.vfp
.zcr_el
[1] & 0xf) + 1) * 16;
9627 #endif /* AARCH64 */
9628 case PR_GET_SECCOMP
:
9629 case PR_SET_SECCOMP
:
9630 /* Disable seccomp to prevent the target disabling syscalls we
9632 return -TARGET_EINVAL
;
9634 /* Most prctl options have no pointer arguments */
9635 return get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
9638 #ifdef TARGET_NR_arch_prctl
9639 case TARGET_NR_arch_prctl
:
9640 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9641 return do_arch_prctl(cpu_env
, arg1
, arg2
);
9646 #ifdef TARGET_NR_pread64
9647 case TARGET_NR_pread64
:
9648 if (regpairs_aligned(cpu_env
, num
)) {
9652 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
9653 return -TARGET_EFAULT
;
9654 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
9655 unlock_user(p
, arg2
, ret
);
9657 case TARGET_NR_pwrite64
:
9658 if (regpairs_aligned(cpu_env
, num
)) {
9662 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
9663 return -TARGET_EFAULT
;
9664 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
9665 unlock_user(p
, arg2
, 0);
9668 case TARGET_NR_getcwd
:
9669 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
9670 return -TARGET_EFAULT
;
9671 ret
= get_errno(sys_getcwd1(p
, arg2
));
9672 unlock_user(p
, arg1
, ret
);
9674 case TARGET_NR_capget
:
9675 case TARGET_NR_capset
:
9677 struct target_user_cap_header
*target_header
;
9678 struct target_user_cap_data
*target_data
= NULL
;
9679 struct __user_cap_header_struct header
;
9680 struct __user_cap_data_struct data
[2];
9681 struct __user_cap_data_struct
*dataptr
= NULL
;
9682 int i
, target_datalen
;
9685 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
9686 return -TARGET_EFAULT
;
9688 header
.version
= tswap32(target_header
->version
);
9689 header
.pid
= tswap32(target_header
->pid
);
9691 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
9692 /* Version 2 and up takes pointer to two user_data structs */
9696 target_datalen
= sizeof(*target_data
) * data_items
;
9699 if (num
== TARGET_NR_capget
) {
9700 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
9702 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
9705 unlock_user_struct(target_header
, arg1
, 0);
9706 return -TARGET_EFAULT
;
9709 if (num
== TARGET_NR_capset
) {
9710 for (i
= 0; i
< data_items
; i
++) {
9711 data
[i
].effective
= tswap32(target_data
[i
].effective
);
9712 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
9713 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
9720 if (num
== TARGET_NR_capget
) {
9721 ret
= get_errno(capget(&header
, dataptr
));
9723 ret
= get_errno(capset(&header
, dataptr
));
9726 /* The kernel always updates version for both capget and capset */
9727 target_header
->version
= tswap32(header
.version
);
9728 unlock_user_struct(target_header
, arg1
, 1);
9731 if (num
== TARGET_NR_capget
) {
9732 for (i
= 0; i
< data_items
; i
++) {
9733 target_data
[i
].effective
= tswap32(data
[i
].effective
);
9734 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
9735 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
9737 unlock_user(target_data
, arg2
, target_datalen
);
9739 unlock_user(target_data
, arg2
, 0);
9744 case TARGET_NR_sigaltstack
:
9745 return do_sigaltstack(arg1
, arg2
,
9746 get_sp_from_cpustate((CPUArchState
*)cpu_env
));
9748 #ifdef CONFIG_SENDFILE
9749 #ifdef TARGET_NR_sendfile
9750 case TARGET_NR_sendfile
:
9755 ret
= get_user_sal(off
, arg3
);
9756 if (is_error(ret
)) {
9761 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
9762 if (!is_error(ret
) && arg3
) {
9763 abi_long ret2
= put_user_sal(off
, arg3
);
9764 if (is_error(ret2
)) {
9771 #ifdef TARGET_NR_sendfile64
9772 case TARGET_NR_sendfile64
:
9777 ret
= get_user_s64(off
, arg3
);
9778 if (is_error(ret
)) {
9783 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
9784 if (!is_error(ret
) && arg3
) {
9785 abi_long ret2
= put_user_s64(off
, arg3
);
9786 if (is_error(ret2
)) {
9794 #ifdef TARGET_NR_vfork
9795 case TARGET_NR_vfork
:
9796 return get_errno(do_fork(cpu_env
,
9797 CLONE_VFORK
| CLONE_VM
| TARGET_SIGCHLD
,
9800 #ifdef TARGET_NR_ugetrlimit
9801 case TARGET_NR_ugetrlimit
:
9804 int resource
= target_to_host_resource(arg1
);
9805 ret
= get_errno(getrlimit(resource
, &rlim
));
9806 if (!is_error(ret
)) {
9807 struct target_rlimit
*target_rlim
;
9808 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
9809 return -TARGET_EFAULT
;
9810 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
9811 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
9812 unlock_user_struct(target_rlim
, arg2
, 1);
9817 #ifdef TARGET_NR_truncate64
9818 case TARGET_NR_truncate64
:
9819 if (!(p
= lock_user_string(arg1
)))
9820 return -TARGET_EFAULT
;
9821 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
9822 unlock_user(p
, arg1
, 0);
9825 #ifdef TARGET_NR_ftruncate64
9826 case TARGET_NR_ftruncate64
:
9827 return target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
9829 #ifdef TARGET_NR_stat64
9830 case TARGET_NR_stat64
:
9831 if (!(p
= lock_user_string(arg1
))) {
9832 return -TARGET_EFAULT
;
9834 ret
= get_errno(stat(path(p
), &st
));
9835 unlock_user(p
, arg1
, 0);
9837 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
9840 #ifdef TARGET_NR_lstat64
9841 case TARGET_NR_lstat64
:
9842 if (!(p
= lock_user_string(arg1
))) {
9843 return -TARGET_EFAULT
;
9845 ret
= get_errno(lstat(path(p
), &st
));
9846 unlock_user(p
, arg1
, 0);
9848 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
9851 #ifdef TARGET_NR_fstat64
9852 case TARGET_NR_fstat64
:
9853 ret
= get_errno(fstat(arg1
, &st
));
9855 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
9858 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
9859 #ifdef TARGET_NR_fstatat64
9860 case TARGET_NR_fstatat64
:
9862 #ifdef TARGET_NR_newfstatat
9863 case TARGET_NR_newfstatat
:
9865 if (!(p
= lock_user_string(arg2
))) {
9866 return -TARGET_EFAULT
;
9868 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
9869 unlock_user(p
, arg2
, 0);
9871 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
9874 #ifdef TARGET_NR_lchown
9875 case TARGET_NR_lchown
:
9876 if (!(p
= lock_user_string(arg1
)))
9877 return -TARGET_EFAULT
;
9878 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
9879 unlock_user(p
, arg1
, 0);
9882 #ifdef TARGET_NR_getuid
9883 case TARGET_NR_getuid
:
9884 return get_errno(high2lowuid(getuid()));
9886 #ifdef TARGET_NR_getgid
9887 case TARGET_NR_getgid
:
9888 return get_errno(high2lowgid(getgid()));
9890 #ifdef TARGET_NR_geteuid
9891 case TARGET_NR_geteuid
:
9892 return get_errno(high2lowuid(geteuid()));
9894 #ifdef TARGET_NR_getegid
9895 case TARGET_NR_getegid
:
9896 return get_errno(high2lowgid(getegid()));
9898 case TARGET_NR_setreuid
:
9899 return get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
9900 case TARGET_NR_setregid
:
9901 return get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
9902 case TARGET_NR_getgroups
:
9904 int gidsetsize
= arg1
;
9905 target_id
*target_grouplist
;
9909 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
9910 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
9911 if (gidsetsize
== 0)
9913 if (!is_error(ret
)) {
9914 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
9915 if (!target_grouplist
)
9916 return -TARGET_EFAULT
;
9917 for(i
= 0;i
< ret
; i
++)
9918 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
9919 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
9923 case TARGET_NR_setgroups
:
9925 int gidsetsize
= arg1
;
9926 target_id
*target_grouplist
;
9927 gid_t
*grouplist
= NULL
;
9930 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
9931 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
9932 if (!target_grouplist
) {
9933 return -TARGET_EFAULT
;
9935 for (i
= 0; i
< gidsetsize
; i
++) {
9936 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
9938 unlock_user(target_grouplist
, arg2
, 0);
9940 return get_errno(setgroups(gidsetsize
, grouplist
));
9942 case TARGET_NR_fchown
:
9943 return get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
9944 #if defined(TARGET_NR_fchownat)
9945 case TARGET_NR_fchownat
:
9946 if (!(p
= lock_user_string(arg2
)))
9947 return -TARGET_EFAULT
;
9948 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
9949 low2highgid(arg4
), arg5
));
9950 unlock_user(p
, arg2
, 0);
9953 #ifdef TARGET_NR_setresuid
9954 case TARGET_NR_setresuid
:
9955 return get_errno(sys_setresuid(low2highuid(arg1
),
9957 low2highuid(arg3
)));
9959 #ifdef TARGET_NR_getresuid
9960 case TARGET_NR_getresuid
:
9962 uid_t ruid
, euid
, suid
;
9963 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
9964 if (!is_error(ret
)) {
9965 if (put_user_id(high2lowuid(ruid
), arg1
)
9966 || put_user_id(high2lowuid(euid
), arg2
)
9967 || put_user_id(high2lowuid(suid
), arg3
))
9968 return -TARGET_EFAULT
;
9973 #ifdef TARGET_NR_getresgid
9974 case TARGET_NR_setresgid
:
9975 return get_errno(sys_setresgid(low2highgid(arg1
),
9977 low2highgid(arg3
)));
9979 #ifdef TARGET_NR_getresgid
9980 case TARGET_NR_getresgid
:
9982 gid_t rgid
, egid
, sgid
;
9983 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
9984 if (!is_error(ret
)) {
9985 if (put_user_id(high2lowgid(rgid
), arg1
)
9986 || put_user_id(high2lowgid(egid
), arg2
)
9987 || put_user_id(high2lowgid(sgid
), arg3
))
9988 return -TARGET_EFAULT
;
9993 #ifdef TARGET_NR_chown
9994 case TARGET_NR_chown
:
9995 if (!(p
= lock_user_string(arg1
)))
9996 return -TARGET_EFAULT
;
9997 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
9998 unlock_user(p
, arg1
, 0);
10001 case TARGET_NR_setuid
:
10002 return get_errno(sys_setuid(low2highuid(arg1
)));
10003 case TARGET_NR_setgid
:
10004 return get_errno(sys_setgid(low2highgid(arg1
)));
10005 case TARGET_NR_setfsuid
:
10006 return get_errno(setfsuid(arg1
));
10007 case TARGET_NR_setfsgid
:
10008 return get_errno(setfsgid(arg1
));
10010 #ifdef TARGET_NR_lchown32
10011 case TARGET_NR_lchown32
:
10012 if (!(p
= lock_user_string(arg1
)))
10013 return -TARGET_EFAULT
;
10014 ret
= get_errno(lchown(p
, arg2
, arg3
));
10015 unlock_user(p
, arg1
, 0);
10018 #ifdef TARGET_NR_getuid32
10019 case TARGET_NR_getuid32
:
10020 return get_errno(getuid());
10023 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10024 /* Alpha specific */
10025 case TARGET_NR_getxuid
:
10029 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
10031 return get_errno(getuid());
10033 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10034 /* Alpha specific */
10035 case TARGET_NR_getxgid
:
10039 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
10041 return get_errno(getgid());
10043 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10044 /* Alpha specific */
10045 case TARGET_NR_osf_getsysinfo
:
10046 ret
= -TARGET_EOPNOTSUPP
;
10048 case TARGET_GSI_IEEE_FP_CONTROL
:
10050 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
10052 /* Copied from linux ieee_fpcr_to_swcr. */
10053 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
10054 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
10055 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
10056 | SWCR_TRAP_ENABLE_DZE
10057 | SWCR_TRAP_ENABLE_OVF
);
10058 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
10059 | SWCR_TRAP_ENABLE_INE
);
10060 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
10061 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
10063 if (put_user_u64 (swcr
, arg2
))
10064 return -TARGET_EFAULT
;
10069 /* case GSI_IEEE_STATE_AT_SIGNAL:
10070 -- Not implemented in linux kernel.
10072 -- Retrieves current unaligned access state; not much used.
10073 case GSI_PROC_TYPE:
10074 -- Retrieves implver information; surely not used.
10075 case GSI_GET_HWRPB:
10076 -- Grabs a copy of the HWRPB; surely not used.
10081 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10082 /* Alpha specific */
10083 case TARGET_NR_osf_setsysinfo
:
10084 ret
= -TARGET_EOPNOTSUPP
;
10086 case TARGET_SSI_IEEE_FP_CONTROL
:
10088 uint64_t swcr
, fpcr
, orig_fpcr
;
10090 if (get_user_u64 (swcr
, arg2
)) {
10091 return -TARGET_EFAULT
;
10093 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10094 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
10096 /* Copied from linux ieee_swcr_to_fpcr. */
10097 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
10098 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
10099 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
10100 | SWCR_TRAP_ENABLE_DZE
10101 | SWCR_TRAP_ENABLE_OVF
)) << 48;
10102 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
10103 | SWCR_TRAP_ENABLE_INE
)) << 57;
10104 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
10105 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
10107 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
10112 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
10114 uint64_t exc
, fpcr
, orig_fpcr
;
10117 if (get_user_u64(exc
, arg2
)) {
10118 return -TARGET_EFAULT
;
10121 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10123 /* We only add to the exception status here. */
10124 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
10126 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
10129 /* Old exceptions are not signaled. */
10130 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
10132 /* If any exceptions set by this call,
10133 and are unmasked, send a signal. */
10135 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
10136 si_code
= TARGET_FPE_FLTRES
;
10138 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
10139 si_code
= TARGET_FPE_FLTUND
;
10141 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
10142 si_code
= TARGET_FPE_FLTOVF
;
10144 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
10145 si_code
= TARGET_FPE_FLTDIV
;
10147 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
10148 si_code
= TARGET_FPE_FLTINV
;
10150 if (si_code
!= 0) {
10151 target_siginfo_t info
;
10152 info
.si_signo
= SIGFPE
;
10154 info
.si_code
= si_code
;
10155 info
._sifields
._sigfault
._addr
10156 = ((CPUArchState
*)cpu_env
)->pc
;
10157 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
10158 QEMU_SI_FAULT
, &info
);
10163 /* case SSI_NVPAIRS:
10164 -- Used with SSIN_UACPROC to enable unaligned accesses.
10165 case SSI_IEEE_STATE_AT_SIGNAL:
10166 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10167 -- Not implemented in linux kernel
10172 #ifdef TARGET_NR_osf_sigprocmask
10173 /* Alpha specific. */
10174 case TARGET_NR_osf_sigprocmask
:
10178 sigset_t set
, oldset
;
10181 case TARGET_SIG_BLOCK
:
10184 case TARGET_SIG_UNBLOCK
:
10187 case TARGET_SIG_SETMASK
:
10191 return -TARGET_EINVAL
;
10194 target_to_host_old_sigset(&set
, &mask
);
10195 ret
= do_sigprocmask(how
, &set
, &oldset
);
10197 host_to_target_old_sigset(&mask
, &oldset
);
10204 #ifdef TARGET_NR_getgid32
10205 case TARGET_NR_getgid32
:
10206 return get_errno(getgid());
10208 #ifdef TARGET_NR_geteuid32
10209 case TARGET_NR_geteuid32
:
10210 return get_errno(geteuid());
10212 #ifdef TARGET_NR_getegid32
10213 case TARGET_NR_getegid32
:
10214 return get_errno(getegid());
10216 #ifdef TARGET_NR_setreuid32
10217 case TARGET_NR_setreuid32
:
10218 return get_errno(setreuid(arg1
, arg2
));
10220 #ifdef TARGET_NR_setregid32
10221 case TARGET_NR_setregid32
:
10222 return get_errno(setregid(arg1
, arg2
));
10224 #ifdef TARGET_NR_getgroups32
10225 case TARGET_NR_getgroups32
:
10227 int gidsetsize
= arg1
;
10228 uint32_t *target_grouplist
;
10232 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10233 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10234 if (gidsetsize
== 0)
10236 if (!is_error(ret
)) {
10237 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
10238 if (!target_grouplist
) {
10239 return -TARGET_EFAULT
;
10241 for(i
= 0;i
< ret
; i
++)
10242 target_grouplist
[i
] = tswap32(grouplist
[i
]);
10243 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
10248 #ifdef TARGET_NR_setgroups32
10249 case TARGET_NR_setgroups32
:
10251 int gidsetsize
= arg1
;
10252 uint32_t *target_grouplist
;
10256 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10257 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
10258 if (!target_grouplist
) {
10259 return -TARGET_EFAULT
;
10261 for(i
= 0;i
< gidsetsize
; i
++)
10262 grouplist
[i
] = tswap32(target_grouplist
[i
]);
10263 unlock_user(target_grouplist
, arg2
, 0);
10264 return get_errno(setgroups(gidsetsize
, grouplist
));
10267 #ifdef TARGET_NR_fchown32
10268 case TARGET_NR_fchown32
:
10269 return get_errno(fchown(arg1
, arg2
, arg3
));
10271 #ifdef TARGET_NR_setresuid32
10272 case TARGET_NR_setresuid32
:
10273 return get_errno(sys_setresuid(arg1
, arg2
, arg3
));
10275 #ifdef TARGET_NR_getresuid32
10276 case TARGET_NR_getresuid32
:
10278 uid_t ruid
, euid
, suid
;
10279 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
10280 if (!is_error(ret
)) {
10281 if (put_user_u32(ruid
, arg1
)
10282 || put_user_u32(euid
, arg2
)
10283 || put_user_u32(suid
, arg3
))
10284 return -TARGET_EFAULT
;
10289 #ifdef TARGET_NR_setresgid32
10290 case TARGET_NR_setresgid32
:
10291 return get_errno(sys_setresgid(arg1
, arg2
, arg3
));
10293 #ifdef TARGET_NR_getresgid32
10294 case TARGET_NR_getresgid32
:
10296 gid_t rgid
, egid
, sgid
;
10297 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
10298 if (!is_error(ret
)) {
10299 if (put_user_u32(rgid
, arg1
)
10300 || put_user_u32(egid
, arg2
)
10301 || put_user_u32(sgid
, arg3
))
10302 return -TARGET_EFAULT
;
10307 #ifdef TARGET_NR_chown32
10308 case TARGET_NR_chown32
:
10309 if (!(p
= lock_user_string(arg1
)))
10310 return -TARGET_EFAULT
;
10311 ret
= get_errno(chown(p
, arg2
, arg3
));
10312 unlock_user(p
, arg1
, 0);
10315 #ifdef TARGET_NR_setuid32
10316 case TARGET_NR_setuid32
:
10317 return get_errno(sys_setuid(arg1
));
10319 #ifdef TARGET_NR_setgid32
10320 case TARGET_NR_setgid32
:
10321 return get_errno(sys_setgid(arg1
));
10323 #ifdef TARGET_NR_setfsuid32
10324 case TARGET_NR_setfsuid32
:
10325 return get_errno(setfsuid(arg1
));
10327 #ifdef TARGET_NR_setfsgid32
10328 case TARGET_NR_setfsgid32
:
10329 return get_errno(setfsgid(arg1
));
10331 #ifdef TARGET_NR_mincore
10332 case TARGET_NR_mincore
:
10334 void *a
= lock_user(VERIFY_READ
, arg1
, arg2
, 0);
10336 return -TARGET_ENOMEM
;
10338 p
= lock_user_string(arg3
);
10340 ret
= -TARGET_EFAULT
;
10342 ret
= get_errno(mincore(a
, arg2
, p
));
10343 unlock_user(p
, arg3
, ret
);
10345 unlock_user(a
, arg1
, 0);
10349 #ifdef TARGET_NR_arm_fadvise64_64
10350 case TARGET_NR_arm_fadvise64_64
:
10351 /* arm_fadvise64_64 looks like fadvise64_64 but
10352 * with different argument order: fd, advice, offset, len
10353 * rather than the usual fd, offset, len, advice.
10354 * Note that offset and len are both 64-bit so appear as
10355 * pairs of 32-bit registers.
10357 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
10358 target_offset64(arg5
, arg6
), arg2
);
10359 return -host_to_target_errno(ret
);
10362 #if TARGET_ABI_BITS == 32
10364 #ifdef TARGET_NR_fadvise64_64
10365 case TARGET_NR_fadvise64_64
:
10366 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
10367 /* 6 args: fd, advice, offset (high, low), len (high, low) */
10375 /* 6 args: fd, offset (high, low), len (high, low), advice */
10376 if (regpairs_aligned(cpu_env
, num
)) {
10377 /* offset is in (3,4), len in (5,6) and advice in 7 */
10385 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
),
10386 target_offset64(arg4
, arg5
), arg6
);
10387 return -host_to_target_errno(ret
);
10390 #ifdef TARGET_NR_fadvise64
10391 case TARGET_NR_fadvise64
:
10392 /* 5 args: fd, offset (high, low), len, advice */
10393 if (regpairs_aligned(cpu_env
, num
)) {
10394 /* offset is in (3,4), len in 5 and advice in 6 */
10400 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
), arg4
, arg5
);
10401 return -host_to_target_errno(ret
);
10404 #else /* not a 32-bit ABI */
10405 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10406 #ifdef TARGET_NR_fadvise64_64
10407 case TARGET_NR_fadvise64_64
:
10409 #ifdef TARGET_NR_fadvise64
10410 case TARGET_NR_fadvise64
:
10412 #ifdef TARGET_S390X
10414 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
10415 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
10416 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
10417 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
10421 return -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
10423 #endif /* end of 64-bit ABI fadvise handling */
10425 #ifdef TARGET_NR_madvise
10426 case TARGET_NR_madvise
:
10427 /* A straight passthrough may not be safe because qemu sometimes
10428 turns private file-backed mappings into anonymous mappings.
10429 This will break MADV_DONTNEED.
10430 This is a hint, so ignoring and returning success is ok. */
10433 #if TARGET_ABI_BITS == 32
10434 case TARGET_NR_fcntl64
:
10438 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
10439 to_flock64_fn
*copyto
= copy_to_user_flock64
;
10442 if (!((CPUARMState
*)cpu_env
)->eabi
) {
10443 copyfrom
= copy_from_user_oabi_flock64
;
10444 copyto
= copy_to_user_oabi_flock64
;
10448 cmd
= target_to_host_fcntl_cmd(arg2
);
10449 if (cmd
== -TARGET_EINVAL
) {
10454 case TARGET_F_GETLK64
:
10455 ret
= copyfrom(&fl
, arg3
);
10459 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
10461 ret
= copyto(arg3
, &fl
);
10465 case TARGET_F_SETLK64
:
10466 case TARGET_F_SETLKW64
:
10467 ret
= copyfrom(&fl
, arg3
);
10471 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
10474 ret
= do_fcntl(arg1
, arg2
, arg3
);
10480 #ifdef TARGET_NR_cacheflush
10481 case TARGET_NR_cacheflush
:
10482 /* self-modifying code is handled automatically, so nothing needed */
10485 #ifdef TARGET_NR_getpagesize
10486 case TARGET_NR_getpagesize
:
10487 return TARGET_PAGE_SIZE
;
10489 case TARGET_NR_gettid
:
10490 return get_errno(gettid());
10491 #ifdef TARGET_NR_readahead
10492 case TARGET_NR_readahead
:
10493 #if TARGET_ABI_BITS == 32
10494 if (regpairs_aligned(cpu_env
, num
)) {
10499 ret
= get_errno(readahead(arg1
, target_offset64(arg2
, arg3
) , arg4
));
10501 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
10506 #ifdef TARGET_NR_setxattr
10507 case TARGET_NR_listxattr
:
10508 case TARGET_NR_llistxattr
:
10512 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10514 return -TARGET_EFAULT
;
10517 p
= lock_user_string(arg1
);
10519 if (num
== TARGET_NR_listxattr
) {
10520 ret
= get_errno(listxattr(p
, b
, arg3
));
10522 ret
= get_errno(llistxattr(p
, b
, arg3
));
10525 ret
= -TARGET_EFAULT
;
10527 unlock_user(p
, arg1
, 0);
10528 unlock_user(b
, arg2
, arg3
);
10531 case TARGET_NR_flistxattr
:
10535 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10537 return -TARGET_EFAULT
;
10540 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
10541 unlock_user(b
, arg2
, arg3
);
10544 case TARGET_NR_setxattr
:
10545 case TARGET_NR_lsetxattr
:
10547 void *p
, *n
, *v
= 0;
10549 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
10551 return -TARGET_EFAULT
;
10554 p
= lock_user_string(arg1
);
10555 n
= lock_user_string(arg2
);
10557 if (num
== TARGET_NR_setxattr
) {
10558 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
10560 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
10563 ret
= -TARGET_EFAULT
;
10565 unlock_user(p
, arg1
, 0);
10566 unlock_user(n
, arg2
, 0);
10567 unlock_user(v
, arg3
, 0);
10570 case TARGET_NR_fsetxattr
:
10574 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
10576 return -TARGET_EFAULT
;
10579 n
= lock_user_string(arg2
);
10581 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
10583 ret
= -TARGET_EFAULT
;
10585 unlock_user(n
, arg2
, 0);
10586 unlock_user(v
, arg3
, 0);
10589 case TARGET_NR_getxattr
:
10590 case TARGET_NR_lgetxattr
:
10592 void *p
, *n
, *v
= 0;
10594 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
10596 return -TARGET_EFAULT
;
10599 p
= lock_user_string(arg1
);
10600 n
= lock_user_string(arg2
);
10602 if (num
== TARGET_NR_getxattr
) {
10603 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
10605 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
10608 ret
= -TARGET_EFAULT
;
10610 unlock_user(p
, arg1
, 0);
10611 unlock_user(n
, arg2
, 0);
10612 unlock_user(v
, arg3
, arg4
);
10615 case TARGET_NR_fgetxattr
:
10619 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
10621 return -TARGET_EFAULT
;
10624 n
= lock_user_string(arg2
);
10626 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
10628 ret
= -TARGET_EFAULT
;
10630 unlock_user(n
, arg2
, 0);
10631 unlock_user(v
, arg3
, arg4
);
10634 case TARGET_NR_removexattr
:
10635 case TARGET_NR_lremovexattr
:
10638 p
= lock_user_string(arg1
);
10639 n
= lock_user_string(arg2
);
10641 if (num
== TARGET_NR_removexattr
) {
10642 ret
= get_errno(removexattr(p
, n
));
10644 ret
= get_errno(lremovexattr(p
, n
));
10647 ret
= -TARGET_EFAULT
;
10649 unlock_user(p
, arg1
, 0);
10650 unlock_user(n
, arg2
, 0);
10653 case TARGET_NR_fremovexattr
:
10656 n
= lock_user_string(arg2
);
10658 ret
= get_errno(fremovexattr(arg1
, n
));
10660 ret
= -TARGET_EFAULT
;
10662 unlock_user(n
, arg2
, 0);
10666 #endif /* CONFIG_ATTR */
10667 #ifdef TARGET_NR_set_thread_area
10668 case TARGET_NR_set_thread_area
:
10669 #if defined(TARGET_MIPS)
10670 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
10672 #elif defined(TARGET_CRIS)
10674 ret
= -TARGET_EINVAL
;
10676 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
10680 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
10681 return do_set_thread_area(cpu_env
, arg1
);
10682 #elif defined(TARGET_M68K)
10684 TaskState
*ts
= cpu
->opaque
;
10685 ts
->tp_value
= arg1
;
10689 return -TARGET_ENOSYS
;
10692 #ifdef TARGET_NR_get_thread_area
10693 case TARGET_NR_get_thread_area
:
10694 #if defined(TARGET_I386) && defined(TARGET_ABI32)
10695 return do_get_thread_area(cpu_env
, arg1
);
10696 #elif defined(TARGET_M68K)
10698 TaskState
*ts
= cpu
->opaque
;
10699 return ts
->tp_value
;
10702 return -TARGET_ENOSYS
;
10705 #ifdef TARGET_NR_getdomainname
10706 case TARGET_NR_getdomainname
:
10707 return -TARGET_ENOSYS
;
10710 #ifdef TARGET_NR_clock_settime
10711 case TARGET_NR_clock_settime
:
10713 struct timespec ts
;
10715 ret
= target_to_host_timespec(&ts
, arg2
);
10716 if (!is_error(ret
)) {
10717 ret
= get_errno(clock_settime(arg1
, &ts
));
10722 #ifdef TARGET_NR_clock_gettime
10723 case TARGET_NR_clock_gettime
:
10725 struct timespec ts
;
10726 ret
= get_errno(clock_gettime(arg1
, &ts
));
10727 if (!is_error(ret
)) {
10728 ret
= host_to_target_timespec(arg2
, &ts
);
10733 #ifdef TARGET_NR_clock_getres
10734 case TARGET_NR_clock_getres
:
10736 struct timespec ts
;
10737 ret
= get_errno(clock_getres(arg1
, &ts
));
10738 if (!is_error(ret
)) {
10739 host_to_target_timespec(arg2
, &ts
);
10744 #ifdef TARGET_NR_clock_nanosleep
10745 case TARGET_NR_clock_nanosleep
:
10747 struct timespec ts
;
10748 target_to_host_timespec(&ts
, arg3
);
10749 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
10750 &ts
, arg4
? &ts
: NULL
));
10752 host_to_target_timespec(arg4
, &ts
);
10754 #if defined(TARGET_PPC)
10755 /* clock_nanosleep is odd in that it returns positive errno values.
10756 * On PPC, CR0 bit 3 should be set in such a situation. */
10757 if (ret
&& ret
!= -TARGET_ERESTARTSYS
) {
10758 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
10765 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
10766 case TARGET_NR_set_tid_address
:
10767 return get_errno(set_tid_address((int *)g2h(arg1
)));
10770 case TARGET_NR_tkill
:
10771 return get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
10773 case TARGET_NR_tgkill
:
10774 return get_errno(safe_tgkill((int)arg1
, (int)arg2
,
10775 target_to_host_signal(arg3
)));
10777 #ifdef TARGET_NR_set_robust_list
10778 case TARGET_NR_set_robust_list
:
10779 case TARGET_NR_get_robust_list
:
10780 /* The ABI for supporting robust futexes has userspace pass
10781 * the kernel a pointer to a linked list which is updated by
10782 * userspace after the syscall; the list is walked by the kernel
10783 * when the thread exits. Since the linked list in QEMU guest
10784 * memory isn't a valid linked list for the host and we have
10785 * no way to reliably intercept the thread-death event, we can't
10786 * support these. Silently return ENOSYS so that guest userspace
10787 * falls back to a non-robust futex implementation (which should
10788 * be OK except in the corner case of the guest crashing while
10789 * holding a mutex that is shared with another process via
10792 return -TARGET_ENOSYS
;
10795 #if defined(TARGET_NR_utimensat)
10796 case TARGET_NR_utimensat
:
10798 struct timespec
*tsp
, ts
[2];
10802 target_to_host_timespec(ts
, arg3
);
10803 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
10807 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
10809 if (!(p
= lock_user_string(arg2
))) {
10810 return -TARGET_EFAULT
;
10812 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
10813 unlock_user(p
, arg2
, 0);
10818 case TARGET_NR_futex
:
10819 return do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
10820 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
10821 case TARGET_NR_inotify_init
:
10822 ret
= get_errno(sys_inotify_init());
10824 fd_trans_register(ret
, &target_inotify_trans
);
10828 #ifdef CONFIG_INOTIFY1
10829 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
10830 case TARGET_NR_inotify_init1
:
10831 ret
= get_errno(sys_inotify_init1(target_to_host_bitmask(arg1
,
10832 fcntl_flags_tbl
)));
10834 fd_trans_register(ret
, &target_inotify_trans
);
10839 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
10840 case TARGET_NR_inotify_add_watch
:
10841 p
= lock_user_string(arg2
);
10842 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
10843 unlock_user(p
, arg2
, 0);
10846 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
10847 case TARGET_NR_inotify_rm_watch
:
10848 return get_errno(sys_inotify_rm_watch(arg1
, arg2
));
10851 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
10852 case TARGET_NR_mq_open
:
10854 struct mq_attr posix_mq_attr
;
10855 struct mq_attr
*pposix_mq_attr
;
10858 host_flags
= target_to_host_bitmask(arg2
, fcntl_flags_tbl
);
10859 pposix_mq_attr
= NULL
;
10861 if (copy_from_user_mq_attr(&posix_mq_attr
, arg4
) != 0) {
10862 return -TARGET_EFAULT
;
10864 pposix_mq_attr
= &posix_mq_attr
;
10866 p
= lock_user_string(arg1
- 1);
10868 return -TARGET_EFAULT
;
10870 ret
= get_errno(mq_open(p
, host_flags
, arg3
, pposix_mq_attr
));
10871 unlock_user (p
, arg1
, 0);
10875 case TARGET_NR_mq_unlink
:
10876 p
= lock_user_string(arg1
- 1);
10878 return -TARGET_EFAULT
;
10880 ret
= get_errno(mq_unlink(p
));
10881 unlock_user (p
, arg1
, 0);
10884 case TARGET_NR_mq_timedsend
:
10886 struct timespec ts
;
10888 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
10890 target_to_host_timespec(&ts
, arg5
);
10891 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
10892 host_to_target_timespec(arg5
, &ts
);
10894 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
10896 unlock_user (p
, arg2
, arg3
);
10900 case TARGET_NR_mq_timedreceive
:
10902 struct timespec ts
;
10905 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
10907 target_to_host_timespec(&ts
, arg5
);
10908 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
10910 host_to_target_timespec(arg5
, &ts
);
10912 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
10915 unlock_user (p
, arg2
, arg3
);
10917 put_user_u32(prio
, arg4
);
10921 /* Not implemented for now... */
10922 /* case TARGET_NR_mq_notify: */
10925 case TARGET_NR_mq_getsetattr
:
10927 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
10930 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
10931 ret
= get_errno(mq_setattr(arg1
, &posix_mq_attr_in
,
10932 &posix_mq_attr_out
));
10933 } else if (arg3
!= 0) {
10934 ret
= get_errno(mq_getattr(arg1
, &posix_mq_attr_out
));
10936 if (ret
== 0 && arg3
!= 0) {
10937 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
10943 #ifdef CONFIG_SPLICE
10944 #ifdef TARGET_NR_tee
10945 case TARGET_NR_tee
:
10947 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
10951 #ifdef TARGET_NR_splice
10952 case TARGET_NR_splice
:
10954 loff_t loff_in
, loff_out
;
10955 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
10957 if (get_user_u64(loff_in
, arg2
)) {
10958 return -TARGET_EFAULT
;
10960 ploff_in
= &loff_in
;
10963 if (get_user_u64(loff_out
, arg4
)) {
10964 return -TARGET_EFAULT
;
10966 ploff_out
= &loff_out
;
10968 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
10970 if (put_user_u64(loff_in
, arg2
)) {
10971 return -TARGET_EFAULT
;
10975 if (put_user_u64(loff_out
, arg4
)) {
10976 return -TARGET_EFAULT
;
10982 #ifdef TARGET_NR_vmsplice
10983 case TARGET_NR_vmsplice
:
10985 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10987 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
10988 unlock_iovec(vec
, arg2
, arg3
, 0);
10990 ret
= -host_to_target_errno(errno
);
10995 #endif /* CONFIG_SPLICE */
10996 #ifdef CONFIG_EVENTFD
10997 #if defined(TARGET_NR_eventfd)
10998 case TARGET_NR_eventfd
:
10999 ret
= get_errno(eventfd(arg1
, 0));
11001 fd_trans_register(ret
, &target_eventfd_trans
);
11005 #if defined(TARGET_NR_eventfd2)
11006 case TARGET_NR_eventfd2
:
11008 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
11009 if (arg2
& TARGET_O_NONBLOCK
) {
11010 host_flags
|= O_NONBLOCK
;
11012 if (arg2
& TARGET_O_CLOEXEC
) {
11013 host_flags
|= O_CLOEXEC
;
11015 ret
= get_errno(eventfd(arg1
, host_flags
));
11017 fd_trans_register(ret
, &target_eventfd_trans
);
11022 #endif /* CONFIG_EVENTFD */
11023 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11024 case TARGET_NR_fallocate
:
11025 #if TARGET_ABI_BITS == 32
11026 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
11027 target_offset64(arg5
, arg6
)));
11029 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
11033 #if defined(CONFIG_SYNC_FILE_RANGE)
11034 #if defined(TARGET_NR_sync_file_range)
11035 case TARGET_NR_sync_file_range
:
11036 #if TARGET_ABI_BITS == 32
11037 #if defined(TARGET_MIPS)
11038 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11039 target_offset64(arg5
, arg6
), arg7
));
11041 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
11042 target_offset64(arg4
, arg5
), arg6
));
11043 #endif /* !TARGET_MIPS */
11045 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
11049 #if defined(TARGET_NR_sync_file_range2)
11050 case TARGET_NR_sync_file_range2
:
11051 /* This is like sync_file_range but the arguments are reordered */
11052 #if TARGET_ABI_BITS == 32
11053 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11054 target_offset64(arg5
, arg6
), arg2
));
11056 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
11061 #if defined(TARGET_NR_signalfd4)
11062 case TARGET_NR_signalfd4
:
11063 return do_signalfd4(arg1
, arg2
, arg4
);
11065 #if defined(TARGET_NR_signalfd)
11066 case TARGET_NR_signalfd
:
11067 return do_signalfd4(arg1
, arg2
, 0);
11069 #if defined(CONFIG_EPOLL)
11070 #if defined(TARGET_NR_epoll_create)
11071 case TARGET_NR_epoll_create
:
11072 return get_errno(epoll_create(arg1
));
11074 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11075 case TARGET_NR_epoll_create1
:
11076 return get_errno(epoll_create1(arg1
));
11078 #if defined(TARGET_NR_epoll_ctl)
11079 case TARGET_NR_epoll_ctl
:
11081 struct epoll_event ep
;
11082 struct epoll_event
*epp
= 0;
11084 struct target_epoll_event
*target_ep
;
11085 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
11086 return -TARGET_EFAULT
;
11088 ep
.events
= tswap32(target_ep
->events
);
11089 /* The epoll_data_t union is just opaque data to the kernel,
11090 * so we transfer all 64 bits across and need not worry what
11091 * actual data type it is.
11093 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
11094 unlock_user_struct(target_ep
, arg4
, 0);
11097 return get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
11101 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11102 #if defined(TARGET_NR_epoll_wait)
11103 case TARGET_NR_epoll_wait
:
11105 #if defined(TARGET_NR_epoll_pwait)
11106 case TARGET_NR_epoll_pwait
:
11109 struct target_epoll_event
*target_ep
;
11110 struct epoll_event
*ep
;
11112 int maxevents
= arg3
;
11113 int timeout
= arg4
;
11115 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
11116 return -TARGET_EINVAL
;
11119 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
11120 maxevents
* sizeof(struct target_epoll_event
), 1);
11122 return -TARGET_EFAULT
;
11125 ep
= g_try_new(struct epoll_event
, maxevents
);
11127 unlock_user(target_ep
, arg2
, 0);
11128 return -TARGET_ENOMEM
;
11132 #if defined(TARGET_NR_epoll_pwait)
11133 case TARGET_NR_epoll_pwait
:
11135 target_sigset_t
*target_set
;
11136 sigset_t _set
, *set
= &_set
;
11139 if (arg6
!= sizeof(target_sigset_t
)) {
11140 ret
= -TARGET_EINVAL
;
11144 target_set
= lock_user(VERIFY_READ
, arg5
,
11145 sizeof(target_sigset_t
), 1);
11147 ret
= -TARGET_EFAULT
;
11150 target_to_host_sigset(set
, target_set
);
11151 unlock_user(target_set
, arg5
, 0);
11156 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
11157 set
, SIGSET_T_SIZE
));
11161 #if defined(TARGET_NR_epoll_wait)
11162 case TARGET_NR_epoll_wait
:
11163 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
11168 ret
= -TARGET_ENOSYS
;
11170 if (!is_error(ret
)) {
11172 for (i
= 0; i
< ret
; i
++) {
11173 target_ep
[i
].events
= tswap32(ep
[i
].events
);
11174 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
11176 unlock_user(target_ep
, arg2
,
11177 ret
* sizeof(struct target_epoll_event
));
11179 unlock_user(target_ep
, arg2
, 0);
11186 #ifdef TARGET_NR_prlimit64
11187 case TARGET_NR_prlimit64
:
11189 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11190 struct target_rlimit64
*target_rnew
, *target_rold
;
11191 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
11192 int resource
= target_to_host_resource(arg2
);
11194 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
11195 return -TARGET_EFAULT
;
11197 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
11198 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
11199 unlock_user_struct(target_rnew
, arg3
, 0);
11203 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
11204 if (!is_error(ret
) && arg4
) {
11205 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
11206 return -TARGET_EFAULT
;
11208 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
11209 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
11210 unlock_user_struct(target_rold
, arg4
, 1);
11215 #ifdef TARGET_NR_gethostname
11216 case TARGET_NR_gethostname
:
11218 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
11220 ret
= get_errno(gethostname(name
, arg2
));
11221 unlock_user(name
, arg1
, arg2
);
11223 ret
= -TARGET_EFAULT
;
11228 #ifdef TARGET_NR_atomic_cmpxchg_32
11229 case TARGET_NR_atomic_cmpxchg_32
:
11231 /* should use start_exclusive from main.c */
11232 abi_ulong mem_value
;
11233 if (get_user_u32(mem_value
, arg6
)) {
11234 target_siginfo_t info
;
11235 info
.si_signo
= SIGSEGV
;
11237 info
.si_code
= TARGET_SEGV_MAPERR
;
11238 info
._sifields
._sigfault
._addr
= arg6
;
11239 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
11240 QEMU_SI_FAULT
, &info
);
11244 if (mem_value
== arg2
)
11245 put_user_u32(arg1
, arg6
);
11249 #ifdef TARGET_NR_atomic_barrier
11250 case TARGET_NR_atomic_barrier
:
11251 /* Like the kernel implementation and the
11252 qemu arm barrier, no-op this? */
11256 #ifdef TARGET_NR_timer_create
11257 case TARGET_NR_timer_create
:
11259 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11261 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
11264 int timer_index
= next_free_host_timer();
11266 if (timer_index
< 0) {
11267 ret
= -TARGET_EAGAIN
;
11269 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
11272 phost_sevp
= &host_sevp
;
11273 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
11279 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
11283 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
11284 return -TARGET_EFAULT
;
11292 #ifdef TARGET_NR_timer_settime
11293 case TARGET_NR_timer_settime
:
11295 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11296 * struct itimerspec * old_value */
11297 target_timer_t timerid
= get_timer_id(arg1
);
11301 } else if (arg3
== 0) {
11302 ret
= -TARGET_EINVAL
;
11304 timer_t htimer
= g_posix_timers
[timerid
];
11305 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
11307 if (target_to_host_itimerspec(&hspec_new
, arg3
)) {
11308 return -TARGET_EFAULT
;
11311 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
11312 if (arg4
&& host_to_target_itimerspec(arg4
, &hspec_old
)) {
11313 return -TARGET_EFAULT
;
11320 #ifdef TARGET_NR_timer_gettime
11321 case TARGET_NR_timer_gettime
:
11323 /* args: timer_t timerid, struct itimerspec *curr_value */
11324 target_timer_t timerid
= get_timer_id(arg1
);
11328 } else if (!arg2
) {
11329 ret
= -TARGET_EFAULT
;
11331 timer_t htimer
= g_posix_timers
[timerid
];
11332 struct itimerspec hspec
;
11333 ret
= get_errno(timer_gettime(htimer
, &hspec
));
11335 if (host_to_target_itimerspec(arg2
, &hspec
)) {
11336 ret
= -TARGET_EFAULT
;
11343 #ifdef TARGET_NR_timer_getoverrun
11344 case TARGET_NR_timer_getoverrun
:
11346 /* args: timer_t timerid */
11347 target_timer_t timerid
= get_timer_id(arg1
);
11352 timer_t htimer
= g_posix_timers
[timerid
];
11353 ret
= get_errno(timer_getoverrun(htimer
));
11355 fd_trans_unregister(ret
);
11360 #ifdef TARGET_NR_timer_delete
11361 case TARGET_NR_timer_delete
:
11363 /* args: timer_t timerid */
11364 target_timer_t timerid
= get_timer_id(arg1
);
11369 timer_t htimer
= g_posix_timers
[timerid
];
11370 ret
= get_errno(timer_delete(htimer
));
11371 g_posix_timers
[timerid
] = 0;
11377 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11378 case TARGET_NR_timerfd_create
:
11379 return get_errno(timerfd_create(arg1
,
11380 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
11383 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11384 case TARGET_NR_timerfd_gettime
:
11386 struct itimerspec its_curr
;
11388 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
11390 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
11391 return -TARGET_EFAULT
;
11397 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11398 case TARGET_NR_timerfd_settime
:
11400 struct itimerspec its_new
, its_old
, *p_new
;
11403 if (target_to_host_itimerspec(&its_new
, arg3
)) {
11404 return -TARGET_EFAULT
;
11411 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
11413 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
11414 return -TARGET_EFAULT
;
11420 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11421 case TARGET_NR_ioprio_get
:
11422 return get_errno(ioprio_get(arg1
, arg2
));
11425 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11426 case TARGET_NR_ioprio_set
:
11427 return get_errno(ioprio_set(arg1
, arg2
, arg3
));
11430 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11431 case TARGET_NR_setns
:
11432 return get_errno(setns(arg1
, arg2
));
11434 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11435 case TARGET_NR_unshare
:
11436 return get_errno(unshare(arg1
));
11438 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
11439 case TARGET_NR_kcmp
:
11440 return get_errno(kcmp(arg1
, arg2
, arg3
, arg4
, arg5
));
11442 #ifdef TARGET_NR_swapcontext
11443 case TARGET_NR_swapcontext
:
11444 /* PowerPC specific. */
11445 return do_swapcontext(cpu_env
, arg1
, arg2
, arg3
);
11449 qemu_log_mask(LOG_UNIMP
, "Unsupported syscall: %d\n", num
);
11450 return -TARGET_ENOSYS
;
11455 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
11456 abi_long arg2
, abi_long arg3
, abi_long arg4
,
11457 abi_long arg5
, abi_long arg6
, abi_long arg7
,
11460 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
11463 #ifdef DEBUG_ERESTARTSYS
11464 /* Debug-only code for exercising the syscall-restart code paths
11465 * in the per-architecture cpu main loops: restart every syscall
11466 * the guest makes once before letting it through.
11472 return -TARGET_ERESTARTSYS
;
11477 trace_guest_user_syscall(cpu
, num
, arg1
, arg2
, arg3
, arg4
,
11478 arg5
, arg6
, arg7
, arg8
);
11480 if (unlikely(do_strace
)) {
11481 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
11482 ret
= do_syscall1(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
,
11483 arg5
, arg6
, arg7
, arg8
);
11484 print_syscall_ret(num
, ret
);
11486 ret
= do_syscall1(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
,
11487 arg5
, arg6
, arg7
, arg8
);
11490 trace_guest_user_syscall_ret(cpu
, num
, ret
);