 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
29 #include <sys/mount.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
37 #include <linux/capability.h>
40 int __clone2(int (*fn
)(void *), void *child_stack_base
,
41 size_t stack_size
, int flags
, void *arg
, ...);
43 #include <sys/socket.h>
47 #include <sys/times.h>
50 #include <sys/statfs.h>
52 #include <sys/sysinfo.h>
53 #include <sys/signalfd.h>
54 //#include <sys/user.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <linux/wireless.h>
58 #include <linux/icmp.h>
59 #include "qemu-common.h"
61 #include <sys/timerfd.h>
67 #include <sys/eventfd.h>
70 #include <sys/epoll.h>
73 #include "qemu/xattr.h"
75 #ifdef CONFIG_SENDFILE
76 #include <sys/sendfile.h>
79 #define termios host_termios
80 #define winsize host_winsize
81 #define termio host_termio
82 #define sgttyb host_sgttyb /* same as target */
83 #define tchars host_tchars /* same as target */
84 #define ltchars host_ltchars /* same as target */
86 #include <linux/termios.h>
87 #include <linux/unistd.h>
88 #include <linux/cdrom.h>
89 #include <linux/hdreg.h>
90 #include <linux/soundcard.h>
92 #include <linux/mtio.h>
94 #if defined(CONFIG_FIEMAP)
95 #include <linux/fiemap.h>
99 #include <linux/dm-ioctl.h>
100 #include <linux/reboot.h>
101 #include <linux/route.h>
102 #include <linux/filter.h>
103 #include <linux/blkpg.h>
104 #include <linux/netlink.h>
105 #ifdef CONFIG_RTNETLINK
106 #include <linux/rtnetlink.h>
108 #include <linux/audit.h>
109 #include "linux_loop.h"
114 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
115 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
118 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
119 * once. This exercises the codepaths for restart.
121 //#define DEBUG_ERESTARTSYS
123 //#include <linux/msdos_fs.h>
124 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
125 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
127 /* This is the size of the host kernel's sigset_t, needed where we make
128 * direct system calls that take a sigset_t pointer and a size.
130 #define SIGSET_T_SIZE (_NSIG / 8)
140 #define _syscall0(type,name) \
141 static type name (void) \
143 return syscall(__NR_##name); \
146 #define _syscall1(type,name,type1,arg1) \
147 static type name (type1 arg1) \
149 return syscall(__NR_##name, arg1); \
152 #define _syscall2(type,name,type1,arg1,type2,arg2) \
153 static type name (type1 arg1,type2 arg2) \
155 return syscall(__NR_##name, arg1, arg2); \
158 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
159 static type name (type1 arg1,type2 arg2,type3 arg3) \
161 return syscall(__NR_##name, arg1, arg2, arg3); \
164 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
165 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
167 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
170 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
172 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
174 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
178 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
179 type5,arg5,type6,arg6) \
180 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
183 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
187 #define __NR_sys_uname __NR_uname
188 #define __NR_sys_getcwd1 __NR_getcwd
189 #define __NR_sys_getdents __NR_getdents
190 #define __NR_sys_getdents64 __NR_getdents64
191 #define __NR_sys_getpriority __NR_getpriority
192 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
193 #define __NR_sys_syslog __NR_syslog
194 #define __NR_sys_futex __NR_futex
195 #define __NR_sys_inotify_init __NR_inotify_init
196 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
197 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
199 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
201 #define __NR__llseek __NR_lseek
204 /* Newer kernel ports have llseek() instead of _llseek() */
205 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
206 #define TARGET_NR__llseek TARGET_NR_llseek
210 _syscall0(int, gettid
)
212 /* This is a replacement for the host gettid() and must return a host
214 static int gettid(void) {
218 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
219 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
221 #if !defined(__NR_getdents) || \
222 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
223 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
225 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
226 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
227 loff_t
*, res
, uint
, wh
);
229 _syscall3(int,sys_rt_sigqueueinfo
,int,pid
,int,sig
,siginfo_t
*,uinfo
)
230 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
231 #ifdef __NR_exit_group
232 _syscall1(int,exit_group
,int,error_code
)
234 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
235 _syscall1(int,set_tid_address
,int *,tidptr
)
237 #if defined(TARGET_NR_futex) && defined(__NR_futex)
238 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
239 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
241 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
242 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
243 unsigned long *, user_mask_ptr
);
244 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
245 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
246 unsigned long *, user_mask_ptr
);
247 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
249 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
250 struct __user_cap_data_struct
*, data
);
251 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
252 struct __user_cap_data_struct
*, data
);
253 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
254 _syscall2(int, ioprio_get
, int, which
, int, who
)
256 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
257 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
259 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
260 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
263 static bitmask_transtbl fcntl_flags_tbl
[] = {
264 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
265 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
266 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
267 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
268 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
269 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
270 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
271 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
272 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
273 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
274 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
275 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
276 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
277 #if defined(O_DIRECT)
278 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
280 #if defined(O_NOATIME)
281 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
283 #if defined(O_CLOEXEC)
284 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
287 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
289 /* Don't terminate the list prematurely on 64-bit host+guest. */
290 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
291 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
296 typedef abi_long (*TargetFdDataFunc
)(void *, size_t);
297 typedef abi_long (*TargetFdAddrFunc
)(void *, abi_ulong
, socklen_t
);
298 typedef struct TargetFdTrans
{
299 TargetFdDataFunc host_to_target_data
;
300 TargetFdDataFunc target_to_host_data
;
301 TargetFdAddrFunc target_to_host_addr
;
304 static TargetFdTrans
**target_fd_trans
;
306 static unsigned int target_fd_max
;
308 static TargetFdDataFunc
fd_trans_target_to_host_data(int fd
)
310 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
311 return target_fd_trans
[fd
]->target_to_host_data
;
316 static TargetFdDataFunc
fd_trans_host_to_target_data(int fd
)
318 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
319 return target_fd_trans
[fd
]->host_to_target_data
;
324 static TargetFdAddrFunc
fd_trans_target_to_host_addr(int fd
)
326 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
327 return target_fd_trans
[fd
]->target_to_host_addr
;
332 static void fd_trans_register(int fd
, TargetFdTrans
*trans
)
336 if (fd
>= target_fd_max
) {
337 oldmax
= target_fd_max
;
338 target_fd_max
= ((fd
>> 6) + 1) << 6; /* by slice of 64 entries */
339 target_fd_trans
= g_renew(TargetFdTrans
*,
340 target_fd_trans
, target_fd_max
);
341 memset((void *)(target_fd_trans
+ oldmax
), 0,
342 (target_fd_max
- oldmax
) * sizeof(TargetFdTrans
*));
344 target_fd_trans
[fd
] = trans
;
347 static void fd_trans_unregister(int fd
)
349 if (fd
>= 0 && fd
< target_fd_max
) {
350 target_fd_trans
[fd
] = NULL
;
354 static void fd_trans_dup(int oldfd
, int newfd
)
356 fd_trans_unregister(newfd
);
357 if (oldfd
< target_fd_max
&& target_fd_trans
[oldfd
]) {
358 fd_trans_register(newfd
, target_fd_trans
[oldfd
]);
362 static int sys_getcwd1(char *buf
, size_t size
)
364 if (getcwd(buf
, size
) == NULL
) {
365 /* getcwd() sets errno */
368 return strlen(buf
)+1;
371 #ifdef TARGET_NR_utimensat
372 #ifdef CONFIG_UTIMENSAT
373 static int sys_utimensat(int dirfd
, const char *pathname
,
374 const struct timespec times
[2], int flags
)
376 if (pathname
== NULL
)
377 return futimens(dirfd
, times
);
379 return utimensat(dirfd
, pathname
, times
, flags
);
381 #elif defined(__NR_utimensat)
382 #define __NR_sys_utimensat __NR_utimensat
383 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
384 const struct timespec
*,tsp
,int,flags
)
386 static int sys_utimensat(int dirfd
, const char *pathname
,
387 const struct timespec times
[2], int flags
)
393 #endif /* TARGET_NR_utimensat */
395 #ifdef CONFIG_INOTIFY
396 #include <sys/inotify.h>
398 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
399 static int sys_inotify_init(void)
401 return (inotify_init());
404 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
405 static int sys_inotify_add_watch(int fd
,const char *pathname
, int32_t mask
)
407 return (inotify_add_watch(fd
, pathname
, mask
));
410 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
411 static int sys_inotify_rm_watch(int fd
, int32_t wd
)
413 return (inotify_rm_watch(fd
, wd
));
416 #ifdef CONFIG_INOTIFY1
417 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
418 static int sys_inotify_init1(int flags
)
420 return (inotify_init1(flags
));
425 /* Userspace can usually survive runtime without inotify */
426 #undef TARGET_NR_inotify_init
427 #undef TARGET_NR_inotify_init1
428 #undef TARGET_NR_inotify_add_watch
429 #undef TARGET_NR_inotify_rm_watch
430 #endif /* CONFIG_INOTIFY */
432 #if defined(TARGET_NR_ppoll)
434 # define __NR_ppoll -1
436 #define __NR_sys_ppoll __NR_ppoll
437 _syscall5(int, sys_ppoll
, struct pollfd
*, fds
, nfds_t
, nfds
,
438 struct timespec
*, timeout
, const sigset_t
*, sigmask
,
442 #if defined(TARGET_NR_prlimit64)
443 #ifndef __NR_prlimit64
444 # define __NR_prlimit64 -1
446 #define __NR_sys_prlimit64 __NR_prlimit64
447 /* The glibc rlimit structure may not be that used by the underlying syscall */
448 struct host_rlimit64
{
452 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
453 const struct host_rlimit64
*, new_limit
,
454 struct host_rlimit64
*, old_limit
)
458 #if defined(TARGET_NR_timer_create)
459 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
460 static timer_t g_posix_timers
[32] = { 0, } ;
462 static inline int next_free_host_timer(void)
465 /* FIXME: Does finding the next free slot require a lock? */
466 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
467 if (g_posix_timers
[k
] == 0) {
468 g_posix_timers
[k
] = (timer_t
) 1;
476 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
478 static inline int regpairs_aligned(void *cpu_env
) {
479 return ((((CPUARMState
*)cpu_env
)->eabi
) == 1) ;
481 #elif defined(TARGET_MIPS)
482 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
483 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
484 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
485 * of registers which translates to the same as ARM/MIPS, because we start with
487 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
489 static inline int regpairs_aligned(void *cpu_env
) { return 0; }
492 #define ERRNO_TABLE_SIZE 1200
494 /* target_to_host_errno_table[] is initialized from
495 * host_to_target_errno_table[] in syscall_init(). */
496 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
500 * This list is the union of errno values overridden in asm-<arch>/errno.h
501 * minus the errnos that are not actually generic to all archs.
503 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
504 [EAGAIN
] = TARGET_EAGAIN
,
505 [EIDRM
] = TARGET_EIDRM
,
506 [ECHRNG
] = TARGET_ECHRNG
,
507 [EL2NSYNC
] = TARGET_EL2NSYNC
,
508 [EL3HLT
] = TARGET_EL3HLT
,
509 [EL3RST
] = TARGET_EL3RST
,
510 [ELNRNG
] = TARGET_ELNRNG
,
511 [EUNATCH
] = TARGET_EUNATCH
,
512 [ENOCSI
] = TARGET_ENOCSI
,
513 [EL2HLT
] = TARGET_EL2HLT
,
514 [EDEADLK
] = TARGET_EDEADLK
,
515 [ENOLCK
] = TARGET_ENOLCK
,
516 [EBADE
] = TARGET_EBADE
,
517 [EBADR
] = TARGET_EBADR
,
518 [EXFULL
] = TARGET_EXFULL
,
519 [ENOANO
] = TARGET_ENOANO
,
520 [EBADRQC
] = TARGET_EBADRQC
,
521 [EBADSLT
] = TARGET_EBADSLT
,
522 [EBFONT
] = TARGET_EBFONT
,
523 [ENOSTR
] = TARGET_ENOSTR
,
524 [ENODATA
] = TARGET_ENODATA
,
525 [ETIME
] = TARGET_ETIME
,
526 [ENOSR
] = TARGET_ENOSR
,
527 [ENONET
] = TARGET_ENONET
,
528 [ENOPKG
] = TARGET_ENOPKG
,
529 [EREMOTE
] = TARGET_EREMOTE
,
530 [ENOLINK
] = TARGET_ENOLINK
,
531 [EADV
] = TARGET_EADV
,
532 [ESRMNT
] = TARGET_ESRMNT
,
533 [ECOMM
] = TARGET_ECOMM
,
534 [EPROTO
] = TARGET_EPROTO
,
535 [EDOTDOT
] = TARGET_EDOTDOT
,
536 [EMULTIHOP
] = TARGET_EMULTIHOP
,
537 [EBADMSG
] = TARGET_EBADMSG
,
538 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
539 [EOVERFLOW
] = TARGET_EOVERFLOW
,
540 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
541 [EBADFD
] = TARGET_EBADFD
,
542 [EREMCHG
] = TARGET_EREMCHG
,
543 [ELIBACC
] = TARGET_ELIBACC
,
544 [ELIBBAD
] = TARGET_ELIBBAD
,
545 [ELIBSCN
] = TARGET_ELIBSCN
,
546 [ELIBMAX
] = TARGET_ELIBMAX
,
547 [ELIBEXEC
] = TARGET_ELIBEXEC
,
548 [EILSEQ
] = TARGET_EILSEQ
,
549 [ENOSYS
] = TARGET_ENOSYS
,
550 [ELOOP
] = TARGET_ELOOP
,
551 [ERESTART
] = TARGET_ERESTART
,
552 [ESTRPIPE
] = TARGET_ESTRPIPE
,
553 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
554 [EUSERS
] = TARGET_EUSERS
,
555 [ENOTSOCK
] = TARGET_ENOTSOCK
,
556 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
557 [EMSGSIZE
] = TARGET_EMSGSIZE
,
558 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
559 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
560 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
561 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
562 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
563 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
564 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
565 [EADDRINUSE
] = TARGET_EADDRINUSE
,
566 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
567 [ENETDOWN
] = TARGET_ENETDOWN
,
568 [ENETUNREACH
] = TARGET_ENETUNREACH
,
569 [ENETRESET
] = TARGET_ENETRESET
,
570 [ECONNABORTED
] = TARGET_ECONNABORTED
,
571 [ECONNRESET
] = TARGET_ECONNRESET
,
572 [ENOBUFS
] = TARGET_ENOBUFS
,
573 [EISCONN
] = TARGET_EISCONN
,
574 [ENOTCONN
] = TARGET_ENOTCONN
,
575 [EUCLEAN
] = TARGET_EUCLEAN
,
576 [ENOTNAM
] = TARGET_ENOTNAM
,
577 [ENAVAIL
] = TARGET_ENAVAIL
,
578 [EISNAM
] = TARGET_EISNAM
,
579 [EREMOTEIO
] = TARGET_EREMOTEIO
,
580 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
581 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
582 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
583 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
584 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
585 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
586 [EALREADY
] = TARGET_EALREADY
,
587 [EINPROGRESS
] = TARGET_EINPROGRESS
,
588 [ESTALE
] = TARGET_ESTALE
,
589 [ECANCELED
] = TARGET_ECANCELED
,
590 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
591 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
593 [ENOKEY
] = TARGET_ENOKEY
,
596 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
599 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
602 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
605 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
607 #ifdef ENOTRECOVERABLE
608 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
612 static inline int host_to_target_errno(int err
)
614 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
615 host_to_target_errno_table
[err
]) {
616 return host_to_target_errno_table
[err
];
621 static inline int target_to_host_errno(int err
)
623 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
624 target_to_host_errno_table
[err
]) {
625 return target_to_host_errno_table
[err
];
630 static inline abi_long
get_errno(abi_long ret
)
633 return -host_to_target_errno(errno
);
638 static inline int is_error(abi_long ret
)
640 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
643 char *target_strerror(int err
)
645 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
648 return strerror(target_to_host_errno(err
));
651 #define safe_syscall0(type, name) \
652 static type safe_##name(void) \
654 return safe_syscall(__NR_##name); \
657 #define safe_syscall1(type, name, type1, arg1) \
658 static type safe_##name(type1 arg1) \
660 return safe_syscall(__NR_##name, arg1); \
663 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
664 static type safe_##name(type1 arg1, type2 arg2) \
666 return safe_syscall(__NR_##name, arg1, arg2); \
669 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
670 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
672 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
675 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
677 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
679 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
682 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
683 type4, arg4, type5, arg5) \
684 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
687 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
690 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
691 type4, arg4, type5, arg5, type6, arg6) \
692 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
693 type5 arg5, type6 arg6) \
695 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
698 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
699 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
700 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
701 int, flags
, mode_t
, mode
)
702 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
703 struct rusage
*, rusage
)
704 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
705 int, options
, struct rusage
*, rusage
)
706 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
707 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
708 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
709 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
710 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
711 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
712 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
713 safe_syscall2(int, tkill
, int, tid
, int, sig
)
714 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
715 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
716 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
717 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
719 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
720 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
721 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
722 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
723 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
724 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
726 static inline int host_to_target_sock_type(int host_type
)
730 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
732 target_type
= TARGET_SOCK_DGRAM
;
735 target_type
= TARGET_SOCK_STREAM
;
738 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
742 #if defined(SOCK_CLOEXEC)
743 if (host_type
& SOCK_CLOEXEC
) {
744 target_type
|= TARGET_SOCK_CLOEXEC
;
748 #if defined(SOCK_NONBLOCK)
749 if (host_type
& SOCK_NONBLOCK
) {
750 target_type
|= TARGET_SOCK_NONBLOCK
;
757 static abi_ulong target_brk
;
758 static abi_ulong target_original_brk
;
759 static abi_ulong brk_page
;
761 void target_set_brk(abi_ulong new_brk
)
763 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
764 brk_page
= HOST_PAGE_ALIGN(target_brk
);
767 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
768 #define DEBUGF_BRK(message, args...)
770 /* do_brk() must return target values and target errnos. */
771 abi_long
do_brk(abi_ulong new_brk
)
773 abi_long mapped_addr
;
776 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
779 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
782 if (new_brk
< target_original_brk
) {
783 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
788 /* If the new brk is less than the highest page reserved to the
789 * target heap allocation, set it and we're almost done... */
790 if (new_brk
<= brk_page
) {
791 /* Heap contents are initialized to zero, as for anonymous
793 if (new_brk
> target_brk
) {
794 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
796 target_brk
= new_brk
;
797 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
801 /* We need to allocate more memory after the brk... Note that
802 * we don't use MAP_FIXED because that will map over the top of
803 * any existing mapping (like the one with the host libc or qemu
804 * itself); instead we treat "mapped but at wrong address" as
805 * a failure and unmap again.
807 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
808 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
809 PROT_READ
|PROT_WRITE
,
810 MAP_ANON
|MAP_PRIVATE
, 0, 0));
812 if (mapped_addr
== brk_page
) {
813 /* Heap contents are initialized to zero, as for anonymous
814 * mapped pages. Technically the new pages are already
815 * initialized to zero since they *are* anonymous mapped
816 * pages, however we have to take care with the contents that
817 * come from the remaining part of the previous page: it may
818 * contains garbage data due to a previous heap usage (grown
820 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
822 target_brk
= new_brk
;
823 brk_page
= HOST_PAGE_ALIGN(target_brk
);
824 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
827 } else if (mapped_addr
!= -1) {
828 /* Mapped but at wrong address, meaning there wasn't actually
829 * enough space for this brk.
831 target_munmap(mapped_addr
, new_alloc_size
);
833 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
836 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
839 #if defined(TARGET_ALPHA)
840 /* We (partially) emulate OSF/1 on Alpha, which requires we
841 return a proper errno, not an unchanged brk value. */
842 return -TARGET_ENOMEM
;
844 /* For everything else, return the previous break. */
848 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
849 abi_ulong target_fds_addr
,
853 abi_ulong b
, *target_fds
;
855 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
856 if (!(target_fds
= lock_user(VERIFY_READ
,
858 sizeof(abi_ulong
) * nw
,
860 return -TARGET_EFAULT
;
864 for (i
= 0; i
< nw
; i
++) {
865 /* grab the abi_ulong */
866 __get_user(b
, &target_fds
[i
]);
867 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
868 /* check the bit inside the abi_ulong */
875 unlock_user(target_fds
, target_fds_addr
, 0);
880 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
881 abi_ulong target_fds_addr
,
884 if (target_fds_addr
) {
885 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
886 return -TARGET_EFAULT
;
894 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
900 abi_ulong
*target_fds
;
902 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
903 if (!(target_fds
= lock_user(VERIFY_WRITE
,
905 sizeof(abi_ulong
) * nw
,
907 return -TARGET_EFAULT
;
910 for (i
= 0; i
< nw
; i
++) {
912 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
913 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
916 __put_user(v
, &target_fds
[i
]);
919 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
924 #if defined(__alpha__)
930 static inline abi_long
host_to_target_clock_t(long ticks
)
932 #if HOST_HZ == TARGET_HZ
935 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
939 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
940 const struct rusage
*rusage
)
942 struct target_rusage
*target_rusage
;
944 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
945 return -TARGET_EFAULT
;
946 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
947 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
948 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
949 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
950 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
951 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
952 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
953 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
954 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
955 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
956 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
957 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
958 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
959 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
960 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
961 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
962 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
963 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
964 unlock_user_struct(target_rusage
, target_addr
, 1);
969 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
971 abi_ulong target_rlim_swap
;
974 target_rlim_swap
= tswapal(target_rlim
);
975 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
976 return RLIM_INFINITY
;
978 result
= target_rlim_swap
;
979 if (target_rlim_swap
!= (rlim_t
)result
)
980 return RLIM_INFINITY
;
985 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
987 abi_ulong target_rlim_swap
;
990 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
991 target_rlim_swap
= TARGET_RLIM_INFINITY
;
993 target_rlim_swap
= rlim
;
994 result
= tswapal(target_rlim_swap
);
999 static inline int target_to_host_resource(int code
)
1002 case TARGET_RLIMIT_AS
:
1004 case TARGET_RLIMIT_CORE
:
1006 case TARGET_RLIMIT_CPU
:
1008 case TARGET_RLIMIT_DATA
:
1010 case TARGET_RLIMIT_FSIZE
:
1011 return RLIMIT_FSIZE
;
1012 case TARGET_RLIMIT_LOCKS
:
1013 return RLIMIT_LOCKS
;
1014 case TARGET_RLIMIT_MEMLOCK
:
1015 return RLIMIT_MEMLOCK
;
1016 case TARGET_RLIMIT_MSGQUEUE
:
1017 return RLIMIT_MSGQUEUE
;
1018 case TARGET_RLIMIT_NICE
:
1020 case TARGET_RLIMIT_NOFILE
:
1021 return RLIMIT_NOFILE
;
1022 case TARGET_RLIMIT_NPROC
:
1023 return RLIMIT_NPROC
;
1024 case TARGET_RLIMIT_RSS
:
1026 case TARGET_RLIMIT_RTPRIO
:
1027 return RLIMIT_RTPRIO
;
1028 case TARGET_RLIMIT_SIGPENDING
:
1029 return RLIMIT_SIGPENDING
;
1030 case TARGET_RLIMIT_STACK
:
1031 return RLIMIT_STACK
;
1037 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1038 abi_ulong target_tv_addr
)
1040 struct target_timeval
*target_tv
;
1042 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
1043 return -TARGET_EFAULT
;
1045 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1046 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1048 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1053 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1054 const struct timeval
*tv
)
1056 struct target_timeval
*target_tv
;
1058 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
1059 return -TARGET_EFAULT
;
1061 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1062 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1064 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1069 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1070 abi_ulong target_tz_addr
)
1072 struct target_timezone
*target_tz
;
1074 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1075 return -TARGET_EFAULT
;
1078 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1079 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1081 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1086 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1089 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1090 abi_ulong target_mq_attr_addr
)
1092 struct target_mq_attr
*target_mq_attr
;
1094 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1095 target_mq_attr_addr
, 1))
1096 return -TARGET_EFAULT
;
1098 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1099 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1100 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1101 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1103 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1108 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1109 const struct mq_attr
*attr
)
1111 struct target_mq_attr
*target_mq_attr
;
1113 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1114 target_mq_attr_addr
, 0))
1115 return -TARGET_EFAULT
;
1117 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1118 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1119 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1120 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1122 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1128 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1129 /* do_select() must return target values and target errnos. */
1130 static abi_long
do_select(int n
,
1131 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1132 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1134 fd_set rfds
, wfds
, efds
;
1135 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1137 struct timespec ts
, *ts_ptr
;
1140 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1144 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1148 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1153 if (target_tv_addr
) {
1154 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1155 return -TARGET_EFAULT
;
1156 ts
.tv_sec
= tv
.tv_sec
;
1157 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1163 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1166 if (!is_error(ret
)) {
1167 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1168 return -TARGET_EFAULT
;
1169 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1170 return -TARGET_EFAULT
;
1171 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1172 return -TARGET_EFAULT
;
1174 if (target_tv_addr
) {
1175 tv
.tv_sec
= ts
.tv_sec
;
1176 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1177 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1178 return -TARGET_EFAULT
;
1187 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1190 return pipe2(host_pipe
, flags
);
1196 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1197 int flags
, int is_pipe2
)
1201 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1204 return get_errno(ret
);
1206 /* Several targets have special calling conventions for the original
1207 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1209 #if defined(TARGET_ALPHA)
1210 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1211 return host_pipe
[0];
1212 #elif defined(TARGET_MIPS)
1213 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1214 return host_pipe
[0];
1215 #elif defined(TARGET_SH4)
1216 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1217 return host_pipe
[0];
1218 #elif defined(TARGET_SPARC)
1219 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1220 return host_pipe
[0];
1224 if (put_user_s32(host_pipe
[0], pipedes
)
1225 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1226 return -TARGET_EFAULT
;
1227 return get_errno(ret
);
1230 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1231 abi_ulong target_addr
,
1234 struct target_ip_mreqn
*target_smreqn
;
1236 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1238 return -TARGET_EFAULT
;
1239 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1240 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1241 if (len
== sizeof(struct target_ip_mreqn
))
1242 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1243 unlock_user(target_smreqn
, target_addr
, 0);
1248 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1249 abi_ulong target_addr
,
1252 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1253 sa_family_t sa_family
;
1254 struct target_sockaddr
*target_saddr
;
1256 if (fd_trans_target_to_host_addr(fd
)) {
1257 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1260 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1262 return -TARGET_EFAULT
;
1264 sa_family
= tswap16(target_saddr
->sa_family
);
1266 /* Oops. The caller might send a incomplete sun_path; sun_path
1267 * must be terminated by \0 (see the manual page), but
1268 * unfortunately it is quite common to specify sockaddr_un
1269 * length as "strlen(x->sun_path)" while it should be
1270 * "strlen(...) + 1". We'll fix that here if needed.
1271 * Linux kernel has a similar feature.
1274 if (sa_family
== AF_UNIX
) {
1275 if (len
< unix_maxlen
&& len
> 0) {
1276 char *cp
= (char*)target_saddr
;
1278 if ( cp
[len
-1] && !cp
[len
] )
1281 if (len
> unix_maxlen
)
1285 memcpy(addr
, target_saddr
, len
);
1286 addr
->sa_family
= sa_family
;
1287 if (sa_family
== AF_NETLINK
) {
1288 struct sockaddr_nl
*nladdr
;
1290 nladdr
= (struct sockaddr_nl
*)addr
;
1291 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1292 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1293 } else if (sa_family
== AF_PACKET
) {
1294 struct target_sockaddr_ll
*lladdr
;
1296 lladdr
= (struct target_sockaddr_ll
*)addr
;
1297 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1298 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1300 unlock_user(target_saddr
, target_addr
, 0);
1305 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1306 struct sockaddr
*addr
,
1309 struct target_sockaddr
*target_saddr
;
1311 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1313 return -TARGET_EFAULT
;
1314 memcpy(target_saddr
, addr
, len
);
1315 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1316 if (addr
->sa_family
== AF_NETLINK
) {
1317 struct sockaddr_nl
*target_nl
= (struct sockaddr_nl
*)target_saddr
;
1318 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1319 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1321 unlock_user(target_saddr
, target_addr
, len
);
1326 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1327 struct target_msghdr
*target_msgh
)
1329 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1330 abi_long msg_controllen
;
1331 abi_ulong target_cmsg_addr
;
1332 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1333 socklen_t space
= 0;
1335 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1336 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1338 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1339 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1340 target_cmsg_start
= target_cmsg
;
1342 return -TARGET_EFAULT
;
1344 while (cmsg
&& target_cmsg
) {
1345 void *data
= CMSG_DATA(cmsg
);
1346 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1348 int len
= tswapal(target_cmsg
->cmsg_len
)
1349 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
1351 space
+= CMSG_SPACE(len
);
1352 if (space
> msgh
->msg_controllen
) {
1353 space
-= CMSG_SPACE(len
);
1354 /* This is a QEMU bug, since we allocated the payload
1355 * area ourselves (unlike overflow in host-to-target
1356 * conversion, which is just the guest giving us a buffer
1357 * that's too small). It can't happen for the payload types
1358 * we currently support; if it becomes an issue in future
1359 * we would need to improve our allocation strategy to
1360 * something more intelligent than "twice the size of the
1361 * target buffer we're reading from".
1363 gemu_log("Host cmsg overflow\n");
1367 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1368 cmsg
->cmsg_level
= SOL_SOCKET
;
1370 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1372 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1373 cmsg
->cmsg_len
= CMSG_LEN(len
);
1375 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1376 int *fd
= (int *)data
;
1377 int *target_fd
= (int *)target_data
;
1378 int i
, numfds
= len
/ sizeof(int);
1380 for (i
= 0; i
< numfds
; i
++) {
1381 __get_user(fd
[i
], target_fd
+ i
);
1383 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1384 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1385 struct ucred
*cred
= (struct ucred
*)data
;
1386 struct target_ucred
*target_cred
=
1387 (struct target_ucred
*)target_data
;
1389 __get_user(cred
->pid
, &target_cred
->pid
);
1390 __get_user(cred
->uid
, &target_cred
->uid
);
1391 __get_user(cred
->gid
, &target_cred
->gid
);
1393 gemu_log("Unsupported ancillary data: %d/%d\n",
1394 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1395 memcpy(data
, target_data
, len
);
1398 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1399 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1402 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1404 msgh
->msg_controllen
= space
;
1408 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1409 struct msghdr
*msgh
)
1411 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1412 abi_long msg_controllen
;
1413 abi_ulong target_cmsg_addr
;
1414 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1415 socklen_t space
= 0;
1417 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1418 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1420 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1421 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1422 target_cmsg_start
= target_cmsg
;
1424 return -TARGET_EFAULT
;
1426 while (cmsg
&& target_cmsg
) {
1427 void *data
= CMSG_DATA(cmsg
);
1428 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1430 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
1431 int tgt_len
, tgt_space
;
1433 /* We never copy a half-header but may copy half-data;
1434 * this is Linux's behaviour in put_cmsg(). Note that
1435 * truncation here is a guest problem (which we report
1436 * to the guest via the CTRUNC bit), unlike truncation
1437 * in target_to_host_cmsg, which is a QEMU bug.
1439 if (msg_controllen
< sizeof(struct cmsghdr
)) {
1440 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1444 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1445 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1447 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1449 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1451 tgt_len
= TARGET_CMSG_LEN(len
);
1453 /* Payload types which need a different size of payload on
1454 * the target must adjust tgt_len here.
1456 switch (cmsg
->cmsg_level
) {
1458 switch (cmsg
->cmsg_type
) {
1460 tgt_len
= sizeof(struct target_timeval
);
1469 if (msg_controllen
< tgt_len
) {
1470 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1471 tgt_len
= msg_controllen
;
1474 /* We must now copy-and-convert len bytes of payload
1475 * into tgt_len bytes of destination space. Bear in mind
1476 * that in both source and destination we may be dealing
1477 * with a truncated value!
1479 switch (cmsg
->cmsg_level
) {
1481 switch (cmsg
->cmsg_type
) {
1484 int *fd
= (int *)data
;
1485 int *target_fd
= (int *)target_data
;
1486 int i
, numfds
= tgt_len
/ sizeof(int);
1488 for (i
= 0; i
< numfds
; i
++) {
1489 __put_user(fd
[i
], target_fd
+ i
);
1495 struct timeval
*tv
= (struct timeval
*)data
;
1496 struct target_timeval
*target_tv
=
1497 (struct target_timeval
*)target_data
;
1499 if (len
!= sizeof(struct timeval
) ||
1500 tgt_len
!= sizeof(struct target_timeval
)) {
1504 /* copy struct timeval to target */
1505 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1506 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1509 case SCM_CREDENTIALS
:
1511 struct ucred
*cred
= (struct ucred
*)data
;
1512 struct target_ucred
*target_cred
=
1513 (struct target_ucred
*)target_data
;
1515 __put_user(cred
->pid
, &target_cred
->pid
);
1516 __put_user(cred
->uid
, &target_cred
->uid
);
1517 __put_user(cred
->gid
, &target_cred
->gid
);
1527 gemu_log("Unsupported ancillary data: %d/%d\n",
1528 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1529 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1530 if (tgt_len
> len
) {
1531 memset(target_data
+ len
, 0, tgt_len
- len
);
1535 target_cmsg
->cmsg_len
= tswapal(tgt_len
);
1536 tgt_space
= TARGET_CMSG_SPACE(len
);
1537 if (msg_controllen
< tgt_space
) {
1538 tgt_space
= msg_controllen
;
1540 msg_controllen
-= tgt_space
;
1542 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1543 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1546 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1548 target_msgh
->msg_controllen
= tswapal(space
);
1552 static void tswap_nlmsghdr(struct nlmsghdr
*nlh
)
1554 nlh
->nlmsg_len
= tswap32(nlh
->nlmsg_len
);
1555 nlh
->nlmsg_type
= tswap16(nlh
->nlmsg_type
);
1556 nlh
->nlmsg_flags
= tswap16(nlh
->nlmsg_flags
);
1557 nlh
->nlmsg_seq
= tswap32(nlh
->nlmsg_seq
);
1558 nlh
->nlmsg_pid
= tswap32(nlh
->nlmsg_pid
);
1561 static abi_long
host_to_target_for_each_nlmsg(struct nlmsghdr
*nlh
,
1563 abi_long (*host_to_target_nlmsg
)
1564 (struct nlmsghdr
*))
1569 while (len
> sizeof(struct nlmsghdr
)) {
1571 nlmsg_len
= nlh
->nlmsg_len
;
1572 if (nlmsg_len
< sizeof(struct nlmsghdr
) ||
1577 switch (nlh
->nlmsg_type
) {
1579 tswap_nlmsghdr(nlh
);
1585 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
1586 e
->error
= tswap32(e
->error
);
1587 tswap_nlmsghdr(&e
->msg
);
1588 tswap_nlmsghdr(nlh
);
1592 ret
= host_to_target_nlmsg(nlh
);
1594 tswap_nlmsghdr(nlh
);
1599 tswap_nlmsghdr(nlh
);
1600 len
-= NLMSG_ALIGN(nlmsg_len
);
1601 nlh
= (struct nlmsghdr
*)(((char*)nlh
) + NLMSG_ALIGN(nlmsg_len
));
1606 static abi_long
target_to_host_for_each_nlmsg(struct nlmsghdr
*nlh
,
1608 abi_long (*target_to_host_nlmsg
)
1609 (struct nlmsghdr
*))
1613 while (len
> sizeof(struct nlmsghdr
)) {
1614 if (tswap32(nlh
->nlmsg_len
) < sizeof(struct nlmsghdr
) ||
1615 tswap32(nlh
->nlmsg_len
) > len
) {
1618 tswap_nlmsghdr(nlh
);
1619 switch (nlh
->nlmsg_type
) {
1626 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
1627 e
->error
= tswap32(e
->error
);
1628 tswap_nlmsghdr(&e
->msg
);
1631 ret
= target_to_host_nlmsg(nlh
);
1636 len
-= NLMSG_ALIGN(nlh
->nlmsg_len
);
1637 nlh
= (struct nlmsghdr
*)(((char *)nlh
) + NLMSG_ALIGN(nlh
->nlmsg_len
));
1642 #ifdef CONFIG_RTNETLINK
1643 static abi_long
host_to_target_for_each_rtattr(struct rtattr
*rtattr
,
1645 abi_long (*host_to_target_rtattr
)
1648 unsigned short rta_len
;
1651 while (len
> sizeof(struct rtattr
)) {
1652 rta_len
= rtattr
->rta_len
;
1653 if (rta_len
< sizeof(struct rtattr
) ||
1657 ret
= host_to_target_rtattr(rtattr
);
1658 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
1659 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
1663 len
-= RTA_ALIGN(rta_len
);
1664 rtattr
= (struct rtattr
*)(((char *)rtattr
) + RTA_ALIGN(rta_len
));
1669 static abi_long
host_to_target_data_link_rtattr(struct rtattr
*rtattr
)
1672 struct rtnl_link_stats
*st
;
1673 struct rtnl_link_stats64
*st64
;
1674 struct rtnl_link_ifmap
*map
;
1676 switch (rtattr
->rta_type
) {
1679 case IFLA_BROADCAST
:
1685 case IFLA_OPERSTATE
:
1688 case IFLA_PROTO_DOWN
:
1695 case IFLA_CARRIER_CHANGES
:
1696 case IFLA_NUM_RX_QUEUES
:
1697 case IFLA_NUM_TX_QUEUES
:
1698 case IFLA_PROMISCUITY
:
1700 case IFLA_LINK_NETNSID
:
1704 u32
= RTA_DATA(rtattr
);
1705 *u32
= tswap32(*u32
);
1707 /* struct rtnl_link_stats */
1709 st
= RTA_DATA(rtattr
);
1710 st
->rx_packets
= tswap32(st
->rx_packets
);
1711 st
->tx_packets
= tswap32(st
->tx_packets
);
1712 st
->rx_bytes
= tswap32(st
->rx_bytes
);
1713 st
->tx_bytes
= tswap32(st
->tx_bytes
);
1714 st
->rx_errors
= tswap32(st
->rx_errors
);
1715 st
->tx_errors
= tswap32(st
->tx_errors
);
1716 st
->rx_dropped
= tswap32(st
->rx_dropped
);
1717 st
->tx_dropped
= tswap32(st
->tx_dropped
);
1718 st
->multicast
= tswap32(st
->multicast
);
1719 st
->collisions
= tswap32(st
->collisions
);
1721 /* detailed rx_errors: */
1722 st
->rx_length_errors
= tswap32(st
->rx_length_errors
);
1723 st
->rx_over_errors
= tswap32(st
->rx_over_errors
);
1724 st
->rx_crc_errors
= tswap32(st
->rx_crc_errors
);
1725 st
->rx_frame_errors
= tswap32(st
->rx_frame_errors
);
1726 st
->rx_fifo_errors
= tswap32(st
->rx_fifo_errors
);
1727 st
->rx_missed_errors
= tswap32(st
->rx_missed_errors
);
1729 /* detailed tx_errors */
1730 st
->tx_aborted_errors
= tswap32(st
->tx_aborted_errors
);
1731 st
->tx_carrier_errors
= tswap32(st
->tx_carrier_errors
);
1732 st
->tx_fifo_errors
= tswap32(st
->tx_fifo_errors
);
1733 st
->tx_heartbeat_errors
= tswap32(st
->tx_heartbeat_errors
);
1734 st
->tx_window_errors
= tswap32(st
->tx_window_errors
);
1737 st
->rx_compressed
= tswap32(st
->rx_compressed
);
1738 st
->tx_compressed
= tswap32(st
->tx_compressed
);
1740 /* struct rtnl_link_stats64 */
1742 st64
= RTA_DATA(rtattr
);
1743 st64
->rx_packets
= tswap64(st64
->rx_packets
);
1744 st64
->tx_packets
= tswap64(st64
->tx_packets
);
1745 st64
->rx_bytes
= tswap64(st64
->rx_bytes
);
1746 st64
->tx_bytes
= tswap64(st64
->tx_bytes
);
1747 st64
->rx_errors
= tswap64(st64
->rx_errors
);
1748 st64
->tx_errors
= tswap64(st64
->tx_errors
);
1749 st64
->rx_dropped
= tswap64(st64
->rx_dropped
);
1750 st64
->tx_dropped
= tswap64(st64
->tx_dropped
);
1751 st64
->multicast
= tswap64(st64
->multicast
);
1752 st64
->collisions
= tswap64(st64
->collisions
);
1754 /* detailed rx_errors: */
1755 st64
->rx_length_errors
= tswap64(st64
->rx_length_errors
);
1756 st64
->rx_over_errors
= tswap64(st64
->rx_over_errors
);
1757 st64
->rx_crc_errors
= tswap64(st64
->rx_crc_errors
);
1758 st64
->rx_frame_errors
= tswap64(st64
->rx_frame_errors
);
1759 st64
->rx_fifo_errors
= tswap64(st64
->rx_fifo_errors
);
1760 st64
->rx_missed_errors
= tswap64(st64
->rx_missed_errors
);
1762 /* detailed tx_errors */
1763 st64
->tx_aborted_errors
= tswap64(st64
->tx_aborted_errors
);
1764 st64
->tx_carrier_errors
= tswap64(st64
->tx_carrier_errors
);
1765 st64
->tx_fifo_errors
= tswap64(st64
->tx_fifo_errors
);
1766 st64
->tx_heartbeat_errors
= tswap64(st64
->tx_heartbeat_errors
);
1767 st64
->tx_window_errors
= tswap64(st64
->tx_window_errors
);
1770 st64
->rx_compressed
= tswap64(st64
->rx_compressed
);
1771 st64
->tx_compressed
= tswap64(st64
->tx_compressed
);
1773 /* struct rtnl_link_ifmap */
1775 map
= RTA_DATA(rtattr
);
1776 map
->mem_start
= tswap64(map
->mem_start
);
1777 map
->mem_end
= tswap64(map
->mem_end
);
1778 map
->base_addr
= tswap64(map
->base_addr
);
1779 map
->irq
= tswap16(map
->irq
);
1784 /* FIXME: implement nested type */
1785 gemu_log("Unimplemented nested type %d\n", rtattr
->rta_type
);
1788 gemu_log("Unknown host IFLA type: %d\n", rtattr
->rta_type
);
1794 static abi_long
host_to_target_data_addr_rtattr(struct rtattr
*rtattr
)
1797 struct ifa_cacheinfo
*ci
;
1799 switch (rtattr
->rta_type
) {
1800 /* binary: depends on family type */
1810 u32
= RTA_DATA(rtattr
);
1811 *u32
= tswap32(*u32
);
1813 /* struct ifa_cacheinfo */
1815 ci
= RTA_DATA(rtattr
);
1816 ci
->ifa_prefered
= tswap32(ci
->ifa_prefered
);
1817 ci
->ifa_valid
= tswap32(ci
->ifa_valid
);
1818 ci
->cstamp
= tswap32(ci
->cstamp
);
1819 ci
->tstamp
= tswap32(ci
->tstamp
);
1822 gemu_log("Unknown host IFA type: %d\n", rtattr
->rta_type
);
1828 static abi_long
host_to_target_data_route_rtattr(struct rtattr
*rtattr
)
1831 switch (rtattr
->rta_type
) {
1832 /* binary: depends on family type */
1841 u32
= RTA_DATA(rtattr
);
1842 *u32
= tswap32(*u32
);
1845 gemu_log("Unknown host RTA type: %d\n", rtattr
->rta_type
);
1851 static abi_long
host_to_target_link_rtattr(struct rtattr
*rtattr
,
1852 uint32_t rtattr_len
)
1854 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
1855 host_to_target_data_link_rtattr
);
1858 static abi_long
host_to_target_addr_rtattr(struct rtattr
*rtattr
,
1859 uint32_t rtattr_len
)
1861 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
1862 host_to_target_data_addr_rtattr
);
1865 static abi_long
host_to_target_route_rtattr(struct rtattr
*rtattr
,
1866 uint32_t rtattr_len
)
1868 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
1869 host_to_target_data_route_rtattr
);
1872 static abi_long
host_to_target_data_route(struct nlmsghdr
*nlh
)
1875 struct ifinfomsg
*ifi
;
1876 struct ifaddrmsg
*ifa
;
1879 nlmsg_len
= nlh
->nlmsg_len
;
1880 switch (nlh
->nlmsg_type
) {
1884 ifi
= NLMSG_DATA(nlh
);
1885 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
1886 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
1887 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
1888 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
1889 host_to_target_link_rtattr(IFLA_RTA(ifi
),
1890 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifi
)));
1895 ifa
= NLMSG_DATA(nlh
);
1896 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
1897 host_to_target_addr_rtattr(IFA_RTA(ifa
),
1898 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifa
)));
1903 rtm
= NLMSG_DATA(nlh
);
1904 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
1905 host_to_target_route_rtattr(RTM_RTA(rtm
),
1906 nlmsg_len
- NLMSG_LENGTH(sizeof(*rtm
)));
1909 return -TARGET_EINVAL
;
1914 static inline abi_long
host_to_target_nlmsg_route(struct nlmsghdr
*nlh
,
1917 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_route
);
1920 static abi_long
target_to_host_for_each_rtattr(struct rtattr
*rtattr
,
1922 abi_long (*target_to_host_rtattr
)
1927 while (len
>= sizeof(struct rtattr
)) {
1928 if (tswap16(rtattr
->rta_len
) < sizeof(struct rtattr
) ||
1929 tswap16(rtattr
->rta_len
) > len
) {
1932 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
1933 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
1934 ret
= target_to_host_rtattr(rtattr
);
1938 len
-= RTA_ALIGN(rtattr
->rta_len
);
1939 rtattr
= (struct rtattr
*)(((char *)rtattr
) +
1940 RTA_ALIGN(rtattr
->rta_len
));
1945 static abi_long
target_to_host_data_link_rtattr(struct rtattr
*rtattr
)
1947 switch (rtattr
->rta_type
) {
1949 gemu_log("Unknown target IFLA type: %d\n", rtattr
->rta_type
);
1955 static abi_long
target_to_host_data_addr_rtattr(struct rtattr
*rtattr
)
1957 switch (rtattr
->rta_type
) {
1958 /* binary: depends on family type */
1963 gemu_log("Unknown target IFA type: %d\n", rtattr
->rta_type
);
1969 static abi_long
target_to_host_data_route_rtattr(struct rtattr
*rtattr
)
1972 switch (rtattr
->rta_type
) {
1973 /* binary: depends on family type */
1980 u32
= RTA_DATA(rtattr
);
1981 *u32
= tswap32(*u32
);
1984 gemu_log("Unknown target RTA type: %d\n", rtattr
->rta_type
);
1990 static void target_to_host_link_rtattr(struct rtattr
*rtattr
,
1991 uint32_t rtattr_len
)
1993 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
1994 target_to_host_data_link_rtattr
);
1997 static void target_to_host_addr_rtattr(struct rtattr
*rtattr
,
1998 uint32_t rtattr_len
)
2000 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2001 target_to_host_data_addr_rtattr
);
2004 static void target_to_host_route_rtattr(struct rtattr
*rtattr
,
2005 uint32_t rtattr_len
)
2007 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2008 target_to_host_data_route_rtattr
);
2011 static abi_long
target_to_host_data_route(struct nlmsghdr
*nlh
)
2013 struct ifinfomsg
*ifi
;
2014 struct ifaddrmsg
*ifa
;
2017 switch (nlh
->nlmsg_type
) {
2022 ifi
= NLMSG_DATA(nlh
);
2023 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2024 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2025 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2026 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2027 target_to_host_link_rtattr(IFLA_RTA(ifi
), nlh
->nlmsg_len
-
2028 NLMSG_LENGTH(sizeof(*ifi
)));
2033 ifa
= NLMSG_DATA(nlh
);
2034 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2035 target_to_host_addr_rtattr(IFA_RTA(ifa
), nlh
->nlmsg_len
-
2036 NLMSG_LENGTH(sizeof(*ifa
)));
2042 rtm
= NLMSG_DATA(nlh
);
2043 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2044 target_to_host_route_rtattr(RTM_RTA(rtm
), nlh
->nlmsg_len
-
2045 NLMSG_LENGTH(sizeof(*rtm
)));
2048 return -TARGET_EOPNOTSUPP
;
2053 static abi_long
target_to_host_nlmsg_route(struct nlmsghdr
*nlh
, size_t len
)
2055 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_route
);
2057 #endif /* CONFIG_RTNETLINK */
2059 static abi_long
host_to_target_data_audit(struct nlmsghdr
*nlh
)
2061 switch (nlh
->nlmsg_type
) {
2063 gemu_log("Unknown host audit message type %d\n",
2065 return -TARGET_EINVAL
;
2070 static inline abi_long
host_to_target_nlmsg_audit(struct nlmsghdr
*nlh
,
2073 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_audit
);
2076 static abi_long
target_to_host_data_audit(struct nlmsghdr
*nlh
)
2078 switch (nlh
->nlmsg_type
) {
2080 case AUDIT_FIRST_USER_MSG
... AUDIT_LAST_USER_MSG
:
2081 case AUDIT_FIRST_USER_MSG2
... AUDIT_LAST_USER_MSG2
:
2084 gemu_log("Unknown target audit message type %d\n",
2086 return -TARGET_EINVAL
;
2092 static abi_long
target_to_host_nlmsg_audit(struct nlmsghdr
*nlh
, size_t len
)
2094 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_audit
);
2097 /* do_setsockopt() Must return target values and target errnos. */
2098 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
2099 abi_ulong optval_addr
, socklen_t optlen
)
2103 struct ip_mreqn
*ip_mreq
;
2104 struct ip_mreq_source
*ip_mreq_source
;
2108 /* TCP options all take an 'int' value. */
2109 if (optlen
< sizeof(uint32_t))
2110 return -TARGET_EINVAL
;
2112 if (get_user_u32(val
, optval_addr
))
2113 return -TARGET_EFAULT
;
2114 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2121 case IP_ROUTER_ALERT
:
2125 case IP_MTU_DISCOVER
:
2131 case IP_MULTICAST_TTL
:
2132 case IP_MULTICAST_LOOP
:
2134 if (optlen
>= sizeof(uint32_t)) {
2135 if (get_user_u32(val
, optval_addr
))
2136 return -TARGET_EFAULT
;
2137 } else if (optlen
>= 1) {
2138 if (get_user_u8(val
, optval_addr
))
2139 return -TARGET_EFAULT
;
2141 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2143 case IP_ADD_MEMBERSHIP
:
2144 case IP_DROP_MEMBERSHIP
:
2145 if (optlen
< sizeof (struct target_ip_mreq
) ||
2146 optlen
> sizeof (struct target_ip_mreqn
))
2147 return -TARGET_EINVAL
;
2149 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
2150 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
2151 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
2154 case IP_BLOCK_SOURCE
:
2155 case IP_UNBLOCK_SOURCE
:
2156 case IP_ADD_SOURCE_MEMBERSHIP
:
2157 case IP_DROP_SOURCE_MEMBERSHIP
:
2158 if (optlen
!= sizeof (struct target_ip_mreq_source
))
2159 return -TARGET_EINVAL
;
2161 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2162 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
2163 unlock_user (ip_mreq_source
, optval_addr
, 0);
2172 case IPV6_MTU_DISCOVER
:
2175 case IPV6_RECVPKTINFO
:
2177 if (optlen
< sizeof(uint32_t)) {
2178 return -TARGET_EINVAL
;
2180 if (get_user_u32(val
, optval_addr
)) {
2181 return -TARGET_EFAULT
;
2183 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2184 &val
, sizeof(val
)));
2193 /* struct icmp_filter takes an u32 value */
2194 if (optlen
< sizeof(uint32_t)) {
2195 return -TARGET_EINVAL
;
2198 if (get_user_u32(val
, optval_addr
)) {
2199 return -TARGET_EFAULT
;
2201 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2202 &val
, sizeof(val
)));
2209 case TARGET_SOL_SOCKET
:
2211 case TARGET_SO_RCVTIMEO
:
2215 optname
= SO_RCVTIMEO
;
2218 if (optlen
!= sizeof(struct target_timeval
)) {
2219 return -TARGET_EINVAL
;
2222 if (copy_from_user_timeval(&tv
, optval_addr
)) {
2223 return -TARGET_EFAULT
;
2226 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2230 case TARGET_SO_SNDTIMEO
:
2231 optname
= SO_SNDTIMEO
;
2233 case TARGET_SO_ATTACH_FILTER
:
2235 struct target_sock_fprog
*tfprog
;
2236 struct target_sock_filter
*tfilter
;
2237 struct sock_fprog fprog
;
2238 struct sock_filter
*filter
;
2241 if (optlen
!= sizeof(*tfprog
)) {
2242 return -TARGET_EINVAL
;
2244 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
2245 return -TARGET_EFAULT
;
2247 if (!lock_user_struct(VERIFY_READ
, tfilter
,
2248 tswapal(tfprog
->filter
), 0)) {
2249 unlock_user_struct(tfprog
, optval_addr
, 1);
2250 return -TARGET_EFAULT
;
2253 fprog
.len
= tswap16(tfprog
->len
);
2254 filter
= g_try_new(struct sock_filter
, fprog
.len
);
2255 if (filter
== NULL
) {
2256 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2257 unlock_user_struct(tfprog
, optval_addr
, 1);
2258 return -TARGET_ENOMEM
;
2260 for (i
= 0; i
< fprog
.len
; i
++) {
2261 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2262 filter
[i
].jt
= tfilter
[i
].jt
;
2263 filter
[i
].jf
= tfilter
[i
].jf
;
2264 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2266 fprog
.filter
= filter
;
2268 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2269 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2272 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2273 unlock_user_struct(tfprog
, optval_addr
, 1);
2276 case TARGET_SO_BINDTODEVICE
:
2278 char *dev_ifname
, *addr_ifname
;
2280 if (optlen
> IFNAMSIZ
- 1) {
2281 optlen
= IFNAMSIZ
- 1;
2283 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2285 return -TARGET_EFAULT
;
2287 optname
= SO_BINDTODEVICE
;
2288 addr_ifname
= alloca(IFNAMSIZ
);
2289 memcpy(addr_ifname
, dev_ifname
, optlen
);
2290 addr_ifname
[optlen
] = 0;
2291 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2292 addr_ifname
, optlen
));
2293 unlock_user (dev_ifname
, optval_addr
, 0);
2296 /* Options with 'int' argument. */
2297 case TARGET_SO_DEBUG
:
2300 case TARGET_SO_REUSEADDR
:
2301 optname
= SO_REUSEADDR
;
2303 case TARGET_SO_TYPE
:
2306 case TARGET_SO_ERROR
:
2309 case TARGET_SO_DONTROUTE
:
2310 optname
= SO_DONTROUTE
;
2312 case TARGET_SO_BROADCAST
:
2313 optname
= SO_BROADCAST
;
2315 case TARGET_SO_SNDBUF
:
2316 optname
= SO_SNDBUF
;
2318 case TARGET_SO_SNDBUFFORCE
:
2319 optname
= SO_SNDBUFFORCE
;
2321 case TARGET_SO_RCVBUF
:
2322 optname
= SO_RCVBUF
;
2324 case TARGET_SO_RCVBUFFORCE
:
2325 optname
= SO_RCVBUFFORCE
;
2327 case TARGET_SO_KEEPALIVE
:
2328 optname
= SO_KEEPALIVE
;
2330 case TARGET_SO_OOBINLINE
:
2331 optname
= SO_OOBINLINE
;
2333 case TARGET_SO_NO_CHECK
:
2334 optname
= SO_NO_CHECK
;
2336 case TARGET_SO_PRIORITY
:
2337 optname
= SO_PRIORITY
;
2340 case TARGET_SO_BSDCOMPAT
:
2341 optname
= SO_BSDCOMPAT
;
2344 case TARGET_SO_PASSCRED
:
2345 optname
= SO_PASSCRED
;
2347 case TARGET_SO_PASSSEC
:
2348 optname
= SO_PASSSEC
;
2350 case TARGET_SO_TIMESTAMP
:
2351 optname
= SO_TIMESTAMP
;
2353 case TARGET_SO_RCVLOWAT
:
2354 optname
= SO_RCVLOWAT
;
2360 if (optlen
< sizeof(uint32_t))
2361 return -TARGET_EINVAL
;
2363 if (get_user_u32(val
, optval_addr
))
2364 return -TARGET_EFAULT
;
2365 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
2369 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
2370 ret
= -TARGET_ENOPROTOOPT
;
2375 /* do_getsockopt() Must return target values and target errnos. */
2376 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
2377 abi_ulong optval_addr
, abi_ulong optlen
)
2384 case TARGET_SOL_SOCKET
:
2387 /* These don't just return a single integer */
2388 case TARGET_SO_LINGER
:
2389 case TARGET_SO_RCVTIMEO
:
2390 case TARGET_SO_SNDTIMEO
:
2391 case TARGET_SO_PEERNAME
:
2393 case TARGET_SO_PEERCRED
: {
2396 struct target_ucred
*tcr
;
2398 if (get_user_u32(len
, optlen
)) {
2399 return -TARGET_EFAULT
;
2402 return -TARGET_EINVAL
;
2406 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
2414 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
2415 return -TARGET_EFAULT
;
2417 __put_user(cr
.pid
, &tcr
->pid
);
2418 __put_user(cr
.uid
, &tcr
->uid
);
2419 __put_user(cr
.gid
, &tcr
->gid
);
2420 unlock_user_struct(tcr
, optval_addr
, 1);
2421 if (put_user_u32(len
, optlen
)) {
2422 return -TARGET_EFAULT
;
2426 /* Options with 'int' argument. */
2427 case TARGET_SO_DEBUG
:
2430 case TARGET_SO_REUSEADDR
:
2431 optname
= SO_REUSEADDR
;
2433 case TARGET_SO_TYPE
:
2436 case TARGET_SO_ERROR
:
2439 case TARGET_SO_DONTROUTE
:
2440 optname
= SO_DONTROUTE
;
2442 case TARGET_SO_BROADCAST
:
2443 optname
= SO_BROADCAST
;
2445 case TARGET_SO_SNDBUF
:
2446 optname
= SO_SNDBUF
;
2448 case TARGET_SO_RCVBUF
:
2449 optname
= SO_RCVBUF
;
2451 case TARGET_SO_KEEPALIVE
:
2452 optname
= SO_KEEPALIVE
;
2454 case TARGET_SO_OOBINLINE
:
2455 optname
= SO_OOBINLINE
;
2457 case TARGET_SO_NO_CHECK
:
2458 optname
= SO_NO_CHECK
;
2460 case TARGET_SO_PRIORITY
:
2461 optname
= SO_PRIORITY
;
2464 case TARGET_SO_BSDCOMPAT
:
2465 optname
= SO_BSDCOMPAT
;
2468 case TARGET_SO_PASSCRED
:
2469 optname
= SO_PASSCRED
;
2471 case TARGET_SO_TIMESTAMP
:
2472 optname
= SO_TIMESTAMP
;
2474 case TARGET_SO_RCVLOWAT
:
2475 optname
= SO_RCVLOWAT
;
2477 case TARGET_SO_ACCEPTCONN
:
2478 optname
= SO_ACCEPTCONN
;
2485 /* TCP options all take an 'int' value. */
2487 if (get_user_u32(len
, optlen
))
2488 return -TARGET_EFAULT
;
2490 return -TARGET_EINVAL
;
2492 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2495 if (optname
== SO_TYPE
) {
2496 val
= host_to_target_sock_type(val
);
2501 if (put_user_u32(val
, optval_addr
))
2502 return -TARGET_EFAULT
;
2504 if (put_user_u8(val
, optval_addr
))
2505 return -TARGET_EFAULT
;
2507 if (put_user_u32(len
, optlen
))
2508 return -TARGET_EFAULT
;
2515 case IP_ROUTER_ALERT
:
2519 case IP_MTU_DISCOVER
:
2525 case IP_MULTICAST_TTL
:
2526 case IP_MULTICAST_LOOP
:
2527 if (get_user_u32(len
, optlen
))
2528 return -TARGET_EFAULT
;
2530 return -TARGET_EINVAL
;
2532 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2535 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2537 if (put_user_u32(len
, optlen
)
2538 || put_user_u8(val
, optval_addr
))
2539 return -TARGET_EFAULT
;
2541 if (len
> sizeof(int))
2543 if (put_user_u32(len
, optlen
)
2544 || put_user_u32(val
, optval_addr
))
2545 return -TARGET_EFAULT
;
2549 ret
= -TARGET_ENOPROTOOPT
;
2555 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2557 ret
= -TARGET_EOPNOTSUPP
;
2563 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
2564 int count
, int copy
)
2566 struct target_iovec
*target_vec
;
2568 abi_ulong total_len
, max_len
;
2571 bool bad_address
= false;
2577 if (count
< 0 || count
> IOV_MAX
) {
2582 vec
= g_try_new0(struct iovec
, count
);
2588 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2589 count
* sizeof(struct target_iovec
), 1);
2590 if (target_vec
== NULL
) {
2595 /* ??? If host page size > target page size, this will result in a
2596 value larger than what we can actually support. */
2597 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
2600 for (i
= 0; i
< count
; i
++) {
2601 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2602 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2607 } else if (len
== 0) {
2608 /* Zero length pointer is ignored. */
2609 vec
[i
].iov_base
= 0;
2611 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
2612 /* If the first buffer pointer is bad, this is a fault. But
2613 * subsequent bad buffers will result in a partial write; this
2614 * is realized by filling the vector with null pointers and
2616 if (!vec
[i
].iov_base
) {
2627 if (len
> max_len
- total_len
) {
2628 len
= max_len
- total_len
;
2631 vec
[i
].iov_len
= len
;
2635 unlock_user(target_vec
, target_addr
, 0);
2640 if (tswapal(target_vec
[i
].iov_len
) > 0) {
2641 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
2644 unlock_user(target_vec
, target_addr
, 0);
2651 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
2652 int count
, int copy
)
2654 struct target_iovec
*target_vec
;
2657 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2658 count
* sizeof(struct target_iovec
), 1);
2660 for (i
= 0; i
< count
; i
++) {
2661 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2662 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2666 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
2668 unlock_user(target_vec
, target_addr
, 0);
2674 static inline int target_to_host_sock_type(int *type
)
2677 int target_type
= *type
;
2679 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
2680 case TARGET_SOCK_DGRAM
:
2681 host_type
= SOCK_DGRAM
;
2683 case TARGET_SOCK_STREAM
:
2684 host_type
= SOCK_STREAM
;
2687 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
2690 if (target_type
& TARGET_SOCK_CLOEXEC
) {
2691 #if defined(SOCK_CLOEXEC)
2692 host_type
|= SOCK_CLOEXEC
;
2694 return -TARGET_EINVAL
;
2697 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2698 #if defined(SOCK_NONBLOCK)
2699 host_type
|= SOCK_NONBLOCK
;
2700 #elif !defined(O_NONBLOCK)
2701 return -TARGET_EINVAL
;
2708 /* Try to emulate socket type flags after socket creation. */
2709 static int sock_flags_fixup(int fd
, int target_type
)
2711 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2712 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2713 int flags
= fcntl(fd
, F_GETFL
);
2714 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
2716 return -TARGET_EINVAL
;
2723 static abi_long
packet_target_to_host_sockaddr(void *host_addr
,
2724 abi_ulong target_addr
,
2727 struct sockaddr
*addr
= host_addr
;
2728 struct target_sockaddr
*target_saddr
;
2730 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
2731 if (!target_saddr
) {
2732 return -TARGET_EFAULT
;
2735 memcpy(addr
, target_saddr
, len
);
2736 addr
->sa_family
= tswap16(target_saddr
->sa_family
);
2737 /* spkt_protocol is big-endian */
2739 unlock_user(target_saddr
, target_addr
, 0);
2743 static TargetFdTrans target_packet_trans
= {
2744 .target_to_host_addr
= packet_target_to_host_sockaddr
,
#ifdef CONFIG_RTNETLINK
/* Byte-swap NETLINK_ROUTE messages going from guest to host. */
static abi_long netlink_route_target_to_host(void *buf, size_t len)
{
    return target_to_host_nlmsg_route(buf, len);
}

/* Byte-swap NETLINK_ROUTE messages going from host to guest. */
static abi_long netlink_route_host_to_target(void *buf, size_t len)
{
    return host_to_target_nlmsg_route(buf, len);
}

/* fd translator hooked onto NETLINK_ROUTE sockets by do_socket(). */
static TargetFdTrans target_netlink_route_trans = {
    .target_to_host_data = netlink_route_target_to_host,
    .host_to_target_data = netlink_route_host_to_target,
};
#endif /* CONFIG_RTNETLINK */
2764 static abi_long
netlink_audit_target_to_host(void *buf
, size_t len
)
2766 return target_to_host_nlmsg_audit(buf
, len
);
2769 static abi_long
netlink_audit_host_to_target(void *buf
, size_t len
)
2771 return host_to_target_nlmsg_audit(buf
, len
);
2774 static TargetFdTrans target_netlink_audit_trans
= {
2775 .target_to_host_data
= netlink_audit_target_to_host
,
2776 .host_to_target_data
= netlink_audit_host_to_target
,
2779 /* do_socket() Must return target values and target errnos. */
2780 static abi_long
do_socket(int domain
, int type
, int protocol
)
2782 int target_type
= type
;
2785 ret
= target_to_host_sock_type(&type
);
2790 if (domain
== PF_NETLINK
&& !(
2791 #ifdef CONFIG_RTNETLINK
2792 protocol
== NETLINK_ROUTE
||
2794 protocol
== NETLINK_KOBJECT_UEVENT
||
2795 protocol
== NETLINK_AUDIT
)) {
2796 return -EPFNOSUPPORT
;
2799 if (domain
== AF_PACKET
||
2800 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
2801 protocol
= tswap16(protocol
);
2804 ret
= get_errno(socket(domain
, type
, protocol
));
2806 ret
= sock_flags_fixup(ret
, target_type
);
2807 if (type
== SOCK_PACKET
) {
2808 /* Manage an obsolete case :
2809 * if socket type is SOCK_PACKET, bind by name
2811 fd_trans_register(ret
, &target_packet_trans
);
2812 } else if (domain
== PF_NETLINK
) {
2814 #ifdef CONFIG_RTNETLINK
2816 fd_trans_register(ret
, &target_netlink_route_trans
);
2819 case NETLINK_KOBJECT_UEVENT
:
2820 /* nothing to do: messages are strings */
2823 fd_trans_register(ret
, &target_netlink_audit_trans
);
2826 g_assert_not_reached();
2833 /* do_bind() Must return target values and target errnos. */
2834 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
2840 if ((int)addrlen
< 0) {
2841 return -TARGET_EINVAL
;
2844 addr
= alloca(addrlen
+1);
2846 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
2850 return get_errno(bind(sockfd
, addr
, addrlen
));
2853 /* do_connect() Must return target values and target errnos. */
2854 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
2860 if ((int)addrlen
< 0) {
2861 return -TARGET_EINVAL
;
2864 addr
= alloca(addrlen
+1);
2866 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
2870 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
2873 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2874 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
2875 int flags
, int send
)
2881 abi_ulong target_vec
;
2883 if (msgp
->msg_name
) {
2884 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
2885 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
2886 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
2887 tswapal(msgp
->msg_name
),
2893 msg
.msg_name
= NULL
;
2894 msg
.msg_namelen
= 0;
2896 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
2897 msg
.msg_control
= alloca(msg
.msg_controllen
);
2898 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
2900 count
= tswapal(msgp
->msg_iovlen
);
2901 target_vec
= tswapal(msgp
->msg_iov
);
2902 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
2903 target_vec
, count
, send
);
2905 ret
= -host_to_target_errno(errno
);
2908 msg
.msg_iovlen
= count
;
2912 if (fd_trans_target_to_host_data(fd
)) {
2913 ret
= fd_trans_target_to_host_data(fd
)(msg
.msg_iov
->iov_base
,
2914 msg
.msg_iov
->iov_len
);
2916 ret
= target_to_host_cmsg(&msg
, msgp
);
2919 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
2922 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
2923 if (!is_error(ret
)) {
2925 if (fd_trans_host_to_target_data(fd
)) {
2926 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
2927 msg
.msg_iov
->iov_len
);
2929 ret
= host_to_target_cmsg(msgp
, &msg
);
2931 if (!is_error(ret
)) {
2932 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
2933 if (msg
.msg_name
!= NULL
) {
2934 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
2935 msg
.msg_name
, msg
.msg_namelen
);
2947 unlock_iovec(vec
, target_vec
, count
, !send
);
2952 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
2953 int flags
, int send
)
2956 struct target_msghdr
*msgp
;
2958 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
2962 return -TARGET_EFAULT
;
2964 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
2965 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
2969 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2970 * so it might not have this *mmsg-specific flag either.
2972 #ifndef MSG_WAITFORONE
2973 #define MSG_WAITFORONE 0x10000
2976 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
2977 unsigned int vlen
, unsigned int flags
,
2980 struct target_mmsghdr
*mmsgp
;
2984 if (vlen
> UIO_MAXIOV
) {
2988 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
2990 return -TARGET_EFAULT
;
2993 for (i
= 0; i
< vlen
; i
++) {
2994 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
2995 if (is_error(ret
)) {
2998 mmsgp
[i
].msg_len
= tswap32(ret
);
2999 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3000 if (flags
& MSG_WAITFORONE
) {
3001 flags
|= MSG_DONTWAIT
;
3005 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3007 /* Return number of datagrams sent if we sent any at all;
3008 * otherwise return the error.
/* If we don't have a system accept4() then just call accept.
 * The callsites to do_accept4() will ensure that they don't
 * pass a non-zero flags argument in this config.
 */
#ifndef CONFIG_ACCEPT4
static inline int accept4(int sockfd, struct sockaddr *addr,
                          socklen_t *addrlen, int flags)
{
    assert(flags == 0);
    return accept(sockfd, addr, addrlen);
}
#endif
3029 /* do_accept4() Must return target values and target errnos. */
3030 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3031 abi_ulong target_addrlen_addr
, int flags
)
3038 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3040 if (target_addr
== 0) {
3041 return get_errno(accept4(fd
, NULL
, NULL
, host_flags
));
3044 /* linux returns EINVAL if addrlen pointer is invalid */
3045 if (get_user_u32(addrlen
, target_addrlen_addr
))
3046 return -TARGET_EINVAL
;
3048 if ((int)addrlen
< 0) {
3049 return -TARGET_EINVAL
;
3052 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3053 return -TARGET_EINVAL
;
3055 addr
= alloca(addrlen
);
3057 ret
= get_errno(accept4(fd
, addr
, &addrlen
, host_flags
));
3058 if (!is_error(ret
)) {
3059 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3060 if (put_user_u32(addrlen
, target_addrlen_addr
))
3061 ret
= -TARGET_EFAULT
;
3066 /* do_getpeername() Must return target values and target errnos. */
3067 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3068 abi_ulong target_addrlen_addr
)
3074 if (get_user_u32(addrlen
, target_addrlen_addr
))
3075 return -TARGET_EFAULT
;
3077 if ((int)addrlen
< 0) {
3078 return -TARGET_EINVAL
;
3081 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3082 return -TARGET_EFAULT
;
3084 addr
= alloca(addrlen
);
3086 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
3087 if (!is_error(ret
)) {
3088 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3089 if (put_user_u32(addrlen
, target_addrlen_addr
))
3090 ret
= -TARGET_EFAULT
;
3095 /* do_getsockname() Must return target values and target errnos. */
3096 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3097 abi_ulong target_addrlen_addr
)
3103 if (get_user_u32(addrlen
, target_addrlen_addr
))
3104 return -TARGET_EFAULT
;
3106 if ((int)addrlen
< 0) {
3107 return -TARGET_EINVAL
;
3110 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3111 return -TARGET_EFAULT
;
3113 addr
= alloca(addrlen
);
3115 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
3116 if (!is_error(ret
)) {
3117 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3118 if (put_user_u32(addrlen
, target_addrlen_addr
))
3119 ret
= -TARGET_EFAULT
;
3124 /* do_socketpair() Must return target values and target errnos. */
3125 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3126 abi_ulong target_tab_addr
)
3131 target_to_host_sock_type(&type
);
3133 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3134 if (!is_error(ret
)) {
3135 if (put_user_s32(tab
[0], target_tab_addr
)
3136 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3137 ret
= -TARGET_EFAULT
;
3142 /* do_sendto() Must return target values and target errnos. */
3143 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3144 abi_ulong target_addr
, socklen_t addrlen
)
3150 if ((int)addrlen
< 0) {
3151 return -TARGET_EINVAL
;
3154 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3156 return -TARGET_EFAULT
;
3157 if (fd_trans_target_to_host_data(fd
)) {
3158 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
3160 unlock_user(host_msg
, msg
, 0);
3165 addr
= alloca(addrlen
+1);
3166 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
3168 unlock_user(host_msg
, msg
, 0);
3171 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
3173 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
3175 unlock_user(host_msg
, msg
, 0);
3179 /* do_recvfrom() Must return target values and target errnos. */
3180 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
3181 abi_ulong target_addr
,
3182 abi_ulong target_addrlen
)
3189 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
3191 return -TARGET_EFAULT
;
3193 if (get_user_u32(addrlen
, target_addrlen
)) {
3194 ret
= -TARGET_EFAULT
;
3197 if ((int)addrlen
< 0) {
3198 ret
= -TARGET_EINVAL
;
3201 addr
= alloca(addrlen
);
3202 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
3205 addr
= NULL
; /* To keep compiler quiet. */
3206 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
3208 if (!is_error(ret
)) {
3210 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3211 if (put_user_u32(addrlen
, target_addrlen
)) {
3212 ret
= -TARGET_EFAULT
;
3216 unlock_user(host_msg
, msg
, len
);
3219 unlock_user(host_msg
, msg
, 0);
#ifdef TARGET_NR_socketcall
/* do_socketcall() Must return target values and target errnos.
 * Demultiplexes the legacy socketcall(2) interface: reads the per-call
 * argument count from guest memory at 'vptr', then dispatches.
 */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned ac[] = { /* number of arguments per call */
        [SOCKOP_socket] = 3,      /* domain, type, protocol */
        [SOCKOP_bind] = 3,        /* sockfd, addr, addrlen */
        [SOCKOP_connect] = 3,     /* sockfd, addr, addrlen */
        [SOCKOP_listen] = 2,      /* sockfd, backlog */
        [SOCKOP_accept] = 3,      /* sockfd, addr, addrlen */
        [SOCKOP_accept4] = 4,     /* sockfd, addr, addrlen, flags */
        [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_socketpair] = 4,  /* domain, type, protocol, tab */
        [SOCKOP_send] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_recv] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_sendto] = 6,      /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_recvfrom] = 6,    /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_shutdown] = 2,    /* sockfd, how */
        [SOCKOP_sendmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_recvmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_sendmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_recvmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_setsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
        [SOCKOP_getsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* first, collect the arguments in a[] according to ac[] */
    if (num >= 0 && num < ARRAY_SIZE(ac)) {
        assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
        for (i = 0; i < ac[num]; ++i) {
            if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
                return -TARGET_EFAULT;
            }
        }
    }

    /* now when we have the args, actually handle the call */
    switch (num) {
    case SOCKOP_socket: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case SOCKOP_bind: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case SOCKOP_connect: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case SOCKOP_listen: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case SOCKOP_accept: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case SOCKOP_getsockname: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case SOCKOP_getpeername: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case SOCKOP_socketpair: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case SOCKOP_send: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_recv: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_shutdown: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case SOCKOP_sendmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case SOCKOP_recvmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case SOCKOP_sendmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    case SOCKOP_recvmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_ENOSYS;
    }
}
#endif
3312 #define N_SHM_REGIONS 32
3314 static struct shm_region
{
3318 } shm_regions
[N_SHM_REGIONS
];
3320 struct target_semid_ds
3322 struct target_ipc_perm sem_perm
;
3323 abi_ulong sem_otime
;
3324 #if !defined(TARGET_PPC64)
3325 abi_ulong __unused1
;
3327 abi_ulong sem_ctime
;
3328 #if !defined(TARGET_PPC64)
3329 abi_ulong __unused2
;
3331 abi_ulong sem_nsems
;
3332 abi_ulong __unused3
;
3333 abi_ulong __unused4
;
3336 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
3337 abi_ulong target_addr
)
3339 struct target_ipc_perm
*target_ip
;
3340 struct target_semid_ds
*target_sd
;
3342 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3343 return -TARGET_EFAULT
;
3344 target_ip
= &(target_sd
->sem_perm
);
3345 host_ip
->__key
= tswap32(target_ip
->__key
);
3346 host_ip
->uid
= tswap32(target_ip
->uid
);
3347 host_ip
->gid
= tswap32(target_ip
->gid
);
3348 host_ip
->cuid
= tswap32(target_ip
->cuid
);
3349 host_ip
->cgid
= tswap32(target_ip
->cgid
);
3350 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3351 host_ip
->mode
= tswap32(target_ip
->mode
);
3353 host_ip
->mode
= tswap16(target_ip
->mode
);
3355 #if defined(TARGET_PPC)
3356 host_ip
->__seq
= tswap32(target_ip
->__seq
);
3358 host_ip
->__seq
= tswap16(target_ip
->__seq
);
3360 unlock_user_struct(target_sd
, target_addr
, 0);
3364 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
3365 struct ipc_perm
*host_ip
)
3367 struct target_ipc_perm
*target_ip
;
3368 struct target_semid_ds
*target_sd
;
3370 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3371 return -TARGET_EFAULT
;
3372 target_ip
= &(target_sd
->sem_perm
);
3373 target_ip
->__key
= tswap32(host_ip
->__key
);
3374 target_ip
->uid
= tswap32(host_ip
->uid
);
3375 target_ip
->gid
= tswap32(host_ip
->gid
);
3376 target_ip
->cuid
= tswap32(host_ip
->cuid
);
3377 target_ip
->cgid
= tswap32(host_ip
->cgid
);
3378 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3379 target_ip
->mode
= tswap32(host_ip
->mode
);
3381 target_ip
->mode
= tswap16(host_ip
->mode
);
3383 #if defined(TARGET_PPC)
3384 target_ip
->__seq
= tswap32(host_ip
->__seq
);
3386 target_ip
->__seq
= tswap16(host_ip
->__seq
);
3388 unlock_user_struct(target_sd
, target_addr
, 1);
3392 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
3393 abi_ulong target_addr
)
3395 struct target_semid_ds
*target_sd
;
3397 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3398 return -TARGET_EFAULT
;
3399 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
3400 return -TARGET_EFAULT
;
3401 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
3402 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
3403 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
3404 unlock_user_struct(target_sd
, target_addr
, 0);
3408 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
3409 struct semid_ds
*host_sd
)
3411 struct target_semid_ds
*target_sd
;
3413 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3414 return -TARGET_EFAULT
;
3415 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
3416 return -TARGET_EFAULT
;
3417 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
3418 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
3419 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
3420 unlock_user_struct(target_sd
, target_addr
, 1);
3424 struct target_seminfo
{
3437 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
3438 struct seminfo
*host_seminfo
)
3440 struct target_seminfo
*target_seminfo
;
3441 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
3442 return -TARGET_EFAULT
;
3443 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
3444 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
3445 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
3446 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
3447 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
3448 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
3449 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
3450 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
3451 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
3452 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
3453 unlock_user_struct(target_seminfo
, target_addr
, 1);
3459 struct semid_ds
*buf
;
3460 unsigned short *array
;
3461 struct seminfo
*__buf
;
3464 union target_semun
{
3471 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
3472 abi_ulong target_addr
)
3475 unsigned short *array
;
3477 struct semid_ds semid_ds
;
3480 semun
.buf
= &semid_ds
;
3482 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3484 return get_errno(ret
);
3486 nsems
= semid_ds
.sem_nsems
;
3488 *host_array
= g_try_new(unsigned short, nsems
);
3490 return -TARGET_ENOMEM
;
3492 array
= lock_user(VERIFY_READ
, target_addr
,
3493 nsems
*sizeof(unsigned short), 1);
3495 g_free(*host_array
);
3496 return -TARGET_EFAULT
;
3499 for(i
=0; i
<nsems
; i
++) {
3500 __get_user((*host_array
)[i
], &array
[i
]);
3502 unlock_user(array
, target_addr
, 0);
3507 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
3508 unsigned short **host_array
)
3511 unsigned short *array
;
3513 struct semid_ds semid_ds
;
3516 semun
.buf
= &semid_ds
;
3518 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3520 return get_errno(ret
);
3522 nsems
= semid_ds
.sem_nsems
;
3524 array
= lock_user(VERIFY_WRITE
, target_addr
,
3525 nsems
*sizeof(unsigned short), 0);
3527 return -TARGET_EFAULT
;
3529 for(i
=0; i
<nsems
; i
++) {
3530 __put_user((*host_array
)[i
], &array
[i
]);
3532 g_free(*host_array
);
3533 unlock_user(array
, target_addr
, 1);
3538 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
3539 abi_ulong target_arg
)
3541 union target_semun target_su
= { .buf
= target_arg
};
3543 struct semid_ds dsarg
;
3544 unsigned short *array
= NULL
;
3545 struct seminfo seminfo
;
3546 abi_long ret
= -TARGET_EINVAL
;
3553 /* In 64 bit cross-endian situations, we will erroneously pick up
3554 * the wrong half of the union for the "val" element. To rectify
3555 * this, the entire 8-byte structure is byteswapped, followed by
3556 * a swap of the 4 byte val field. In other cases, the data is
3557 * already in proper host byte order. */
3558 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
3559 target_su
.buf
= tswapal(target_su
.buf
);
3560 arg
.val
= tswap32(target_su
.val
);
3562 arg
.val
= target_su
.val
;
3564 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3568 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
3572 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3573 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
3580 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
3584 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3585 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
3591 arg
.__buf
= &seminfo
;
3592 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3593 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
3601 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
3608 struct target_sembuf
{
3609 unsigned short sem_num
;
3614 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
3615 abi_ulong target_addr
,
3618 struct target_sembuf
*target_sembuf
;
3621 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
3622 nsops
*sizeof(struct target_sembuf
), 1);
3624 return -TARGET_EFAULT
;
3626 for(i
=0; i
<nsops
; i
++) {
3627 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
3628 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
3629 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
3632 unlock_user(target_sembuf
, target_addr
, 0);
3637 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
3639 struct sembuf sops
[nsops
];
3641 if (target_to_host_sembuf(sops
, ptr
, nsops
))
3642 return -TARGET_EFAULT
;
3644 return get_errno(semop(semid
, sops
, nsops
));
3647 struct target_msqid_ds
3649 struct target_ipc_perm msg_perm
;
3650 abi_ulong msg_stime
;
3651 #if TARGET_ABI_BITS == 32
3652 abi_ulong __unused1
;
3654 abi_ulong msg_rtime
;
3655 #if TARGET_ABI_BITS == 32
3656 abi_ulong __unused2
;
3658 abi_ulong msg_ctime
;
3659 #if TARGET_ABI_BITS == 32
3660 abi_ulong __unused3
;
3662 abi_ulong __msg_cbytes
;
3664 abi_ulong msg_qbytes
;
3665 abi_ulong msg_lspid
;
3666 abi_ulong msg_lrpid
;
3667 abi_ulong __unused4
;
3668 abi_ulong __unused5
;
3671 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
3672 abi_ulong target_addr
)
3674 struct target_msqid_ds
*target_md
;
3676 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
3677 return -TARGET_EFAULT
;
3678 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
3679 return -TARGET_EFAULT
;
3680 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
3681 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
3682 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
3683 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
3684 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
3685 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
3686 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
3687 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
3688 unlock_user_struct(target_md
, target_addr
, 0);
3692 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
3693 struct msqid_ds
*host_md
)
3695 struct target_msqid_ds
*target_md
;
3697 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
3698 return -TARGET_EFAULT
;
3699 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
3700 return -TARGET_EFAULT
;
3701 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
3702 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
3703 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
3704 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
3705 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
3706 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
3707 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
3708 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
3709 unlock_user_struct(target_md
, target_addr
, 1);
3713 struct target_msginfo
{
3721 unsigned short int msgseg
;
3724 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
3725 struct msginfo
*host_msginfo
)
3727 struct target_msginfo
*target_msginfo
;
3728 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
3729 return -TARGET_EFAULT
;
3730 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
3731 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
3732 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
3733 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
3734 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
3735 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
3736 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
3737 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
3738 unlock_user_struct(target_msginfo
, target_addr
, 1);
3742 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
3744 struct msqid_ds dsarg
;
3745 struct msginfo msginfo
;
3746 abi_long ret
= -TARGET_EINVAL
;
3754 if (target_to_host_msqid_ds(&dsarg
,ptr
))
3755 return -TARGET_EFAULT
;
3756 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
3757 if (host_to_target_msqid_ds(ptr
,&dsarg
))
3758 return -TARGET_EFAULT
;
3761 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
3765 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
3766 if (host_to_target_msginfo(ptr
, &msginfo
))
3767 return -TARGET_EFAULT
;
3774 struct target_msgbuf
{
3779 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
3780 ssize_t msgsz
, int msgflg
)
3782 struct target_msgbuf
*target_mb
;
3783 struct msgbuf
*host_mb
;
3787 return -TARGET_EINVAL
;
3790 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
3791 return -TARGET_EFAULT
;
3792 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
3794 unlock_user_struct(target_mb
, msgp
, 0);
3795 return -TARGET_ENOMEM
;
3797 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
3798 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
3799 ret
= get_errno(msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
3801 unlock_user_struct(target_mb
, msgp
, 0);
3806 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
3807 ssize_t msgsz
, abi_long msgtyp
,
3810 struct target_msgbuf
*target_mb
;
3812 struct msgbuf
*host_mb
;
3816 return -TARGET_EINVAL
;
3819 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
3820 return -TARGET_EFAULT
;
3822 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
3824 ret
= -TARGET_ENOMEM
;
3827 ret
= get_errno(msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
3830 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
3831 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
3832 if (!target_mtext
) {
3833 ret
= -TARGET_EFAULT
;
3836 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
3837 unlock_user(target_mtext
, target_mtext_addr
, ret
);
3840 target_mb
->mtype
= tswapal(host_mb
->mtype
);
3844 unlock_user_struct(target_mb
, msgp
, 1);
3849 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
3850 abi_ulong target_addr
)
3852 struct target_shmid_ds
*target_sd
;
3854 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3855 return -TARGET_EFAULT
;
3856 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
3857 return -TARGET_EFAULT
;
3858 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
3859 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
3860 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
3861 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
3862 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
3863 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
3864 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
3865 unlock_user_struct(target_sd
, target_addr
, 0);
3869 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
3870 struct shmid_ds
*host_sd
)
3872 struct target_shmid_ds
*target_sd
;
3874 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3875 return -TARGET_EFAULT
;
3876 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
3877 return -TARGET_EFAULT
;
3878 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
3879 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
3880 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
3881 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
3882 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
3883 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
3884 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
3885 unlock_user_struct(target_sd
, target_addr
, 1);
3889 struct target_shminfo
{
3897 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
3898 struct shminfo
*host_shminfo
)
3900 struct target_shminfo
*target_shminfo
;
3901 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
3902 return -TARGET_EFAULT
;
3903 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
3904 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
3905 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
3906 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
3907 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
3908 unlock_user_struct(target_shminfo
, target_addr
, 1);
3912 struct target_shm_info
{
3917 abi_ulong swap_attempts
;
3918 abi_ulong swap_successes
;
3921 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
3922 struct shm_info
*host_shm_info
)
3924 struct target_shm_info
*target_shm_info
;
3925 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
3926 return -TARGET_EFAULT
;
3927 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
3928 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
3929 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
3930 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
3931 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
3932 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
3933 unlock_user_struct(target_shm_info
, target_addr
, 1);
3937 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
3939 struct shmid_ds dsarg
;
3940 struct shminfo shminfo
;
3941 struct shm_info shm_info
;
3942 abi_long ret
= -TARGET_EINVAL
;
3950 if (target_to_host_shmid_ds(&dsarg
, buf
))
3951 return -TARGET_EFAULT
;
3952 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
3953 if (host_to_target_shmid_ds(buf
, &dsarg
))
3954 return -TARGET_EFAULT
;
3957 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
3958 if (host_to_target_shminfo(buf
, &shminfo
))
3959 return -TARGET_EFAULT
;
3962 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
3963 if (host_to_target_shm_info(buf
, &shm_info
))
3964 return -TARGET_EFAULT
;
3969 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
3976 static inline abi_ulong
do_shmat(int shmid
, abi_ulong shmaddr
, int shmflg
)
3980 struct shmid_ds shm_info
;
3983 /* find out the length of the shared memory segment */
3984 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
3985 if (is_error(ret
)) {
3986 /* can't get length, bail out */
3993 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
3995 abi_ulong mmap_start
;
3997 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
3999 if (mmap_start
== -1) {
4001 host_raddr
= (void *)-1;
4003 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
4006 if (host_raddr
== (void *)-1) {
4008 return get_errno((long)host_raddr
);
4010 raddr
=h2g((unsigned long)host_raddr
);
4012 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
4013 PAGE_VALID
| PAGE_READ
|
4014 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
4016 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
4017 if (!shm_regions
[i
].in_use
) {
4018 shm_regions
[i
].in_use
= true;
4019 shm_regions
[i
].start
= raddr
;
4020 shm_regions
[i
].size
= shm_info
.shm_segsz
;
4030 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
4034 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
4035 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
4036 shm_regions
[i
].in_use
= false;
4037 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
4042 return get_errno(shmdt(g2h(shmaddr
)));
4045 #ifdef TARGET_NR_ipc
4046 /* ??? This only works with linear mappings. */
4047 /* do_ipc() must return target values and target errnos. */
4048 static abi_long
do_ipc(unsigned int call
, abi_long first
,
4049 abi_long second
, abi_long third
,
4050 abi_long ptr
, abi_long fifth
)
4055 version
= call
>> 16;
4060 ret
= do_semop(first
, ptr
, second
);
4064 ret
= get_errno(semget(first
, second
, third
));
4067 case IPCOP_semctl
: {
4068 /* The semun argument to semctl is passed by value, so dereference the
4071 get_user_ual(atptr
, ptr
);
4072 ret
= do_semctl(first
, second
, third
, atptr
);
4077 ret
= get_errno(msgget(first
, second
));
4081 ret
= do_msgsnd(first
, ptr
, second
, third
);
4085 ret
= do_msgctl(first
, second
, ptr
);
4092 struct target_ipc_kludge
{
4097 if (!lock_user_struct(VERIFY_READ
, tmp
, ptr
, 1)) {
4098 ret
= -TARGET_EFAULT
;
4102 ret
= do_msgrcv(first
, tswapal(tmp
->msgp
), second
, tswapal(tmp
->msgtyp
), third
);
4104 unlock_user_struct(tmp
, ptr
, 0);
4108 ret
= do_msgrcv(first
, ptr
, second
, fifth
, third
);
4117 raddr
= do_shmat(first
, ptr
, second
);
4118 if (is_error(raddr
))
4119 return get_errno(raddr
);
4120 if (put_user_ual(raddr
, third
))
4121 return -TARGET_EFAULT
;
4125 ret
= -TARGET_EINVAL
;
4130 ret
= do_shmdt(ptr
);
4134 /* IPC_* flag values are the same on all linux platforms */
4135 ret
= get_errno(shmget(first
, second
, third
));
4138 /* IPC_* and SHM_* command values are the same on all linux platforms */
4140 ret
= do_shmctl(first
, second
, ptr
);
4143 gemu_log("Unsupported ipc call: %d (version %d)\n", call
, version
);
4144 ret
= -TARGET_ENOSYS
;
4151 /* kernel structure types definitions */
4153 #define STRUCT(name, ...) STRUCT_ ## name,
4154 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4156 #include "syscall_types.h"
4160 #undef STRUCT_SPECIAL
4162 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4163 #define STRUCT_SPECIAL(name)
4164 #include "syscall_types.h"
4166 #undef STRUCT_SPECIAL
4168 typedef struct IOCTLEntry IOCTLEntry
;
4170 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4171 int fd
, int cmd
, abi_long arg
);
4175 unsigned int host_cmd
;
4178 do_ioctl_fn
*do_ioctl
;
4179 const argtype arg_type
[5];
4182 #define IOC_R 0x0001
4183 #define IOC_W 0x0002
4184 #define IOC_RW (IOC_R | IOC_W)
4186 #define MAX_STRUCT_SIZE 4096
4188 #ifdef CONFIG_FIEMAP
4189 /* So fiemap access checks don't overflow on 32 bit systems.
4190 * This is very slightly smaller than the limit imposed by
4191 * the underlying kernel.
4193 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4194 / sizeof(struct fiemap_extent))
4196 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4197 int fd
, int cmd
, abi_long arg
)
4199 /* The parameter for this ioctl is a struct fiemap followed
4200 * by an array of struct fiemap_extent whose size is set
4201 * in fiemap->fm_extent_count. The array is filled in by the
4204 int target_size_in
, target_size_out
;
4206 const argtype
*arg_type
= ie
->arg_type
;
4207 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
4210 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
4214 assert(arg_type
[0] == TYPE_PTR
);
4215 assert(ie
->access
== IOC_RW
);
4217 target_size_in
= thunk_type_size(arg_type
, 0);
4218 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
4220 return -TARGET_EFAULT
;
4222 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4223 unlock_user(argptr
, arg
, 0);
4224 fm
= (struct fiemap
*)buf_temp
;
4225 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
4226 return -TARGET_EINVAL
;
4229 outbufsz
= sizeof (*fm
) +
4230 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
4232 if (outbufsz
> MAX_STRUCT_SIZE
) {
4233 /* We can't fit all the extents into the fixed size buffer.
4234 * Allocate one that is large enough and use it instead.
4236 fm
= g_try_malloc(outbufsz
);
4238 return -TARGET_ENOMEM
;
4240 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
4243 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, fm
));
4244 if (!is_error(ret
)) {
4245 target_size_out
= target_size_in
;
4246 /* An extent_count of 0 means we were only counting the extents
4247 * so there are no structs to copy
4249 if (fm
->fm_extent_count
!= 0) {
4250 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
4252 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
4254 ret
= -TARGET_EFAULT
;
4256 /* Convert the struct fiemap */
4257 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
4258 if (fm
->fm_extent_count
!= 0) {
4259 p
= argptr
+ target_size_in
;
4260 /* ...and then all the struct fiemap_extents */
4261 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
4262 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
4267 unlock_user(argptr
, arg
, target_size_out
);
4277 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4278 int fd
, int cmd
, abi_long arg
)
4280 const argtype
*arg_type
= ie
->arg_type
;
4284 struct ifconf
*host_ifconf
;
4286 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
4287 int target_ifreq_size
;
4292 abi_long target_ifc_buf
;
4296 assert(arg_type
[0] == TYPE_PTR
);
4297 assert(ie
->access
== IOC_RW
);
4300 target_size
= thunk_type_size(arg_type
, 0);
4302 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4304 return -TARGET_EFAULT
;
4305 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4306 unlock_user(argptr
, arg
, 0);
4308 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
4309 target_ifc_len
= host_ifconf
->ifc_len
;
4310 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
4312 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
4313 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
4314 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
4316 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
4317 if (outbufsz
> MAX_STRUCT_SIZE
) {
4318 /* We can't fit all the extents into the fixed size buffer.
4319 * Allocate one that is large enough and use it instead.
4321 host_ifconf
= malloc(outbufsz
);
4323 return -TARGET_ENOMEM
;
4325 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
4328 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
4330 host_ifconf
->ifc_len
= host_ifc_len
;
4331 host_ifconf
->ifc_buf
= host_ifc_buf
;
4333 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, host_ifconf
));
4334 if (!is_error(ret
)) {
4335 /* convert host ifc_len to target ifc_len */
4337 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
4338 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
4339 host_ifconf
->ifc_len
= target_ifc_len
;
4341 /* restore target ifc_buf */
4343 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
4345 /* copy struct ifconf to target user */
4347 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4349 return -TARGET_EFAULT
;
4350 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
4351 unlock_user(argptr
, arg
, target_size
);
4353 /* copy ifreq[] to target user */
4355 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
4356 for (i
= 0; i
< nb_ifreq
; i
++) {
4357 thunk_convert(argptr
+ i
* target_ifreq_size
,
4358 host_ifc_buf
+ i
* sizeof(struct ifreq
),
4359 ifreq_arg_type
, THUNK_TARGET
);
4361 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
4371 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
4372 int cmd
, abi_long arg
)
4375 struct dm_ioctl
*host_dm
;
4376 abi_long guest_data
;
4377 uint32_t guest_data_size
;
4379 const argtype
*arg_type
= ie
->arg_type
;
4381 void *big_buf
= NULL
;
4385 target_size
= thunk_type_size(arg_type
, 0);
4386 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4388 ret
= -TARGET_EFAULT
;
4391 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4392 unlock_user(argptr
, arg
, 0);
4394 /* buf_temp is too small, so fetch things into a bigger buffer */
4395 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
4396 memcpy(big_buf
, buf_temp
, target_size
);
4400 guest_data
= arg
+ host_dm
->data_start
;
4401 if ((guest_data
- arg
) < 0) {
4405 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
4406 host_data
= (char*)host_dm
+ host_dm
->data_start
;
4408 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
4409 switch (ie
->host_cmd
) {
4411 case DM_LIST_DEVICES
:
4414 case DM_DEV_SUSPEND
:
4417 case DM_TABLE_STATUS
:
4418 case DM_TABLE_CLEAR
:
4420 case DM_LIST_VERSIONS
:
4424 case DM_DEV_SET_GEOMETRY
:
4425 /* data contains only strings */
4426 memcpy(host_data
, argptr
, guest_data_size
);
4429 memcpy(host_data
, argptr
, guest_data_size
);
4430 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
4434 void *gspec
= argptr
;
4435 void *cur_data
= host_data
;
4436 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
4437 int spec_size
= thunk_type_size(arg_type
, 0);
4440 for (i
= 0; i
< host_dm
->target_count
; i
++) {
4441 struct dm_target_spec
*spec
= cur_data
;
4445 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
4446 slen
= strlen((char*)gspec
+ spec_size
) + 1;
4448 spec
->next
= sizeof(*spec
) + slen
;
4449 strcpy((char*)&spec
[1], gspec
+ spec_size
);
4451 cur_data
+= spec
->next
;
4456 ret
= -TARGET_EINVAL
;
4457 unlock_user(argptr
, guest_data
, 0);
4460 unlock_user(argptr
, guest_data
, 0);
4462 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
4463 if (!is_error(ret
)) {
4464 guest_data
= arg
+ host_dm
->data_start
;
4465 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
4466 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
4467 switch (ie
->host_cmd
) {
4472 case DM_DEV_SUSPEND
:
4475 case DM_TABLE_CLEAR
:
4477 case DM_DEV_SET_GEOMETRY
:
4478 /* no return data */
4480 case DM_LIST_DEVICES
:
4482 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
4483 uint32_t remaining_data
= guest_data_size
;
4484 void *cur_data
= argptr
;
4485 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
4486 int nl_size
= 12; /* can't use thunk_size due to alignment */
4489 uint32_t next
= nl
->next
;
4491 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
4493 if (remaining_data
< nl
->next
) {
4494 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
4497 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
4498 strcpy(cur_data
+ nl_size
, nl
->name
);
4499 cur_data
+= nl
->next
;
4500 remaining_data
-= nl
->next
;
4504 nl
= (void*)nl
+ next
;
4509 case DM_TABLE_STATUS
:
4511 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
4512 void *cur_data
= argptr
;
4513 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
4514 int spec_size
= thunk_type_size(arg_type
, 0);
4517 for (i
= 0; i
< host_dm
->target_count
; i
++) {
4518 uint32_t next
= spec
->next
;
4519 int slen
= strlen((char*)&spec
[1]) + 1;
4520 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
4521 if (guest_data_size
< spec
->next
) {
4522 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
4525 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
4526 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
4527 cur_data
= argptr
+ spec
->next
;
4528 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
4534 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
4535 int count
= *(uint32_t*)hdata
;
4536 uint64_t *hdev
= hdata
+ 8;
4537 uint64_t *gdev
= argptr
+ 8;
4540 *(uint32_t*)argptr
= tswap32(count
);
4541 for (i
= 0; i
< count
; i
++) {
4542 *gdev
= tswap64(*hdev
);
4548 case DM_LIST_VERSIONS
:
4550 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
4551 uint32_t remaining_data
= guest_data_size
;
4552 void *cur_data
= argptr
;
4553 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
4554 int vers_size
= thunk_type_size(arg_type
, 0);
4557 uint32_t next
= vers
->next
;
4559 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
4561 if (remaining_data
< vers
->next
) {
4562 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
4565 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
4566 strcpy(cur_data
+ vers_size
, vers
->name
);
4567 cur_data
+= vers
->next
;
4568 remaining_data
-= vers
->next
;
4572 vers
= (void*)vers
+ next
;
4577 unlock_user(argptr
, guest_data
, 0);
4578 ret
= -TARGET_EINVAL
;
4581 unlock_user(argptr
, guest_data
, guest_data_size
);
4583 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4585 ret
= -TARGET_EFAULT
;
4588 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
4589 unlock_user(argptr
, arg
, target_size
);
4596 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
4597 int cmd
, abi_long arg
)
4601 const argtype
*arg_type
= ie
->arg_type
;
4602 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
4605 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
4606 struct blkpg_partition host_part
;
4608 /* Read and convert blkpg */
4610 target_size
= thunk_type_size(arg_type
, 0);
4611 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4613 ret
= -TARGET_EFAULT
;
4616 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4617 unlock_user(argptr
, arg
, 0);
4619 switch (host_blkpg
->op
) {
4620 case BLKPG_ADD_PARTITION
:
4621 case BLKPG_DEL_PARTITION
:
4622 /* payload is struct blkpg_partition */
4625 /* Unknown opcode */
4626 ret
= -TARGET_EINVAL
;
4630 /* Read and convert blkpg->data */
4631 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
4632 target_size
= thunk_type_size(part_arg_type
, 0);
4633 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4635 ret
= -TARGET_EFAULT
;
4638 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
4639 unlock_user(argptr
, arg
, 0);
4641 /* Swizzle the data pointer to our local copy and call! */
4642 host_blkpg
->data
= &host_part
;
4643 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, host_blkpg
));
4649 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4650 int fd
, int cmd
, abi_long arg
)
4652 const argtype
*arg_type
= ie
->arg_type
;
4653 const StructEntry
*se
;
4654 const argtype
*field_types
;
4655 const int *dst_offsets
, *src_offsets
;
4658 abi_ulong
*target_rt_dev_ptr
;
4659 unsigned long *host_rt_dev_ptr
;
4663 assert(ie
->access
== IOC_W
);
4664 assert(*arg_type
== TYPE_PTR
);
4666 assert(*arg_type
== TYPE_STRUCT
);
4667 target_size
= thunk_type_size(arg_type
, 0);
4668 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4670 return -TARGET_EFAULT
;
4673 assert(*arg_type
== (int)STRUCT_rtentry
);
4674 se
= struct_entries
+ *arg_type
++;
4675 assert(se
->convert
[0] == NULL
);
4676 /* convert struct here to be able to catch rt_dev string */
4677 field_types
= se
->field_types
;
4678 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
4679 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
4680 for (i
= 0; i
< se
->nb_fields
; i
++) {
4681 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
4682 assert(*field_types
== TYPE_PTRVOID
);
4683 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
4684 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
4685 if (*target_rt_dev_ptr
!= 0) {
4686 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
4687 tswapal(*target_rt_dev_ptr
));
4688 if (!*host_rt_dev_ptr
) {
4689 unlock_user(argptr
, arg
, 0);
4690 return -TARGET_EFAULT
;
4693 *host_rt_dev_ptr
= 0;
4698 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
4699 argptr
+ src_offsets
[i
],
4700 field_types
, THUNK_HOST
);
4702 unlock_user(argptr
, arg
, 0);
4704 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
4705 if (*host_rt_dev_ptr
!= 0) {
4706 unlock_user((void *)*host_rt_dev_ptr
,
4707 *target_rt_dev_ptr
, 0);
4712 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4713 int fd
, int cmd
, abi_long arg
)
4715 int sig
= target_to_host_signal(arg
);
4716 return get_errno(ioctl(fd
, ie
->host_cmd
, sig
));
4719 static IOCTLEntry ioctl_entries
[] = {
4720 #define IOCTL(cmd, access, ...) \
4721 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
4722 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
4723 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
4728 /* ??? Implement proper locking for ioctls. */
4729 /* do_ioctl() Must return target values and target errnos. */
4730 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
4732 const IOCTLEntry
*ie
;
4733 const argtype
*arg_type
;
4735 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
4741 if (ie
->target_cmd
== 0) {
4742 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
4743 return -TARGET_ENOSYS
;
4745 if (ie
->target_cmd
== cmd
)
4749 arg_type
= ie
->arg_type
;
4751 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
4754 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
4757 switch(arg_type
[0]) {
4760 ret
= get_errno(ioctl(fd
, ie
->host_cmd
));
4764 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, arg
));
4768 target_size
= thunk_type_size(arg_type
, 0);
4769 switch(ie
->access
) {
4771 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
4772 if (!is_error(ret
)) {
4773 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4775 return -TARGET_EFAULT
;
4776 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
4777 unlock_user(argptr
, arg
, target_size
);
4781 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4783 return -TARGET_EFAULT
;
4784 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4785 unlock_user(argptr
, arg
, 0);
4786 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
4790 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4792 return -TARGET_EFAULT
;
4793 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4794 unlock_user(argptr
, arg
, 0);
4795 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
4796 if (!is_error(ret
)) {
4797 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4799 return -TARGET_EFAULT
;
4800 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
4801 unlock_user(argptr
, arg
, target_size
);
4807 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
4808 (long)cmd
, arg_type
[0]);
4809 ret
= -TARGET_ENOSYS
;
4815 static const bitmask_transtbl iflag_tbl
[] = {
4816 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
4817 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
4818 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
4819 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
4820 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
4821 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
4822 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
4823 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
4824 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
4825 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
4826 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
4827 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
4828 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
4829 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
4833 static const bitmask_transtbl oflag_tbl
[] = {
4834 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
4835 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
4836 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
4837 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
4838 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
4839 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
4840 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
4841 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
4842 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
4843 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
4844 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
4845 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
4846 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
4847 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
4848 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
4849 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
4850 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
4851 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
4852 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
4853 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
4854 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
4855 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
4856 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
4857 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
4861 static const bitmask_transtbl cflag_tbl
[] = {
4862 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
4863 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
4864 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
4865 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
4866 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
4867 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
4868 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
4869 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
4870 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
4871 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
4872 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
4873 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
4874 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
4875 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
4876 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
4877 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
4878 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
4879 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
4880 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
4881 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
4882 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
4883 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
4884 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
4885 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
4886 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
4887 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
4888 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
4889 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
4890 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
4891 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
4892 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
4896 static const bitmask_transtbl lflag_tbl
[] = {
4897 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
4898 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
4899 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
4900 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
4901 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
4902 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
4903 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
4904 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
4905 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
4906 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
4907 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
4908 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
4909 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
4910 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
4911 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
4915 static void target_to_host_termios (void *dst
, const void *src
)
4917 struct host_termios
*host
= dst
;
4918 const struct target_termios
*target
= src
;
4921 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
4923 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
4925 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
4927 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
4928 host
->c_line
= target
->c_line
;
4930 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
4931 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
4932 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
4933 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
4934 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
4935 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
4936 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
4937 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
4938 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
4939 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
4940 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
4941 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
4942 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
4943 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
4944 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
4945 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
4946 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
4947 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
4950 static void host_to_target_termios (void *dst
, const void *src
)
4952 struct target_termios
*target
= dst
;
4953 const struct host_termios
*host
= src
;
4956 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
4958 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
4960 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
4962 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
4963 target
->c_line
= host
->c_line
;
4965 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
4966 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
4967 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
4968 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
4969 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
4970 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
4971 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
4972 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
4973 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
4974 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
4975 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
4976 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
4977 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
4978 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
4979 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
4980 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
4981 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
4982 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
4985 static const StructEntry struct_termios_def
= {
4986 .convert
= { host_to_target_termios
, target_to_host_termios
},
4987 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
4988 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
4991 static bitmask_transtbl mmap_flags_tbl
[] = {
4992 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
4993 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
4994 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
4995 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
, MAP_ANONYMOUS
, MAP_ANONYMOUS
},
4996 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
, MAP_GROWSDOWN
, MAP_GROWSDOWN
},
4997 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
, MAP_DENYWRITE
, MAP_DENYWRITE
},
4998 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
, MAP_EXECUTABLE
, MAP_EXECUTABLE
},
4999 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
5000 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
, MAP_NORESERVE
,
5005 #if defined(TARGET_I386)
5007 /* NOTE: there is really one LDT for all the threads */
5008 static uint8_t *ldt_table
;
5010 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
5017 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
5018 if (size
> bytecount
)
5020 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
5022 return -TARGET_EFAULT
;
5023 /* ??? Should this by byteswapped? */
5024 memcpy(p
, ldt_table
, size
);
5025 unlock_user(p
, ptr
, size
);
5029 /* XXX: add locking support */
5030 static abi_long
write_ldt(CPUX86State
*env
,
5031 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
5033 struct target_modify_ldt_ldt_s ldt_info
;
5034 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5035 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5036 int seg_not_present
, useable
, lm
;
5037 uint32_t *lp
, entry_1
, entry_2
;
5039 if (bytecount
!= sizeof(ldt_info
))
5040 return -TARGET_EINVAL
;
5041 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
5042 return -TARGET_EFAULT
;
5043 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5044 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5045 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5046 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5047 unlock_user_struct(target_ldt_info
, ptr
, 0);
5049 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
5050 return -TARGET_EINVAL
;
5051 seg_32bit
= ldt_info
.flags
& 1;
5052 contents
= (ldt_info
.flags
>> 1) & 3;
5053 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5054 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5055 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5056 useable
= (ldt_info
.flags
>> 6) & 1;
5060 lm
= (ldt_info
.flags
>> 7) & 1;
5062 if (contents
== 3) {
5064 return -TARGET_EINVAL
;
5065 if (seg_not_present
== 0)
5066 return -TARGET_EINVAL
;
5068 /* allocate the LDT */
5070 env
->ldt
.base
= target_mmap(0,
5071 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
5072 PROT_READ
|PROT_WRITE
,
5073 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
5074 if (env
->ldt
.base
== -1)
5075 return -TARGET_ENOMEM
;
5076 memset(g2h(env
->ldt
.base
), 0,
5077 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
5078 env
->ldt
.limit
= 0xffff;
5079 ldt_table
= g2h(env
->ldt
.base
);
5082 /* NOTE: same code as Linux kernel */
5083 /* Allow LDTs to be cleared by the user. */
5084 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5087 read_exec_only
== 1 &&
5089 limit_in_pages
== 0 &&
5090 seg_not_present
== 1 &&
5098 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5099 (ldt_info
.limit
& 0x0ffff);
5100 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5101 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5102 (ldt_info
.limit
& 0xf0000) |
5103 ((read_exec_only
^ 1) << 9) |
5105 ((seg_not_present
^ 1) << 15) |
5107 (limit_in_pages
<< 23) |
5111 entry_2
|= (useable
<< 20);
5113 /* Install the new entry ... */
5115 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
5116 lp
[0] = tswap32(entry_1
);
5117 lp
[1] = tswap32(entry_2
);
5121 /* specific and weird i386 syscalls */
5122 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
5123 unsigned long bytecount
)
5129 ret
= read_ldt(ptr
, bytecount
);
5132 ret
= write_ldt(env
, ptr
, bytecount
, 1);
5135 ret
= write_ldt(env
, ptr
, bytecount
, 0);
5138 ret
= -TARGET_ENOSYS
;
5144 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5145 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5147 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5148 struct target_modify_ldt_ldt_s ldt_info
;
5149 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5150 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5151 int seg_not_present
, useable
, lm
;
5152 uint32_t *lp
, entry_1
, entry_2
;
5155 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5156 if (!target_ldt_info
)
5157 return -TARGET_EFAULT
;
5158 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5159 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5160 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5161 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5162 if (ldt_info
.entry_number
== -1) {
5163 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
5164 if (gdt_table
[i
] == 0) {
5165 ldt_info
.entry_number
= i
;
5166 target_ldt_info
->entry_number
= tswap32(i
);
5171 unlock_user_struct(target_ldt_info
, ptr
, 1);
5173 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
5174 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
5175 return -TARGET_EINVAL
;
5176 seg_32bit
= ldt_info
.flags
& 1;
5177 contents
= (ldt_info
.flags
>> 1) & 3;
5178 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5179 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5180 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5181 useable
= (ldt_info
.flags
>> 6) & 1;
5185 lm
= (ldt_info
.flags
>> 7) & 1;
5188 if (contents
== 3) {
5189 if (seg_not_present
== 0)
5190 return -TARGET_EINVAL
;
5193 /* NOTE: same code as Linux kernel */
5194 /* Allow LDTs to be cleared by the user. */
5195 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5196 if ((contents
== 0 &&
5197 read_exec_only
== 1 &&
5199 limit_in_pages
== 0 &&
5200 seg_not_present
== 1 &&
5208 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5209 (ldt_info
.limit
& 0x0ffff);
5210 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5211 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5212 (ldt_info
.limit
& 0xf0000) |
5213 ((read_exec_only
^ 1) << 9) |
5215 ((seg_not_present
^ 1) << 15) |
5217 (limit_in_pages
<< 23) |
5222 /* Install the new entry ... */
5224 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
5225 lp
[0] = tswap32(entry_1
);
5226 lp
[1] = tswap32(entry_2
);
5230 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5232 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5233 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5234 uint32_t base_addr
, limit
, flags
;
5235 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
5236 int seg_not_present
, useable
, lm
;
5237 uint32_t *lp
, entry_1
, entry_2
;
5239 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5240 if (!target_ldt_info
)
5241 return -TARGET_EFAULT
;
5242 idx
= tswap32(target_ldt_info
->entry_number
);
5243 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
5244 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
5245 unlock_user_struct(target_ldt_info
, ptr
, 1);
5246 return -TARGET_EINVAL
;
5248 lp
= (uint32_t *)(gdt_table
+ idx
);
5249 entry_1
= tswap32(lp
[0]);
5250 entry_2
= tswap32(lp
[1]);
5252 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
5253 contents
= (entry_2
>> 10) & 3;
5254 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
5255 seg_32bit
= (entry_2
>> 22) & 1;
5256 limit_in_pages
= (entry_2
>> 23) & 1;
5257 useable
= (entry_2
>> 20) & 1;
5261 lm
= (entry_2
>> 21) & 1;
5263 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
5264 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
5265 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
5266 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
5267 base_addr
= (entry_1
>> 16) |
5268 (entry_2
& 0xff000000) |
5269 ((entry_2
& 0xff) << 16);
5270 target_ldt_info
->base_addr
= tswapal(base_addr
);
5271 target_ldt_info
->limit
= tswap32(limit
);
5272 target_ldt_info
->flags
= tswap32(flags
);
5273 unlock_user_struct(target_ldt_info
, ptr
, 1);
5276 #endif /* TARGET_I386 && TARGET_ABI32 */
5278 #ifndef TARGET_ABI32
5279 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
5286 case TARGET_ARCH_SET_GS
:
5287 case TARGET_ARCH_SET_FS
:
5288 if (code
== TARGET_ARCH_SET_GS
)
5292 cpu_x86_load_seg(env
, idx
, 0);
5293 env
->segs
[idx
].base
= addr
;
5295 case TARGET_ARCH_GET_GS
:
5296 case TARGET_ARCH_GET_FS
:
5297 if (code
== TARGET_ARCH_GET_GS
)
5301 val
= env
->segs
[idx
].base
;
5302 if (put_user(val
, addr
, abi_ulong
))
5303 ret
= -TARGET_EFAULT
;
5306 ret
= -TARGET_EINVAL
;
5313 #endif /* defined(TARGET_I386) */
5315 #define NEW_STACK_SIZE 0x40000
5318 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
5321 pthread_mutex_t mutex
;
5322 pthread_cond_t cond
;
5325 abi_ulong child_tidptr
;
5326 abi_ulong parent_tidptr
;
5330 static void *clone_func(void *arg
)
5332 new_thread_info
*info
= arg
;
5337 rcu_register_thread();
5339 cpu
= ENV_GET_CPU(env
);
5341 ts
= (TaskState
*)cpu
->opaque
;
5342 info
->tid
= gettid();
5343 cpu
->host_tid
= info
->tid
;
5345 if (info
->child_tidptr
)
5346 put_user_u32(info
->tid
, info
->child_tidptr
);
5347 if (info
->parent_tidptr
)
5348 put_user_u32(info
->tid
, info
->parent_tidptr
);
5349 /* Enable signals. */
5350 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
5351 /* Signal to the parent that we're ready. */
5352 pthread_mutex_lock(&info
->mutex
);
5353 pthread_cond_broadcast(&info
->cond
);
5354 pthread_mutex_unlock(&info
->mutex
);
5355 /* Wait until the parent has finshed initializing the tls state. */
5356 pthread_mutex_lock(&clone_lock
);
5357 pthread_mutex_unlock(&clone_lock
);
5363 /* do_fork() Must return host values and target errnos (unlike most
5364 do_*() functions). */
5365 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
5366 abi_ulong parent_tidptr
, target_ulong newtls
,
5367 abi_ulong child_tidptr
)
5369 CPUState
*cpu
= ENV_GET_CPU(env
);
5373 CPUArchState
*new_env
;
5374 unsigned int nptl_flags
;
5377 /* Emulate vfork() with fork() */
5378 if (flags
& CLONE_VFORK
)
5379 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
5381 if (flags
& CLONE_VM
) {
5382 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
5383 new_thread_info info
;
5384 pthread_attr_t attr
;
5386 ts
= g_new0(TaskState
, 1);
5387 init_task_state(ts
);
5388 /* we create a new CPU instance. */
5389 new_env
= cpu_copy(env
);
5390 /* Init regs that differ from the parent. */
5391 cpu_clone_regs(new_env
, newsp
);
5392 new_cpu
= ENV_GET_CPU(new_env
);
5393 new_cpu
->opaque
= ts
;
5394 ts
->bprm
= parent_ts
->bprm
;
5395 ts
->info
= parent_ts
->info
;
5396 ts
->signal_mask
= parent_ts
->signal_mask
;
5398 flags
&= ~CLONE_NPTL_FLAGS2
;
5400 if (nptl_flags
& CLONE_CHILD_CLEARTID
) {
5401 ts
->child_tidptr
= child_tidptr
;
5404 if (nptl_flags
& CLONE_SETTLS
)
5405 cpu_set_tls (new_env
, newtls
);
5407 /* Grab a mutex so that thread setup appears atomic. */
5408 pthread_mutex_lock(&clone_lock
);
5410 memset(&info
, 0, sizeof(info
));
5411 pthread_mutex_init(&info
.mutex
, NULL
);
5412 pthread_mutex_lock(&info
.mutex
);
5413 pthread_cond_init(&info
.cond
, NULL
);
5415 if (nptl_flags
& CLONE_CHILD_SETTID
)
5416 info
.child_tidptr
= child_tidptr
;
5417 if (nptl_flags
& CLONE_PARENT_SETTID
)
5418 info
.parent_tidptr
= parent_tidptr
;
5420 ret
= pthread_attr_init(&attr
);
5421 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
5422 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
5423 /* It is not safe to deliver signals until the child has finished
5424 initializing, so temporarily block all signals. */
5425 sigfillset(&sigmask
);
5426 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
5428 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
5429 /* TODO: Free new CPU state if thread creation failed. */
5431 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
5432 pthread_attr_destroy(&attr
);
5434 /* Wait for the child to initialize. */
5435 pthread_cond_wait(&info
.cond
, &info
.mutex
);
5437 if (flags
& CLONE_PARENT_SETTID
)
5438 put_user_u32(ret
, parent_tidptr
);
5442 pthread_mutex_unlock(&info
.mutex
);
5443 pthread_cond_destroy(&info
.cond
);
5444 pthread_mutex_destroy(&info
.mutex
);
5445 pthread_mutex_unlock(&clone_lock
);
5447 /* if no CLONE_VM, we consider it is a fork */
5448 if ((flags
& ~(CSIGNAL
| CLONE_NPTL_FLAGS2
)) != 0) {
5449 return -TARGET_EINVAL
;
5452 if (block_signals()) {
5453 return -TARGET_ERESTARTSYS
;
5459 /* Child Process. */
5461 cpu_clone_regs(env
, newsp
);
5463 /* There is a race condition here. The parent process could
5464 theoretically read the TID in the child process before the child
5465 tid is set. This would require using either ptrace
5466 (not implemented) or having *_tidptr to point at a shared memory
5467 mapping. We can't repeat the spinlock hack used above because
5468 the child process gets its own copy of the lock. */
5469 if (flags
& CLONE_CHILD_SETTID
)
5470 put_user_u32(gettid(), child_tidptr
);
5471 if (flags
& CLONE_PARENT_SETTID
)
5472 put_user_u32(gettid(), parent_tidptr
);
5473 ts
= (TaskState
*)cpu
->opaque
;
5474 if (flags
& CLONE_SETTLS
)
5475 cpu_set_tls (env
, newtls
);
5476 if (flags
& CLONE_CHILD_CLEARTID
)
5477 ts
->child_tidptr
= child_tidptr
;
5485 /* warning : doesn't handle linux specific flags... */
5486 static int target_to_host_fcntl_cmd(int cmd
)
5489 case TARGET_F_DUPFD
:
5490 case TARGET_F_GETFD
:
5491 case TARGET_F_SETFD
:
5492 case TARGET_F_GETFL
:
5493 case TARGET_F_SETFL
:
5495 case TARGET_F_GETLK
:
5497 case TARGET_F_SETLK
:
5499 case TARGET_F_SETLKW
:
5501 case TARGET_F_GETOWN
:
5503 case TARGET_F_SETOWN
:
5505 case TARGET_F_GETSIG
:
5507 case TARGET_F_SETSIG
:
5509 #if TARGET_ABI_BITS == 32
5510 case TARGET_F_GETLK64
:
5512 case TARGET_F_SETLK64
:
5514 case TARGET_F_SETLKW64
:
5517 case TARGET_F_SETLEASE
:
5519 case TARGET_F_GETLEASE
:
5521 #ifdef F_DUPFD_CLOEXEC
5522 case TARGET_F_DUPFD_CLOEXEC
:
5523 return F_DUPFD_CLOEXEC
;
5525 case TARGET_F_NOTIFY
:
5528 case TARGET_F_GETOWN_EX
:
5532 case TARGET_F_SETOWN_EX
:
5536 return -TARGET_EINVAL
;
5538 return -TARGET_EINVAL
;
5541 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
5542 static const bitmask_transtbl flock_tbl
[] = {
5543 TRANSTBL_CONVERT(F_RDLCK
),
5544 TRANSTBL_CONVERT(F_WRLCK
),
5545 TRANSTBL_CONVERT(F_UNLCK
),
5546 TRANSTBL_CONVERT(F_EXLCK
),
5547 TRANSTBL_CONVERT(F_SHLCK
),
5551 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
5554 struct target_flock
*target_fl
;
5555 struct flock64 fl64
;
5556 struct target_flock64
*target_fl64
;
5558 struct f_owner_ex fox
;
5559 struct target_f_owner_ex
*target_fox
;
5562 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
5564 if (host_cmd
== -TARGET_EINVAL
)
5568 case TARGET_F_GETLK
:
5569 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
5570 return -TARGET_EFAULT
;
5572 target_to_host_bitmask(tswap16(target_fl
->l_type
), flock_tbl
);
5573 fl
.l_whence
= tswap16(target_fl
->l_whence
);
5574 fl
.l_start
= tswapal(target_fl
->l_start
);
5575 fl
.l_len
= tswapal(target_fl
->l_len
);
5576 fl
.l_pid
= tswap32(target_fl
->l_pid
);
5577 unlock_user_struct(target_fl
, arg
, 0);
5578 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
5580 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg
, 0))
5581 return -TARGET_EFAULT
;
5583 host_to_target_bitmask(tswap16(fl
.l_type
), flock_tbl
);
5584 target_fl
->l_whence
= tswap16(fl
.l_whence
);
5585 target_fl
->l_start
= tswapal(fl
.l_start
);
5586 target_fl
->l_len
= tswapal(fl
.l_len
);
5587 target_fl
->l_pid
= tswap32(fl
.l_pid
);
5588 unlock_user_struct(target_fl
, arg
, 1);
5592 case TARGET_F_SETLK
:
5593 case TARGET_F_SETLKW
:
5594 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
5595 return -TARGET_EFAULT
;
5597 target_to_host_bitmask(tswap16(target_fl
->l_type
), flock_tbl
);
5598 fl
.l_whence
= tswap16(target_fl
->l_whence
);
5599 fl
.l_start
= tswapal(target_fl
->l_start
);
5600 fl
.l_len
= tswapal(target_fl
->l_len
);
5601 fl
.l_pid
= tswap32(target_fl
->l_pid
);
5602 unlock_user_struct(target_fl
, arg
, 0);
5603 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
5606 case TARGET_F_GETLK64
:
5607 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
5608 return -TARGET_EFAULT
;
5610 target_to_host_bitmask(tswap16(target_fl64
->l_type
), flock_tbl
) >> 1;
5611 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
5612 fl64
.l_start
= tswap64(target_fl64
->l_start
);
5613 fl64
.l_len
= tswap64(target_fl64
->l_len
);
5614 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
5615 unlock_user_struct(target_fl64
, arg
, 0);
5616 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
5618 if (!lock_user_struct(VERIFY_WRITE
, target_fl64
, arg
, 0))
5619 return -TARGET_EFAULT
;
5620 target_fl64
->l_type
=
5621 host_to_target_bitmask(tswap16(fl64
.l_type
), flock_tbl
) >> 1;
5622 target_fl64
->l_whence
= tswap16(fl64
.l_whence
);
5623 target_fl64
->l_start
= tswap64(fl64
.l_start
);
5624 target_fl64
->l_len
= tswap64(fl64
.l_len
);
5625 target_fl64
->l_pid
= tswap32(fl64
.l_pid
);
5626 unlock_user_struct(target_fl64
, arg
, 1);
5629 case TARGET_F_SETLK64
:
5630 case TARGET_F_SETLKW64
:
5631 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
5632 return -TARGET_EFAULT
;
5634 target_to_host_bitmask(tswap16(target_fl64
->l_type
), flock_tbl
) >> 1;
5635 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
5636 fl64
.l_start
= tswap64(target_fl64
->l_start
);
5637 fl64
.l_len
= tswap64(target_fl64
->l_len
);
5638 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
5639 unlock_user_struct(target_fl64
, arg
, 0);
5640 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
5643 case TARGET_F_GETFL
:
5644 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
5646 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
5650 case TARGET_F_SETFL
:
5651 ret
= get_errno(fcntl(fd
, host_cmd
, target_to_host_bitmask(arg
, fcntl_flags_tbl
)));
5655 case TARGET_F_GETOWN_EX
:
5656 ret
= get_errno(fcntl(fd
, host_cmd
, &fox
));
5658 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
5659 return -TARGET_EFAULT
;
5660 target_fox
->type
= tswap32(fox
.type
);
5661 target_fox
->pid
= tswap32(fox
.pid
);
5662 unlock_user_struct(target_fox
, arg
, 1);
5668 case TARGET_F_SETOWN_EX
:
5669 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
5670 return -TARGET_EFAULT
;
5671 fox
.type
= tswap32(target_fox
->type
);
5672 fox
.pid
= tswap32(target_fox
->pid
);
5673 unlock_user_struct(target_fox
, arg
, 0);
5674 ret
= get_errno(fcntl(fd
, host_cmd
, &fox
));
5678 case TARGET_F_SETOWN
:
5679 case TARGET_F_GETOWN
:
5680 case TARGET_F_SETSIG
:
5681 case TARGET_F_GETSIG
:
5682 case TARGET_F_SETLEASE
:
5683 case TARGET_F_GETLEASE
:
5684 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
5688 ret
= get_errno(fcntl(fd
, cmd
, arg
));
5696 static inline int high2lowuid(int uid
)
5704 static inline int high2lowgid(int gid
)
5712 static inline int low2highuid(int uid
)
5714 if ((int16_t)uid
== -1)
5720 static inline int low2highgid(int gid
)
5722 if ((int16_t)gid
== -1)
5727 static inline int tswapid(int id
)
5732 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
5734 #else /* !USE_UID16 */
5735 static inline int high2lowuid(int uid
)
5739 static inline int high2lowgid(int gid
)
5743 static inline int low2highuid(int uid
)
5747 static inline int low2highgid(int gid
)
5751 static inline int tswapid(int id
)
5756 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
5758 #endif /* USE_UID16 */
5760 /* We must do direct syscalls for setting UID/GID, because we want to
5761 * implement the Linux system call semantics of "change only for this thread",
5762 * not the libc/POSIX semantics of "change for all threads in process".
5763 * (See http://ewontfix.com/17/ for more details.)
5764 * We use the 32-bit version of the syscalls if present; if it is not
5765 * then either the host architecture supports 32-bit UIDs natively with
5766 * the standard syscall, or the 16-bit UID is the best we can do.
5768 #ifdef __NR_setuid32
5769 #define __NR_sys_setuid __NR_setuid32
5771 #define __NR_sys_setuid __NR_setuid
5773 #ifdef __NR_setgid32
5774 #define __NR_sys_setgid __NR_setgid32
5776 #define __NR_sys_setgid __NR_setgid
5778 #ifdef __NR_setresuid32
5779 #define __NR_sys_setresuid __NR_setresuid32
5781 #define __NR_sys_setresuid __NR_setresuid
5783 #ifdef __NR_setresgid32
5784 #define __NR_sys_setresgid __NR_setresgid32
5786 #define __NR_sys_setresgid __NR_setresgid
5789 _syscall1(int, sys_setuid
, uid_t
, uid
)
5790 _syscall1(int, sys_setgid
, gid_t
, gid
)
5791 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
5792 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
5794 void syscall_init(void)
5797 const argtype
*arg_type
;
5801 thunk_init(STRUCT_MAX
);
5803 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
5804 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
5805 #include "syscall_types.h"
5807 #undef STRUCT_SPECIAL
5809 /* Build target_to_host_errno_table[] table from
5810 * host_to_target_errno_table[]. */
5811 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
5812 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
5815 /* we patch the ioctl size if necessary. We rely on the fact that
5816 no ioctl has all the bits at '1' in the size field */
5818 while (ie
->target_cmd
!= 0) {
5819 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
5820 TARGET_IOC_SIZEMASK
) {
5821 arg_type
= ie
->arg_type
;
5822 if (arg_type
[0] != TYPE_PTR
) {
5823 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
5828 size
= thunk_type_size(arg_type
, 0);
5829 ie
->target_cmd
= (ie
->target_cmd
&
5830 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
5831 (size
<< TARGET_IOC_SIZESHIFT
);
5834 /* automatic consistency check if same arch */
5835 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
5836 (defined(__x86_64__) && defined(TARGET_X86_64))
5837 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
5838 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
5839 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#if TARGET_ABI_BITS == 32
/* Reassemble a 64-bit offset that a 32-bit guest ABI passed as two
 * 32-bit syscall arguments; which argument carries the high half
 * depends on the target's endianness. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABIs pass the offset in one register; the second word is unused. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* truncate64: some 32-bit ABIs require 64-bit syscall arguments in an
 * aligned register pair, which shifts the offset halves up by one
 * argument slot; compensate before reassembling the offset.
 * NOTE(review): the parameter list (arg2..arg4) was lost in a garbled
 * extract and has been reconstructed — verify against upstream.  */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* ftruncate64: same register-pair alignment adjustment as
 * target_truncate64, applied to the fd-based variant.
 * NOTE(review): the parameter list (arg2..arg4) was lost in a garbled
 * extract and has been reconstructed — verify against upstream.  */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
5890 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
5891 abi_ulong target_addr
)
5893 struct target_timespec
*target_ts
;
5895 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
5896 return -TARGET_EFAULT
;
5897 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
5898 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
5899 unlock_user_struct(target_ts
, target_addr
, 0);
5903 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
5904 struct timespec
*host_ts
)
5906 struct target_timespec
*target_ts
;
5908 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
5909 return -TARGET_EFAULT
;
5910 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
5911 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
5912 unlock_user_struct(target_ts
, target_addr
, 1);
5916 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
5917 abi_ulong target_addr
)
5919 struct target_itimerspec
*target_itspec
;
5921 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
5922 return -TARGET_EFAULT
;
5925 host_itspec
->it_interval
.tv_sec
=
5926 tswapal(target_itspec
->it_interval
.tv_sec
);
5927 host_itspec
->it_interval
.tv_nsec
=
5928 tswapal(target_itspec
->it_interval
.tv_nsec
);
5929 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
5930 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
5932 unlock_user_struct(target_itspec
, target_addr
, 1);
5936 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
5937 struct itimerspec
*host_its
)
5939 struct target_itimerspec
*target_itspec
;
5941 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
5942 return -TARGET_EFAULT
;
5945 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
5946 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
5948 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
5949 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
5951 unlock_user_struct(target_itspec
, target_addr
, 0);
5955 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
5956 abi_ulong target_addr
)
5958 struct target_sigevent
*target_sevp
;
5960 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
5961 return -TARGET_EFAULT
;
5964 /* This union is awkward on 64 bit systems because it has a 32 bit
5965 * integer and a pointer in it; we follow the conversion approach
5966 * used for handling sigval types in signal.c so the guest should get
5967 * the correct value back even if we did a 64 bit byteswap and it's
5968 * using the 32 bit integer.
5970 host_sevp
->sigev_value
.sival_ptr
=
5971 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
5972 host_sevp
->sigev_signo
=
5973 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
5974 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
5975 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
5977 unlock_user_struct(target_sevp
, target_addr
, 1);
#if defined(TARGET_NR_mlockall)
/* Map the guest's mlockall() flag bits onto the host's MCL_* values. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }
    return result;
}
#endif
5996 static inline abi_long
host_to_target_stat64(void *cpu_env
,
5997 abi_ulong target_addr
,
5998 struct stat
*host_st
)
6000 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6001 if (((CPUARMState
*)cpu_env
)->eabi
) {
6002 struct target_eabi_stat64
*target_st
;
6004 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
6005 return -TARGET_EFAULT
;
6006 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
6007 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
6008 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
6009 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6010 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
6012 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
6013 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
6014 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
6015 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
6016 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
6017 __put_user(host_st
->st_size
, &target_st
->st_size
);
6018 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
6019 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
6020 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
6021 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
6022 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
6023 unlock_user_struct(target_st
, target_addr
, 1);
6027 #if defined(TARGET_HAS_STRUCT_STAT64)
6028 struct target_stat64
*target_st
;
6030 struct target_stat
*target_st
;
6033 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
6034 return -TARGET_EFAULT
;
6035 memset(target_st
, 0, sizeof(*target_st
));
6036 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
6037 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
6038 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6039 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
6041 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
6042 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
6043 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
6044 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
6045 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
6046 /* XXX: better use of kernel struct */
6047 __put_user(host_st
->st_size
, &target_st
->st_size
);
6048 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
6049 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
6050 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
6051 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
6052 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
6053 unlock_user_struct(target_st
, target_addr
, 1);
6059 /* ??? Using host futex calls even when target atomic operations
6060 are not really atomic probably breaks things. However implementing
6061 futexes locally would make futexes shared between multiple processes
6062 tricky. However they're probably useless because guest atomic
6063 operations won't work either. */
6064 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
6065 target_ulong uaddr2
, int val3
)
6067 struct timespec ts
, *pts
;
6070 /* ??? We assume FUTEX_* constants are the same on both host
6072 #ifdef FUTEX_CMD_MASK
6073 base_op
= op
& FUTEX_CMD_MASK
;
6079 case FUTEX_WAIT_BITSET
:
6082 target_to_host_timespec(pts
, timeout
);
6086 return get_errno(safe_futex(g2h(uaddr
), op
, tswap32(val
),
6089 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
6091 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
6093 case FUTEX_CMP_REQUEUE
:
6095 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6096 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6097 But the prototype takes a `struct timespec *'; insert casts
6098 to satisfy the compiler. We do not need to tswap TIMEOUT
6099 since it's not compared to guest memory. */
6100 pts
= (struct timespec
*)(uintptr_t) timeout
;
6101 return get_errno(safe_futex(g2h(uaddr
), op
, val
, pts
,
6103 (base_op
== FUTEX_CMP_REQUEUE
6107 return -TARGET_ENOSYS
;
6110 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6111 static abi_long
do_name_to_handle_at(abi_long dirfd
, abi_long pathname
,
6112 abi_long handle
, abi_long mount_id
,
6115 struct file_handle
*target_fh
;
6116 struct file_handle
*fh
;
6120 unsigned int size
, total_size
;
6122 if (get_user_s32(size
, handle
)) {
6123 return -TARGET_EFAULT
;
6126 name
= lock_user_string(pathname
);
6128 return -TARGET_EFAULT
;
6131 total_size
= sizeof(struct file_handle
) + size
;
6132 target_fh
= lock_user(VERIFY_WRITE
, handle
, total_size
, 0);
6134 unlock_user(name
, pathname
, 0);
6135 return -TARGET_EFAULT
;
6138 fh
= g_malloc0(total_size
);
6139 fh
->handle_bytes
= size
;
6141 ret
= get_errno(name_to_handle_at(dirfd
, path(name
), fh
, &mid
, flags
));
6142 unlock_user(name
, pathname
, 0);
6144 /* man name_to_handle_at(2):
6145 * Other than the use of the handle_bytes field, the caller should treat
6146 * the file_handle structure as an opaque data type
6149 memcpy(target_fh
, fh
, total_size
);
6150 target_fh
->handle_bytes
= tswap32(fh
->handle_bytes
);
6151 target_fh
->handle_type
= tswap32(fh
->handle_type
);
6153 unlock_user(target_fh
, handle
, total_size
);
6155 if (put_user_s32(mid
, mount_id
)) {
6156 return -TARGET_EFAULT
;
6164 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6165 static abi_long
do_open_by_handle_at(abi_long mount_fd
, abi_long handle
,
6168 struct file_handle
*target_fh
;
6169 struct file_handle
*fh
;
6170 unsigned int size
, total_size
;
6173 if (get_user_s32(size
, handle
)) {
6174 return -TARGET_EFAULT
;
6177 total_size
= sizeof(struct file_handle
) + size
;
6178 target_fh
= lock_user(VERIFY_READ
, handle
, total_size
, 1);
6180 return -TARGET_EFAULT
;
6183 fh
= g_memdup(target_fh
, total_size
);
6184 fh
->handle_bytes
= size
;
6185 fh
->handle_type
= tswap32(target_fh
->handle_type
);
6187 ret
= get_errno(open_by_handle_at(mount_fd
, fh
,
6188 target_to_host_bitmask(flags
, fcntl_flags_tbl
)));
6192 unlock_user(target_fh
, handle
, total_size
);
6198 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
/* signalfd siginfo conversion */

/*
 * Convert one host struct signalfd_siginfo to target representation.
 *
 * The signal number itself must be translated (host and target signal
 * numberings differ); every other field is only byte-swapped.  The
 * conversion may be performed in place (the caller passes the same buffer
 * as both tinfo and info), which is why the signal is translated before
 * any destination field is written.
 */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */

#ifdef BUS_MCEERR_AO
    /* NOTE(review): these three reads go through tinfo, the destination
     * buffer; that is only correct because the sole caller converts in
     * place (tinfo == info) — keep that invariant. */
    if (tinfo->ssi_signo == SIGBUS &&
        (tinfo->ssi_code == BUS_MCEERR_AR ||
         tinfo->ssi_code == BUS_MCEERR_AO)) {
        /* ssi_addr_lsb sits immediately after ssi_addr in the kernel's
         * layout even though sys/signalfd.h does not name it. */
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    tinfo->ssi_errno = tswap32(tinfo->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
6240 static abi_long
host_to_target_data_signalfd(void *buf
, size_t len
)
6244 for (i
= 0; i
< len
; i
+= sizeof(struct signalfd_siginfo
)) {
6245 host_to_target_signalfd_siginfo(buf
+ i
, buf
+ i
);
6251 static TargetFdTrans target_signalfd_trans
= {
6252 .host_to_target_data
= host_to_target_data_signalfd
,
6255 static abi_long
do_signalfd4(int fd
, abi_long mask
, int flags
)
6258 target_sigset_t
*target_mask
;
6262 if (flags
& ~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
)) {
6263 return -TARGET_EINVAL
;
6265 if (!lock_user_struct(VERIFY_READ
, target_mask
, mask
, 1)) {
6266 return -TARGET_EFAULT
;
6269 target_to_host_sigset(&host_mask
, target_mask
);
6271 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
6273 ret
= get_errno(signalfd(fd
, &host_mask
, host_flags
));
6275 fd_trans_register(ret
, &target_signalfd_trans
);
6278 unlock_user_struct(target_mask
, mask
, 0);
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    /* Killed by a signal: the signal number lives in the low 7 bits. */
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    /* Stopped: the stop signal lives in bits 8..15. */
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    /* Normal exit and everything else pass through unchanged. */
    return status;
}
/*
 * Fake /proc/self/cmdline: stream the real one into FD, but drop the
 * first NUL-terminated string (the path to the qemu binary itself) so the
 * guest sees its own command line first.
 *
 * Returns the result of closing the source fd on success, -1 with errno
 * set on read/write failure.
 */
static int open_self_cmdline(void *cpu_env, int fd)
{
    int fd_orig = -1;
    bool word_skipped = false;

    fd_orig = open("/proc/self/cmdline", O_RDONLY);
    if (fd_orig < 0) {
        return fd_orig;
    }

    while (true) {
        ssize_t nb_read;
        char buf[128];
        char *cp_buf = buf;

        nb_read = read(fd_orig, buf, sizeof(buf));
        if (nb_read < 0) {
            int e = errno;
            fd_orig = close(fd_orig);
            errno = e;
            return -1;
        } else if (nb_read == 0) {
            break;
        }

        if (!word_skipped) {
            /* Skip the first string, which is the path to qemu-*-static
               instead of the actual command. */
            /* Bug fix: scan only the nb_read bytes actually read.  Scanning
             * sizeof(buf) examined uninitialized tail bytes after a short
             * read and could "find" a stale NUL past the end of the data. */
            cp_buf = memchr(buf, 0, nb_read);
            if (cp_buf) {
                /* Null byte found, skip one string */
                cp_buf++;
                nb_read -= cp_buf - buf;
                word_skipped = true;
            }
        }

        if (word_skipped) {
            if (write(fd, cp_buf, nb_read) != nb_read) {
                int e = errno;
                close(fd_orig);
                errno = e;
                return -1;
            }
        }
    }

    return close(fd_orig);
}
6348 static int open_self_maps(void *cpu_env
, int fd
)
6350 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
6351 TaskState
*ts
= cpu
->opaque
;
6357 fp
= fopen("/proc/self/maps", "r");
6362 while ((read
= getline(&line
, &len
, fp
)) != -1) {
6363 int fields
, dev_maj
, dev_min
, inode
;
6364 uint64_t min
, max
, offset
;
6365 char flag_r
, flag_w
, flag_x
, flag_p
;
6366 char path
[512] = "";
6367 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
6368 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
6369 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
6371 if ((fields
< 10) || (fields
> 11)) {
6374 if (h2g_valid(min
)) {
6375 int flags
= page_get_flags(h2g(min
));
6376 max
= h2g_valid(max
- 1) ? max
: (uintptr_t)g2h(GUEST_ADDR_MAX
);
6377 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
6380 if (h2g(min
) == ts
->info
->stack_limit
) {
6381 pstrcpy(path
, sizeof(path
), " [stack]");
6383 dprintf(fd
, TARGET_ABI_FMT_lx
"-" TARGET_ABI_FMT_lx
6384 " %c%c%c%c %08" PRIx64
" %02x:%02x %d %s%s\n",
6385 h2g(min
), h2g(max
- 1) + 1, flag_r
, flag_w
,
6386 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
6387 path
[0] ? " " : "", path
);
6397 static int open_self_stat(void *cpu_env
, int fd
)
6399 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
6400 TaskState
*ts
= cpu
->opaque
;
6401 abi_ulong start_stack
= ts
->info
->start_stack
;
6404 for (i
= 0; i
< 44; i
++) {
6412 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
6413 } else if (i
== 1) {
6415 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
6416 } else if (i
== 27) {
6419 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
6421 /* for the rest, there is MasterCard */
6422 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
6426 if (write(fd
, buf
, len
) != len
) {
6434 static int open_self_auxv(void *cpu_env
, int fd
)
6436 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
6437 TaskState
*ts
= cpu
->opaque
;
6438 abi_ulong auxv
= ts
->info
->saved_auxv
;
6439 abi_ulong len
= ts
->info
->auxv_len
;
6443 * Auxiliary vector is stored in target process stack.
6444 * read in whole auxv vector and copy it to file
6446 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
6450 r
= write(fd
, ptr
, len
);
6457 lseek(fd
, 0, SEEK_SET
);
6458 unlock_user(ptr
, auxv
, len
);
/*
 * Return 1 if FILENAME names the /proc entry ENTRY of this very process,
 * i.e. "/proc/self/<entry>" or "/proc/<our-pid>/<entry>"; 0 otherwise.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (strncmp(filename, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    filename += strlen("/proc/");

    if (strncmp(filename, "self/", strlen("self/")) == 0) {
        filename += strlen("self/");
    } else if (*filename >= '1' && *filename <= '9') {
        /* A numeric directory only matches if it is our own pid. */
        char myself[80];
        snprintf(myself, sizeof(myself), "%d/", getpid());
        if (strncmp(filename, myself, strlen(myself)) != 0) {
            return 0;
        }
        filename += strlen(myself);
    } else {
        return 0;
    }

    return strcmp(filename, entry) == 0;
}
6488 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-match comparator for fake-open entries given as absolute paths. */
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}
6494 static int open_net_route(void *cpu_env
, int fd
)
6501 fp
= fopen("/proc/net/route", "r");
6508 read
= getline(&line
, &len
, fp
);
6509 dprintf(fd
, "%s", line
);
6513 while ((read
= getline(&line
, &len
, fp
)) != -1) {
6515 uint32_t dest
, gw
, mask
;
6516 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
6517 sscanf(line
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6518 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
6519 &mask
, &mtu
, &window
, &irtt
);
6520 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6521 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
6522 metric
, tswap32(mask
), mtu
, window
, irtt
);
6532 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
6535 const char *filename
;
6536 int (*fill
)(void *cpu_env
, int fd
);
6537 int (*cmp
)(const char *s1
, const char *s2
);
6539 const struct fake_open
*fake_open
;
6540 static const struct fake_open fakes
[] = {
6541 { "maps", open_self_maps
, is_proc_myself
},
6542 { "stat", open_self_stat
, is_proc_myself
},
6543 { "auxv", open_self_auxv
, is_proc_myself
},
6544 { "cmdline", open_self_cmdline
, is_proc_myself
},
6545 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6546 { "/proc/net/route", open_net_route
, is_proc
},
6548 { NULL
, NULL
, NULL
}
6551 if (is_proc_myself(pathname
, "exe")) {
6552 int execfd
= qemu_getauxval(AT_EXECFD
);
6553 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
6556 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
6557 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
6562 if (fake_open
->filename
) {
6564 char filename
[PATH_MAX
];
6567 /* create temporary file to map stat to */
6568 tmpdir
= getenv("TMPDIR");
6571 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
6572 fd
= mkstemp(filename
);
6578 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
6584 lseek(fd
, 0, SEEK_SET
);
6589 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
6592 #define TIMER_MAGIC 0x0caf0000
6593 #define TIMER_MAGIC_MASK 0xffff0000
6595 /* Convert QEMU provided timer ID back to internal 16bit index format */
6596 static target_timer_t
get_timer_id(abi_long arg
)
6598 target_timer_t timerid
= arg
;
6600 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
6601 return -TARGET_EINVAL
;
6606 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
6607 return -TARGET_EINVAL
;
6613 /* do_syscall() should always have a single exit point at the end so
6614 that actions, such as logging of syscall results, can be performed.
6615 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
6616 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
6617 abi_long arg2
, abi_long arg3
, abi_long arg4
,
6618 abi_long arg5
, abi_long arg6
, abi_long arg7
,
6621 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
6627 #if defined(DEBUG_ERESTARTSYS)
6628 /* Debug-only code for exercising the syscall-restart code paths
6629 * in the per-architecture cpu main loops: restart every syscall
6630 * the guest makes once before letting it through.
6637 return -TARGET_ERESTARTSYS
;
6643 gemu_log("syscall %d", num
);
6646 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6649 case TARGET_NR_exit
:
6650 /* In old applications this may be used to implement _exit(2).
6651 However in threaded applictions it is used for thread termination,
6652 and _exit_group is used for application termination.
6653 Do thread termination if we have more then one thread. */
6655 if (block_signals()) {
6656 ret
= -TARGET_ERESTARTSYS
;
6660 if (CPU_NEXT(first_cpu
)) {
6664 /* Remove the CPU from the list. */
6665 QTAILQ_REMOVE(&cpus
, cpu
, node
);
6668 if (ts
->child_tidptr
) {
6669 put_user_u32(0, ts
->child_tidptr
);
6670 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
6674 object_unref(OBJECT(cpu
));
6676 rcu_unregister_thread();
6682 gdb_exit(cpu_env
, arg1
);
6684 ret
= 0; /* avoid warning */
6686 case TARGET_NR_read
:
6690 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
6692 ret
= get_errno(safe_read(arg1
, p
, arg3
));
6694 fd_trans_host_to_target_data(arg1
)) {
6695 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
6697 unlock_user(p
, arg2
, ret
);
6700 case TARGET_NR_write
:
6701 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
6703 ret
= get_errno(safe_write(arg1
, p
, arg3
));
6704 unlock_user(p
, arg2
, 0);
6706 #ifdef TARGET_NR_open
6707 case TARGET_NR_open
:
6708 if (!(p
= lock_user_string(arg1
)))
6710 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
6711 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
6713 fd_trans_unregister(ret
);
6714 unlock_user(p
, arg1
, 0);
6717 case TARGET_NR_openat
:
6718 if (!(p
= lock_user_string(arg2
)))
6720 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
6721 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
6723 fd_trans_unregister(ret
);
6724 unlock_user(p
, arg2
, 0);
6726 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6727 case TARGET_NR_name_to_handle_at
:
6728 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
6731 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6732 case TARGET_NR_open_by_handle_at
:
6733 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
6734 fd_trans_unregister(ret
);
6737 case TARGET_NR_close
:
6738 fd_trans_unregister(arg1
);
6739 ret
= get_errno(close(arg1
));
6744 #ifdef TARGET_NR_fork
6745 case TARGET_NR_fork
:
6746 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
6749 #ifdef TARGET_NR_waitpid
6750 case TARGET_NR_waitpid
:
6753 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
6754 if (!is_error(ret
) && arg2
&& ret
6755 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
6760 #ifdef TARGET_NR_waitid
6761 case TARGET_NR_waitid
:
6765 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
6766 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
6767 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
6769 host_to_target_siginfo(p
, &info
);
6770 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
6775 #ifdef TARGET_NR_creat /* not on alpha */
6776 case TARGET_NR_creat
:
6777 if (!(p
= lock_user_string(arg1
)))
6779 ret
= get_errno(creat(p
, arg2
));
6780 fd_trans_unregister(ret
);
6781 unlock_user(p
, arg1
, 0);
6784 #ifdef TARGET_NR_link
6785 case TARGET_NR_link
:
6788 p
= lock_user_string(arg1
);
6789 p2
= lock_user_string(arg2
);
6791 ret
= -TARGET_EFAULT
;
6793 ret
= get_errno(link(p
, p2
));
6794 unlock_user(p2
, arg2
, 0);
6795 unlock_user(p
, arg1
, 0);
6799 #if defined(TARGET_NR_linkat)
6800 case TARGET_NR_linkat
:
6805 p
= lock_user_string(arg2
);
6806 p2
= lock_user_string(arg4
);
6808 ret
= -TARGET_EFAULT
;
6810 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
6811 unlock_user(p
, arg2
, 0);
6812 unlock_user(p2
, arg4
, 0);
6816 #ifdef TARGET_NR_unlink
6817 case TARGET_NR_unlink
:
6818 if (!(p
= lock_user_string(arg1
)))
6820 ret
= get_errno(unlink(p
));
6821 unlock_user(p
, arg1
, 0);
6824 #if defined(TARGET_NR_unlinkat)
6825 case TARGET_NR_unlinkat
:
6826 if (!(p
= lock_user_string(arg2
)))
6828 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
6829 unlock_user(p
, arg2
, 0);
6832 case TARGET_NR_execve
:
6834 char **argp
, **envp
;
6837 abi_ulong guest_argp
;
6838 abi_ulong guest_envp
;
6845 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
6846 if (get_user_ual(addr
, gp
))
6854 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
6855 if (get_user_ual(addr
, gp
))
6862 argp
= alloca((argc
+ 1) * sizeof(void *));
6863 envp
= alloca((envc
+ 1) * sizeof(void *));
6865 for (gp
= guest_argp
, q
= argp
; gp
;
6866 gp
+= sizeof(abi_ulong
), q
++) {
6867 if (get_user_ual(addr
, gp
))
6871 if (!(*q
= lock_user_string(addr
)))
6873 total_size
+= strlen(*q
) + 1;
6877 for (gp
= guest_envp
, q
= envp
; gp
;
6878 gp
+= sizeof(abi_ulong
), q
++) {
6879 if (get_user_ual(addr
, gp
))
6883 if (!(*q
= lock_user_string(addr
)))
6885 total_size
+= strlen(*q
) + 1;
6889 if (!(p
= lock_user_string(arg1
)))
6891 /* Although execve() is not an interruptible syscall it is
6892 * a special case where we must use the safe_syscall wrapper:
6893 * if we allow a signal to happen before we make the host
6894 * syscall then we will 'lose' it, because at the point of
6895 * execve the process leaves QEMU's control. So we use the
6896 * safe syscall wrapper to ensure that we either take the
6897 * signal as a guest signal, or else it does not happen
6898 * before the execve completes and makes it the other
6899 * program's problem.
6901 ret
= get_errno(safe_execve(p
, argp
, envp
));
6902 unlock_user(p
, arg1
, 0);
6907 ret
= -TARGET_EFAULT
;
6910 for (gp
= guest_argp
, q
= argp
; *q
;
6911 gp
+= sizeof(abi_ulong
), q
++) {
6912 if (get_user_ual(addr
, gp
)
6915 unlock_user(*q
, addr
, 0);
6917 for (gp
= guest_envp
, q
= envp
; *q
;
6918 gp
+= sizeof(abi_ulong
), q
++) {
6919 if (get_user_ual(addr
, gp
)
6922 unlock_user(*q
, addr
, 0);
6926 case TARGET_NR_chdir
:
6927 if (!(p
= lock_user_string(arg1
)))
6929 ret
= get_errno(chdir(p
));
6930 unlock_user(p
, arg1
, 0);
6932 #ifdef TARGET_NR_time
6933 case TARGET_NR_time
:
6936 ret
= get_errno(time(&host_time
));
6939 && put_user_sal(host_time
, arg1
))
6944 #ifdef TARGET_NR_mknod
6945 case TARGET_NR_mknod
:
6946 if (!(p
= lock_user_string(arg1
)))
6948 ret
= get_errno(mknod(p
, arg2
, arg3
));
6949 unlock_user(p
, arg1
, 0);
6952 #if defined(TARGET_NR_mknodat)
6953 case TARGET_NR_mknodat
:
6954 if (!(p
= lock_user_string(arg2
)))
6956 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
6957 unlock_user(p
, arg2
, 0);
6960 #ifdef TARGET_NR_chmod
6961 case TARGET_NR_chmod
:
6962 if (!(p
= lock_user_string(arg1
)))
6964 ret
= get_errno(chmod(p
, arg2
));
6965 unlock_user(p
, arg1
, 0);
6968 #ifdef TARGET_NR_break
6969 case TARGET_NR_break
:
6972 #ifdef TARGET_NR_oldstat
6973 case TARGET_NR_oldstat
:
6976 case TARGET_NR_lseek
:
6977 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
6979 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
6980 /* Alpha specific */
6981 case TARGET_NR_getxpid
:
6982 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
6983 ret
= get_errno(getpid());
6986 #ifdef TARGET_NR_getpid
6987 case TARGET_NR_getpid
:
6988 ret
= get_errno(getpid());
6991 case TARGET_NR_mount
:
6993 /* need to look at the data field */
6997 p
= lock_user_string(arg1
);
7005 p2
= lock_user_string(arg2
);
7008 unlock_user(p
, arg1
, 0);
7014 p3
= lock_user_string(arg3
);
7017 unlock_user(p
, arg1
, 0);
7019 unlock_user(p2
, arg2
, 0);
7026 /* FIXME - arg5 should be locked, but it isn't clear how to
7027 * do that since it's not guaranteed to be a NULL-terminated
7031 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
7033 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
7035 ret
= get_errno(ret
);
7038 unlock_user(p
, arg1
, 0);
7040 unlock_user(p2
, arg2
, 0);
7042 unlock_user(p3
, arg3
, 0);
7046 #ifdef TARGET_NR_umount
7047 case TARGET_NR_umount
:
7048 if (!(p
= lock_user_string(arg1
)))
7050 ret
= get_errno(umount(p
));
7051 unlock_user(p
, arg1
, 0);
7054 #ifdef TARGET_NR_stime /* not on alpha */
7055 case TARGET_NR_stime
:
7058 if (get_user_sal(host_time
, arg1
))
7060 ret
= get_errno(stime(&host_time
));
7064 case TARGET_NR_ptrace
:
7066 #ifdef TARGET_NR_alarm /* not on alpha */
7067 case TARGET_NR_alarm
:
7071 #ifdef TARGET_NR_oldfstat
7072 case TARGET_NR_oldfstat
:
7075 #ifdef TARGET_NR_pause /* not on alpha */
7076 case TARGET_NR_pause
:
7077 if (!block_signals()) {
7078 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
7080 ret
= -TARGET_EINTR
;
7083 #ifdef TARGET_NR_utime
7084 case TARGET_NR_utime
:
7086 struct utimbuf tbuf
, *host_tbuf
;
7087 struct target_utimbuf
*target_tbuf
;
7089 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
7091 tbuf
.actime
= tswapal(target_tbuf
->actime
);
7092 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
7093 unlock_user_struct(target_tbuf
, arg2
, 0);
7098 if (!(p
= lock_user_string(arg1
)))
7100 ret
= get_errno(utime(p
, host_tbuf
));
7101 unlock_user(p
, arg1
, 0);
7105 #ifdef TARGET_NR_utimes
7106 case TARGET_NR_utimes
:
7108 struct timeval
*tvp
, tv
[2];
7110 if (copy_from_user_timeval(&tv
[0], arg2
)
7111 || copy_from_user_timeval(&tv
[1],
7112 arg2
+ sizeof(struct target_timeval
)))
7118 if (!(p
= lock_user_string(arg1
)))
7120 ret
= get_errno(utimes(p
, tvp
));
7121 unlock_user(p
, arg1
, 0);
7125 #if defined(TARGET_NR_futimesat)
7126 case TARGET_NR_futimesat
:
7128 struct timeval
*tvp
, tv
[2];
7130 if (copy_from_user_timeval(&tv
[0], arg3
)
7131 || copy_from_user_timeval(&tv
[1],
7132 arg3
+ sizeof(struct target_timeval
)))
7138 if (!(p
= lock_user_string(arg2
)))
7140 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
7141 unlock_user(p
, arg2
, 0);
7145 #ifdef TARGET_NR_stty
7146 case TARGET_NR_stty
:
7149 #ifdef TARGET_NR_gtty
7150 case TARGET_NR_gtty
:
7153 #ifdef TARGET_NR_access
7154 case TARGET_NR_access
:
7155 if (!(p
= lock_user_string(arg1
)))
7157 ret
= get_errno(access(path(p
), arg2
));
7158 unlock_user(p
, arg1
, 0);
7161 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7162 case TARGET_NR_faccessat
:
7163 if (!(p
= lock_user_string(arg2
)))
7165 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
7166 unlock_user(p
, arg2
, 0);
7169 #ifdef TARGET_NR_nice /* not on alpha */
7170 case TARGET_NR_nice
:
7171 ret
= get_errno(nice(arg1
));
7174 #ifdef TARGET_NR_ftime
7175 case TARGET_NR_ftime
:
7178 case TARGET_NR_sync
:
7182 case TARGET_NR_kill
:
7183 ret
= get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
7185 #ifdef TARGET_NR_rename
7186 case TARGET_NR_rename
:
7189 p
= lock_user_string(arg1
);
7190 p2
= lock_user_string(arg2
);
7192 ret
= -TARGET_EFAULT
;
7194 ret
= get_errno(rename(p
, p2
));
7195 unlock_user(p2
, arg2
, 0);
7196 unlock_user(p
, arg1
, 0);
7200 #if defined(TARGET_NR_renameat)
7201 case TARGET_NR_renameat
:
7204 p
= lock_user_string(arg2
);
7205 p2
= lock_user_string(arg4
);
7207 ret
= -TARGET_EFAULT
;
7209 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
7210 unlock_user(p2
, arg4
, 0);
7211 unlock_user(p
, arg2
, 0);
7215 #ifdef TARGET_NR_mkdir
7216 case TARGET_NR_mkdir
:
7217 if (!(p
= lock_user_string(arg1
)))
7219 ret
= get_errno(mkdir(p
, arg2
));
7220 unlock_user(p
, arg1
, 0);
7223 #if defined(TARGET_NR_mkdirat)
7224 case TARGET_NR_mkdirat
:
7225 if (!(p
= lock_user_string(arg2
)))
7227 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
7228 unlock_user(p
, arg2
, 0);
7231 #ifdef TARGET_NR_rmdir
7232 case TARGET_NR_rmdir
:
7233 if (!(p
= lock_user_string(arg1
)))
7235 ret
= get_errno(rmdir(p
));
7236 unlock_user(p
, arg1
, 0);
7240 ret
= get_errno(dup(arg1
));
7242 fd_trans_dup(arg1
, ret
);
7245 #ifdef TARGET_NR_pipe
7246 case TARGET_NR_pipe
:
7247 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
7250 #ifdef TARGET_NR_pipe2
7251 case TARGET_NR_pipe2
:
7252 ret
= do_pipe(cpu_env
, arg1
,
7253 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
7256 case TARGET_NR_times
:
7258 struct target_tms
*tmsp
;
7260 ret
= get_errno(times(&tms
));
7262 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
7265 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
7266 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
7267 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
7268 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
7271 ret
= host_to_target_clock_t(ret
);
7274 #ifdef TARGET_NR_prof
7275 case TARGET_NR_prof
:
7278 #ifdef TARGET_NR_signal
7279 case TARGET_NR_signal
:
7282 case TARGET_NR_acct
:
7284 ret
= get_errno(acct(NULL
));
7286 if (!(p
= lock_user_string(arg1
)))
7288 ret
= get_errno(acct(path(p
)));
7289 unlock_user(p
, arg1
, 0);
7292 #ifdef TARGET_NR_umount2
7293 case TARGET_NR_umount2
:
7294 if (!(p
= lock_user_string(arg1
)))
7296 ret
= get_errno(umount2(p
, arg2
));
7297 unlock_user(p
, arg1
, 0);
7300 #ifdef TARGET_NR_lock
7301 case TARGET_NR_lock
:
7304 case TARGET_NR_ioctl
:
7305 ret
= do_ioctl(arg1
, arg2
, arg3
);
7307 case TARGET_NR_fcntl
:
7308 ret
= do_fcntl(arg1
, arg2
, arg3
);
7310 #ifdef TARGET_NR_mpx
7314 case TARGET_NR_setpgid
:
7315 ret
= get_errno(setpgid(arg1
, arg2
));
7317 #ifdef TARGET_NR_ulimit
7318 case TARGET_NR_ulimit
:
7321 #ifdef TARGET_NR_oldolduname
7322 case TARGET_NR_oldolduname
:
7325 case TARGET_NR_umask
:
7326 ret
= get_errno(umask(arg1
));
7328 case TARGET_NR_chroot
:
7329 if (!(p
= lock_user_string(arg1
)))
7331 ret
= get_errno(chroot(p
));
7332 unlock_user(p
, arg1
, 0);
7334 #ifdef TARGET_NR_ustat
7335 case TARGET_NR_ustat
:
7338 #ifdef TARGET_NR_dup2
7339 case TARGET_NR_dup2
:
7340 ret
= get_errno(dup2(arg1
, arg2
));
7342 fd_trans_dup(arg1
, arg2
);
7346 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7347 case TARGET_NR_dup3
:
7348 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
7350 fd_trans_dup(arg1
, arg2
);
7354 #ifdef TARGET_NR_getppid /* not on alpha */
7355 case TARGET_NR_getppid
:
7356 ret
= get_errno(getppid());
7359 #ifdef TARGET_NR_getpgrp
7360 case TARGET_NR_getpgrp
:
7361 ret
= get_errno(getpgrp());
7364 case TARGET_NR_setsid
:
7365 ret
= get_errno(setsid());
7367 #ifdef TARGET_NR_sigaction
7368 case TARGET_NR_sigaction
:
7370 #if defined(TARGET_ALPHA)
7371 struct target_sigaction act
, oact
, *pact
= 0;
7372 struct target_old_sigaction
*old_act
;
7374 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
7376 act
._sa_handler
= old_act
->_sa_handler
;
7377 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
7378 act
.sa_flags
= old_act
->sa_flags
;
7379 act
.sa_restorer
= 0;
7380 unlock_user_struct(old_act
, arg2
, 0);
7383 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7384 if (!is_error(ret
) && arg3
) {
7385 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
7387 old_act
->_sa_handler
= oact
._sa_handler
;
7388 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
7389 old_act
->sa_flags
= oact
.sa_flags
;
7390 unlock_user_struct(old_act
, arg3
, 1);
7392 #elif defined(TARGET_MIPS)
7393 struct target_sigaction act
, oact
, *pact
, *old_act
;
7396 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
7398 act
._sa_handler
= old_act
->_sa_handler
;
7399 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
7400 act
.sa_flags
= old_act
->sa_flags
;
7401 unlock_user_struct(old_act
, arg2
, 0);
7407 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7409 if (!is_error(ret
) && arg3
) {
7410 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
7412 old_act
->_sa_handler
= oact
._sa_handler
;
7413 old_act
->sa_flags
= oact
.sa_flags
;
7414 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
7415 old_act
->sa_mask
.sig
[1] = 0;
7416 old_act
->sa_mask
.sig
[2] = 0;
7417 old_act
->sa_mask
.sig
[3] = 0;
7418 unlock_user_struct(old_act
, arg3
, 1);
7421 struct target_old_sigaction
*old_act
;
7422 struct target_sigaction act
, oact
, *pact
;
7424 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
7426 act
._sa_handler
= old_act
->_sa_handler
;
7427 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
7428 act
.sa_flags
= old_act
->sa_flags
;
7429 act
.sa_restorer
= old_act
->sa_restorer
;
7430 unlock_user_struct(old_act
, arg2
, 0);
7435 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7436 if (!is_error(ret
) && arg3
) {
7437 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
7439 old_act
->_sa_handler
= oact
._sa_handler
;
7440 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
7441 old_act
->sa_flags
= oact
.sa_flags
;
7442 old_act
->sa_restorer
= oact
.sa_restorer
;
7443 unlock_user_struct(old_act
, arg3
, 1);
7449 case TARGET_NR_rt_sigaction
:
7451 #if defined(TARGET_ALPHA)
7452 struct target_sigaction act
, oact
, *pact
= 0;
7453 struct target_rt_sigaction
*rt_act
;
7454 /* ??? arg4 == sizeof(sigset_t). */
7456 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
7458 act
._sa_handler
= rt_act
->_sa_handler
;
7459 act
.sa_mask
= rt_act
->sa_mask
;
7460 act
.sa_flags
= rt_act
->sa_flags
;
7461 act
.sa_restorer
= arg5
;
7462 unlock_user_struct(rt_act
, arg2
, 0);
7465 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7466 if (!is_error(ret
) && arg3
) {
7467 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
7469 rt_act
->_sa_handler
= oact
._sa_handler
;
7470 rt_act
->sa_mask
= oact
.sa_mask
;
7471 rt_act
->sa_flags
= oact
.sa_flags
;
7472 unlock_user_struct(rt_act
, arg3
, 1);
7475 struct target_sigaction
*act
;
7476 struct target_sigaction
*oact
;
7479 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
7484 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
7485 ret
= -TARGET_EFAULT
;
7486 goto rt_sigaction_fail
;
7490 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
7493 unlock_user_struct(act
, arg2
, 0);
7495 unlock_user_struct(oact
, arg3
, 1);
7499 #ifdef TARGET_NR_sgetmask /* not on alpha */
7500 case TARGET_NR_sgetmask
:
7503 abi_ulong target_set
;
7504 ret
= do_sigprocmask(0, NULL
, &cur_set
);
7506 host_to_target_old_sigset(&target_set
, &cur_set
);
7512 #ifdef TARGET_NR_ssetmask /* not on alpha */
7513 case TARGET_NR_ssetmask
:
7515 sigset_t set
, oset
, cur_set
;
7516 abi_ulong target_set
= arg1
;
7517 /* We only have one word of the new mask so we must read
7518 * the rest of it with do_sigprocmask() and OR in this word.
7519 * We are guaranteed that a do_sigprocmask() that only queries
7520 * the signal mask will not fail.
7522 ret
= do_sigprocmask(0, NULL
, &cur_set
);
7524 target_to_host_old_sigset(&set
, &target_set
);
7525 sigorset(&set
, &set
, &cur_set
);
7526 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
7528 host_to_target_old_sigset(&target_set
, &oset
);
7534 #ifdef TARGET_NR_sigprocmask
7535 case TARGET_NR_sigprocmask
:
7537 #if defined(TARGET_ALPHA)
7538 sigset_t set
, oldset
;
7543 case TARGET_SIG_BLOCK
:
7546 case TARGET_SIG_UNBLOCK
:
7549 case TARGET_SIG_SETMASK
:
7553 ret
= -TARGET_EINVAL
;
7557 target_to_host_old_sigset(&set
, &mask
);
7559 ret
= do_sigprocmask(how
, &set
, &oldset
);
7560 if (!is_error(ret
)) {
7561 host_to_target_old_sigset(&mask
, &oldset
);
7563 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
7566 sigset_t set
, oldset
, *set_ptr
;
7571 case TARGET_SIG_BLOCK
:
7574 case TARGET_SIG_UNBLOCK
:
7577 case TARGET_SIG_SETMASK
:
7581 ret
= -TARGET_EINVAL
;
7584 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
7586 target_to_host_old_sigset(&set
, p
);
7587 unlock_user(p
, arg2
, 0);
7593 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
7594 if (!is_error(ret
) && arg3
) {
7595 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
7597 host_to_target_old_sigset(p
, &oldset
);
7598 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
7604 case TARGET_NR_rt_sigprocmask
:
7607 sigset_t set
, oldset
, *set_ptr
;
7611 case TARGET_SIG_BLOCK
:
7614 case TARGET_SIG_UNBLOCK
:
7617 case TARGET_SIG_SETMASK
:
7621 ret
= -TARGET_EINVAL
;
7624 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
7626 target_to_host_sigset(&set
, p
);
7627 unlock_user(p
, arg2
, 0);
7633 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
7634 if (!is_error(ret
) && arg3
) {
7635 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
7637 host_to_target_sigset(p
, &oldset
);
7638 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
7642 #ifdef TARGET_NR_sigpending
7643 case TARGET_NR_sigpending
:
7646 ret
= get_errno(sigpending(&set
));
7647 if (!is_error(ret
)) {
7648 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
7650 host_to_target_old_sigset(p
, &set
);
7651 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
7656 case TARGET_NR_rt_sigpending
:
7659 ret
= get_errno(sigpending(&set
));
7660 if (!is_error(ret
)) {
7661 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
7663 host_to_target_sigset(p
, &set
);
7664 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
7668 #ifdef TARGET_NR_sigsuspend
7669 case TARGET_NR_sigsuspend
:
7671 TaskState
*ts
= cpu
->opaque
;
7672 #if defined(TARGET_ALPHA)
7673 abi_ulong mask
= arg1
;
7674 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
7676 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
7678 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
7679 unlock_user(p
, arg1
, 0);
7681 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
7683 if (ret
!= -TARGET_ERESTARTSYS
) {
7684 ts
->in_sigsuspend
= 1;
7689 case TARGET_NR_rt_sigsuspend
:
7691 TaskState
*ts
= cpu
->opaque
;
7692 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
7694 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
7695 unlock_user(p
, arg1
, 0);
7696 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
7698 if (ret
!= -TARGET_ERESTARTSYS
) {
7699 ts
->in_sigsuspend
= 1;
7703 case TARGET_NR_rt_sigtimedwait
:
7706 struct timespec uts
, *puts
;
7709 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
7711 target_to_host_sigset(&set
, p
);
7712 unlock_user(p
, arg1
, 0);
7715 target_to_host_timespec(puts
, arg3
);
7719 ret
= get_errno(sigtimedwait(&set
, &uinfo
, puts
));
7720 if (!is_error(ret
)) {
7722 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
7727 host_to_target_siginfo(p
, &uinfo
);
7728 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
7730 ret
= host_to_target_signal(ret
);
7734 case TARGET_NR_rt_sigqueueinfo
:
7737 if (!(p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_sigset_t
), 1)))
7739 target_to_host_siginfo(&uinfo
, p
);
7740 unlock_user(p
, arg1
, 0);
7741 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
7744 #ifdef TARGET_NR_sigreturn
7745 case TARGET_NR_sigreturn
:
7746 if (block_signals()) {
7747 ret
= -TARGET_ERESTARTSYS
;
7749 ret
= do_sigreturn(cpu_env
);
7753 case TARGET_NR_rt_sigreturn
:
7754 if (block_signals()) {
7755 ret
= -TARGET_ERESTARTSYS
;
7757 ret
= do_rt_sigreturn(cpu_env
);
7760 case TARGET_NR_sethostname
:
7761 if (!(p
= lock_user_string(arg1
)))
7763 ret
= get_errno(sethostname(p
, arg2
));
7764 unlock_user(p
, arg1
, 0);
7766 case TARGET_NR_setrlimit
:
7768 int resource
= target_to_host_resource(arg1
);
7769 struct target_rlimit
*target_rlim
;
7771 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
7773 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
7774 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
7775 unlock_user_struct(target_rlim
, arg2
, 0);
7776 ret
= get_errno(setrlimit(resource
, &rlim
));
7779 case TARGET_NR_getrlimit
:
7781 int resource
= target_to_host_resource(arg1
);
7782 struct target_rlimit
*target_rlim
;
7785 ret
= get_errno(getrlimit(resource
, &rlim
));
7786 if (!is_error(ret
)) {
7787 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
7789 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
7790 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
7791 unlock_user_struct(target_rlim
, arg2
, 1);
7795 case TARGET_NR_getrusage
:
7797 struct rusage rusage
;
7798 ret
= get_errno(getrusage(arg1
, &rusage
));
7799 if (!is_error(ret
)) {
7800 ret
= host_to_target_rusage(arg2
, &rusage
);
7804 case TARGET_NR_gettimeofday
:
7807 ret
= get_errno(gettimeofday(&tv
, NULL
));
7808 if (!is_error(ret
)) {
7809 if (copy_to_user_timeval(arg1
, &tv
))
7814 case TARGET_NR_settimeofday
:
7816 struct timeval tv
, *ptv
= NULL
;
7817 struct timezone tz
, *ptz
= NULL
;
7820 if (copy_from_user_timeval(&tv
, arg1
)) {
7827 if (copy_from_user_timezone(&tz
, arg2
)) {
7833 ret
= get_errno(settimeofday(ptv
, ptz
));
7836 #if defined(TARGET_NR_select)
7837 case TARGET_NR_select
:
7838 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
7839 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
7842 struct target_sel_arg_struct
*sel
;
7843 abi_ulong inp
, outp
, exp
, tvp
;
7846 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1))
7848 nsel
= tswapal(sel
->n
);
7849 inp
= tswapal(sel
->inp
);
7850 outp
= tswapal(sel
->outp
);
7851 exp
= tswapal(sel
->exp
);
7852 tvp
= tswapal(sel
->tvp
);
7853 unlock_user_struct(sel
, arg1
, 0);
7854 ret
= do_select(nsel
, inp
, outp
, exp
, tvp
);
7859 #ifdef TARGET_NR_pselect6
7860 case TARGET_NR_pselect6
:
7862 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
7863 fd_set rfds
, wfds
, efds
;
7864 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
7865 struct timespec ts
, *ts_ptr
;
7868 * The 6th arg is actually two args smashed together,
7869 * so we cannot use the C library.
7877 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
7878 target_sigset_t
*target_sigset
;
7886 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
7890 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
7894 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
7900 * This takes a timespec, and not a timeval, so we cannot
7901 * use the do_select() helper ...
7904 if (target_to_host_timespec(&ts
, ts_addr
)) {
7912 /* Extract the two packed args for the sigset */
7915 sig
.size
= SIGSET_T_SIZE
;
7917 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
7921 arg_sigset
= tswapal(arg7
[0]);
7922 arg_sigsize
= tswapal(arg7
[1]);
7923 unlock_user(arg7
, arg6
, 0);
7927 if (arg_sigsize
!= sizeof(*target_sigset
)) {
7928 /* Like the kernel, we enforce correct size sigsets */
7929 ret
= -TARGET_EINVAL
;
7932 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
7933 sizeof(*target_sigset
), 1);
7934 if (!target_sigset
) {
7937 target_to_host_sigset(&set
, target_sigset
);
7938 unlock_user(target_sigset
, arg_sigset
, 0);
7946 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
7949 if (!is_error(ret
)) {
7950 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
7952 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
7954 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
7957 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
7963 #ifdef TARGET_NR_symlink
7964 case TARGET_NR_symlink
:
7967 p
= lock_user_string(arg1
);
7968 p2
= lock_user_string(arg2
);
7970 ret
= -TARGET_EFAULT
;
7972 ret
= get_errno(symlink(p
, p2
));
7973 unlock_user(p2
, arg2
, 0);
7974 unlock_user(p
, arg1
, 0);
7978 #if defined(TARGET_NR_symlinkat)
7979 case TARGET_NR_symlinkat
:
7982 p
= lock_user_string(arg1
);
7983 p2
= lock_user_string(arg3
);
7985 ret
= -TARGET_EFAULT
;
7987 ret
= get_errno(symlinkat(p
, arg2
, p2
));
7988 unlock_user(p2
, arg3
, 0);
7989 unlock_user(p
, arg1
, 0);
7993 #ifdef TARGET_NR_oldlstat
7994 case TARGET_NR_oldlstat
:
7997 #ifdef TARGET_NR_readlink
7998 case TARGET_NR_readlink
:
8001 p
= lock_user_string(arg1
);
8002 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8004 ret
= -TARGET_EFAULT
;
8006 /* Short circuit this for the magic exe check. */
8007 ret
= -TARGET_EINVAL
;
8008 } else if (is_proc_myself((const char *)p
, "exe")) {
8009 char real
[PATH_MAX
], *temp
;
8010 temp
= realpath(exec_path
, real
);
8011 /* Return value is # of bytes that we wrote to the buffer. */
8013 ret
= get_errno(-1);
8015 /* Don't worry about sign mismatch as earlier mapping
8016 * logic would have thrown a bad address error. */
8017 ret
= MIN(strlen(real
), arg3
);
8018 /* We cannot NUL terminate the string. */
8019 memcpy(p2
, real
, ret
);
8022 ret
= get_errno(readlink(path(p
), p2
, arg3
));
8024 unlock_user(p2
, arg2
, ret
);
8025 unlock_user(p
, arg1
, 0);
8029 #if defined(TARGET_NR_readlinkat)
8030 case TARGET_NR_readlinkat
:
8033 p
= lock_user_string(arg2
);
8034 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
8036 ret
= -TARGET_EFAULT
;
8037 } else if (is_proc_myself((const char *)p
, "exe")) {
8038 char real
[PATH_MAX
], *temp
;
8039 temp
= realpath(exec_path
, real
);
8040 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
8041 snprintf((char *)p2
, arg4
, "%s", real
);
8043 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
8045 unlock_user(p2
, arg3
, ret
);
8046 unlock_user(p
, arg2
, 0);
8050 #ifdef TARGET_NR_uselib
8051 case TARGET_NR_uselib
:
8054 #ifdef TARGET_NR_swapon
8055 case TARGET_NR_swapon
:
8056 if (!(p
= lock_user_string(arg1
)))
8058 ret
= get_errno(swapon(p
, arg2
));
8059 unlock_user(p
, arg1
, 0);
8062 case TARGET_NR_reboot
:
8063 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
8064 /* arg4 must be ignored in all other cases */
8065 p
= lock_user_string(arg4
);
8069 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
8070 unlock_user(p
, arg4
, 0);
8072 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
8075 #ifdef TARGET_NR_readdir
8076 case TARGET_NR_readdir
:
8079 #ifdef TARGET_NR_mmap
8080 case TARGET_NR_mmap
:
8081 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8082 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8083 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8084 || defined(TARGET_S390X)
8087 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
8088 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
8096 unlock_user(v
, arg1
, 0);
8097 ret
= get_errno(target_mmap(v1
, v2
, v3
,
8098 target_to_host_bitmask(v4
, mmap_flags_tbl
),
8102 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
8103 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
8109 #ifdef TARGET_NR_mmap2
8110 case TARGET_NR_mmap2
:
8112 #define MMAP_SHIFT 12
8114 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
8115 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
8117 arg6
<< MMAP_SHIFT
));
8120 case TARGET_NR_munmap
:
8121 ret
= get_errno(target_munmap(arg1
, arg2
));
8123 case TARGET_NR_mprotect
:
8125 TaskState
*ts
= cpu
->opaque
;
8126 /* Special hack to detect libc making the stack executable. */
8127 if ((arg3
& PROT_GROWSDOWN
)
8128 && arg1
>= ts
->info
->stack_limit
8129 && arg1
<= ts
->info
->start_stack
) {
8130 arg3
&= ~PROT_GROWSDOWN
;
8131 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
8132 arg1
= ts
->info
->stack_limit
;
8135 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
8137 #ifdef TARGET_NR_mremap
8138 case TARGET_NR_mremap
:
8139 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
8142 /* ??? msync/mlock/munlock are broken for softmmu. */
8143 #ifdef TARGET_NR_msync
8144 case TARGET_NR_msync
:
8145 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
8148 #ifdef TARGET_NR_mlock
8149 case TARGET_NR_mlock
:
8150 ret
= get_errno(mlock(g2h(arg1
), arg2
));
8153 #ifdef TARGET_NR_munlock
8154 case TARGET_NR_munlock
:
8155 ret
= get_errno(munlock(g2h(arg1
), arg2
));
8158 #ifdef TARGET_NR_mlockall
8159 case TARGET_NR_mlockall
:
8160 ret
= get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
8163 #ifdef TARGET_NR_munlockall
8164 case TARGET_NR_munlockall
:
8165 ret
= get_errno(munlockall());
8168 case TARGET_NR_truncate
:
8169 if (!(p
= lock_user_string(arg1
)))
8171 ret
= get_errno(truncate(p
, arg2
));
8172 unlock_user(p
, arg1
, 0);
8174 case TARGET_NR_ftruncate
:
8175 ret
= get_errno(ftruncate(arg1
, arg2
));
8177 case TARGET_NR_fchmod
:
8178 ret
= get_errno(fchmod(arg1
, arg2
));
8180 #if defined(TARGET_NR_fchmodat)
8181 case TARGET_NR_fchmodat
:
8182 if (!(p
= lock_user_string(arg2
)))
8184 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
8185 unlock_user(p
, arg2
, 0);
8188 case TARGET_NR_getpriority
:
8189 /* Note that negative values are valid for getpriority, so we must
8190 differentiate based on errno settings. */
8192 ret
= getpriority(arg1
, arg2
);
8193 if (ret
== -1 && errno
!= 0) {
8194 ret
= -host_to_target_errno(errno
);
8198 /* Return value is the unbiased priority. Signal no error. */
8199 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
8201 /* Return value is a biased priority to avoid negative numbers. */
8205 case TARGET_NR_setpriority
:
8206 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
8208 #ifdef TARGET_NR_profil
8209 case TARGET_NR_profil
:
8212 case TARGET_NR_statfs
:
8213 if (!(p
= lock_user_string(arg1
)))
8215 ret
= get_errno(statfs(path(p
), &stfs
));
8216 unlock_user(p
, arg1
, 0);
8218 if (!is_error(ret
)) {
8219 struct target_statfs
*target_stfs
;
8221 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
8223 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
8224 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
8225 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
8226 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
8227 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
8228 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
8229 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
8230 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
8231 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
8232 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
8233 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
8234 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
8235 unlock_user_struct(target_stfs
, arg2
, 1);
8238 case TARGET_NR_fstatfs
:
8239 ret
= get_errno(fstatfs(arg1
, &stfs
));
8240 goto convert_statfs
;
8241 #ifdef TARGET_NR_statfs64
8242 case TARGET_NR_statfs64
:
8243 if (!(p
= lock_user_string(arg1
)))
8245 ret
= get_errno(statfs(path(p
), &stfs
));
8246 unlock_user(p
, arg1
, 0);
8248 if (!is_error(ret
)) {
8249 struct target_statfs64
*target_stfs
;
8251 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
8253 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
8254 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
8255 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
8256 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
8257 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
8258 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
8259 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
8260 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
8261 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
8262 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
8263 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
8264 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
8265 unlock_user_struct(target_stfs
, arg3
, 1);
8268 case TARGET_NR_fstatfs64
:
8269 ret
= get_errno(fstatfs(arg1
, &stfs
));
8270 goto convert_statfs64
;
8272 #ifdef TARGET_NR_ioperm
8273 case TARGET_NR_ioperm
:
8276 #ifdef TARGET_NR_socketcall
8277 case TARGET_NR_socketcall
:
8278 ret
= do_socketcall(arg1
, arg2
);
8281 #ifdef TARGET_NR_accept
8282 case TARGET_NR_accept
:
8283 ret
= do_accept4(arg1
, arg2
, arg3
, 0);
8286 #ifdef TARGET_NR_accept4
8287 case TARGET_NR_accept4
:
8288 #ifdef CONFIG_ACCEPT4
8289 ret
= do_accept4(arg1
, arg2
, arg3
, arg4
);
8295 #ifdef TARGET_NR_bind
8296 case TARGET_NR_bind
:
8297 ret
= do_bind(arg1
, arg2
, arg3
);
8300 #ifdef TARGET_NR_connect
8301 case TARGET_NR_connect
:
8302 ret
= do_connect(arg1
, arg2
, arg3
);
8305 #ifdef TARGET_NR_getpeername
8306 case TARGET_NR_getpeername
:
8307 ret
= do_getpeername(arg1
, arg2
, arg3
);
8310 #ifdef TARGET_NR_getsockname
8311 case TARGET_NR_getsockname
:
8312 ret
= do_getsockname(arg1
, arg2
, arg3
);
8315 #ifdef TARGET_NR_getsockopt
8316 case TARGET_NR_getsockopt
:
8317 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
8320 #ifdef TARGET_NR_listen
8321 case TARGET_NR_listen
:
8322 ret
= get_errno(listen(arg1
, arg2
));
8325 #ifdef TARGET_NR_recv
8326 case TARGET_NR_recv
:
8327 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
8330 #ifdef TARGET_NR_recvfrom
8331 case TARGET_NR_recvfrom
:
8332 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8335 #ifdef TARGET_NR_recvmsg
8336 case TARGET_NR_recvmsg
:
8337 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
8340 #ifdef TARGET_NR_send
8341 case TARGET_NR_send
:
8342 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
8345 #ifdef TARGET_NR_sendmsg
8346 case TARGET_NR_sendmsg
:
8347 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
8350 #ifdef TARGET_NR_sendmmsg
8351 case TARGET_NR_sendmmsg
:
8352 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
8354 case TARGET_NR_recvmmsg
:
8355 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
8358 #ifdef TARGET_NR_sendto
8359 case TARGET_NR_sendto
:
8360 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8363 #ifdef TARGET_NR_shutdown
8364 case TARGET_NR_shutdown
:
8365 ret
= get_errno(shutdown(arg1
, arg2
));
8368 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8369 case TARGET_NR_getrandom
:
8370 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
8374 ret
= get_errno(getrandom(p
, arg2
, arg3
));
8375 unlock_user(p
, arg1
, ret
);
8378 #ifdef TARGET_NR_socket
8379 case TARGET_NR_socket
:
8380 ret
= do_socket(arg1
, arg2
, arg3
);
8381 fd_trans_unregister(ret
);
8384 #ifdef TARGET_NR_socketpair
8385 case TARGET_NR_socketpair
:
8386 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
8389 #ifdef TARGET_NR_setsockopt
8390 case TARGET_NR_setsockopt
:
8391 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
8395 case TARGET_NR_syslog
:
8396 if (!(p
= lock_user_string(arg2
)))
8398 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
8399 unlock_user(p
, arg2
, 0);
8402 case TARGET_NR_setitimer
:
8404 struct itimerval value
, ovalue
, *pvalue
;
8408 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
8409 || copy_from_user_timeval(&pvalue
->it_value
,
8410 arg2
+ sizeof(struct target_timeval
)))
8415 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
8416 if (!is_error(ret
) && arg3
) {
8417 if (copy_to_user_timeval(arg3
,
8418 &ovalue
.it_interval
)
8419 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
8425 case TARGET_NR_getitimer
:
8427 struct itimerval value
;
8429 ret
= get_errno(getitimer(arg1
, &value
));
8430 if (!is_error(ret
) && arg2
) {
8431 if (copy_to_user_timeval(arg2
,
8433 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
8439 #ifdef TARGET_NR_stat
8440 case TARGET_NR_stat
:
8441 if (!(p
= lock_user_string(arg1
)))
8443 ret
= get_errno(stat(path(p
), &st
));
8444 unlock_user(p
, arg1
, 0);
8447 #ifdef TARGET_NR_lstat
8448 case TARGET_NR_lstat
:
8449 if (!(p
= lock_user_string(arg1
)))
8451 ret
= get_errno(lstat(path(p
), &st
));
8452 unlock_user(p
, arg1
, 0);
8455 case TARGET_NR_fstat
:
8457 ret
= get_errno(fstat(arg1
, &st
));
8458 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
8461 if (!is_error(ret
)) {
8462 struct target_stat
*target_st
;
8464 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
8466 memset(target_st
, 0, sizeof(*target_st
));
8467 __put_user(st
.st_dev
, &target_st
->st_dev
);
8468 __put_user(st
.st_ino
, &target_st
->st_ino
);
8469 __put_user(st
.st_mode
, &target_st
->st_mode
);
8470 __put_user(st
.st_uid
, &target_st
->st_uid
);
8471 __put_user(st
.st_gid
, &target_st
->st_gid
);
8472 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
8473 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
8474 __put_user(st
.st_size
, &target_st
->st_size
);
8475 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
8476 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
8477 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
8478 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
8479 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
8480 unlock_user_struct(target_st
, arg2
, 1);
8484 #ifdef TARGET_NR_olduname
8485 case TARGET_NR_olduname
:
8488 #ifdef TARGET_NR_iopl
8489 case TARGET_NR_iopl
:
8492 case TARGET_NR_vhangup
:
8493 ret
= get_errno(vhangup());
8495 #ifdef TARGET_NR_idle
8496 case TARGET_NR_idle
:
8499 #ifdef TARGET_NR_syscall
8500 case TARGET_NR_syscall
:
8501 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
8502 arg6
, arg7
, arg8
, 0);
8505 case TARGET_NR_wait4
:
8508 abi_long status_ptr
= arg2
;
8509 struct rusage rusage
, *rusage_ptr
;
8510 abi_ulong target_rusage
= arg4
;
8511 abi_long rusage_err
;
8513 rusage_ptr
= &rusage
;
8516 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
8517 if (!is_error(ret
)) {
8518 if (status_ptr
&& ret
) {
8519 status
= host_to_target_waitstatus(status
);
8520 if (put_user_s32(status
, status_ptr
))
8523 if (target_rusage
) {
8524 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
8532 #ifdef TARGET_NR_swapoff
8533 case TARGET_NR_swapoff
:
8534 if (!(p
= lock_user_string(arg1
)))
8536 ret
= get_errno(swapoff(p
));
8537 unlock_user(p
, arg1
, 0);
8540 case TARGET_NR_sysinfo
:
8542 struct target_sysinfo
*target_value
;
8543 struct sysinfo value
;
8544 ret
= get_errno(sysinfo(&value
));
8545 if (!is_error(ret
) && arg1
)
8547 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
8549 __put_user(value
.uptime
, &target_value
->uptime
);
8550 __put_user(value
.loads
[0], &target_value
->loads
[0]);
8551 __put_user(value
.loads
[1], &target_value
->loads
[1]);
8552 __put_user(value
.loads
[2], &target_value
->loads
[2]);
8553 __put_user(value
.totalram
, &target_value
->totalram
);
8554 __put_user(value
.freeram
, &target_value
->freeram
);
8555 __put_user(value
.sharedram
, &target_value
->sharedram
);
8556 __put_user(value
.bufferram
, &target_value
->bufferram
);
8557 __put_user(value
.totalswap
, &target_value
->totalswap
);
8558 __put_user(value
.freeswap
, &target_value
->freeswap
);
8559 __put_user(value
.procs
, &target_value
->procs
);
8560 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
8561 __put_user(value
.freehigh
, &target_value
->freehigh
);
8562 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
8563 unlock_user_struct(target_value
, arg1
, 1);
8567 #ifdef TARGET_NR_ipc
8569 ret
= do_ipc(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8572 #ifdef TARGET_NR_semget
8573 case TARGET_NR_semget
:
8574 ret
= get_errno(semget(arg1
, arg2
, arg3
));
8577 #ifdef TARGET_NR_semop
8578 case TARGET_NR_semop
:
8579 ret
= do_semop(arg1
, arg2
, arg3
);
8582 #ifdef TARGET_NR_semctl
8583 case TARGET_NR_semctl
:
8584 ret
= do_semctl(arg1
, arg2
, arg3
, arg4
);
8587 #ifdef TARGET_NR_msgctl
8588 case TARGET_NR_msgctl
:
8589 ret
= do_msgctl(arg1
, arg2
, arg3
);
8592 #ifdef TARGET_NR_msgget
8593 case TARGET_NR_msgget
:
8594 ret
= get_errno(msgget(arg1
, arg2
));
8597 #ifdef TARGET_NR_msgrcv
8598 case TARGET_NR_msgrcv
:
8599 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
8602 #ifdef TARGET_NR_msgsnd
8603 case TARGET_NR_msgsnd
:
8604 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
8607 #ifdef TARGET_NR_shmget
8608 case TARGET_NR_shmget
:
8609 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
8612 #ifdef TARGET_NR_shmctl
8613 case TARGET_NR_shmctl
:
8614 ret
= do_shmctl(arg1
, arg2
, arg3
);
8617 #ifdef TARGET_NR_shmat
8618 case TARGET_NR_shmat
:
8619 ret
= do_shmat(arg1
, arg2
, arg3
);
8622 #ifdef TARGET_NR_shmdt
8623 case TARGET_NR_shmdt
:
8624 ret
= do_shmdt(arg1
);
8627 case TARGET_NR_fsync
:
8628 ret
= get_errno(fsync(arg1
));
8630 case TARGET_NR_clone
:
8631 /* Linux manages to have three different orderings for its
8632 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
8633 * match the kernel's CONFIG_CLONE_* settings.
8634 * Microblaze is further special in that it uses a sixth
8635 * implicit argument to clone for the TLS pointer.
8637 #if defined(TARGET_MICROBLAZE)
8638 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
8639 #elif defined(TARGET_CLONE_BACKWARDS)
8640 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
8641 #elif defined(TARGET_CLONE_BACKWARDS2)
8642 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
8644 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
8647 #ifdef __NR_exit_group
8648 /* new thread calls */
8649 case TARGET_NR_exit_group
:
8653 gdb_exit(cpu_env
, arg1
);
8654 ret
= get_errno(exit_group(arg1
));
8657 case TARGET_NR_setdomainname
:
8658 if (!(p
= lock_user_string(arg1
)))
8660 ret
= get_errno(setdomainname(p
, arg2
));
8661 unlock_user(p
, arg1
, 0);
8663 case TARGET_NR_uname
:
8664 /* no need to transcode because we use the linux syscall */
8666 struct new_utsname
* buf
;
8668 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
8670 ret
= get_errno(sys_uname(buf
));
8671 if (!is_error(ret
)) {
8672 /* Overrite the native machine name with whatever is being
8674 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
8675 /* Allow the user to override the reported release. */
8676 if (qemu_uname_release
&& *qemu_uname_release
)
8677 strcpy (buf
->release
, qemu_uname_release
);
8679 unlock_user_struct(buf
, arg1
, 1);
8683 case TARGET_NR_modify_ldt
:
8684 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
8686 #if !defined(TARGET_X86_64)
8687 case TARGET_NR_vm86old
:
8689 case TARGET_NR_vm86
:
8690 ret
= do_vm86(cpu_env
, arg1
, arg2
);
8694 case TARGET_NR_adjtimex
:
8696 #ifdef TARGET_NR_create_module
8697 case TARGET_NR_create_module
:
8699 case TARGET_NR_init_module
:
8700 case TARGET_NR_delete_module
:
8701 #ifdef TARGET_NR_get_kernel_syms
8702 case TARGET_NR_get_kernel_syms
:
8705 case TARGET_NR_quotactl
:
8707 case TARGET_NR_getpgid
:
8708 ret
= get_errno(getpgid(arg1
));
8710 case TARGET_NR_fchdir
:
8711 ret
= get_errno(fchdir(arg1
));
8713 #ifdef TARGET_NR_bdflush /* not on x86_64 */
8714 case TARGET_NR_bdflush
:
8717 #ifdef TARGET_NR_sysfs
8718 case TARGET_NR_sysfs
:
8721 case TARGET_NR_personality
:
8722 ret
= get_errno(personality(arg1
));
8724 #ifdef TARGET_NR_afs_syscall
8725 case TARGET_NR_afs_syscall
:
8728 #ifdef TARGET_NR__llseek /* Not on alpha */
8729 case TARGET_NR__llseek
:
8732 #if !defined(__NR_llseek)
8733 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | arg3
, arg5
);
8735 ret
= get_errno(res
);
8740 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
8742 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
8748 #ifdef TARGET_NR_getdents
8749 case TARGET_NR_getdents
:
8750 #ifdef __NR_getdents
8751 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
8753 struct target_dirent
*target_dirp
;
8754 struct linux_dirent
*dirp
;
8755 abi_long count
= arg3
;
8757 dirp
= g_try_malloc(count
);
8759 ret
= -TARGET_ENOMEM
;
8763 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
8764 if (!is_error(ret
)) {
8765 struct linux_dirent
*de
;
8766 struct target_dirent
*tde
;
8768 int reclen
, treclen
;
8769 int count1
, tnamelen
;
8773 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
8777 reclen
= de
->d_reclen
;
8778 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
8779 assert(tnamelen
>= 0);
8780 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
8781 assert(count1
+ treclen
<= count
);
8782 tde
->d_reclen
= tswap16(treclen
);
8783 tde
->d_ino
= tswapal(de
->d_ino
);
8784 tde
->d_off
= tswapal(de
->d_off
);
8785 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
8786 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
8788 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
8792 unlock_user(target_dirp
, arg2
, ret
);
8798 struct linux_dirent
*dirp
;
8799 abi_long count
= arg3
;
8801 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
8803 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
8804 if (!is_error(ret
)) {
8805 struct linux_dirent
*de
;
8810 reclen
= de
->d_reclen
;
8813 de
->d_reclen
= tswap16(reclen
);
8814 tswapls(&de
->d_ino
);
8815 tswapls(&de
->d_off
);
8816 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
8820 unlock_user(dirp
, arg2
, ret
);
8824 /* Implement getdents in terms of getdents64 */
8826 struct linux_dirent64
*dirp
;
8827 abi_long count
= arg3
;
8829 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
8833 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
8834 if (!is_error(ret
)) {
8835 /* Convert the dirent64 structs to target dirent. We do this
8836 * in-place, since we can guarantee that a target_dirent is no
8837 * larger than a dirent64; however this means we have to be
8838 * careful to read everything before writing in the new format.
8840 struct linux_dirent64
*de
;
8841 struct target_dirent
*tde
;
8846 tde
= (struct target_dirent
*)dirp
;
8848 int namelen
, treclen
;
8849 int reclen
= de
->d_reclen
;
8850 uint64_t ino
= de
->d_ino
;
8851 int64_t off
= de
->d_off
;
8852 uint8_t type
= de
->d_type
;
8854 namelen
= strlen(de
->d_name
);
8855 treclen
= offsetof(struct target_dirent
, d_name
)
8857 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
8859 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
8860 tde
->d_ino
= tswapal(ino
);
8861 tde
->d_off
= tswapal(off
);
8862 tde
->d_reclen
= tswap16(treclen
);
8863 /* The target_dirent type is in what was formerly a padding
8864 * byte at the end of the structure:
8866 *(((char *)tde
) + treclen
- 1) = type
;
8868 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
8869 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
8875 unlock_user(dirp
, arg2
, ret
);
8879 #endif /* TARGET_NR_getdents */
8880 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8881 case TARGET_NR_getdents64
:
8883 struct linux_dirent64
*dirp
;
8884 abi_long count
= arg3
;
8885 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
8887 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
8888 if (!is_error(ret
)) {
8889 struct linux_dirent64
*de
;
8894 reclen
= de
->d_reclen
;
8897 de
->d_reclen
= tswap16(reclen
);
8898 tswap64s((uint64_t *)&de
->d_ino
);
8899 tswap64s((uint64_t *)&de
->d_off
);
8900 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
8904 unlock_user(dirp
, arg2
, ret
);
8907 #endif /* TARGET_NR_getdents64 */
8908 #if defined(TARGET_NR__newselect)
8909 case TARGET_NR__newselect
:
8910 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
8913 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
8914 # ifdef TARGET_NR_poll
8915 case TARGET_NR_poll
:
8917 # ifdef TARGET_NR_ppoll
8918 case TARGET_NR_ppoll
:
8921 struct target_pollfd
*target_pfd
;
8922 unsigned int nfds
= arg2
;
8930 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
8931 sizeof(struct target_pollfd
) * nfds
, 1);
8936 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
8937 for (i
= 0; i
< nfds
; i
++) {
8938 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
8939 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
8943 # ifdef TARGET_NR_ppoll
8944 if (num
== TARGET_NR_ppoll
) {
8945 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
8946 target_sigset_t
*target_set
;
8947 sigset_t _set
, *set
= &_set
;
8950 if (target_to_host_timespec(timeout_ts
, arg3
)) {
8951 unlock_user(target_pfd
, arg1
, 0);
8959 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
8961 unlock_user(target_pfd
, arg1
, 0);
8964 target_to_host_sigset(set
, target_set
);
8969 ret
= get_errno(sys_ppoll(pfd
, nfds
, timeout_ts
,
8970 set
, SIGSET_T_SIZE
));
8972 if (!is_error(ret
) && arg3
) {
8973 host_to_target_timespec(arg3
, timeout_ts
);
8976 unlock_user(target_set
, arg4
, 0);
8980 ret
= get_errno(poll(pfd
, nfds
, timeout
));
8982 if (!is_error(ret
)) {
8983 for(i
= 0; i
< nfds
; i
++) {
8984 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
8987 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
8991 case TARGET_NR_flock
:
8992 /* NOTE: the flock constant seems to be the same for every
8994 ret
= get_errno(flock(arg1
, arg2
));
8996 case TARGET_NR_readv
:
8998 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
9000 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
9001 unlock_iovec(vec
, arg2
, arg3
, 1);
9003 ret
= -host_to_target_errno(errno
);
9007 case TARGET_NR_writev
:
9009 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
9011 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
9012 unlock_iovec(vec
, arg2
, arg3
, 0);
9014 ret
= -host_to_target_errno(errno
);
9018 case TARGET_NR_getsid
:
9019 ret
= get_errno(getsid(arg1
));
9021 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9022 case TARGET_NR_fdatasync
:
9023 ret
= get_errno(fdatasync(arg1
));
9026 #ifdef TARGET_NR__sysctl
9027 case TARGET_NR__sysctl
:
9028 /* We don't implement this, but ENOTDIR is always a safe
9030 ret
= -TARGET_ENOTDIR
;
9033 case TARGET_NR_sched_getaffinity
:
9035 unsigned int mask_size
;
9036 unsigned long *mask
;
9039 * sched_getaffinity needs multiples of ulong, so need to take
9040 * care of mismatches between target ulong and host ulong sizes.
9042 if (arg2
& (sizeof(abi_ulong
) - 1)) {
9043 ret
= -TARGET_EINVAL
;
9046 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
9048 mask
= alloca(mask_size
);
9049 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
9051 if (!is_error(ret
)) {
9053 /* More data returned than the caller's buffer will fit.
9054 * This only happens if sizeof(abi_long) < sizeof(long)
9055 * and the caller passed us a buffer holding an odd number
9056 * of abi_longs. If the host kernel is actually using the
9057 * extra 4 bytes then fail EINVAL; otherwise we can just
9058 * ignore them and only copy the interesting part.
9060 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
9061 if (numcpus
> arg2
* 8) {
9062 ret
= -TARGET_EINVAL
;
9068 if (copy_to_user(arg3
, mask
, ret
)) {
9074 case TARGET_NR_sched_setaffinity
:
9076 unsigned int mask_size
;
9077 unsigned long *mask
;
9080 * sched_setaffinity needs multiples of ulong, so need to take
9081 * care of mismatches between target ulong and host ulong sizes.
9083 if (arg2
& (sizeof(abi_ulong
) - 1)) {
9084 ret
= -TARGET_EINVAL
;
9087 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
9089 mask
= alloca(mask_size
);
9090 if (!lock_user_struct(VERIFY_READ
, p
, arg3
, 1)) {
9093 memcpy(mask
, p
, arg2
);
9094 unlock_user_struct(p
, arg2
, 0);
9096 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
9099 case TARGET_NR_sched_setparam
:
9101 struct sched_param
*target_schp
;
9102 struct sched_param schp
;
9105 return -TARGET_EINVAL
;
9107 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
9109 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
9110 unlock_user_struct(target_schp
, arg2
, 0);
9111 ret
= get_errno(sched_setparam(arg1
, &schp
));
9114 case TARGET_NR_sched_getparam
:
9116 struct sched_param
*target_schp
;
9117 struct sched_param schp
;
9120 return -TARGET_EINVAL
;
9122 ret
= get_errno(sched_getparam(arg1
, &schp
));
9123 if (!is_error(ret
)) {
9124 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
9126 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
9127 unlock_user_struct(target_schp
, arg2
, 1);
9131 case TARGET_NR_sched_setscheduler
:
9133 struct sched_param
*target_schp
;
9134 struct sched_param schp
;
9136 return -TARGET_EINVAL
;
9138 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
9140 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
9141 unlock_user_struct(target_schp
, arg3
, 0);
9142 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
9145 case TARGET_NR_sched_getscheduler
:
9146 ret
= get_errno(sched_getscheduler(arg1
));
9148 case TARGET_NR_sched_yield
:
9149 ret
= get_errno(sched_yield());
9151 case TARGET_NR_sched_get_priority_max
:
9152 ret
= get_errno(sched_get_priority_max(arg1
));
9154 case TARGET_NR_sched_get_priority_min
:
9155 ret
= get_errno(sched_get_priority_min(arg1
));
9157 case TARGET_NR_sched_rr_get_interval
:
9160 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
9161 if (!is_error(ret
)) {
9162 ret
= host_to_target_timespec(arg2
, &ts
);
9166 case TARGET_NR_nanosleep
:
9168 struct timespec req
, rem
;
9169 target_to_host_timespec(&req
, arg1
);
9170 ret
= get_errno(nanosleep(&req
, &rem
));
9171 if (is_error(ret
) && arg2
) {
9172 host_to_target_timespec(arg2
, &rem
);
9176 #ifdef TARGET_NR_query_module
9177 case TARGET_NR_query_module
:
9180 #ifdef TARGET_NR_nfsservctl
9181 case TARGET_NR_nfsservctl
:
9184 case TARGET_NR_prctl
:
9186 case PR_GET_PDEATHSIG
:
9189 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
9190 if (!is_error(ret
) && arg2
9191 && put_user_ual(deathsig
, arg2
)) {
9199 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
9203 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
9205 unlock_user(name
, arg2
, 16);
9210 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
9214 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
9216 unlock_user(name
, arg2
, 0);
9221 /* Most prctl options have no pointer arguments */
9222 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
9226 #ifdef TARGET_NR_arch_prctl
9227 case TARGET_NR_arch_prctl
:
9228 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9229 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
9235 #ifdef TARGET_NR_pread64
9236 case TARGET_NR_pread64
:
9237 if (regpairs_aligned(cpu_env
)) {
9241 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
9243 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
9244 unlock_user(p
, arg2
, ret
);
9246 case TARGET_NR_pwrite64
:
9247 if (regpairs_aligned(cpu_env
)) {
9251 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
9253 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
9254 unlock_user(p
, arg2
, 0);
9257 case TARGET_NR_getcwd
:
9258 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
9260 ret
= get_errno(sys_getcwd1(p
, arg2
));
9261 unlock_user(p
, arg1
, ret
);
9263 case TARGET_NR_capget
:
9264 case TARGET_NR_capset
:
9266 struct target_user_cap_header
*target_header
;
9267 struct target_user_cap_data
*target_data
= NULL
;
9268 struct __user_cap_header_struct header
;
9269 struct __user_cap_data_struct data
[2];
9270 struct __user_cap_data_struct
*dataptr
= NULL
;
9271 int i
, target_datalen
;
9274 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
9277 header
.version
= tswap32(target_header
->version
);
9278 header
.pid
= tswap32(target_header
->pid
);
9280 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
9281 /* Version 2 and up takes pointer to two user_data structs */
9285 target_datalen
= sizeof(*target_data
) * data_items
;
9288 if (num
== TARGET_NR_capget
) {
9289 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
9291 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
9294 unlock_user_struct(target_header
, arg1
, 0);
9298 if (num
== TARGET_NR_capset
) {
9299 for (i
= 0; i
< data_items
; i
++) {
9300 data
[i
].effective
= tswap32(target_data
[i
].effective
);
9301 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
9302 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
9309 if (num
== TARGET_NR_capget
) {
9310 ret
= get_errno(capget(&header
, dataptr
));
9312 ret
= get_errno(capset(&header
, dataptr
));
9315 /* The kernel always updates version for both capget and capset */
9316 target_header
->version
= tswap32(header
.version
);
9317 unlock_user_struct(target_header
, arg1
, 1);
9320 if (num
== TARGET_NR_capget
) {
9321 for (i
= 0; i
< data_items
; i
++) {
9322 target_data
[i
].effective
= tswap32(data
[i
].effective
);
9323 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
9324 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
9326 unlock_user(target_data
, arg2
, target_datalen
);
9328 unlock_user(target_data
, arg2
, 0);
9333 case TARGET_NR_sigaltstack
:
9334 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUArchState
*)cpu_env
));
9337 #ifdef CONFIG_SENDFILE
9338 case TARGET_NR_sendfile
:
9343 ret
= get_user_sal(off
, arg3
);
9344 if (is_error(ret
)) {
9349 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
9350 if (!is_error(ret
) && arg3
) {
9351 abi_long ret2
= put_user_sal(off
, arg3
);
9352 if (is_error(ret2
)) {
9358 #ifdef TARGET_NR_sendfile64
9359 case TARGET_NR_sendfile64
:
9364 ret
= get_user_s64(off
, arg3
);
9365 if (is_error(ret
)) {
9370 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
9371 if (!is_error(ret
) && arg3
) {
9372 abi_long ret2
= put_user_s64(off
, arg3
);
9373 if (is_error(ret2
)) {
9381 case TARGET_NR_sendfile
:
9382 #ifdef TARGET_NR_sendfile64
9383 case TARGET_NR_sendfile64
:
9388 #ifdef TARGET_NR_getpmsg
9389 case TARGET_NR_getpmsg
:
9392 #ifdef TARGET_NR_putpmsg
9393 case TARGET_NR_putpmsg
:
9396 #ifdef TARGET_NR_vfork
9397 case TARGET_NR_vfork
:
9398 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
9402 #ifdef TARGET_NR_ugetrlimit
9403 case TARGET_NR_ugetrlimit
:
9406 int resource
= target_to_host_resource(arg1
);
9407 ret
= get_errno(getrlimit(resource
, &rlim
));
9408 if (!is_error(ret
)) {
9409 struct target_rlimit
*target_rlim
;
9410 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
9412 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
9413 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
9414 unlock_user_struct(target_rlim
, arg2
, 1);
9419 #ifdef TARGET_NR_truncate64
9420 case TARGET_NR_truncate64
:
9421 if (!(p
= lock_user_string(arg1
)))
9423 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
9424 unlock_user(p
, arg1
, 0);
9427 #ifdef TARGET_NR_ftruncate64
9428 case TARGET_NR_ftruncate64
:
9429 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
9432 #ifdef TARGET_NR_stat64
9433 case TARGET_NR_stat64
:
9434 if (!(p
= lock_user_string(arg1
)))
9436 ret
= get_errno(stat(path(p
), &st
));
9437 unlock_user(p
, arg1
, 0);
9439 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
9442 #ifdef TARGET_NR_lstat64
9443 case TARGET_NR_lstat64
:
9444 if (!(p
= lock_user_string(arg1
)))
9446 ret
= get_errno(lstat(path(p
), &st
));
9447 unlock_user(p
, arg1
, 0);
9449 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
9452 #ifdef TARGET_NR_fstat64
9453 case TARGET_NR_fstat64
:
9454 ret
= get_errno(fstat(arg1
, &st
));
9456 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
9459 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
9460 #ifdef TARGET_NR_fstatat64
9461 case TARGET_NR_fstatat64
:
9463 #ifdef TARGET_NR_newfstatat
9464 case TARGET_NR_newfstatat
:
9466 if (!(p
= lock_user_string(arg2
)))
9468 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
9470 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
9473 #ifdef TARGET_NR_lchown
9474 case TARGET_NR_lchown
:
9475 if (!(p
= lock_user_string(arg1
)))
9477 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
9478 unlock_user(p
, arg1
, 0);
9481 #ifdef TARGET_NR_getuid
9482 case TARGET_NR_getuid
:
9483 ret
= get_errno(high2lowuid(getuid()));
9486 #ifdef TARGET_NR_getgid
9487 case TARGET_NR_getgid
:
9488 ret
= get_errno(high2lowgid(getgid()));
9491 #ifdef TARGET_NR_geteuid
9492 case TARGET_NR_geteuid
:
9493 ret
= get_errno(high2lowuid(geteuid()));
9496 #ifdef TARGET_NR_getegid
9497 case TARGET_NR_getegid
:
9498 ret
= get_errno(high2lowgid(getegid()));
9501 case TARGET_NR_setreuid
:
9502 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
9504 case TARGET_NR_setregid
:
9505 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
9507 case TARGET_NR_getgroups
:
9509 int gidsetsize
= arg1
;
9510 target_id
*target_grouplist
;
9514 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
9515 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
9516 if (gidsetsize
== 0)
9518 if (!is_error(ret
)) {
9519 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
9520 if (!target_grouplist
)
9522 for(i
= 0;i
< ret
; i
++)
9523 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
9524 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
9528 case TARGET_NR_setgroups
:
9530 int gidsetsize
= arg1
;
9531 target_id
*target_grouplist
;
9532 gid_t
*grouplist
= NULL
;
9535 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
9536 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
9537 if (!target_grouplist
) {
9538 ret
= -TARGET_EFAULT
;
9541 for (i
= 0; i
< gidsetsize
; i
++) {
9542 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
9544 unlock_user(target_grouplist
, arg2
, 0);
9546 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
9549 case TARGET_NR_fchown
:
9550 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
9552 #if defined(TARGET_NR_fchownat)
9553 case TARGET_NR_fchownat
:
9554 if (!(p
= lock_user_string(arg2
)))
9556 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
9557 low2highgid(arg4
), arg5
));
9558 unlock_user(p
, arg2
, 0);
9561 #ifdef TARGET_NR_setresuid
9562 case TARGET_NR_setresuid
:
9563 ret
= get_errno(sys_setresuid(low2highuid(arg1
),
9565 low2highuid(arg3
)));
9568 #ifdef TARGET_NR_getresuid
9569 case TARGET_NR_getresuid
:
9571 uid_t ruid
, euid
, suid
;
9572 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
9573 if (!is_error(ret
)) {
9574 if (put_user_id(high2lowuid(ruid
), arg1
)
9575 || put_user_id(high2lowuid(euid
), arg2
)
9576 || put_user_id(high2lowuid(suid
), arg3
))
9582 #ifdef TARGET_NR_getresgid
9583 case TARGET_NR_setresgid
:
9584 ret
= get_errno(sys_setresgid(low2highgid(arg1
),
9586 low2highgid(arg3
)));
9589 #ifdef TARGET_NR_getresgid
9590 case TARGET_NR_getresgid
:
9592 gid_t rgid
, egid
, sgid
;
9593 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
9594 if (!is_error(ret
)) {
9595 if (put_user_id(high2lowgid(rgid
), arg1
)
9596 || put_user_id(high2lowgid(egid
), arg2
)
9597 || put_user_id(high2lowgid(sgid
), arg3
))
9603 #ifdef TARGET_NR_chown
9604 case TARGET_NR_chown
:
9605 if (!(p
= lock_user_string(arg1
)))
9607 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
9608 unlock_user(p
, arg1
, 0);
9611 case TARGET_NR_setuid
:
9612 ret
= get_errno(sys_setuid(low2highuid(arg1
)));
9614 case TARGET_NR_setgid
:
9615 ret
= get_errno(sys_setgid(low2highgid(arg1
)));
9617 case TARGET_NR_setfsuid
:
9618 ret
= get_errno(setfsuid(arg1
));
9620 case TARGET_NR_setfsgid
:
9621 ret
= get_errno(setfsgid(arg1
));
9624 #ifdef TARGET_NR_lchown32
9625 case TARGET_NR_lchown32
:
9626 if (!(p
= lock_user_string(arg1
)))
9628 ret
= get_errno(lchown(p
, arg2
, arg3
));
9629 unlock_user(p
, arg1
, 0);
9632 #ifdef TARGET_NR_getuid32
9633 case TARGET_NR_getuid32
:
9634 ret
= get_errno(getuid());
9638 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
9639 /* Alpha specific */
9640 case TARGET_NR_getxuid
:
9644 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
9646 ret
= get_errno(getuid());
9649 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
9650 /* Alpha specific */
9651 case TARGET_NR_getxgid
:
9655 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
9657 ret
= get_errno(getgid());
9660 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
9661 /* Alpha specific */
9662 case TARGET_NR_osf_getsysinfo
:
9663 ret
= -TARGET_EOPNOTSUPP
;
9665 case TARGET_GSI_IEEE_FP_CONTROL
:
9667 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
9669 /* Copied from linux ieee_fpcr_to_swcr. */
9670 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
9671 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
9672 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
9673 | SWCR_TRAP_ENABLE_DZE
9674 | SWCR_TRAP_ENABLE_OVF
);
9675 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
9676 | SWCR_TRAP_ENABLE_INE
);
9677 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
9678 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
9680 if (put_user_u64 (swcr
, arg2
))
9686 /* case GSI_IEEE_STATE_AT_SIGNAL:
9687 -- Not implemented in linux kernel.
9689 -- Retrieves current unaligned access state; not much used.
9691 -- Retrieves implver information; surely not used.
9693 -- Grabs a copy of the HWRPB; surely not used.
9698 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
9699 /* Alpha specific */
9700 case TARGET_NR_osf_setsysinfo
:
9701 ret
= -TARGET_EOPNOTSUPP
;
9703 case TARGET_SSI_IEEE_FP_CONTROL
:
9705 uint64_t swcr
, fpcr
, orig_fpcr
;
9707 if (get_user_u64 (swcr
, arg2
)) {
9710 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
9711 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
9713 /* Copied from linux ieee_swcr_to_fpcr. */
9714 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
9715 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
9716 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
9717 | SWCR_TRAP_ENABLE_DZE
9718 | SWCR_TRAP_ENABLE_OVF
)) << 48;
9719 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
9720 | SWCR_TRAP_ENABLE_INE
)) << 57;
9721 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
9722 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
9724 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
9729 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
9731 uint64_t exc
, fpcr
, orig_fpcr
;
9734 if (get_user_u64(exc
, arg2
)) {
9738 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
9740 /* We only add to the exception status here. */
9741 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
9743 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
9746 /* Old exceptions are not signaled. */
9747 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
9749 /* If any exceptions set by this call,
9750 and are unmasked, send a signal. */
9752 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
9753 si_code
= TARGET_FPE_FLTRES
;
9755 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
9756 si_code
= TARGET_FPE_FLTUND
;
9758 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
9759 si_code
= TARGET_FPE_FLTOVF
;
9761 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
9762 si_code
= TARGET_FPE_FLTDIV
;
9764 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
9765 si_code
= TARGET_FPE_FLTINV
;
9768 target_siginfo_t info
;
9769 info
.si_signo
= SIGFPE
;
9771 info
.si_code
= si_code
;
9772 info
._sifields
._sigfault
._addr
9773 = ((CPUArchState
*)cpu_env
)->pc
;
9774 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
9779 /* case SSI_NVPAIRS:
9780 -- Used with SSIN_UACPROC to enable unaligned accesses.
9781 case SSI_IEEE_STATE_AT_SIGNAL:
9782 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
9783 -- Not implemented in linux kernel
9788 #ifdef TARGET_NR_osf_sigprocmask
9789 /* Alpha specific. */
9790 case TARGET_NR_osf_sigprocmask
:
9794 sigset_t set
, oldset
;
9797 case TARGET_SIG_BLOCK
:
9800 case TARGET_SIG_UNBLOCK
:
9803 case TARGET_SIG_SETMASK
:
9807 ret
= -TARGET_EINVAL
;
9811 target_to_host_old_sigset(&set
, &mask
);
9812 ret
= do_sigprocmask(how
, &set
, &oldset
);
9814 host_to_target_old_sigset(&mask
, &oldset
);
9821 #ifdef TARGET_NR_getgid32
9822 case TARGET_NR_getgid32
:
9823 ret
= get_errno(getgid());
9826 #ifdef TARGET_NR_geteuid32
9827 case TARGET_NR_geteuid32
:
9828 ret
= get_errno(geteuid());
9831 #ifdef TARGET_NR_getegid32
9832 case TARGET_NR_getegid32
:
9833 ret
= get_errno(getegid());
9836 #ifdef TARGET_NR_setreuid32
9837 case TARGET_NR_setreuid32
:
9838 ret
= get_errno(setreuid(arg1
, arg2
));
9841 #ifdef TARGET_NR_setregid32
9842 case TARGET_NR_setregid32
:
9843 ret
= get_errno(setregid(arg1
, arg2
));
9846 #ifdef TARGET_NR_getgroups32
9847 case TARGET_NR_getgroups32
:
9849 int gidsetsize
= arg1
;
9850 uint32_t *target_grouplist
;
9854 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
9855 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
9856 if (gidsetsize
== 0)
9858 if (!is_error(ret
)) {
9859 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
9860 if (!target_grouplist
) {
9861 ret
= -TARGET_EFAULT
;
9864 for(i
= 0;i
< ret
; i
++)
9865 target_grouplist
[i
] = tswap32(grouplist
[i
]);
9866 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
9871 #ifdef TARGET_NR_setgroups32
9872 case TARGET_NR_setgroups32
:
9874 int gidsetsize
= arg1
;
9875 uint32_t *target_grouplist
;
9879 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
9880 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
9881 if (!target_grouplist
) {
9882 ret
= -TARGET_EFAULT
;
9885 for(i
= 0;i
< gidsetsize
; i
++)
9886 grouplist
[i
] = tswap32(target_grouplist
[i
]);
9887 unlock_user(target_grouplist
, arg2
, 0);
9888 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
9892 #ifdef TARGET_NR_fchown32
9893 case TARGET_NR_fchown32
:
9894 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
9897 #ifdef TARGET_NR_setresuid32
9898 case TARGET_NR_setresuid32
:
9899 ret
= get_errno(sys_setresuid(arg1
, arg2
, arg3
));
9902 #ifdef TARGET_NR_getresuid32
9903 case TARGET_NR_getresuid32
:
9905 uid_t ruid
, euid
, suid
;
9906 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
9907 if (!is_error(ret
)) {
9908 if (put_user_u32(ruid
, arg1
)
9909 || put_user_u32(euid
, arg2
)
9910 || put_user_u32(suid
, arg3
))
9916 #ifdef TARGET_NR_setresgid32
9917 case TARGET_NR_setresgid32
:
9918 ret
= get_errno(sys_setresgid(arg1
, arg2
, arg3
));
9921 #ifdef TARGET_NR_getresgid32
9922 case TARGET_NR_getresgid32
:
9924 gid_t rgid
, egid
, sgid
;
9925 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
9926 if (!is_error(ret
)) {
9927 if (put_user_u32(rgid
, arg1
)
9928 || put_user_u32(egid
, arg2
)
9929 || put_user_u32(sgid
, arg3
))
9935 #ifdef TARGET_NR_chown32
9936 case TARGET_NR_chown32
:
9937 if (!(p
= lock_user_string(arg1
)))
9939 ret
= get_errno(chown(p
, arg2
, arg3
));
9940 unlock_user(p
, arg1
, 0);
9943 #ifdef TARGET_NR_setuid32
9944 case TARGET_NR_setuid32
:
9945 ret
= get_errno(sys_setuid(arg1
));
9948 #ifdef TARGET_NR_setgid32
9949 case TARGET_NR_setgid32
:
9950 ret
= get_errno(sys_setgid(arg1
));
9953 #ifdef TARGET_NR_setfsuid32
9954 case TARGET_NR_setfsuid32
:
9955 ret
= get_errno(setfsuid(arg1
));
9958 #ifdef TARGET_NR_setfsgid32
9959 case TARGET_NR_setfsgid32
:
9960 ret
= get_errno(setfsgid(arg1
));
9964 case TARGET_NR_pivot_root
:
9966 #ifdef TARGET_NR_mincore
9967 case TARGET_NR_mincore
:
9970 ret
= -TARGET_EFAULT
;
9971 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
9973 if (!(p
= lock_user_string(arg3
)))
9975 ret
= get_errno(mincore(a
, arg2
, p
));
9976 unlock_user(p
, arg3
, ret
);
9978 unlock_user(a
, arg1
, 0);
9982 #ifdef TARGET_NR_arm_fadvise64_64
9983 case TARGET_NR_arm_fadvise64_64
:
9984 /* arm_fadvise64_64 looks like fadvise64_64 but
9985 * with different argument order: fd, advice, offset, len
9986 * rather than the usual fd, offset, len, advice.
9987 * Note that offset and len are both 64-bit so appear as
9988 * pairs of 32-bit registers.
9990 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
9991 target_offset64(arg5
, arg6
), arg2
);
9992 ret
= -host_to_target_errno(ret
);
9996 #if TARGET_ABI_BITS == 32
9998 #ifdef TARGET_NR_fadvise64_64
9999 case TARGET_NR_fadvise64_64
:
10000 /* 6 args: fd, offset (high, low), len (high, low), advice */
10001 if (regpairs_aligned(cpu_env
)) {
10002 /* offset is in (3,4), len in (5,6) and advice in 7 */
10009 ret
= -host_to_target_errno(posix_fadvise(arg1
,
10010 target_offset64(arg2
, arg3
),
10011 target_offset64(arg4
, arg5
),
10016 #ifdef TARGET_NR_fadvise64
10017 case TARGET_NR_fadvise64
:
10018 /* 5 args: fd, offset (high, low), len, advice */
10019 if (regpairs_aligned(cpu_env
)) {
10020 /* offset is in (3,4), len in 5 and advice in 6 */
10026 ret
= -host_to_target_errno(posix_fadvise(arg1
,
10027 target_offset64(arg2
, arg3
),
10032 #else /* not a 32-bit ABI */
10033 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10034 #ifdef TARGET_NR_fadvise64_64
10035 case TARGET_NR_fadvise64_64
:
10037 #ifdef TARGET_NR_fadvise64
10038 case TARGET_NR_fadvise64
:
10040 #ifdef TARGET_S390X
10042 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
10043 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
10044 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
10045 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
10049 ret
= -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
10052 #endif /* end of 64-bit ABI fadvise handling */
10054 #ifdef TARGET_NR_madvise
10055 case TARGET_NR_madvise
:
10056 /* A straight passthrough may not be safe because qemu sometimes
10057 turns private file-backed mappings into anonymous mappings.
10058 This will break MADV_DONTNEED.
10059 This is a hint, so ignoring and returning success is ok. */
10060 ret
= get_errno(0);
10063 #if TARGET_ABI_BITS == 32
10064 case TARGET_NR_fcntl64
:
10068 struct target_flock64
*target_fl
;
10070 struct target_eabi_flock64
*target_efl
;
10073 cmd
= target_to_host_fcntl_cmd(arg2
);
10074 if (cmd
== -TARGET_EINVAL
) {
10080 case TARGET_F_GETLK64
:
10082 if (((CPUARMState
*)cpu_env
)->eabi
) {
10083 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
10085 fl
.l_type
= tswap16(target_efl
->l_type
);
10086 fl
.l_whence
= tswap16(target_efl
->l_whence
);
10087 fl
.l_start
= tswap64(target_efl
->l_start
);
10088 fl
.l_len
= tswap64(target_efl
->l_len
);
10089 fl
.l_pid
= tswap32(target_efl
->l_pid
);
10090 unlock_user_struct(target_efl
, arg3
, 0);
10094 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
10096 fl
.l_type
= tswap16(target_fl
->l_type
);
10097 fl
.l_whence
= tswap16(target_fl
->l_whence
);
10098 fl
.l_start
= tswap64(target_fl
->l_start
);
10099 fl
.l_len
= tswap64(target_fl
->l_len
);
10100 fl
.l_pid
= tswap32(target_fl
->l_pid
);
10101 unlock_user_struct(target_fl
, arg3
, 0);
10103 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
10106 if (((CPUARMState
*)cpu_env
)->eabi
) {
10107 if (!lock_user_struct(VERIFY_WRITE
, target_efl
, arg3
, 0))
10109 target_efl
->l_type
= tswap16(fl
.l_type
);
10110 target_efl
->l_whence
= tswap16(fl
.l_whence
);
10111 target_efl
->l_start
= tswap64(fl
.l_start
);
10112 target_efl
->l_len
= tswap64(fl
.l_len
);
10113 target_efl
->l_pid
= tswap32(fl
.l_pid
);
10114 unlock_user_struct(target_efl
, arg3
, 1);
10118 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg3
, 0))
10120 target_fl
->l_type
= tswap16(fl
.l_type
);
10121 target_fl
->l_whence
= tswap16(fl
.l_whence
);
10122 target_fl
->l_start
= tswap64(fl
.l_start
);
10123 target_fl
->l_len
= tswap64(fl
.l_len
);
10124 target_fl
->l_pid
= tswap32(fl
.l_pid
);
10125 unlock_user_struct(target_fl
, arg3
, 1);
10130 case TARGET_F_SETLK64
:
10131 case TARGET_F_SETLKW64
:
10133 if (((CPUARMState
*)cpu_env
)->eabi
) {
10134 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
10136 fl
.l_type
= tswap16(target_efl
->l_type
);
10137 fl
.l_whence
= tswap16(target_efl
->l_whence
);
10138 fl
.l_start
= tswap64(target_efl
->l_start
);
10139 fl
.l_len
= tswap64(target_efl
->l_len
);
10140 fl
.l_pid
= tswap32(target_efl
->l_pid
);
10141 unlock_user_struct(target_efl
, arg3
, 0);
10145 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
10147 fl
.l_type
= tswap16(target_fl
->l_type
);
10148 fl
.l_whence
= tswap16(target_fl
->l_whence
);
10149 fl
.l_start
= tswap64(target_fl
->l_start
);
10150 fl
.l_len
= tswap64(target_fl
->l_len
);
10151 fl
.l_pid
= tswap32(target_fl
->l_pid
);
10152 unlock_user_struct(target_fl
, arg3
, 0);
10154 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
10157 ret
= do_fcntl(arg1
, arg2
, arg3
);
10163 #ifdef TARGET_NR_cacheflush
10164 case TARGET_NR_cacheflush
:
10165 /* self-modifying code is handled automatically, so nothing needed */
10169 #ifdef TARGET_NR_security
10170 case TARGET_NR_security
:
10171 goto unimplemented
;
10173 #ifdef TARGET_NR_getpagesize
10174 case TARGET_NR_getpagesize
:
10175 ret
= TARGET_PAGE_SIZE
;
10178 case TARGET_NR_gettid
:
10179 ret
= get_errno(gettid());
10181 #ifdef TARGET_NR_readahead
10182 case TARGET_NR_readahead
:
10183 #if TARGET_ABI_BITS == 32
10184 if (regpairs_aligned(cpu_env
)) {
10189 ret
= get_errno(readahead(arg1
, ((off64_t
)arg3
<< 32) | arg2
, arg4
));
10191 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
10196 #ifdef TARGET_NR_setxattr
10197 case TARGET_NR_listxattr
:
10198 case TARGET_NR_llistxattr
:
10202 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10204 ret
= -TARGET_EFAULT
;
10208 p
= lock_user_string(arg1
);
10210 if (num
== TARGET_NR_listxattr
) {
10211 ret
= get_errno(listxattr(p
, b
, arg3
));
10213 ret
= get_errno(llistxattr(p
, b
, arg3
));
10216 ret
= -TARGET_EFAULT
;
10218 unlock_user(p
, arg1
, 0);
10219 unlock_user(b
, arg2
, arg3
);
10222 case TARGET_NR_flistxattr
:
10226 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10228 ret
= -TARGET_EFAULT
;
10232 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
10233 unlock_user(b
, arg2
, arg3
);
10236 case TARGET_NR_setxattr
:
10237 case TARGET_NR_lsetxattr
:
10239 void *p
, *n
, *v
= 0;
10241 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
10243 ret
= -TARGET_EFAULT
;
10247 p
= lock_user_string(arg1
);
10248 n
= lock_user_string(arg2
);
10250 if (num
== TARGET_NR_setxattr
) {
10251 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
10253 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
10256 ret
= -TARGET_EFAULT
;
10258 unlock_user(p
, arg1
, 0);
10259 unlock_user(n
, arg2
, 0);
10260 unlock_user(v
, arg3
, 0);
10263 case TARGET_NR_fsetxattr
:
10267 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
10269 ret
= -TARGET_EFAULT
;
10273 n
= lock_user_string(arg2
);
10275 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
10277 ret
= -TARGET_EFAULT
;
10279 unlock_user(n
, arg2
, 0);
10280 unlock_user(v
, arg3
, 0);
10283 case TARGET_NR_getxattr
:
10284 case TARGET_NR_lgetxattr
:
10286 void *p
, *n
, *v
= 0;
10288 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
10290 ret
= -TARGET_EFAULT
;
10294 p
= lock_user_string(arg1
);
10295 n
= lock_user_string(arg2
);
10297 if (num
== TARGET_NR_getxattr
) {
10298 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
10300 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
10303 ret
= -TARGET_EFAULT
;
10305 unlock_user(p
, arg1
, 0);
10306 unlock_user(n
, arg2
, 0);
10307 unlock_user(v
, arg3
, arg4
);
10310 case TARGET_NR_fgetxattr
:
10314 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
10316 ret
= -TARGET_EFAULT
;
10320 n
= lock_user_string(arg2
);
10322 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
10324 ret
= -TARGET_EFAULT
;
10326 unlock_user(n
, arg2
, 0);
10327 unlock_user(v
, arg3
, arg4
);
10330 case TARGET_NR_removexattr
:
10331 case TARGET_NR_lremovexattr
:
10334 p
= lock_user_string(arg1
);
10335 n
= lock_user_string(arg2
);
10337 if (num
== TARGET_NR_removexattr
) {
10338 ret
= get_errno(removexattr(p
, n
));
10340 ret
= get_errno(lremovexattr(p
, n
));
10343 ret
= -TARGET_EFAULT
;
10345 unlock_user(p
, arg1
, 0);
10346 unlock_user(n
, arg2
, 0);
10349 case TARGET_NR_fremovexattr
:
10352 n
= lock_user_string(arg2
);
10354 ret
= get_errno(fremovexattr(arg1
, n
));
10356 ret
= -TARGET_EFAULT
;
10358 unlock_user(n
, arg2
, 0);
10362 #endif /* CONFIG_ATTR */
10363 #ifdef TARGET_NR_set_thread_area
10364 case TARGET_NR_set_thread_area
:
10365 #if defined(TARGET_MIPS)
10366 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
10369 #elif defined(TARGET_CRIS)
10371 ret
= -TARGET_EINVAL
;
10373 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
10377 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
10378 ret
= do_set_thread_area(cpu_env
, arg1
);
10380 #elif defined(TARGET_M68K)
10382 TaskState
*ts
= cpu
->opaque
;
10383 ts
->tp_value
= arg1
;
10388 goto unimplemented_nowarn
;
10391 #ifdef TARGET_NR_get_thread_area
10392 case TARGET_NR_get_thread_area
:
10393 #if defined(TARGET_I386) && defined(TARGET_ABI32)
10394 ret
= do_get_thread_area(cpu_env
, arg1
);
10396 #elif defined(TARGET_M68K)
10398 TaskState
*ts
= cpu
->opaque
;
10399 ret
= ts
->tp_value
;
10403 goto unimplemented_nowarn
;
10406 #ifdef TARGET_NR_getdomainname
10407 case TARGET_NR_getdomainname
:
10408 goto unimplemented_nowarn
;
10411 #ifdef TARGET_NR_clock_gettime
10412 case TARGET_NR_clock_gettime
:
10414 struct timespec ts
;
10415 ret
= get_errno(clock_gettime(arg1
, &ts
));
10416 if (!is_error(ret
)) {
10417 host_to_target_timespec(arg2
, &ts
);
10422 #ifdef TARGET_NR_clock_getres
10423 case TARGET_NR_clock_getres
:
10425 struct timespec ts
;
10426 ret
= get_errno(clock_getres(arg1
, &ts
));
10427 if (!is_error(ret
)) {
10428 host_to_target_timespec(arg2
, &ts
);
10433 #ifdef TARGET_NR_clock_nanosleep
10434 case TARGET_NR_clock_nanosleep
:
10436 struct timespec ts
;
10437 target_to_host_timespec(&ts
, arg3
);
10438 ret
= get_errno(clock_nanosleep(arg1
, arg2
, &ts
, arg4
? &ts
: NULL
));
10440 host_to_target_timespec(arg4
, &ts
);
10442 #if defined(TARGET_PPC)
10443 /* clock_nanosleep is odd in that it returns positive errno values.
10444 * On PPC, CR0 bit 3 should be set in such a situation. */
10446 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
10453 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
10454 case TARGET_NR_set_tid_address
:
10455 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
10459 case TARGET_NR_tkill
:
10460 ret
= get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
10463 case TARGET_NR_tgkill
:
10464 ret
= get_errno(safe_tgkill((int)arg1
, (int)arg2
,
10465 target_to_host_signal(arg3
)));
10468 #ifdef TARGET_NR_set_robust_list
10469 case TARGET_NR_set_robust_list
:
10470 case TARGET_NR_get_robust_list
:
10471 /* The ABI for supporting robust futexes has userspace pass
10472 * the kernel a pointer to a linked list which is updated by
10473 * userspace after the syscall; the list is walked by the kernel
10474 * when the thread exits. Since the linked list in QEMU guest
10475 * memory isn't a valid linked list for the host and we have
10476 * no way to reliably intercept the thread-death event, we can't
10477 * support these. Silently return ENOSYS so that guest userspace
10478 * falls back to a non-robust futex implementation (which should
10479 * be OK except in the corner case of the guest crashing while
10480 * holding a mutex that is shared with another process via
10483 goto unimplemented_nowarn
;
10486 #if defined(TARGET_NR_utimensat)
10487 case TARGET_NR_utimensat
:
10489 struct timespec
*tsp
, ts
[2];
10493 target_to_host_timespec(ts
, arg3
);
10494 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
10498 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
10500 if (!(p
= lock_user_string(arg2
))) {
10501 ret
= -TARGET_EFAULT
;
10504 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
10505 unlock_user(p
, arg2
, 0);
10510 case TARGET_NR_futex
:
10511 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
10513 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
10514 case TARGET_NR_inotify_init
:
10515 ret
= get_errno(sys_inotify_init());
10518 #ifdef CONFIG_INOTIFY1
10519 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
10520 case TARGET_NR_inotify_init1
:
10521 ret
= get_errno(sys_inotify_init1(arg1
));
10525 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
10526 case TARGET_NR_inotify_add_watch
:
10527 p
= lock_user_string(arg2
);
10528 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
10529 unlock_user(p
, arg2
, 0);
10532 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
10533 case TARGET_NR_inotify_rm_watch
:
10534 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
10538 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
10539 case TARGET_NR_mq_open
:
10541 struct mq_attr posix_mq_attr
, *attrp
;
10543 p
= lock_user_string(arg1
- 1);
10545 copy_from_user_mq_attr (&posix_mq_attr
, arg4
);
10546 attrp
= &posix_mq_attr
;
10550 ret
= get_errno(mq_open(p
, arg2
, arg3
, attrp
));
10551 unlock_user (p
, arg1
, 0);
10555 case TARGET_NR_mq_unlink
:
10556 p
= lock_user_string(arg1
- 1);
10557 ret
= get_errno(mq_unlink(p
));
10558 unlock_user (p
, arg1
, 0);
10561 case TARGET_NR_mq_timedsend
:
10563 struct timespec ts
;
10565 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
10567 target_to_host_timespec(&ts
, arg5
);
10568 ret
= get_errno(mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
10569 host_to_target_timespec(arg5
, &ts
);
10572 ret
= get_errno(mq_send(arg1
, p
, arg3
, arg4
));
10573 unlock_user (p
, arg2
, arg3
);
10577 case TARGET_NR_mq_timedreceive
:
10579 struct timespec ts
;
10582 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
10584 target_to_host_timespec(&ts
, arg5
);
10585 ret
= get_errno(mq_timedreceive(arg1
, p
, arg3
, &prio
, &ts
));
10586 host_to_target_timespec(arg5
, &ts
);
10589 ret
= get_errno(mq_receive(arg1
, p
, arg3
, &prio
));
10590 unlock_user (p
, arg2
, arg3
);
10592 put_user_u32(prio
, arg4
);
10596 /* Not implemented for now... */
10597 /* case TARGET_NR_mq_notify: */
10600 case TARGET_NR_mq_getsetattr
:
10602 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
10605 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
10606 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
10609 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
10610 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
10617 #ifdef CONFIG_SPLICE
10618 #ifdef TARGET_NR_tee
10619 case TARGET_NR_tee
:
10621 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
10625 #ifdef TARGET_NR_splice
10626 case TARGET_NR_splice
:
10628 loff_t loff_in
, loff_out
;
10629 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
10631 if (get_user_u64(loff_in
, arg2
)) {
10634 ploff_in
= &loff_in
;
10637 if (get_user_u64(loff_out
, arg4
)) {
10640 ploff_out
= &loff_out
;
10642 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
10644 if (put_user_u64(loff_in
, arg2
)) {
10649 if (put_user_u64(loff_out
, arg4
)) {
10656 #ifdef TARGET_NR_vmsplice
10657 case TARGET_NR_vmsplice
:
10659 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10661 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
10662 unlock_iovec(vec
, arg2
, arg3
, 0);
10664 ret
= -host_to_target_errno(errno
);
10669 #endif /* CONFIG_SPLICE */
10670 #ifdef CONFIG_EVENTFD
10671 #if defined(TARGET_NR_eventfd)
10672 case TARGET_NR_eventfd
:
10673 ret
= get_errno(eventfd(arg1
, 0));
10674 fd_trans_unregister(ret
);
10677 #if defined(TARGET_NR_eventfd2)
10678 case TARGET_NR_eventfd2
:
10680 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
10681 if (arg2
& TARGET_O_NONBLOCK
) {
10682 host_flags
|= O_NONBLOCK
;
10684 if (arg2
& TARGET_O_CLOEXEC
) {
10685 host_flags
|= O_CLOEXEC
;
10687 ret
= get_errno(eventfd(arg1
, host_flags
));
10688 fd_trans_unregister(ret
);
10692 #endif /* CONFIG_EVENTFD */
10693 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
10694 case TARGET_NR_fallocate
:
10695 #if TARGET_ABI_BITS == 32
10696 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
10697 target_offset64(arg5
, arg6
)));
10699 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
10703 #if defined(CONFIG_SYNC_FILE_RANGE)
10704 #if defined(TARGET_NR_sync_file_range)
10705 case TARGET_NR_sync_file_range
:
10706 #if TARGET_ABI_BITS == 32
10707 #if defined(TARGET_MIPS)
10708 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
10709 target_offset64(arg5
, arg6
), arg7
));
10711 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
10712 target_offset64(arg4
, arg5
), arg6
));
10713 #endif /* !TARGET_MIPS */
10715 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
10719 #if defined(TARGET_NR_sync_file_range2)
10720 case TARGET_NR_sync_file_range2
:
10721 /* This is like sync_file_range but the arguments are reordered */
10722 #if TARGET_ABI_BITS == 32
10723 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
10724 target_offset64(arg5
, arg6
), arg2
));
10726 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
10731 #if defined(TARGET_NR_signalfd4)
10732 case TARGET_NR_signalfd4
:
10733 ret
= do_signalfd4(arg1
, arg2
, arg4
);
10736 #if defined(TARGET_NR_signalfd)
10737 case TARGET_NR_signalfd
:
10738 ret
= do_signalfd4(arg1
, arg2
, 0);
10741 #if defined(CONFIG_EPOLL)
10742 #if defined(TARGET_NR_epoll_create)
10743 case TARGET_NR_epoll_create
:
10744 ret
= get_errno(epoll_create(arg1
));
10747 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
10748 case TARGET_NR_epoll_create1
:
10749 ret
= get_errno(epoll_create1(arg1
));
10752 #if defined(TARGET_NR_epoll_ctl)
10753 case TARGET_NR_epoll_ctl
:
10755 struct epoll_event ep
;
10756 struct epoll_event
*epp
= 0;
10758 struct target_epoll_event
*target_ep
;
10759 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
10762 ep
.events
= tswap32(target_ep
->events
);
10763 /* The epoll_data_t union is just opaque data to the kernel,
10764 * so we transfer all 64 bits across and need not worry what
10765 * actual data type it is.
10767 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
10768 unlock_user_struct(target_ep
, arg4
, 0);
10771 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
10776 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
10777 #define IMPLEMENT_EPOLL_PWAIT
10779 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
10780 #if defined(TARGET_NR_epoll_wait)
10781 case TARGET_NR_epoll_wait
:
10783 #if defined(IMPLEMENT_EPOLL_PWAIT)
10784 case TARGET_NR_epoll_pwait
:
10787 struct target_epoll_event
*target_ep
;
10788 struct epoll_event
*ep
;
10790 int maxevents
= arg3
;
10791 int timeout
= arg4
;
10793 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
10794 maxevents
* sizeof(struct target_epoll_event
), 1);
10799 ep
= alloca(maxevents
* sizeof(struct epoll_event
));
10802 #if defined(IMPLEMENT_EPOLL_PWAIT)
10803 case TARGET_NR_epoll_pwait
:
10805 target_sigset_t
*target_set
;
10806 sigset_t _set
, *set
= &_set
;
10809 target_set
= lock_user(VERIFY_READ
, arg5
,
10810 sizeof(target_sigset_t
), 1);
10812 unlock_user(target_ep
, arg2
, 0);
10815 target_to_host_sigset(set
, target_set
);
10816 unlock_user(target_set
, arg5
, 0);
10821 ret
= get_errno(epoll_pwait(epfd
, ep
, maxevents
, timeout
, set
));
10825 #if defined(TARGET_NR_epoll_wait)
10826 case TARGET_NR_epoll_wait
:
10827 ret
= get_errno(epoll_wait(epfd
, ep
, maxevents
, timeout
));
10831 ret
= -TARGET_ENOSYS
;
10833 if (!is_error(ret
)) {
10835 for (i
= 0; i
< ret
; i
++) {
10836 target_ep
[i
].events
= tswap32(ep
[i
].events
);
10837 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
10840 unlock_user(target_ep
, arg2
, ret
* sizeof(struct target_epoll_event
));
10845 #ifdef TARGET_NR_prlimit64
10846 case TARGET_NR_prlimit64
:
10848 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
10849 struct target_rlimit64
*target_rnew
, *target_rold
;
10850 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
10851 int resource
= target_to_host_resource(arg2
);
10853 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
10856 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
10857 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
10858 unlock_user_struct(target_rnew
, arg3
, 0);
10862 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
10863 if (!is_error(ret
) && arg4
) {
10864 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
10867 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
10868 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
10869 unlock_user_struct(target_rold
, arg4
, 1);
10874 #ifdef TARGET_NR_gethostname
10875 case TARGET_NR_gethostname
:
10877 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
10879 ret
= get_errno(gethostname(name
, arg2
));
10880 unlock_user(name
, arg1
, arg2
);
10882 ret
= -TARGET_EFAULT
;
10887 #ifdef TARGET_NR_atomic_cmpxchg_32
10888 case TARGET_NR_atomic_cmpxchg_32
:
10890 /* should use start_exclusive from main.c */
10891 abi_ulong mem_value
;
10892 if (get_user_u32(mem_value
, arg6
)) {
10893 target_siginfo_t info
;
10894 info
.si_signo
= SIGSEGV
;
10896 info
.si_code
= TARGET_SEGV_MAPERR
;
10897 info
._sifields
._sigfault
._addr
= arg6
;
10898 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
10902 if (mem_value
== arg2
)
10903 put_user_u32(arg1
, arg6
);
10908 #ifdef TARGET_NR_atomic_barrier
10909 case TARGET_NR_atomic_barrier
:
10911 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
10917 #ifdef TARGET_NR_timer_create
10918 case TARGET_NR_timer_create
:
10920 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
10922 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
10925 int timer_index
= next_free_host_timer();
10927 if (timer_index
< 0) {
10928 ret
= -TARGET_EAGAIN
;
10930 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
10933 phost_sevp
= &host_sevp
;
10934 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
10940 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
10944 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
10953 #ifdef TARGET_NR_timer_settime
10954 case TARGET_NR_timer_settime
:
10956 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
10957 * struct itimerspec * old_value */
10958 target_timer_t timerid
= get_timer_id(arg1
);
10962 } else if (arg3
== 0) {
10963 ret
= -TARGET_EINVAL
;
10965 timer_t htimer
= g_posix_timers
[timerid
];
10966 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
10968 target_to_host_itimerspec(&hspec_new
, arg3
);
10970 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
10971 host_to_target_itimerspec(arg2
, &hspec_old
);
10977 #ifdef TARGET_NR_timer_gettime
10978 case TARGET_NR_timer_gettime
:
10980 /* args: timer_t timerid, struct itimerspec *curr_value */
10981 target_timer_t timerid
= get_timer_id(arg1
);
10985 } else if (!arg2
) {
10986 ret
= -TARGET_EFAULT
;
10988 timer_t htimer
= g_posix_timers
[timerid
];
10989 struct itimerspec hspec
;
10990 ret
= get_errno(timer_gettime(htimer
, &hspec
));
10992 if (host_to_target_itimerspec(arg2
, &hspec
)) {
10993 ret
= -TARGET_EFAULT
;
11000 #ifdef TARGET_NR_timer_getoverrun
11001 case TARGET_NR_timer_getoverrun
:
11003 /* args: timer_t timerid */
11004 target_timer_t timerid
= get_timer_id(arg1
);
11009 timer_t htimer
= g_posix_timers
[timerid
];
11010 ret
= get_errno(timer_getoverrun(htimer
));
11012 fd_trans_unregister(ret
);
11017 #ifdef TARGET_NR_timer_delete
11018 case TARGET_NR_timer_delete
:
11020 /* args: timer_t timerid */
11021 target_timer_t timerid
= get_timer_id(arg1
);
11026 timer_t htimer
= g_posix_timers
[timerid
];
11027 ret
= get_errno(timer_delete(htimer
));
11028 g_posix_timers
[timerid
] = 0;
11034 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11035 case TARGET_NR_timerfd_create
:
11036 ret
= get_errno(timerfd_create(arg1
,
11037 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
11041 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11042 case TARGET_NR_timerfd_gettime
:
11044 struct itimerspec its_curr
;
11046 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
11048 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
11055 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11056 case TARGET_NR_timerfd_settime
:
11058 struct itimerspec its_new
, its_old
, *p_new
;
11061 if (target_to_host_itimerspec(&its_new
, arg3
)) {
11069 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
11071 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
11078 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11079 case TARGET_NR_ioprio_get
:
11080 ret
= get_errno(ioprio_get(arg1
, arg2
));
11084 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11085 case TARGET_NR_ioprio_set
:
11086 ret
= get_errno(ioprio_set(arg1
, arg2
, arg3
));
11090 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11091 case TARGET_NR_setns
:
11092 ret
= get_errno(setns(arg1
, arg2
));
11095 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11096 case TARGET_NR_unshare
:
11097 ret
= get_errno(unshare(arg1
));
11103 gemu_log("qemu: Unsupported syscall: %d\n", num
);
11104 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
11105 unimplemented_nowarn
:
11107 ret
= -TARGET_ENOSYS
;
11112 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
11115 print_syscall_ret(num
, ret
);
11118 ret
= -TARGET_EFAULT
;