/*
 *  Linux syscalls
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
29 #include <sys/mount.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
37 #include <linux/capability.h>
40 int __clone2(int (*fn
)(void *), void *child_stack_base
,
41 size_t stack_size
, int flags
, void *arg
, ...);
43 #include <sys/socket.h>
47 #include <sys/times.h>
50 #include <sys/statfs.h>
52 #include <sys/sysinfo.h>
53 #include <sys/signalfd.h>
54 //#include <sys/user.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <linux/wireless.h>
58 #include <linux/icmp.h>
59 #include "qemu-common.h"
61 #include <sys/timerfd.h>
67 #include <sys/eventfd.h>
70 #include <sys/epoll.h>
73 #include "qemu/xattr.h"
75 #ifdef CONFIG_SENDFILE
76 #include <sys/sendfile.h>
79 #define termios host_termios
80 #define winsize host_winsize
81 #define termio host_termio
82 #define sgttyb host_sgttyb /* same as target */
83 #define tchars host_tchars /* same as target */
84 #define ltchars host_ltchars /* same as target */
86 #include <linux/termios.h>
87 #include <linux/unistd.h>
88 #include <linux/cdrom.h>
89 #include <linux/hdreg.h>
90 #include <linux/soundcard.h>
92 #include <linux/mtio.h>
94 #if defined(CONFIG_FIEMAP)
95 #include <linux/fiemap.h>
99 #include <linux/dm-ioctl.h>
100 #include <linux/reboot.h>
101 #include <linux/route.h>
102 #include <linux/filter.h>
103 #include <linux/blkpg.h>
104 #include <linux/netlink.h>
105 #ifdef CONFIG_RTNETLINK
106 #include <linux/rtnetlink.h>
108 #include <linux/audit.h>
109 #include "linux_loop.h"
114 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
115 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
118 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
119 * once. This exercises the codepaths for restart.
121 //#define DEBUG_ERESTARTSYS
123 //#include <linux/msdos_fs.h>
124 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
125 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
127 /* This is the size of the host kernel's sigset_t, needed where we make
128 * direct system calls that take a sigset_t pointer and a size.
130 #define SIGSET_T_SIZE (_NSIG / 8)
140 #define _syscall0(type,name) \
141 static type name (void) \
143 return syscall(__NR_##name); \
146 #define _syscall1(type,name,type1,arg1) \
147 static type name (type1 arg1) \
149 return syscall(__NR_##name, arg1); \
152 #define _syscall2(type,name,type1,arg1,type2,arg2) \
153 static type name (type1 arg1,type2 arg2) \
155 return syscall(__NR_##name, arg1, arg2); \
158 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
159 static type name (type1 arg1,type2 arg2,type3 arg3) \
161 return syscall(__NR_##name, arg1, arg2, arg3); \
164 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
165 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
167 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
170 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
172 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
174 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
178 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
179 type5,arg5,type6,arg6) \
180 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
183 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
187 #define __NR_sys_uname __NR_uname
188 #define __NR_sys_getcwd1 __NR_getcwd
189 #define __NR_sys_getdents __NR_getdents
190 #define __NR_sys_getdents64 __NR_getdents64
191 #define __NR_sys_getpriority __NR_getpriority
192 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
193 #define __NR_sys_syslog __NR_syslog
194 #define __NR_sys_futex __NR_futex
195 #define __NR_sys_inotify_init __NR_inotify_init
196 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
197 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
199 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
201 #define __NR__llseek __NR_lseek
204 /* Newer kernel ports have llseek() instead of _llseek() */
205 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
206 #define TARGET_NR__llseek TARGET_NR_llseek
210 _syscall0(int, gettid
)
212 /* This is a replacement for the host gettid() and must return a host
214 static int gettid(void) {
218 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
219 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
221 #if !defined(__NR_getdents) || \
222 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
223 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
225 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
226 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
227 loff_t
*, res
, uint
, wh
);
229 _syscall3(int,sys_rt_sigqueueinfo
,int,pid
,int,sig
,siginfo_t
*,uinfo
)
230 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
231 #ifdef __NR_exit_group
232 _syscall1(int,exit_group
,int,error_code
)
234 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
235 _syscall1(int,set_tid_address
,int *,tidptr
)
237 #if defined(TARGET_NR_futex) && defined(__NR_futex)
238 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
239 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
241 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
242 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
243 unsigned long *, user_mask_ptr
);
244 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
245 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
246 unsigned long *, user_mask_ptr
);
247 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
249 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
250 struct __user_cap_data_struct
*, data
);
251 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
252 struct __user_cap_data_struct
*, data
);
253 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
254 _syscall2(int, ioprio_get
, int, which
, int, who
)
256 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
257 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
259 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
260 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
263 static bitmask_transtbl fcntl_flags_tbl
[] = {
264 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
265 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
266 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
267 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
268 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
269 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
270 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
271 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
272 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
273 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
274 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
275 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
276 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
277 #if defined(O_DIRECT)
278 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
280 #if defined(O_NOATIME)
281 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
283 #if defined(O_CLOEXEC)
284 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
287 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
289 /* Don't terminate the list prematurely on 64-bit host+guest. */
290 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
291 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
296 typedef abi_long (*TargetFdDataFunc
)(void *, size_t);
297 typedef abi_long (*TargetFdAddrFunc
)(void *, abi_ulong
, socklen_t
);
298 typedef struct TargetFdTrans
{
299 TargetFdDataFunc host_to_target_data
;
300 TargetFdDataFunc target_to_host_data
;
301 TargetFdAddrFunc target_to_host_addr
;
304 static TargetFdTrans
**target_fd_trans
;
306 static unsigned int target_fd_max
;
308 static TargetFdDataFunc
fd_trans_target_to_host_data(int fd
)
310 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
311 return target_fd_trans
[fd
]->target_to_host_data
;
316 static TargetFdDataFunc
fd_trans_host_to_target_data(int fd
)
318 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
319 return target_fd_trans
[fd
]->host_to_target_data
;
324 static TargetFdAddrFunc
fd_trans_target_to_host_addr(int fd
)
326 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
327 return target_fd_trans
[fd
]->target_to_host_addr
;
332 static void fd_trans_register(int fd
, TargetFdTrans
*trans
)
336 if (fd
>= target_fd_max
) {
337 oldmax
= target_fd_max
;
338 target_fd_max
= ((fd
>> 6) + 1) << 6; /* by slice of 64 entries */
339 target_fd_trans
= g_renew(TargetFdTrans
*,
340 target_fd_trans
, target_fd_max
);
341 memset((void *)(target_fd_trans
+ oldmax
), 0,
342 (target_fd_max
- oldmax
) * sizeof(TargetFdTrans
*));
344 target_fd_trans
[fd
] = trans
;
347 static void fd_trans_unregister(int fd
)
349 if (fd
>= 0 && fd
< target_fd_max
) {
350 target_fd_trans
[fd
] = NULL
;
354 static void fd_trans_dup(int oldfd
, int newfd
)
356 fd_trans_unregister(newfd
);
357 if (oldfd
< target_fd_max
&& target_fd_trans
[oldfd
]) {
358 fd_trans_register(newfd
, target_fd_trans
[oldfd
]);
/* getcwd() wrapper matching the kernel syscall convention: on success
 * returns the length of the path INCLUDING the trailing NUL; on failure
 * returns -1 with errno set by getcwd(). */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return -1;
    }
    return strlen(buf) + 1;
}
#ifdef TARGET_NR_utimensat
#ifdef CONFIG_UTIMENSAT
/* Host libc provides utimensat()/futimens(); a NULL pathname means
 * "operate on the fd itself", mirroring the kernel's behavior. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    if (pathname == NULL)
        return futimens(dirfd, times);
    else
        return utimensat(dirfd, pathname, times, flags);
}
#elif defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
/* Neither libc nor kernel support: report ENOSYS like the kernel would. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
395 #ifdef CONFIG_INOTIFY
396 #include <sys/inotify.h>
398 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
399 static int sys_inotify_init(void)
401 return (inotify_init());
404 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
405 static int sys_inotify_add_watch(int fd
,const char *pathname
, int32_t mask
)
407 return (inotify_add_watch(fd
, pathname
, mask
));
410 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
411 static int sys_inotify_rm_watch(int fd
, int32_t wd
)
413 return (inotify_rm_watch(fd
, wd
));
416 #ifdef CONFIG_INOTIFY1
417 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
418 static int sys_inotify_init1(int flags
)
420 return (inotify_init1(flags
));
425 /* Userspace can usually survive runtime without inotify */
426 #undef TARGET_NR_inotify_init
427 #undef TARGET_NR_inotify_init1
428 #undef TARGET_NR_inotify_add_watch
429 #undef TARGET_NR_inotify_rm_watch
430 #endif /* CONFIG_INOTIFY */
432 #if defined(TARGET_NR_ppoll)
434 # define __NR_ppoll -1
436 #define __NR_sys_ppoll __NR_ppoll
437 _syscall5(int, sys_ppoll
, struct pollfd
*, fds
, nfds_t
, nfds
,
438 struct timespec
*, timeout
, const sigset_t
*, sigmask
,
442 #if defined(TARGET_NR_prlimit64)
443 #ifndef __NR_prlimit64
444 # define __NR_prlimit64 -1
446 #define __NR_sys_prlimit64 __NR_prlimit64
447 /* The glibc rlimit structure may not be that used by the underlying syscall */
448 struct host_rlimit64
{
452 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
453 const struct host_rlimit64
*, new_limit
,
454 struct host_rlimit64
*, old_limit
)
458 #if defined(TARGET_NR_timer_create)
459 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
460 static timer_t g_posix_timers
[32] = { 0, } ;
462 static inline int next_free_host_timer(void)
465 /* FIXME: Does finding the next free slot require a lock? */
466 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
467 if (g_posix_timers
[k
] == 0) {
468 g_posix_timers
[k
] = (timer_t
) 1;
/* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
492 #define ERRNO_TABLE_SIZE 1200
494 /* target_to_host_errno_table[] is initialized from
495 * host_to_target_errno_table[] in syscall_init(). */
496 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
500 * This list is the union of errno values overridden in asm-<arch>/errno.h
501 * minus the errnos that are not actually generic to all archs.
503 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
504 [EAGAIN
] = TARGET_EAGAIN
,
505 [EIDRM
] = TARGET_EIDRM
,
506 [ECHRNG
] = TARGET_ECHRNG
,
507 [EL2NSYNC
] = TARGET_EL2NSYNC
,
508 [EL3HLT
] = TARGET_EL3HLT
,
509 [EL3RST
] = TARGET_EL3RST
,
510 [ELNRNG
] = TARGET_ELNRNG
,
511 [EUNATCH
] = TARGET_EUNATCH
,
512 [ENOCSI
] = TARGET_ENOCSI
,
513 [EL2HLT
] = TARGET_EL2HLT
,
514 [EDEADLK
] = TARGET_EDEADLK
,
515 [ENOLCK
] = TARGET_ENOLCK
,
516 [EBADE
] = TARGET_EBADE
,
517 [EBADR
] = TARGET_EBADR
,
518 [EXFULL
] = TARGET_EXFULL
,
519 [ENOANO
] = TARGET_ENOANO
,
520 [EBADRQC
] = TARGET_EBADRQC
,
521 [EBADSLT
] = TARGET_EBADSLT
,
522 [EBFONT
] = TARGET_EBFONT
,
523 [ENOSTR
] = TARGET_ENOSTR
,
524 [ENODATA
] = TARGET_ENODATA
,
525 [ETIME
] = TARGET_ETIME
,
526 [ENOSR
] = TARGET_ENOSR
,
527 [ENONET
] = TARGET_ENONET
,
528 [ENOPKG
] = TARGET_ENOPKG
,
529 [EREMOTE
] = TARGET_EREMOTE
,
530 [ENOLINK
] = TARGET_ENOLINK
,
531 [EADV
] = TARGET_EADV
,
532 [ESRMNT
] = TARGET_ESRMNT
,
533 [ECOMM
] = TARGET_ECOMM
,
534 [EPROTO
] = TARGET_EPROTO
,
535 [EDOTDOT
] = TARGET_EDOTDOT
,
536 [EMULTIHOP
] = TARGET_EMULTIHOP
,
537 [EBADMSG
] = TARGET_EBADMSG
,
538 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
539 [EOVERFLOW
] = TARGET_EOVERFLOW
,
540 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
541 [EBADFD
] = TARGET_EBADFD
,
542 [EREMCHG
] = TARGET_EREMCHG
,
543 [ELIBACC
] = TARGET_ELIBACC
,
544 [ELIBBAD
] = TARGET_ELIBBAD
,
545 [ELIBSCN
] = TARGET_ELIBSCN
,
546 [ELIBMAX
] = TARGET_ELIBMAX
,
547 [ELIBEXEC
] = TARGET_ELIBEXEC
,
548 [EILSEQ
] = TARGET_EILSEQ
,
549 [ENOSYS
] = TARGET_ENOSYS
,
550 [ELOOP
] = TARGET_ELOOP
,
551 [ERESTART
] = TARGET_ERESTART
,
552 [ESTRPIPE
] = TARGET_ESTRPIPE
,
553 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
554 [EUSERS
] = TARGET_EUSERS
,
555 [ENOTSOCK
] = TARGET_ENOTSOCK
,
556 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
557 [EMSGSIZE
] = TARGET_EMSGSIZE
,
558 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
559 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
560 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
561 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
562 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
563 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
564 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
565 [EADDRINUSE
] = TARGET_EADDRINUSE
,
566 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
567 [ENETDOWN
] = TARGET_ENETDOWN
,
568 [ENETUNREACH
] = TARGET_ENETUNREACH
,
569 [ENETRESET
] = TARGET_ENETRESET
,
570 [ECONNABORTED
] = TARGET_ECONNABORTED
,
571 [ECONNRESET
] = TARGET_ECONNRESET
,
572 [ENOBUFS
] = TARGET_ENOBUFS
,
573 [EISCONN
] = TARGET_EISCONN
,
574 [ENOTCONN
] = TARGET_ENOTCONN
,
575 [EUCLEAN
] = TARGET_EUCLEAN
,
576 [ENOTNAM
] = TARGET_ENOTNAM
,
577 [ENAVAIL
] = TARGET_ENAVAIL
,
578 [EISNAM
] = TARGET_EISNAM
,
579 [EREMOTEIO
] = TARGET_EREMOTEIO
,
580 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
581 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
582 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
583 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
584 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
585 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
586 [EALREADY
] = TARGET_EALREADY
,
587 [EINPROGRESS
] = TARGET_EINPROGRESS
,
588 [ESTALE
] = TARGET_ESTALE
,
589 [ECANCELED
] = TARGET_ECANCELED
,
590 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
591 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
593 [ENOKEY
] = TARGET_ENOKEY
,
596 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
599 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
602 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
605 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
607 #ifdef ENOTRECOVERABLE
608 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
612 static inline int host_to_target_errno(int err
)
614 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
615 host_to_target_errno_table
[err
]) {
616 return host_to_target_errno_table
[err
];
621 static inline int target_to_host_errno(int err
)
623 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
624 target_to_host_errno_table
[err
]) {
625 return target_to_host_errno_table
[err
];
630 static inline abi_long
get_errno(abi_long ret
)
633 return -host_to_target_errno(errno
);
638 static inline int is_error(abi_long ret
)
640 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
643 char *target_strerror(int err
)
645 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
648 return strerror(target_to_host_errno(err
));
651 #define safe_syscall0(type, name) \
652 static type safe_##name(void) \
654 return safe_syscall(__NR_##name); \
657 #define safe_syscall1(type, name, type1, arg1) \
658 static type safe_##name(type1 arg1) \
660 return safe_syscall(__NR_##name, arg1); \
663 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
664 static type safe_##name(type1 arg1, type2 arg2) \
666 return safe_syscall(__NR_##name, arg1, arg2); \
669 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
670 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
672 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
675 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
677 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
679 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
682 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
683 type4, arg4, type5, arg5) \
684 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
687 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
690 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
691 type4, arg4, type5, arg5, type6, arg6) \
692 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
693 type5 arg5, type6 arg6) \
695 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
698 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
699 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
700 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
701 int, flags
, mode_t
, mode
)
702 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
703 struct rusage
*, rusage
)
704 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
705 int, options
, struct rusage
*, rusage
)
706 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
707 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
708 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
709 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
710 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
711 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
712 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
713 safe_syscall2(int, tkill
, int, tid
, int, sig
)
714 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
715 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
716 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
717 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
719 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
720 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
721 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
722 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
723 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
724 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
726 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
728 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
729 long, msgtype
, int, flags
)
731 /* This host kernel architecture uses a single ipc syscall; fake up
732 * wrappers for the sub-operations to hide this implementation detail.
733 * Annoyingly we can't include linux/ipc.h to get the constant definitions
734 * for the call parameter because some structs in there conflict with the
735 * sys/ipc.h ones. So we just define them here, and rely on them being
736 * the same for all host architectures.
740 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
742 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
743 void *, ptr
, long, fifth
)
744 static int safe_msgsnd(int msgid
, const void *msgp
, size_t sz
, int flags
)
746 return safe_ipc(Q_IPCCALL(0, Q_MSGSND
), msgid
, sz
, flags
, (void *)msgp
, 0);
748 static int safe_msgrcv(int msgid
, void *msgp
, size_t sz
, long type
, int flags
)
750 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV
), msgid
, sz
, flags
, msgp
, type
);
753 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
754 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
755 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
756 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
757 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
760 static inline int host_to_target_sock_type(int host_type
)
764 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
766 target_type
= TARGET_SOCK_DGRAM
;
769 target_type
= TARGET_SOCK_STREAM
;
772 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
776 #if defined(SOCK_CLOEXEC)
777 if (host_type
& SOCK_CLOEXEC
) {
778 target_type
|= TARGET_SOCK_CLOEXEC
;
782 #if defined(SOCK_NONBLOCK)
783 if (host_type
& SOCK_NONBLOCK
) {
784 target_type
|= TARGET_SOCK_NONBLOCK
;
791 static abi_ulong target_brk
;
792 static abi_ulong target_original_brk
;
793 static abi_ulong brk_page
;
795 void target_set_brk(abi_ulong new_brk
)
797 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
798 brk_page
= HOST_PAGE_ALIGN(target_brk
);
801 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
802 #define DEBUGF_BRK(message, args...)
804 /* do_brk() must return target values and target errnos. */
805 abi_long
do_brk(abi_ulong new_brk
)
807 abi_long mapped_addr
;
810 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
813 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
816 if (new_brk
< target_original_brk
) {
817 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
822 /* If the new brk is less than the highest page reserved to the
823 * target heap allocation, set it and we're almost done... */
824 if (new_brk
<= brk_page
) {
825 /* Heap contents are initialized to zero, as for anonymous
827 if (new_brk
> target_brk
) {
828 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
830 target_brk
= new_brk
;
831 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
835 /* We need to allocate more memory after the brk... Note that
836 * we don't use MAP_FIXED because that will map over the top of
837 * any existing mapping (like the one with the host libc or qemu
838 * itself); instead we treat "mapped but at wrong address" as
839 * a failure and unmap again.
841 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
842 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
843 PROT_READ
|PROT_WRITE
,
844 MAP_ANON
|MAP_PRIVATE
, 0, 0));
846 if (mapped_addr
== brk_page
) {
847 /* Heap contents are initialized to zero, as for anonymous
848 * mapped pages. Technically the new pages are already
849 * initialized to zero since they *are* anonymous mapped
850 * pages, however we have to take care with the contents that
851 * come from the remaining part of the previous page: it may
852 * contains garbage data due to a previous heap usage (grown
854 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
856 target_brk
= new_brk
;
857 brk_page
= HOST_PAGE_ALIGN(target_brk
);
858 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
861 } else if (mapped_addr
!= -1) {
862 /* Mapped but at wrong address, meaning there wasn't actually
863 * enough space for this brk.
865 target_munmap(mapped_addr
, new_alloc_size
);
867 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
870 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
873 #if defined(TARGET_ALPHA)
874 /* We (partially) emulate OSF/1 on Alpha, which requires we
875 return a proper errno, not an unchanged brk value. */
876 return -TARGET_ENOMEM
;
878 /* For everything else, return the previous break. */
882 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
883 abi_ulong target_fds_addr
,
887 abi_ulong b
, *target_fds
;
889 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
890 if (!(target_fds
= lock_user(VERIFY_READ
,
892 sizeof(abi_ulong
) * nw
,
894 return -TARGET_EFAULT
;
898 for (i
= 0; i
< nw
; i
++) {
899 /* grab the abi_ulong */
900 __get_user(b
, &target_fds
[i
]);
901 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
902 /* check the bit inside the abi_ulong */
909 unlock_user(target_fds
, target_fds_addr
, 0);
914 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
915 abi_ulong target_fds_addr
,
918 if (target_fds_addr
) {
919 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
920 return -TARGET_EFAULT
;
928 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
934 abi_ulong
*target_fds
;
936 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
937 if (!(target_fds
= lock_user(VERIFY_WRITE
,
939 sizeof(abi_ulong
) * nw
,
941 return -TARGET_EFAULT
;
944 for (i
= 0; i
< nw
; i
++) {
946 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
947 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
950 __put_user(v
, &target_fds
[i
]);
953 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
958 #if defined(__alpha__)
964 static inline abi_long
host_to_target_clock_t(long ticks
)
966 #if HOST_HZ == TARGET_HZ
969 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
973 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
974 const struct rusage
*rusage
)
976 struct target_rusage
*target_rusage
;
978 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
979 return -TARGET_EFAULT
;
980 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
981 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
982 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
983 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
984 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
985 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
986 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
987 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
988 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
989 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
990 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
991 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
992 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
993 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
994 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
995 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
996 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
997 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
998 unlock_user_struct(target_rusage
, target_addr
, 1);
1003 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1005 abi_ulong target_rlim_swap
;
1008 target_rlim_swap
= tswapal(target_rlim
);
1009 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1010 return RLIM_INFINITY
;
1012 result
= target_rlim_swap
;
1013 if (target_rlim_swap
!= (rlim_t
)result
)
1014 return RLIM_INFINITY
;
1019 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1021 abi_ulong target_rlim_swap
;
1024 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1025 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1027 target_rlim_swap
= rlim
;
1028 result
= tswapal(target_rlim_swap
);
1033 static inline int target_to_host_resource(int code
)
1036 case TARGET_RLIMIT_AS
:
1038 case TARGET_RLIMIT_CORE
:
1040 case TARGET_RLIMIT_CPU
:
1042 case TARGET_RLIMIT_DATA
:
1044 case TARGET_RLIMIT_FSIZE
:
1045 return RLIMIT_FSIZE
;
1046 case TARGET_RLIMIT_LOCKS
:
1047 return RLIMIT_LOCKS
;
1048 case TARGET_RLIMIT_MEMLOCK
:
1049 return RLIMIT_MEMLOCK
;
1050 case TARGET_RLIMIT_MSGQUEUE
:
1051 return RLIMIT_MSGQUEUE
;
1052 case TARGET_RLIMIT_NICE
:
1054 case TARGET_RLIMIT_NOFILE
:
1055 return RLIMIT_NOFILE
;
1056 case TARGET_RLIMIT_NPROC
:
1057 return RLIMIT_NPROC
;
1058 case TARGET_RLIMIT_RSS
:
1060 case TARGET_RLIMIT_RTPRIO
:
1061 return RLIMIT_RTPRIO
;
1062 case TARGET_RLIMIT_SIGPENDING
:
1063 return RLIMIT_SIGPENDING
;
1064 case TARGET_RLIMIT_STACK
:
1065 return RLIMIT_STACK
;
1071 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1072 abi_ulong target_tv_addr
)
1074 struct target_timeval
*target_tv
;
1076 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
1077 return -TARGET_EFAULT
;
1079 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1080 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1082 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1087 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1088 const struct timeval
*tv
)
1090 struct target_timeval
*target_tv
;
1092 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
1093 return -TARGET_EFAULT
;
1095 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1096 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1098 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1103 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1104 abi_ulong target_tz_addr
)
1106 struct target_timezone
*target_tz
;
1108 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1109 return -TARGET_EFAULT
;
1112 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1113 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1115 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1120 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1123 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1124 abi_ulong target_mq_attr_addr
)
1126 struct target_mq_attr
*target_mq_attr
;
1128 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1129 target_mq_attr_addr
, 1))
1130 return -TARGET_EFAULT
;
1132 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1133 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1134 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1135 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1137 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1142 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1143 const struct mq_attr
*attr
)
1145 struct target_mq_attr
*target_mq_attr
;
1147 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1148 target_mq_attr_addr
, 0))
1149 return -TARGET_EFAULT
;
1151 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1152 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1153 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1154 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1156 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1162 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1163 /* do_select() must return target values and target errnos. */
1164 static abi_long
do_select(int n
,
1165 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1166 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1168 fd_set rfds
, wfds
, efds
;
1169 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1171 struct timespec ts
, *ts_ptr
;
1174 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1178 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1182 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1187 if (target_tv_addr
) {
1188 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1189 return -TARGET_EFAULT
;
1190 ts
.tv_sec
= tv
.tv_sec
;
1191 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1197 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1200 if (!is_error(ret
)) {
1201 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1202 return -TARGET_EFAULT
;
1203 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1204 return -TARGET_EFAULT
;
1205 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1206 return -TARGET_EFAULT
;
1208 if (target_tv_addr
) {
1209 tv
.tv_sec
= ts
.tv_sec
;
1210 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1211 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1212 return -TARGET_EFAULT
;
1221 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1224 return pipe2(host_pipe
, flags
);
1230 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1231 int flags
, int is_pipe2
)
1235 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1238 return get_errno(ret
);
1240 /* Several targets have special calling conventions for the original
1241 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1243 #if defined(TARGET_ALPHA)
1244 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1245 return host_pipe
[0];
1246 #elif defined(TARGET_MIPS)
1247 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1248 return host_pipe
[0];
1249 #elif defined(TARGET_SH4)
1250 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1251 return host_pipe
[0];
1252 #elif defined(TARGET_SPARC)
1253 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1254 return host_pipe
[0];
1258 if (put_user_s32(host_pipe
[0], pipedes
)
1259 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1260 return -TARGET_EFAULT
;
1261 return get_errno(ret
);
1264 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1265 abi_ulong target_addr
,
1268 struct target_ip_mreqn
*target_smreqn
;
1270 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1272 return -TARGET_EFAULT
;
1273 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1274 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1275 if (len
== sizeof(struct target_ip_mreqn
))
1276 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1277 unlock_user(target_smreqn
, target_addr
, 0);
1282 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1283 abi_ulong target_addr
,
1286 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1287 sa_family_t sa_family
;
1288 struct target_sockaddr
*target_saddr
;
1290 if (fd_trans_target_to_host_addr(fd
)) {
1291 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1294 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1296 return -TARGET_EFAULT
;
1298 sa_family
= tswap16(target_saddr
->sa_family
);
1300 /* Oops. The caller might send a incomplete sun_path; sun_path
1301 * must be terminated by \0 (see the manual page), but
1302 * unfortunately it is quite common to specify sockaddr_un
1303 * length as "strlen(x->sun_path)" while it should be
1304 * "strlen(...) + 1". We'll fix that here if needed.
1305 * Linux kernel has a similar feature.
1308 if (sa_family
== AF_UNIX
) {
1309 if (len
< unix_maxlen
&& len
> 0) {
1310 char *cp
= (char*)target_saddr
;
1312 if ( cp
[len
-1] && !cp
[len
] )
1315 if (len
> unix_maxlen
)
1319 memcpy(addr
, target_saddr
, len
);
1320 addr
->sa_family
= sa_family
;
1321 if (sa_family
== AF_NETLINK
) {
1322 struct sockaddr_nl
*nladdr
;
1324 nladdr
= (struct sockaddr_nl
*)addr
;
1325 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1326 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1327 } else if (sa_family
== AF_PACKET
) {
1328 struct target_sockaddr_ll
*lladdr
;
1330 lladdr
= (struct target_sockaddr_ll
*)addr
;
1331 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1332 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1334 unlock_user(target_saddr
, target_addr
, 0);
1339 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1340 struct sockaddr
*addr
,
1343 struct target_sockaddr
*target_saddr
;
1345 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1347 return -TARGET_EFAULT
;
1348 memcpy(target_saddr
, addr
, len
);
1349 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1350 if (addr
->sa_family
== AF_NETLINK
) {
1351 struct sockaddr_nl
*target_nl
= (struct sockaddr_nl
*)target_saddr
;
1352 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1353 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1355 unlock_user(target_saddr
, target_addr
, len
);
1360 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1361 struct target_msghdr
*target_msgh
)
1363 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1364 abi_long msg_controllen
;
1365 abi_ulong target_cmsg_addr
;
1366 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1367 socklen_t space
= 0;
1369 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1370 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1372 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1373 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1374 target_cmsg_start
= target_cmsg
;
1376 return -TARGET_EFAULT
;
1378 while (cmsg
&& target_cmsg
) {
1379 void *data
= CMSG_DATA(cmsg
);
1380 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1382 int len
= tswapal(target_cmsg
->cmsg_len
)
1383 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
1385 space
+= CMSG_SPACE(len
);
1386 if (space
> msgh
->msg_controllen
) {
1387 space
-= CMSG_SPACE(len
);
1388 /* This is a QEMU bug, since we allocated the payload
1389 * area ourselves (unlike overflow in host-to-target
1390 * conversion, which is just the guest giving us a buffer
1391 * that's too small). It can't happen for the payload types
1392 * we currently support; if it becomes an issue in future
1393 * we would need to improve our allocation strategy to
1394 * something more intelligent than "twice the size of the
1395 * target buffer we're reading from".
1397 gemu_log("Host cmsg overflow\n");
1401 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1402 cmsg
->cmsg_level
= SOL_SOCKET
;
1404 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1406 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1407 cmsg
->cmsg_len
= CMSG_LEN(len
);
1409 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1410 int *fd
= (int *)data
;
1411 int *target_fd
= (int *)target_data
;
1412 int i
, numfds
= len
/ sizeof(int);
1414 for (i
= 0; i
< numfds
; i
++) {
1415 __get_user(fd
[i
], target_fd
+ i
);
1417 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1418 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1419 struct ucred
*cred
= (struct ucred
*)data
;
1420 struct target_ucred
*target_cred
=
1421 (struct target_ucred
*)target_data
;
1423 __get_user(cred
->pid
, &target_cred
->pid
);
1424 __get_user(cred
->uid
, &target_cred
->uid
);
1425 __get_user(cred
->gid
, &target_cred
->gid
);
1427 gemu_log("Unsupported ancillary data: %d/%d\n",
1428 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1429 memcpy(data
, target_data
, len
);
1432 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1433 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1436 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1438 msgh
->msg_controllen
= space
;
1442 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1443 struct msghdr
*msgh
)
1445 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1446 abi_long msg_controllen
;
1447 abi_ulong target_cmsg_addr
;
1448 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1449 socklen_t space
= 0;
1451 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1452 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1454 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1455 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1456 target_cmsg_start
= target_cmsg
;
1458 return -TARGET_EFAULT
;
1460 while (cmsg
&& target_cmsg
) {
1461 void *data
= CMSG_DATA(cmsg
);
1462 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1464 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
1465 int tgt_len
, tgt_space
;
1467 /* We never copy a half-header but may copy half-data;
1468 * this is Linux's behaviour in put_cmsg(). Note that
1469 * truncation here is a guest problem (which we report
1470 * to the guest via the CTRUNC bit), unlike truncation
1471 * in target_to_host_cmsg, which is a QEMU bug.
1473 if (msg_controllen
< sizeof(struct cmsghdr
)) {
1474 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1478 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1479 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1481 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1483 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1485 tgt_len
= TARGET_CMSG_LEN(len
);
1487 /* Payload types which need a different size of payload on
1488 * the target must adjust tgt_len here.
1490 switch (cmsg
->cmsg_level
) {
1492 switch (cmsg
->cmsg_type
) {
1494 tgt_len
= sizeof(struct target_timeval
);
1503 if (msg_controllen
< tgt_len
) {
1504 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1505 tgt_len
= msg_controllen
;
1508 /* We must now copy-and-convert len bytes of payload
1509 * into tgt_len bytes of destination space. Bear in mind
1510 * that in both source and destination we may be dealing
1511 * with a truncated value!
1513 switch (cmsg
->cmsg_level
) {
1515 switch (cmsg
->cmsg_type
) {
1518 int *fd
= (int *)data
;
1519 int *target_fd
= (int *)target_data
;
1520 int i
, numfds
= tgt_len
/ sizeof(int);
1522 for (i
= 0; i
< numfds
; i
++) {
1523 __put_user(fd
[i
], target_fd
+ i
);
1529 struct timeval
*tv
= (struct timeval
*)data
;
1530 struct target_timeval
*target_tv
=
1531 (struct target_timeval
*)target_data
;
1533 if (len
!= sizeof(struct timeval
) ||
1534 tgt_len
!= sizeof(struct target_timeval
)) {
1538 /* copy struct timeval to target */
1539 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1540 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1543 case SCM_CREDENTIALS
:
1545 struct ucred
*cred
= (struct ucred
*)data
;
1546 struct target_ucred
*target_cred
=
1547 (struct target_ucred
*)target_data
;
1549 __put_user(cred
->pid
, &target_cred
->pid
);
1550 __put_user(cred
->uid
, &target_cred
->uid
);
1551 __put_user(cred
->gid
, &target_cred
->gid
);
1561 gemu_log("Unsupported ancillary data: %d/%d\n",
1562 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1563 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1564 if (tgt_len
> len
) {
1565 memset(target_data
+ len
, 0, tgt_len
- len
);
1569 target_cmsg
->cmsg_len
= tswapal(tgt_len
);
1570 tgt_space
= TARGET_CMSG_SPACE(len
);
1571 if (msg_controllen
< tgt_space
) {
1572 tgt_space
= msg_controllen
;
1574 msg_controllen
-= tgt_space
;
1576 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1577 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1580 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1582 target_msgh
->msg_controllen
= tswapal(space
);
1586 static void tswap_nlmsghdr(struct nlmsghdr
*nlh
)
1588 nlh
->nlmsg_len
= tswap32(nlh
->nlmsg_len
);
1589 nlh
->nlmsg_type
= tswap16(nlh
->nlmsg_type
);
1590 nlh
->nlmsg_flags
= tswap16(nlh
->nlmsg_flags
);
1591 nlh
->nlmsg_seq
= tswap32(nlh
->nlmsg_seq
);
1592 nlh
->nlmsg_pid
= tswap32(nlh
->nlmsg_pid
);
1595 static abi_long
host_to_target_for_each_nlmsg(struct nlmsghdr
*nlh
,
1597 abi_long (*host_to_target_nlmsg
)
1598 (struct nlmsghdr
*))
1603 while (len
> sizeof(struct nlmsghdr
)) {
1605 nlmsg_len
= nlh
->nlmsg_len
;
1606 if (nlmsg_len
< sizeof(struct nlmsghdr
) ||
1611 switch (nlh
->nlmsg_type
) {
1613 tswap_nlmsghdr(nlh
);
1619 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
1620 e
->error
= tswap32(e
->error
);
1621 tswap_nlmsghdr(&e
->msg
);
1622 tswap_nlmsghdr(nlh
);
1626 ret
= host_to_target_nlmsg(nlh
);
1628 tswap_nlmsghdr(nlh
);
1633 tswap_nlmsghdr(nlh
);
1634 len
-= NLMSG_ALIGN(nlmsg_len
);
1635 nlh
= (struct nlmsghdr
*)(((char*)nlh
) + NLMSG_ALIGN(nlmsg_len
));
1640 static abi_long
target_to_host_for_each_nlmsg(struct nlmsghdr
*nlh
,
1642 abi_long (*target_to_host_nlmsg
)
1643 (struct nlmsghdr
*))
1647 while (len
> sizeof(struct nlmsghdr
)) {
1648 if (tswap32(nlh
->nlmsg_len
) < sizeof(struct nlmsghdr
) ||
1649 tswap32(nlh
->nlmsg_len
) > len
) {
1652 tswap_nlmsghdr(nlh
);
1653 switch (nlh
->nlmsg_type
) {
1660 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
1661 e
->error
= tswap32(e
->error
);
1662 tswap_nlmsghdr(&e
->msg
);
1665 ret
= target_to_host_nlmsg(nlh
);
1670 len
-= NLMSG_ALIGN(nlh
->nlmsg_len
);
1671 nlh
= (struct nlmsghdr
*)(((char *)nlh
) + NLMSG_ALIGN(nlh
->nlmsg_len
));
1676 #ifdef CONFIG_RTNETLINK
1677 static abi_long
host_to_target_for_each_rtattr(struct rtattr
*rtattr
,
1679 abi_long (*host_to_target_rtattr
)
1682 unsigned short rta_len
;
1685 while (len
> sizeof(struct rtattr
)) {
1686 rta_len
= rtattr
->rta_len
;
1687 if (rta_len
< sizeof(struct rtattr
) ||
1691 ret
= host_to_target_rtattr(rtattr
);
1692 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
1693 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
1697 len
-= RTA_ALIGN(rta_len
);
1698 rtattr
= (struct rtattr
*)(((char *)rtattr
) + RTA_ALIGN(rta_len
));
1703 static abi_long
host_to_target_data_link_rtattr(struct rtattr
*rtattr
)
1706 struct rtnl_link_stats
*st
;
1707 struct rtnl_link_stats64
*st64
;
1708 struct rtnl_link_ifmap
*map
;
1710 switch (rtattr
->rta_type
) {
1713 case IFLA_BROADCAST
:
1719 case IFLA_OPERSTATE
:
1722 case IFLA_PROTO_DOWN
:
1729 case IFLA_CARRIER_CHANGES
:
1730 case IFLA_NUM_RX_QUEUES
:
1731 case IFLA_NUM_TX_QUEUES
:
1732 case IFLA_PROMISCUITY
:
1734 case IFLA_LINK_NETNSID
:
1738 u32
= RTA_DATA(rtattr
);
1739 *u32
= tswap32(*u32
);
1741 /* struct rtnl_link_stats */
1743 st
= RTA_DATA(rtattr
);
1744 st
->rx_packets
= tswap32(st
->rx_packets
);
1745 st
->tx_packets
= tswap32(st
->tx_packets
);
1746 st
->rx_bytes
= tswap32(st
->rx_bytes
);
1747 st
->tx_bytes
= tswap32(st
->tx_bytes
);
1748 st
->rx_errors
= tswap32(st
->rx_errors
);
1749 st
->tx_errors
= tswap32(st
->tx_errors
);
1750 st
->rx_dropped
= tswap32(st
->rx_dropped
);
1751 st
->tx_dropped
= tswap32(st
->tx_dropped
);
1752 st
->multicast
= tswap32(st
->multicast
);
1753 st
->collisions
= tswap32(st
->collisions
);
1755 /* detailed rx_errors: */
1756 st
->rx_length_errors
= tswap32(st
->rx_length_errors
);
1757 st
->rx_over_errors
= tswap32(st
->rx_over_errors
);
1758 st
->rx_crc_errors
= tswap32(st
->rx_crc_errors
);
1759 st
->rx_frame_errors
= tswap32(st
->rx_frame_errors
);
1760 st
->rx_fifo_errors
= tswap32(st
->rx_fifo_errors
);
1761 st
->rx_missed_errors
= tswap32(st
->rx_missed_errors
);
1763 /* detailed tx_errors */
1764 st
->tx_aborted_errors
= tswap32(st
->tx_aborted_errors
);
1765 st
->tx_carrier_errors
= tswap32(st
->tx_carrier_errors
);
1766 st
->tx_fifo_errors
= tswap32(st
->tx_fifo_errors
);
1767 st
->tx_heartbeat_errors
= tswap32(st
->tx_heartbeat_errors
);
1768 st
->tx_window_errors
= tswap32(st
->tx_window_errors
);
1771 st
->rx_compressed
= tswap32(st
->rx_compressed
);
1772 st
->tx_compressed
= tswap32(st
->tx_compressed
);
1774 /* struct rtnl_link_stats64 */
1776 st64
= RTA_DATA(rtattr
);
1777 st64
->rx_packets
= tswap64(st64
->rx_packets
);
1778 st64
->tx_packets
= tswap64(st64
->tx_packets
);
1779 st64
->rx_bytes
= tswap64(st64
->rx_bytes
);
1780 st64
->tx_bytes
= tswap64(st64
->tx_bytes
);
1781 st64
->rx_errors
= tswap64(st64
->rx_errors
);
1782 st64
->tx_errors
= tswap64(st64
->tx_errors
);
1783 st64
->rx_dropped
= tswap64(st64
->rx_dropped
);
1784 st64
->tx_dropped
= tswap64(st64
->tx_dropped
);
1785 st64
->multicast
= tswap64(st64
->multicast
);
1786 st64
->collisions
= tswap64(st64
->collisions
);
1788 /* detailed rx_errors: */
1789 st64
->rx_length_errors
= tswap64(st64
->rx_length_errors
);
1790 st64
->rx_over_errors
= tswap64(st64
->rx_over_errors
);
1791 st64
->rx_crc_errors
= tswap64(st64
->rx_crc_errors
);
1792 st64
->rx_frame_errors
= tswap64(st64
->rx_frame_errors
);
1793 st64
->rx_fifo_errors
= tswap64(st64
->rx_fifo_errors
);
1794 st64
->rx_missed_errors
= tswap64(st64
->rx_missed_errors
);
1796 /* detailed tx_errors */
1797 st64
->tx_aborted_errors
= tswap64(st64
->tx_aborted_errors
);
1798 st64
->tx_carrier_errors
= tswap64(st64
->tx_carrier_errors
);
1799 st64
->tx_fifo_errors
= tswap64(st64
->tx_fifo_errors
);
1800 st64
->tx_heartbeat_errors
= tswap64(st64
->tx_heartbeat_errors
);
1801 st64
->tx_window_errors
= tswap64(st64
->tx_window_errors
);
1804 st64
->rx_compressed
= tswap64(st64
->rx_compressed
);
1805 st64
->tx_compressed
= tswap64(st64
->tx_compressed
);
1807 /* struct rtnl_link_ifmap */
1809 map
= RTA_DATA(rtattr
);
1810 map
->mem_start
= tswap64(map
->mem_start
);
1811 map
->mem_end
= tswap64(map
->mem_end
);
1812 map
->base_addr
= tswap64(map
->base_addr
);
1813 map
->irq
= tswap16(map
->irq
);
1818 /* FIXME: implement nested type */
1819 gemu_log("Unimplemented nested type %d\n", rtattr
->rta_type
);
1822 gemu_log("Unknown host IFLA type: %d\n", rtattr
->rta_type
);
1828 static abi_long
host_to_target_data_addr_rtattr(struct rtattr
*rtattr
)
1831 struct ifa_cacheinfo
*ci
;
1833 switch (rtattr
->rta_type
) {
1834 /* binary: depends on family type */
1844 u32
= RTA_DATA(rtattr
);
1845 *u32
= tswap32(*u32
);
1847 /* struct ifa_cacheinfo */
1849 ci
= RTA_DATA(rtattr
);
1850 ci
->ifa_prefered
= tswap32(ci
->ifa_prefered
);
1851 ci
->ifa_valid
= tswap32(ci
->ifa_valid
);
1852 ci
->cstamp
= tswap32(ci
->cstamp
);
1853 ci
->tstamp
= tswap32(ci
->tstamp
);
1856 gemu_log("Unknown host IFA type: %d\n", rtattr
->rta_type
);
1862 static abi_long
host_to_target_data_route_rtattr(struct rtattr
*rtattr
)
1865 switch (rtattr
->rta_type
) {
1866 /* binary: depends on family type */
1875 u32
= RTA_DATA(rtattr
);
1876 *u32
= tswap32(*u32
);
1879 gemu_log("Unknown host RTA type: %d\n", rtattr
->rta_type
);
1885 static abi_long
host_to_target_link_rtattr(struct rtattr
*rtattr
,
1886 uint32_t rtattr_len
)
1888 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
1889 host_to_target_data_link_rtattr
);
1892 static abi_long
host_to_target_addr_rtattr(struct rtattr
*rtattr
,
1893 uint32_t rtattr_len
)
1895 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
1896 host_to_target_data_addr_rtattr
);
1899 static abi_long
host_to_target_route_rtattr(struct rtattr
*rtattr
,
1900 uint32_t rtattr_len
)
1902 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
1903 host_to_target_data_route_rtattr
);
1906 static abi_long
host_to_target_data_route(struct nlmsghdr
*nlh
)
1909 struct ifinfomsg
*ifi
;
1910 struct ifaddrmsg
*ifa
;
1913 nlmsg_len
= nlh
->nlmsg_len
;
1914 switch (nlh
->nlmsg_type
) {
1918 ifi
= NLMSG_DATA(nlh
);
1919 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
1920 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
1921 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
1922 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
1923 host_to_target_link_rtattr(IFLA_RTA(ifi
),
1924 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifi
)));
1929 ifa
= NLMSG_DATA(nlh
);
1930 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
1931 host_to_target_addr_rtattr(IFA_RTA(ifa
),
1932 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifa
)));
1937 rtm
= NLMSG_DATA(nlh
);
1938 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
1939 host_to_target_route_rtattr(RTM_RTA(rtm
),
1940 nlmsg_len
- NLMSG_LENGTH(sizeof(*rtm
)));
1943 return -TARGET_EINVAL
;
1948 static inline abi_long
host_to_target_nlmsg_route(struct nlmsghdr
*nlh
,
1951 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_route
);
1954 static abi_long
target_to_host_for_each_rtattr(struct rtattr
*rtattr
,
1956 abi_long (*target_to_host_rtattr
)
1961 while (len
>= sizeof(struct rtattr
)) {
1962 if (tswap16(rtattr
->rta_len
) < sizeof(struct rtattr
) ||
1963 tswap16(rtattr
->rta_len
) > len
) {
1966 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
1967 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
1968 ret
= target_to_host_rtattr(rtattr
);
1972 len
-= RTA_ALIGN(rtattr
->rta_len
);
1973 rtattr
= (struct rtattr
*)(((char *)rtattr
) +
1974 RTA_ALIGN(rtattr
->rta_len
));
1979 static abi_long
target_to_host_data_link_rtattr(struct rtattr
*rtattr
)
1981 switch (rtattr
->rta_type
) {
1983 gemu_log("Unknown target IFLA type: %d\n", rtattr
->rta_type
);
1989 static abi_long
target_to_host_data_addr_rtattr(struct rtattr
*rtattr
)
1991 switch (rtattr
->rta_type
) {
1992 /* binary: depends on family type */
1997 gemu_log("Unknown target IFA type: %d\n", rtattr
->rta_type
);
2003 static abi_long
target_to_host_data_route_rtattr(struct rtattr
*rtattr
)
2006 switch (rtattr
->rta_type
) {
2007 /* binary: depends on family type */
2014 u32
= RTA_DATA(rtattr
);
2015 *u32
= tswap32(*u32
);
2018 gemu_log("Unknown target RTA type: %d\n", rtattr
->rta_type
);
2024 static void target_to_host_link_rtattr(struct rtattr
*rtattr
,
2025 uint32_t rtattr_len
)
2027 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2028 target_to_host_data_link_rtattr
);
2031 static void target_to_host_addr_rtattr(struct rtattr
*rtattr
,
2032 uint32_t rtattr_len
)
2034 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2035 target_to_host_data_addr_rtattr
);
2038 static void target_to_host_route_rtattr(struct rtattr
*rtattr
,
2039 uint32_t rtattr_len
)
2041 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2042 target_to_host_data_route_rtattr
);
2045 static abi_long
target_to_host_data_route(struct nlmsghdr
*nlh
)
2047 struct ifinfomsg
*ifi
;
2048 struct ifaddrmsg
*ifa
;
2051 switch (nlh
->nlmsg_type
) {
2056 ifi
= NLMSG_DATA(nlh
);
2057 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2058 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2059 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2060 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2061 target_to_host_link_rtattr(IFLA_RTA(ifi
), nlh
->nlmsg_len
-
2062 NLMSG_LENGTH(sizeof(*ifi
)));
2067 ifa
= NLMSG_DATA(nlh
);
2068 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2069 target_to_host_addr_rtattr(IFA_RTA(ifa
), nlh
->nlmsg_len
-
2070 NLMSG_LENGTH(sizeof(*ifa
)));
2076 rtm
= NLMSG_DATA(nlh
);
2077 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2078 target_to_host_route_rtattr(RTM_RTA(rtm
), nlh
->nlmsg_len
-
2079 NLMSG_LENGTH(sizeof(*rtm
)));
2082 return -TARGET_EOPNOTSUPP
;
2087 static abi_long
target_to_host_nlmsg_route(struct nlmsghdr
*nlh
, size_t len
)
2089 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_route
);
2091 #endif /* CONFIG_RTNETLINK */
2093 static abi_long
host_to_target_data_audit(struct nlmsghdr
*nlh
)
2095 switch (nlh
->nlmsg_type
) {
2097 gemu_log("Unknown host audit message type %d\n",
2099 return -TARGET_EINVAL
;
2104 static inline abi_long
host_to_target_nlmsg_audit(struct nlmsghdr
*nlh
,
2107 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_audit
);
2110 static abi_long
target_to_host_data_audit(struct nlmsghdr
*nlh
)
2112 switch (nlh
->nlmsg_type
) {
2114 case AUDIT_FIRST_USER_MSG
... AUDIT_LAST_USER_MSG
:
2115 case AUDIT_FIRST_USER_MSG2
... AUDIT_LAST_USER_MSG2
:
2118 gemu_log("Unknown target audit message type %d\n",
2120 return -TARGET_EINVAL
;
2126 static abi_long
target_to_host_nlmsg_audit(struct nlmsghdr
*nlh
, size_t len
)
2128 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_audit
);
2131 /* do_setsockopt() Must return target values and target errnos. */
2132 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
2133 abi_ulong optval_addr
, socklen_t optlen
)
2137 struct ip_mreqn
*ip_mreq
;
2138 struct ip_mreq_source
*ip_mreq_source
;
2142 /* TCP options all take an 'int' value. */
2143 if (optlen
< sizeof(uint32_t))
2144 return -TARGET_EINVAL
;
2146 if (get_user_u32(val
, optval_addr
))
2147 return -TARGET_EFAULT
;
2148 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2155 case IP_ROUTER_ALERT
:
2159 case IP_MTU_DISCOVER
:
2165 case IP_MULTICAST_TTL
:
2166 case IP_MULTICAST_LOOP
:
2168 if (optlen
>= sizeof(uint32_t)) {
2169 if (get_user_u32(val
, optval_addr
))
2170 return -TARGET_EFAULT
;
2171 } else if (optlen
>= 1) {
2172 if (get_user_u8(val
, optval_addr
))
2173 return -TARGET_EFAULT
;
2175 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2177 case IP_ADD_MEMBERSHIP
:
2178 case IP_DROP_MEMBERSHIP
:
2179 if (optlen
< sizeof (struct target_ip_mreq
) ||
2180 optlen
> sizeof (struct target_ip_mreqn
))
2181 return -TARGET_EINVAL
;
2183 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
2184 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
2185 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
2188 case IP_BLOCK_SOURCE
:
2189 case IP_UNBLOCK_SOURCE
:
2190 case IP_ADD_SOURCE_MEMBERSHIP
:
2191 case IP_DROP_SOURCE_MEMBERSHIP
:
2192 if (optlen
!= sizeof (struct target_ip_mreq_source
))
2193 return -TARGET_EINVAL
;
2195 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2196 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
2197 unlock_user (ip_mreq_source
, optval_addr
, 0);
2206 case IPV6_MTU_DISCOVER
:
2209 case IPV6_RECVPKTINFO
:
2211 if (optlen
< sizeof(uint32_t)) {
2212 return -TARGET_EINVAL
;
2214 if (get_user_u32(val
, optval_addr
)) {
2215 return -TARGET_EFAULT
;
2217 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2218 &val
, sizeof(val
)));
2227 /* struct icmp_filter takes an u32 value */
2228 if (optlen
< sizeof(uint32_t)) {
2229 return -TARGET_EINVAL
;
2232 if (get_user_u32(val
, optval_addr
)) {
2233 return -TARGET_EFAULT
;
2235 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2236 &val
, sizeof(val
)));
2243 case TARGET_SOL_SOCKET
:
2245 case TARGET_SO_RCVTIMEO
:
2249 optname
= SO_RCVTIMEO
;
2252 if (optlen
!= sizeof(struct target_timeval
)) {
2253 return -TARGET_EINVAL
;
2256 if (copy_from_user_timeval(&tv
, optval_addr
)) {
2257 return -TARGET_EFAULT
;
2260 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2264 case TARGET_SO_SNDTIMEO
:
2265 optname
= SO_SNDTIMEO
;
2267 case TARGET_SO_ATTACH_FILTER
:
2269 struct target_sock_fprog
*tfprog
;
2270 struct target_sock_filter
*tfilter
;
2271 struct sock_fprog fprog
;
2272 struct sock_filter
*filter
;
2275 if (optlen
!= sizeof(*tfprog
)) {
2276 return -TARGET_EINVAL
;
2278 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
2279 return -TARGET_EFAULT
;
2281 if (!lock_user_struct(VERIFY_READ
, tfilter
,
2282 tswapal(tfprog
->filter
), 0)) {
2283 unlock_user_struct(tfprog
, optval_addr
, 1);
2284 return -TARGET_EFAULT
;
2287 fprog
.len
= tswap16(tfprog
->len
);
2288 filter
= g_try_new(struct sock_filter
, fprog
.len
);
2289 if (filter
== NULL
) {
2290 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2291 unlock_user_struct(tfprog
, optval_addr
, 1);
2292 return -TARGET_ENOMEM
;
2294 for (i
= 0; i
< fprog
.len
; i
++) {
2295 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2296 filter
[i
].jt
= tfilter
[i
].jt
;
2297 filter
[i
].jf
= tfilter
[i
].jf
;
2298 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2300 fprog
.filter
= filter
;
2302 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2303 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2306 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2307 unlock_user_struct(tfprog
, optval_addr
, 1);
2310 case TARGET_SO_BINDTODEVICE
:
2312 char *dev_ifname
, *addr_ifname
;
2314 if (optlen
> IFNAMSIZ
- 1) {
2315 optlen
= IFNAMSIZ
- 1;
2317 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2319 return -TARGET_EFAULT
;
2321 optname
= SO_BINDTODEVICE
;
2322 addr_ifname
= alloca(IFNAMSIZ
);
2323 memcpy(addr_ifname
, dev_ifname
, optlen
);
2324 addr_ifname
[optlen
] = 0;
2325 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2326 addr_ifname
, optlen
));
2327 unlock_user (dev_ifname
, optval_addr
, 0);
2330 /* Options with 'int' argument. */
2331 case TARGET_SO_DEBUG
:
2334 case TARGET_SO_REUSEADDR
:
2335 optname
= SO_REUSEADDR
;
2337 case TARGET_SO_TYPE
:
2340 case TARGET_SO_ERROR
:
2343 case TARGET_SO_DONTROUTE
:
2344 optname
= SO_DONTROUTE
;
2346 case TARGET_SO_BROADCAST
:
2347 optname
= SO_BROADCAST
;
2349 case TARGET_SO_SNDBUF
:
2350 optname
= SO_SNDBUF
;
2352 case TARGET_SO_SNDBUFFORCE
:
2353 optname
= SO_SNDBUFFORCE
;
2355 case TARGET_SO_RCVBUF
:
2356 optname
= SO_RCVBUF
;
2358 case TARGET_SO_RCVBUFFORCE
:
2359 optname
= SO_RCVBUFFORCE
;
2361 case TARGET_SO_KEEPALIVE
:
2362 optname
= SO_KEEPALIVE
;
2364 case TARGET_SO_OOBINLINE
:
2365 optname
= SO_OOBINLINE
;
2367 case TARGET_SO_NO_CHECK
:
2368 optname
= SO_NO_CHECK
;
2370 case TARGET_SO_PRIORITY
:
2371 optname
= SO_PRIORITY
;
2374 case TARGET_SO_BSDCOMPAT
:
2375 optname
= SO_BSDCOMPAT
;
2378 case TARGET_SO_PASSCRED
:
2379 optname
= SO_PASSCRED
;
2381 case TARGET_SO_PASSSEC
:
2382 optname
= SO_PASSSEC
;
2384 case TARGET_SO_TIMESTAMP
:
2385 optname
= SO_TIMESTAMP
;
2387 case TARGET_SO_RCVLOWAT
:
2388 optname
= SO_RCVLOWAT
;
2394 if (optlen
< sizeof(uint32_t))
2395 return -TARGET_EINVAL
;
2397 if (get_user_u32(val
, optval_addr
))
2398 return -TARGET_EFAULT
;
2399 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
2403 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
2404 ret
= -TARGET_ENOPROTOOPT
;
2409 /* do_getsockopt() Must return target values and target errnos. */
2410 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
2411 abi_ulong optval_addr
, abi_ulong optlen
)
2418 case TARGET_SOL_SOCKET
:
2421 /* These don't just return a single integer */
2422 case TARGET_SO_LINGER
:
2423 case TARGET_SO_RCVTIMEO
:
2424 case TARGET_SO_SNDTIMEO
:
2425 case TARGET_SO_PEERNAME
:
2427 case TARGET_SO_PEERCRED
: {
2430 struct target_ucred
*tcr
;
2432 if (get_user_u32(len
, optlen
)) {
2433 return -TARGET_EFAULT
;
2436 return -TARGET_EINVAL
;
2440 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
2448 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
2449 return -TARGET_EFAULT
;
2451 __put_user(cr
.pid
, &tcr
->pid
);
2452 __put_user(cr
.uid
, &tcr
->uid
);
2453 __put_user(cr
.gid
, &tcr
->gid
);
2454 unlock_user_struct(tcr
, optval_addr
, 1);
2455 if (put_user_u32(len
, optlen
)) {
2456 return -TARGET_EFAULT
;
2460 /* Options with 'int' argument. */
2461 case TARGET_SO_DEBUG
:
2464 case TARGET_SO_REUSEADDR
:
2465 optname
= SO_REUSEADDR
;
2467 case TARGET_SO_TYPE
:
2470 case TARGET_SO_ERROR
:
2473 case TARGET_SO_DONTROUTE
:
2474 optname
= SO_DONTROUTE
;
2476 case TARGET_SO_BROADCAST
:
2477 optname
= SO_BROADCAST
;
2479 case TARGET_SO_SNDBUF
:
2480 optname
= SO_SNDBUF
;
2482 case TARGET_SO_RCVBUF
:
2483 optname
= SO_RCVBUF
;
2485 case TARGET_SO_KEEPALIVE
:
2486 optname
= SO_KEEPALIVE
;
2488 case TARGET_SO_OOBINLINE
:
2489 optname
= SO_OOBINLINE
;
2491 case TARGET_SO_NO_CHECK
:
2492 optname
= SO_NO_CHECK
;
2494 case TARGET_SO_PRIORITY
:
2495 optname
= SO_PRIORITY
;
2498 case TARGET_SO_BSDCOMPAT
:
2499 optname
= SO_BSDCOMPAT
;
2502 case TARGET_SO_PASSCRED
:
2503 optname
= SO_PASSCRED
;
2505 case TARGET_SO_TIMESTAMP
:
2506 optname
= SO_TIMESTAMP
;
2508 case TARGET_SO_RCVLOWAT
:
2509 optname
= SO_RCVLOWAT
;
2511 case TARGET_SO_ACCEPTCONN
:
2512 optname
= SO_ACCEPTCONN
;
2519 /* TCP options all take an 'int' value. */
2521 if (get_user_u32(len
, optlen
))
2522 return -TARGET_EFAULT
;
2524 return -TARGET_EINVAL
;
2526 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2529 if (optname
== SO_TYPE
) {
2530 val
= host_to_target_sock_type(val
);
2535 if (put_user_u32(val
, optval_addr
))
2536 return -TARGET_EFAULT
;
2538 if (put_user_u8(val
, optval_addr
))
2539 return -TARGET_EFAULT
;
2541 if (put_user_u32(len
, optlen
))
2542 return -TARGET_EFAULT
;
2549 case IP_ROUTER_ALERT
:
2553 case IP_MTU_DISCOVER
:
2559 case IP_MULTICAST_TTL
:
2560 case IP_MULTICAST_LOOP
:
2561 if (get_user_u32(len
, optlen
))
2562 return -TARGET_EFAULT
;
2564 return -TARGET_EINVAL
;
2566 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2569 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2571 if (put_user_u32(len
, optlen
)
2572 || put_user_u8(val
, optval_addr
))
2573 return -TARGET_EFAULT
;
2575 if (len
> sizeof(int))
2577 if (put_user_u32(len
, optlen
)
2578 || put_user_u32(val
, optval_addr
))
2579 return -TARGET_EFAULT
;
2583 ret
= -TARGET_ENOPROTOOPT
;
2589 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2591 ret
= -TARGET_EOPNOTSUPP
;
2597 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
2598 int count
, int copy
)
2600 struct target_iovec
*target_vec
;
2602 abi_ulong total_len
, max_len
;
2605 bool bad_address
= false;
2611 if (count
< 0 || count
> IOV_MAX
) {
2616 vec
= g_try_new0(struct iovec
, count
);
2622 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2623 count
* sizeof(struct target_iovec
), 1);
2624 if (target_vec
== NULL
) {
2629 /* ??? If host page size > target page size, this will result in a
2630 value larger than what we can actually support. */
2631 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
2634 for (i
= 0; i
< count
; i
++) {
2635 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2636 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2641 } else if (len
== 0) {
2642 /* Zero length pointer is ignored. */
2643 vec
[i
].iov_base
= 0;
2645 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
2646 /* If the first buffer pointer is bad, this is a fault. But
2647 * subsequent bad buffers will result in a partial write; this
2648 * is realized by filling the vector with null pointers and
2650 if (!vec
[i
].iov_base
) {
2661 if (len
> max_len
- total_len
) {
2662 len
= max_len
- total_len
;
2665 vec
[i
].iov_len
= len
;
2669 unlock_user(target_vec
, target_addr
, 0);
2674 if (tswapal(target_vec
[i
].iov_len
) > 0) {
2675 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
2678 unlock_user(target_vec
, target_addr
, 0);
2685 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
2686 int count
, int copy
)
2688 struct target_iovec
*target_vec
;
2691 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2692 count
* sizeof(struct target_iovec
), 1);
2694 for (i
= 0; i
< count
; i
++) {
2695 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2696 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2700 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
2702 unlock_user(target_vec
, target_addr
, 0);
2708 static inline int target_to_host_sock_type(int *type
)
2711 int target_type
= *type
;
2713 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
2714 case TARGET_SOCK_DGRAM
:
2715 host_type
= SOCK_DGRAM
;
2717 case TARGET_SOCK_STREAM
:
2718 host_type
= SOCK_STREAM
;
2721 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
2724 if (target_type
& TARGET_SOCK_CLOEXEC
) {
2725 #if defined(SOCK_CLOEXEC)
2726 host_type
|= SOCK_CLOEXEC
;
2728 return -TARGET_EINVAL
;
2731 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2732 #if defined(SOCK_NONBLOCK)
2733 host_type
|= SOCK_NONBLOCK
;
2734 #elif !defined(O_NONBLOCK)
2735 return -TARGET_EINVAL
;
2742 /* Try to emulate socket type flags after socket creation. */
2743 static int sock_flags_fixup(int fd
, int target_type
)
2745 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2746 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2747 int flags
= fcntl(fd
, F_GETFL
);
2748 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
2750 return -TARGET_EINVAL
;
2757 static abi_long
packet_target_to_host_sockaddr(void *host_addr
,
2758 abi_ulong target_addr
,
2761 struct sockaddr
*addr
= host_addr
;
2762 struct target_sockaddr
*target_saddr
;
2764 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
2765 if (!target_saddr
) {
2766 return -TARGET_EFAULT
;
2769 memcpy(addr
, target_saddr
, len
);
2770 addr
->sa_family
= tswap16(target_saddr
->sa_family
);
2771 /* spkt_protocol is big-endian */
2773 unlock_user(target_saddr
, target_addr
, 0);
2777 static TargetFdTrans target_packet_trans
= {
2778 .target_to_host_addr
= packet_target_to_host_sockaddr
,
2781 #ifdef CONFIG_RTNETLINK
2782 static abi_long
netlink_route_target_to_host(void *buf
, size_t len
)
2784 return target_to_host_nlmsg_route(buf
, len
);
2787 static abi_long
netlink_route_host_to_target(void *buf
, size_t len
)
2789 return host_to_target_nlmsg_route(buf
, len
);
2792 static TargetFdTrans target_netlink_route_trans
= {
2793 .target_to_host_data
= netlink_route_target_to_host
,
2794 .host_to_target_data
= netlink_route_host_to_target
,
2796 #endif /* CONFIG_RTNETLINK */
2798 static abi_long
netlink_audit_target_to_host(void *buf
, size_t len
)
2800 return target_to_host_nlmsg_audit(buf
, len
);
2803 static abi_long
netlink_audit_host_to_target(void *buf
, size_t len
)
2805 return host_to_target_nlmsg_audit(buf
, len
);
2808 static TargetFdTrans target_netlink_audit_trans
= {
2809 .target_to_host_data
= netlink_audit_target_to_host
,
2810 .host_to_target_data
= netlink_audit_host_to_target
,
2813 /* do_socket() Must return target values and target errnos. */
2814 static abi_long
do_socket(int domain
, int type
, int protocol
)
2816 int target_type
= type
;
2819 ret
= target_to_host_sock_type(&type
);
2824 if (domain
== PF_NETLINK
&& !(
2825 #ifdef CONFIG_RTNETLINK
2826 protocol
== NETLINK_ROUTE
||
2828 protocol
== NETLINK_KOBJECT_UEVENT
||
2829 protocol
== NETLINK_AUDIT
)) {
2830 return -EPFNOSUPPORT
;
2833 if (domain
== AF_PACKET
||
2834 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
2835 protocol
= tswap16(protocol
);
2838 ret
= get_errno(socket(domain
, type
, protocol
));
2840 ret
= sock_flags_fixup(ret
, target_type
);
2841 if (type
== SOCK_PACKET
) {
2842 /* Manage an obsolete case :
2843 * if socket type is SOCK_PACKET, bind by name
2845 fd_trans_register(ret
, &target_packet_trans
);
2846 } else if (domain
== PF_NETLINK
) {
2848 #ifdef CONFIG_RTNETLINK
2850 fd_trans_register(ret
, &target_netlink_route_trans
);
2853 case NETLINK_KOBJECT_UEVENT
:
2854 /* nothing to do: messages are strings */
2857 fd_trans_register(ret
, &target_netlink_audit_trans
);
2860 g_assert_not_reached();
2867 /* do_bind() Must return target values and target errnos. */
2868 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
2874 if ((int)addrlen
< 0) {
2875 return -TARGET_EINVAL
;
2878 addr
= alloca(addrlen
+1);
2880 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
2884 return get_errno(bind(sockfd
, addr
, addrlen
));
2887 /* do_connect() Must return target values and target errnos. */
2888 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
2894 if ((int)addrlen
< 0) {
2895 return -TARGET_EINVAL
;
2898 addr
= alloca(addrlen
+1);
2900 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
2904 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
2907 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2908 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
2909 int flags
, int send
)
2915 abi_ulong target_vec
;
2917 if (msgp
->msg_name
) {
2918 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
2919 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
2920 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
2921 tswapal(msgp
->msg_name
),
2927 msg
.msg_name
= NULL
;
2928 msg
.msg_namelen
= 0;
2930 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
2931 msg
.msg_control
= alloca(msg
.msg_controllen
);
2932 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
2934 count
= tswapal(msgp
->msg_iovlen
);
2935 target_vec
= tswapal(msgp
->msg_iov
);
2936 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
2937 target_vec
, count
, send
);
2939 ret
= -host_to_target_errno(errno
);
2942 msg
.msg_iovlen
= count
;
2946 if (fd_trans_target_to_host_data(fd
)) {
2947 ret
= fd_trans_target_to_host_data(fd
)(msg
.msg_iov
->iov_base
,
2948 msg
.msg_iov
->iov_len
);
2950 ret
= target_to_host_cmsg(&msg
, msgp
);
2953 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
2956 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
2957 if (!is_error(ret
)) {
2959 if (fd_trans_host_to_target_data(fd
)) {
2960 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
2961 msg
.msg_iov
->iov_len
);
2963 ret
= host_to_target_cmsg(msgp
, &msg
);
2965 if (!is_error(ret
)) {
2966 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
2967 if (msg
.msg_name
!= NULL
) {
2968 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
2969 msg
.msg_name
, msg
.msg_namelen
);
2981 unlock_iovec(vec
, target_vec
, count
, !send
);
2986 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
2987 int flags
, int send
)
2990 struct target_msghdr
*msgp
;
2992 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
2996 return -TARGET_EFAULT
;
2998 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
2999 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
3003 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3004 * so it might not have this *mmsg-specific flag either.
3006 #ifndef MSG_WAITFORONE
3007 #define MSG_WAITFORONE 0x10000
3010 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
3011 unsigned int vlen
, unsigned int flags
,
3014 struct target_mmsghdr
*mmsgp
;
3018 if (vlen
> UIO_MAXIOV
) {
3022 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
3024 return -TARGET_EFAULT
;
3027 for (i
= 0; i
< vlen
; i
++) {
3028 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
3029 if (is_error(ret
)) {
3032 mmsgp
[i
].msg_len
= tswap32(ret
);
3033 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3034 if (flags
& MSG_WAITFORONE
) {
3035 flags
|= MSG_DONTWAIT
;
3039 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3041 /* Return number of datagrams sent if we sent any at all;
3042 * otherwise return the error.
3050 /* If we don't have a system accept4() then just call accept.
3051 * The callsites to do_accept4() will ensure that they don't
3052 * pass a non-zero flags argument in this config.
3054 #ifndef CONFIG_ACCEPT4
3055 static inline int accept4(int sockfd
, struct sockaddr
*addr
,
3056 socklen_t
*addrlen
, int flags
)
3059 return accept(sockfd
, addr
, addrlen
);
3063 /* do_accept4() Must return target values and target errnos. */
3064 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3065 abi_ulong target_addrlen_addr
, int flags
)
3072 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3074 if (target_addr
== 0) {
3075 return get_errno(accept4(fd
, NULL
, NULL
, host_flags
));
3078 /* linux returns EINVAL if addrlen pointer is invalid */
3079 if (get_user_u32(addrlen
, target_addrlen_addr
))
3080 return -TARGET_EINVAL
;
3082 if ((int)addrlen
< 0) {
3083 return -TARGET_EINVAL
;
3086 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3087 return -TARGET_EINVAL
;
3089 addr
= alloca(addrlen
);
3091 ret
= get_errno(accept4(fd
, addr
, &addrlen
, host_flags
));
3092 if (!is_error(ret
)) {
3093 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3094 if (put_user_u32(addrlen
, target_addrlen_addr
))
3095 ret
= -TARGET_EFAULT
;
3100 /* do_getpeername() Must return target values and target errnos. */
3101 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3102 abi_ulong target_addrlen_addr
)
3108 if (get_user_u32(addrlen
, target_addrlen_addr
))
3109 return -TARGET_EFAULT
;
3111 if ((int)addrlen
< 0) {
3112 return -TARGET_EINVAL
;
3115 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3116 return -TARGET_EFAULT
;
3118 addr
= alloca(addrlen
);
3120 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
3121 if (!is_error(ret
)) {
3122 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3123 if (put_user_u32(addrlen
, target_addrlen_addr
))
3124 ret
= -TARGET_EFAULT
;
3129 /* do_getsockname() Must return target values and target errnos. */
3130 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3131 abi_ulong target_addrlen_addr
)
3137 if (get_user_u32(addrlen
, target_addrlen_addr
))
3138 return -TARGET_EFAULT
;
3140 if ((int)addrlen
< 0) {
3141 return -TARGET_EINVAL
;
3144 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3145 return -TARGET_EFAULT
;
3147 addr
= alloca(addrlen
);
3149 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
3150 if (!is_error(ret
)) {
3151 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3152 if (put_user_u32(addrlen
, target_addrlen_addr
))
3153 ret
= -TARGET_EFAULT
;
3158 /* do_socketpair() Must return target values and target errnos. */
3159 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3160 abi_ulong target_tab_addr
)
3165 target_to_host_sock_type(&type
);
3167 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3168 if (!is_error(ret
)) {
3169 if (put_user_s32(tab
[0], target_tab_addr
)
3170 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3171 ret
= -TARGET_EFAULT
;
3176 /* do_sendto() Must return target values and target errnos. */
3177 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3178 abi_ulong target_addr
, socklen_t addrlen
)
3184 if ((int)addrlen
< 0) {
3185 return -TARGET_EINVAL
;
3188 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3190 return -TARGET_EFAULT
;
3191 if (fd_trans_target_to_host_data(fd
)) {
3192 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
3194 unlock_user(host_msg
, msg
, 0);
3199 addr
= alloca(addrlen
+1);
3200 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
3202 unlock_user(host_msg
, msg
, 0);
3205 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
3207 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
3209 unlock_user(host_msg
, msg
, 0);
3213 /* do_recvfrom() Must return target values and target errnos. */
3214 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
3215 abi_ulong target_addr
,
3216 abi_ulong target_addrlen
)
3223 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
3225 return -TARGET_EFAULT
;
3227 if (get_user_u32(addrlen
, target_addrlen
)) {
3228 ret
= -TARGET_EFAULT
;
3231 if ((int)addrlen
< 0) {
3232 ret
= -TARGET_EINVAL
;
3235 addr
= alloca(addrlen
);
3236 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
3239 addr
= NULL
; /* To keep compiler quiet. */
3240 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
3242 if (!is_error(ret
)) {
3244 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3245 if (put_user_u32(addrlen
, target_addrlen
)) {
3246 ret
= -TARGET_EFAULT
;
3250 unlock_user(host_msg
, msg
, len
);
3253 unlock_user(host_msg
, msg
, 0);
3258 #ifdef TARGET_NR_socketcall
3259 /* do_socketcall() Must return target values and target errnos. */
3260 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
3262 static const unsigned ac
[] = { /* number of arguments per call */
3263 [SOCKOP_socket
] = 3, /* domain, type, protocol */
3264 [SOCKOP_bind
] = 3, /* sockfd, addr, addrlen */
3265 [SOCKOP_connect
] = 3, /* sockfd, addr, addrlen */
3266 [SOCKOP_listen
] = 2, /* sockfd, backlog */
3267 [SOCKOP_accept
] = 3, /* sockfd, addr, addrlen */
3268 [SOCKOP_accept4
] = 4, /* sockfd, addr, addrlen, flags */
3269 [SOCKOP_getsockname
] = 3, /* sockfd, addr, addrlen */
3270 [SOCKOP_getpeername
] = 3, /* sockfd, addr, addrlen */
3271 [SOCKOP_socketpair
] = 4, /* domain, type, protocol, tab */
3272 [SOCKOP_send
] = 4, /* sockfd, msg, len, flags */
3273 [SOCKOP_recv
] = 4, /* sockfd, msg, len, flags */
3274 [SOCKOP_sendto
] = 6, /* sockfd, msg, len, flags, addr, addrlen */
3275 [SOCKOP_recvfrom
] = 6, /* sockfd, msg, len, flags, addr, addrlen */
3276 [SOCKOP_shutdown
] = 2, /* sockfd, how */
3277 [SOCKOP_sendmsg
] = 3, /* sockfd, msg, flags */
3278 [SOCKOP_recvmsg
] = 3, /* sockfd, msg, flags */
3279 [SOCKOP_sendmmsg
] = 4, /* sockfd, msgvec, vlen, flags */
3280 [SOCKOP_recvmmsg
] = 4, /* sockfd, msgvec, vlen, flags */
3281 [SOCKOP_setsockopt
] = 5, /* sockfd, level, optname, optval, optlen */
3282 [SOCKOP_getsockopt
] = 5, /* sockfd, level, optname, optval, optlen */
3284 abi_long a
[6]; /* max 6 args */
3286 /* first, collect the arguments in a[] according to ac[] */
3287 if (num
>= 0 && num
< ARRAY_SIZE(ac
)) {
3289 assert(ARRAY_SIZE(a
) >= ac
[num
]); /* ensure we have space for args */
3290 for (i
= 0; i
< ac
[num
]; ++i
) {
3291 if (get_user_ual(a
[i
], vptr
+ i
* sizeof(abi_long
)) != 0) {
3292 return -TARGET_EFAULT
;
3297 /* now when we have the args, actually handle the call */
3299 case SOCKOP_socket
: /* domain, type, protocol */
3300 return do_socket(a
[0], a
[1], a
[2]);
3301 case SOCKOP_bind
: /* sockfd, addr, addrlen */
3302 return do_bind(a
[0], a
[1], a
[2]);
3303 case SOCKOP_connect
: /* sockfd, addr, addrlen */
3304 return do_connect(a
[0], a
[1], a
[2]);
3305 case SOCKOP_listen
: /* sockfd, backlog */
3306 return get_errno(listen(a
[0], a
[1]));
3307 case SOCKOP_accept
: /* sockfd, addr, addrlen */
3308 return do_accept4(a
[0], a
[1], a
[2], 0);
3309 case SOCKOP_accept4
: /* sockfd, addr, addrlen, flags */
3310 return do_accept4(a
[0], a
[1], a
[2], a
[3]);
3311 case SOCKOP_getsockname
: /* sockfd, addr, addrlen */
3312 return do_getsockname(a
[0], a
[1], a
[2]);
3313 case SOCKOP_getpeername
: /* sockfd, addr, addrlen */
3314 return do_getpeername(a
[0], a
[1], a
[2]);
3315 case SOCKOP_socketpair
: /* domain, type, protocol, tab */
3316 return do_socketpair(a
[0], a
[1], a
[2], a
[3]);
3317 case SOCKOP_send
: /* sockfd, msg, len, flags */
3318 return do_sendto(a
[0], a
[1], a
[2], a
[3], 0, 0);
3319 case SOCKOP_recv
: /* sockfd, msg, len, flags */
3320 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], 0, 0);
3321 case SOCKOP_sendto
: /* sockfd, msg, len, flags, addr, addrlen */
3322 return do_sendto(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3323 case SOCKOP_recvfrom
: /* sockfd, msg, len, flags, addr, addrlen */
3324 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3325 case SOCKOP_shutdown
: /* sockfd, how */
3326 return get_errno(shutdown(a
[0], a
[1]));
3327 case SOCKOP_sendmsg
: /* sockfd, msg, flags */
3328 return do_sendrecvmsg(a
[0], a
[1], a
[2], 1);
3329 case SOCKOP_recvmsg
: /* sockfd, msg, flags */
3330 return do_sendrecvmsg(a
[0], a
[1], a
[2], 0);
3331 case SOCKOP_sendmmsg
: /* sockfd, msgvec, vlen, flags */
3332 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 1);
3333 case SOCKOP_recvmmsg
: /* sockfd, msgvec, vlen, flags */
3334 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 0);
3335 case SOCKOP_setsockopt
: /* sockfd, level, optname, optval, optlen */
3336 return do_setsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3337 case SOCKOP_getsockopt
: /* sockfd, level, optname, optval, optlen */
3338 return do_getsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3340 gemu_log("Unsupported socketcall: %d\n", num
);
3341 return -TARGET_ENOSYS
;
3346 #define N_SHM_REGIONS 32
3348 static struct shm_region
{
3352 } shm_regions
[N_SHM_REGIONS
];
3354 struct target_semid_ds
3356 struct target_ipc_perm sem_perm
;
3357 abi_ulong sem_otime
;
3358 #if !defined(TARGET_PPC64)
3359 abi_ulong __unused1
;
3361 abi_ulong sem_ctime
;
3362 #if !defined(TARGET_PPC64)
3363 abi_ulong __unused2
;
3365 abi_ulong sem_nsems
;
3366 abi_ulong __unused3
;
3367 abi_ulong __unused4
;
3370 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
3371 abi_ulong target_addr
)
3373 struct target_ipc_perm
*target_ip
;
3374 struct target_semid_ds
*target_sd
;
3376 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3377 return -TARGET_EFAULT
;
3378 target_ip
= &(target_sd
->sem_perm
);
3379 host_ip
->__key
= tswap32(target_ip
->__key
);
3380 host_ip
->uid
= tswap32(target_ip
->uid
);
3381 host_ip
->gid
= tswap32(target_ip
->gid
);
3382 host_ip
->cuid
= tswap32(target_ip
->cuid
);
3383 host_ip
->cgid
= tswap32(target_ip
->cgid
);
3384 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3385 host_ip
->mode
= tswap32(target_ip
->mode
);
3387 host_ip
->mode
= tswap16(target_ip
->mode
);
3389 #if defined(TARGET_PPC)
3390 host_ip
->__seq
= tswap32(target_ip
->__seq
);
3392 host_ip
->__seq
= tswap16(target_ip
->__seq
);
3394 unlock_user_struct(target_sd
, target_addr
, 0);
3398 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
3399 struct ipc_perm
*host_ip
)
3401 struct target_ipc_perm
*target_ip
;
3402 struct target_semid_ds
*target_sd
;
3404 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3405 return -TARGET_EFAULT
;
3406 target_ip
= &(target_sd
->sem_perm
);
3407 target_ip
->__key
= tswap32(host_ip
->__key
);
3408 target_ip
->uid
= tswap32(host_ip
->uid
);
3409 target_ip
->gid
= tswap32(host_ip
->gid
);
3410 target_ip
->cuid
= tswap32(host_ip
->cuid
);
3411 target_ip
->cgid
= tswap32(host_ip
->cgid
);
3412 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3413 target_ip
->mode
= tswap32(host_ip
->mode
);
3415 target_ip
->mode
= tswap16(host_ip
->mode
);
3417 #if defined(TARGET_PPC)
3418 target_ip
->__seq
= tswap32(host_ip
->__seq
);
3420 target_ip
->__seq
= tswap16(host_ip
->__seq
);
3422 unlock_user_struct(target_sd
, target_addr
, 1);
3426 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
3427 abi_ulong target_addr
)
3429 struct target_semid_ds
*target_sd
;
3431 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3432 return -TARGET_EFAULT
;
3433 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
3434 return -TARGET_EFAULT
;
3435 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
3436 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
3437 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
3438 unlock_user_struct(target_sd
, target_addr
, 0);
3442 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
3443 struct semid_ds
*host_sd
)
3445 struct target_semid_ds
*target_sd
;
3447 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3448 return -TARGET_EFAULT
;
3449 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
3450 return -TARGET_EFAULT
;
3451 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
3452 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
3453 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
3454 unlock_user_struct(target_sd
, target_addr
, 1);
3458 struct target_seminfo
{
3471 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
3472 struct seminfo
*host_seminfo
)
3474 struct target_seminfo
*target_seminfo
;
3475 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
3476 return -TARGET_EFAULT
;
3477 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
3478 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
3479 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
3480 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
3481 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
3482 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
3483 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
3484 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
3485 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
3486 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
3487 unlock_user_struct(target_seminfo
, target_addr
, 1);
3493 struct semid_ds
*buf
;
3494 unsigned short *array
;
3495 struct seminfo
*__buf
;
3498 union target_semun
{
3505 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
3506 abi_ulong target_addr
)
3509 unsigned short *array
;
3511 struct semid_ds semid_ds
;
3514 semun
.buf
= &semid_ds
;
3516 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3518 return get_errno(ret
);
3520 nsems
= semid_ds
.sem_nsems
;
3522 *host_array
= g_try_new(unsigned short, nsems
);
3524 return -TARGET_ENOMEM
;
3526 array
= lock_user(VERIFY_READ
, target_addr
,
3527 nsems
*sizeof(unsigned short), 1);
3529 g_free(*host_array
);
3530 return -TARGET_EFAULT
;
3533 for(i
=0; i
<nsems
; i
++) {
3534 __get_user((*host_array
)[i
], &array
[i
]);
3536 unlock_user(array
, target_addr
, 0);
3541 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
3542 unsigned short **host_array
)
3545 unsigned short *array
;
3547 struct semid_ds semid_ds
;
3550 semun
.buf
= &semid_ds
;
3552 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3554 return get_errno(ret
);
3556 nsems
= semid_ds
.sem_nsems
;
3558 array
= lock_user(VERIFY_WRITE
, target_addr
,
3559 nsems
*sizeof(unsigned short), 0);
3561 return -TARGET_EFAULT
;
3563 for(i
=0; i
<nsems
; i
++) {
3564 __put_user((*host_array
)[i
], &array
[i
]);
3566 g_free(*host_array
);
3567 unlock_user(array
, target_addr
, 1);
3572 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
3573 abi_ulong target_arg
)
3575 union target_semun target_su
= { .buf
= target_arg
};
3577 struct semid_ds dsarg
;
3578 unsigned short *array
= NULL
;
3579 struct seminfo seminfo
;
3580 abi_long ret
= -TARGET_EINVAL
;
3587 /* In 64 bit cross-endian situations, we will erroneously pick up
3588 * the wrong half of the union for the "val" element. To rectify
3589 * this, the entire 8-byte structure is byteswapped, followed by
3590 * a swap of the 4 byte val field. In other cases, the data is
3591 * already in proper host byte order. */
3592 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
3593 target_su
.buf
= tswapal(target_su
.buf
);
3594 arg
.val
= tswap32(target_su
.val
);
3596 arg
.val
= target_su
.val
;
3598 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3602 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
3606 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3607 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
3614 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
3618 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3619 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
3625 arg
.__buf
= &seminfo
;
3626 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3627 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
3635 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
3642 struct target_sembuf
{
3643 unsigned short sem_num
;
3648 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
3649 abi_ulong target_addr
,
3652 struct target_sembuf
*target_sembuf
;
3655 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
3656 nsops
*sizeof(struct target_sembuf
), 1);
3658 return -TARGET_EFAULT
;
3660 for(i
=0; i
<nsops
; i
++) {
3661 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
3662 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
3663 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
3666 unlock_user(target_sembuf
, target_addr
, 0);
3671 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
3673 struct sembuf sops
[nsops
];
3675 if (target_to_host_sembuf(sops
, ptr
, nsops
))
3676 return -TARGET_EFAULT
;
3678 return get_errno(semop(semid
, sops
, nsops
));
3681 struct target_msqid_ds
3683 struct target_ipc_perm msg_perm
;
3684 abi_ulong msg_stime
;
3685 #if TARGET_ABI_BITS == 32
3686 abi_ulong __unused1
;
3688 abi_ulong msg_rtime
;
3689 #if TARGET_ABI_BITS == 32
3690 abi_ulong __unused2
;
3692 abi_ulong msg_ctime
;
3693 #if TARGET_ABI_BITS == 32
3694 abi_ulong __unused3
;
3696 abi_ulong __msg_cbytes
;
3698 abi_ulong msg_qbytes
;
3699 abi_ulong msg_lspid
;
3700 abi_ulong msg_lrpid
;
3701 abi_ulong __unused4
;
3702 abi_ulong __unused5
;
3705 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
3706 abi_ulong target_addr
)
3708 struct target_msqid_ds
*target_md
;
3710 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
3711 return -TARGET_EFAULT
;
3712 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
3713 return -TARGET_EFAULT
;
3714 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
3715 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
3716 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
3717 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
3718 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
3719 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
3720 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
3721 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
3722 unlock_user_struct(target_md
, target_addr
, 0);
3726 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
3727 struct msqid_ds
*host_md
)
3729 struct target_msqid_ds
*target_md
;
3731 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
3732 return -TARGET_EFAULT
;
3733 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
3734 return -TARGET_EFAULT
;
3735 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
3736 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
3737 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
3738 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
3739 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
3740 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
3741 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
3742 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
3743 unlock_user_struct(target_md
, target_addr
, 1);
3747 struct target_msginfo
{
3755 unsigned short int msgseg
;
/*
 * Copy a host struct msginfo (from msgctl IPC_INFO/MSG_INFO) out to guest
 * memory at target_addr.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}
/*
 * Emulate msgctl(2) for the guest: marshal the msqid_ds / msginfo argument
 * between guest and host layouts around the host syscall.
 * Returns host result or a target errno.
 */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;   /* strip IPC_64 etc. version bits */

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        /* no data argument */
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
/* Guest view of struct msgbuf: type tag followed by the message text. */
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];   /* actually variable length, sized by msgsz */
};
/*
 * Emulate msgsnd(2): copy the guest message into a host-side msgbuf
 * (byte-swapping mtype), then issue the host syscall.
 * Returns 0 or a target errno.
 */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* +sizeof(long) covers the host mtype header before the text */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
/*
 * Emulate msgrcv(2): receive into a host-side buffer, then copy the text
 * and the byte-swapped mtype back into the guest msgbuf at msgp.
 * Returns the number of bytes received or a target errno.
 */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));

    if (ret > 0) {
        /* mtext lives right after the abi_ulong mtype in the guest struct */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
/*
 * Read a guest struct shmid_ds at target_addr into the host struct,
 * byte-swapping via __get_user.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    /* NOTE(review): failure path returns with target_sd still locked. */
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
/*
 * Write a host struct shmid_ds back out to guest memory at target_addr,
 * byte-swapping via __put_user.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
/* Guest-layout mirror of the host's struct shminfo (shmctl IPC_INFO). */
struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};
/*
 * Copy a host struct shminfo (shmctl IPC_INFO result) out to guest memory.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
{
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
    return 0;
}
/* Guest-layout mirror of the host's struct shm_info (shmctl SHM_INFO). */
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};
/*
 * Copy a host struct shm_info (shmctl SHM_INFO result) out to guest memory.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
{
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
    return 0;
}
/*
 * Emulate shmctl(2): marshal the shmid_ds / shminfo / shm_info argument
 * between guest and host layouts around the host syscall.
 * Returns host result or a target errno.
 */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;   /* strip IPC_64 etc. version bits */

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        /* no data argument */
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
/*
 * Emulate shmat(2).  If the guest supplied no address, pick a free guest
 * VA range first, then attach the host mapping there.  On success the
 * segment is recorded in shm_regions[] so do_shmdt() can find its size,
 * and the guest page flags are set for the new range.
 * Returns the guest attach address or a target errno.
 */
static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            /* SHM_REMAP: the reserved range is already mapped */
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    /* remember the segment so detach can clear the page flags */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;
}
/*
 * Emulate shmdt(2): release the shm_regions[] slot recorded by do_shmat(),
 * clear the guest page flags for the segment, then detach on the host.
 */
static inline abi_long do_shmdt(abi_ulong shmaddr)
{
    int i;

    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
            shm_regions[i].in_use = false;
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
            break;
        }
    }

    return get_errno(shmdt(g2h(shmaddr)));
}
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
/*
 * Demultiplex the legacy ipc(2) syscall onto the individual SysV IPC
 * emulation helpers above.  'call' encodes the operation in the low 16
 * bits and an ABI version in the high 16 bits.
 */
static abi_long do_ipc(unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old-ABI msgrcv passes msgp/msgtyp indirectly. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                                tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* store the attach address for the guest at *third */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
4185 /* kernel structure types definitions */
4187 #define STRUCT(name, ...) STRUCT_ ## name,
4188 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4190 #include "syscall_types.h"
4194 #undef STRUCT_SPECIAL
4196 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4197 #define STRUCT_SPECIAL(name)
4198 #include "syscall_types.h"
4200 #undef STRUCT_SPECIAL
typedef struct IOCTLEntry IOCTLEntry;

/* Signature of a custom per-ioctl marshalling handler. */
typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg);

/* One row of the guest->host ioctl translation table (see ioctls.h). */
struct IOCTLEntry {
    int target_cmd;          /* guest ioctl request number */
    unsigned int host_cmd;   /* matching host ioctl request number */
    const char *name;        /* for diagnostics */
    int access;              /* IOC_R / IOC_W / IOC_RW */
    do_ioctl_fn *do_ioctl;   /* non-NULL: special-case handler */
    const argtype arg_type[5]; /* thunk description of the argument */
};
4216 #define IOC_R 0x0001
4217 #define IOC_W 0x0002
4218 #define IOC_RW (IOC_R | IOC_W)
4220 #define MAX_STRUCT_SIZE 4096
4222 #ifdef CONFIG_FIEMAP
4223 /* So fiemap access checks don't overflow on 32 bit systems.
4224 * This is very slightly smaller than the limit imposed by
4225 * the underlying kernel.
4227 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4228 / sizeof(struct fiemap_extent))
/*
 * Special-case handler for FS_IOC_FIEMAP.  The argument is a struct fiemap
 * followed by a variable-length array of struct fiemap_extent, so it cannot
 * be described by a fixed thunk type.
 */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
4311 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4312 int fd
, int cmd
, abi_long arg
)
4314 const argtype
*arg_type
= ie
->arg_type
;
4318 struct ifconf
*host_ifconf
;
4320 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
4321 int target_ifreq_size
;
4326 abi_long target_ifc_buf
;
4330 assert(arg_type
[0] == TYPE_PTR
);
4331 assert(ie
->access
== IOC_RW
);
4334 target_size
= thunk_type_size(arg_type
, 0);
4336 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4338 return -TARGET_EFAULT
;
4339 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4340 unlock_user(argptr
, arg
, 0);
4342 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
4343 target_ifc_len
= host_ifconf
->ifc_len
;
4344 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
4346 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
4347 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
4348 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
4350 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
4351 if (outbufsz
> MAX_STRUCT_SIZE
) {
4352 /* We can't fit all the extents into the fixed size buffer.
4353 * Allocate one that is large enough and use it instead.
4355 host_ifconf
= malloc(outbufsz
);
4357 return -TARGET_ENOMEM
;
4359 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
4362 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
4364 host_ifconf
->ifc_len
= host_ifc_len
;
4365 host_ifconf
->ifc_buf
= host_ifc_buf
;
4367 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, host_ifconf
));
4368 if (!is_error(ret
)) {
4369 /* convert host ifc_len to target ifc_len */
4371 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
4372 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
4373 host_ifconf
->ifc_len
= target_ifc_len
;
4375 /* restore target ifc_buf */
4377 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
4379 /* copy struct ifconf to target user */
4381 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4383 return -TARGET_EFAULT
;
4384 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
4385 unlock_user(argptr
, arg
, target_size
);
4387 /* copy ifreq[] to target user */
4389 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
4390 for (i
= 0; i
< nb_ifreq
; i
++) {
4391 thunk_convert(argptr
+ i
* target_ifreq_size
,
4392 host_ifc_buf
+ i
* sizeof(struct ifreq
),
4393 ifreq_arg_type
, THUNK_TARGET
);
4395 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
/*
 * Special-case handler for device-mapper ioctls.  A struct dm_ioctl header
 * is followed by a variable-format payload at data_start whose layout
 * depends on the specific command; each direction is converted by hand.
 */
static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                            int cmd, abi_long arg)
{
    void *argptr;
    struct dm_ioctl *host_dm;
    abi_long guest_data;
    uint32_t guest_data_size;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    abi_long ret;
    void *big_buf = NULL;
    char *host_data;

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* buf_temp is too small, so fetch things into a bigger buffer */
    big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
    memcpy(big_buf, buf_temp, target_size);
    buf_temp = big_buf;
    host_dm = big_buf;

    guest_data = arg + host_dm->data_start;
    if ((guest_data - arg) < 0) {
        ret = -TARGET_EINVAL;
        goto out;
    }
    guest_data_size = host_dm->data_size - host_dm->data_start;
    host_data = (char*)host_dm + host_dm->data_start;

    /* NOTE(review): argptr is not NULL-checked here — confirm whether a
     * failed lock_user should bail out with -TARGET_EFAULT. */
    argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
    switch (ie->host_cmd) {
    case DM_REMOVE_ALL:
    case DM_LIST_DEVICES:
    case DM_DEV_CREATE:
    case DM_DEV_REMOVE:
    case DM_DEV_SUSPEND:
    case DM_DEV_STATUS:
    case DM_DEV_WAIT:
    case DM_TABLE_STATUS:
    case DM_TABLE_CLEAR:
    case DM_TABLE_DEPS:
    case DM_LIST_VERSIONS:
        /* no input data */
        break;
    case DM_DEV_RENAME:
    case DM_DEV_SET_GEOMETRY:
        /* data contains only strings */
        memcpy(host_data, argptr, guest_data_size);
        break;
    case DM_TARGET_MSG:
        memcpy(host_data, argptr, guest_data_size);
        *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
        break;
    case DM_TABLE_LOAD:
    {
        /* payload is a chain of dm_target_spec records, each followed by
         * a parameter string; convert record by record */
        void *gspec = argptr;
        void *cur_data = host_data;
        const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
        int spec_size = thunk_type_size(arg_type, 0);
        int i;

        for (i = 0; i < host_dm->target_count; i++) {
            struct dm_target_spec *spec = cur_data;
            uint32_t next;
            int slen;

            thunk_convert(spec, gspec, arg_type, THUNK_HOST);
            slen = strlen((char*)gspec + spec_size) + 1;
            next = spec->next;
            spec->next = sizeof(*spec) + slen;
            strcpy((char*)&spec[1], gspec + spec_size);
            gspec += next;
            cur_data += spec->next;
        }
        break;
    }
    default:
        ret = -TARGET_EINVAL;
        unlock_user(argptr, guest_data, 0);
        goto out;
    }
    unlock_user(argptr, guest_data, 0);

    ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
    if (!is_error(ret)) {
        guest_data = arg + host_dm->data_start;
        guest_data_size = host_dm->data_size - host_dm->data_start;
        argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        switch (ie->host_cmd) {
        case DM_REMOVE_ALL:
        case DM_DEV_CREATE:
        case DM_DEV_REMOVE:
        case DM_DEV_RENAME:
        case DM_DEV_SUSPEND:
        case DM_DEV_STATUS:
        case DM_TABLE_LOAD:
        case DM_TABLE_CLEAR:
        case DM_TARGET_MSG:
        case DM_DEV_SET_GEOMETRY:
            /* no return data */
            break;
        case DM_LIST_DEVICES:
        {
            /* chain of dm_name_list records, each followed by a name */
            struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
            int nl_size = 12; /* can't use thunk_size due to alignment */

            while (1) {
                uint32_t next = nl->next;
                if (next) {
                    nl->next = nl_size + (strlen(nl->name) + 1);
                }
                if (remaining_data < nl->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
                strcpy(cur_data + nl_size, nl->name);
                cur_data += nl->next;
                remaining_data -= nl->next;
                if (!next) {
                    break;
                }
                nl = (void*)nl + next;
            }
            break;
        }
        case DM_DEV_WAIT:
        case DM_TABLE_STATUS:
        {
            /* chain of dm_target_spec records plus status strings */
            struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
            int spec_size = thunk_type_size(arg_type, 0);
            int i;

            for (i = 0; i < host_dm->target_count; i++) {
                uint32_t next = spec->next;
                int slen = strlen((char*)&spec[1]) + 1;
                spec->next = (cur_data - argptr) + spec_size + slen;
                if (guest_data_size < spec->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
                strcpy(cur_data + spec_size, (char*)&spec[1]);
                cur_data = argptr + spec->next;
                spec = (void*)host_dm + host_dm->data_start + next;
            }
            break;
        }
        case DM_TABLE_DEPS:
        {
            /* a count followed by an array of 64-bit device numbers */
            void *hdata = (void*)host_dm + host_dm->data_start;
            int count = *(uint32_t*)hdata;
            uint64_t *hdev = hdata + 8;
            uint64_t *gdev = argptr + 8;
            int i;

            *(uint32_t*)argptr = tswap32(count);
            for (i = 0; i < count; i++) {
                *gdev = tswap64(*hdev);
                gdev++;
                hdev++;
            }
            break;
        }
        case DM_LIST_VERSIONS:
        {
            /* chain of dm_target_versions records plus names */
            struct dm_target_versions *vers =
                (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
            int vers_size = thunk_type_size(arg_type, 0);

            while (1) {
                uint32_t next = vers->next;
                if (next) {
                    vers->next = vers_size + (strlen(vers->name) + 1);
                }
                if (remaining_data < vers->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
                strcpy(cur_data + vers_size, vers->name);
                cur_data += vers->next;
                remaining_data -= vers->next;
                if (!next) {
                    break;
                }
                vers = (void*)vers + next;
            }
            break;
        }
        default:
            unlock_user(argptr, guest_data, 0);
            ret = -TARGET_EINVAL;
            goto out;
        }
        unlock_user(argptr, guest_data, guest_data_size);

        /* write the (possibly updated) dm_ioctl header back to the guest */
        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
            goto out;
        }
        thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);
    }
out:
    g_free(big_buf);
    return ret;
}
/*
 * Special-case handler for BLKPG.  struct blkpg_ioctl_arg embeds a pointer
 * to a struct blkpg_partition payload, which must be fetched and converted
 * separately before the host ioctl.
 */
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    void *argptr;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;

    struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert blkpg */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert blkpg->data */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(ioctl(fd, ie->host_cmd, host_blkpg));

out:
    return ret;
}
/*
 * Special-case handler for routing-table ioctls (SIOCADDRT/SIOCDELRT).
 * struct rtentry contains an rt_dev string pointer which must be locked
 * as a host string rather than converted by the generic thunk.
 */
static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
                                int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    const StructEntry *se;
    const argtype *field_types;
    const int *dst_offsets, *src_offsets;
    int target_size;
    void *argptr;
    abi_ulong *target_rt_dev_ptr;
    unsigned long *host_rt_dev_ptr;
    abi_long ret;
    int i;

    assert(ie->access == IOC_W);
    assert(*arg_type == TYPE_PTR);
    arg_type++;
    assert(*arg_type == TYPE_STRUCT);
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    arg_type++;
    assert(*arg_type == (int)STRUCT_rtentry);
    se = struct_entries + *arg_type++;
    assert(se->convert[0] == NULL);
    /* convert struct here to be able to catch rt_dev string */
    field_types = se->field_types;
    dst_offsets = se->field_offsets[THUNK_HOST];
    src_offsets = se->field_offsets[THUNK_TARGET];
    for (i = 0; i < se->nb_fields; i++) {
        if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
            assert(*field_types == TYPE_PTRVOID);
            target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
            host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
            if (*target_rt_dev_ptr != 0) {
                *host_rt_dev_ptr = (unsigned long)lock_user_string(
                                                  tswapal(*target_rt_dev_ptr));
                if (!*host_rt_dev_ptr) {
                    unlock_user(argptr, arg, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                *host_rt_dev_ptr = 0;
            }
            field_types++;
            continue;
        }
        field_types = thunk_convert(buf_temp + dst_offsets[i],
                                    argptr + src_offsets[i],
                                    field_types, THUNK_HOST);
    }
    unlock_user(argptr, arg, 0);

    ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
    if (*host_rt_dev_ptr != 0) {
        unlock_user((void *)*host_rt_dev_ptr,
                    *target_rt_dev_ptr, 0);
    }
    return ret;
}
/*
 * Special-case handler for KDSIGACCEPT: the argument is a signal number
 * and must be translated from target to host numbering first.
 */
static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int sig = target_to_host_signal(arg);
    return get_errno(ioctl(fd, ie->host_cmd, sig));
}
/* Guest->host ioctl translation table, populated from ioctls.h;
 * terminated by an all-zero sentinel entry. */
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
#include "ioctls.h"
    { 0, 0, },
};
/* ??? Implement proper locking for ioctls.  */
/* do_ioctl() Must return target values and target errnos. */
/*
 * Generic ioctl dispatcher: look the request up in ioctl_entries[], then
 * either delegate to a special-case handler or marshal the argument
 * through the thunk machinery according to its declared access mode.
 */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
#if defined(DEBUG)
    gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
#endif
    if (ie->do_ioctl) {
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
        /* pass the value straight through */
        ret = get_errno(ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            /* kernel fills buf_temp; convert back to the guest */
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* convert the guest argument into buf_temp for the kernel */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            /* convert in, call, convert back out */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
/* Target <-> host translation of termios c_iflag bits
 * (entries: target mask, target bits, host mask, host bits). */
static const bitmask_transtbl iflag_tbl[] = {
        { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
        { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
        { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
        { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
        { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
        { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
        { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
        { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
        { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
        { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
        { TARGET_IXON, TARGET_IXON, IXON, IXON },
        { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
        { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
        { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
        { 0, 0, 0, 0 }
};
/* Target <-> host translation of termios c_oflag bits, including the
 * multi-bit delay fields (NLDLY/CRDLY/TABDLY/BSDLY/VTDLY/FFDLY). */
static const bitmask_transtbl oflag_tbl[] = {
	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
	{ 0, 0, 0, 0 }
};
/* Target <-> host translation of termios c_cflag bits: the CBAUD speed
 * field, the CSIZE character-size field, and the single-bit flags. */
static const bitmask_transtbl cflag_tbl[] = {
	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
	{ 0, 0, 0, 0 }
};
/* Target <-> host translation of termios c_lflag bits. */
static const bitmask_transtbl lflag_tbl[] = {
	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
	{ 0, 0, 0, 0 }
};
/*
 * Convert a guest struct target_termios into the host's struct termios:
 * translate each flag word through its bitmask table (byte-swapping the
 * guest value first) and remap the control-character array indices.
 */
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    /* indices differ between target and host, so copy one by one */
    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}
4984 static void host_to_target_termios (void *dst
, const void *src
)
4986 struct target_termios
*target
= dst
;
4987 const struct host_termios
*host
= src
;
4990 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
4992 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
4994 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
4996 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
4997 target
->c_line
= host
->c_line
;
4999 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
5000 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
5001 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
5002 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
5003 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
5004 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
5005 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
5006 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
5007 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
5008 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
5009 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
5010 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
5011 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
5012 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
5013 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
5014 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
5015 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
5016 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
5019 static const StructEntry struct_termios_def
= {
5020 .convert
= { host_to_target_termios
, target_to_host_termios
},
5021 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
5022 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
5025 static bitmask_transtbl mmap_flags_tbl
[] = {
5026 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
5027 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
5028 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
5029 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
, MAP_ANONYMOUS
, MAP_ANONYMOUS
},
5030 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
, MAP_GROWSDOWN
, MAP_GROWSDOWN
},
5031 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
, MAP_DENYWRITE
, MAP_DENYWRITE
},
5032 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
, MAP_EXECUTABLE
, MAP_EXECUTABLE
},
5033 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
5034 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
, MAP_NORESERVE
,
5039 #if defined(TARGET_I386)
5041 /* NOTE: there is really one LDT for all the threads */
5042 static uint8_t *ldt_table
;
5044 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
5051 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
5052 if (size
> bytecount
)
5054 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
5056 return -TARGET_EFAULT
;
5057 /* ??? Should this by byteswapped? */
5058 memcpy(p
, ldt_table
, size
);
5059 unlock_user(p
, ptr
, size
);
5063 /* XXX: add locking support */
5064 static abi_long
write_ldt(CPUX86State
*env
,
5065 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
5067 struct target_modify_ldt_ldt_s ldt_info
;
5068 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5069 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5070 int seg_not_present
, useable
, lm
;
5071 uint32_t *lp
, entry_1
, entry_2
;
5073 if (bytecount
!= sizeof(ldt_info
))
5074 return -TARGET_EINVAL
;
5075 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
5076 return -TARGET_EFAULT
;
5077 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5078 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5079 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5080 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5081 unlock_user_struct(target_ldt_info
, ptr
, 0);
5083 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
5084 return -TARGET_EINVAL
;
5085 seg_32bit
= ldt_info
.flags
& 1;
5086 contents
= (ldt_info
.flags
>> 1) & 3;
5087 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5088 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5089 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5090 useable
= (ldt_info
.flags
>> 6) & 1;
5094 lm
= (ldt_info
.flags
>> 7) & 1;
5096 if (contents
== 3) {
5098 return -TARGET_EINVAL
;
5099 if (seg_not_present
== 0)
5100 return -TARGET_EINVAL
;
5102 /* allocate the LDT */
5104 env
->ldt
.base
= target_mmap(0,
5105 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
5106 PROT_READ
|PROT_WRITE
,
5107 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
5108 if (env
->ldt
.base
== -1)
5109 return -TARGET_ENOMEM
;
5110 memset(g2h(env
->ldt
.base
), 0,
5111 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
5112 env
->ldt
.limit
= 0xffff;
5113 ldt_table
= g2h(env
->ldt
.base
);
5116 /* NOTE: same code as Linux kernel */
5117 /* Allow LDTs to be cleared by the user. */
5118 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5121 read_exec_only
== 1 &&
5123 limit_in_pages
== 0 &&
5124 seg_not_present
== 1 &&
5132 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5133 (ldt_info
.limit
& 0x0ffff);
5134 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5135 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5136 (ldt_info
.limit
& 0xf0000) |
5137 ((read_exec_only
^ 1) << 9) |
5139 ((seg_not_present
^ 1) << 15) |
5141 (limit_in_pages
<< 23) |
5145 entry_2
|= (useable
<< 20);
5147 /* Install the new entry ... */
5149 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
5150 lp
[0] = tswap32(entry_1
);
5151 lp
[1] = tswap32(entry_2
);
5155 /* specific and weird i386 syscalls */
5156 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
5157 unsigned long bytecount
)
5163 ret
= read_ldt(ptr
, bytecount
);
5166 ret
= write_ldt(env
, ptr
, bytecount
, 1);
5169 ret
= write_ldt(env
, ptr
, bytecount
, 0);
5172 ret
= -TARGET_ENOSYS
;
5178 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5179 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5181 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5182 struct target_modify_ldt_ldt_s ldt_info
;
5183 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5184 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5185 int seg_not_present
, useable
, lm
;
5186 uint32_t *lp
, entry_1
, entry_2
;
5189 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5190 if (!target_ldt_info
)
5191 return -TARGET_EFAULT
;
5192 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5193 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5194 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5195 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5196 if (ldt_info
.entry_number
== -1) {
5197 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
5198 if (gdt_table
[i
] == 0) {
5199 ldt_info
.entry_number
= i
;
5200 target_ldt_info
->entry_number
= tswap32(i
);
5205 unlock_user_struct(target_ldt_info
, ptr
, 1);
5207 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
5208 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
5209 return -TARGET_EINVAL
;
5210 seg_32bit
= ldt_info
.flags
& 1;
5211 contents
= (ldt_info
.flags
>> 1) & 3;
5212 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5213 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5214 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5215 useable
= (ldt_info
.flags
>> 6) & 1;
5219 lm
= (ldt_info
.flags
>> 7) & 1;
5222 if (contents
== 3) {
5223 if (seg_not_present
== 0)
5224 return -TARGET_EINVAL
;
5227 /* NOTE: same code as Linux kernel */
5228 /* Allow LDTs to be cleared by the user. */
5229 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5230 if ((contents
== 0 &&
5231 read_exec_only
== 1 &&
5233 limit_in_pages
== 0 &&
5234 seg_not_present
== 1 &&
5242 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5243 (ldt_info
.limit
& 0x0ffff);
5244 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5245 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5246 (ldt_info
.limit
& 0xf0000) |
5247 ((read_exec_only
^ 1) << 9) |
5249 ((seg_not_present
^ 1) << 15) |
5251 (limit_in_pages
<< 23) |
5256 /* Install the new entry ... */
5258 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
5259 lp
[0] = tswap32(entry_1
);
5260 lp
[1] = tswap32(entry_2
);
5264 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5266 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5267 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5268 uint32_t base_addr
, limit
, flags
;
5269 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
5270 int seg_not_present
, useable
, lm
;
5271 uint32_t *lp
, entry_1
, entry_2
;
5273 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5274 if (!target_ldt_info
)
5275 return -TARGET_EFAULT
;
5276 idx
= tswap32(target_ldt_info
->entry_number
);
5277 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
5278 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
5279 unlock_user_struct(target_ldt_info
, ptr
, 1);
5280 return -TARGET_EINVAL
;
5282 lp
= (uint32_t *)(gdt_table
+ idx
);
5283 entry_1
= tswap32(lp
[0]);
5284 entry_2
= tswap32(lp
[1]);
5286 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
5287 contents
= (entry_2
>> 10) & 3;
5288 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
5289 seg_32bit
= (entry_2
>> 22) & 1;
5290 limit_in_pages
= (entry_2
>> 23) & 1;
5291 useable
= (entry_2
>> 20) & 1;
5295 lm
= (entry_2
>> 21) & 1;
5297 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
5298 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
5299 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
5300 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
5301 base_addr
= (entry_1
>> 16) |
5302 (entry_2
& 0xff000000) |
5303 ((entry_2
& 0xff) << 16);
5304 target_ldt_info
->base_addr
= tswapal(base_addr
);
5305 target_ldt_info
->limit
= tswap32(limit
);
5306 target_ldt_info
->flags
= tswap32(flags
);
5307 unlock_user_struct(target_ldt_info
, ptr
, 1);
5310 #endif /* TARGET_I386 && TARGET_ABI32 */
5312 #ifndef TARGET_ABI32
5313 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
5320 case TARGET_ARCH_SET_GS
:
5321 case TARGET_ARCH_SET_FS
:
5322 if (code
== TARGET_ARCH_SET_GS
)
5326 cpu_x86_load_seg(env
, idx
, 0);
5327 env
->segs
[idx
].base
= addr
;
5329 case TARGET_ARCH_GET_GS
:
5330 case TARGET_ARCH_GET_FS
:
5331 if (code
== TARGET_ARCH_GET_GS
)
5335 val
= env
->segs
[idx
].base
;
5336 if (put_user(val
, addr
, abi_ulong
))
5337 ret
= -TARGET_EFAULT
;
5340 ret
= -TARGET_EINVAL
;
5347 #endif /* defined(TARGET_I386) */
5349 #define NEW_STACK_SIZE 0x40000
5352 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
5355 pthread_mutex_t mutex
;
5356 pthread_cond_t cond
;
5359 abi_ulong child_tidptr
;
5360 abi_ulong parent_tidptr
;
5364 static void *clone_func(void *arg
)
5366 new_thread_info
*info
= arg
;
5371 rcu_register_thread();
5373 cpu
= ENV_GET_CPU(env
);
5375 ts
= (TaskState
*)cpu
->opaque
;
5376 info
->tid
= gettid();
5377 cpu
->host_tid
= info
->tid
;
5379 if (info
->child_tidptr
)
5380 put_user_u32(info
->tid
, info
->child_tidptr
);
5381 if (info
->parent_tidptr
)
5382 put_user_u32(info
->tid
, info
->parent_tidptr
);
5383 /* Enable signals. */
5384 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
5385 /* Signal to the parent that we're ready. */
5386 pthread_mutex_lock(&info
->mutex
);
5387 pthread_cond_broadcast(&info
->cond
);
5388 pthread_mutex_unlock(&info
->mutex
);
5389 /* Wait until the parent has finshed initializing the tls state. */
5390 pthread_mutex_lock(&clone_lock
);
5391 pthread_mutex_unlock(&clone_lock
);
5397 /* do_fork() Must return host values and target errnos (unlike most
5398 do_*() functions). */
5399 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
5400 abi_ulong parent_tidptr
, target_ulong newtls
,
5401 abi_ulong child_tidptr
)
5403 CPUState
*cpu
= ENV_GET_CPU(env
);
5407 CPUArchState
*new_env
;
5408 unsigned int nptl_flags
;
5411 /* Emulate vfork() with fork() */
5412 if (flags
& CLONE_VFORK
)
5413 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
5415 if (flags
& CLONE_VM
) {
5416 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
5417 new_thread_info info
;
5418 pthread_attr_t attr
;
5420 ts
= g_new0(TaskState
, 1);
5421 init_task_state(ts
);
5422 /* we create a new CPU instance. */
5423 new_env
= cpu_copy(env
);
5424 /* Init regs that differ from the parent. */
5425 cpu_clone_regs(new_env
, newsp
);
5426 new_cpu
= ENV_GET_CPU(new_env
);
5427 new_cpu
->opaque
= ts
;
5428 ts
->bprm
= parent_ts
->bprm
;
5429 ts
->info
= parent_ts
->info
;
5430 ts
->signal_mask
= parent_ts
->signal_mask
;
5432 flags
&= ~CLONE_NPTL_FLAGS2
;
5434 if (nptl_flags
& CLONE_CHILD_CLEARTID
) {
5435 ts
->child_tidptr
= child_tidptr
;
5438 if (nptl_flags
& CLONE_SETTLS
)
5439 cpu_set_tls (new_env
, newtls
);
5441 /* Grab a mutex so that thread setup appears atomic. */
5442 pthread_mutex_lock(&clone_lock
);
5444 memset(&info
, 0, sizeof(info
));
5445 pthread_mutex_init(&info
.mutex
, NULL
);
5446 pthread_mutex_lock(&info
.mutex
);
5447 pthread_cond_init(&info
.cond
, NULL
);
5449 if (nptl_flags
& CLONE_CHILD_SETTID
)
5450 info
.child_tidptr
= child_tidptr
;
5451 if (nptl_flags
& CLONE_PARENT_SETTID
)
5452 info
.parent_tidptr
= parent_tidptr
;
5454 ret
= pthread_attr_init(&attr
);
5455 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
5456 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
5457 /* It is not safe to deliver signals until the child has finished
5458 initializing, so temporarily block all signals. */
5459 sigfillset(&sigmask
);
5460 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
5462 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
5463 /* TODO: Free new CPU state if thread creation failed. */
5465 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
5466 pthread_attr_destroy(&attr
);
5468 /* Wait for the child to initialize. */
5469 pthread_cond_wait(&info
.cond
, &info
.mutex
);
5471 if (flags
& CLONE_PARENT_SETTID
)
5472 put_user_u32(ret
, parent_tidptr
);
5476 pthread_mutex_unlock(&info
.mutex
);
5477 pthread_cond_destroy(&info
.cond
);
5478 pthread_mutex_destroy(&info
.mutex
);
5479 pthread_mutex_unlock(&clone_lock
);
5481 /* if no CLONE_VM, we consider it is a fork */
5482 if ((flags
& ~(CSIGNAL
| CLONE_NPTL_FLAGS2
)) != 0) {
5483 return -TARGET_EINVAL
;
5486 if (block_signals()) {
5487 return -TARGET_ERESTARTSYS
;
5493 /* Child Process. */
5495 cpu_clone_regs(env
, newsp
);
5497 /* There is a race condition here. The parent process could
5498 theoretically read the TID in the child process before the child
5499 tid is set. This would require using either ptrace
5500 (not implemented) or having *_tidptr to point at a shared memory
5501 mapping. We can't repeat the spinlock hack used above because
5502 the child process gets its own copy of the lock. */
5503 if (flags
& CLONE_CHILD_SETTID
)
5504 put_user_u32(gettid(), child_tidptr
);
5505 if (flags
& CLONE_PARENT_SETTID
)
5506 put_user_u32(gettid(), parent_tidptr
);
5507 ts
= (TaskState
*)cpu
->opaque
;
5508 if (flags
& CLONE_SETTLS
)
5509 cpu_set_tls (env
, newtls
);
5510 if (flags
& CLONE_CHILD_CLEARTID
)
5511 ts
->child_tidptr
= child_tidptr
;
5519 /* warning : doesn't handle linux specific flags... */
5520 static int target_to_host_fcntl_cmd(int cmd
)
5523 case TARGET_F_DUPFD
:
5524 case TARGET_F_GETFD
:
5525 case TARGET_F_SETFD
:
5526 case TARGET_F_GETFL
:
5527 case TARGET_F_SETFL
:
5529 case TARGET_F_GETLK
:
5531 case TARGET_F_SETLK
:
5533 case TARGET_F_SETLKW
:
5535 case TARGET_F_GETOWN
:
5537 case TARGET_F_SETOWN
:
5539 case TARGET_F_GETSIG
:
5541 case TARGET_F_SETSIG
:
5543 #if TARGET_ABI_BITS == 32
5544 case TARGET_F_GETLK64
:
5546 case TARGET_F_SETLK64
:
5548 case TARGET_F_SETLKW64
:
5551 case TARGET_F_SETLEASE
:
5553 case TARGET_F_GETLEASE
:
5555 #ifdef F_DUPFD_CLOEXEC
5556 case TARGET_F_DUPFD_CLOEXEC
:
5557 return F_DUPFD_CLOEXEC
;
5559 case TARGET_F_NOTIFY
:
5562 case TARGET_F_GETOWN_EX
:
5566 case TARGET_F_SETOWN_EX
:
5570 return -TARGET_EINVAL
;
5572 return -TARGET_EINVAL
;
5575 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
5576 static const bitmask_transtbl flock_tbl
[] = {
5577 TRANSTBL_CONVERT(F_RDLCK
),
5578 TRANSTBL_CONVERT(F_WRLCK
),
5579 TRANSTBL_CONVERT(F_UNLCK
),
5580 TRANSTBL_CONVERT(F_EXLCK
),
5581 TRANSTBL_CONVERT(F_SHLCK
),
5585 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
5588 struct target_flock
*target_fl
;
5589 struct flock64 fl64
;
5590 struct target_flock64
*target_fl64
;
5592 struct f_owner_ex fox
;
5593 struct target_f_owner_ex
*target_fox
;
5596 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
5598 if (host_cmd
== -TARGET_EINVAL
)
5602 case TARGET_F_GETLK
:
5603 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
5604 return -TARGET_EFAULT
;
5606 target_to_host_bitmask(tswap16(target_fl
->l_type
), flock_tbl
);
5607 fl
.l_whence
= tswap16(target_fl
->l_whence
);
5608 fl
.l_start
= tswapal(target_fl
->l_start
);
5609 fl
.l_len
= tswapal(target_fl
->l_len
);
5610 fl
.l_pid
= tswap32(target_fl
->l_pid
);
5611 unlock_user_struct(target_fl
, arg
, 0);
5612 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
5614 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg
, 0))
5615 return -TARGET_EFAULT
;
5617 host_to_target_bitmask(tswap16(fl
.l_type
), flock_tbl
);
5618 target_fl
->l_whence
= tswap16(fl
.l_whence
);
5619 target_fl
->l_start
= tswapal(fl
.l_start
);
5620 target_fl
->l_len
= tswapal(fl
.l_len
);
5621 target_fl
->l_pid
= tswap32(fl
.l_pid
);
5622 unlock_user_struct(target_fl
, arg
, 1);
5626 case TARGET_F_SETLK
:
5627 case TARGET_F_SETLKW
:
5628 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
5629 return -TARGET_EFAULT
;
5631 target_to_host_bitmask(tswap16(target_fl
->l_type
), flock_tbl
);
5632 fl
.l_whence
= tswap16(target_fl
->l_whence
);
5633 fl
.l_start
= tswapal(target_fl
->l_start
);
5634 fl
.l_len
= tswapal(target_fl
->l_len
);
5635 fl
.l_pid
= tswap32(target_fl
->l_pid
);
5636 unlock_user_struct(target_fl
, arg
, 0);
5637 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
5640 case TARGET_F_GETLK64
:
5641 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
5642 return -TARGET_EFAULT
;
5644 target_to_host_bitmask(tswap16(target_fl64
->l_type
), flock_tbl
) >> 1;
5645 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
5646 fl64
.l_start
= tswap64(target_fl64
->l_start
);
5647 fl64
.l_len
= tswap64(target_fl64
->l_len
);
5648 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
5649 unlock_user_struct(target_fl64
, arg
, 0);
5650 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
5652 if (!lock_user_struct(VERIFY_WRITE
, target_fl64
, arg
, 0))
5653 return -TARGET_EFAULT
;
5654 target_fl64
->l_type
=
5655 host_to_target_bitmask(tswap16(fl64
.l_type
), flock_tbl
) >> 1;
5656 target_fl64
->l_whence
= tswap16(fl64
.l_whence
);
5657 target_fl64
->l_start
= tswap64(fl64
.l_start
);
5658 target_fl64
->l_len
= tswap64(fl64
.l_len
);
5659 target_fl64
->l_pid
= tswap32(fl64
.l_pid
);
5660 unlock_user_struct(target_fl64
, arg
, 1);
5663 case TARGET_F_SETLK64
:
5664 case TARGET_F_SETLKW64
:
5665 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
5666 return -TARGET_EFAULT
;
5668 target_to_host_bitmask(tswap16(target_fl64
->l_type
), flock_tbl
) >> 1;
5669 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
5670 fl64
.l_start
= tswap64(target_fl64
->l_start
);
5671 fl64
.l_len
= tswap64(target_fl64
->l_len
);
5672 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
5673 unlock_user_struct(target_fl64
, arg
, 0);
5674 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
5677 case TARGET_F_GETFL
:
5678 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
5680 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
5684 case TARGET_F_SETFL
:
5685 ret
= get_errno(fcntl(fd
, host_cmd
, target_to_host_bitmask(arg
, fcntl_flags_tbl
)));
5689 case TARGET_F_GETOWN_EX
:
5690 ret
= get_errno(fcntl(fd
, host_cmd
, &fox
));
5692 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
5693 return -TARGET_EFAULT
;
5694 target_fox
->type
= tswap32(fox
.type
);
5695 target_fox
->pid
= tswap32(fox
.pid
);
5696 unlock_user_struct(target_fox
, arg
, 1);
5702 case TARGET_F_SETOWN_EX
:
5703 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
5704 return -TARGET_EFAULT
;
5705 fox
.type
= tswap32(target_fox
->type
);
5706 fox
.pid
= tswap32(target_fox
->pid
);
5707 unlock_user_struct(target_fox
, arg
, 0);
5708 ret
= get_errno(fcntl(fd
, host_cmd
, &fox
));
5712 case TARGET_F_SETOWN
:
5713 case TARGET_F_GETOWN
:
5714 case TARGET_F_SETSIG
:
5715 case TARGET_F_GETSIG
:
5716 case TARGET_F_SETLEASE
:
5717 case TARGET_F_GETLEASE
:
5718 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
5722 ret
= get_errno(fcntl(fd
, cmd
, arg
));
#ifdef USE_UID16
/* 16-bit UID/GID ABI helpers: clamp 32-bit host ids into the guest's
 * 16-bit range (65534 = "overflow" id), widen guest 16-bit ids (with -1
 * sign-preserved), and byteswap ids at the guest's width.
 */
static inline int high2lowuid(int uid)
{
    if (uid > 65535) {
        return 65534;
    } else {
        return uid;
    }
}
static inline int high2lowgid(int gid)
{
    if (gid > 65535) {
        return 65534;
    } else {
        return gid;
    }
}
static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1) {
        return -1;
    } else {
        return uid;
    }
}
static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1) {
        return -1;
    } else {
        return gid;
    }
}
static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */
/* 32-bit UID/GID ABI: ids pass through unchanged. */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
5794 /* We must do direct syscalls for setting UID/GID, because we want to
5795 * implement the Linux system call semantics of "change only for this thread",
5796 * not the libc/POSIX semantics of "change for all threads in process".
5797 * (See http://ewontfix.com/17/ for more details.)
5798 * We use the 32-bit version of the syscalls if present; if it is not
5799 * then either the host architecture supports 32-bit UIDs natively with
5800 * the standard syscall, or the 16-bit UID is the best we can do.
5802 #ifdef __NR_setuid32
5803 #define __NR_sys_setuid __NR_setuid32
5805 #define __NR_sys_setuid __NR_setuid
5807 #ifdef __NR_setgid32
5808 #define __NR_sys_setgid __NR_setgid32
5810 #define __NR_sys_setgid __NR_setgid
5812 #ifdef __NR_setresuid32
5813 #define __NR_sys_setresuid __NR_setresuid32
5815 #define __NR_sys_setresuid __NR_setresuid
5817 #ifdef __NR_setresgid32
5818 #define __NR_sys_setresgid __NR_setresgid32
5820 #define __NR_sys_setresgid __NR_setresgid
5823 _syscall1(int, sys_setuid
, uid_t
, uid
)
5824 _syscall1(int, sys_setgid
, gid_t
, gid
)
5825 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
5826 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
5828 void syscall_init(void)
5831 const argtype
*arg_type
;
5835 thunk_init(STRUCT_MAX
);
5837 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
5838 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
5839 #include "syscall_types.h"
5841 #undef STRUCT_SPECIAL
5843 /* Build target_to_host_errno_table[] table from
5844 * host_to_target_errno_table[]. */
5845 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
5846 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
5849 /* we patch the ioctl size if necessary. We rely on the fact that
5850 no ioctl has all the bits at '1' in the size field */
5852 while (ie
->target_cmd
!= 0) {
5853 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
5854 TARGET_IOC_SIZEMASK
) {
5855 arg_type
= ie
->arg_type
;
5856 if (arg_type
[0] != TYPE_PTR
) {
5857 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
5862 size
= thunk_type_size(arg_type
, 0);
5863 ie
->target_cmd
= (ie
->target_cmd
&
5864 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
5865 (size
<< TARGET_IOC_SIZESHIFT
);
5868 /* automatic consistency check if same arch */
5869 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
5870 (defined(__x86_64__) && defined(TARGET_X86_64))
5871 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
5872 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
5873 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#if TARGET_ABI_BITS == 32
/* Combine the two 32-bit register halves of a 64-bit guest file offset,
 * respecting the guest's endianness (which half is the high word).
 */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* On 64-bit ABIs the offset already arrives whole in the first argument. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* Emulate truncate64(2): reassemble the 64-bit length from the guest's
 * register pair (skipping the alignment padding register on ABIs that
 * require even/odd register pairs) and call the host truncate64.
 */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* Emulate ftruncate64(2): same register-pair handling as
 * target_truncate64(), but operating on an open file descriptor.
 */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
5924 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
5925 abi_ulong target_addr
)
5927 struct target_timespec
*target_ts
;
5929 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
5930 return -TARGET_EFAULT
;
5931 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
5932 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
5933 unlock_user_struct(target_ts
, target_addr
, 0);
5937 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
5938 struct timespec
*host_ts
)
5940 struct target_timespec
*target_ts
;
5942 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
5943 return -TARGET_EFAULT
;
5944 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
5945 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
5946 unlock_user_struct(target_ts
, target_addr
, 1);
5950 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
5951 abi_ulong target_addr
)
5953 struct target_itimerspec
*target_itspec
;
5955 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
5956 return -TARGET_EFAULT
;
5959 host_itspec
->it_interval
.tv_sec
=
5960 tswapal(target_itspec
->it_interval
.tv_sec
);
5961 host_itspec
->it_interval
.tv_nsec
=
5962 tswapal(target_itspec
->it_interval
.tv_nsec
);
5963 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
5964 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
5966 unlock_user_struct(target_itspec
, target_addr
, 1);
5970 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
5971 struct itimerspec
*host_its
)
5973 struct target_itimerspec
*target_itspec
;
5975 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
5976 return -TARGET_EFAULT
;
5979 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
5980 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
5982 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
5983 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
5985 unlock_user_struct(target_itspec
, target_addr
, 0);
5989 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
5990 abi_ulong target_addr
)
5992 struct target_sigevent
*target_sevp
;
5994 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
5995 return -TARGET_EFAULT
;
5998 /* This union is awkward on 64 bit systems because it has a 32 bit
5999 * integer and a pointer in it; we follow the conversion approach
6000 * used for handling sigval types in signal.c so the guest should get
6001 * the correct value back even if we did a 64 bit byteswap and it's
6002 * using the 32 bit integer.
6004 host_sevp
->sigev_value
.sival_ptr
=
6005 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
6006 host_sevp
->sigev_signo
=
6007 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
6008 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
6009 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
6011 unlock_user_struct(target_sevp
, target_addr
, 1);
#if defined(TARGET_NR_mlockall)
/* Map guest mlockall(2) flag bits to the host's MCL_* values.
 * Unknown guest bits are silently dropped.
 */
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }
    return result;
}
#endif
6030 static inline abi_long
host_to_target_stat64(void *cpu_env
,
6031 abi_ulong target_addr
,
6032 struct stat
*host_st
)
6034 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6035 if (((CPUARMState
*)cpu_env
)->eabi
) {
6036 struct target_eabi_stat64
*target_st
;
6038 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
6039 return -TARGET_EFAULT
;
6040 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
6041 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
6042 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
6043 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6044 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
6046 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
6047 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
6048 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
6049 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
6050 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
6051 __put_user(host_st
->st_size
, &target_st
->st_size
);
6052 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
6053 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
6054 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
6055 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
6056 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
6057 unlock_user_struct(target_st
, target_addr
, 1);
6061 #if defined(TARGET_HAS_STRUCT_STAT64)
6062 struct target_stat64
*target_st
;
6064 struct target_stat
*target_st
;
6067 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
6068 return -TARGET_EFAULT
;
6069 memset(target_st
, 0, sizeof(*target_st
));
6070 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
6071 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
6072 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6073 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
6075 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
6076 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
6077 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
6078 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
6079 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
6080 /* XXX: better use of kernel struct */
6081 __put_user(host_st
->st_size
, &target_st
->st_size
);
6082 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
6083 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
6084 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
6085 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
6086 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
6087 unlock_user_struct(target_st
, target_addr
, 1);
6093 /* ??? Using host futex calls even when target atomic operations
6094 are not really atomic probably breaks things. However implementing
6095 futexes locally would make futexes shared between multiple processes
6096 tricky. However they're probably useless because guest atomic
6097 operations won't work either. */
6098 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
6099 target_ulong uaddr2
, int val3
)
6101 struct timespec ts
, *pts
;
6104 /* ??? We assume FUTEX_* constants are the same on both host
6106 #ifdef FUTEX_CMD_MASK
6107 base_op
= op
& FUTEX_CMD_MASK
;
6113 case FUTEX_WAIT_BITSET
:
6116 target_to_host_timespec(pts
, timeout
);
6120 return get_errno(safe_futex(g2h(uaddr
), op
, tswap32(val
),
6123 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
6125 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
6127 case FUTEX_CMP_REQUEUE
:
6129 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6130 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6131 But the prototype takes a `struct timespec *'; insert casts
6132 to satisfy the compiler. We do not need to tswap TIMEOUT
6133 since it's not compared to guest memory. */
6134 pts
= (struct timespec
*)(uintptr_t) timeout
;
6135 return get_errno(safe_futex(g2h(uaddr
), op
, val
, pts
,
6137 (base_op
== FUTEX_CMP_REQUEUE
6141 return -TARGET_ENOSYS
;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate name_to_handle_at(2): read the guest's handle_bytes field,
 * run the host syscall into a host-side buffer, then copy the opaque
 * handle back to guest memory with the two header fields byte-swapped.
 * Returns 0 on success or -TARGET_errno.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* First field of the guest struct is handle_bytes: the caller's
     * buffer capacity.  */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate open_by_handle_at(2): copy the guest's file_handle into a
 * host-side buffer (byte-swapping the header fields), call the host
 * syscall, and return the new fd or -TARGET_errno.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    /* handle_bytes is the first field of the guest structure. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
6232 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
/* signalfd siginfo conversion */

/* Convert one struct signalfd_siginfo from host to guest byte order.
 * tinfo and info may alias the same buffer (callers pass buf+i for both),
 * which is why ssi_errno is deliberately read back through tinfo.
 */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    /* Translate the host signal number before any field is swapped.  */
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */

#ifdef BUS_MCEERR_AO
    if (tinfo->ssi_signo == SIGBUS &&
        (tinfo->ssi_code == BUS_MCEERR_AR ||
         tinfo->ssi_code == BUS_MCEERR_AO)) {
        /* ssi_addr_lsb lives in the padding right after ssi_addr.  */
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    tinfo->ssi_errno = tswap32(tinfo->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
6274 static abi_long
host_to_target_data_signalfd(void *buf
, size_t len
)
6278 for (i
= 0; i
< len
; i
+= sizeof(struct signalfd_siginfo
)) {
6279 host_to_target_signalfd_siginfo(buf
+ i
, buf
+ i
);
6285 static TargetFdTrans target_signalfd_trans
= {
6286 .host_to_target_data
= host_to_target_data_signalfd
,
6289 static abi_long
do_signalfd4(int fd
, abi_long mask
, int flags
)
6292 target_sigset_t
*target_mask
;
6296 if (flags
& ~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
)) {
6297 return -TARGET_EINVAL
;
6299 if (!lock_user_struct(VERIFY_READ
, target_mask
, mask
, 1)) {
6300 return -TARGET_EFAULT
;
6303 target_to_host_sigset(&host_mask
, target_mask
);
6305 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
6307 ret
= get_errno(signalfd(fd
, &host_mask
, host_flags
));
6309 fd_trans_register(ret
, &target_signalfd_trans
);
6312 unlock_user_struct(target_mask
, mask
, 0);
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Low 7 bits hold the terminating signal; translate just those.  */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }

    if (WIFSTOPPED(status)) {
        /* Stop signal is in bits 8-15; bits 0-7 (0x7f) stay as-is.  */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }

    return status;
}
/* Back /proc/self/cmdline for the guest: stream the host file into fd,
 * but drop the first NUL-terminated string (the qemu binary path) so the
 * guest sees its own argv.  Returns 0 on success, -1 on error.
 * NOTE(review): reconstructed from a corrupted extract — error-path lines
 * were dropped by the scrape and restored from upstream QEMU; verify.
 */
static int open_self_cmdline(void *cpu_env, int fd)
{
    int fd_orig = -1;
    bool word_skipped = false;

    fd_orig = open("/proc/self/cmdline", O_RDONLY);
    if (fd_orig < 0) {
        return fd_orig;
    }

    while (true) {
        ssize_t nb_read;
        char buf[128];
        char *cp_buf = buf;

        nb_read = read(fd_orig, buf, sizeof(buf));
        if (nb_read < 0) {
            int e = errno;
            fd_orig = close(fd_orig);
            errno = e;
            return -1;
        } else if (nb_read == 0) {
            break;
        }

        if (!word_skipped) {
            /* Skip the first string, which is the path to qemu-*-static
               instead of the actual command. */
            cp_buf = memchr(buf, 0, sizeof(buf));
            if (cp_buf) {
                /* Null byte found, skip one string */
                cp_buf++;
                nb_read -= cp_buf - buf;
                word_skipped = true;
            }
        }

        if (word_skipped) {
            if (write(fd, cp_buf, nb_read) != nb_read) {
                int e = errno;
                close(fd_orig);
                errno = e;
                return -1;
            }
        }
    }

    return close(fd_orig);
}
6382 static int open_self_maps(void *cpu_env
, int fd
)
6384 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
6385 TaskState
*ts
= cpu
->opaque
;
6391 fp
= fopen("/proc/self/maps", "r");
6396 while ((read
= getline(&line
, &len
, fp
)) != -1) {
6397 int fields
, dev_maj
, dev_min
, inode
;
6398 uint64_t min
, max
, offset
;
6399 char flag_r
, flag_w
, flag_x
, flag_p
;
6400 char path
[512] = "";
6401 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
6402 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
6403 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
6405 if ((fields
< 10) || (fields
> 11)) {
6408 if (h2g_valid(min
)) {
6409 int flags
= page_get_flags(h2g(min
));
6410 max
= h2g_valid(max
- 1) ? max
: (uintptr_t)g2h(GUEST_ADDR_MAX
);
6411 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
6414 if (h2g(min
) == ts
->info
->stack_limit
) {
6415 pstrcpy(path
, sizeof(path
), " [stack]");
6417 dprintf(fd
, TARGET_ABI_FMT_lx
"-" TARGET_ABI_FMT_lx
6418 " %c%c%c%c %08" PRIx64
" %02x:%02x %d %s%s\n",
6419 h2g(min
), h2g(max
- 1) + 1, flag_r
, flag_w
,
6420 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
6421 path
[0] ? " " : "", path
);
6431 static int open_self_stat(void *cpu_env
, int fd
)
6433 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
6434 TaskState
*ts
= cpu
->opaque
;
6435 abi_ulong start_stack
= ts
->info
->start_stack
;
6438 for (i
= 0; i
< 44; i
++) {
6446 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
6447 } else if (i
== 1) {
6449 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
6450 } else if (i
== 27) {
6453 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
6455 /* for the rest, there is MasterCard */
6456 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
6460 if (write(fd
, buf
, len
) != len
) {
6468 static int open_self_auxv(void *cpu_env
, int fd
)
6470 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
6471 TaskState
*ts
= cpu
->opaque
;
6472 abi_ulong auxv
= ts
->info
->saved_auxv
;
6473 abi_ulong len
= ts
->info
->auxv_len
;
6477 * Auxiliary vector is stored in target process stack.
6478 * read in whole auxv vector and copy it to file
6480 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
6484 r
= write(fd
, ptr
, len
);
6491 lseek(fd
, 0, SEEK_SET
);
6492 unlock_user(ptr
, auxv
, len
);
/* Return 1 iff filename names the given entry of this process's /proc
 * directory — i.e. "/proc/self/<entry>" or "/proc/<our pid>/<entry>".
 * Used to intercept the guest's reads of its own proc files.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}
6522 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-path comparator used by the fake_open table for absolute /proc
 * entries (contrast with is_proc_myself's per-process matching).
 */
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}
6528 static int open_net_route(void *cpu_env
, int fd
)
6535 fp
= fopen("/proc/net/route", "r");
6542 read
= getline(&line
, &len
, fp
);
6543 dprintf(fd
, "%s", line
);
6547 while ((read
= getline(&line
, &len
, fp
)) != -1) {
6549 uint32_t dest
, gw
, mask
;
6550 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
6551 sscanf(line
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6552 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
6553 &mask
, &mtu
, &window
, &irtt
);
6554 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6555 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
6556 metric
, tswap32(mask
), mtu
, window
, irtt
);
6566 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
6569 const char *filename
;
6570 int (*fill
)(void *cpu_env
, int fd
);
6571 int (*cmp
)(const char *s1
, const char *s2
);
6573 const struct fake_open
*fake_open
;
6574 static const struct fake_open fakes
[] = {
6575 { "maps", open_self_maps
, is_proc_myself
},
6576 { "stat", open_self_stat
, is_proc_myself
},
6577 { "auxv", open_self_auxv
, is_proc_myself
},
6578 { "cmdline", open_self_cmdline
, is_proc_myself
},
6579 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6580 { "/proc/net/route", open_net_route
, is_proc
},
6582 { NULL
, NULL
, NULL
}
6585 if (is_proc_myself(pathname
, "exe")) {
6586 int execfd
= qemu_getauxval(AT_EXECFD
);
6587 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
6590 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
6591 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
6596 if (fake_open
->filename
) {
6598 char filename
[PATH_MAX
];
6601 /* create temporary file to map stat to */
6602 tmpdir
= getenv("TMPDIR");
6605 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
6606 fd
= mkstemp(filename
);
6612 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
6618 lseek(fd
, 0, SEEK_SET
);
6623 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
6626 #define TIMER_MAGIC 0x0caf0000
6627 #define TIMER_MAGIC_MASK 0xffff0000
6629 /* Convert QEMU provided timer ID back to internal 16bit index format */
6630 static target_timer_t
get_timer_id(abi_long arg
)
6632 target_timer_t timerid
= arg
;
6634 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
6635 return -TARGET_EINVAL
;
6640 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
6641 return -TARGET_EINVAL
;
6647 /* do_syscall() should always have a single exit point at the end so
6648 that actions, such as logging of syscall results, can be performed.
6649 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
6650 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
6651 abi_long arg2
, abi_long arg3
, abi_long arg4
,
6652 abi_long arg5
, abi_long arg6
, abi_long arg7
,
6655 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
6661 #if defined(DEBUG_ERESTARTSYS)
6662 /* Debug-only code for exercising the syscall-restart code paths
6663 * in the per-architecture cpu main loops: restart every syscall
6664 * the guest makes once before letting it through.
6671 return -TARGET_ERESTARTSYS
;
6677 gemu_log("syscall %d", num
);
6680 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6683 case TARGET_NR_exit
:
6684 /* In old applications this may be used to implement _exit(2).
6685 However in threaded applictions it is used for thread termination,
6686 and _exit_group is used for application termination.
6687 Do thread termination if we have more then one thread. */
6689 if (block_signals()) {
6690 ret
= -TARGET_ERESTARTSYS
;
6694 if (CPU_NEXT(first_cpu
)) {
6698 /* Remove the CPU from the list. */
6699 QTAILQ_REMOVE(&cpus
, cpu
, node
);
6702 if (ts
->child_tidptr
) {
6703 put_user_u32(0, ts
->child_tidptr
);
6704 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
6708 object_unref(OBJECT(cpu
));
6710 rcu_unregister_thread();
6716 gdb_exit(cpu_env
, arg1
);
6718 ret
= 0; /* avoid warning */
6720 case TARGET_NR_read
:
6724 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
6726 ret
= get_errno(safe_read(arg1
, p
, arg3
));
6728 fd_trans_host_to_target_data(arg1
)) {
6729 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
6731 unlock_user(p
, arg2
, ret
);
6734 case TARGET_NR_write
:
6735 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
6737 ret
= get_errno(safe_write(arg1
, p
, arg3
));
6738 unlock_user(p
, arg2
, 0);
6740 #ifdef TARGET_NR_open
6741 case TARGET_NR_open
:
6742 if (!(p
= lock_user_string(arg1
)))
6744 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
6745 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
6747 fd_trans_unregister(ret
);
6748 unlock_user(p
, arg1
, 0);
6751 case TARGET_NR_openat
:
6752 if (!(p
= lock_user_string(arg2
)))
6754 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
6755 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
6757 fd_trans_unregister(ret
);
6758 unlock_user(p
, arg2
, 0);
6760 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6761 case TARGET_NR_name_to_handle_at
:
6762 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
6765 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6766 case TARGET_NR_open_by_handle_at
:
6767 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
6768 fd_trans_unregister(ret
);
6771 case TARGET_NR_close
:
6772 fd_trans_unregister(arg1
);
6773 ret
= get_errno(close(arg1
));
6778 #ifdef TARGET_NR_fork
6779 case TARGET_NR_fork
:
6780 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
6783 #ifdef TARGET_NR_waitpid
6784 case TARGET_NR_waitpid
:
6787 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
6788 if (!is_error(ret
) && arg2
&& ret
6789 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
6794 #ifdef TARGET_NR_waitid
6795 case TARGET_NR_waitid
:
6799 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
6800 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
6801 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
6803 host_to_target_siginfo(p
, &info
);
6804 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
6809 #ifdef TARGET_NR_creat /* not on alpha */
6810 case TARGET_NR_creat
:
6811 if (!(p
= lock_user_string(arg1
)))
6813 ret
= get_errno(creat(p
, arg2
));
6814 fd_trans_unregister(ret
);
6815 unlock_user(p
, arg1
, 0);
6818 #ifdef TARGET_NR_link
6819 case TARGET_NR_link
:
6822 p
= lock_user_string(arg1
);
6823 p2
= lock_user_string(arg2
);
6825 ret
= -TARGET_EFAULT
;
6827 ret
= get_errno(link(p
, p2
));
6828 unlock_user(p2
, arg2
, 0);
6829 unlock_user(p
, arg1
, 0);
6833 #if defined(TARGET_NR_linkat)
6834 case TARGET_NR_linkat
:
6839 p
= lock_user_string(arg2
);
6840 p2
= lock_user_string(arg4
);
6842 ret
= -TARGET_EFAULT
;
6844 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
6845 unlock_user(p
, arg2
, 0);
6846 unlock_user(p2
, arg4
, 0);
6850 #ifdef TARGET_NR_unlink
6851 case TARGET_NR_unlink
:
6852 if (!(p
= lock_user_string(arg1
)))
6854 ret
= get_errno(unlink(p
));
6855 unlock_user(p
, arg1
, 0);
6858 #if defined(TARGET_NR_unlinkat)
6859 case TARGET_NR_unlinkat
:
6860 if (!(p
= lock_user_string(arg2
)))
6862 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
6863 unlock_user(p
, arg2
, 0);
6866 case TARGET_NR_execve
:
6868 char **argp
, **envp
;
6871 abi_ulong guest_argp
;
6872 abi_ulong guest_envp
;
6879 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
6880 if (get_user_ual(addr
, gp
))
6888 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
6889 if (get_user_ual(addr
, gp
))
6896 argp
= alloca((argc
+ 1) * sizeof(void *));
6897 envp
= alloca((envc
+ 1) * sizeof(void *));
6899 for (gp
= guest_argp
, q
= argp
; gp
;
6900 gp
+= sizeof(abi_ulong
), q
++) {
6901 if (get_user_ual(addr
, gp
))
6905 if (!(*q
= lock_user_string(addr
)))
6907 total_size
+= strlen(*q
) + 1;
6911 for (gp
= guest_envp
, q
= envp
; gp
;
6912 gp
+= sizeof(abi_ulong
), q
++) {
6913 if (get_user_ual(addr
, gp
))
6917 if (!(*q
= lock_user_string(addr
)))
6919 total_size
+= strlen(*q
) + 1;
6923 if (!(p
= lock_user_string(arg1
)))
6925 /* Although execve() is not an interruptible syscall it is
6926 * a special case where we must use the safe_syscall wrapper:
6927 * if we allow a signal to happen before we make the host
6928 * syscall then we will 'lose' it, because at the point of
6929 * execve the process leaves QEMU's control. So we use the
6930 * safe syscall wrapper to ensure that we either take the
6931 * signal as a guest signal, or else it does not happen
6932 * before the execve completes and makes it the other
6933 * program's problem.
6935 ret
= get_errno(safe_execve(p
, argp
, envp
));
6936 unlock_user(p
, arg1
, 0);
6941 ret
= -TARGET_EFAULT
;
6944 for (gp
= guest_argp
, q
= argp
; *q
;
6945 gp
+= sizeof(abi_ulong
), q
++) {
6946 if (get_user_ual(addr
, gp
)
6949 unlock_user(*q
, addr
, 0);
6951 for (gp
= guest_envp
, q
= envp
; *q
;
6952 gp
+= sizeof(abi_ulong
), q
++) {
6953 if (get_user_ual(addr
, gp
)
6956 unlock_user(*q
, addr
, 0);
6960 case TARGET_NR_chdir
:
6961 if (!(p
= lock_user_string(arg1
)))
6963 ret
= get_errno(chdir(p
));
6964 unlock_user(p
, arg1
, 0);
6966 #ifdef TARGET_NR_time
6967 case TARGET_NR_time
:
6970 ret
= get_errno(time(&host_time
));
6973 && put_user_sal(host_time
, arg1
))
6978 #ifdef TARGET_NR_mknod
6979 case TARGET_NR_mknod
:
6980 if (!(p
= lock_user_string(arg1
)))
6982 ret
= get_errno(mknod(p
, arg2
, arg3
));
6983 unlock_user(p
, arg1
, 0);
6986 #if defined(TARGET_NR_mknodat)
6987 case TARGET_NR_mknodat
:
6988 if (!(p
= lock_user_string(arg2
)))
6990 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
6991 unlock_user(p
, arg2
, 0);
6994 #ifdef TARGET_NR_chmod
6995 case TARGET_NR_chmod
:
6996 if (!(p
= lock_user_string(arg1
)))
6998 ret
= get_errno(chmod(p
, arg2
));
6999 unlock_user(p
, arg1
, 0);
7002 #ifdef TARGET_NR_break
7003 case TARGET_NR_break
:
7006 #ifdef TARGET_NR_oldstat
7007 case TARGET_NR_oldstat
:
7010 case TARGET_NR_lseek
:
7011 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
7013 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7014 /* Alpha specific */
7015 case TARGET_NR_getxpid
:
7016 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
7017 ret
= get_errno(getpid());
7020 #ifdef TARGET_NR_getpid
7021 case TARGET_NR_getpid
:
7022 ret
= get_errno(getpid());
7025 case TARGET_NR_mount
:
7027 /* need to look at the data field */
7031 p
= lock_user_string(arg1
);
7039 p2
= lock_user_string(arg2
);
7042 unlock_user(p
, arg1
, 0);
7048 p3
= lock_user_string(arg3
);
7051 unlock_user(p
, arg1
, 0);
7053 unlock_user(p2
, arg2
, 0);
7060 /* FIXME - arg5 should be locked, but it isn't clear how to
7061 * do that since it's not guaranteed to be a NULL-terminated
7065 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
7067 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
7069 ret
= get_errno(ret
);
7072 unlock_user(p
, arg1
, 0);
7074 unlock_user(p2
, arg2
, 0);
7076 unlock_user(p3
, arg3
, 0);
7080 #ifdef TARGET_NR_umount
7081 case TARGET_NR_umount
:
7082 if (!(p
= lock_user_string(arg1
)))
7084 ret
= get_errno(umount(p
));
7085 unlock_user(p
, arg1
, 0);
7088 #ifdef TARGET_NR_stime /* not on alpha */
7089 case TARGET_NR_stime
:
7092 if (get_user_sal(host_time
, arg1
))
7094 ret
= get_errno(stime(&host_time
));
7098 case TARGET_NR_ptrace
:
7100 #ifdef TARGET_NR_alarm /* not on alpha */
7101 case TARGET_NR_alarm
:
7105 #ifdef TARGET_NR_oldfstat
7106 case TARGET_NR_oldfstat
:
7109 #ifdef TARGET_NR_pause /* not on alpha */
7110 case TARGET_NR_pause
:
7111 if (!block_signals()) {
7112 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
7114 ret
= -TARGET_EINTR
;
7117 #ifdef TARGET_NR_utime
7118 case TARGET_NR_utime
:
7120 struct utimbuf tbuf
, *host_tbuf
;
7121 struct target_utimbuf
*target_tbuf
;
7123 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
7125 tbuf
.actime
= tswapal(target_tbuf
->actime
);
7126 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
7127 unlock_user_struct(target_tbuf
, arg2
, 0);
7132 if (!(p
= lock_user_string(arg1
)))
7134 ret
= get_errno(utime(p
, host_tbuf
));
7135 unlock_user(p
, arg1
, 0);
7139 #ifdef TARGET_NR_utimes
7140 case TARGET_NR_utimes
:
7142 struct timeval
*tvp
, tv
[2];
7144 if (copy_from_user_timeval(&tv
[0], arg2
)
7145 || copy_from_user_timeval(&tv
[1],
7146 arg2
+ sizeof(struct target_timeval
)))
7152 if (!(p
= lock_user_string(arg1
)))
7154 ret
= get_errno(utimes(p
, tvp
));
7155 unlock_user(p
, arg1
, 0);
7159 #if defined(TARGET_NR_futimesat)
7160 case TARGET_NR_futimesat
:
7162 struct timeval
*tvp
, tv
[2];
7164 if (copy_from_user_timeval(&tv
[0], arg3
)
7165 || copy_from_user_timeval(&tv
[1],
7166 arg3
+ sizeof(struct target_timeval
)))
7172 if (!(p
= lock_user_string(arg2
)))
7174 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
7175 unlock_user(p
, arg2
, 0);
7179 #ifdef TARGET_NR_stty
7180 case TARGET_NR_stty
:
7183 #ifdef TARGET_NR_gtty
7184 case TARGET_NR_gtty
:
7187 #ifdef TARGET_NR_access
7188 case TARGET_NR_access
:
7189 if (!(p
= lock_user_string(arg1
)))
7191 ret
= get_errno(access(path(p
), arg2
));
7192 unlock_user(p
, arg1
, 0);
7195 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7196 case TARGET_NR_faccessat
:
7197 if (!(p
= lock_user_string(arg2
)))
7199 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
7200 unlock_user(p
, arg2
, 0);
7203 #ifdef TARGET_NR_nice /* not on alpha */
7204 case TARGET_NR_nice
:
7205 ret
= get_errno(nice(arg1
));
7208 #ifdef TARGET_NR_ftime
7209 case TARGET_NR_ftime
:
7212 case TARGET_NR_sync
:
7216 case TARGET_NR_kill
:
7217 ret
= get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
7219 #ifdef TARGET_NR_rename
7220 case TARGET_NR_rename
:
7223 p
= lock_user_string(arg1
);
7224 p2
= lock_user_string(arg2
);
7226 ret
= -TARGET_EFAULT
;
7228 ret
= get_errno(rename(p
, p2
));
7229 unlock_user(p2
, arg2
, 0);
7230 unlock_user(p
, arg1
, 0);
7234 #if defined(TARGET_NR_renameat)
7235 case TARGET_NR_renameat
:
7238 p
= lock_user_string(arg2
);
7239 p2
= lock_user_string(arg4
);
7241 ret
= -TARGET_EFAULT
;
7243 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
7244 unlock_user(p2
, arg4
, 0);
7245 unlock_user(p
, arg2
, 0);
7249 #ifdef TARGET_NR_mkdir
7250 case TARGET_NR_mkdir
:
7251 if (!(p
= lock_user_string(arg1
)))
7253 ret
= get_errno(mkdir(p
, arg2
));
7254 unlock_user(p
, arg1
, 0);
7257 #if defined(TARGET_NR_mkdirat)
7258 case TARGET_NR_mkdirat
:
7259 if (!(p
= lock_user_string(arg2
)))
7261 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
7262 unlock_user(p
, arg2
, 0);
7265 #ifdef TARGET_NR_rmdir
7266 case TARGET_NR_rmdir
:
7267 if (!(p
= lock_user_string(arg1
)))
7269 ret
= get_errno(rmdir(p
));
7270 unlock_user(p
, arg1
, 0);
7274 ret
= get_errno(dup(arg1
));
7276 fd_trans_dup(arg1
, ret
);
7279 #ifdef TARGET_NR_pipe
7280 case TARGET_NR_pipe
:
7281 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
7284 #ifdef TARGET_NR_pipe2
7285 case TARGET_NR_pipe2
:
7286 ret
= do_pipe(cpu_env
, arg1
,
7287 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
7290 case TARGET_NR_times
:
7292 struct target_tms
*tmsp
;
7294 ret
= get_errno(times(&tms
));
7296 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
7299 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
7300 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
7301 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
7302 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
7305 ret
= host_to_target_clock_t(ret
);
7308 #ifdef TARGET_NR_prof
7309 case TARGET_NR_prof
:
7312 #ifdef TARGET_NR_signal
7313 case TARGET_NR_signal
:
7316 case TARGET_NR_acct
:
7318 ret
= get_errno(acct(NULL
));
7320 if (!(p
= lock_user_string(arg1
)))
7322 ret
= get_errno(acct(path(p
)));
7323 unlock_user(p
, arg1
, 0);
7326 #ifdef TARGET_NR_umount2
7327 case TARGET_NR_umount2
:
7328 if (!(p
= lock_user_string(arg1
)))
7330 ret
= get_errno(umount2(p
, arg2
));
7331 unlock_user(p
, arg1
, 0);
7334 #ifdef TARGET_NR_lock
7335 case TARGET_NR_lock
:
7338 case TARGET_NR_ioctl
:
7339 ret
= do_ioctl(arg1
, arg2
, arg3
);
7341 case TARGET_NR_fcntl
:
7342 ret
= do_fcntl(arg1
, arg2
, arg3
);
7344 #ifdef TARGET_NR_mpx
7348 case TARGET_NR_setpgid
:
7349 ret
= get_errno(setpgid(arg1
, arg2
));
7351 #ifdef TARGET_NR_ulimit
7352 case TARGET_NR_ulimit
:
7355 #ifdef TARGET_NR_oldolduname
7356 case TARGET_NR_oldolduname
:
7359 case TARGET_NR_umask
:
7360 ret
= get_errno(umask(arg1
));
7362 case TARGET_NR_chroot
:
7363 if (!(p
= lock_user_string(arg1
)))
7365 ret
= get_errno(chroot(p
));
7366 unlock_user(p
, arg1
, 0);
7368 #ifdef TARGET_NR_ustat
7369 case TARGET_NR_ustat
:
7372 #ifdef TARGET_NR_dup2
7373 case TARGET_NR_dup2
:
7374 ret
= get_errno(dup2(arg1
, arg2
));
7376 fd_trans_dup(arg1
, arg2
);
7380 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7381 case TARGET_NR_dup3
:
7382 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
7384 fd_trans_dup(arg1
, arg2
);
7388 #ifdef TARGET_NR_getppid /* not on alpha */
7389 case TARGET_NR_getppid
:
7390 ret
= get_errno(getppid());
7393 #ifdef TARGET_NR_getpgrp
7394 case TARGET_NR_getpgrp
:
7395 ret
= get_errno(getpgrp());
7398 case TARGET_NR_setsid
:
7399 ret
= get_errno(setsid());
7401 #ifdef TARGET_NR_sigaction
7402 case TARGET_NR_sigaction
:
7404 #if defined(TARGET_ALPHA)
7405 struct target_sigaction act
, oact
, *pact
= 0;
7406 struct target_old_sigaction
*old_act
;
7408 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
7410 act
._sa_handler
= old_act
->_sa_handler
;
7411 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
7412 act
.sa_flags
= old_act
->sa_flags
;
7413 act
.sa_restorer
= 0;
7414 unlock_user_struct(old_act
, arg2
, 0);
7417 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7418 if (!is_error(ret
) && arg3
) {
7419 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
7421 old_act
->_sa_handler
= oact
._sa_handler
;
7422 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
7423 old_act
->sa_flags
= oact
.sa_flags
;
7424 unlock_user_struct(old_act
, arg3
, 1);
7426 #elif defined(TARGET_MIPS)
7427 struct target_sigaction act
, oact
, *pact
, *old_act
;
7430 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
7432 act
._sa_handler
= old_act
->_sa_handler
;
7433 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
7434 act
.sa_flags
= old_act
->sa_flags
;
7435 unlock_user_struct(old_act
, arg2
, 0);
7441 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7443 if (!is_error(ret
) && arg3
) {
7444 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
7446 old_act
->_sa_handler
= oact
._sa_handler
;
7447 old_act
->sa_flags
= oact
.sa_flags
;
7448 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
7449 old_act
->sa_mask
.sig
[1] = 0;
7450 old_act
->sa_mask
.sig
[2] = 0;
7451 old_act
->sa_mask
.sig
[3] = 0;
7452 unlock_user_struct(old_act
, arg3
, 1);
7455 struct target_old_sigaction
*old_act
;
7456 struct target_sigaction act
, oact
, *pact
;
7458 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
7460 act
._sa_handler
= old_act
->_sa_handler
;
7461 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
7462 act
.sa_flags
= old_act
->sa_flags
;
7463 act
.sa_restorer
= old_act
->sa_restorer
;
7464 unlock_user_struct(old_act
, arg2
, 0);
7469 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7470 if (!is_error(ret
) && arg3
) {
7471 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
7473 old_act
->_sa_handler
= oact
._sa_handler
;
7474 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
7475 old_act
->sa_flags
= oact
.sa_flags
;
7476 old_act
->sa_restorer
= oact
.sa_restorer
;
7477 unlock_user_struct(old_act
, arg3
, 1);
7483 case TARGET_NR_rt_sigaction
:
7485 #if defined(TARGET_ALPHA)
7486 struct target_sigaction act
, oact
, *pact
= 0;
7487 struct target_rt_sigaction
*rt_act
;
7488 /* ??? arg4 == sizeof(sigset_t). */
7490 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
7492 act
._sa_handler
= rt_act
->_sa_handler
;
7493 act
.sa_mask
= rt_act
->sa_mask
;
7494 act
.sa_flags
= rt_act
->sa_flags
;
7495 act
.sa_restorer
= arg5
;
7496 unlock_user_struct(rt_act
, arg2
, 0);
7499 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7500 if (!is_error(ret
) && arg3
) {
7501 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
7503 rt_act
->_sa_handler
= oact
._sa_handler
;
7504 rt_act
->sa_mask
= oact
.sa_mask
;
7505 rt_act
->sa_flags
= oact
.sa_flags
;
7506 unlock_user_struct(rt_act
, arg3
, 1);
7509 struct target_sigaction
*act
;
7510 struct target_sigaction
*oact
;
7513 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
7518 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
7519 ret
= -TARGET_EFAULT
;
7520 goto rt_sigaction_fail
;
7524 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
7527 unlock_user_struct(act
, arg2
, 0);
7529 unlock_user_struct(oact
, arg3
, 1);
7533 #ifdef TARGET_NR_sgetmask /* not on alpha */
7534 case TARGET_NR_sgetmask
:
7537 abi_ulong target_set
;
7538 ret
= do_sigprocmask(0, NULL
, &cur_set
);
7540 host_to_target_old_sigset(&target_set
, &cur_set
);
7546 #ifdef TARGET_NR_ssetmask /* not on alpha */
7547 case TARGET_NR_ssetmask
:
7549 sigset_t set
, oset
, cur_set
;
7550 abi_ulong target_set
= arg1
;
7551 /* We only have one word of the new mask so we must read
7552 * the rest of it with do_sigprocmask() and OR in this word.
7553 * We are guaranteed that a do_sigprocmask() that only queries
7554 * the signal mask will not fail.
7556 ret
= do_sigprocmask(0, NULL
, &cur_set
);
7558 target_to_host_old_sigset(&set
, &target_set
);
7559 sigorset(&set
, &set
, &cur_set
);
7560 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
7562 host_to_target_old_sigset(&target_set
, &oset
);
7568 #ifdef TARGET_NR_sigprocmask
7569 case TARGET_NR_sigprocmask
:
7571 #if defined(TARGET_ALPHA)
7572 sigset_t set
, oldset
;
7577 case TARGET_SIG_BLOCK
:
7580 case TARGET_SIG_UNBLOCK
:
7583 case TARGET_SIG_SETMASK
:
7587 ret
= -TARGET_EINVAL
;
7591 target_to_host_old_sigset(&set
, &mask
);
7593 ret
= do_sigprocmask(how
, &set
, &oldset
);
7594 if (!is_error(ret
)) {
7595 host_to_target_old_sigset(&mask
, &oldset
);
7597 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
7600 sigset_t set
, oldset
, *set_ptr
;
7605 case TARGET_SIG_BLOCK
:
7608 case TARGET_SIG_UNBLOCK
:
7611 case TARGET_SIG_SETMASK
:
7615 ret
= -TARGET_EINVAL
;
7618 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
7620 target_to_host_old_sigset(&set
, p
);
7621 unlock_user(p
, arg2
, 0);
7627 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
7628 if (!is_error(ret
) && arg3
) {
7629 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
7631 host_to_target_old_sigset(p
, &oldset
);
7632 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
7638 case TARGET_NR_rt_sigprocmask
:
7641 sigset_t set
, oldset
, *set_ptr
;
7645 case TARGET_SIG_BLOCK
:
7648 case TARGET_SIG_UNBLOCK
:
7651 case TARGET_SIG_SETMASK
:
7655 ret
= -TARGET_EINVAL
;
7658 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
7660 target_to_host_sigset(&set
, p
);
7661 unlock_user(p
, arg2
, 0);
7667 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
7668 if (!is_error(ret
) && arg3
) {
7669 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
7671 host_to_target_sigset(p
, &oldset
);
7672 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
7676 #ifdef TARGET_NR_sigpending
7677 case TARGET_NR_sigpending
:
7680 ret
= get_errno(sigpending(&set
));
7681 if (!is_error(ret
)) {
7682 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
7684 host_to_target_old_sigset(p
, &set
);
7685 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
7690 case TARGET_NR_rt_sigpending
:
7693 ret
= get_errno(sigpending(&set
));
7694 if (!is_error(ret
)) {
7695 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
7697 host_to_target_sigset(p
, &set
);
7698 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
7702 #ifdef TARGET_NR_sigsuspend
7703 case TARGET_NR_sigsuspend
:
7705 TaskState
*ts
= cpu
->opaque
;
7706 #if defined(TARGET_ALPHA)
7707 abi_ulong mask
= arg1
;
7708 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
7710 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
7712 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
7713 unlock_user(p
, arg1
, 0);
7715 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
7717 if (ret
!= -TARGET_ERESTARTSYS
) {
7718 ts
->in_sigsuspend
= 1;
7723 case TARGET_NR_rt_sigsuspend
:
7725 TaskState
*ts
= cpu
->opaque
;
7726 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
7728 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
7729 unlock_user(p
, arg1
, 0);
7730 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
7732 if (ret
!= -TARGET_ERESTARTSYS
) {
7733 ts
->in_sigsuspend
= 1;
7737 case TARGET_NR_rt_sigtimedwait
:
7740 struct timespec uts
, *puts
;
7743 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
7745 target_to_host_sigset(&set
, p
);
7746 unlock_user(p
, arg1
, 0);
7749 target_to_host_timespec(puts
, arg3
);
7753 ret
= get_errno(sigtimedwait(&set
, &uinfo
, puts
));
7754 if (!is_error(ret
)) {
7756 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
7761 host_to_target_siginfo(p
, &uinfo
);
7762 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
7764 ret
= host_to_target_signal(ret
);
7768 case TARGET_NR_rt_sigqueueinfo
:
7771 if (!(p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_sigset_t
), 1)))
7773 target_to_host_siginfo(&uinfo
, p
);
7774 unlock_user(p
, arg1
, 0);
7775 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
7778 #ifdef TARGET_NR_sigreturn
7779 case TARGET_NR_sigreturn
:
7780 if (block_signals()) {
7781 ret
= -TARGET_ERESTARTSYS
;
7783 ret
= do_sigreturn(cpu_env
);
7787 case TARGET_NR_rt_sigreturn
:
7788 if (block_signals()) {
7789 ret
= -TARGET_ERESTARTSYS
;
7791 ret
= do_rt_sigreturn(cpu_env
);
7794 case TARGET_NR_sethostname
:
7795 if (!(p
= lock_user_string(arg1
)))
7797 ret
= get_errno(sethostname(p
, arg2
));
7798 unlock_user(p
, arg1
, 0);
7800 case TARGET_NR_setrlimit
:
7802 int resource
= target_to_host_resource(arg1
);
7803 struct target_rlimit
*target_rlim
;
7805 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
7807 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
7808 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
7809 unlock_user_struct(target_rlim
, arg2
, 0);
7810 ret
= get_errno(setrlimit(resource
, &rlim
));
7813 case TARGET_NR_getrlimit
:
7815 int resource
= target_to_host_resource(arg1
);
7816 struct target_rlimit
*target_rlim
;
7819 ret
= get_errno(getrlimit(resource
, &rlim
));
7820 if (!is_error(ret
)) {
7821 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
7823 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
7824 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
7825 unlock_user_struct(target_rlim
, arg2
, 1);
7829 case TARGET_NR_getrusage
:
7831 struct rusage rusage
;
7832 ret
= get_errno(getrusage(arg1
, &rusage
));
7833 if (!is_error(ret
)) {
7834 ret
= host_to_target_rusage(arg2
, &rusage
);
7838 case TARGET_NR_gettimeofday
:
7841 ret
= get_errno(gettimeofday(&tv
, NULL
));
7842 if (!is_error(ret
)) {
7843 if (copy_to_user_timeval(arg1
, &tv
))
7848 case TARGET_NR_settimeofday
:
7850 struct timeval tv
, *ptv
= NULL
;
7851 struct timezone tz
, *ptz
= NULL
;
7854 if (copy_from_user_timeval(&tv
, arg1
)) {
7861 if (copy_from_user_timezone(&tz
, arg2
)) {
7867 ret
= get_errno(settimeofday(ptv
, ptz
));
7870 #if defined(TARGET_NR_select)
7871 case TARGET_NR_select
:
7872 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
7873 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
7876 struct target_sel_arg_struct
*sel
;
7877 abi_ulong inp
, outp
, exp
, tvp
;
7880 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1))
7882 nsel
= tswapal(sel
->n
);
7883 inp
= tswapal(sel
->inp
);
7884 outp
= tswapal(sel
->outp
);
7885 exp
= tswapal(sel
->exp
);
7886 tvp
= tswapal(sel
->tvp
);
7887 unlock_user_struct(sel
, arg1
, 0);
7888 ret
= do_select(nsel
, inp
, outp
, exp
, tvp
);
7893 #ifdef TARGET_NR_pselect6
7894 case TARGET_NR_pselect6
:
7896 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
7897 fd_set rfds
, wfds
, efds
;
7898 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
7899 struct timespec ts
, *ts_ptr
;
7902 * The 6th arg is actually two args smashed together,
7903 * so we cannot use the C library.
7911 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
7912 target_sigset_t
*target_sigset
;
7920 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
7924 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
7928 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
7934 * This takes a timespec, and not a timeval, so we cannot
7935 * use the do_select() helper ...
7938 if (target_to_host_timespec(&ts
, ts_addr
)) {
7946 /* Extract the two packed args for the sigset */
7949 sig
.size
= SIGSET_T_SIZE
;
7951 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
7955 arg_sigset
= tswapal(arg7
[0]);
7956 arg_sigsize
= tswapal(arg7
[1]);
7957 unlock_user(arg7
, arg6
, 0);
7961 if (arg_sigsize
!= sizeof(*target_sigset
)) {
7962 /* Like the kernel, we enforce correct size sigsets */
7963 ret
= -TARGET_EINVAL
;
7966 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
7967 sizeof(*target_sigset
), 1);
7968 if (!target_sigset
) {
7971 target_to_host_sigset(&set
, target_sigset
);
7972 unlock_user(target_sigset
, arg_sigset
, 0);
7980 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
7983 if (!is_error(ret
)) {
7984 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
7986 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
7988 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
7991 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
7997 #ifdef TARGET_NR_symlink
7998 case TARGET_NR_symlink
:
8001 p
= lock_user_string(arg1
);
8002 p2
= lock_user_string(arg2
);
8004 ret
= -TARGET_EFAULT
;
8006 ret
= get_errno(symlink(p
, p2
));
8007 unlock_user(p2
, arg2
, 0);
8008 unlock_user(p
, arg1
, 0);
8012 #if defined(TARGET_NR_symlinkat)
8013 case TARGET_NR_symlinkat
:
8016 p
= lock_user_string(arg1
);
8017 p2
= lock_user_string(arg3
);
8019 ret
= -TARGET_EFAULT
;
8021 ret
= get_errno(symlinkat(p
, arg2
, p2
));
8022 unlock_user(p2
, arg3
, 0);
8023 unlock_user(p
, arg1
, 0);
8027 #ifdef TARGET_NR_oldlstat
8028 case TARGET_NR_oldlstat
:
8031 #ifdef TARGET_NR_readlink
8032 case TARGET_NR_readlink
:
8035 p
= lock_user_string(arg1
);
8036 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8038 ret
= -TARGET_EFAULT
;
8040 /* Short circuit this for the magic exe check. */
8041 ret
= -TARGET_EINVAL
;
8042 } else if (is_proc_myself((const char *)p
, "exe")) {
8043 char real
[PATH_MAX
], *temp
;
8044 temp
= realpath(exec_path
, real
);
8045 /* Return value is # of bytes that we wrote to the buffer. */
8047 ret
= get_errno(-1);
8049 /* Don't worry about sign mismatch as earlier mapping
8050 * logic would have thrown a bad address error. */
8051 ret
= MIN(strlen(real
), arg3
);
8052 /* We cannot NUL terminate the string. */
8053 memcpy(p2
, real
, ret
);
8056 ret
= get_errno(readlink(path(p
), p2
, arg3
));
8058 unlock_user(p2
, arg2
, ret
);
8059 unlock_user(p
, arg1
, 0);
8063 #if defined(TARGET_NR_readlinkat)
8064 case TARGET_NR_readlinkat
:
8067 p
= lock_user_string(arg2
);
8068 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
8070 ret
= -TARGET_EFAULT
;
8071 } else if (is_proc_myself((const char *)p
, "exe")) {
8072 char real
[PATH_MAX
], *temp
;
8073 temp
= realpath(exec_path
, real
);
8074 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
8075 snprintf((char *)p2
, arg4
, "%s", real
);
8077 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
8079 unlock_user(p2
, arg3
, ret
);
8080 unlock_user(p
, arg2
, 0);
8084 #ifdef TARGET_NR_uselib
8085 case TARGET_NR_uselib
:
8088 #ifdef TARGET_NR_swapon
8089 case TARGET_NR_swapon
:
8090 if (!(p
= lock_user_string(arg1
)))
8092 ret
= get_errno(swapon(p
, arg2
));
8093 unlock_user(p
, arg1
, 0);
8096 case TARGET_NR_reboot
:
8097 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
8098 /* arg4 must be ignored in all other cases */
8099 p
= lock_user_string(arg4
);
8103 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
8104 unlock_user(p
, arg4
, 0);
8106 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
8109 #ifdef TARGET_NR_readdir
8110 case TARGET_NR_readdir
:
8113 #ifdef TARGET_NR_mmap
8114 case TARGET_NR_mmap
:
8115 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8116 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8117 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8118 || defined(TARGET_S390X)
8121 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
8122 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
8130 unlock_user(v
, arg1
, 0);
8131 ret
= get_errno(target_mmap(v1
, v2
, v3
,
8132 target_to_host_bitmask(v4
, mmap_flags_tbl
),
8136 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
8137 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
8143 #ifdef TARGET_NR_mmap2
8144 case TARGET_NR_mmap2
:
8146 #define MMAP_SHIFT 12
8148 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
8149 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
8151 arg6
<< MMAP_SHIFT
));
8154 case TARGET_NR_munmap
:
8155 ret
= get_errno(target_munmap(arg1
, arg2
));
8157 case TARGET_NR_mprotect
:
8159 TaskState
*ts
= cpu
->opaque
;
8160 /* Special hack to detect libc making the stack executable. */
8161 if ((arg3
& PROT_GROWSDOWN
)
8162 && arg1
>= ts
->info
->stack_limit
8163 && arg1
<= ts
->info
->start_stack
) {
8164 arg3
&= ~PROT_GROWSDOWN
;
8165 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
8166 arg1
= ts
->info
->stack_limit
;
8169 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
8171 #ifdef TARGET_NR_mremap
8172 case TARGET_NR_mremap
:
8173 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
8176 /* ??? msync/mlock/munlock are broken for softmmu. */
8177 #ifdef TARGET_NR_msync
8178 case TARGET_NR_msync
:
8179 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
8182 #ifdef TARGET_NR_mlock
8183 case TARGET_NR_mlock
:
8184 ret
= get_errno(mlock(g2h(arg1
), arg2
));
8187 #ifdef TARGET_NR_munlock
8188 case TARGET_NR_munlock
:
8189 ret
= get_errno(munlock(g2h(arg1
), arg2
));
8192 #ifdef TARGET_NR_mlockall
8193 case TARGET_NR_mlockall
:
8194 ret
= get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
8197 #ifdef TARGET_NR_munlockall
8198 case TARGET_NR_munlockall
:
8199 ret
= get_errno(munlockall());
8202 case TARGET_NR_truncate
:
8203 if (!(p
= lock_user_string(arg1
)))
8205 ret
= get_errno(truncate(p
, arg2
));
8206 unlock_user(p
, arg1
, 0);
8208 case TARGET_NR_ftruncate
:
8209 ret
= get_errno(ftruncate(arg1
, arg2
));
8211 case TARGET_NR_fchmod
:
8212 ret
= get_errno(fchmod(arg1
, arg2
));
8214 #if defined(TARGET_NR_fchmodat)
8215 case TARGET_NR_fchmodat
:
8216 if (!(p
= lock_user_string(arg2
)))
8218 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
8219 unlock_user(p
, arg2
, 0);
8222 case TARGET_NR_getpriority
:
8223 /* Note that negative values are valid for getpriority, so we must
8224 differentiate based on errno settings. */
8226 ret
= getpriority(arg1
, arg2
);
8227 if (ret
== -1 && errno
!= 0) {
8228 ret
= -host_to_target_errno(errno
);
8232 /* Return value is the unbiased priority. Signal no error. */
8233 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
8235 /* Return value is a biased priority to avoid negative numbers. */
8239 case TARGET_NR_setpriority
:
8240 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
8242 #ifdef TARGET_NR_profil
8243 case TARGET_NR_profil
:
8246 case TARGET_NR_statfs
:
8247 if (!(p
= lock_user_string(arg1
)))
8249 ret
= get_errno(statfs(path(p
), &stfs
));
8250 unlock_user(p
, arg1
, 0);
8252 if (!is_error(ret
)) {
8253 struct target_statfs
*target_stfs
;
8255 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
8257 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
8258 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
8259 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
8260 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
8261 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
8262 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
8263 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
8264 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
8265 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
8266 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
8267 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
8268 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
8269 unlock_user_struct(target_stfs
, arg2
, 1);
8272 case TARGET_NR_fstatfs
:
8273 ret
= get_errno(fstatfs(arg1
, &stfs
));
8274 goto convert_statfs
;
8275 #ifdef TARGET_NR_statfs64
8276 case TARGET_NR_statfs64
:
8277 if (!(p
= lock_user_string(arg1
)))
8279 ret
= get_errno(statfs(path(p
), &stfs
));
8280 unlock_user(p
, arg1
, 0);
8282 if (!is_error(ret
)) {
8283 struct target_statfs64
*target_stfs
;
8285 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
8287 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
8288 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
8289 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
8290 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
8291 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
8292 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
8293 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
8294 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
8295 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
8296 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
8297 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
8298 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
8299 unlock_user_struct(target_stfs
, arg3
, 1);
8302 case TARGET_NR_fstatfs64
:
8303 ret
= get_errno(fstatfs(arg1
, &stfs
));
8304 goto convert_statfs64
;
8306 #ifdef TARGET_NR_ioperm
8307 case TARGET_NR_ioperm
:
8310 #ifdef TARGET_NR_socketcall
8311 case TARGET_NR_socketcall
:
8312 ret
= do_socketcall(arg1
, arg2
);
8315 #ifdef TARGET_NR_accept
8316 case TARGET_NR_accept
:
8317 ret
= do_accept4(arg1
, arg2
, arg3
, 0);
8320 #ifdef TARGET_NR_accept4
8321 case TARGET_NR_accept4
:
8322 #ifdef CONFIG_ACCEPT4
8323 ret
= do_accept4(arg1
, arg2
, arg3
, arg4
);
8329 #ifdef TARGET_NR_bind
8330 case TARGET_NR_bind
:
8331 ret
= do_bind(arg1
, arg2
, arg3
);
8334 #ifdef TARGET_NR_connect
8335 case TARGET_NR_connect
:
8336 ret
= do_connect(arg1
, arg2
, arg3
);
8339 #ifdef TARGET_NR_getpeername
8340 case TARGET_NR_getpeername
:
8341 ret
= do_getpeername(arg1
, arg2
, arg3
);
8344 #ifdef TARGET_NR_getsockname
8345 case TARGET_NR_getsockname
:
8346 ret
= do_getsockname(arg1
, arg2
, arg3
);
8349 #ifdef TARGET_NR_getsockopt
8350 case TARGET_NR_getsockopt
:
8351 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
8354 #ifdef TARGET_NR_listen
8355 case TARGET_NR_listen
:
8356 ret
= get_errno(listen(arg1
, arg2
));
8359 #ifdef TARGET_NR_recv
8360 case TARGET_NR_recv
:
8361 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
8364 #ifdef TARGET_NR_recvfrom
8365 case TARGET_NR_recvfrom
:
8366 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8369 #ifdef TARGET_NR_recvmsg
8370 case TARGET_NR_recvmsg
:
8371 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
8374 #ifdef TARGET_NR_send
8375 case TARGET_NR_send
:
8376 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
8379 #ifdef TARGET_NR_sendmsg
8380 case TARGET_NR_sendmsg
:
8381 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
8384 #ifdef TARGET_NR_sendmmsg
8385 case TARGET_NR_sendmmsg
:
8386 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
8388 case TARGET_NR_recvmmsg
:
8389 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
8392 #ifdef TARGET_NR_sendto
8393 case TARGET_NR_sendto
:
8394 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8397 #ifdef TARGET_NR_shutdown
8398 case TARGET_NR_shutdown
:
8399 ret
= get_errno(shutdown(arg1
, arg2
));
8402 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8403 case TARGET_NR_getrandom
:
8404 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
8408 ret
= get_errno(getrandom(p
, arg2
, arg3
));
8409 unlock_user(p
, arg1
, ret
);
8412 #ifdef TARGET_NR_socket
8413 case TARGET_NR_socket
:
8414 ret
= do_socket(arg1
, arg2
, arg3
);
8415 fd_trans_unregister(ret
);
8418 #ifdef TARGET_NR_socketpair
8419 case TARGET_NR_socketpair
:
8420 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
8423 #ifdef TARGET_NR_setsockopt
8424 case TARGET_NR_setsockopt
:
8425 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
8429 case TARGET_NR_syslog
:
8430 if (!(p
= lock_user_string(arg2
)))
8432 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
8433 unlock_user(p
, arg2
, 0);
8436 case TARGET_NR_setitimer
:
8438 struct itimerval value
, ovalue
, *pvalue
;
8442 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
8443 || copy_from_user_timeval(&pvalue
->it_value
,
8444 arg2
+ sizeof(struct target_timeval
)))
8449 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
8450 if (!is_error(ret
) && arg3
) {
8451 if (copy_to_user_timeval(arg3
,
8452 &ovalue
.it_interval
)
8453 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
8459 case TARGET_NR_getitimer
:
8461 struct itimerval value
;
8463 ret
= get_errno(getitimer(arg1
, &value
));
8464 if (!is_error(ret
) && arg2
) {
8465 if (copy_to_user_timeval(arg2
,
8467 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
8473 #ifdef TARGET_NR_stat
8474 case TARGET_NR_stat
:
8475 if (!(p
= lock_user_string(arg1
)))
8477 ret
= get_errno(stat(path(p
), &st
));
8478 unlock_user(p
, arg1
, 0);
8481 #ifdef TARGET_NR_lstat
8482 case TARGET_NR_lstat
:
8483 if (!(p
= lock_user_string(arg1
)))
8485 ret
= get_errno(lstat(path(p
), &st
));
8486 unlock_user(p
, arg1
, 0);
8489 case TARGET_NR_fstat
:
8491 ret
= get_errno(fstat(arg1
, &st
));
8492 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
8495 if (!is_error(ret
)) {
8496 struct target_stat
*target_st
;
8498 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
8500 memset(target_st
, 0, sizeof(*target_st
));
8501 __put_user(st
.st_dev
, &target_st
->st_dev
);
8502 __put_user(st
.st_ino
, &target_st
->st_ino
);
8503 __put_user(st
.st_mode
, &target_st
->st_mode
);
8504 __put_user(st
.st_uid
, &target_st
->st_uid
);
8505 __put_user(st
.st_gid
, &target_st
->st_gid
);
8506 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
8507 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
8508 __put_user(st
.st_size
, &target_st
->st_size
);
8509 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
8510 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
8511 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
8512 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
8513 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
8514 unlock_user_struct(target_st
, arg2
, 1);
8518 #ifdef TARGET_NR_olduname
8519 case TARGET_NR_olduname
:
8522 #ifdef TARGET_NR_iopl
8523 case TARGET_NR_iopl
:
8526 case TARGET_NR_vhangup
:
8527 ret
= get_errno(vhangup());
8529 #ifdef TARGET_NR_idle
8530 case TARGET_NR_idle
:
8533 #ifdef TARGET_NR_syscall
8534 case TARGET_NR_syscall
:
8535 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
8536 arg6
, arg7
, arg8
, 0);
8539 case TARGET_NR_wait4
:
8542 abi_long status_ptr
= arg2
;
8543 struct rusage rusage
, *rusage_ptr
;
8544 abi_ulong target_rusage
= arg4
;
8545 abi_long rusage_err
;
8547 rusage_ptr
= &rusage
;
8550 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
8551 if (!is_error(ret
)) {
8552 if (status_ptr
&& ret
) {
8553 status
= host_to_target_waitstatus(status
);
8554 if (put_user_s32(status
, status_ptr
))
8557 if (target_rusage
) {
8558 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
8566 #ifdef TARGET_NR_swapoff
8567 case TARGET_NR_swapoff
:
8568 if (!(p
= lock_user_string(arg1
)))
8570 ret
= get_errno(swapoff(p
));
8571 unlock_user(p
, arg1
, 0);
8574 case TARGET_NR_sysinfo
:
8576 struct target_sysinfo
*target_value
;
8577 struct sysinfo value
;
8578 ret
= get_errno(sysinfo(&value
));
8579 if (!is_error(ret
) && arg1
)
8581 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
8583 __put_user(value
.uptime
, &target_value
->uptime
);
8584 __put_user(value
.loads
[0], &target_value
->loads
[0]);
8585 __put_user(value
.loads
[1], &target_value
->loads
[1]);
8586 __put_user(value
.loads
[2], &target_value
->loads
[2]);
8587 __put_user(value
.totalram
, &target_value
->totalram
);
8588 __put_user(value
.freeram
, &target_value
->freeram
);
8589 __put_user(value
.sharedram
, &target_value
->sharedram
);
8590 __put_user(value
.bufferram
, &target_value
->bufferram
);
8591 __put_user(value
.totalswap
, &target_value
->totalswap
);
8592 __put_user(value
.freeswap
, &target_value
->freeswap
);
8593 __put_user(value
.procs
, &target_value
->procs
);
8594 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
8595 __put_user(value
.freehigh
, &target_value
->freehigh
);
8596 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
8597 unlock_user_struct(target_value
, arg1
, 1);
8601 #ifdef TARGET_NR_ipc
8603 ret
= do_ipc(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8606 #ifdef TARGET_NR_semget
8607 case TARGET_NR_semget
:
8608 ret
= get_errno(semget(arg1
, arg2
, arg3
));
8611 #ifdef TARGET_NR_semop
8612 case TARGET_NR_semop
:
8613 ret
= do_semop(arg1
, arg2
, arg3
);
8616 #ifdef TARGET_NR_semctl
8617 case TARGET_NR_semctl
:
8618 ret
= do_semctl(arg1
, arg2
, arg3
, arg4
);
8621 #ifdef TARGET_NR_msgctl
8622 case TARGET_NR_msgctl
:
8623 ret
= do_msgctl(arg1
, arg2
, arg3
);
8626 #ifdef TARGET_NR_msgget
8627 case TARGET_NR_msgget
:
8628 ret
= get_errno(msgget(arg1
, arg2
));
8631 #ifdef TARGET_NR_msgrcv
8632 case TARGET_NR_msgrcv
:
8633 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
8636 #ifdef TARGET_NR_msgsnd
8637 case TARGET_NR_msgsnd
:
8638 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
8641 #ifdef TARGET_NR_shmget
8642 case TARGET_NR_shmget
:
8643 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
8646 #ifdef TARGET_NR_shmctl
8647 case TARGET_NR_shmctl
:
8648 ret
= do_shmctl(arg1
, arg2
, arg3
);
8651 #ifdef TARGET_NR_shmat
8652 case TARGET_NR_shmat
:
8653 ret
= do_shmat(arg1
, arg2
, arg3
);
8656 #ifdef TARGET_NR_shmdt
8657 case TARGET_NR_shmdt
:
8658 ret
= do_shmdt(arg1
);
8661 case TARGET_NR_fsync
:
8662 ret
= get_errno(fsync(arg1
));
8664 case TARGET_NR_clone
:
8665 /* Linux manages to have three different orderings for its
8666 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
8667 * match the kernel's CONFIG_CLONE_* settings.
8668 * Microblaze is further special in that it uses a sixth
8669 * implicit argument to clone for the TLS pointer.
8671 #if defined(TARGET_MICROBLAZE)
8672 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
8673 #elif defined(TARGET_CLONE_BACKWARDS)
8674 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
8675 #elif defined(TARGET_CLONE_BACKWARDS2)
8676 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
8678 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
8681 #ifdef __NR_exit_group
8682 /* new thread calls */
8683 case TARGET_NR_exit_group
:
8687 gdb_exit(cpu_env
, arg1
);
8688 ret
= get_errno(exit_group(arg1
));
8691 case TARGET_NR_setdomainname
:
8692 if (!(p
= lock_user_string(arg1
)))
8694 ret
= get_errno(setdomainname(p
, arg2
));
8695 unlock_user(p
, arg1
, 0);
8697 case TARGET_NR_uname
:
8698 /* no need to transcode because we use the linux syscall */
8700 struct new_utsname
* buf
;
8702 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
8704 ret
= get_errno(sys_uname(buf
));
8705 if (!is_error(ret
)) {
8706 /* Overrite the native machine name with whatever is being
8708 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
8709 /* Allow the user to override the reported release. */
8710 if (qemu_uname_release
&& *qemu_uname_release
)
8711 strcpy (buf
->release
, qemu_uname_release
);
8713 unlock_user_struct(buf
, arg1
, 1);
8717 case TARGET_NR_modify_ldt
:
8718 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
8720 #if !defined(TARGET_X86_64)
8721 case TARGET_NR_vm86old
:
8723 case TARGET_NR_vm86
:
8724 ret
= do_vm86(cpu_env
, arg1
, arg2
);
8728 case TARGET_NR_adjtimex
:
8730 #ifdef TARGET_NR_create_module
8731 case TARGET_NR_create_module
:
8733 case TARGET_NR_init_module
:
8734 case TARGET_NR_delete_module
:
8735 #ifdef TARGET_NR_get_kernel_syms
8736 case TARGET_NR_get_kernel_syms
:
8739 case TARGET_NR_quotactl
:
8741 case TARGET_NR_getpgid
:
8742 ret
= get_errno(getpgid(arg1
));
8744 case TARGET_NR_fchdir
:
8745 ret
= get_errno(fchdir(arg1
));
8747 #ifdef TARGET_NR_bdflush /* not on x86_64 */
8748 case TARGET_NR_bdflush
:
8751 #ifdef TARGET_NR_sysfs
8752 case TARGET_NR_sysfs
:
8755 case TARGET_NR_personality
:
8756 ret
= get_errno(personality(arg1
));
8758 #ifdef TARGET_NR_afs_syscall
8759 case TARGET_NR_afs_syscall
:
8762 #ifdef TARGET_NR__llseek /* Not on alpha */
8763 case TARGET_NR__llseek
:
8766 #if !defined(__NR_llseek)
8767 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | arg3
, arg5
);
8769 ret
= get_errno(res
);
8774 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
8776 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
8782 #ifdef TARGET_NR_getdents
8783 case TARGET_NR_getdents
:
8784 #ifdef __NR_getdents
8785 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
8787 struct target_dirent
*target_dirp
;
8788 struct linux_dirent
*dirp
;
8789 abi_long count
= arg3
;
8791 dirp
= g_try_malloc(count
);
8793 ret
= -TARGET_ENOMEM
;
8797 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
8798 if (!is_error(ret
)) {
8799 struct linux_dirent
*de
;
8800 struct target_dirent
*tde
;
8802 int reclen
, treclen
;
8803 int count1
, tnamelen
;
8807 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
8811 reclen
= de
->d_reclen
;
8812 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
8813 assert(tnamelen
>= 0);
8814 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
8815 assert(count1
+ treclen
<= count
);
8816 tde
->d_reclen
= tswap16(treclen
);
8817 tde
->d_ino
= tswapal(de
->d_ino
);
8818 tde
->d_off
= tswapal(de
->d_off
);
8819 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
8820 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
8822 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
8826 unlock_user(target_dirp
, arg2
, ret
);
8832 struct linux_dirent
*dirp
;
8833 abi_long count
= arg3
;
8835 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
8837 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
8838 if (!is_error(ret
)) {
8839 struct linux_dirent
*de
;
8844 reclen
= de
->d_reclen
;
8847 de
->d_reclen
= tswap16(reclen
);
8848 tswapls(&de
->d_ino
);
8849 tswapls(&de
->d_off
);
8850 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
8854 unlock_user(dirp
, arg2
, ret
);
8858 /* Implement getdents in terms of getdents64 */
8860 struct linux_dirent64
*dirp
;
8861 abi_long count
= arg3
;
8863 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
8867 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
8868 if (!is_error(ret
)) {
8869 /* Convert the dirent64 structs to target dirent. We do this
8870 * in-place, since we can guarantee that a target_dirent is no
8871 * larger than a dirent64; however this means we have to be
8872 * careful to read everything before writing in the new format.
8874 struct linux_dirent64
*de
;
8875 struct target_dirent
*tde
;
8880 tde
= (struct target_dirent
*)dirp
;
8882 int namelen
, treclen
;
8883 int reclen
= de
->d_reclen
;
8884 uint64_t ino
= de
->d_ino
;
8885 int64_t off
= de
->d_off
;
8886 uint8_t type
= de
->d_type
;
8888 namelen
= strlen(de
->d_name
);
8889 treclen
= offsetof(struct target_dirent
, d_name
)
8891 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
8893 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
8894 tde
->d_ino
= tswapal(ino
);
8895 tde
->d_off
= tswapal(off
);
8896 tde
->d_reclen
= tswap16(treclen
);
8897 /* The target_dirent type is in what was formerly a padding
8898 * byte at the end of the structure:
8900 *(((char *)tde
) + treclen
- 1) = type
;
8902 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
8903 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
8909 unlock_user(dirp
, arg2
, ret
);
8913 #endif /* TARGET_NR_getdents */
8914 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8915 case TARGET_NR_getdents64
:
8917 struct linux_dirent64
*dirp
;
8918 abi_long count
= arg3
;
8919 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
8921 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
8922 if (!is_error(ret
)) {
8923 struct linux_dirent64
*de
;
8928 reclen
= de
->d_reclen
;
8931 de
->d_reclen
= tswap16(reclen
);
8932 tswap64s((uint64_t *)&de
->d_ino
);
8933 tswap64s((uint64_t *)&de
->d_off
);
8934 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
8938 unlock_user(dirp
, arg2
, ret
);
8941 #endif /* TARGET_NR_getdents64 */
8942 #if defined(TARGET_NR__newselect)
8943 case TARGET_NR__newselect
:
8944 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
8947 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
8948 # ifdef TARGET_NR_poll
8949 case TARGET_NR_poll
:
8951 # ifdef TARGET_NR_ppoll
8952 case TARGET_NR_ppoll
:
8955 struct target_pollfd
*target_pfd
;
8956 unsigned int nfds
= arg2
;
8964 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
8965 sizeof(struct target_pollfd
) * nfds
, 1);
8970 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
8971 for (i
= 0; i
< nfds
; i
++) {
8972 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
8973 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
8977 # ifdef TARGET_NR_ppoll
8978 if (num
== TARGET_NR_ppoll
) {
8979 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
8980 target_sigset_t
*target_set
;
8981 sigset_t _set
, *set
= &_set
;
8984 if (target_to_host_timespec(timeout_ts
, arg3
)) {
8985 unlock_user(target_pfd
, arg1
, 0);
8993 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
8995 unlock_user(target_pfd
, arg1
, 0);
8998 target_to_host_sigset(set
, target_set
);
9003 ret
= get_errno(sys_ppoll(pfd
, nfds
, timeout_ts
,
9004 set
, SIGSET_T_SIZE
));
9006 if (!is_error(ret
) && arg3
) {
9007 host_to_target_timespec(arg3
, timeout_ts
);
9010 unlock_user(target_set
, arg4
, 0);
9014 ret
= get_errno(poll(pfd
, nfds
, timeout
));
9016 if (!is_error(ret
)) {
9017 for(i
= 0; i
< nfds
; i
++) {
9018 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
9021 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
9025 case TARGET_NR_flock
:
9026 /* NOTE: the flock constant seems to be the same for every
9028 ret
= get_errno(flock(arg1
, arg2
));
9030 case TARGET_NR_readv
:
9032 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
9034 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
9035 unlock_iovec(vec
, arg2
, arg3
, 1);
9037 ret
= -host_to_target_errno(errno
);
9041 case TARGET_NR_writev
:
9043 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
9045 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
9046 unlock_iovec(vec
, arg2
, arg3
, 0);
9048 ret
= -host_to_target_errno(errno
);
9052 case TARGET_NR_getsid
:
9053 ret
= get_errno(getsid(arg1
));
9055 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9056 case TARGET_NR_fdatasync
:
9057 ret
= get_errno(fdatasync(arg1
));
9060 #ifdef TARGET_NR__sysctl
9061 case TARGET_NR__sysctl
:
9062 /* We don't implement this, but ENOTDIR is always a safe
9064 ret
= -TARGET_ENOTDIR
;
9067 case TARGET_NR_sched_getaffinity
:
9069 unsigned int mask_size
;
9070 unsigned long *mask
;
9073 * sched_getaffinity needs multiples of ulong, so need to take
9074 * care of mismatches between target ulong and host ulong sizes.
9076 if (arg2
& (sizeof(abi_ulong
) - 1)) {
9077 ret
= -TARGET_EINVAL
;
9080 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
9082 mask
= alloca(mask_size
);
9083 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
9085 if (!is_error(ret
)) {
9087 /* More data returned than the caller's buffer will fit.
9088 * This only happens if sizeof(abi_long) < sizeof(long)
9089 * and the caller passed us a buffer holding an odd number
9090 * of abi_longs. If the host kernel is actually using the
9091 * extra 4 bytes then fail EINVAL; otherwise we can just
9092 * ignore them and only copy the interesting part.
9094 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
9095 if (numcpus
> arg2
* 8) {
9096 ret
= -TARGET_EINVAL
;
9102 if (copy_to_user(arg3
, mask
, ret
)) {
9108 case TARGET_NR_sched_setaffinity
:
9110 unsigned int mask_size
;
9111 unsigned long *mask
;
9114 * sched_setaffinity needs multiples of ulong, so need to take
9115 * care of mismatches between target ulong and host ulong sizes.
9117 if (arg2
& (sizeof(abi_ulong
) - 1)) {
9118 ret
= -TARGET_EINVAL
;
9121 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
9123 mask
= alloca(mask_size
);
9124 if (!lock_user_struct(VERIFY_READ
, p
, arg3
, 1)) {
9127 memcpy(mask
, p
, arg2
);
9128 unlock_user_struct(p
, arg2
, 0);
9130 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
9133 case TARGET_NR_sched_setparam
:
9135 struct sched_param
*target_schp
;
9136 struct sched_param schp
;
9139 return -TARGET_EINVAL
;
9141 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
9143 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
9144 unlock_user_struct(target_schp
, arg2
, 0);
9145 ret
= get_errno(sched_setparam(arg1
, &schp
));
9148 case TARGET_NR_sched_getparam
:
9150 struct sched_param
*target_schp
;
9151 struct sched_param schp
;
9154 return -TARGET_EINVAL
;
9156 ret
= get_errno(sched_getparam(arg1
, &schp
));
9157 if (!is_error(ret
)) {
9158 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
9160 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
9161 unlock_user_struct(target_schp
, arg2
, 1);
9165 case TARGET_NR_sched_setscheduler
:
9167 struct sched_param
*target_schp
;
9168 struct sched_param schp
;
9170 return -TARGET_EINVAL
;
9172 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
9174 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
9175 unlock_user_struct(target_schp
, arg3
, 0);
9176 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
9179 case TARGET_NR_sched_getscheduler
:
9180 ret
= get_errno(sched_getscheduler(arg1
));
9182 case TARGET_NR_sched_yield
:
9183 ret
= get_errno(sched_yield());
9185 case TARGET_NR_sched_get_priority_max
:
9186 ret
= get_errno(sched_get_priority_max(arg1
));
9188 case TARGET_NR_sched_get_priority_min
:
9189 ret
= get_errno(sched_get_priority_min(arg1
));
9191 case TARGET_NR_sched_rr_get_interval
:
9194 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
9195 if (!is_error(ret
)) {
9196 ret
= host_to_target_timespec(arg2
, &ts
);
9200 case TARGET_NR_nanosleep
:
9202 struct timespec req
, rem
;
9203 target_to_host_timespec(&req
, arg1
);
9204 ret
= get_errno(nanosleep(&req
, &rem
));
9205 if (is_error(ret
) && arg2
) {
9206 host_to_target_timespec(arg2
, &rem
);
9210 #ifdef TARGET_NR_query_module
9211 case TARGET_NR_query_module
:
9214 #ifdef TARGET_NR_nfsservctl
9215 case TARGET_NR_nfsservctl
:
9218 case TARGET_NR_prctl
:
9220 case PR_GET_PDEATHSIG
:
9223 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
9224 if (!is_error(ret
) && arg2
9225 && put_user_ual(deathsig
, arg2
)) {
9233 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
9237 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
9239 unlock_user(name
, arg2
, 16);
9244 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
9248 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
9250 unlock_user(name
, arg2
, 0);
9255 /* Most prctl options have no pointer arguments */
9256 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
9260 #ifdef TARGET_NR_arch_prctl
9261 case TARGET_NR_arch_prctl
:
9262 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9263 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
9269 #ifdef TARGET_NR_pread64
9270 case TARGET_NR_pread64
:
9271 if (regpairs_aligned(cpu_env
)) {
9275 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
9277 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
9278 unlock_user(p
, arg2
, ret
);
9280 case TARGET_NR_pwrite64
:
9281 if (regpairs_aligned(cpu_env
)) {
9285 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
9287 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
9288 unlock_user(p
, arg2
, 0);
9291 case TARGET_NR_getcwd
:
9292 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
9294 ret
= get_errno(sys_getcwd1(p
, arg2
));
9295 unlock_user(p
, arg1
, ret
);
9297 case TARGET_NR_capget
:
9298 case TARGET_NR_capset
:
9300 struct target_user_cap_header
*target_header
;
9301 struct target_user_cap_data
*target_data
= NULL
;
9302 struct __user_cap_header_struct header
;
9303 struct __user_cap_data_struct data
[2];
9304 struct __user_cap_data_struct
*dataptr
= NULL
;
9305 int i
, target_datalen
;
9308 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
9311 header
.version
= tswap32(target_header
->version
);
9312 header
.pid
= tswap32(target_header
->pid
);
9314 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
9315 /* Version 2 and up takes pointer to two user_data structs */
9319 target_datalen
= sizeof(*target_data
) * data_items
;
9322 if (num
== TARGET_NR_capget
) {
9323 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
9325 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
9328 unlock_user_struct(target_header
, arg1
, 0);
9332 if (num
== TARGET_NR_capset
) {
9333 for (i
= 0; i
< data_items
; i
++) {
9334 data
[i
].effective
= tswap32(target_data
[i
].effective
);
9335 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
9336 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
9343 if (num
== TARGET_NR_capget
) {
9344 ret
= get_errno(capget(&header
, dataptr
));
9346 ret
= get_errno(capset(&header
, dataptr
));
9349 /* The kernel always updates version for both capget and capset */
9350 target_header
->version
= tswap32(header
.version
);
9351 unlock_user_struct(target_header
, arg1
, 1);
9354 if (num
== TARGET_NR_capget
) {
9355 for (i
= 0; i
< data_items
; i
++) {
9356 target_data
[i
].effective
= tswap32(data
[i
].effective
);
9357 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
9358 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
9360 unlock_user(target_data
, arg2
, target_datalen
);
9362 unlock_user(target_data
, arg2
, 0);
9367 case TARGET_NR_sigaltstack
:
9368 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUArchState
*)cpu_env
));
9371 #ifdef CONFIG_SENDFILE
9372 case TARGET_NR_sendfile
:
9377 ret
= get_user_sal(off
, arg3
);
9378 if (is_error(ret
)) {
9383 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
9384 if (!is_error(ret
) && arg3
) {
9385 abi_long ret2
= put_user_sal(off
, arg3
);
9386 if (is_error(ret2
)) {
9392 #ifdef TARGET_NR_sendfile64
9393 case TARGET_NR_sendfile64
:
9398 ret
= get_user_s64(off
, arg3
);
9399 if (is_error(ret
)) {
9404 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
9405 if (!is_error(ret
) && arg3
) {
9406 abi_long ret2
= put_user_s64(off
, arg3
);
9407 if (is_error(ret2
)) {
9415 case TARGET_NR_sendfile
:
9416 #ifdef TARGET_NR_sendfile64
9417 case TARGET_NR_sendfile64
:
9422 #ifdef TARGET_NR_getpmsg
9423 case TARGET_NR_getpmsg
:
9426 #ifdef TARGET_NR_putpmsg
9427 case TARGET_NR_putpmsg
:
9430 #ifdef TARGET_NR_vfork
9431 case TARGET_NR_vfork
:
9432 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
9436 #ifdef TARGET_NR_ugetrlimit
9437 case TARGET_NR_ugetrlimit
:
9440 int resource
= target_to_host_resource(arg1
);
9441 ret
= get_errno(getrlimit(resource
, &rlim
));
9442 if (!is_error(ret
)) {
9443 struct target_rlimit
*target_rlim
;
9444 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
9446 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
9447 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
9448 unlock_user_struct(target_rlim
, arg2
, 1);
9453 #ifdef TARGET_NR_truncate64
9454 case TARGET_NR_truncate64
:
9455 if (!(p
= lock_user_string(arg1
)))
9457 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
9458 unlock_user(p
, arg1
, 0);
9461 #ifdef TARGET_NR_ftruncate64
9462 case TARGET_NR_ftruncate64
:
9463 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
9466 #ifdef TARGET_NR_stat64
9467 case TARGET_NR_stat64
:
9468 if (!(p
= lock_user_string(arg1
)))
9470 ret
= get_errno(stat(path(p
), &st
));
9471 unlock_user(p
, arg1
, 0);
9473 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
9476 #ifdef TARGET_NR_lstat64
9477 case TARGET_NR_lstat64
:
9478 if (!(p
= lock_user_string(arg1
)))
9480 ret
= get_errno(lstat(path(p
), &st
));
9481 unlock_user(p
, arg1
, 0);
9483 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
9486 #ifdef TARGET_NR_fstat64
9487 case TARGET_NR_fstat64
:
9488 ret
= get_errno(fstat(arg1
, &st
));
9490 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
9493 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
9494 #ifdef TARGET_NR_fstatat64
9495 case TARGET_NR_fstatat64
:
9497 #ifdef TARGET_NR_newfstatat
9498 case TARGET_NR_newfstatat
:
9500 if (!(p
= lock_user_string(arg2
)))
9502 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
9504 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
9507 #ifdef TARGET_NR_lchown
9508 case TARGET_NR_lchown
:
9509 if (!(p
= lock_user_string(arg1
)))
9511 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
9512 unlock_user(p
, arg1
, 0);
9515 #ifdef TARGET_NR_getuid
9516 case TARGET_NR_getuid
:
9517 ret
= get_errno(high2lowuid(getuid()));
9520 #ifdef TARGET_NR_getgid
9521 case TARGET_NR_getgid
:
9522 ret
= get_errno(high2lowgid(getgid()));
9525 #ifdef TARGET_NR_geteuid
9526 case TARGET_NR_geteuid
:
9527 ret
= get_errno(high2lowuid(geteuid()));
9530 #ifdef TARGET_NR_getegid
9531 case TARGET_NR_getegid
:
9532 ret
= get_errno(high2lowgid(getegid()));
9535 case TARGET_NR_setreuid
:
9536 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
9538 case TARGET_NR_setregid
:
9539 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
9541 case TARGET_NR_getgroups
:
9543 int gidsetsize
= arg1
;
9544 target_id
*target_grouplist
;
9548 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
9549 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
9550 if (gidsetsize
== 0)
9552 if (!is_error(ret
)) {
9553 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
9554 if (!target_grouplist
)
9556 for(i
= 0;i
< ret
; i
++)
9557 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
9558 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
9562 case TARGET_NR_setgroups
:
9564 int gidsetsize
= arg1
;
9565 target_id
*target_grouplist
;
9566 gid_t
*grouplist
= NULL
;
9569 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
9570 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
9571 if (!target_grouplist
) {
9572 ret
= -TARGET_EFAULT
;
9575 for (i
= 0; i
< gidsetsize
; i
++) {
9576 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
9578 unlock_user(target_grouplist
, arg2
, 0);
9580 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
9583 case TARGET_NR_fchown
:
9584 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
9586 #if defined(TARGET_NR_fchownat)
9587 case TARGET_NR_fchownat
:
9588 if (!(p
= lock_user_string(arg2
)))
9590 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
9591 low2highgid(arg4
), arg5
));
9592 unlock_user(p
, arg2
, 0);
9595 #ifdef TARGET_NR_setresuid
9596 case TARGET_NR_setresuid
:
9597 ret
= get_errno(sys_setresuid(low2highuid(arg1
),
9599 low2highuid(arg3
)));
9602 #ifdef TARGET_NR_getresuid
9603 case TARGET_NR_getresuid
:
9605 uid_t ruid
, euid
, suid
;
9606 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
9607 if (!is_error(ret
)) {
9608 if (put_user_id(high2lowuid(ruid
), arg1
)
9609 || put_user_id(high2lowuid(euid
), arg2
)
9610 || put_user_id(high2lowuid(suid
), arg3
))
9616 #ifdef TARGET_NR_getresgid
9617 case TARGET_NR_setresgid
:
9618 ret
= get_errno(sys_setresgid(low2highgid(arg1
),
9620 low2highgid(arg3
)));
9623 #ifdef TARGET_NR_getresgid
9624 case TARGET_NR_getresgid
:
9626 gid_t rgid
, egid
, sgid
;
9627 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
9628 if (!is_error(ret
)) {
9629 if (put_user_id(high2lowgid(rgid
), arg1
)
9630 || put_user_id(high2lowgid(egid
), arg2
)
9631 || put_user_id(high2lowgid(sgid
), arg3
))
9637 #ifdef TARGET_NR_chown
9638 case TARGET_NR_chown
:
9639 if (!(p
= lock_user_string(arg1
)))
9641 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
9642 unlock_user(p
, arg1
, 0);
9645 case TARGET_NR_setuid
:
9646 ret
= get_errno(sys_setuid(low2highuid(arg1
)));
9648 case TARGET_NR_setgid
:
9649 ret
= get_errno(sys_setgid(low2highgid(arg1
)));
9651 case TARGET_NR_setfsuid
:
9652 ret
= get_errno(setfsuid(arg1
));
9654 case TARGET_NR_setfsgid
:
9655 ret
= get_errno(setfsgid(arg1
));
9658 #ifdef TARGET_NR_lchown32
9659 case TARGET_NR_lchown32
:
9660 if (!(p
= lock_user_string(arg1
)))
9662 ret
= get_errno(lchown(p
, arg2
, arg3
));
9663 unlock_user(p
, arg1
, 0);
9666 #ifdef TARGET_NR_getuid32
9667 case TARGET_NR_getuid32
:
9668 ret
= get_errno(getuid());
9672 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
9673 /* Alpha specific */
9674 case TARGET_NR_getxuid
:
9678 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
9680 ret
= get_errno(getuid());
9683 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
9684 /* Alpha specific */
9685 case TARGET_NR_getxgid
:
9689 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
9691 ret
= get_errno(getgid());
9694 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
9695 /* Alpha specific */
9696 case TARGET_NR_osf_getsysinfo
:
9697 ret
= -TARGET_EOPNOTSUPP
;
9699 case TARGET_GSI_IEEE_FP_CONTROL
:
9701 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
9703 /* Copied from linux ieee_fpcr_to_swcr. */
9704 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
9705 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
9706 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
9707 | SWCR_TRAP_ENABLE_DZE
9708 | SWCR_TRAP_ENABLE_OVF
);
9709 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
9710 | SWCR_TRAP_ENABLE_INE
);
9711 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
9712 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
9714 if (put_user_u64 (swcr
, arg2
))
9720 /* case GSI_IEEE_STATE_AT_SIGNAL:
9721 -- Not implemented in linux kernel.
9723 -- Retrieves current unaligned access state; not much used.
9725 -- Retrieves implver information; surely not used.
9727 -- Grabs a copy of the HWRPB; surely not used.
9732 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
9733 /* Alpha specific */
9734 case TARGET_NR_osf_setsysinfo
:
9735 ret
= -TARGET_EOPNOTSUPP
;
9737 case TARGET_SSI_IEEE_FP_CONTROL
:
9739 uint64_t swcr
, fpcr
, orig_fpcr
;
9741 if (get_user_u64 (swcr
, arg2
)) {
9744 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
9745 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
9747 /* Copied from linux ieee_swcr_to_fpcr. */
9748 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
9749 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
9750 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
9751 | SWCR_TRAP_ENABLE_DZE
9752 | SWCR_TRAP_ENABLE_OVF
)) << 48;
9753 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
9754 | SWCR_TRAP_ENABLE_INE
)) << 57;
9755 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
9756 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
9758 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
9763 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
9765 uint64_t exc
, fpcr
, orig_fpcr
;
9768 if (get_user_u64(exc
, arg2
)) {
9772 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
9774 /* We only add to the exception status here. */
9775 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
9777 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
9780 /* Old exceptions are not signaled. */
9781 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
9783 /* If any exceptions set by this call,
9784 and are unmasked, send a signal. */
9786 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
9787 si_code
= TARGET_FPE_FLTRES
;
9789 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
9790 si_code
= TARGET_FPE_FLTUND
;
9792 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
9793 si_code
= TARGET_FPE_FLTOVF
;
9795 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
9796 si_code
= TARGET_FPE_FLTDIV
;
9798 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
9799 si_code
= TARGET_FPE_FLTINV
;
9802 target_siginfo_t info
;
9803 info
.si_signo
= SIGFPE
;
9805 info
.si_code
= si_code
;
9806 info
._sifields
._sigfault
._addr
9807 = ((CPUArchState
*)cpu_env
)->pc
;
9808 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
9813 /* case SSI_NVPAIRS:
9814 -- Used with SSIN_UACPROC to enable unaligned accesses.
9815 case SSI_IEEE_STATE_AT_SIGNAL:
9816 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
9817 -- Not implemented in linux kernel
9822 #ifdef TARGET_NR_osf_sigprocmask
9823 /* Alpha specific. */
9824 case TARGET_NR_osf_sigprocmask
:
9828 sigset_t set
, oldset
;
9831 case TARGET_SIG_BLOCK
:
9834 case TARGET_SIG_UNBLOCK
:
9837 case TARGET_SIG_SETMASK
:
9841 ret
= -TARGET_EINVAL
;
9845 target_to_host_old_sigset(&set
, &mask
);
9846 ret
= do_sigprocmask(how
, &set
, &oldset
);
9848 host_to_target_old_sigset(&mask
, &oldset
);
9855 #ifdef TARGET_NR_getgid32
9856 case TARGET_NR_getgid32
:
9857 ret
= get_errno(getgid());
9860 #ifdef TARGET_NR_geteuid32
9861 case TARGET_NR_geteuid32
:
9862 ret
= get_errno(geteuid());
9865 #ifdef TARGET_NR_getegid32
9866 case TARGET_NR_getegid32
:
9867 ret
= get_errno(getegid());
9870 #ifdef TARGET_NR_setreuid32
9871 case TARGET_NR_setreuid32
:
9872 ret
= get_errno(setreuid(arg1
, arg2
));
9875 #ifdef TARGET_NR_setregid32
9876 case TARGET_NR_setregid32
:
9877 ret
= get_errno(setregid(arg1
, arg2
));
9880 #ifdef TARGET_NR_getgroups32
9881 case TARGET_NR_getgroups32
:
9883 int gidsetsize
= arg1
;
9884 uint32_t *target_grouplist
;
9888 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
9889 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
9890 if (gidsetsize
== 0)
9892 if (!is_error(ret
)) {
9893 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
9894 if (!target_grouplist
) {
9895 ret
= -TARGET_EFAULT
;
9898 for(i
= 0;i
< ret
; i
++)
9899 target_grouplist
[i
] = tswap32(grouplist
[i
]);
9900 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
9905 #ifdef TARGET_NR_setgroups32
9906 case TARGET_NR_setgroups32
:
9908 int gidsetsize
= arg1
;
9909 uint32_t *target_grouplist
;
9913 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
9914 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
9915 if (!target_grouplist
) {
9916 ret
= -TARGET_EFAULT
;
9919 for(i
= 0;i
< gidsetsize
; i
++)
9920 grouplist
[i
] = tswap32(target_grouplist
[i
]);
9921 unlock_user(target_grouplist
, arg2
, 0);
9922 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
9926 #ifdef TARGET_NR_fchown32
9927 case TARGET_NR_fchown32
:
9928 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
9931 #ifdef TARGET_NR_setresuid32
9932 case TARGET_NR_setresuid32
:
9933 ret
= get_errno(sys_setresuid(arg1
, arg2
, arg3
));
9936 #ifdef TARGET_NR_getresuid32
9937 case TARGET_NR_getresuid32
:
9939 uid_t ruid
, euid
, suid
;
9940 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
9941 if (!is_error(ret
)) {
9942 if (put_user_u32(ruid
, arg1
)
9943 || put_user_u32(euid
, arg2
)
9944 || put_user_u32(suid
, arg3
))
9950 #ifdef TARGET_NR_setresgid32
9951 case TARGET_NR_setresgid32
:
9952 ret
= get_errno(sys_setresgid(arg1
, arg2
, arg3
));
9955 #ifdef TARGET_NR_getresgid32
9956 case TARGET_NR_getresgid32
:
9958 gid_t rgid
, egid
, sgid
;
9959 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
9960 if (!is_error(ret
)) {
9961 if (put_user_u32(rgid
, arg1
)
9962 || put_user_u32(egid
, arg2
)
9963 || put_user_u32(sgid
, arg3
))
9969 #ifdef TARGET_NR_chown32
9970 case TARGET_NR_chown32
:
9971 if (!(p
= lock_user_string(arg1
)))
9973 ret
= get_errno(chown(p
, arg2
, arg3
));
9974 unlock_user(p
, arg1
, 0);
9977 #ifdef TARGET_NR_setuid32
9978 case TARGET_NR_setuid32
:
9979 ret
= get_errno(sys_setuid(arg1
));
9982 #ifdef TARGET_NR_setgid32
9983 case TARGET_NR_setgid32
:
9984 ret
= get_errno(sys_setgid(arg1
));
9987 #ifdef TARGET_NR_setfsuid32
9988 case TARGET_NR_setfsuid32
:
9989 ret
= get_errno(setfsuid(arg1
));
9992 #ifdef TARGET_NR_setfsgid32
9993 case TARGET_NR_setfsgid32
:
9994 ret
= get_errno(setfsgid(arg1
));
9998 case TARGET_NR_pivot_root
:
10000 #ifdef TARGET_NR_mincore
10001 case TARGET_NR_mincore
:
10004 ret
= -TARGET_EFAULT
;
10005 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
10007 if (!(p
= lock_user_string(arg3
)))
10009 ret
= get_errno(mincore(a
, arg2
, p
));
10010 unlock_user(p
, arg3
, ret
);
10012 unlock_user(a
, arg1
, 0);
10016 #ifdef TARGET_NR_arm_fadvise64_64
10017 case TARGET_NR_arm_fadvise64_64
:
10018 /* arm_fadvise64_64 looks like fadvise64_64 but
10019 * with different argument order: fd, advice, offset, len
10020 * rather than the usual fd, offset, len, advice.
10021 * Note that offset and len are both 64-bit so appear as
10022 * pairs of 32-bit registers.
10024 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
10025 target_offset64(arg5
, arg6
), arg2
);
10026 ret
= -host_to_target_errno(ret
);
10030 #if TARGET_ABI_BITS == 32
10032 #ifdef TARGET_NR_fadvise64_64
10033 case TARGET_NR_fadvise64_64
:
10034 /* 6 args: fd, offset (high, low), len (high, low), advice */
10035 if (regpairs_aligned(cpu_env
)) {
10036 /* offset is in (3,4), len in (5,6) and advice in 7 */
10043 ret
= -host_to_target_errno(posix_fadvise(arg1
,
10044 target_offset64(arg2
, arg3
),
10045 target_offset64(arg4
, arg5
),
10050 #ifdef TARGET_NR_fadvise64
10051 case TARGET_NR_fadvise64
:
10052 /* 5 args: fd, offset (high, low), len, advice */
10053 if (regpairs_aligned(cpu_env
)) {
10054 /* offset is in (3,4), len in 5 and advice in 6 */
10060 ret
= -host_to_target_errno(posix_fadvise(arg1
,
10061 target_offset64(arg2
, arg3
),
10066 #else /* not a 32-bit ABI */
10067 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10068 #ifdef TARGET_NR_fadvise64_64
10069 case TARGET_NR_fadvise64_64
:
10071 #ifdef TARGET_NR_fadvise64
10072 case TARGET_NR_fadvise64
:
10074 #ifdef TARGET_S390X
10076 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
10077 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
10078 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
10079 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
10083 ret
= -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
10086 #endif /* end of 64-bit ABI fadvise handling */
10088 #ifdef TARGET_NR_madvise
10089 case TARGET_NR_madvise
:
10090 /* A straight passthrough may not be safe because qemu sometimes
10091 turns private file-backed mappings into anonymous mappings.
10092 This will break MADV_DONTNEED.
10093 This is a hint, so ignoring and returning success is ok. */
10094 ret
= get_errno(0);
10097 #if TARGET_ABI_BITS == 32
10098 case TARGET_NR_fcntl64
:
10102 struct target_flock64
*target_fl
;
10104 struct target_eabi_flock64
*target_efl
;
10107 cmd
= target_to_host_fcntl_cmd(arg2
);
10108 if (cmd
== -TARGET_EINVAL
) {
10114 case TARGET_F_GETLK64
:
10116 if (((CPUARMState
*)cpu_env
)->eabi
) {
10117 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
10119 fl
.l_type
= tswap16(target_efl
->l_type
);
10120 fl
.l_whence
= tswap16(target_efl
->l_whence
);
10121 fl
.l_start
= tswap64(target_efl
->l_start
);
10122 fl
.l_len
= tswap64(target_efl
->l_len
);
10123 fl
.l_pid
= tswap32(target_efl
->l_pid
);
10124 unlock_user_struct(target_efl
, arg3
, 0);
10128 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
10130 fl
.l_type
= tswap16(target_fl
->l_type
);
10131 fl
.l_whence
= tswap16(target_fl
->l_whence
);
10132 fl
.l_start
= tswap64(target_fl
->l_start
);
10133 fl
.l_len
= tswap64(target_fl
->l_len
);
10134 fl
.l_pid
= tswap32(target_fl
->l_pid
);
10135 unlock_user_struct(target_fl
, arg3
, 0);
10137 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
10140 if (((CPUARMState
*)cpu_env
)->eabi
) {
10141 if (!lock_user_struct(VERIFY_WRITE
, target_efl
, arg3
, 0))
10143 target_efl
->l_type
= tswap16(fl
.l_type
);
10144 target_efl
->l_whence
= tswap16(fl
.l_whence
);
10145 target_efl
->l_start
= tswap64(fl
.l_start
);
10146 target_efl
->l_len
= tswap64(fl
.l_len
);
10147 target_efl
->l_pid
= tswap32(fl
.l_pid
);
10148 unlock_user_struct(target_efl
, arg3
, 1);
10152 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg3
, 0))
10154 target_fl
->l_type
= tswap16(fl
.l_type
);
10155 target_fl
->l_whence
= tswap16(fl
.l_whence
);
10156 target_fl
->l_start
= tswap64(fl
.l_start
);
10157 target_fl
->l_len
= tswap64(fl
.l_len
);
10158 target_fl
->l_pid
= tswap32(fl
.l_pid
);
10159 unlock_user_struct(target_fl
, arg3
, 1);
10164 case TARGET_F_SETLK64
:
10165 case TARGET_F_SETLKW64
:
10167 if (((CPUARMState
*)cpu_env
)->eabi
) {
10168 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
10170 fl
.l_type
= tswap16(target_efl
->l_type
);
10171 fl
.l_whence
= tswap16(target_efl
->l_whence
);
10172 fl
.l_start
= tswap64(target_efl
->l_start
);
10173 fl
.l_len
= tswap64(target_efl
->l_len
);
10174 fl
.l_pid
= tswap32(target_efl
->l_pid
);
10175 unlock_user_struct(target_efl
, arg3
, 0);
10179 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
10181 fl
.l_type
= tswap16(target_fl
->l_type
);
10182 fl
.l_whence
= tswap16(target_fl
->l_whence
);
10183 fl
.l_start
= tswap64(target_fl
->l_start
);
10184 fl
.l_len
= tswap64(target_fl
->l_len
);
10185 fl
.l_pid
= tswap32(target_fl
->l_pid
);
10186 unlock_user_struct(target_fl
, arg3
, 0);
10188 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
10191 ret
= do_fcntl(arg1
, arg2
, arg3
);
10197 #ifdef TARGET_NR_cacheflush
10198 case TARGET_NR_cacheflush
:
10199 /* self-modifying code is handled automatically, so nothing needed */
10203 #ifdef TARGET_NR_security
10204 case TARGET_NR_security
:
10205 goto unimplemented
;
10207 #ifdef TARGET_NR_getpagesize
10208 case TARGET_NR_getpagesize
:
10209 ret
= TARGET_PAGE_SIZE
;
10212 case TARGET_NR_gettid
:
10213 ret
= get_errno(gettid());
10215 #ifdef TARGET_NR_readahead
10216 case TARGET_NR_readahead
:
10217 #if TARGET_ABI_BITS == 32
10218 if (regpairs_aligned(cpu_env
)) {
10223 ret
= get_errno(readahead(arg1
, ((off64_t
)arg3
<< 32) | arg2
, arg4
));
10225 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
10230 #ifdef TARGET_NR_setxattr
10231 case TARGET_NR_listxattr
:
10232 case TARGET_NR_llistxattr
:
10236 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10238 ret
= -TARGET_EFAULT
;
10242 p
= lock_user_string(arg1
);
10244 if (num
== TARGET_NR_listxattr
) {
10245 ret
= get_errno(listxattr(p
, b
, arg3
));
10247 ret
= get_errno(llistxattr(p
, b
, arg3
));
10250 ret
= -TARGET_EFAULT
;
10252 unlock_user(p
, arg1
, 0);
10253 unlock_user(b
, arg2
, arg3
);
10256 case TARGET_NR_flistxattr
:
10260 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10262 ret
= -TARGET_EFAULT
;
10266 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
10267 unlock_user(b
, arg2
, arg3
);
10270 case TARGET_NR_setxattr
:
10271 case TARGET_NR_lsetxattr
:
10273 void *p
, *n
, *v
= 0;
10275 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
10277 ret
= -TARGET_EFAULT
;
10281 p
= lock_user_string(arg1
);
10282 n
= lock_user_string(arg2
);
10284 if (num
== TARGET_NR_setxattr
) {
10285 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
10287 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
10290 ret
= -TARGET_EFAULT
;
10292 unlock_user(p
, arg1
, 0);
10293 unlock_user(n
, arg2
, 0);
10294 unlock_user(v
, arg3
, 0);
10297 case TARGET_NR_fsetxattr
:
10301 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
10303 ret
= -TARGET_EFAULT
;
10307 n
= lock_user_string(arg2
);
10309 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
10311 ret
= -TARGET_EFAULT
;
10313 unlock_user(n
, arg2
, 0);
10314 unlock_user(v
, arg3
, 0);
10317 case TARGET_NR_getxattr
:
10318 case TARGET_NR_lgetxattr
:
10320 void *p
, *n
, *v
= 0;
10322 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
10324 ret
= -TARGET_EFAULT
;
10328 p
= lock_user_string(arg1
);
10329 n
= lock_user_string(arg2
);
10331 if (num
== TARGET_NR_getxattr
) {
10332 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
10334 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
10337 ret
= -TARGET_EFAULT
;
10339 unlock_user(p
, arg1
, 0);
10340 unlock_user(n
, arg2
, 0);
10341 unlock_user(v
, arg3
, arg4
);
10344 case TARGET_NR_fgetxattr
:
10348 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
10350 ret
= -TARGET_EFAULT
;
10354 n
= lock_user_string(arg2
);
10356 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
10358 ret
= -TARGET_EFAULT
;
10360 unlock_user(n
, arg2
, 0);
10361 unlock_user(v
, arg3
, arg4
);
10364 case TARGET_NR_removexattr
:
10365 case TARGET_NR_lremovexattr
:
10368 p
= lock_user_string(arg1
);
10369 n
= lock_user_string(arg2
);
10371 if (num
== TARGET_NR_removexattr
) {
10372 ret
= get_errno(removexattr(p
, n
));
10374 ret
= get_errno(lremovexattr(p
, n
));
10377 ret
= -TARGET_EFAULT
;
10379 unlock_user(p
, arg1
, 0);
10380 unlock_user(n
, arg2
, 0);
10383 case TARGET_NR_fremovexattr
:
10386 n
= lock_user_string(arg2
);
10388 ret
= get_errno(fremovexattr(arg1
, n
));
10390 ret
= -TARGET_EFAULT
;
10392 unlock_user(n
, arg2
, 0);
10396 #endif /* CONFIG_ATTR */
10397 #ifdef TARGET_NR_set_thread_area
10398 case TARGET_NR_set_thread_area
:
10399 #if defined(TARGET_MIPS)
10400 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
10403 #elif defined(TARGET_CRIS)
10405 ret
= -TARGET_EINVAL
;
10407 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
10411 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
10412 ret
= do_set_thread_area(cpu_env
, arg1
);
10414 #elif defined(TARGET_M68K)
10416 TaskState
*ts
= cpu
->opaque
;
10417 ts
->tp_value
= arg1
;
10422 goto unimplemented_nowarn
;
10425 #ifdef TARGET_NR_get_thread_area
10426 case TARGET_NR_get_thread_area
:
10427 #if defined(TARGET_I386) && defined(TARGET_ABI32)
10428 ret
= do_get_thread_area(cpu_env
, arg1
);
10430 #elif defined(TARGET_M68K)
10432 TaskState
*ts
= cpu
->opaque
;
10433 ret
= ts
->tp_value
;
10437 goto unimplemented_nowarn
;
10440 #ifdef TARGET_NR_getdomainname
10441 case TARGET_NR_getdomainname
:
10442 goto unimplemented_nowarn
;
10445 #ifdef TARGET_NR_clock_gettime
10446 case TARGET_NR_clock_gettime
:
10448 struct timespec ts
;
10449 ret
= get_errno(clock_gettime(arg1
, &ts
));
10450 if (!is_error(ret
)) {
10451 host_to_target_timespec(arg2
, &ts
);
10456 #ifdef TARGET_NR_clock_getres
10457 case TARGET_NR_clock_getres
:
10459 struct timespec ts
;
10460 ret
= get_errno(clock_getres(arg1
, &ts
));
10461 if (!is_error(ret
)) {
10462 host_to_target_timespec(arg2
, &ts
);
10467 #ifdef TARGET_NR_clock_nanosleep
10468 case TARGET_NR_clock_nanosleep
:
10470 struct timespec ts
;
10471 target_to_host_timespec(&ts
, arg3
);
10472 ret
= get_errno(clock_nanosleep(arg1
, arg2
, &ts
, arg4
? &ts
: NULL
));
10474 host_to_target_timespec(arg4
, &ts
);
10476 #if defined(TARGET_PPC)
10477 /* clock_nanosleep is odd in that it returns positive errno values.
10478 * On PPC, CR0 bit 3 should be set in such a situation. */
10480 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
10487 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
10488 case TARGET_NR_set_tid_address
:
10489 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
10493 case TARGET_NR_tkill
:
10494 ret
= get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
10497 case TARGET_NR_tgkill
:
10498 ret
= get_errno(safe_tgkill((int)arg1
, (int)arg2
,
10499 target_to_host_signal(arg3
)));
10502 #ifdef TARGET_NR_set_robust_list
10503 case TARGET_NR_set_robust_list
:
10504 case TARGET_NR_get_robust_list
:
10505 /* The ABI for supporting robust futexes has userspace pass
10506 * the kernel a pointer to a linked list which is updated by
10507 * userspace after the syscall; the list is walked by the kernel
10508 * when the thread exits. Since the linked list in QEMU guest
10509 * memory isn't a valid linked list for the host and we have
10510 * no way to reliably intercept the thread-death event, we can't
10511 * support these. Silently return ENOSYS so that guest userspace
10512 * falls back to a non-robust futex implementation (which should
10513 * be OK except in the corner case of the guest crashing while
10514 * holding a mutex that is shared with another process via
10517 goto unimplemented_nowarn
;
10520 #if defined(TARGET_NR_utimensat)
10521 case TARGET_NR_utimensat
:
10523 struct timespec
*tsp
, ts
[2];
10527 target_to_host_timespec(ts
, arg3
);
10528 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
10532 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
10534 if (!(p
= lock_user_string(arg2
))) {
10535 ret
= -TARGET_EFAULT
;
10538 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
10539 unlock_user(p
, arg2
, 0);
10544 case TARGET_NR_futex
:
10545 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
10547 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
10548 case TARGET_NR_inotify_init
:
10549 ret
= get_errno(sys_inotify_init());
10552 #ifdef CONFIG_INOTIFY1
10553 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
10554 case TARGET_NR_inotify_init1
:
10555 ret
= get_errno(sys_inotify_init1(arg1
));
10559 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
10560 case TARGET_NR_inotify_add_watch
:
10561 p
= lock_user_string(arg2
);
10562 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
10563 unlock_user(p
, arg2
, 0);
10566 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
10567 case TARGET_NR_inotify_rm_watch
:
10568 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
10572 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
10573 case TARGET_NR_mq_open
:
10575 struct mq_attr posix_mq_attr
, *attrp
;
10577 p
= lock_user_string(arg1
- 1);
10579 copy_from_user_mq_attr (&posix_mq_attr
, arg4
);
10580 attrp
= &posix_mq_attr
;
10584 ret
= get_errno(mq_open(p
, arg2
, arg3
, attrp
));
10585 unlock_user (p
, arg1
, 0);
10589 case TARGET_NR_mq_unlink
:
10590 p
= lock_user_string(arg1
- 1);
10591 ret
= get_errno(mq_unlink(p
));
10592 unlock_user (p
, arg1
, 0);
10595 case TARGET_NR_mq_timedsend
:
10597 struct timespec ts
;
10599 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
10601 target_to_host_timespec(&ts
, arg5
);
10602 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
10603 host_to_target_timespec(arg5
, &ts
);
10605 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
10607 unlock_user (p
, arg2
, arg3
);
10611 case TARGET_NR_mq_timedreceive
:
10613 struct timespec ts
;
10616 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
10618 target_to_host_timespec(&ts
, arg5
);
10619 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
10621 host_to_target_timespec(arg5
, &ts
);
10623 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
10626 unlock_user (p
, arg2
, arg3
);
10628 put_user_u32(prio
, arg4
);
10632 /* Not implemented for now... */
10633 /* case TARGET_NR_mq_notify: */
10636 case TARGET_NR_mq_getsetattr
:
10638 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
10641 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
10642 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
10645 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
10646 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
10653 #ifdef CONFIG_SPLICE
10654 #ifdef TARGET_NR_tee
10655 case TARGET_NR_tee
:
10657 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
10661 #ifdef TARGET_NR_splice
10662 case TARGET_NR_splice
:
10664 loff_t loff_in
, loff_out
;
10665 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
10667 if (get_user_u64(loff_in
, arg2
)) {
10670 ploff_in
= &loff_in
;
10673 if (get_user_u64(loff_out
, arg4
)) {
10676 ploff_out
= &loff_out
;
10678 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
10680 if (put_user_u64(loff_in
, arg2
)) {
10685 if (put_user_u64(loff_out
, arg4
)) {
10692 #ifdef TARGET_NR_vmsplice
10693 case TARGET_NR_vmsplice
:
10695 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10697 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
10698 unlock_iovec(vec
, arg2
, arg3
, 0);
10700 ret
= -host_to_target_errno(errno
);
10705 #endif /* CONFIG_SPLICE */
10706 #ifdef CONFIG_EVENTFD
10707 #if defined(TARGET_NR_eventfd)
10708 case TARGET_NR_eventfd
:
10709 ret
= get_errno(eventfd(arg1
, 0));
10710 fd_trans_unregister(ret
);
10713 #if defined(TARGET_NR_eventfd2)
10714 case TARGET_NR_eventfd2
:
10716 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
10717 if (arg2
& TARGET_O_NONBLOCK
) {
10718 host_flags
|= O_NONBLOCK
;
10720 if (arg2
& TARGET_O_CLOEXEC
) {
10721 host_flags
|= O_CLOEXEC
;
10723 ret
= get_errno(eventfd(arg1
, host_flags
));
10724 fd_trans_unregister(ret
);
10728 #endif /* CONFIG_EVENTFD */
10729 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
10730 case TARGET_NR_fallocate
:
10731 #if TARGET_ABI_BITS == 32
10732 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
10733 target_offset64(arg5
, arg6
)));
10735 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
10739 #if defined(CONFIG_SYNC_FILE_RANGE)
10740 #if defined(TARGET_NR_sync_file_range)
10741 case TARGET_NR_sync_file_range
:
10742 #if TARGET_ABI_BITS == 32
10743 #if defined(TARGET_MIPS)
10744 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
10745 target_offset64(arg5
, arg6
), arg7
));
10747 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
10748 target_offset64(arg4
, arg5
), arg6
));
10749 #endif /* !TARGET_MIPS */
10751 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
10755 #if defined(TARGET_NR_sync_file_range2)
10756 case TARGET_NR_sync_file_range2
:
10757 /* This is like sync_file_range but the arguments are reordered */
10758 #if TARGET_ABI_BITS == 32
10759 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
10760 target_offset64(arg5
, arg6
), arg2
));
10762 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
10767 #if defined(TARGET_NR_signalfd4)
10768 case TARGET_NR_signalfd4
:
10769 ret
= do_signalfd4(arg1
, arg2
, arg4
);
10772 #if defined(TARGET_NR_signalfd)
10773 case TARGET_NR_signalfd
:
10774 ret
= do_signalfd4(arg1
, arg2
, 0);
10777 #if defined(CONFIG_EPOLL)
10778 #if defined(TARGET_NR_epoll_create)
10779 case TARGET_NR_epoll_create
:
10780 ret
= get_errno(epoll_create(arg1
));
10783 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
10784 case TARGET_NR_epoll_create1
:
10785 ret
= get_errno(epoll_create1(arg1
));
10788 #if defined(TARGET_NR_epoll_ctl)
10789 case TARGET_NR_epoll_ctl
:
10791 struct epoll_event ep
;
10792 struct epoll_event
*epp
= 0;
10794 struct target_epoll_event
*target_ep
;
10795 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
10798 ep
.events
= tswap32(target_ep
->events
);
10799 /* The epoll_data_t union is just opaque data to the kernel,
10800 * so we transfer all 64 bits across and need not worry what
10801 * actual data type it is.
10803 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
10804 unlock_user_struct(target_ep
, arg4
, 0);
10807 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
10812 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
10813 #define IMPLEMENT_EPOLL_PWAIT
10815 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
10816 #if defined(TARGET_NR_epoll_wait)
10817 case TARGET_NR_epoll_wait
:
10819 #if defined(IMPLEMENT_EPOLL_PWAIT)
10820 case TARGET_NR_epoll_pwait
:
10823 struct target_epoll_event
*target_ep
;
10824 struct epoll_event
*ep
;
10826 int maxevents
= arg3
;
10827 int timeout
= arg4
;
10829 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
10830 maxevents
* sizeof(struct target_epoll_event
), 1);
10835 ep
= alloca(maxevents
* sizeof(struct epoll_event
));
10838 #if defined(IMPLEMENT_EPOLL_PWAIT)
10839 case TARGET_NR_epoll_pwait
:
10841 target_sigset_t
*target_set
;
10842 sigset_t _set
, *set
= &_set
;
10845 target_set
= lock_user(VERIFY_READ
, arg5
,
10846 sizeof(target_sigset_t
), 1);
10848 unlock_user(target_ep
, arg2
, 0);
10851 target_to_host_sigset(set
, target_set
);
10852 unlock_user(target_set
, arg5
, 0);
10857 ret
= get_errno(epoll_pwait(epfd
, ep
, maxevents
, timeout
, set
));
10861 #if defined(TARGET_NR_epoll_wait)
10862 case TARGET_NR_epoll_wait
:
10863 ret
= get_errno(epoll_wait(epfd
, ep
, maxevents
, timeout
));
10867 ret
= -TARGET_ENOSYS
;
10869 if (!is_error(ret
)) {
10871 for (i
= 0; i
< ret
; i
++) {
10872 target_ep
[i
].events
= tswap32(ep
[i
].events
);
10873 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
10876 unlock_user(target_ep
, arg2
, ret
* sizeof(struct target_epoll_event
));
10881 #ifdef TARGET_NR_prlimit64
10882 case TARGET_NR_prlimit64
:
10884 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
10885 struct target_rlimit64
*target_rnew
, *target_rold
;
10886 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
10887 int resource
= target_to_host_resource(arg2
);
10889 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
10892 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
10893 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
10894 unlock_user_struct(target_rnew
, arg3
, 0);
10898 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
10899 if (!is_error(ret
) && arg4
) {
10900 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
10903 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
10904 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
10905 unlock_user_struct(target_rold
, arg4
, 1);
10910 #ifdef TARGET_NR_gethostname
10911 case TARGET_NR_gethostname
:
10913 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
10915 ret
= get_errno(gethostname(name
, arg2
));
10916 unlock_user(name
, arg1
, arg2
);
10918 ret
= -TARGET_EFAULT
;
10923 #ifdef TARGET_NR_atomic_cmpxchg_32
10924 case TARGET_NR_atomic_cmpxchg_32
:
10926 /* should use start_exclusive from main.c */
10927 abi_ulong mem_value
;
10928 if (get_user_u32(mem_value
, arg6
)) {
10929 target_siginfo_t info
;
10930 info
.si_signo
= SIGSEGV
;
10932 info
.si_code
= TARGET_SEGV_MAPERR
;
10933 info
._sifields
._sigfault
._addr
= arg6
;
10934 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
10938 if (mem_value
== arg2
)
10939 put_user_u32(arg1
, arg6
);
10944 #ifdef TARGET_NR_atomic_barrier
10945 case TARGET_NR_atomic_barrier
:
10947 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
10953 #ifdef TARGET_NR_timer_create
10954 case TARGET_NR_timer_create
:
10956 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
10958 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
10961 int timer_index
= next_free_host_timer();
10963 if (timer_index
< 0) {
10964 ret
= -TARGET_EAGAIN
;
10966 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
10969 phost_sevp
= &host_sevp
;
10970 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
10976 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
10980 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
10989 #ifdef TARGET_NR_timer_settime
10990 case TARGET_NR_timer_settime
:
10992 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
10993 * struct itimerspec * old_value */
10994 target_timer_t timerid
= get_timer_id(arg1
);
10998 } else if (arg3
== 0) {
10999 ret
= -TARGET_EINVAL
;
11001 timer_t htimer
= g_posix_timers
[timerid
];
11002 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
11004 target_to_host_itimerspec(&hspec_new
, arg3
);
11006 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
11007 host_to_target_itimerspec(arg2
, &hspec_old
);
11013 #ifdef TARGET_NR_timer_gettime
11014 case TARGET_NR_timer_gettime
:
11016 /* args: timer_t timerid, struct itimerspec *curr_value */
11017 target_timer_t timerid
= get_timer_id(arg1
);
11021 } else if (!arg2
) {
11022 ret
= -TARGET_EFAULT
;
11024 timer_t htimer
= g_posix_timers
[timerid
];
11025 struct itimerspec hspec
;
11026 ret
= get_errno(timer_gettime(htimer
, &hspec
));
11028 if (host_to_target_itimerspec(arg2
, &hspec
)) {
11029 ret
= -TARGET_EFAULT
;
11036 #ifdef TARGET_NR_timer_getoverrun
11037 case TARGET_NR_timer_getoverrun
:
11039 /* args: timer_t timerid */
11040 target_timer_t timerid
= get_timer_id(arg1
);
11045 timer_t htimer
= g_posix_timers
[timerid
];
11046 ret
= get_errno(timer_getoverrun(htimer
));
11048 fd_trans_unregister(ret
);
11053 #ifdef TARGET_NR_timer_delete
11054 case TARGET_NR_timer_delete
:
11056 /* args: timer_t timerid */
11057 target_timer_t timerid
= get_timer_id(arg1
);
11062 timer_t htimer
= g_posix_timers
[timerid
];
11063 ret
= get_errno(timer_delete(htimer
));
11064 g_posix_timers
[timerid
] = 0;
11070 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11071 case TARGET_NR_timerfd_create
:
11072 ret
= get_errno(timerfd_create(arg1
,
11073 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
11077 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11078 case TARGET_NR_timerfd_gettime
:
11080 struct itimerspec its_curr
;
11082 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
11084 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
11091 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11092 case TARGET_NR_timerfd_settime
:
11094 struct itimerspec its_new
, its_old
, *p_new
;
11097 if (target_to_host_itimerspec(&its_new
, arg3
)) {
11105 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
11107 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
11114 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11115 case TARGET_NR_ioprio_get
:
11116 ret
= get_errno(ioprio_get(arg1
, arg2
));
11120 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11121 case TARGET_NR_ioprio_set
:
11122 ret
= get_errno(ioprio_set(arg1
, arg2
, arg3
));
11126 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11127 case TARGET_NR_setns
:
11128 ret
= get_errno(setns(arg1
, arg2
));
11131 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11132 case TARGET_NR_unshare
:
11133 ret
= get_errno(unshare(arg1
));
11139 gemu_log("qemu: Unsupported syscall: %d\n", num
);
11140 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
11141 unimplemented_nowarn
:
11143 ret
= -TARGET_ENOSYS
;
11148 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
11151 print_syscall_ret(num
, ret
);
11154 ret
= -TARGET_EFAULT
;