4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
29 #include <sys/mount.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
37 #include <linux/capability.h>
40 int __clone2(int (*fn
)(void *), void *child_stack_base
,
41 size_t stack_size
, int flags
, void *arg
, ...);
43 #include <sys/socket.h>
47 #include <sys/times.h>
50 #include <sys/statfs.h>
52 #include <sys/sysinfo.h>
53 #include <sys/signalfd.h>
54 //#include <sys/user.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <linux/wireless.h>
58 #include <linux/icmp.h>
59 #include "qemu-common.h"
61 #include <sys/timerfd.h>
67 #include <sys/eventfd.h>
70 #include <sys/epoll.h>
73 #include "qemu/xattr.h"
75 #ifdef CONFIG_SENDFILE
76 #include <sys/sendfile.h>
79 #define termios host_termios
80 #define winsize host_winsize
81 #define termio host_termio
82 #define sgttyb host_sgttyb /* same as target */
83 #define tchars host_tchars /* same as target */
84 #define ltchars host_ltchars /* same as target */
86 #include <linux/termios.h>
87 #include <linux/unistd.h>
88 #include <linux/cdrom.h>
89 #include <linux/hdreg.h>
90 #include <linux/soundcard.h>
92 #include <linux/mtio.h>
94 #if defined(CONFIG_FIEMAP)
95 #include <linux/fiemap.h>
99 #include <linux/dm-ioctl.h>
100 #include <linux/reboot.h>
101 #include <linux/route.h>
102 #include <linux/filter.h>
103 #include <linux/blkpg.h>
104 #include <linux/netlink.h>
105 #ifdef CONFIG_RTNETLINK
106 #include <linux/rtnetlink.h>
108 #include <linux/audit.h>
109 #include "linux_loop.h"
114 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
115 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
118 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
119 * once. This exercises the codepaths for restart.
121 //#define DEBUG_ERESTARTSYS
123 //#include <linux/msdos_fs.h>
124 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
125 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
127 /* This is the size of the host kernel's sigset_t, needed where we make
128 * direct system calls that take a sigset_t pointer and a size.
130 #define SIGSET_T_SIZE (_NSIG / 8)
140 #define _syscall0(type,name) \
141 static type name (void) \
143 return syscall(__NR_##name); \
146 #define _syscall1(type,name,type1,arg1) \
147 static type name (type1 arg1) \
149 return syscall(__NR_##name, arg1); \
152 #define _syscall2(type,name,type1,arg1,type2,arg2) \
153 static type name (type1 arg1,type2 arg2) \
155 return syscall(__NR_##name, arg1, arg2); \
158 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
159 static type name (type1 arg1,type2 arg2,type3 arg3) \
161 return syscall(__NR_##name, arg1, arg2, arg3); \
164 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
165 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
167 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
170 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
172 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
174 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
178 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
179 type5,arg5,type6,arg6) \
180 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
183 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
187 #define __NR_sys_uname __NR_uname
188 #define __NR_sys_getcwd1 __NR_getcwd
189 #define __NR_sys_getdents __NR_getdents
190 #define __NR_sys_getdents64 __NR_getdents64
191 #define __NR_sys_getpriority __NR_getpriority
192 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
193 #define __NR_sys_syslog __NR_syslog
194 #define __NR_sys_futex __NR_futex
195 #define __NR_sys_inotify_init __NR_inotify_init
196 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
197 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
199 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
201 #define __NR__llseek __NR_lseek
204 /* Newer kernel ports have llseek() instead of _llseek() */
205 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
206 #define TARGET_NR__llseek TARGET_NR_llseek
210 _syscall0(int, gettid
)
212 /* This is a replacement for the host gettid() and must return a host
214 static int gettid(void) {
218 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
219 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
221 #if !defined(__NR_getdents) || \
222 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
223 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
225 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
226 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
227 loff_t
*, res
, uint
, wh
);
229 _syscall3(int,sys_rt_sigqueueinfo
,int,pid
,int,sig
,siginfo_t
*,uinfo
)
230 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
231 #ifdef __NR_exit_group
232 _syscall1(int,exit_group
,int,error_code
)
234 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
235 _syscall1(int,set_tid_address
,int *,tidptr
)
237 #if defined(TARGET_NR_futex) && defined(__NR_futex)
238 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
239 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
241 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
242 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
243 unsigned long *, user_mask_ptr
);
244 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
245 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
246 unsigned long *, user_mask_ptr
);
247 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
249 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
250 struct __user_cap_data_struct
*, data
);
251 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
252 struct __user_cap_data_struct
*, data
);
253 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
254 _syscall2(int, ioprio_get
, int, which
, int, who
)
256 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
257 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
259 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
260 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
263 static bitmask_transtbl fcntl_flags_tbl
[] = {
264 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
265 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
266 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
267 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
268 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
269 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
270 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
271 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
272 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
273 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
274 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
275 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
276 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
277 #if defined(O_DIRECT)
278 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
280 #if defined(O_NOATIME)
281 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
283 #if defined(O_CLOEXEC)
284 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
287 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
289 /* Don't terminate the list prematurely on 64-bit host+guest. */
290 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
291 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
296 typedef abi_long (*TargetFdDataFunc
)(void *, size_t);
297 typedef abi_long (*TargetFdAddrFunc
)(void *, abi_ulong
, socklen_t
);
298 typedef struct TargetFdTrans
{
299 TargetFdDataFunc host_to_target_data
;
300 TargetFdDataFunc target_to_host_data
;
301 TargetFdAddrFunc target_to_host_addr
;
304 static TargetFdTrans
**target_fd_trans
;
306 static unsigned int target_fd_max
;
308 static TargetFdDataFunc
fd_trans_target_to_host_data(int fd
)
310 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
311 return target_fd_trans
[fd
]->target_to_host_data
;
316 static TargetFdDataFunc
fd_trans_host_to_target_data(int fd
)
318 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
319 return target_fd_trans
[fd
]->host_to_target_data
;
324 static TargetFdAddrFunc
fd_trans_target_to_host_addr(int fd
)
326 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
327 return target_fd_trans
[fd
]->target_to_host_addr
;
332 static void fd_trans_register(int fd
, TargetFdTrans
*trans
)
336 if (fd
>= target_fd_max
) {
337 oldmax
= target_fd_max
;
338 target_fd_max
= ((fd
>> 6) + 1) << 6; /* by slice of 64 entries */
339 target_fd_trans
= g_renew(TargetFdTrans
*,
340 target_fd_trans
, target_fd_max
);
341 memset((void *)(target_fd_trans
+ oldmax
), 0,
342 (target_fd_max
- oldmax
) * sizeof(TargetFdTrans
*));
344 target_fd_trans
[fd
] = trans
;
347 static void fd_trans_unregister(int fd
)
349 if (fd
>= 0 && fd
< target_fd_max
) {
350 target_fd_trans
[fd
] = NULL
;
354 static void fd_trans_dup(int oldfd
, int newfd
)
356 fd_trans_unregister(newfd
);
357 if (oldfd
< target_fd_max
&& target_fd_trans
[oldfd
]) {
358 fd_trans_register(newfd
, target_fd_trans
[oldfd
]);
/* getcwd() wrapper following the kernel syscall convention: on success
 * return the path length including the trailing NUL, on failure return
 * -1 with errno already set by getcwd(). */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return -1;
    }
    return strlen(buf) + 1;
}
371 #ifdef TARGET_NR_utimensat
372 #ifdef CONFIG_UTIMENSAT
/* utimensat() via the libc wrappers.  A NULL pathname means "operate on
 * the file referred to by dirfd itself", which libc exposes as futimens(). */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    if (pathname == NULL) {
        return futimens(dirfd, times);
    }
    return utimensat(dirfd, pathname, times, flags);
}
381 #elif defined(__NR_utimensat)
382 #define __NR_sys_utimensat __NR_utimensat
383 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
384 const struct timespec
*,tsp
,int,flags
)
386 static int sys_utimensat(int dirfd
, const char *pathname
,
387 const struct timespec times
[2], int flags
)
393 #endif /* TARGET_NR_utimensat */
395 #ifdef CONFIG_INOTIFY
396 #include <sys/inotify.h>
398 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin wrapper so callers use a single sys_* naming scheme. */
static int sys_inotify_init(void)
{
    return inotify_init();
}
404 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Thin wrapper so callers use a single sys_* naming scheme. */
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return inotify_add_watch(fd, pathname, mask);
}
410 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Thin wrapper so callers use a single sys_* naming scheme. */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return inotify_rm_watch(fd, wd);
}
416 #ifdef CONFIG_INOTIFY1
417 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Thin wrapper so callers use a single sys_* naming scheme. */
static int sys_inotify_init1(int flags)
{
    return inotify_init1(flags);
}
425 /* Userspace can usually survive runtime without inotify */
426 #undef TARGET_NR_inotify_init
427 #undef TARGET_NR_inotify_init1
428 #undef TARGET_NR_inotify_add_watch
429 #undef TARGET_NR_inotify_rm_watch
430 #endif /* CONFIG_INOTIFY */
432 #if defined(TARGET_NR_ppoll)
434 # define __NR_ppoll -1
436 #define __NR_sys_ppoll __NR_ppoll
437 _syscall5(int, sys_ppoll
, struct pollfd
*, fds
, nfds_t
, nfds
,
438 struct timespec
*, timeout
, const sigset_t
*, sigmask
,
442 #if defined(TARGET_NR_prlimit64)
443 #ifndef __NR_prlimit64
444 # define __NR_prlimit64 -1
446 #define __NR_sys_prlimit64 __NR_prlimit64
447 /* The glibc rlimit structure may not be that used by the underlying syscall */
448 struct host_rlimit64
{
452 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
453 const struct host_rlimit64
*, new_limit
,
454 struct host_rlimit64
*, old_limit
)
458 #if defined(TARGET_NR_timer_create)
459 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
460 static timer_t g_posix_timers
[32] = { 0, } ;
462 static inline int next_free_host_timer(void)
465 /* FIXME: Does finding the next free slot require a lock? */
466 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
467 if (g_posix_timers
[k
] == 0) {
468 g_posix_timers
[k
] = (timer_t
) 1;
476 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
478 static inline int regpairs_aligned(void *cpu_env
) {
479 return ((((CPUARMState
*)cpu_env
)->eabi
) == 1) ;
481 #elif defined(TARGET_MIPS)
482 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
483 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
484 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
485 * of registers which translates to the same as ARM/MIPS, because we start with
487 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
489 static inline int regpairs_aligned(void *cpu_env
) { return 0; }
492 #define ERRNO_TABLE_SIZE 1200
494 /* target_to_host_errno_table[] is initialized from
495 * host_to_target_errno_table[] in syscall_init(). */
496 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
500 * This list is the union of errno values overridden in asm-<arch>/errno.h
501 * minus the errnos that are not actually generic to all archs.
503 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
504 [EAGAIN
] = TARGET_EAGAIN
,
505 [EIDRM
] = TARGET_EIDRM
,
506 [ECHRNG
] = TARGET_ECHRNG
,
507 [EL2NSYNC
] = TARGET_EL2NSYNC
,
508 [EL3HLT
] = TARGET_EL3HLT
,
509 [EL3RST
] = TARGET_EL3RST
,
510 [ELNRNG
] = TARGET_ELNRNG
,
511 [EUNATCH
] = TARGET_EUNATCH
,
512 [ENOCSI
] = TARGET_ENOCSI
,
513 [EL2HLT
] = TARGET_EL2HLT
,
514 [EDEADLK
] = TARGET_EDEADLK
,
515 [ENOLCK
] = TARGET_ENOLCK
,
516 [EBADE
] = TARGET_EBADE
,
517 [EBADR
] = TARGET_EBADR
,
518 [EXFULL
] = TARGET_EXFULL
,
519 [ENOANO
] = TARGET_ENOANO
,
520 [EBADRQC
] = TARGET_EBADRQC
,
521 [EBADSLT
] = TARGET_EBADSLT
,
522 [EBFONT
] = TARGET_EBFONT
,
523 [ENOSTR
] = TARGET_ENOSTR
,
524 [ENODATA
] = TARGET_ENODATA
,
525 [ETIME
] = TARGET_ETIME
,
526 [ENOSR
] = TARGET_ENOSR
,
527 [ENONET
] = TARGET_ENONET
,
528 [ENOPKG
] = TARGET_ENOPKG
,
529 [EREMOTE
] = TARGET_EREMOTE
,
530 [ENOLINK
] = TARGET_ENOLINK
,
531 [EADV
] = TARGET_EADV
,
532 [ESRMNT
] = TARGET_ESRMNT
,
533 [ECOMM
] = TARGET_ECOMM
,
534 [EPROTO
] = TARGET_EPROTO
,
535 [EDOTDOT
] = TARGET_EDOTDOT
,
536 [EMULTIHOP
] = TARGET_EMULTIHOP
,
537 [EBADMSG
] = TARGET_EBADMSG
,
538 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
539 [EOVERFLOW
] = TARGET_EOVERFLOW
,
540 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
541 [EBADFD
] = TARGET_EBADFD
,
542 [EREMCHG
] = TARGET_EREMCHG
,
543 [ELIBACC
] = TARGET_ELIBACC
,
544 [ELIBBAD
] = TARGET_ELIBBAD
,
545 [ELIBSCN
] = TARGET_ELIBSCN
,
546 [ELIBMAX
] = TARGET_ELIBMAX
,
547 [ELIBEXEC
] = TARGET_ELIBEXEC
,
548 [EILSEQ
] = TARGET_EILSEQ
,
549 [ENOSYS
] = TARGET_ENOSYS
,
550 [ELOOP
] = TARGET_ELOOP
,
551 [ERESTART
] = TARGET_ERESTART
,
552 [ESTRPIPE
] = TARGET_ESTRPIPE
,
553 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
554 [EUSERS
] = TARGET_EUSERS
,
555 [ENOTSOCK
] = TARGET_ENOTSOCK
,
556 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
557 [EMSGSIZE
] = TARGET_EMSGSIZE
,
558 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
559 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
560 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
561 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
562 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
563 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
564 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
565 [EADDRINUSE
] = TARGET_EADDRINUSE
,
566 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
567 [ENETDOWN
] = TARGET_ENETDOWN
,
568 [ENETUNREACH
] = TARGET_ENETUNREACH
,
569 [ENETRESET
] = TARGET_ENETRESET
,
570 [ECONNABORTED
] = TARGET_ECONNABORTED
,
571 [ECONNRESET
] = TARGET_ECONNRESET
,
572 [ENOBUFS
] = TARGET_ENOBUFS
,
573 [EISCONN
] = TARGET_EISCONN
,
574 [ENOTCONN
] = TARGET_ENOTCONN
,
575 [EUCLEAN
] = TARGET_EUCLEAN
,
576 [ENOTNAM
] = TARGET_ENOTNAM
,
577 [ENAVAIL
] = TARGET_ENAVAIL
,
578 [EISNAM
] = TARGET_EISNAM
,
579 [EREMOTEIO
] = TARGET_EREMOTEIO
,
580 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
581 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
582 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
583 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
584 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
585 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
586 [EALREADY
] = TARGET_EALREADY
,
587 [EINPROGRESS
] = TARGET_EINPROGRESS
,
588 [ESTALE
] = TARGET_ESTALE
,
589 [ECANCELED
] = TARGET_ECANCELED
,
590 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
591 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
593 [ENOKEY
] = TARGET_ENOKEY
,
596 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
599 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
602 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
605 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
607 #ifdef ENOTRECOVERABLE
608 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
612 static inline int host_to_target_errno(int err
)
614 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
615 host_to_target_errno_table
[err
]) {
616 return host_to_target_errno_table
[err
];
621 static inline int target_to_host_errno(int err
)
623 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
624 target_to_host_errno_table
[err
]) {
625 return target_to_host_errno_table
[err
];
630 static inline abi_long
get_errno(abi_long ret
)
633 return -host_to_target_errno(errno
);
638 static inline int is_error(abi_long ret
)
640 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
643 char *target_strerror(int err
)
645 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
648 return strerror(target_to_host_errno(err
));
651 #define safe_syscall0(type, name) \
652 static type safe_##name(void) \
654 return safe_syscall(__NR_##name); \
657 #define safe_syscall1(type, name, type1, arg1) \
658 static type safe_##name(type1 arg1) \
660 return safe_syscall(__NR_##name, arg1); \
663 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
664 static type safe_##name(type1 arg1, type2 arg2) \
666 return safe_syscall(__NR_##name, arg1, arg2); \
669 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
670 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
672 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
675 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
677 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
679 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
682 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
683 type4, arg4, type5, arg5) \
684 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
687 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
690 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
691 type4, arg4, type5, arg5, type6, arg6) \
692 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
693 type5 arg5, type6 arg6) \
695 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
698 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
699 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
700 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
701 int, flags
, mode_t
, mode
)
702 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
703 struct rusage
*, rusage
)
704 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
705 int, options
, struct rusage
*, rusage
)
706 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
707 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
708 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
709 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
710 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
711 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
712 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
713 safe_syscall2(int, tkill
, int, tid
, int, sig
)
714 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
715 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
716 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
717 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
719 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
720 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
721 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
722 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
723 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
724 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
725 safe_syscall2(int, flock
, int, fd
, int, operation
)
726 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
727 const struct timespec
*, uts
, size_t, sigsetsize
)
728 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
729 struct timespec
*, rem
)
730 #ifdef TARGET_NR_clock_nanosleep
731 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
732 const struct timespec
*, req
, struct timespec
*, rem
)
735 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
737 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
738 long, msgtype
, int, flags
)
740 /* This host kernel architecture uses a single ipc syscall; fake up
741 * wrappers for the sub-operations to hide this implementation detail.
742 * Annoyingly we can't include linux/ipc.h to get the constant definitions
743 * for the call parameter because some structs in there conflict with the
744 * sys/ipc.h ones. So we just define them here, and rely on them being
745 * the same for all host architectures.
749 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
751 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
752 void *, ptr
, long, fifth
)
753 static int safe_msgsnd(int msgid
, const void *msgp
, size_t sz
, int flags
)
755 return safe_ipc(Q_IPCCALL(0, Q_MSGSND
), msgid
, sz
, flags
, (void *)msgp
, 0);
757 static int safe_msgrcv(int msgid
, void *msgp
, size_t sz
, long type
, int flags
)
759 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV
), msgid
, sz
, flags
, msgp
, type
);
762 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
763 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
764 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
765 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
766 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
769 static inline int host_to_target_sock_type(int host_type
)
773 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
775 target_type
= TARGET_SOCK_DGRAM
;
778 target_type
= TARGET_SOCK_STREAM
;
781 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
785 #if defined(SOCK_CLOEXEC)
786 if (host_type
& SOCK_CLOEXEC
) {
787 target_type
|= TARGET_SOCK_CLOEXEC
;
791 #if defined(SOCK_NONBLOCK)
792 if (host_type
& SOCK_NONBLOCK
) {
793 target_type
|= TARGET_SOCK_NONBLOCK
;
800 static abi_ulong target_brk
;
801 static abi_ulong target_original_brk
;
802 static abi_ulong brk_page
;
804 void target_set_brk(abi_ulong new_brk
)
806 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
807 brk_page
= HOST_PAGE_ALIGN(target_brk
);
810 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
811 #define DEBUGF_BRK(message, args...)
813 /* do_brk() must return target values and target errnos. */
814 abi_long
do_brk(abi_ulong new_brk
)
816 abi_long mapped_addr
;
819 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
822 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
825 if (new_brk
< target_original_brk
) {
826 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
831 /* If the new brk is less than the highest page reserved to the
832 * target heap allocation, set it and we're almost done... */
833 if (new_brk
<= brk_page
) {
834 /* Heap contents are initialized to zero, as for anonymous
836 if (new_brk
> target_brk
) {
837 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
839 target_brk
= new_brk
;
840 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
844 /* We need to allocate more memory after the brk... Note that
845 * we don't use MAP_FIXED because that will map over the top of
846 * any existing mapping (like the one with the host libc or qemu
847 * itself); instead we treat "mapped but at wrong address" as
848 * a failure and unmap again.
850 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
851 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
852 PROT_READ
|PROT_WRITE
,
853 MAP_ANON
|MAP_PRIVATE
, 0, 0));
855 if (mapped_addr
== brk_page
) {
856 /* Heap contents are initialized to zero, as for anonymous
857 * mapped pages. Technically the new pages are already
858 * initialized to zero since they *are* anonymous mapped
859 * pages, however we have to take care with the contents that
860 * come from the remaining part of the previous page: it may
861 * contains garbage data due to a previous heap usage (grown
863 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
865 target_brk
= new_brk
;
866 brk_page
= HOST_PAGE_ALIGN(target_brk
);
867 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
870 } else if (mapped_addr
!= -1) {
871 /* Mapped but at wrong address, meaning there wasn't actually
872 * enough space for this brk.
874 target_munmap(mapped_addr
, new_alloc_size
);
876 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
879 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
882 #if defined(TARGET_ALPHA)
883 /* We (partially) emulate OSF/1 on Alpha, which requires we
884 return a proper errno, not an unchanged brk value. */
885 return -TARGET_ENOMEM
;
887 /* For everything else, return the previous break. */
891 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
892 abi_ulong target_fds_addr
,
896 abi_ulong b
, *target_fds
;
898 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
899 if (!(target_fds
= lock_user(VERIFY_READ
,
901 sizeof(abi_ulong
) * nw
,
903 return -TARGET_EFAULT
;
907 for (i
= 0; i
< nw
; i
++) {
908 /* grab the abi_ulong */
909 __get_user(b
, &target_fds
[i
]);
910 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
911 /* check the bit inside the abi_ulong */
918 unlock_user(target_fds
, target_fds_addr
, 0);
923 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
924 abi_ulong target_fds_addr
,
927 if (target_fds_addr
) {
928 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
929 return -TARGET_EFAULT
;
937 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
943 abi_ulong
*target_fds
;
945 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
946 if (!(target_fds
= lock_user(VERIFY_WRITE
,
948 sizeof(abi_ulong
) * nw
,
950 return -TARGET_EFAULT
;
953 for (i
= 0; i
< nw
; i
++) {
955 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
956 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
959 __put_user(v
, &target_fds
[i
]);
962 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
967 #if defined(__alpha__)
973 static inline abi_long
host_to_target_clock_t(long ticks
)
975 #if HOST_HZ == TARGET_HZ
978 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
982 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
983 const struct rusage
*rusage
)
985 struct target_rusage
*target_rusage
;
987 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
988 return -TARGET_EFAULT
;
989 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
990 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
991 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
992 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
993 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
994 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
995 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
996 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
997 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
998 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
999 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1000 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1001 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1002 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1003 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1004 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1005 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1006 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1007 unlock_user_struct(target_rusage
, target_addr
, 1);
1012 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1014 abi_ulong target_rlim_swap
;
1017 target_rlim_swap
= tswapal(target_rlim
);
1018 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1019 return RLIM_INFINITY
;
1021 result
= target_rlim_swap
;
1022 if (target_rlim_swap
!= (rlim_t
)result
)
1023 return RLIM_INFINITY
;
1028 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1030 abi_ulong target_rlim_swap
;
1033 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1034 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1036 target_rlim_swap
= rlim
;
1037 result
= tswapal(target_rlim_swap
);
1042 static inline int target_to_host_resource(int code
)
1045 case TARGET_RLIMIT_AS
:
1047 case TARGET_RLIMIT_CORE
:
1049 case TARGET_RLIMIT_CPU
:
1051 case TARGET_RLIMIT_DATA
:
1053 case TARGET_RLIMIT_FSIZE
:
1054 return RLIMIT_FSIZE
;
1055 case TARGET_RLIMIT_LOCKS
:
1056 return RLIMIT_LOCKS
;
1057 case TARGET_RLIMIT_MEMLOCK
:
1058 return RLIMIT_MEMLOCK
;
1059 case TARGET_RLIMIT_MSGQUEUE
:
1060 return RLIMIT_MSGQUEUE
;
1061 case TARGET_RLIMIT_NICE
:
1063 case TARGET_RLIMIT_NOFILE
:
1064 return RLIMIT_NOFILE
;
1065 case TARGET_RLIMIT_NPROC
:
1066 return RLIMIT_NPROC
;
1067 case TARGET_RLIMIT_RSS
:
1069 case TARGET_RLIMIT_RTPRIO
:
1070 return RLIMIT_RTPRIO
;
1071 case TARGET_RLIMIT_SIGPENDING
:
1072 return RLIMIT_SIGPENDING
;
1073 case TARGET_RLIMIT_STACK
:
1074 return RLIMIT_STACK
;
1080 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1081 abi_ulong target_tv_addr
)
1083 struct target_timeval
*target_tv
;
1085 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
1086 return -TARGET_EFAULT
;
1088 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1089 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1091 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1096 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1097 const struct timeval
*tv
)
1099 struct target_timeval
*target_tv
;
1101 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
1102 return -TARGET_EFAULT
;
1104 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1105 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1107 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1112 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1113 abi_ulong target_tz_addr
)
1115 struct target_timezone
*target_tz
;
1117 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1118 return -TARGET_EFAULT
;
1121 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1122 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1124 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1129 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1132 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1133 abi_ulong target_mq_attr_addr
)
1135 struct target_mq_attr
*target_mq_attr
;
1137 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1138 target_mq_attr_addr
, 1))
1139 return -TARGET_EFAULT
;
1141 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1142 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1143 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1144 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1146 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1151 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1152 const struct mq_attr
*attr
)
1154 struct target_mq_attr
*target_mq_attr
;
1156 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1157 target_mq_attr_addr
, 0))
1158 return -TARGET_EFAULT
;
1160 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1161 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1162 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1163 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1165 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1171 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1172 /* do_select() must return target values and target errnos. */
1173 static abi_long
do_select(int n
,
1174 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1175 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1177 fd_set rfds
, wfds
, efds
;
1178 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1180 struct timespec ts
, *ts_ptr
;
1183 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1187 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1191 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1196 if (target_tv_addr
) {
1197 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1198 return -TARGET_EFAULT
;
1199 ts
.tv_sec
= tv
.tv_sec
;
1200 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1206 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1209 if (!is_error(ret
)) {
1210 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1211 return -TARGET_EFAULT
;
1212 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1213 return -TARGET_EFAULT
;
1214 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1215 return -TARGET_EFAULT
;
1217 if (target_tv_addr
) {
1218 tv
.tv_sec
= ts
.tv_sec
;
1219 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1220 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1221 return -TARGET_EFAULT
;
1230 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1233 return pipe2(host_pipe
, flags
);
1239 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1240 int flags
, int is_pipe2
)
1244 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1247 return get_errno(ret
);
1249 /* Several targets have special calling conventions for the original
1250 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1252 #if defined(TARGET_ALPHA)
1253 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1254 return host_pipe
[0];
1255 #elif defined(TARGET_MIPS)
1256 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1257 return host_pipe
[0];
1258 #elif defined(TARGET_SH4)
1259 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1260 return host_pipe
[0];
1261 #elif defined(TARGET_SPARC)
1262 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1263 return host_pipe
[0];
1267 if (put_user_s32(host_pipe
[0], pipedes
)
1268 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1269 return -TARGET_EFAULT
;
1270 return get_errno(ret
);
1273 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1274 abi_ulong target_addr
,
1277 struct target_ip_mreqn
*target_smreqn
;
1279 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1281 return -TARGET_EFAULT
;
1282 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1283 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1284 if (len
== sizeof(struct target_ip_mreqn
))
1285 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1286 unlock_user(target_smreqn
, target_addr
, 0);
1291 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1292 abi_ulong target_addr
,
1295 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1296 sa_family_t sa_family
;
1297 struct target_sockaddr
*target_saddr
;
1299 if (fd_trans_target_to_host_addr(fd
)) {
1300 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1303 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1305 return -TARGET_EFAULT
;
1307 sa_family
= tswap16(target_saddr
->sa_family
);
1309 /* Oops. The caller might send a incomplete sun_path; sun_path
1310 * must be terminated by \0 (see the manual page), but
1311 * unfortunately it is quite common to specify sockaddr_un
1312 * length as "strlen(x->sun_path)" while it should be
1313 * "strlen(...) + 1". We'll fix that here if needed.
1314 * Linux kernel has a similar feature.
1317 if (sa_family
== AF_UNIX
) {
1318 if (len
< unix_maxlen
&& len
> 0) {
1319 char *cp
= (char*)target_saddr
;
1321 if ( cp
[len
-1] && !cp
[len
] )
1324 if (len
> unix_maxlen
)
1328 memcpy(addr
, target_saddr
, len
);
1329 addr
->sa_family
= sa_family
;
1330 if (sa_family
== AF_NETLINK
) {
1331 struct sockaddr_nl
*nladdr
;
1333 nladdr
= (struct sockaddr_nl
*)addr
;
1334 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1335 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1336 } else if (sa_family
== AF_PACKET
) {
1337 struct target_sockaddr_ll
*lladdr
;
1339 lladdr
= (struct target_sockaddr_ll
*)addr
;
1340 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1341 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1343 unlock_user(target_saddr
, target_addr
, 0);
1348 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1349 struct sockaddr
*addr
,
1352 struct target_sockaddr
*target_saddr
;
1354 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1356 return -TARGET_EFAULT
;
1357 memcpy(target_saddr
, addr
, len
);
1358 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1359 if (addr
->sa_family
== AF_NETLINK
) {
1360 struct sockaddr_nl
*target_nl
= (struct sockaddr_nl
*)target_saddr
;
1361 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1362 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1364 unlock_user(target_saddr
, target_addr
, len
);
1369 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1370 struct target_msghdr
*target_msgh
)
1372 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1373 abi_long msg_controllen
;
1374 abi_ulong target_cmsg_addr
;
1375 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1376 socklen_t space
= 0;
1378 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1379 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1381 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1382 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1383 target_cmsg_start
= target_cmsg
;
1385 return -TARGET_EFAULT
;
1387 while (cmsg
&& target_cmsg
) {
1388 void *data
= CMSG_DATA(cmsg
);
1389 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1391 int len
= tswapal(target_cmsg
->cmsg_len
)
1392 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
1394 space
+= CMSG_SPACE(len
);
1395 if (space
> msgh
->msg_controllen
) {
1396 space
-= CMSG_SPACE(len
);
1397 /* This is a QEMU bug, since we allocated the payload
1398 * area ourselves (unlike overflow in host-to-target
1399 * conversion, which is just the guest giving us a buffer
1400 * that's too small). It can't happen for the payload types
1401 * we currently support; if it becomes an issue in future
1402 * we would need to improve our allocation strategy to
1403 * something more intelligent than "twice the size of the
1404 * target buffer we're reading from".
1406 gemu_log("Host cmsg overflow\n");
1410 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1411 cmsg
->cmsg_level
= SOL_SOCKET
;
1413 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1415 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1416 cmsg
->cmsg_len
= CMSG_LEN(len
);
1418 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1419 int *fd
= (int *)data
;
1420 int *target_fd
= (int *)target_data
;
1421 int i
, numfds
= len
/ sizeof(int);
1423 for (i
= 0; i
< numfds
; i
++) {
1424 __get_user(fd
[i
], target_fd
+ i
);
1426 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1427 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1428 struct ucred
*cred
= (struct ucred
*)data
;
1429 struct target_ucred
*target_cred
=
1430 (struct target_ucred
*)target_data
;
1432 __get_user(cred
->pid
, &target_cred
->pid
);
1433 __get_user(cred
->uid
, &target_cred
->uid
);
1434 __get_user(cred
->gid
, &target_cred
->gid
);
1436 gemu_log("Unsupported ancillary data: %d/%d\n",
1437 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1438 memcpy(data
, target_data
, len
);
1441 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1442 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1445 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1447 msgh
->msg_controllen
= space
;
1451 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1452 struct msghdr
*msgh
)
1454 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1455 abi_long msg_controllen
;
1456 abi_ulong target_cmsg_addr
;
1457 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1458 socklen_t space
= 0;
1460 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1461 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1463 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1464 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1465 target_cmsg_start
= target_cmsg
;
1467 return -TARGET_EFAULT
;
1469 while (cmsg
&& target_cmsg
) {
1470 void *data
= CMSG_DATA(cmsg
);
1471 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1473 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
1474 int tgt_len
, tgt_space
;
1476 /* We never copy a half-header but may copy half-data;
1477 * this is Linux's behaviour in put_cmsg(). Note that
1478 * truncation here is a guest problem (which we report
1479 * to the guest via the CTRUNC bit), unlike truncation
1480 * in target_to_host_cmsg, which is a QEMU bug.
1482 if (msg_controllen
< sizeof(struct cmsghdr
)) {
1483 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1487 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1488 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1490 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1492 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1494 tgt_len
= TARGET_CMSG_LEN(len
);
1496 /* Payload types which need a different size of payload on
1497 * the target must adjust tgt_len here.
1499 switch (cmsg
->cmsg_level
) {
1501 switch (cmsg
->cmsg_type
) {
1503 tgt_len
= sizeof(struct target_timeval
);
1512 if (msg_controllen
< tgt_len
) {
1513 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1514 tgt_len
= msg_controllen
;
1517 /* We must now copy-and-convert len bytes of payload
1518 * into tgt_len bytes of destination space. Bear in mind
1519 * that in both source and destination we may be dealing
1520 * with a truncated value!
1522 switch (cmsg
->cmsg_level
) {
1524 switch (cmsg
->cmsg_type
) {
1527 int *fd
= (int *)data
;
1528 int *target_fd
= (int *)target_data
;
1529 int i
, numfds
= tgt_len
/ sizeof(int);
1531 for (i
= 0; i
< numfds
; i
++) {
1532 __put_user(fd
[i
], target_fd
+ i
);
1538 struct timeval
*tv
= (struct timeval
*)data
;
1539 struct target_timeval
*target_tv
=
1540 (struct target_timeval
*)target_data
;
1542 if (len
!= sizeof(struct timeval
) ||
1543 tgt_len
!= sizeof(struct target_timeval
)) {
1547 /* copy struct timeval to target */
1548 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1549 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1552 case SCM_CREDENTIALS
:
1554 struct ucred
*cred
= (struct ucred
*)data
;
1555 struct target_ucred
*target_cred
=
1556 (struct target_ucred
*)target_data
;
1558 __put_user(cred
->pid
, &target_cred
->pid
);
1559 __put_user(cred
->uid
, &target_cred
->uid
);
1560 __put_user(cred
->gid
, &target_cred
->gid
);
1570 gemu_log("Unsupported ancillary data: %d/%d\n",
1571 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1572 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1573 if (tgt_len
> len
) {
1574 memset(target_data
+ len
, 0, tgt_len
- len
);
1578 target_cmsg
->cmsg_len
= tswapal(tgt_len
);
1579 tgt_space
= TARGET_CMSG_SPACE(len
);
1580 if (msg_controllen
< tgt_space
) {
1581 tgt_space
= msg_controllen
;
1583 msg_controllen
-= tgt_space
;
1585 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1586 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1589 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1591 target_msgh
->msg_controllen
= tswapal(space
);
/* Byte-swap every field of a netlink message header in place. */
static void tswap_nlmsghdr(struct nlmsghdr *nlh)
{
    nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
    nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
    nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
    nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
    nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
}
1604 static abi_long
host_to_target_for_each_nlmsg(struct nlmsghdr
*nlh
,
1606 abi_long (*host_to_target_nlmsg
)
1607 (struct nlmsghdr
*))
1612 while (len
> sizeof(struct nlmsghdr
)) {
1614 nlmsg_len
= nlh
->nlmsg_len
;
1615 if (nlmsg_len
< sizeof(struct nlmsghdr
) ||
1620 switch (nlh
->nlmsg_type
) {
1622 tswap_nlmsghdr(nlh
);
1628 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
1629 e
->error
= tswap32(e
->error
);
1630 tswap_nlmsghdr(&e
->msg
);
1631 tswap_nlmsghdr(nlh
);
1635 ret
= host_to_target_nlmsg(nlh
);
1637 tswap_nlmsghdr(nlh
);
1642 tswap_nlmsghdr(nlh
);
1643 len
-= NLMSG_ALIGN(nlmsg_len
);
1644 nlh
= (struct nlmsghdr
*)(((char*)nlh
) + NLMSG_ALIGN(nlmsg_len
));
1649 static abi_long
target_to_host_for_each_nlmsg(struct nlmsghdr
*nlh
,
1651 abi_long (*target_to_host_nlmsg
)
1652 (struct nlmsghdr
*))
1656 while (len
> sizeof(struct nlmsghdr
)) {
1657 if (tswap32(nlh
->nlmsg_len
) < sizeof(struct nlmsghdr
) ||
1658 tswap32(nlh
->nlmsg_len
) > len
) {
1661 tswap_nlmsghdr(nlh
);
1662 switch (nlh
->nlmsg_type
) {
1669 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
1670 e
->error
= tswap32(e
->error
);
1671 tswap_nlmsghdr(&e
->msg
);
1674 ret
= target_to_host_nlmsg(nlh
);
1679 len
-= NLMSG_ALIGN(nlh
->nlmsg_len
);
1680 nlh
= (struct nlmsghdr
*)(((char *)nlh
) + NLMSG_ALIGN(nlh
->nlmsg_len
));
1685 #ifdef CONFIG_RTNETLINK
1686 static abi_long
host_to_target_for_each_rtattr(struct rtattr
*rtattr
,
1688 abi_long (*host_to_target_rtattr
)
1691 unsigned short rta_len
;
1694 while (len
> sizeof(struct rtattr
)) {
1695 rta_len
= rtattr
->rta_len
;
1696 if (rta_len
< sizeof(struct rtattr
) ||
1700 ret
= host_to_target_rtattr(rtattr
);
1701 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
1702 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
1706 len
-= RTA_ALIGN(rta_len
);
1707 rtattr
= (struct rtattr
*)(((char *)rtattr
) + RTA_ALIGN(rta_len
));
1712 static abi_long
host_to_target_data_link_rtattr(struct rtattr
*rtattr
)
1715 struct rtnl_link_stats
*st
;
1716 struct rtnl_link_stats64
*st64
;
1717 struct rtnl_link_ifmap
*map
;
1719 switch (rtattr
->rta_type
) {
1722 case IFLA_BROADCAST
:
1728 case IFLA_OPERSTATE
:
1731 case IFLA_PROTO_DOWN
:
1738 case IFLA_CARRIER_CHANGES
:
1739 case IFLA_NUM_RX_QUEUES
:
1740 case IFLA_NUM_TX_QUEUES
:
1741 case IFLA_PROMISCUITY
:
1743 case IFLA_LINK_NETNSID
:
1747 u32
= RTA_DATA(rtattr
);
1748 *u32
= tswap32(*u32
);
1750 /* struct rtnl_link_stats */
1752 st
= RTA_DATA(rtattr
);
1753 st
->rx_packets
= tswap32(st
->rx_packets
);
1754 st
->tx_packets
= tswap32(st
->tx_packets
);
1755 st
->rx_bytes
= tswap32(st
->rx_bytes
);
1756 st
->tx_bytes
= tswap32(st
->tx_bytes
);
1757 st
->rx_errors
= tswap32(st
->rx_errors
);
1758 st
->tx_errors
= tswap32(st
->tx_errors
);
1759 st
->rx_dropped
= tswap32(st
->rx_dropped
);
1760 st
->tx_dropped
= tswap32(st
->tx_dropped
);
1761 st
->multicast
= tswap32(st
->multicast
);
1762 st
->collisions
= tswap32(st
->collisions
);
1764 /* detailed rx_errors: */
1765 st
->rx_length_errors
= tswap32(st
->rx_length_errors
);
1766 st
->rx_over_errors
= tswap32(st
->rx_over_errors
);
1767 st
->rx_crc_errors
= tswap32(st
->rx_crc_errors
);
1768 st
->rx_frame_errors
= tswap32(st
->rx_frame_errors
);
1769 st
->rx_fifo_errors
= tswap32(st
->rx_fifo_errors
);
1770 st
->rx_missed_errors
= tswap32(st
->rx_missed_errors
);
1772 /* detailed tx_errors */
1773 st
->tx_aborted_errors
= tswap32(st
->tx_aborted_errors
);
1774 st
->tx_carrier_errors
= tswap32(st
->tx_carrier_errors
);
1775 st
->tx_fifo_errors
= tswap32(st
->tx_fifo_errors
);
1776 st
->tx_heartbeat_errors
= tswap32(st
->tx_heartbeat_errors
);
1777 st
->tx_window_errors
= tswap32(st
->tx_window_errors
);
1780 st
->rx_compressed
= tswap32(st
->rx_compressed
);
1781 st
->tx_compressed
= tswap32(st
->tx_compressed
);
1783 /* struct rtnl_link_stats64 */
1785 st64
= RTA_DATA(rtattr
);
1786 st64
->rx_packets
= tswap64(st64
->rx_packets
);
1787 st64
->tx_packets
= tswap64(st64
->tx_packets
);
1788 st64
->rx_bytes
= tswap64(st64
->rx_bytes
);
1789 st64
->tx_bytes
= tswap64(st64
->tx_bytes
);
1790 st64
->rx_errors
= tswap64(st64
->rx_errors
);
1791 st64
->tx_errors
= tswap64(st64
->tx_errors
);
1792 st64
->rx_dropped
= tswap64(st64
->rx_dropped
);
1793 st64
->tx_dropped
= tswap64(st64
->tx_dropped
);
1794 st64
->multicast
= tswap64(st64
->multicast
);
1795 st64
->collisions
= tswap64(st64
->collisions
);
1797 /* detailed rx_errors: */
1798 st64
->rx_length_errors
= tswap64(st64
->rx_length_errors
);
1799 st64
->rx_over_errors
= tswap64(st64
->rx_over_errors
);
1800 st64
->rx_crc_errors
= tswap64(st64
->rx_crc_errors
);
1801 st64
->rx_frame_errors
= tswap64(st64
->rx_frame_errors
);
1802 st64
->rx_fifo_errors
= tswap64(st64
->rx_fifo_errors
);
1803 st64
->rx_missed_errors
= tswap64(st64
->rx_missed_errors
);
1805 /* detailed tx_errors */
1806 st64
->tx_aborted_errors
= tswap64(st64
->tx_aborted_errors
);
1807 st64
->tx_carrier_errors
= tswap64(st64
->tx_carrier_errors
);
1808 st64
->tx_fifo_errors
= tswap64(st64
->tx_fifo_errors
);
1809 st64
->tx_heartbeat_errors
= tswap64(st64
->tx_heartbeat_errors
);
1810 st64
->tx_window_errors
= tswap64(st64
->tx_window_errors
);
1813 st64
->rx_compressed
= tswap64(st64
->rx_compressed
);
1814 st64
->tx_compressed
= tswap64(st64
->tx_compressed
);
1816 /* struct rtnl_link_ifmap */
1818 map
= RTA_DATA(rtattr
);
1819 map
->mem_start
= tswap64(map
->mem_start
);
1820 map
->mem_end
= tswap64(map
->mem_end
);
1821 map
->base_addr
= tswap64(map
->base_addr
);
1822 map
->irq
= tswap16(map
->irq
);
1827 /* FIXME: implement nested type */
1828 gemu_log("Unimplemented nested type %d\n", rtattr
->rta_type
);
1831 gemu_log("Unknown host IFLA type: %d\n", rtattr
->rta_type
);
1837 static abi_long
host_to_target_data_addr_rtattr(struct rtattr
*rtattr
)
1840 struct ifa_cacheinfo
*ci
;
1842 switch (rtattr
->rta_type
) {
1843 /* binary: depends on family type */
1853 u32
= RTA_DATA(rtattr
);
1854 *u32
= tswap32(*u32
);
1856 /* struct ifa_cacheinfo */
1858 ci
= RTA_DATA(rtattr
);
1859 ci
->ifa_prefered
= tswap32(ci
->ifa_prefered
);
1860 ci
->ifa_valid
= tswap32(ci
->ifa_valid
);
1861 ci
->cstamp
= tswap32(ci
->cstamp
);
1862 ci
->tstamp
= tswap32(ci
->tstamp
);
1865 gemu_log("Unknown host IFA type: %d\n", rtattr
->rta_type
);
1871 static abi_long
host_to_target_data_route_rtattr(struct rtattr
*rtattr
)
1874 switch (rtattr
->rta_type
) {
1875 /* binary: depends on family type */
1884 u32
= RTA_DATA(rtattr
);
1885 *u32
= tswap32(*u32
);
1888 gemu_log("Unknown host RTA type: %d\n", rtattr
->rta_type
);
1894 static abi_long
host_to_target_link_rtattr(struct rtattr
*rtattr
,
1895 uint32_t rtattr_len
)
1897 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
1898 host_to_target_data_link_rtattr
);
1901 static abi_long
host_to_target_addr_rtattr(struct rtattr
*rtattr
,
1902 uint32_t rtattr_len
)
1904 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
1905 host_to_target_data_addr_rtattr
);
1908 static abi_long
host_to_target_route_rtattr(struct rtattr
*rtattr
,
1909 uint32_t rtattr_len
)
1911 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
1912 host_to_target_data_route_rtattr
);
1915 static abi_long
host_to_target_data_route(struct nlmsghdr
*nlh
)
1918 struct ifinfomsg
*ifi
;
1919 struct ifaddrmsg
*ifa
;
1922 nlmsg_len
= nlh
->nlmsg_len
;
1923 switch (nlh
->nlmsg_type
) {
1927 ifi
= NLMSG_DATA(nlh
);
1928 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
1929 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
1930 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
1931 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
1932 host_to_target_link_rtattr(IFLA_RTA(ifi
),
1933 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifi
)));
1938 ifa
= NLMSG_DATA(nlh
);
1939 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
1940 host_to_target_addr_rtattr(IFA_RTA(ifa
),
1941 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifa
)));
1946 rtm
= NLMSG_DATA(nlh
);
1947 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
1948 host_to_target_route_rtattr(RTM_RTA(rtm
),
1949 nlmsg_len
- NLMSG_LENGTH(sizeof(*rtm
)));
1952 return -TARGET_EINVAL
;
1957 static inline abi_long
host_to_target_nlmsg_route(struct nlmsghdr
*nlh
,
1960 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_route
);
1963 static abi_long
target_to_host_for_each_rtattr(struct rtattr
*rtattr
,
1965 abi_long (*target_to_host_rtattr
)
1970 while (len
>= sizeof(struct rtattr
)) {
1971 if (tswap16(rtattr
->rta_len
) < sizeof(struct rtattr
) ||
1972 tswap16(rtattr
->rta_len
) > len
) {
1975 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
1976 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
1977 ret
= target_to_host_rtattr(rtattr
);
1981 len
-= RTA_ALIGN(rtattr
->rta_len
);
1982 rtattr
= (struct rtattr
*)(((char *)rtattr
) +
1983 RTA_ALIGN(rtattr
->rta_len
));
1988 static abi_long
target_to_host_data_link_rtattr(struct rtattr
*rtattr
)
1990 switch (rtattr
->rta_type
) {
1992 gemu_log("Unknown target IFLA type: %d\n", rtattr
->rta_type
);
1998 static abi_long
target_to_host_data_addr_rtattr(struct rtattr
*rtattr
)
2000 switch (rtattr
->rta_type
) {
2001 /* binary: depends on family type */
2006 gemu_log("Unknown target IFA type: %d\n", rtattr
->rta_type
);
2012 static abi_long
target_to_host_data_route_rtattr(struct rtattr
*rtattr
)
2015 switch (rtattr
->rta_type
) {
2016 /* binary: depends on family type */
2023 u32
= RTA_DATA(rtattr
);
2024 *u32
= tswap32(*u32
);
2027 gemu_log("Unknown target RTA type: %d\n", rtattr
->rta_type
);
2033 static void target_to_host_link_rtattr(struct rtattr
*rtattr
,
2034 uint32_t rtattr_len
)
2036 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2037 target_to_host_data_link_rtattr
);
2040 static void target_to_host_addr_rtattr(struct rtattr
*rtattr
,
2041 uint32_t rtattr_len
)
2043 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2044 target_to_host_data_addr_rtattr
);
2047 static void target_to_host_route_rtattr(struct rtattr
*rtattr
,
2048 uint32_t rtattr_len
)
2050 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2051 target_to_host_data_route_rtattr
);
2054 static abi_long
target_to_host_data_route(struct nlmsghdr
*nlh
)
2056 struct ifinfomsg
*ifi
;
2057 struct ifaddrmsg
*ifa
;
2060 switch (nlh
->nlmsg_type
) {
2065 ifi
= NLMSG_DATA(nlh
);
2066 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2067 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2068 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2069 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2070 target_to_host_link_rtattr(IFLA_RTA(ifi
), nlh
->nlmsg_len
-
2071 NLMSG_LENGTH(sizeof(*ifi
)));
2076 ifa
= NLMSG_DATA(nlh
);
2077 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2078 target_to_host_addr_rtattr(IFA_RTA(ifa
), nlh
->nlmsg_len
-
2079 NLMSG_LENGTH(sizeof(*ifa
)));
2085 rtm
= NLMSG_DATA(nlh
);
2086 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2087 target_to_host_route_rtattr(RTM_RTA(rtm
), nlh
->nlmsg_len
-
2088 NLMSG_LENGTH(sizeof(*rtm
)));
2091 return -TARGET_EOPNOTSUPP
;
2096 static abi_long
target_to_host_nlmsg_route(struct nlmsghdr
*nlh
, size_t len
)
2098 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_route
);
2100 #endif /* CONFIG_RTNETLINK */
2102 static abi_long
host_to_target_data_audit(struct nlmsghdr
*nlh
)
2104 switch (nlh
->nlmsg_type
) {
2106 gemu_log("Unknown host audit message type %d\n",
2108 return -TARGET_EINVAL
;
2113 static inline abi_long
host_to_target_nlmsg_audit(struct nlmsghdr
*nlh
,
2116 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_audit
);
2119 static abi_long
target_to_host_data_audit(struct nlmsghdr
*nlh
)
2121 switch (nlh
->nlmsg_type
) {
2123 case AUDIT_FIRST_USER_MSG
... AUDIT_LAST_USER_MSG
:
2124 case AUDIT_FIRST_USER_MSG2
... AUDIT_LAST_USER_MSG2
:
2127 gemu_log("Unknown target audit message type %d\n",
2129 return -TARGET_EINVAL
;
2135 static abi_long
target_to_host_nlmsg_audit(struct nlmsghdr
*nlh
, size_t len
)
2137 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_audit
);
2140 /* do_setsockopt() Must return target values and target errnos. */
2141 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
2142 abi_ulong optval_addr
, socklen_t optlen
)
2146 struct ip_mreqn
*ip_mreq
;
2147 struct ip_mreq_source
*ip_mreq_source
;
2151 /* TCP options all take an 'int' value. */
2152 if (optlen
< sizeof(uint32_t))
2153 return -TARGET_EINVAL
;
2155 if (get_user_u32(val
, optval_addr
))
2156 return -TARGET_EFAULT
;
2157 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2164 case IP_ROUTER_ALERT
:
2168 case IP_MTU_DISCOVER
:
2174 case IP_MULTICAST_TTL
:
2175 case IP_MULTICAST_LOOP
:
2177 if (optlen
>= sizeof(uint32_t)) {
2178 if (get_user_u32(val
, optval_addr
))
2179 return -TARGET_EFAULT
;
2180 } else if (optlen
>= 1) {
2181 if (get_user_u8(val
, optval_addr
))
2182 return -TARGET_EFAULT
;
2184 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2186 case IP_ADD_MEMBERSHIP
:
2187 case IP_DROP_MEMBERSHIP
:
2188 if (optlen
< sizeof (struct target_ip_mreq
) ||
2189 optlen
> sizeof (struct target_ip_mreqn
))
2190 return -TARGET_EINVAL
;
2192 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
2193 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
2194 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
2197 case IP_BLOCK_SOURCE
:
2198 case IP_UNBLOCK_SOURCE
:
2199 case IP_ADD_SOURCE_MEMBERSHIP
:
2200 case IP_DROP_SOURCE_MEMBERSHIP
:
2201 if (optlen
!= sizeof (struct target_ip_mreq_source
))
2202 return -TARGET_EINVAL
;
2204 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2205 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
2206 unlock_user (ip_mreq_source
, optval_addr
, 0);
2215 case IPV6_MTU_DISCOVER
:
2218 case IPV6_RECVPKTINFO
:
2220 if (optlen
< sizeof(uint32_t)) {
2221 return -TARGET_EINVAL
;
2223 if (get_user_u32(val
, optval_addr
)) {
2224 return -TARGET_EFAULT
;
2226 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2227 &val
, sizeof(val
)));
2236 /* struct icmp_filter takes an u32 value */
2237 if (optlen
< sizeof(uint32_t)) {
2238 return -TARGET_EINVAL
;
2241 if (get_user_u32(val
, optval_addr
)) {
2242 return -TARGET_EFAULT
;
2244 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2245 &val
, sizeof(val
)));
2252 case TARGET_SOL_SOCKET
:
2254 case TARGET_SO_RCVTIMEO
:
2258 optname
= SO_RCVTIMEO
;
2261 if (optlen
!= sizeof(struct target_timeval
)) {
2262 return -TARGET_EINVAL
;
2265 if (copy_from_user_timeval(&tv
, optval_addr
)) {
2266 return -TARGET_EFAULT
;
2269 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2273 case TARGET_SO_SNDTIMEO
:
2274 optname
= SO_SNDTIMEO
;
2276 case TARGET_SO_ATTACH_FILTER
:
2278 struct target_sock_fprog
*tfprog
;
2279 struct target_sock_filter
*tfilter
;
2280 struct sock_fprog fprog
;
2281 struct sock_filter
*filter
;
2284 if (optlen
!= sizeof(*tfprog
)) {
2285 return -TARGET_EINVAL
;
2287 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
2288 return -TARGET_EFAULT
;
2290 if (!lock_user_struct(VERIFY_READ
, tfilter
,
2291 tswapal(tfprog
->filter
), 0)) {
2292 unlock_user_struct(tfprog
, optval_addr
, 1);
2293 return -TARGET_EFAULT
;
2296 fprog
.len
= tswap16(tfprog
->len
);
2297 filter
= g_try_new(struct sock_filter
, fprog
.len
);
2298 if (filter
== NULL
) {
2299 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2300 unlock_user_struct(tfprog
, optval_addr
, 1);
2301 return -TARGET_ENOMEM
;
2303 for (i
= 0; i
< fprog
.len
; i
++) {
2304 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2305 filter
[i
].jt
= tfilter
[i
].jt
;
2306 filter
[i
].jf
= tfilter
[i
].jf
;
2307 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2309 fprog
.filter
= filter
;
2311 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2312 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2315 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2316 unlock_user_struct(tfprog
, optval_addr
, 1);
2319 case TARGET_SO_BINDTODEVICE
:
2321 char *dev_ifname
, *addr_ifname
;
2323 if (optlen
> IFNAMSIZ
- 1) {
2324 optlen
= IFNAMSIZ
- 1;
2326 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2328 return -TARGET_EFAULT
;
2330 optname
= SO_BINDTODEVICE
;
2331 addr_ifname
= alloca(IFNAMSIZ
);
2332 memcpy(addr_ifname
, dev_ifname
, optlen
);
2333 addr_ifname
[optlen
] = 0;
2334 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2335 addr_ifname
, optlen
));
2336 unlock_user (dev_ifname
, optval_addr
, 0);
2339 /* Options with 'int' argument. */
2340 case TARGET_SO_DEBUG
:
2343 case TARGET_SO_REUSEADDR
:
2344 optname
= SO_REUSEADDR
;
2346 case TARGET_SO_TYPE
:
2349 case TARGET_SO_ERROR
:
2352 case TARGET_SO_DONTROUTE
:
2353 optname
= SO_DONTROUTE
;
2355 case TARGET_SO_BROADCAST
:
2356 optname
= SO_BROADCAST
;
2358 case TARGET_SO_SNDBUF
:
2359 optname
= SO_SNDBUF
;
2361 case TARGET_SO_SNDBUFFORCE
:
2362 optname
= SO_SNDBUFFORCE
;
2364 case TARGET_SO_RCVBUF
:
2365 optname
= SO_RCVBUF
;
2367 case TARGET_SO_RCVBUFFORCE
:
2368 optname
= SO_RCVBUFFORCE
;
2370 case TARGET_SO_KEEPALIVE
:
2371 optname
= SO_KEEPALIVE
;
2373 case TARGET_SO_OOBINLINE
:
2374 optname
= SO_OOBINLINE
;
2376 case TARGET_SO_NO_CHECK
:
2377 optname
= SO_NO_CHECK
;
2379 case TARGET_SO_PRIORITY
:
2380 optname
= SO_PRIORITY
;
2383 case TARGET_SO_BSDCOMPAT
:
2384 optname
= SO_BSDCOMPAT
;
2387 case TARGET_SO_PASSCRED
:
2388 optname
= SO_PASSCRED
;
2390 case TARGET_SO_PASSSEC
:
2391 optname
= SO_PASSSEC
;
2393 case TARGET_SO_TIMESTAMP
:
2394 optname
= SO_TIMESTAMP
;
2396 case TARGET_SO_RCVLOWAT
:
2397 optname
= SO_RCVLOWAT
;
2403 if (optlen
< sizeof(uint32_t))
2404 return -TARGET_EINVAL
;
2406 if (get_user_u32(val
, optval_addr
))
2407 return -TARGET_EFAULT
;
2408 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
2412 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
2413 ret
= -TARGET_ENOPROTOOPT
;
2418 /* do_getsockopt() Must return target values and target errnos. */
2419 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
2420 abi_ulong optval_addr
, abi_ulong optlen
)
2427 case TARGET_SOL_SOCKET
:
2430 /* These don't just return a single integer */
2431 case TARGET_SO_LINGER
:
2432 case TARGET_SO_RCVTIMEO
:
2433 case TARGET_SO_SNDTIMEO
:
2434 case TARGET_SO_PEERNAME
:
2436 case TARGET_SO_PEERCRED
: {
2439 struct target_ucred
*tcr
;
2441 if (get_user_u32(len
, optlen
)) {
2442 return -TARGET_EFAULT
;
2445 return -TARGET_EINVAL
;
2449 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
2457 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
2458 return -TARGET_EFAULT
;
2460 __put_user(cr
.pid
, &tcr
->pid
);
2461 __put_user(cr
.uid
, &tcr
->uid
);
2462 __put_user(cr
.gid
, &tcr
->gid
);
2463 unlock_user_struct(tcr
, optval_addr
, 1);
2464 if (put_user_u32(len
, optlen
)) {
2465 return -TARGET_EFAULT
;
2469 /* Options with 'int' argument. */
2470 case TARGET_SO_DEBUG
:
2473 case TARGET_SO_REUSEADDR
:
2474 optname
= SO_REUSEADDR
;
2476 case TARGET_SO_TYPE
:
2479 case TARGET_SO_ERROR
:
2482 case TARGET_SO_DONTROUTE
:
2483 optname
= SO_DONTROUTE
;
2485 case TARGET_SO_BROADCAST
:
2486 optname
= SO_BROADCAST
;
2488 case TARGET_SO_SNDBUF
:
2489 optname
= SO_SNDBUF
;
2491 case TARGET_SO_RCVBUF
:
2492 optname
= SO_RCVBUF
;
2494 case TARGET_SO_KEEPALIVE
:
2495 optname
= SO_KEEPALIVE
;
2497 case TARGET_SO_OOBINLINE
:
2498 optname
= SO_OOBINLINE
;
2500 case TARGET_SO_NO_CHECK
:
2501 optname
= SO_NO_CHECK
;
2503 case TARGET_SO_PRIORITY
:
2504 optname
= SO_PRIORITY
;
2507 case TARGET_SO_BSDCOMPAT
:
2508 optname
= SO_BSDCOMPAT
;
2511 case TARGET_SO_PASSCRED
:
2512 optname
= SO_PASSCRED
;
2514 case TARGET_SO_TIMESTAMP
:
2515 optname
= SO_TIMESTAMP
;
2517 case TARGET_SO_RCVLOWAT
:
2518 optname
= SO_RCVLOWAT
;
2520 case TARGET_SO_ACCEPTCONN
:
2521 optname
= SO_ACCEPTCONN
;
2528 /* TCP options all take an 'int' value. */
2530 if (get_user_u32(len
, optlen
))
2531 return -TARGET_EFAULT
;
2533 return -TARGET_EINVAL
;
2535 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2538 if (optname
== SO_TYPE
) {
2539 val
= host_to_target_sock_type(val
);
2544 if (put_user_u32(val
, optval_addr
))
2545 return -TARGET_EFAULT
;
2547 if (put_user_u8(val
, optval_addr
))
2548 return -TARGET_EFAULT
;
2550 if (put_user_u32(len
, optlen
))
2551 return -TARGET_EFAULT
;
2558 case IP_ROUTER_ALERT
:
2562 case IP_MTU_DISCOVER
:
2568 case IP_MULTICAST_TTL
:
2569 case IP_MULTICAST_LOOP
:
2570 if (get_user_u32(len
, optlen
))
2571 return -TARGET_EFAULT
;
2573 return -TARGET_EINVAL
;
2575 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2578 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2580 if (put_user_u32(len
, optlen
)
2581 || put_user_u8(val
, optval_addr
))
2582 return -TARGET_EFAULT
;
2584 if (len
> sizeof(int))
2586 if (put_user_u32(len
, optlen
)
2587 || put_user_u32(val
, optval_addr
))
2588 return -TARGET_EFAULT
;
2592 ret
= -TARGET_ENOPROTOOPT
;
2598 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2600 ret
= -TARGET_EOPNOTSUPP
;
2606 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
2607 int count
, int copy
)
2609 struct target_iovec
*target_vec
;
2611 abi_ulong total_len
, max_len
;
2614 bool bad_address
= false;
2620 if (count
< 0 || count
> IOV_MAX
) {
2625 vec
= g_try_new0(struct iovec
, count
);
2631 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2632 count
* sizeof(struct target_iovec
), 1);
2633 if (target_vec
== NULL
) {
2638 /* ??? If host page size > target page size, this will result in a
2639 value larger than what we can actually support. */
2640 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
2643 for (i
= 0; i
< count
; i
++) {
2644 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2645 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2650 } else if (len
== 0) {
2651 /* Zero length pointer is ignored. */
2652 vec
[i
].iov_base
= 0;
2654 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
2655 /* If the first buffer pointer is bad, this is a fault. But
2656 * subsequent bad buffers will result in a partial write; this
2657 * is realized by filling the vector with null pointers and
2659 if (!vec
[i
].iov_base
) {
2670 if (len
> max_len
- total_len
) {
2671 len
= max_len
- total_len
;
2674 vec
[i
].iov_len
= len
;
2678 unlock_user(target_vec
, target_addr
, 0);
2683 if (tswapal(target_vec
[i
].iov_len
) > 0) {
2684 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
2687 unlock_user(target_vec
, target_addr
, 0);
2694 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
2695 int count
, int copy
)
2697 struct target_iovec
*target_vec
;
2700 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2701 count
* sizeof(struct target_iovec
), 1);
2703 for (i
= 0; i
< count
; i
++) {
2704 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2705 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2709 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
2711 unlock_user(target_vec
, target_addr
, 0);
2717 static inline int target_to_host_sock_type(int *type
)
2720 int target_type
= *type
;
2722 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
2723 case TARGET_SOCK_DGRAM
:
2724 host_type
= SOCK_DGRAM
;
2726 case TARGET_SOCK_STREAM
:
2727 host_type
= SOCK_STREAM
;
2730 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
2733 if (target_type
& TARGET_SOCK_CLOEXEC
) {
2734 #if defined(SOCK_CLOEXEC)
2735 host_type
|= SOCK_CLOEXEC
;
2737 return -TARGET_EINVAL
;
2740 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2741 #if defined(SOCK_NONBLOCK)
2742 host_type
|= SOCK_NONBLOCK
;
2743 #elif !defined(O_NONBLOCK)
2744 return -TARGET_EINVAL
;
2751 /* Try to emulate socket type flags after socket creation. */
2752 static int sock_flags_fixup(int fd
, int target_type
)
2754 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2755 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2756 int flags
= fcntl(fd
, F_GETFL
);
2757 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
2759 return -TARGET_EINVAL
;
2766 static abi_long
packet_target_to_host_sockaddr(void *host_addr
,
2767 abi_ulong target_addr
,
2770 struct sockaddr
*addr
= host_addr
;
2771 struct target_sockaddr
*target_saddr
;
2773 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
2774 if (!target_saddr
) {
2775 return -TARGET_EFAULT
;
2778 memcpy(addr
, target_saddr
, len
);
2779 addr
->sa_family
= tswap16(target_saddr
->sa_family
);
2780 /* spkt_protocol is big-endian */
2782 unlock_user(target_saddr
, target_addr
, 0);
2786 static TargetFdTrans target_packet_trans
= {
2787 .target_to_host_addr
= packet_target_to_host_sockaddr
,
2790 #ifdef CONFIG_RTNETLINK
2791 static abi_long
netlink_route_target_to_host(void *buf
, size_t len
)
2793 return target_to_host_nlmsg_route(buf
, len
);
2796 static abi_long
netlink_route_host_to_target(void *buf
, size_t len
)
2798 return host_to_target_nlmsg_route(buf
, len
);
2801 static TargetFdTrans target_netlink_route_trans
= {
2802 .target_to_host_data
= netlink_route_target_to_host
,
2803 .host_to_target_data
= netlink_route_host_to_target
,
2805 #endif /* CONFIG_RTNETLINK */
2807 static abi_long
netlink_audit_target_to_host(void *buf
, size_t len
)
2809 return target_to_host_nlmsg_audit(buf
, len
);
2812 static abi_long
netlink_audit_host_to_target(void *buf
, size_t len
)
2814 return host_to_target_nlmsg_audit(buf
, len
);
2817 static TargetFdTrans target_netlink_audit_trans
= {
2818 .target_to_host_data
= netlink_audit_target_to_host
,
2819 .host_to_target_data
= netlink_audit_host_to_target
,
2822 /* do_socket() Must return target values and target errnos. */
2823 static abi_long
do_socket(int domain
, int type
, int protocol
)
2825 int target_type
= type
;
2828 ret
= target_to_host_sock_type(&type
);
2833 if (domain
== PF_NETLINK
&& !(
2834 #ifdef CONFIG_RTNETLINK
2835 protocol
== NETLINK_ROUTE
||
2837 protocol
== NETLINK_KOBJECT_UEVENT
||
2838 protocol
== NETLINK_AUDIT
)) {
2839 return -EPFNOSUPPORT
;
2842 if (domain
== AF_PACKET
||
2843 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
2844 protocol
= tswap16(protocol
);
2847 ret
= get_errno(socket(domain
, type
, protocol
));
2849 ret
= sock_flags_fixup(ret
, target_type
);
2850 if (type
== SOCK_PACKET
) {
2851 /* Manage an obsolete case :
2852 * if socket type is SOCK_PACKET, bind by name
2854 fd_trans_register(ret
, &target_packet_trans
);
2855 } else if (domain
== PF_NETLINK
) {
2857 #ifdef CONFIG_RTNETLINK
2859 fd_trans_register(ret
, &target_netlink_route_trans
);
2862 case NETLINK_KOBJECT_UEVENT
:
2863 /* nothing to do: messages are strings */
2866 fd_trans_register(ret
, &target_netlink_audit_trans
);
2869 g_assert_not_reached();
2876 /* do_bind() Must return target values and target errnos. */
2877 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
2883 if ((int)addrlen
< 0) {
2884 return -TARGET_EINVAL
;
2887 addr
= alloca(addrlen
+1);
2889 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
2893 return get_errno(bind(sockfd
, addr
, addrlen
));
2896 /* do_connect() Must return target values and target errnos. */
2897 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
2903 if ((int)addrlen
< 0) {
2904 return -TARGET_EINVAL
;
2907 addr
= alloca(addrlen
+1);
2909 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
2913 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
2916 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2917 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
2918 int flags
, int send
)
2924 abi_ulong target_vec
;
2926 if (msgp
->msg_name
) {
2927 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
2928 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
2929 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
2930 tswapal(msgp
->msg_name
),
2936 msg
.msg_name
= NULL
;
2937 msg
.msg_namelen
= 0;
2939 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
2940 msg
.msg_control
= alloca(msg
.msg_controllen
);
2941 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
2943 count
= tswapal(msgp
->msg_iovlen
);
2944 target_vec
= tswapal(msgp
->msg_iov
);
2945 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
2946 target_vec
, count
, send
);
2948 ret
= -host_to_target_errno(errno
);
2951 msg
.msg_iovlen
= count
;
2955 if (fd_trans_target_to_host_data(fd
)) {
2956 ret
= fd_trans_target_to_host_data(fd
)(msg
.msg_iov
->iov_base
,
2957 msg
.msg_iov
->iov_len
);
2959 ret
= target_to_host_cmsg(&msg
, msgp
);
2962 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
2965 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
2966 if (!is_error(ret
)) {
2968 if (fd_trans_host_to_target_data(fd
)) {
2969 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
2970 msg
.msg_iov
->iov_len
);
2972 ret
= host_to_target_cmsg(msgp
, &msg
);
2974 if (!is_error(ret
)) {
2975 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
2976 if (msg
.msg_name
!= NULL
) {
2977 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
2978 msg
.msg_name
, msg
.msg_namelen
);
2990 unlock_iovec(vec
, target_vec
, count
, !send
);
2995 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
2996 int flags
, int send
)
2999 struct target_msghdr
*msgp
;
3001 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
3005 return -TARGET_EFAULT
;
3007 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
3008 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
3012 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3013 * so it might not have this *mmsg-specific flag either.
3015 #ifndef MSG_WAITFORONE
3016 #define MSG_WAITFORONE 0x10000
3019 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
3020 unsigned int vlen
, unsigned int flags
,
3023 struct target_mmsghdr
*mmsgp
;
3027 if (vlen
> UIO_MAXIOV
) {
3031 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
3033 return -TARGET_EFAULT
;
3036 for (i
= 0; i
< vlen
; i
++) {
3037 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
3038 if (is_error(ret
)) {
3041 mmsgp
[i
].msg_len
= tswap32(ret
);
3042 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3043 if (flags
& MSG_WAITFORONE
) {
3044 flags
|= MSG_DONTWAIT
;
3048 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3050 /* Return number of datagrams sent if we sent any at all;
3051 * otherwise return the error.
3059 /* If we don't have a system accept4() then just call accept.
3060 * The callsites to do_accept4() will ensure that they don't
3061 * pass a non-zero flags argument in this config.
3063 #ifndef CONFIG_ACCEPT4
3064 static inline int accept4(int sockfd
, struct sockaddr
*addr
,
3065 socklen_t
*addrlen
, int flags
)
3068 return accept(sockfd
, addr
, addrlen
);
3072 /* do_accept4() Must return target values and target errnos. */
3073 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3074 abi_ulong target_addrlen_addr
, int flags
)
3081 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3083 if (target_addr
== 0) {
3084 return get_errno(accept4(fd
, NULL
, NULL
, host_flags
));
3087 /* linux returns EINVAL if addrlen pointer is invalid */
3088 if (get_user_u32(addrlen
, target_addrlen_addr
))
3089 return -TARGET_EINVAL
;
3091 if ((int)addrlen
< 0) {
3092 return -TARGET_EINVAL
;
3095 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3096 return -TARGET_EINVAL
;
3098 addr
= alloca(addrlen
);
3100 ret
= get_errno(accept4(fd
, addr
, &addrlen
, host_flags
));
3101 if (!is_error(ret
)) {
3102 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3103 if (put_user_u32(addrlen
, target_addrlen_addr
))
3104 ret
= -TARGET_EFAULT
;
3109 /* do_getpeername() Must return target values and target errnos. */
3110 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3111 abi_ulong target_addrlen_addr
)
3117 if (get_user_u32(addrlen
, target_addrlen_addr
))
3118 return -TARGET_EFAULT
;
3120 if ((int)addrlen
< 0) {
3121 return -TARGET_EINVAL
;
3124 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3125 return -TARGET_EFAULT
;
3127 addr
= alloca(addrlen
);
3129 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
3130 if (!is_error(ret
)) {
3131 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3132 if (put_user_u32(addrlen
, target_addrlen_addr
))
3133 ret
= -TARGET_EFAULT
;
3138 /* do_getsockname() Must return target values and target errnos. */
3139 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3140 abi_ulong target_addrlen_addr
)
3146 if (get_user_u32(addrlen
, target_addrlen_addr
))
3147 return -TARGET_EFAULT
;
3149 if ((int)addrlen
< 0) {
3150 return -TARGET_EINVAL
;
3153 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3154 return -TARGET_EFAULT
;
3156 addr
= alloca(addrlen
);
3158 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
3159 if (!is_error(ret
)) {
3160 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3161 if (put_user_u32(addrlen
, target_addrlen_addr
))
3162 ret
= -TARGET_EFAULT
;
3167 /* do_socketpair() Must return target values and target errnos. */
3168 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3169 abi_ulong target_tab_addr
)
3174 target_to_host_sock_type(&type
);
3176 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3177 if (!is_error(ret
)) {
3178 if (put_user_s32(tab
[0], target_tab_addr
)
3179 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3180 ret
= -TARGET_EFAULT
;
3185 /* do_sendto() Must return target values and target errnos. */
3186 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3187 abi_ulong target_addr
, socklen_t addrlen
)
3193 if ((int)addrlen
< 0) {
3194 return -TARGET_EINVAL
;
3197 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3199 return -TARGET_EFAULT
;
3200 if (fd_trans_target_to_host_data(fd
)) {
3201 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
3203 unlock_user(host_msg
, msg
, 0);
3208 addr
= alloca(addrlen
+1);
3209 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
3211 unlock_user(host_msg
, msg
, 0);
3214 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
3216 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
3218 unlock_user(host_msg
, msg
, 0);
3222 /* do_recvfrom() Must return target values and target errnos. */
3223 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
3224 abi_ulong target_addr
,
3225 abi_ulong target_addrlen
)
3232 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
3234 return -TARGET_EFAULT
;
3236 if (get_user_u32(addrlen
, target_addrlen
)) {
3237 ret
= -TARGET_EFAULT
;
3240 if ((int)addrlen
< 0) {
3241 ret
= -TARGET_EINVAL
;
3244 addr
= alloca(addrlen
);
3245 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
3248 addr
= NULL
; /* To keep compiler quiet. */
3249 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
3251 if (!is_error(ret
)) {
3253 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3254 if (put_user_u32(addrlen
, target_addrlen
)) {
3255 ret
= -TARGET_EFAULT
;
3259 unlock_user(host_msg
, msg
, len
);
3262 unlock_user(host_msg
, msg
, 0);
3267 #ifdef TARGET_NR_socketcall
3268 /* do_socketcall() Must return target values and target errnos. */
3269 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
3271 static const unsigned ac
[] = { /* number of arguments per call */
3272 [SOCKOP_socket
] = 3, /* domain, type, protocol */
3273 [SOCKOP_bind
] = 3, /* sockfd, addr, addrlen */
3274 [SOCKOP_connect
] = 3, /* sockfd, addr, addrlen */
3275 [SOCKOP_listen
] = 2, /* sockfd, backlog */
3276 [SOCKOP_accept
] = 3, /* sockfd, addr, addrlen */
3277 [SOCKOP_accept4
] = 4, /* sockfd, addr, addrlen, flags */
3278 [SOCKOP_getsockname
] = 3, /* sockfd, addr, addrlen */
3279 [SOCKOP_getpeername
] = 3, /* sockfd, addr, addrlen */
3280 [SOCKOP_socketpair
] = 4, /* domain, type, protocol, tab */
3281 [SOCKOP_send
] = 4, /* sockfd, msg, len, flags */
3282 [SOCKOP_recv
] = 4, /* sockfd, msg, len, flags */
3283 [SOCKOP_sendto
] = 6, /* sockfd, msg, len, flags, addr, addrlen */
3284 [SOCKOP_recvfrom
] = 6, /* sockfd, msg, len, flags, addr, addrlen */
3285 [SOCKOP_shutdown
] = 2, /* sockfd, how */
3286 [SOCKOP_sendmsg
] = 3, /* sockfd, msg, flags */
3287 [SOCKOP_recvmsg
] = 3, /* sockfd, msg, flags */
3288 [SOCKOP_sendmmsg
] = 4, /* sockfd, msgvec, vlen, flags */
3289 [SOCKOP_recvmmsg
] = 4, /* sockfd, msgvec, vlen, flags */
3290 [SOCKOP_setsockopt
] = 5, /* sockfd, level, optname, optval, optlen */
3291 [SOCKOP_getsockopt
] = 5, /* sockfd, level, optname, optval, optlen */
3293 abi_long a
[6]; /* max 6 args */
3295 /* first, collect the arguments in a[] according to ac[] */
3296 if (num
>= 0 && num
< ARRAY_SIZE(ac
)) {
3298 assert(ARRAY_SIZE(a
) >= ac
[num
]); /* ensure we have space for args */
3299 for (i
= 0; i
< ac
[num
]; ++i
) {
3300 if (get_user_ual(a
[i
], vptr
+ i
* sizeof(abi_long
)) != 0) {
3301 return -TARGET_EFAULT
;
3306 /* now when we have the args, actually handle the call */
3308 case SOCKOP_socket
: /* domain, type, protocol */
3309 return do_socket(a
[0], a
[1], a
[2]);
3310 case SOCKOP_bind
: /* sockfd, addr, addrlen */
3311 return do_bind(a
[0], a
[1], a
[2]);
3312 case SOCKOP_connect
: /* sockfd, addr, addrlen */
3313 return do_connect(a
[0], a
[1], a
[2]);
3314 case SOCKOP_listen
: /* sockfd, backlog */
3315 return get_errno(listen(a
[0], a
[1]));
3316 case SOCKOP_accept
: /* sockfd, addr, addrlen */
3317 return do_accept4(a
[0], a
[1], a
[2], 0);
3318 case SOCKOP_accept4
: /* sockfd, addr, addrlen, flags */
3319 return do_accept4(a
[0], a
[1], a
[2], a
[3]);
3320 case SOCKOP_getsockname
: /* sockfd, addr, addrlen */
3321 return do_getsockname(a
[0], a
[1], a
[2]);
3322 case SOCKOP_getpeername
: /* sockfd, addr, addrlen */
3323 return do_getpeername(a
[0], a
[1], a
[2]);
3324 case SOCKOP_socketpair
: /* domain, type, protocol, tab */
3325 return do_socketpair(a
[0], a
[1], a
[2], a
[3]);
3326 case SOCKOP_send
: /* sockfd, msg, len, flags */
3327 return do_sendto(a
[0], a
[1], a
[2], a
[3], 0, 0);
3328 case SOCKOP_recv
: /* sockfd, msg, len, flags */
3329 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], 0, 0);
3330 case SOCKOP_sendto
: /* sockfd, msg, len, flags, addr, addrlen */
3331 return do_sendto(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3332 case SOCKOP_recvfrom
: /* sockfd, msg, len, flags, addr, addrlen */
3333 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3334 case SOCKOP_shutdown
: /* sockfd, how */
3335 return get_errno(shutdown(a
[0], a
[1]));
3336 case SOCKOP_sendmsg
: /* sockfd, msg, flags */
3337 return do_sendrecvmsg(a
[0], a
[1], a
[2], 1);
3338 case SOCKOP_recvmsg
: /* sockfd, msg, flags */
3339 return do_sendrecvmsg(a
[0], a
[1], a
[2], 0);
3340 case SOCKOP_sendmmsg
: /* sockfd, msgvec, vlen, flags */
3341 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 1);
3342 case SOCKOP_recvmmsg
: /* sockfd, msgvec, vlen, flags */
3343 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 0);
3344 case SOCKOP_setsockopt
: /* sockfd, level, optname, optval, optlen */
3345 return do_setsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3346 case SOCKOP_getsockopt
: /* sockfd, level, optname, optval, optlen */
3347 return do_getsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3349 gemu_log("Unsupported socketcall: %d\n", num
);
3350 return -TARGET_ENOSYS
;
3355 #define N_SHM_REGIONS 32
3357 static struct shm_region
{
3361 } shm_regions
[N_SHM_REGIONS
];
3363 struct target_semid_ds
3365 struct target_ipc_perm sem_perm
;
3366 abi_ulong sem_otime
;
3367 #if !defined(TARGET_PPC64)
3368 abi_ulong __unused1
;
3370 abi_ulong sem_ctime
;
3371 #if !defined(TARGET_PPC64)
3372 abi_ulong __unused2
;
3374 abi_ulong sem_nsems
;
3375 abi_ulong __unused3
;
3376 abi_ulong __unused4
;
3379 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
3380 abi_ulong target_addr
)
3382 struct target_ipc_perm
*target_ip
;
3383 struct target_semid_ds
*target_sd
;
3385 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3386 return -TARGET_EFAULT
;
3387 target_ip
= &(target_sd
->sem_perm
);
3388 host_ip
->__key
= tswap32(target_ip
->__key
);
3389 host_ip
->uid
= tswap32(target_ip
->uid
);
3390 host_ip
->gid
= tswap32(target_ip
->gid
);
3391 host_ip
->cuid
= tswap32(target_ip
->cuid
);
3392 host_ip
->cgid
= tswap32(target_ip
->cgid
);
3393 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3394 host_ip
->mode
= tswap32(target_ip
->mode
);
3396 host_ip
->mode
= tswap16(target_ip
->mode
);
3398 #if defined(TARGET_PPC)
3399 host_ip
->__seq
= tswap32(target_ip
->__seq
);
3401 host_ip
->__seq
= tswap16(target_ip
->__seq
);
3403 unlock_user_struct(target_sd
, target_addr
, 0);
3407 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
3408 struct ipc_perm
*host_ip
)
3410 struct target_ipc_perm
*target_ip
;
3411 struct target_semid_ds
*target_sd
;
3413 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3414 return -TARGET_EFAULT
;
3415 target_ip
= &(target_sd
->sem_perm
);
3416 target_ip
->__key
= tswap32(host_ip
->__key
);
3417 target_ip
->uid
= tswap32(host_ip
->uid
);
3418 target_ip
->gid
= tswap32(host_ip
->gid
);
3419 target_ip
->cuid
= tswap32(host_ip
->cuid
);
3420 target_ip
->cgid
= tswap32(host_ip
->cgid
);
3421 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3422 target_ip
->mode
= tswap32(host_ip
->mode
);
3424 target_ip
->mode
= tswap16(host_ip
->mode
);
3426 #if defined(TARGET_PPC)
3427 target_ip
->__seq
= tswap32(host_ip
->__seq
);
3429 target_ip
->__seq
= tswap16(host_ip
->__seq
);
3431 unlock_user_struct(target_sd
, target_addr
, 1);
3435 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
3436 abi_ulong target_addr
)
3438 struct target_semid_ds
*target_sd
;
3440 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3441 return -TARGET_EFAULT
;
3442 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
3443 return -TARGET_EFAULT
;
3444 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
3445 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
3446 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
3447 unlock_user_struct(target_sd
, target_addr
, 0);
3451 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
3452 struct semid_ds
*host_sd
)
3454 struct target_semid_ds
*target_sd
;
3456 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3457 return -TARGET_EFAULT
;
3458 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
3459 return -TARGET_EFAULT
;
3460 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
3461 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
3462 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
3463 unlock_user_struct(target_sd
, target_addr
, 1);
3467 struct target_seminfo
{
3480 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
3481 struct seminfo
*host_seminfo
)
3483 struct target_seminfo
*target_seminfo
;
3484 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
3485 return -TARGET_EFAULT
;
3486 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
3487 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
3488 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
3489 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
3490 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
3491 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
3492 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
3493 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
3494 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
3495 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
3496 unlock_user_struct(target_seminfo
, target_addr
, 1);
3502 struct semid_ds
*buf
;
3503 unsigned short *array
;
3504 struct seminfo
*__buf
;
3507 union target_semun
{
3514 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
3515 abi_ulong target_addr
)
3518 unsigned short *array
;
3520 struct semid_ds semid_ds
;
3523 semun
.buf
= &semid_ds
;
3525 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3527 return get_errno(ret
);
3529 nsems
= semid_ds
.sem_nsems
;
3531 *host_array
= g_try_new(unsigned short, nsems
);
3533 return -TARGET_ENOMEM
;
3535 array
= lock_user(VERIFY_READ
, target_addr
,
3536 nsems
*sizeof(unsigned short), 1);
3538 g_free(*host_array
);
3539 return -TARGET_EFAULT
;
3542 for(i
=0; i
<nsems
; i
++) {
3543 __get_user((*host_array
)[i
], &array
[i
]);
3545 unlock_user(array
, target_addr
, 0);
3550 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
3551 unsigned short **host_array
)
3554 unsigned short *array
;
3556 struct semid_ds semid_ds
;
3559 semun
.buf
= &semid_ds
;
3561 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3563 return get_errno(ret
);
3565 nsems
= semid_ds
.sem_nsems
;
3567 array
= lock_user(VERIFY_WRITE
, target_addr
,
3568 nsems
*sizeof(unsigned short), 0);
3570 return -TARGET_EFAULT
;
3572 for(i
=0; i
<nsems
; i
++) {
3573 __put_user((*host_array
)[i
], &array
[i
]);
3575 g_free(*host_array
);
3576 unlock_user(array
, target_addr
, 1);
3581 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
3582 abi_ulong target_arg
)
3584 union target_semun target_su
= { .buf
= target_arg
};
3586 struct semid_ds dsarg
;
3587 unsigned short *array
= NULL
;
3588 struct seminfo seminfo
;
3589 abi_long ret
= -TARGET_EINVAL
;
3596 /* In 64 bit cross-endian situations, we will erroneously pick up
3597 * the wrong half of the union for the "val" element. To rectify
3598 * this, the entire 8-byte structure is byteswapped, followed by
3599 * a swap of the 4 byte val field. In other cases, the data is
3600 * already in proper host byte order. */
3601 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
3602 target_su
.buf
= tswapal(target_su
.buf
);
3603 arg
.val
= tswap32(target_su
.val
);
3605 arg
.val
= target_su
.val
;
3607 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3611 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
3615 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3616 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
3623 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
3627 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3628 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
3634 arg
.__buf
= &seminfo
;
3635 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3636 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
3644 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
3651 struct target_sembuf
{
3652 unsigned short sem_num
;
3657 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
3658 abi_ulong target_addr
,
3661 struct target_sembuf
*target_sembuf
;
3664 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
3665 nsops
*sizeof(struct target_sembuf
), 1);
3667 return -TARGET_EFAULT
;
3669 for(i
=0; i
<nsops
; i
++) {
3670 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
3671 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
3672 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
3675 unlock_user(target_sembuf
, target_addr
, 0);
3680 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
3682 struct sembuf sops
[nsops
];
3684 if (target_to_host_sembuf(sops
, ptr
, nsops
))
3685 return -TARGET_EFAULT
;
3687 return get_errno(semop(semid
, sops
, nsops
));
3690 struct target_msqid_ds
3692 struct target_ipc_perm msg_perm
;
3693 abi_ulong msg_stime
;
3694 #if TARGET_ABI_BITS == 32
3695 abi_ulong __unused1
;
3697 abi_ulong msg_rtime
;
3698 #if TARGET_ABI_BITS == 32
3699 abi_ulong __unused2
;
3701 abi_ulong msg_ctime
;
3702 #if TARGET_ABI_BITS == 32
3703 abi_ulong __unused3
;
3705 abi_ulong __msg_cbytes
;
3707 abi_ulong msg_qbytes
;
3708 abi_ulong msg_lspid
;
3709 abi_ulong msg_lrpid
;
3710 abi_ulong __unused4
;
3711 abi_ulong __unused5
;
3714 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
3715 abi_ulong target_addr
)
3717 struct target_msqid_ds
*target_md
;
3719 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
3720 return -TARGET_EFAULT
;
3721 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
3722 return -TARGET_EFAULT
;
3723 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
3724 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
3725 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
3726 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
3727 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
3728 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
3729 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
3730 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
3731 unlock_user_struct(target_md
, target_addr
, 0);
3735 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
3736 struct msqid_ds
*host_md
)
3738 struct target_msqid_ds
*target_md
;
3740 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
3741 return -TARGET_EFAULT
;
3742 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
3743 return -TARGET_EFAULT
;
3744 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
3745 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
3746 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
3747 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
3748 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
3749 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
3750 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
3751 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
3752 unlock_user_struct(target_md
, target_addr
, 1);
3756 struct target_msginfo
{
3764 unsigned short int msgseg
;
3767 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
3768 struct msginfo
*host_msginfo
)
3770 struct target_msginfo
*target_msginfo
;
3771 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
3772 return -TARGET_EFAULT
;
3773 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
3774 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
3775 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
3776 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
3777 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
3778 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
3779 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
3780 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
3781 unlock_user_struct(target_msginfo
, target_addr
, 1);
3785 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
3787 struct msqid_ds dsarg
;
3788 struct msginfo msginfo
;
3789 abi_long ret
= -TARGET_EINVAL
;
3797 if (target_to_host_msqid_ds(&dsarg
,ptr
))
3798 return -TARGET_EFAULT
;
3799 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
3800 if (host_to_target_msqid_ds(ptr
,&dsarg
))
3801 return -TARGET_EFAULT
;
3804 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
3808 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
3809 if (host_to_target_msginfo(ptr
, &msginfo
))
3810 return -TARGET_EFAULT
;
3817 struct target_msgbuf
{
3822 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
3823 ssize_t msgsz
, int msgflg
)
3825 struct target_msgbuf
*target_mb
;
3826 struct msgbuf
*host_mb
;
3830 return -TARGET_EINVAL
;
3833 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
3834 return -TARGET_EFAULT
;
3835 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
3837 unlock_user_struct(target_mb
, msgp
, 0);
3838 return -TARGET_ENOMEM
;
3840 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
3841 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
3842 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
3844 unlock_user_struct(target_mb
, msgp
, 0);
3849 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
3850 ssize_t msgsz
, abi_long msgtyp
,
3853 struct target_msgbuf
*target_mb
;
3855 struct msgbuf
*host_mb
;
3859 return -TARGET_EINVAL
;
3862 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
3863 return -TARGET_EFAULT
;
3865 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
3867 ret
= -TARGET_ENOMEM
;
3870 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
3873 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
3874 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
3875 if (!target_mtext
) {
3876 ret
= -TARGET_EFAULT
;
3879 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
3880 unlock_user(target_mtext
, target_mtext_addr
, ret
);
3883 target_mb
->mtype
= tswapal(host_mb
->mtype
);
3887 unlock_user_struct(target_mb
, msgp
, 1);
3892 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
3893 abi_ulong target_addr
)
3895 struct target_shmid_ds
*target_sd
;
3897 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3898 return -TARGET_EFAULT
;
3899 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
3900 return -TARGET_EFAULT
;
3901 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
3902 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
3903 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
3904 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
3905 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
3906 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
3907 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
3908 unlock_user_struct(target_sd
, target_addr
, 0);
3912 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
3913 struct shmid_ds
*host_sd
)
3915 struct target_shmid_ds
*target_sd
;
3917 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3918 return -TARGET_EFAULT
;
3919 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
3920 return -TARGET_EFAULT
;
3921 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
3922 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
3923 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
3924 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
3925 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
3926 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
3927 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
3928 unlock_user_struct(target_sd
, target_addr
, 1);
3932 struct target_shminfo
{
3940 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
3941 struct shminfo
*host_shminfo
)
3943 struct target_shminfo
*target_shminfo
;
3944 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
3945 return -TARGET_EFAULT
;
3946 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
3947 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
3948 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
3949 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
3950 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
3951 unlock_user_struct(target_shminfo
, target_addr
, 1);
3955 struct target_shm_info
{
3960 abi_ulong swap_attempts
;
3961 abi_ulong swap_successes
;
3964 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
3965 struct shm_info
*host_shm_info
)
3967 struct target_shm_info
*target_shm_info
;
3968 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
3969 return -TARGET_EFAULT
;
3970 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
3971 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
3972 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
3973 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
3974 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
3975 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
3976 unlock_user_struct(target_shm_info
, target_addr
, 1);
3980 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
3982 struct shmid_ds dsarg
;
3983 struct shminfo shminfo
;
3984 struct shm_info shm_info
;
3985 abi_long ret
= -TARGET_EINVAL
;
3993 if (target_to_host_shmid_ds(&dsarg
, buf
))
3994 return -TARGET_EFAULT
;
3995 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
3996 if (host_to_target_shmid_ds(buf
, &dsarg
))
3997 return -TARGET_EFAULT
;
4000 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
4001 if (host_to_target_shminfo(buf
, &shminfo
))
4002 return -TARGET_EFAULT
;
4005 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
4006 if (host_to_target_shm_info(buf
, &shm_info
))
4007 return -TARGET_EFAULT
;
4012 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
4019 static inline abi_ulong
do_shmat(int shmid
, abi_ulong shmaddr
, int shmflg
)
4023 struct shmid_ds shm_info
;
4026 /* find out the length of the shared memory segment */
4027 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
4028 if (is_error(ret
)) {
4029 /* can't get length, bail out */
4036 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
4038 abi_ulong mmap_start
;
4040 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
4042 if (mmap_start
== -1) {
4044 host_raddr
= (void *)-1;
4046 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
4049 if (host_raddr
== (void *)-1) {
4051 return get_errno((long)host_raddr
);
4053 raddr
=h2g((unsigned long)host_raddr
);
4055 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
4056 PAGE_VALID
| PAGE_READ
|
4057 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
4059 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
4060 if (!shm_regions
[i
].in_use
) {
4061 shm_regions
[i
].in_use
= true;
4062 shm_regions
[i
].start
= raddr
;
4063 shm_regions
[i
].size
= shm_info
.shm_segsz
;
4073 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
4077 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
4078 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
4079 shm_regions
[i
].in_use
= false;
4080 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
4085 return get_errno(shmdt(g2h(shmaddr
)));
4088 #ifdef TARGET_NR_ipc
4089 /* ??? This only works with linear mappings. */
4090 /* do_ipc() must return target values and target errnos. */
4091 static abi_long
do_ipc(unsigned int call
, abi_long first
,
4092 abi_long second
, abi_long third
,
4093 abi_long ptr
, abi_long fifth
)
4098 version
= call
>> 16;
4103 ret
= do_semop(first
, ptr
, second
);
4107 ret
= get_errno(semget(first
, second
, third
));
4110 case IPCOP_semctl
: {
4111 /* The semun argument to semctl is passed by value, so dereference the
4114 get_user_ual(atptr
, ptr
);
4115 ret
= do_semctl(first
, second
, third
, atptr
);
4120 ret
= get_errno(msgget(first
, second
));
4124 ret
= do_msgsnd(first
, ptr
, second
, third
);
4128 ret
= do_msgctl(first
, second
, ptr
);
4135 struct target_ipc_kludge
{
4140 if (!lock_user_struct(VERIFY_READ
, tmp
, ptr
, 1)) {
4141 ret
= -TARGET_EFAULT
;
4145 ret
= do_msgrcv(first
, tswapal(tmp
->msgp
), second
, tswapal(tmp
->msgtyp
), third
);
4147 unlock_user_struct(tmp
, ptr
, 0);
4151 ret
= do_msgrcv(first
, ptr
, second
, fifth
, third
);
4160 raddr
= do_shmat(first
, ptr
, second
);
4161 if (is_error(raddr
))
4162 return get_errno(raddr
);
4163 if (put_user_ual(raddr
, third
))
4164 return -TARGET_EFAULT
;
4168 ret
= -TARGET_EINVAL
;
4173 ret
= do_shmdt(ptr
);
4177 /* IPC_* flag values are the same on all linux platforms */
4178 ret
= get_errno(shmget(first
, second
, third
));
4181 /* IPC_* and SHM_* command values are the same on all linux platforms */
4183 ret
= do_shmctl(first
, second
, ptr
);
4186 gemu_log("Unsupported ipc call: %d (version %d)\n", call
, version
);
4187 ret
= -TARGET_ENOSYS
;
4194 /* kernel structure types definitions */
4196 #define STRUCT(name, ...) STRUCT_ ## name,
4197 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4199 #include "syscall_types.h"
4203 #undef STRUCT_SPECIAL
4205 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4206 #define STRUCT_SPECIAL(name)
4207 #include "syscall_types.h"
4209 #undef STRUCT_SPECIAL
4211 typedef struct IOCTLEntry IOCTLEntry
;
4213 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4214 int fd
, int cmd
, abi_long arg
);
4218 unsigned int host_cmd
;
4221 do_ioctl_fn
*do_ioctl
;
4222 const argtype arg_type
[5];
4225 #define IOC_R 0x0001
4226 #define IOC_W 0x0002
4227 #define IOC_RW (IOC_R | IOC_W)
4229 #define MAX_STRUCT_SIZE 4096
4231 #ifdef CONFIG_FIEMAP
4232 /* So fiemap access checks don't overflow on 32 bit systems.
4233 * This is very slightly smaller than the limit imposed by
4234 * the underlying kernel.
4236 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4237 / sizeof(struct fiemap_extent))
4239 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4240 int fd
, int cmd
, abi_long arg
)
4242 /* The parameter for this ioctl is a struct fiemap followed
4243 * by an array of struct fiemap_extent whose size is set
4244 * in fiemap->fm_extent_count. The array is filled in by the
4247 int target_size_in
, target_size_out
;
4249 const argtype
*arg_type
= ie
->arg_type
;
4250 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
4253 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
4257 assert(arg_type
[0] == TYPE_PTR
);
4258 assert(ie
->access
== IOC_RW
);
4260 target_size_in
= thunk_type_size(arg_type
, 0);
4261 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
4263 return -TARGET_EFAULT
;
4265 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4266 unlock_user(argptr
, arg
, 0);
4267 fm
= (struct fiemap
*)buf_temp
;
4268 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
4269 return -TARGET_EINVAL
;
4272 outbufsz
= sizeof (*fm
) +
4273 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
4275 if (outbufsz
> MAX_STRUCT_SIZE
) {
4276 /* We can't fit all the extents into the fixed size buffer.
4277 * Allocate one that is large enough and use it instead.
4279 fm
= g_try_malloc(outbufsz
);
4281 return -TARGET_ENOMEM
;
4283 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
4286 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, fm
));
4287 if (!is_error(ret
)) {
4288 target_size_out
= target_size_in
;
4289 /* An extent_count of 0 means we were only counting the extents
4290 * so there are no structs to copy
4292 if (fm
->fm_extent_count
!= 0) {
4293 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
4295 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
4297 ret
= -TARGET_EFAULT
;
4299 /* Convert the struct fiemap */
4300 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
4301 if (fm
->fm_extent_count
!= 0) {
4302 p
= argptr
+ target_size_in
;
4303 /* ...and then all the struct fiemap_extents */
4304 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
4305 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
4310 unlock_user(argptr
, arg
, target_size_out
);
4320 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4321 int fd
, int cmd
, abi_long arg
)
4323 const argtype
*arg_type
= ie
->arg_type
;
4327 struct ifconf
*host_ifconf
;
4329 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
4330 int target_ifreq_size
;
4335 abi_long target_ifc_buf
;
4339 assert(arg_type
[0] == TYPE_PTR
);
4340 assert(ie
->access
== IOC_RW
);
4343 target_size
= thunk_type_size(arg_type
, 0);
4345 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4347 return -TARGET_EFAULT
;
4348 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4349 unlock_user(argptr
, arg
, 0);
4351 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
4352 target_ifc_len
= host_ifconf
->ifc_len
;
4353 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
4355 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
4356 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
4357 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
4359 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
4360 if (outbufsz
> MAX_STRUCT_SIZE
) {
4361 /* We can't fit all the extents into the fixed size buffer.
4362 * Allocate one that is large enough and use it instead.
4364 host_ifconf
= malloc(outbufsz
);
4366 return -TARGET_ENOMEM
;
4368 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
4371 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
4373 host_ifconf
->ifc_len
= host_ifc_len
;
4374 host_ifconf
->ifc_buf
= host_ifc_buf
;
4376 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, host_ifconf
));
4377 if (!is_error(ret
)) {
4378 /* convert host ifc_len to target ifc_len */
4380 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
4381 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
4382 host_ifconf
->ifc_len
= target_ifc_len
;
4384 /* restore target ifc_buf */
4386 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
4388 /* copy struct ifconf to target user */
4390 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4392 return -TARGET_EFAULT
;
4393 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
4394 unlock_user(argptr
, arg
, target_size
);
4396 /* copy ifreq[] to target user */
4398 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
4399 for (i
= 0; i
< nb_ifreq
; i
++) {
4400 thunk_convert(argptr
+ i
* target_ifreq_size
,
4401 host_ifc_buf
+ i
* sizeof(struct ifreq
),
4402 ifreq_arg_type
, THUNK_TARGET
);
4404 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
4414 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
4415 int cmd
, abi_long arg
)
4418 struct dm_ioctl
*host_dm
;
4419 abi_long guest_data
;
4420 uint32_t guest_data_size
;
4422 const argtype
*arg_type
= ie
->arg_type
;
4424 void *big_buf
= NULL
;
4428 target_size
= thunk_type_size(arg_type
, 0);
4429 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4431 ret
= -TARGET_EFAULT
;
4434 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4435 unlock_user(argptr
, arg
, 0);
4437 /* buf_temp is too small, so fetch things into a bigger buffer */
4438 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
4439 memcpy(big_buf
, buf_temp
, target_size
);
4443 guest_data
= arg
+ host_dm
->data_start
;
4444 if ((guest_data
- arg
) < 0) {
4448 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
4449 host_data
= (char*)host_dm
+ host_dm
->data_start
;
4451 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
4452 switch (ie
->host_cmd
) {
4454 case DM_LIST_DEVICES
:
4457 case DM_DEV_SUSPEND
:
4460 case DM_TABLE_STATUS
:
4461 case DM_TABLE_CLEAR
:
4463 case DM_LIST_VERSIONS
:
4467 case DM_DEV_SET_GEOMETRY
:
4468 /* data contains only strings */
4469 memcpy(host_data
, argptr
, guest_data_size
);
4472 memcpy(host_data
, argptr
, guest_data_size
);
4473 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
4477 void *gspec
= argptr
;
4478 void *cur_data
= host_data
;
4479 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
4480 int spec_size
= thunk_type_size(arg_type
, 0);
4483 for (i
= 0; i
< host_dm
->target_count
; i
++) {
4484 struct dm_target_spec
*spec
= cur_data
;
4488 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
4489 slen
= strlen((char*)gspec
+ spec_size
) + 1;
4491 spec
->next
= sizeof(*spec
) + slen
;
4492 strcpy((char*)&spec
[1], gspec
+ spec_size
);
4494 cur_data
+= spec
->next
;
4499 ret
= -TARGET_EINVAL
;
4500 unlock_user(argptr
, guest_data
, 0);
4503 unlock_user(argptr
, guest_data
, 0);
4505 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
4506 if (!is_error(ret
)) {
4507 guest_data
= arg
+ host_dm
->data_start
;
4508 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
4509 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
4510 switch (ie
->host_cmd
) {
4515 case DM_DEV_SUSPEND
:
4518 case DM_TABLE_CLEAR
:
4520 case DM_DEV_SET_GEOMETRY
:
4521 /* no return data */
4523 case DM_LIST_DEVICES
:
4525 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
4526 uint32_t remaining_data
= guest_data_size
;
4527 void *cur_data
= argptr
;
4528 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
4529 int nl_size
= 12; /* can't use thunk_size due to alignment */
4532 uint32_t next
= nl
->next
;
4534 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
4536 if (remaining_data
< nl
->next
) {
4537 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
4540 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
4541 strcpy(cur_data
+ nl_size
, nl
->name
);
4542 cur_data
+= nl
->next
;
4543 remaining_data
-= nl
->next
;
4547 nl
= (void*)nl
+ next
;
4552 case DM_TABLE_STATUS
:
4554 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
4555 void *cur_data
= argptr
;
4556 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
4557 int spec_size
= thunk_type_size(arg_type
, 0);
4560 for (i
= 0; i
< host_dm
->target_count
; i
++) {
4561 uint32_t next
= spec
->next
;
4562 int slen
= strlen((char*)&spec
[1]) + 1;
4563 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
4564 if (guest_data_size
< spec
->next
) {
4565 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
4568 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
4569 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
4570 cur_data
= argptr
+ spec
->next
;
4571 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
4577 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
4578 int count
= *(uint32_t*)hdata
;
4579 uint64_t *hdev
= hdata
+ 8;
4580 uint64_t *gdev
= argptr
+ 8;
4583 *(uint32_t*)argptr
= tswap32(count
);
4584 for (i
= 0; i
< count
; i
++) {
4585 *gdev
= tswap64(*hdev
);
4591 case DM_LIST_VERSIONS
:
4593 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
4594 uint32_t remaining_data
= guest_data_size
;
4595 void *cur_data
= argptr
;
4596 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
4597 int vers_size
= thunk_type_size(arg_type
, 0);
4600 uint32_t next
= vers
->next
;
4602 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
4604 if (remaining_data
< vers
->next
) {
4605 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
4608 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
4609 strcpy(cur_data
+ vers_size
, vers
->name
);
4610 cur_data
+= vers
->next
;
4611 remaining_data
-= vers
->next
;
4615 vers
= (void*)vers
+ next
;
4620 unlock_user(argptr
, guest_data
, 0);
4621 ret
= -TARGET_EINVAL
;
4624 unlock_user(argptr
, guest_data
, guest_data_size
);
4626 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4628 ret
= -TARGET_EFAULT
;
4631 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
4632 unlock_user(argptr
, arg
, target_size
);
4639 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
4640 int cmd
, abi_long arg
)
4644 const argtype
*arg_type
= ie
->arg_type
;
4645 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
4648 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
4649 struct blkpg_partition host_part
;
4651 /* Read and convert blkpg */
4653 target_size
= thunk_type_size(arg_type
, 0);
4654 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4656 ret
= -TARGET_EFAULT
;
4659 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4660 unlock_user(argptr
, arg
, 0);
4662 switch (host_blkpg
->op
) {
4663 case BLKPG_ADD_PARTITION
:
4664 case BLKPG_DEL_PARTITION
:
4665 /* payload is struct blkpg_partition */
4668 /* Unknown opcode */
4669 ret
= -TARGET_EINVAL
;
4673 /* Read and convert blkpg->data */
4674 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
4675 target_size
= thunk_type_size(part_arg_type
, 0);
4676 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4678 ret
= -TARGET_EFAULT
;
4681 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
4682 unlock_user(argptr
, arg
, 0);
4684 /* Swizzle the data pointer to our local copy and call! */
4685 host_blkpg
->data
= &host_part
;
4686 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, host_blkpg
));
4692 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4693 int fd
, int cmd
, abi_long arg
)
4695 const argtype
*arg_type
= ie
->arg_type
;
4696 const StructEntry
*se
;
4697 const argtype
*field_types
;
4698 const int *dst_offsets
, *src_offsets
;
4701 abi_ulong
*target_rt_dev_ptr
;
4702 unsigned long *host_rt_dev_ptr
;
4706 assert(ie
->access
== IOC_W
);
4707 assert(*arg_type
== TYPE_PTR
);
4709 assert(*arg_type
== TYPE_STRUCT
);
4710 target_size
= thunk_type_size(arg_type
, 0);
4711 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4713 return -TARGET_EFAULT
;
4716 assert(*arg_type
== (int)STRUCT_rtentry
);
4717 se
= struct_entries
+ *arg_type
++;
4718 assert(se
->convert
[0] == NULL
);
4719 /* convert struct here to be able to catch rt_dev string */
4720 field_types
= se
->field_types
;
4721 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
4722 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
4723 for (i
= 0; i
< se
->nb_fields
; i
++) {
4724 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
4725 assert(*field_types
== TYPE_PTRVOID
);
4726 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
4727 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
4728 if (*target_rt_dev_ptr
!= 0) {
4729 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
4730 tswapal(*target_rt_dev_ptr
));
4731 if (!*host_rt_dev_ptr
) {
4732 unlock_user(argptr
, arg
, 0);
4733 return -TARGET_EFAULT
;
4736 *host_rt_dev_ptr
= 0;
4741 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
4742 argptr
+ src_offsets
[i
],
4743 field_types
, THUNK_HOST
);
4745 unlock_user(argptr
, arg
, 0);
4747 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
4748 if (*host_rt_dev_ptr
!= 0) {
4749 unlock_user((void *)*host_rt_dev_ptr
,
4750 *target_rt_dev_ptr
, 0);
4755 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4756 int fd
, int cmd
, abi_long arg
)
4758 int sig
= target_to_host_signal(arg
);
4759 return get_errno(ioctl(fd
, ie
->host_cmd
, sig
));
4762 static IOCTLEntry ioctl_entries
[] = {
4763 #define IOCTL(cmd, access, ...) \
4764 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
4765 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
4766 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
4771 /* ??? Implement proper locking for ioctls. */
4772 /* do_ioctl() Must return target values and target errnos. */
4773 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
4775 const IOCTLEntry
*ie
;
4776 const argtype
*arg_type
;
4778 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
4784 if (ie
->target_cmd
== 0) {
4785 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
4786 return -TARGET_ENOSYS
;
4788 if (ie
->target_cmd
== cmd
)
4792 arg_type
= ie
->arg_type
;
4794 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
4797 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
4800 switch(arg_type
[0]) {
4803 ret
= get_errno(ioctl(fd
, ie
->host_cmd
));
4807 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, arg
));
4811 target_size
= thunk_type_size(arg_type
, 0);
4812 switch(ie
->access
) {
4814 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
4815 if (!is_error(ret
)) {
4816 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4818 return -TARGET_EFAULT
;
4819 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
4820 unlock_user(argptr
, arg
, target_size
);
4824 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4826 return -TARGET_EFAULT
;
4827 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4828 unlock_user(argptr
, arg
, 0);
4829 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
4833 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4835 return -TARGET_EFAULT
;
4836 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4837 unlock_user(argptr
, arg
, 0);
4838 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
4839 if (!is_error(ret
)) {
4840 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4842 return -TARGET_EFAULT
;
4843 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
4844 unlock_user(argptr
, arg
, target_size
);
4850 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
4851 (long)cmd
, arg_type
[0]);
4852 ret
= -TARGET_ENOSYS
;
4858 static const bitmask_transtbl iflag_tbl
[] = {
4859 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
4860 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
4861 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
4862 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
4863 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
4864 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
4865 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
4866 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
4867 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
4868 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
4869 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
4870 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
4871 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
4872 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
4876 static const bitmask_transtbl oflag_tbl
[] = {
4877 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
4878 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
4879 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
4880 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
4881 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
4882 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
4883 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
4884 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
4885 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
4886 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
4887 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
4888 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
4889 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
4890 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
4891 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
4892 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
4893 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
4894 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
4895 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
4896 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
4897 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
4898 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
4899 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
4900 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
4904 static const bitmask_transtbl cflag_tbl
[] = {
4905 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
4906 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
4907 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
4908 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
4909 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
4910 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
4911 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
4912 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
4913 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
4914 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
4915 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
4916 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
4917 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
4918 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
4919 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
4920 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
4921 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
4922 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
4923 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
4924 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
4925 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
4926 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
4927 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
4928 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
4929 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
4930 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
4931 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
4932 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
4933 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
4934 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
4935 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
4939 static const bitmask_transtbl lflag_tbl
[] = {
4940 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
4941 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
4942 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
4943 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
4944 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
4945 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
4946 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
4947 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
4948 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
4949 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
4950 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
4951 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
4952 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
4953 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
4954 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
4958 static void target_to_host_termios (void *dst
, const void *src
)
4960 struct host_termios
*host
= dst
;
4961 const struct target_termios
*target
= src
;
4964 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
4966 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
4968 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
4970 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
4971 host
->c_line
= target
->c_line
;
4973 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
4974 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
4975 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
4976 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
4977 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
4978 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
4979 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
4980 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
4981 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
4982 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
4983 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
4984 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
4985 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
4986 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
4987 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
4988 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
4989 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
4990 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
4993 static void host_to_target_termios (void *dst
, const void *src
)
4995 struct target_termios
*target
= dst
;
4996 const struct host_termios
*host
= src
;
4999 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
5001 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
5003 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
5005 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
5006 target
->c_line
= host
->c_line
;
5008 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
5009 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
5010 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
5011 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
5012 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
5013 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
5014 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
5015 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
5016 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
5017 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
5018 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
5019 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
5020 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
5021 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
5022 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
5023 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
5024 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
5025 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
5028 static const StructEntry struct_termios_def
= {
5029 .convert
= { host_to_target_termios
, target_to_host_termios
},
5030 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
5031 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
5034 static bitmask_transtbl mmap_flags_tbl
[] = {
5035 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
5036 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
5037 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
5038 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
, MAP_ANONYMOUS
, MAP_ANONYMOUS
},
5039 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
, MAP_GROWSDOWN
, MAP_GROWSDOWN
},
5040 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
, MAP_DENYWRITE
, MAP_DENYWRITE
},
5041 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
, MAP_EXECUTABLE
, MAP_EXECUTABLE
},
5042 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
5043 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
, MAP_NORESERVE
,
5048 #if defined(TARGET_I386)
5050 /* NOTE: there is really one LDT for all the threads */
5051 static uint8_t *ldt_table
;
5053 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
5060 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
5061 if (size
> bytecount
)
5063 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
5065 return -TARGET_EFAULT
;
5066 /* ??? Should this by byteswapped? */
5067 memcpy(p
, ldt_table
, size
);
5068 unlock_user(p
, ptr
, size
);
5072 /* XXX: add locking support */
5073 static abi_long
write_ldt(CPUX86State
*env
,
5074 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
5076 struct target_modify_ldt_ldt_s ldt_info
;
5077 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5078 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5079 int seg_not_present
, useable
, lm
;
5080 uint32_t *lp
, entry_1
, entry_2
;
5082 if (bytecount
!= sizeof(ldt_info
))
5083 return -TARGET_EINVAL
;
5084 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
5085 return -TARGET_EFAULT
;
5086 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5087 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5088 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5089 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5090 unlock_user_struct(target_ldt_info
, ptr
, 0);
5092 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
5093 return -TARGET_EINVAL
;
5094 seg_32bit
= ldt_info
.flags
& 1;
5095 contents
= (ldt_info
.flags
>> 1) & 3;
5096 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5097 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5098 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5099 useable
= (ldt_info
.flags
>> 6) & 1;
5103 lm
= (ldt_info
.flags
>> 7) & 1;
5105 if (contents
== 3) {
5107 return -TARGET_EINVAL
;
5108 if (seg_not_present
== 0)
5109 return -TARGET_EINVAL
;
5111 /* allocate the LDT */
5113 env
->ldt
.base
= target_mmap(0,
5114 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
5115 PROT_READ
|PROT_WRITE
,
5116 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
5117 if (env
->ldt
.base
== -1)
5118 return -TARGET_ENOMEM
;
5119 memset(g2h(env
->ldt
.base
), 0,
5120 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
5121 env
->ldt
.limit
= 0xffff;
5122 ldt_table
= g2h(env
->ldt
.base
);
5125 /* NOTE: same code as Linux kernel */
5126 /* Allow LDTs to be cleared by the user. */
5127 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5130 read_exec_only
== 1 &&
5132 limit_in_pages
== 0 &&
5133 seg_not_present
== 1 &&
5141 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5142 (ldt_info
.limit
& 0x0ffff);
5143 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5144 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5145 (ldt_info
.limit
& 0xf0000) |
5146 ((read_exec_only
^ 1) << 9) |
5148 ((seg_not_present
^ 1) << 15) |
5150 (limit_in_pages
<< 23) |
5154 entry_2
|= (useable
<< 20);
5156 /* Install the new entry ... */
5158 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
5159 lp
[0] = tswap32(entry_1
);
5160 lp
[1] = tswap32(entry_2
);
5164 /* specific and weird i386 syscalls */
5165 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
5166 unsigned long bytecount
)
5172 ret
= read_ldt(ptr
, bytecount
);
5175 ret
= write_ldt(env
, ptr
, bytecount
, 1);
5178 ret
= write_ldt(env
, ptr
, bytecount
, 0);
5181 ret
= -TARGET_ENOSYS
;
5187 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5188 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5190 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5191 struct target_modify_ldt_ldt_s ldt_info
;
5192 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5193 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5194 int seg_not_present
, useable
, lm
;
5195 uint32_t *lp
, entry_1
, entry_2
;
5198 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5199 if (!target_ldt_info
)
5200 return -TARGET_EFAULT
;
5201 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5202 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5203 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5204 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5205 if (ldt_info
.entry_number
== -1) {
5206 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
5207 if (gdt_table
[i
] == 0) {
5208 ldt_info
.entry_number
= i
;
5209 target_ldt_info
->entry_number
= tswap32(i
);
5214 unlock_user_struct(target_ldt_info
, ptr
, 1);
5216 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
5217 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
5218 return -TARGET_EINVAL
;
5219 seg_32bit
= ldt_info
.flags
& 1;
5220 contents
= (ldt_info
.flags
>> 1) & 3;
5221 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5222 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5223 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5224 useable
= (ldt_info
.flags
>> 6) & 1;
5228 lm
= (ldt_info
.flags
>> 7) & 1;
5231 if (contents
== 3) {
5232 if (seg_not_present
== 0)
5233 return -TARGET_EINVAL
;
5236 /* NOTE: same code as Linux kernel */
5237 /* Allow LDTs to be cleared by the user. */
5238 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5239 if ((contents
== 0 &&
5240 read_exec_only
== 1 &&
5242 limit_in_pages
== 0 &&
5243 seg_not_present
== 1 &&
5251 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5252 (ldt_info
.limit
& 0x0ffff);
5253 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5254 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5255 (ldt_info
.limit
& 0xf0000) |
5256 ((read_exec_only
^ 1) << 9) |
5258 ((seg_not_present
^ 1) << 15) |
5260 (limit_in_pages
<< 23) |
5265 /* Install the new entry ... */
5267 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
5268 lp
[0] = tswap32(entry_1
);
5269 lp
[1] = tswap32(entry_2
);
5273 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5275 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5276 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5277 uint32_t base_addr
, limit
, flags
;
5278 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
5279 int seg_not_present
, useable
, lm
;
5280 uint32_t *lp
, entry_1
, entry_2
;
5282 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5283 if (!target_ldt_info
)
5284 return -TARGET_EFAULT
;
5285 idx
= tswap32(target_ldt_info
->entry_number
);
5286 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
5287 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
5288 unlock_user_struct(target_ldt_info
, ptr
, 1);
5289 return -TARGET_EINVAL
;
5291 lp
= (uint32_t *)(gdt_table
+ idx
);
5292 entry_1
= tswap32(lp
[0]);
5293 entry_2
= tswap32(lp
[1]);
5295 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
5296 contents
= (entry_2
>> 10) & 3;
5297 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
5298 seg_32bit
= (entry_2
>> 22) & 1;
5299 limit_in_pages
= (entry_2
>> 23) & 1;
5300 useable
= (entry_2
>> 20) & 1;
5304 lm
= (entry_2
>> 21) & 1;
5306 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
5307 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
5308 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
5309 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
5310 base_addr
= (entry_1
>> 16) |
5311 (entry_2
& 0xff000000) |
5312 ((entry_2
& 0xff) << 16);
5313 target_ldt_info
->base_addr
= tswapal(base_addr
);
5314 target_ldt_info
->limit
= tswap32(limit
);
5315 target_ldt_info
->flags
= tswap32(flags
);
5316 unlock_user_struct(target_ldt_info
, ptr
, 1);
5319 #endif /* TARGET_I386 && TARGET_ABI32 */
5321 #ifndef TARGET_ABI32
5322 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
5329 case TARGET_ARCH_SET_GS
:
5330 case TARGET_ARCH_SET_FS
:
5331 if (code
== TARGET_ARCH_SET_GS
)
5335 cpu_x86_load_seg(env
, idx
, 0);
5336 env
->segs
[idx
].base
= addr
;
5338 case TARGET_ARCH_GET_GS
:
5339 case TARGET_ARCH_GET_FS
:
5340 if (code
== TARGET_ARCH_GET_GS
)
5344 val
= env
->segs
[idx
].base
;
5345 if (put_user(val
, addr
, abi_ulong
))
5346 ret
= -TARGET_EFAULT
;
5349 ret
= -TARGET_EINVAL
;
5356 #endif /* defined(TARGET_I386) */
5358 #define NEW_STACK_SIZE 0x40000
5361 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
5364 pthread_mutex_t mutex
;
5365 pthread_cond_t cond
;
5368 abi_ulong child_tidptr
;
5369 abi_ulong parent_tidptr
;
5373 static void *clone_func(void *arg
)
5375 new_thread_info
*info
= arg
;
5380 rcu_register_thread();
5382 cpu
= ENV_GET_CPU(env
);
5384 ts
= (TaskState
*)cpu
->opaque
;
5385 info
->tid
= gettid();
5386 cpu
->host_tid
= info
->tid
;
5388 if (info
->child_tidptr
)
5389 put_user_u32(info
->tid
, info
->child_tidptr
);
5390 if (info
->parent_tidptr
)
5391 put_user_u32(info
->tid
, info
->parent_tidptr
);
5392 /* Enable signals. */
5393 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
5394 /* Signal to the parent that we're ready. */
5395 pthread_mutex_lock(&info
->mutex
);
5396 pthread_cond_broadcast(&info
->cond
);
5397 pthread_mutex_unlock(&info
->mutex
);
5398 /* Wait until the parent has finshed initializing the tls state. */
5399 pthread_mutex_lock(&clone_lock
);
5400 pthread_mutex_unlock(&clone_lock
);
5406 /* do_fork() Must return host values and target errnos (unlike most
5407 do_*() functions). */
5408 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
5409 abi_ulong parent_tidptr
, target_ulong newtls
,
5410 abi_ulong child_tidptr
)
5412 CPUState
*cpu
= ENV_GET_CPU(env
);
5416 CPUArchState
*new_env
;
5417 unsigned int nptl_flags
;
5420 /* Emulate vfork() with fork() */
5421 if (flags
& CLONE_VFORK
)
5422 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
5424 if (flags
& CLONE_VM
) {
5425 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
5426 new_thread_info info
;
5427 pthread_attr_t attr
;
5429 ts
= g_new0(TaskState
, 1);
5430 init_task_state(ts
);
5431 /* we create a new CPU instance. */
5432 new_env
= cpu_copy(env
);
5433 /* Init regs that differ from the parent. */
5434 cpu_clone_regs(new_env
, newsp
);
5435 new_cpu
= ENV_GET_CPU(new_env
);
5436 new_cpu
->opaque
= ts
;
5437 ts
->bprm
= parent_ts
->bprm
;
5438 ts
->info
= parent_ts
->info
;
5439 ts
->signal_mask
= parent_ts
->signal_mask
;
5441 flags
&= ~CLONE_NPTL_FLAGS2
;
5443 if (nptl_flags
& CLONE_CHILD_CLEARTID
) {
5444 ts
->child_tidptr
= child_tidptr
;
5447 if (nptl_flags
& CLONE_SETTLS
)
5448 cpu_set_tls (new_env
, newtls
);
5450 /* Grab a mutex so that thread setup appears atomic. */
5451 pthread_mutex_lock(&clone_lock
);
5453 memset(&info
, 0, sizeof(info
));
5454 pthread_mutex_init(&info
.mutex
, NULL
);
5455 pthread_mutex_lock(&info
.mutex
);
5456 pthread_cond_init(&info
.cond
, NULL
);
5458 if (nptl_flags
& CLONE_CHILD_SETTID
)
5459 info
.child_tidptr
= child_tidptr
;
5460 if (nptl_flags
& CLONE_PARENT_SETTID
)
5461 info
.parent_tidptr
= parent_tidptr
;
5463 ret
= pthread_attr_init(&attr
);
5464 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
5465 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
5466 /* It is not safe to deliver signals until the child has finished
5467 initializing, so temporarily block all signals. */
5468 sigfillset(&sigmask
);
5469 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
5471 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
5472 /* TODO: Free new CPU state if thread creation failed. */
5474 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
5475 pthread_attr_destroy(&attr
);
5477 /* Wait for the child to initialize. */
5478 pthread_cond_wait(&info
.cond
, &info
.mutex
);
5480 if (flags
& CLONE_PARENT_SETTID
)
5481 put_user_u32(ret
, parent_tidptr
);
5485 pthread_mutex_unlock(&info
.mutex
);
5486 pthread_cond_destroy(&info
.cond
);
5487 pthread_mutex_destroy(&info
.mutex
);
5488 pthread_mutex_unlock(&clone_lock
);
5490 /* if no CLONE_VM, we consider it is a fork */
5491 if ((flags
& ~(CSIGNAL
| CLONE_NPTL_FLAGS2
)) != 0) {
5492 return -TARGET_EINVAL
;
5495 if (block_signals()) {
5496 return -TARGET_ERESTARTSYS
;
5502 /* Child Process. */
5504 cpu_clone_regs(env
, newsp
);
5506 /* There is a race condition here. The parent process could
5507 theoretically read the TID in the child process before the child
5508 tid is set. This would require using either ptrace
5509 (not implemented) or having *_tidptr to point at a shared memory
5510 mapping. We can't repeat the spinlock hack used above because
5511 the child process gets its own copy of the lock. */
5512 if (flags
& CLONE_CHILD_SETTID
)
5513 put_user_u32(gettid(), child_tidptr
);
5514 if (flags
& CLONE_PARENT_SETTID
)
5515 put_user_u32(gettid(), parent_tidptr
);
5516 ts
= (TaskState
*)cpu
->opaque
;
5517 if (flags
& CLONE_SETTLS
)
5518 cpu_set_tls (env
, newtls
);
5519 if (flags
& CLONE_CHILD_CLEARTID
)
5520 ts
->child_tidptr
= child_tidptr
;
5528 /* warning : doesn't handle linux specific flags... */
5529 static int target_to_host_fcntl_cmd(int cmd
)
5532 case TARGET_F_DUPFD
:
5533 case TARGET_F_GETFD
:
5534 case TARGET_F_SETFD
:
5535 case TARGET_F_GETFL
:
5536 case TARGET_F_SETFL
:
5538 case TARGET_F_GETLK
:
5540 case TARGET_F_SETLK
:
5542 case TARGET_F_SETLKW
:
5544 case TARGET_F_GETOWN
:
5546 case TARGET_F_SETOWN
:
5548 case TARGET_F_GETSIG
:
5550 case TARGET_F_SETSIG
:
5552 #if TARGET_ABI_BITS == 32
5553 case TARGET_F_GETLK64
:
5555 case TARGET_F_SETLK64
:
5557 case TARGET_F_SETLKW64
:
5560 case TARGET_F_SETLEASE
:
5562 case TARGET_F_GETLEASE
:
5564 #ifdef F_DUPFD_CLOEXEC
5565 case TARGET_F_DUPFD_CLOEXEC
:
5566 return F_DUPFD_CLOEXEC
;
5568 case TARGET_F_NOTIFY
:
5571 case TARGET_F_GETOWN_EX
:
5575 case TARGET_F_SETOWN_EX
:
5579 return -TARGET_EINVAL
;
5581 return -TARGET_EINVAL
;
5584 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
5585 static const bitmask_transtbl flock_tbl
[] = {
5586 TRANSTBL_CONVERT(F_RDLCK
),
5587 TRANSTBL_CONVERT(F_WRLCK
),
5588 TRANSTBL_CONVERT(F_UNLCK
),
5589 TRANSTBL_CONVERT(F_EXLCK
),
5590 TRANSTBL_CONVERT(F_SHLCK
),
5594 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
5597 struct target_flock
*target_fl
;
5598 struct flock64 fl64
;
5599 struct target_flock64
*target_fl64
;
5601 struct f_owner_ex fox
;
5602 struct target_f_owner_ex
*target_fox
;
5605 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
5607 if (host_cmd
== -TARGET_EINVAL
)
5611 case TARGET_F_GETLK
:
5612 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
5613 return -TARGET_EFAULT
;
5615 target_to_host_bitmask(tswap16(target_fl
->l_type
), flock_tbl
);
5616 fl
.l_whence
= tswap16(target_fl
->l_whence
);
5617 fl
.l_start
= tswapal(target_fl
->l_start
);
5618 fl
.l_len
= tswapal(target_fl
->l_len
);
5619 fl
.l_pid
= tswap32(target_fl
->l_pid
);
5620 unlock_user_struct(target_fl
, arg
, 0);
5621 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
5623 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg
, 0))
5624 return -TARGET_EFAULT
;
5626 host_to_target_bitmask(tswap16(fl
.l_type
), flock_tbl
);
5627 target_fl
->l_whence
= tswap16(fl
.l_whence
);
5628 target_fl
->l_start
= tswapal(fl
.l_start
);
5629 target_fl
->l_len
= tswapal(fl
.l_len
);
5630 target_fl
->l_pid
= tswap32(fl
.l_pid
);
5631 unlock_user_struct(target_fl
, arg
, 1);
5635 case TARGET_F_SETLK
:
5636 case TARGET_F_SETLKW
:
5637 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
5638 return -TARGET_EFAULT
;
5640 target_to_host_bitmask(tswap16(target_fl
->l_type
), flock_tbl
);
5641 fl
.l_whence
= tswap16(target_fl
->l_whence
);
5642 fl
.l_start
= tswapal(target_fl
->l_start
);
5643 fl
.l_len
= tswapal(target_fl
->l_len
);
5644 fl
.l_pid
= tswap32(target_fl
->l_pid
);
5645 unlock_user_struct(target_fl
, arg
, 0);
5646 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
5649 case TARGET_F_GETLK64
:
5650 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
5651 return -TARGET_EFAULT
;
5653 target_to_host_bitmask(tswap16(target_fl64
->l_type
), flock_tbl
) >> 1;
5654 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
5655 fl64
.l_start
= tswap64(target_fl64
->l_start
);
5656 fl64
.l_len
= tswap64(target_fl64
->l_len
);
5657 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
5658 unlock_user_struct(target_fl64
, arg
, 0);
5659 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
5661 if (!lock_user_struct(VERIFY_WRITE
, target_fl64
, arg
, 0))
5662 return -TARGET_EFAULT
;
5663 target_fl64
->l_type
=
5664 host_to_target_bitmask(tswap16(fl64
.l_type
), flock_tbl
) >> 1;
5665 target_fl64
->l_whence
= tswap16(fl64
.l_whence
);
5666 target_fl64
->l_start
= tswap64(fl64
.l_start
);
5667 target_fl64
->l_len
= tswap64(fl64
.l_len
);
5668 target_fl64
->l_pid
= tswap32(fl64
.l_pid
);
5669 unlock_user_struct(target_fl64
, arg
, 1);
5672 case TARGET_F_SETLK64
:
5673 case TARGET_F_SETLKW64
:
5674 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
5675 return -TARGET_EFAULT
;
5677 target_to_host_bitmask(tswap16(target_fl64
->l_type
), flock_tbl
) >> 1;
5678 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
5679 fl64
.l_start
= tswap64(target_fl64
->l_start
);
5680 fl64
.l_len
= tswap64(target_fl64
->l_len
);
5681 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
5682 unlock_user_struct(target_fl64
, arg
, 0);
5683 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
5686 case TARGET_F_GETFL
:
5687 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
5689 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
5693 case TARGET_F_SETFL
:
5694 ret
= get_errno(fcntl(fd
, host_cmd
, target_to_host_bitmask(arg
, fcntl_flags_tbl
)));
5698 case TARGET_F_GETOWN_EX
:
5699 ret
= get_errno(fcntl(fd
, host_cmd
, &fox
));
5701 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
5702 return -TARGET_EFAULT
;
5703 target_fox
->type
= tswap32(fox
.type
);
5704 target_fox
->pid
= tswap32(fox
.pid
);
5705 unlock_user_struct(target_fox
, arg
, 1);
5711 case TARGET_F_SETOWN_EX
:
5712 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
5713 return -TARGET_EFAULT
;
5714 fox
.type
= tswap32(target_fox
->type
);
5715 fox
.pid
= tswap32(target_fox
->pid
);
5716 unlock_user_struct(target_fox
, arg
, 0);
5717 ret
= get_errno(fcntl(fd
, host_cmd
, &fox
));
5721 case TARGET_F_SETOWN
:
5722 case TARGET_F_GETOWN
:
5723 case TARGET_F_SETSIG
:
5724 case TARGET_F_GETSIG
:
5725 case TARGET_F_SETLEASE
:
5726 case TARGET_F_GETLEASE
:
5727 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
5731 ret
= get_errno(fcntl(fd
, cmd
, arg
));
5739 static inline int high2lowuid(int uid
)
5747 static inline int high2lowgid(int gid
)
5755 static inline int low2highuid(int uid
)
5757 if ((int16_t)uid
== -1)
5763 static inline int low2highgid(int gid
)
5765 if ((int16_t)gid
== -1)
5770 static inline int tswapid(int id
)
5775 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
5777 #else /* !USE_UID16 */
5778 static inline int high2lowuid(int uid
)
5782 static inline int high2lowgid(int gid
)
5786 static inline int low2highuid(int uid
)
5790 static inline int low2highgid(int gid
)
5794 static inline int tswapid(int id
)
5799 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
5801 #endif /* USE_UID16 */
5803 /* We must do direct syscalls for setting UID/GID, because we want to
5804 * implement the Linux system call semantics of "change only for this thread",
5805 * not the libc/POSIX semantics of "change for all threads in process".
5806 * (See http://ewontfix.com/17/ for more details.)
5807 * We use the 32-bit version of the syscalls if present; if it is not
5808 * then either the host architecture supports 32-bit UIDs natively with
5809 * the standard syscall, or the 16-bit UID is the best we can do.
5811 #ifdef __NR_setuid32
5812 #define __NR_sys_setuid __NR_setuid32
5814 #define __NR_sys_setuid __NR_setuid
5816 #ifdef __NR_setgid32
5817 #define __NR_sys_setgid __NR_setgid32
5819 #define __NR_sys_setgid __NR_setgid
5821 #ifdef __NR_setresuid32
5822 #define __NR_sys_setresuid __NR_setresuid32
5824 #define __NR_sys_setresuid __NR_setresuid
5826 #ifdef __NR_setresgid32
5827 #define __NR_sys_setresgid __NR_setresgid32
5829 #define __NR_sys_setresgid __NR_setresgid
5832 _syscall1(int, sys_setuid
, uid_t
, uid
)
5833 _syscall1(int, sys_setgid
, gid_t
, gid
)
5834 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
5835 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
5837 void syscall_init(void)
5840 const argtype
*arg_type
;
5844 thunk_init(STRUCT_MAX
);
5846 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
5847 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
5848 #include "syscall_types.h"
5850 #undef STRUCT_SPECIAL
5852 /* Build target_to_host_errno_table[] table from
5853 * host_to_target_errno_table[]. */
5854 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
5855 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
5858 /* we patch the ioctl size if necessary. We rely on the fact that
5859 no ioctl has all the bits at '1' in the size field */
5861 while (ie
->target_cmd
!= 0) {
5862 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
5863 TARGET_IOC_SIZEMASK
) {
5864 arg_type
= ie
->arg_type
;
5865 if (arg_type
[0] != TYPE_PTR
) {
5866 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
5871 size
= thunk_type_size(arg_type
, 0);
5872 ie
->target_cmd
= (ie
->target_cmd
&
5873 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
5874 (size
<< TARGET_IOC_SIZESHIFT
);
5877 /* automatic consistency check if same arch */
5878 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
5879 (defined(__x86_64__) && defined(TARGET_X86_64))
5880 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
5881 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
5882 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#if TARGET_ABI_BITS == 32
/* Assemble a 64-bit file offset from the two 32-bit halves a 32-bit ABI
 * passes in separate registers; which half is high depends on guest
 * endianness. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABI: the first argument already holds the whole offset. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
5905 #ifdef TARGET_NR_truncate64
5906 static inline abi_long
target_truncate64(void *cpu_env
, const char *arg1
,
5911 if (regpairs_aligned(cpu_env
)) {
5915 return get_errno(truncate64(arg1
, target_offset64(arg2
, arg3
)));
5919 #ifdef TARGET_NR_ftruncate64
5920 static inline abi_long
target_ftruncate64(void *cpu_env
, abi_long arg1
,
5925 if (regpairs_aligned(cpu_env
)) {
5929 return get_errno(ftruncate64(arg1
, target_offset64(arg2
, arg3
)));
5933 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
5934 abi_ulong target_addr
)
5936 struct target_timespec
*target_ts
;
5938 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
5939 return -TARGET_EFAULT
;
5940 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
5941 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
5942 unlock_user_struct(target_ts
, target_addr
, 0);
5946 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
5947 struct timespec
*host_ts
)
5949 struct target_timespec
*target_ts
;
5951 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
5952 return -TARGET_EFAULT
;
5953 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
5954 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
5955 unlock_user_struct(target_ts
, target_addr
, 1);
5959 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
5960 abi_ulong target_addr
)
5962 struct target_itimerspec
*target_itspec
;
5964 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
5965 return -TARGET_EFAULT
;
5968 host_itspec
->it_interval
.tv_sec
=
5969 tswapal(target_itspec
->it_interval
.tv_sec
);
5970 host_itspec
->it_interval
.tv_nsec
=
5971 tswapal(target_itspec
->it_interval
.tv_nsec
);
5972 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
5973 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
5975 unlock_user_struct(target_itspec
, target_addr
, 1);
5979 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
5980 struct itimerspec
*host_its
)
5982 struct target_itimerspec
*target_itspec
;
5984 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
5985 return -TARGET_EFAULT
;
5988 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
5989 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
5991 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
5992 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
5994 unlock_user_struct(target_itspec
, target_addr
, 0);
5998 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
5999 abi_ulong target_addr
)
6001 struct target_sigevent
*target_sevp
;
6003 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
6004 return -TARGET_EFAULT
;
6007 /* This union is awkward on 64 bit systems because it has a 32 bit
6008 * integer and a pointer in it; we follow the conversion approach
6009 * used for handling sigval types in signal.c so the guest should get
6010 * the correct value back even if we did a 64 bit byteswap and it's
6011 * using the 32 bit integer.
6013 host_sevp
->sigev_value
.sival_ptr
=
6014 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
6015 host_sevp
->sigev_signo
=
6016 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
6017 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
6018 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
6020 unlock_user_struct(target_sevp
, target_addr
, 1);
6024 #if defined(TARGET_NR_mlockall)
6025 static inline int target_to_host_mlockall_arg(int arg
)
6029 if (arg
& TARGET_MLOCKALL_MCL_CURRENT
) {
6030 result
|= MCL_CURRENT
;
6032 if (arg
& TARGET_MLOCKALL_MCL_FUTURE
) {
6033 result
|= MCL_FUTURE
;
6039 static inline abi_long
host_to_target_stat64(void *cpu_env
,
6040 abi_ulong target_addr
,
6041 struct stat
*host_st
)
6043 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6044 if (((CPUARMState
*)cpu_env
)->eabi
) {
6045 struct target_eabi_stat64
*target_st
;
6047 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
6048 return -TARGET_EFAULT
;
6049 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
6050 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
6051 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
6052 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6053 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
6055 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
6056 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
6057 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
6058 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
6059 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
6060 __put_user(host_st
->st_size
, &target_st
->st_size
);
6061 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
6062 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
6063 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
6064 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
6065 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
6066 unlock_user_struct(target_st
, target_addr
, 1);
6070 #if defined(TARGET_HAS_STRUCT_STAT64)
6071 struct target_stat64
*target_st
;
6073 struct target_stat
*target_st
;
6076 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
6077 return -TARGET_EFAULT
;
6078 memset(target_st
, 0, sizeof(*target_st
));
6079 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
6080 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
6081 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6082 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
6084 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
6085 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
6086 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
6087 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
6088 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
6089 /* XXX: better use of kernel struct */
6090 __put_user(host_st
->st_size
, &target_st
->st_size
);
6091 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
6092 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
6093 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
6094 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
6095 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
6096 unlock_user_struct(target_st
, target_addr
, 1);
6102 /* ??? Using host futex calls even when target atomic operations
6103 are not really atomic probably breaks things. However implementing
6104 futexes locally would make futexes shared between multiple processes
6105 tricky. However they're probably useless because guest atomic
6106 operations won't work either. */
6107 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
6108 target_ulong uaddr2
, int val3
)
6110 struct timespec ts
, *pts
;
6113 /* ??? We assume FUTEX_* constants are the same on both host
6115 #ifdef FUTEX_CMD_MASK
6116 base_op
= op
& FUTEX_CMD_MASK
;
6122 case FUTEX_WAIT_BITSET
:
6125 target_to_host_timespec(pts
, timeout
);
6129 return get_errno(safe_futex(g2h(uaddr
), op
, tswap32(val
),
6132 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
6134 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
6136 case FUTEX_CMP_REQUEUE
:
6138 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6139 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6140 But the prototype takes a `struct timespec *'; insert casts
6141 to satisfy the compiler. We do not need to tswap TIMEOUT
6142 since it's not compared to guest memory. */
6143 pts
= (struct timespec
*)(uintptr_t) timeout
;
6144 return get_errno(safe_futex(g2h(uaddr
), op
, val
, pts
,
6146 (base_op
== FUTEX_CMP_REQUEUE
6150 return -TARGET_ENOSYS
;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate name_to_handle_at(2).  'handle' points to a guest
 * struct file_handle whose handle_bytes field is read first to size
 * the buffer; the resulting handle and mount id are copied back to
 * guest memory with the 32-bit header fields byte-swapped.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate open_by_handle_at(2): copy the guest file_handle into a host
 * buffer (swapping the 32-bit header fields) and open it with
 * host-translated open flags.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                                      target_to_host_bitmask(flags,
                                                             fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
6241 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6243 /* signalfd siginfo conversion */
/* Byte-swap one signalfd_siginfo record from host to target order.
 * NOTE(review): the only caller passes the same buffer for tinfo and
 * info (in-place conversion), so the tinfo/info reads below alias each
 * other; the BUS_MCEERR_* check must therefore run BEFORE any field is
 * swapped, and each field may only be swapped once.
 */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */

#ifdef BUS_MCEERR_AO
    if (tinfo->ssi_signo == SIGBUS &&
        (tinfo->ssi_code == BUS_MCEERR_AR ||
         tinfo->ssi_code == BUS_MCEERR_AO)) {
        /* ssi_addr_lsb lives in the padding just past ssi_addr */
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    /* reads tinfo (not info) — equivalent here since they alias */
    tinfo->ssi_errno = tswap32(tinfo->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
6283 static abi_long
host_to_target_data_signalfd(void *buf
, size_t len
)
6287 for (i
= 0; i
< len
; i
+= sizeof(struct signalfd_siginfo
)) {
6288 host_to_target_signalfd_siginfo(buf
+ i
, buf
+ i
);
/* fd translator registered on fds returned by signalfd(): only the
 * host-to-target read path needs conversion (no write-side hook set).
 */
static TargetFdTrans target_signalfd_trans = {
    .host_to_target_data = host_to_target_data_signalfd,
};
6298 static abi_long
do_signalfd4(int fd
, abi_long mask
, int flags
)
6301 target_sigset_t
*target_mask
;
6305 if (flags
& ~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
)) {
6306 return -TARGET_EINVAL
;
6308 if (!lock_user_struct(VERIFY_READ
, target_mask
, mask
, 1)) {
6309 return -TARGET_EFAULT
;
6312 target_to_host_sigset(&host_mask
, target_mask
);
6314 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
6316 ret
= get_errno(signalfd(fd
, &host_mask
, host_flags
));
6318 fd_trans_register(ret
, &target_signalfd_trans
);
6321 unlock_user_struct(target_mask
, mask
, 0);
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    /* Terminated by signal: swap the low 7 signal bits, keep the rest. */
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    /* Stopped: the stop signal lives in bits 8..15. */
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
/* Synthesize /proc/self/cmdline for the guest by copying the host file
 * into 'fd', skipping the first NUL-terminated word (the path of the
 * qemu-*-static interpreter) so the guest sees its own argv[0] first.
 * Returns 0 on success, negative on error.
 *
 * Fix: on a short/failed write to 'fd' the function returned -1 without
 * closing fd_orig, leaking the /proc file descriptor.
 */
static int open_self_cmdline(void *cpu_env, int fd)
{
    int fd_orig = -1;
    bool word_skipped = false;

    fd_orig = open("/proc/self/cmdline", O_RDONLY);
    if (fd_orig < 0) {
        return fd_orig;
    }

    while (true) {
        ssize_t nb_read;
        char buf[128];
        char *cp_buf = buf;

        nb_read = read(fd_orig, buf, sizeof(buf));
        if (nb_read < 0) {
            fd_orig = close(fd_orig);
            return -1;
        } else if (nb_read == 0) {
            break;
        }

        if (!word_skipped) {
            /* Skip the first string, which is the path to qemu-*-static
               instead of the actual command. */
            cp_buf = memchr(buf, 0, sizeof(buf));
            if (cp_buf) {
                /* Null byte found, skip one string */
                cp_buf++;
                nb_read -= cp_buf - buf;
                word_skipped = true;
            }
        }

        if (word_skipped) {
            if (write(fd, cp_buf, nb_read) != nb_read) {
                close(fd_orig);   /* don't leak the /proc fd */
                return -1;
            }
        }
    }

    return close(fd_orig);
}
6391 static int open_self_maps(void *cpu_env
, int fd
)
6393 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
6394 TaskState
*ts
= cpu
->opaque
;
6400 fp
= fopen("/proc/self/maps", "r");
6405 while ((read
= getline(&line
, &len
, fp
)) != -1) {
6406 int fields
, dev_maj
, dev_min
, inode
;
6407 uint64_t min
, max
, offset
;
6408 char flag_r
, flag_w
, flag_x
, flag_p
;
6409 char path
[512] = "";
6410 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
6411 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
6412 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
6414 if ((fields
< 10) || (fields
> 11)) {
6417 if (h2g_valid(min
)) {
6418 int flags
= page_get_flags(h2g(min
));
6419 max
= h2g_valid(max
- 1) ? max
: (uintptr_t)g2h(GUEST_ADDR_MAX
);
6420 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
6423 if (h2g(min
) == ts
->info
->stack_limit
) {
6424 pstrcpy(path
, sizeof(path
), " [stack]");
6426 dprintf(fd
, TARGET_ABI_FMT_lx
"-" TARGET_ABI_FMT_lx
6427 " %c%c%c%c %08" PRIx64
" %02x:%02x %d %s%s\n",
6428 h2g(min
), h2g(max
- 1) + 1, flag_r
, flag_w
,
6429 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
6430 path
[0] ? " " : "", path
);
6440 static int open_self_stat(void *cpu_env
, int fd
)
6442 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
6443 TaskState
*ts
= cpu
->opaque
;
6444 abi_ulong start_stack
= ts
->info
->start_stack
;
6447 for (i
= 0; i
< 44; i
++) {
6455 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
6456 } else if (i
== 1) {
6458 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
6459 } else if (i
== 27) {
6462 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
6464 /* for the rest, there is MasterCard */
6465 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
6469 if (write(fd
, buf
, len
) != len
) {
6477 static int open_self_auxv(void *cpu_env
, int fd
)
6479 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
6480 TaskState
*ts
= cpu
->opaque
;
6481 abi_ulong auxv
= ts
->info
->saved_auxv
;
6482 abi_ulong len
= ts
->info
->auxv_len
;
6486 * Auxiliary vector is stored in target process stack.
6487 * read in whole auxv vector and copy it to file
6489 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
6493 r
= write(fd
, ptr
, len
);
6500 lseek(fd
, 0, SEEK_SET
);
6501 unlock_user(ptr
, auxv
, len
);
/* Return 1 if 'filename' names entry 'entry' of this process's /proc
 * directory — i.e. "/proc/self/<entry>" or "/proc/<our-pid>/<entry>" —
 * and 0 otherwise.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    static const char proc_prefix[] = "/proc/";
    char myself[80];

    if (strncmp(filename, proc_prefix, strlen(proc_prefix)) != 0) {
        return 0;
    }
    filename += strlen(proc_prefix);

    if (strncmp(filename, "self/", strlen("self/")) == 0) {
        filename += strlen("self/");
    } else if (*filename >= '1' && *filename <= '9') {
        /* a numeric directory only matches our own pid */
        snprintf(myself, sizeof(myself), "%d/", getpid());
        if (strncmp(filename, myself, strlen(myself)) != 0) {
            return 0;
        }
        filename += strlen(myself);
    } else {
        return 0;
    }

    return strcmp(filename, entry) == 0;
}
6531 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-path comparator used by the fake_open table (contrast with
 * is_proc_myself, which matches relative /proc entries).
 */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
6537 static int open_net_route(void *cpu_env
, int fd
)
6544 fp
= fopen("/proc/net/route", "r");
6551 read
= getline(&line
, &len
, fp
);
6552 dprintf(fd
, "%s", line
);
6556 while ((read
= getline(&line
, &len
, fp
)) != -1) {
6558 uint32_t dest
, gw
, mask
;
6559 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
6560 sscanf(line
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6561 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
6562 &mask
, &mtu
, &window
, &irtt
);
6563 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6564 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
6565 metric
, tswap32(mask
), mtu
, window
, irtt
);
6575 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
6578 const char *filename
;
6579 int (*fill
)(void *cpu_env
, int fd
);
6580 int (*cmp
)(const char *s1
, const char *s2
);
6582 const struct fake_open
*fake_open
;
6583 static const struct fake_open fakes
[] = {
6584 { "maps", open_self_maps
, is_proc_myself
},
6585 { "stat", open_self_stat
, is_proc_myself
},
6586 { "auxv", open_self_auxv
, is_proc_myself
},
6587 { "cmdline", open_self_cmdline
, is_proc_myself
},
6588 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6589 { "/proc/net/route", open_net_route
, is_proc
},
6591 { NULL
, NULL
, NULL
}
6594 if (is_proc_myself(pathname
, "exe")) {
6595 int execfd
= qemu_getauxval(AT_EXECFD
);
6596 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
6599 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
6600 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
6605 if (fake_open
->filename
) {
6607 char filename
[PATH_MAX
];
6610 /* create temporary file to map stat to */
6611 tmpdir
= getenv("TMPDIR");
6614 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
6615 fd
= mkstemp(filename
);
6621 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
6627 lseek(fd
, 0, SEEK_SET
);
6632 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
6635 #define TIMER_MAGIC 0x0caf0000
6636 #define TIMER_MAGIC_MASK 0xffff0000
6638 /* Convert QEMU provided timer ID back to internal 16bit index format */
6639 static target_timer_t
get_timer_id(abi_long arg
)
6641 target_timer_t timerid
= arg
;
6643 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
6644 return -TARGET_EINVAL
;
6649 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
6650 return -TARGET_EINVAL
;
6656 /* do_syscall() should always have a single exit point at the end so
6657 that actions, such as logging of syscall results, can be performed.
6658 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
6659 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
6660 abi_long arg2
, abi_long arg3
, abi_long arg4
,
6661 abi_long arg5
, abi_long arg6
, abi_long arg7
,
6664 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
6670 #if defined(DEBUG_ERESTARTSYS)
6671 /* Debug-only code for exercising the syscall-restart code paths
6672 * in the per-architecture cpu main loops: restart every syscall
6673 * the guest makes once before letting it through.
6680 return -TARGET_ERESTARTSYS
;
6686 gemu_log("syscall %d", num
);
6689 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6692 case TARGET_NR_exit
:
6693 /* In old applications this may be used to implement _exit(2).
6694 However in threaded applictions it is used for thread termination,
6695 and _exit_group is used for application termination.
6696 Do thread termination if we have more then one thread. */
6698 if (block_signals()) {
6699 ret
= -TARGET_ERESTARTSYS
;
6703 if (CPU_NEXT(first_cpu
)) {
6707 /* Remove the CPU from the list. */
6708 QTAILQ_REMOVE(&cpus
, cpu
, node
);
6711 if (ts
->child_tidptr
) {
6712 put_user_u32(0, ts
->child_tidptr
);
6713 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
6717 object_unref(OBJECT(cpu
));
6719 rcu_unregister_thread();
6725 gdb_exit(cpu_env
, arg1
);
6727 ret
= 0; /* avoid warning */
6729 case TARGET_NR_read
:
6733 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
6735 ret
= get_errno(safe_read(arg1
, p
, arg3
));
6737 fd_trans_host_to_target_data(arg1
)) {
6738 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
6740 unlock_user(p
, arg2
, ret
);
6743 case TARGET_NR_write
:
6744 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
6746 ret
= get_errno(safe_write(arg1
, p
, arg3
));
6747 unlock_user(p
, arg2
, 0);
6749 #ifdef TARGET_NR_open
6750 case TARGET_NR_open
:
6751 if (!(p
= lock_user_string(arg1
)))
6753 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
6754 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
6756 fd_trans_unregister(ret
);
6757 unlock_user(p
, arg1
, 0);
6760 case TARGET_NR_openat
:
6761 if (!(p
= lock_user_string(arg2
)))
6763 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
6764 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
6766 fd_trans_unregister(ret
);
6767 unlock_user(p
, arg2
, 0);
6769 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6770 case TARGET_NR_name_to_handle_at
:
6771 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
6774 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6775 case TARGET_NR_open_by_handle_at
:
6776 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
6777 fd_trans_unregister(ret
);
6780 case TARGET_NR_close
:
6781 fd_trans_unregister(arg1
);
6782 ret
= get_errno(close(arg1
));
6787 #ifdef TARGET_NR_fork
6788 case TARGET_NR_fork
:
6789 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
6792 #ifdef TARGET_NR_waitpid
6793 case TARGET_NR_waitpid
:
6796 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
6797 if (!is_error(ret
) && arg2
&& ret
6798 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
6803 #ifdef TARGET_NR_waitid
6804 case TARGET_NR_waitid
:
6808 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
6809 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
6810 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
6812 host_to_target_siginfo(p
, &info
);
6813 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
6818 #ifdef TARGET_NR_creat /* not on alpha */
6819 case TARGET_NR_creat
:
6820 if (!(p
= lock_user_string(arg1
)))
6822 ret
= get_errno(creat(p
, arg2
));
6823 fd_trans_unregister(ret
);
6824 unlock_user(p
, arg1
, 0);
6827 #ifdef TARGET_NR_link
6828 case TARGET_NR_link
:
6831 p
= lock_user_string(arg1
);
6832 p2
= lock_user_string(arg2
);
6834 ret
= -TARGET_EFAULT
;
6836 ret
= get_errno(link(p
, p2
));
6837 unlock_user(p2
, arg2
, 0);
6838 unlock_user(p
, arg1
, 0);
6842 #if defined(TARGET_NR_linkat)
6843 case TARGET_NR_linkat
:
6848 p
= lock_user_string(arg2
);
6849 p2
= lock_user_string(arg4
);
6851 ret
= -TARGET_EFAULT
;
6853 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
6854 unlock_user(p
, arg2
, 0);
6855 unlock_user(p2
, arg4
, 0);
6859 #ifdef TARGET_NR_unlink
6860 case TARGET_NR_unlink
:
6861 if (!(p
= lock_user_string(arg1
)))
6863 ret
= get_errno(unlink(p
));
6864 unlock_user(p
, arg1
, 0);
6867 #if defined(TARGET_NR_unlinkat)
6868 case TARGET_NR_unlinkat
:
6869 if (!(p
= lock_user_string(arg2
)))
6871 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
6872 unlock_user(p
, arg2
, 0);
6875 case TARGET_NR_execve
:
6877 char **argp
, **envp
;
6880 abi_ulong guest_argp
;
6881 abi_ulong guest_envp
;
6888 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
6889 if (get_user_ual(addr
, gp
))
6897 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
6898 if (get_user_ual(addr
, gp
))
6905 argp
= alloca((argc
+ 1) * sizeof(void *));
6906 envp
= alloca((envc
+ 1) * sizeof(void *));
6908 for (gp
= guest_argp
, q
= argp
; gp
;
6909 gp
+= sizeof(abi_ulong
), q
++) {
6910 if (get_user_ual(addr
, gp
))
6914 if (!(*q
= lock_user_string(addr
)))
6916 total_size
+= strlen(*q
) + 1;
6920 for (gp
= guest_envp
, q
= envp
; gp
;
6921 gp
+= sizeof(abi_ulong
), q
++) {
6922 if (get_user_ual(addr
, gp
))
6926 if (!(*q
= lock_user_string(addr
)))
6928 total_size
+= strlen(*q
) + 1;
6932 if (!(p
= lock_user_string(arg1
)))
6934 /* Although execve() is not an interruptible syscall it is
6935 * a special case where we must use the safe_syscall wrapper:
6936 * if we allow a signal to happen before we make the host
6937 * syscall then we will 'lose' it, because at the point of
6938 * execve the process leaves QEMU's control. So we use the
6939 * safe syscall wrapper to ensure that we either take the
6940 * signal as a guest signal, or else it does not happen
6941 * before the execve completes and makes it the other
6942 * program's problem.
6944 ret
= get_errno(safe_execve(p
, argp
, envp
));
6945 unlock_user(p
, arg1
, 0);
6950 ret
= -TARGET_EFAULT
;
6953 for (gp
= guest_argp
, q
= argp
; *q
;
6954 gp
+= sizeof(abi_ulong
), q
++) {
6955 if (get_user_ual(addr
, gp
)
6958 unlock_user(*q
, addr
, 0);
6960 for (gp
= guest_envp
, q
= envp
; *q
;
6961 gp
+= sizeof(abi_ulong
), q
++) {
6962 if (get_user_ual(addr
, gp
)
6965 unlock_user(*q
, addr
, 0);
6969 case TARGET_NR_chdir
:
6970 if (!(p
= lock_user_string(arg1
)))
6972 ret
= get_errno(chdir(p
));
6973 unlock_user(p
, arg1
, 0);
6975 #ifdef TARGET_NR_time
6976 case TARGET_NR_time
:
6979 ret
= get_errno(time(&host_time
));
6982 && put_user_sal(host_time
, arg1
))
6987 #ifdef TARGET_NR_mknod
6988 case TARGET_NR_mknod
:
6989 if (!(p
= lock_user_string(arg1
)))
6991 ret
= get_errno(mknod(p
, arg2
, arg3
));
6992 unlock_user(p
, arg1
, 0);
6995 #if defined(TARGET_NR_mknodat)
6996 case TARGET_NR_mknodat
:
6997 if (!(p
= lock_user_string(arg2
)))
6999 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
7000 unlock_user(p
, arg2
, 0);
7003 #ifdef TARGET_NR_chmod
7004 case TARGET_NR_chmod
:
7005 if (!(p
= lock_user_string(arg1
)))
7007 ret
= get_errno(chmod(p
, arg2
));
7008 unlock_user(p
, arg1
, 0);
7011 #ifdef TARGET_NR_break
7012 case TARGET_NR_break
:
7015 #ifdef TARGET_NR_oldstat
7016 case TARGET_NR_oldstat
:
7019 case TARGET_NR_lseek
:
7020 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
7022 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7023 /* Alpha specific */
7024 case TARGET_NR_getxpid
:
7025 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
7026 ret
= get_errno(getpid());
7029 #ifdef TARGET_NR_getpid
7030 case TARGET_NR_getpid
:
7031 ret
= get_errno(getpid());
7034 case TARGET_NR_mount
:
7036 /* need to look at the data field */
7040 p
= lock_user_string(arg1
);
7048 p2
= lock_user_string(arg2
);
7051 unlock_user(p
, arg1
, 0);
7057 p3
= lock_user_string(arg3
);
7060 unlock_user(p
, arg1
, 0);
7062 unlock_user(p2
, arg2
, 0);
7069 /* FIXME - arg5 should be locked, but it isn't clear how to
7070 * do that since it's not guaranteed to be a NULL-terminated
7074 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
7076 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
7078 ret
= get_errno(ret
);
7081 unlock_user(p
, arg1
, 0);
7083 unlock_user(p2
, arg2
, 0);
7085 unlock_user(p3
, arg3
, 0);
7089 #ifdef TARGET_NR_umount
7090 case TARGET_NR_umount
:
7091 if (!(p
= lock_user_string(arg1
)))
7093 ret
= get_errno(umount(p
));
7094 unlock_user(p
, arg1
, 0);
7097 #ifdef TARGET_NR_stime /* not on alpha */
7098 case TARGET_NR_stime
:
7101 if (get_user_sal(host_time
, arg1
))
7103 ret
= get_errno(stime(&host_time
));
7107 case TARGET_NR_ptrace
:
7109 #ifdef TARGET_NR_alarm /* not on alpha */
7110 case TARGET_NR_alarm
:
7114 #ifdef TARGET_NR_oldfstat
7115 case TARGET_NR_oldfstat
:
7118 #ifdef TARGET_NR_pause /* not on alpha */
7119 case TARGET_NR_pause
:
7120 if (!block_signals()) {
7121 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
7123 ret
= -TARGET_EINTR
;
7126 #ifdef TARGET_NR_utime
7127 case TARGET_NR_utime
:
7129 struct utimbuf tbuf
, *host_tbuf
;
7130 struct target_utimbuf
*target_tbuf
;
7132 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
7134 tbuf
.actime
= tswapal(target_tbuf
->actime
);
7135 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
7136 unlock_user_struct(target_tbuf
, arg2
, 0);
7141 if (!(p
= lock_user_string(arg1
)))
7143 ret
= get_errno(utime(p
, host_tbuf
));
7144 unlock_user(p
, arg1
, 0);
7148 #ifdef TARGET_NR_utimes
7149 case TARGET_NR_utimes
:
7151 struct timeval
*tvp
, tv
[2];
7153 if (copy_from_user_timeval(&tv
[0], arg2
)
7154 || copy_from_user_timeval(&tv
[1],
7155 arg2
+ sizeof(struct target_timeval
)))
7161 if (!(p
= lock_user_string(arg1
)))
7163 ret
= get_errno(utimes(p
, tvp
));
7164 unlock_user(p
, arg1
, 0);
7168 #if defined(TARGET_NR_futimesat)
7169 case TARGET_NR_futimesat
:
7171 struct timeval
*tvp
, tv
[2];
7173 if (copy_from_user_timeval(&tv
[0], arg3
)
7174 || copy_from_user_timeval(&tv
[1],
7175 arg3
+ sizeof(struct target_timeval
)))
7181 if (!(p
= lock_user_string(arg2
)))
7183 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
7184 unlock_user(p
, arg2
, 0);
7188 #ifdef TARGET_NR_stty
7189 case TARGET_NR_stty
:
7192 #ifdef TARGET_NR_gtty
7193 case TARGET_NR_gtty
:
7196 #ifdef TARGET_NR_access
7197 case TARGET_NR_access
:
7198 if (!(p
= lock_user_string(arg1
)))
7200 ret
= get_errno(access(path(p
), arg2
));
7201 unlock_user(p
, arg1
, 0);
7204 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7205 case TARGET_NR_faccessat
:
7206 if (!(p
= lock_user_string(arg2
)))
7208 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
7209 unlock_user(p
, arg2
, 0);
7212 #ifdef TARGET_NR_nice /* not on alpha */
7213 case TARGET_NR_nice
:
7214 ret
= get_errno(nice(arg1
));
7217 #ifdef TARGET_NR_ftime
7218 case TARGET_NR_ftime
:
7221 case TARGET_NR_sync
:
7225 case TARGET_NR_kill
:
7226 ret
= get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
7228 #ifdef TARGET_NR_rename
7229 case TARGET_NR_rename
:
7232 p
= lock_user_string(arg1
);
7233 p2
= lock_user_string(arg2
);
7235 ret
= -TARGET_EFAULT
;
7237 ret
= get_errno(rename(p
, p2
));
7238 unlock_user(p2
, arg2
, 0);
7239 unlock_user(p
, arg1
, 0);
7243 #if defined(TARGET_NR_renameat)
7244 case TARGET_NR_renameat
:
7247 p
= lock_user_string(arg2
);
7248 p2
= lock_user_string(arg4
);
7250 ret
= -TARGET_EFAULT
;
7252 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
7253 unlock_user(p2
, arg4
, 0);
7254 unlock_user(p
, arg2
, 0);
7258 #ifdef TARGET_NR_mkdir
7259 case TARGET_NR_mkdir
:
7260 if (!(p
= lock_user_string(arg1
)))
7262 ret
= get_errno(mkdir(p
, arg2
));
7263 unlock_user(p
, arg1
, 0);
7266 #if defined(TARGET_NR_mkdirat)
7267 case TARGET_NR_mkdirat
:
7268 if (!(p
= lock_user_string(arg2
)))
7270 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
7271 unlock_user(p
, arg2
, 0);
7274 #ifdef TARGET_NR_rmdir
7275 case TARGET_NR_rmdir
:
7276 if (!(p
= lock_user_string(arg1
)))
7278 ret
= get_errno(rmdir(p
));
7279 unlock_user(p
, arg1
, 0);
7283 ret
= get_errno(dup(arg1
));
7285 fd_trans_dup(arg1
, ret
);
7288 #ifdef TARGET_NR_pipe
7289 case TARGET_NR_pipe
:
7290 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
7293 #ifdef TARGET_NR_pipe2
7294 case TARGET_NR_pipe2
:
7295 ret
= do_pipe(cpu_env
, arg1
,
7296 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
7299 case TARGET_NR_times
:
7301 struct target_tms
*tmsp
;
7303 ret
= get_errno(times(&tms
));
7305 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
7308 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
7309 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
7310 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
7311 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
7314 ret
= host_to_target_clock_t(ret
);
7317 #ifdef TARGET_NR_prof
7318 case TARGET_NR_prof
:
7321 #ifdef TARGET_NR_signal
7322 case TARGET_NR_signal
:
7325 case TARGET_NR_acct
:
7327 ret
= get_errno(acct(NULL
));
7329 if (!(p
= lock_user_string(arg1
)))
7331 ret
= get_errno(acct(path(p
)));
7332 unlock_user(p
, arg1
, 0);
7335 #ifdef TARGET_NR_umount2
7336 case TARGET_NR_umount2
:
7337 if (!(p
= lock_user_string(arg1
)))
7339 ret
= get_errno(umount2(p
, arg2
));
7340 unlock_user(p
, arg1
, 0);
7343 #ifdef TARGET_NR_lock
7344 case TARGET_NR_lock
:
7347 case TARGET_NR_ioctl
:
7348 ret
= do_ioctl(arg1
, arg2
, arg3
);
7350 case TARGET_NR_fcntl
:
7351 ret
= do_fcntl(arg1
, arg2
, arg3
);
7353 #ifdef TARGET_NR_mpx
7357 case TARGET_NR_setpgid
:
7358 ret
= get_errno(setpgid(arg1
, arg2
));
7360 #ifdef TARGET_NR_ulimit
7361 case TARGET_NR_ulimit
:
7364 #ifdef TARGET_NR_oldolduname
7365 case TARGET_NR_oldolduname
:
7368 case TARGET_NR_umask
:
7369 ret
= get_errno(umask(arg1
));
7371 case TARGET_NR_chroot
:
7372 if (!(p
= lock_user_string(arg1
)))
7374 ret
= get_errno(chroot(p
));
7375 unlock_user(p
, arg1
, 0);
7377 #ifdef TARGET_NR_ustat
7378 case TARGET_NR_ustat
:
7381 #ifdef TARGET_NR_dup2
7382 case TARGET_NR_dup2
:
7383 ret
= get_errno(dup2(arg1
, arg2
));
7385 fd_trans_dup(arg1
, arg2
);
7389 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7390 case TARGET_NR_dup3
:
7391 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
7393 fd_trans_dup(arg1
, arg2
);
7397 #ifdef TARGET_NR_getppid /* not on alpha */
7398 case TARGET_NR_getppid
:
7399 ret
= get_errno(getppid());
7402 #ifdef TARGET_NR_getpgrp
7403 case TARGET_NR_getpgrp
:
7404 ret
= get_errno(getpgrp());
7407 case TARGET_NR_setsid
:
7408 ret
= get_errno(setsid());
7410 #ifdef TARGET_NR_sigaction
7411 case TARGET_NR_sigaction
:
7413 #if defined(TARGET_ALPHA)
7414 struct target_sigaction act
, oact
, *pact
= 0;
7415 struct target_old_sigaction
*old_act
;
7417 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
7419 act
._sa_handler
= old_act
->_sa_handler
;
7420 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
7421 act
.sa_flags
= old_act
->sa_flags
;
7422 act
.sa_restorer
= 0;
7423 unlock_user_struct(old_act
, arg2
, 0);
7426 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7427 if (!is_error(ret
) && arg3
) {
7428 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
7430 old_act
->_sa_handler
= oact
._sa_handler
;
7431 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
7432 old_act
->sa_flags
= oact
.sa_flags
;
7433 unlock_user_struct(old_act
, arg3
, 1);
7435 #elif defined(TARGET_MIPS)
7436 struct target_sigaction act
, oact
, *pact
, *old_act
;
7439 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
7441 act
._sa_handler
= old_act
->_sa_handler
;
7442 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
7443 act
.sa_flags
= old_act
->sa_flags
;
7444 unlock_user_struct(old_act
, arg2
, 0);
7450 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7452 if (!is_error(ret
) && arg3
) {
7453 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
7455 old_act
->_sa_handler
= oact
._sa_handler
;
7456 old_act
->sa_flags
= oact
.sa_flags
;
7457 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
7458 old_act
->sa_mask
.sig
[1] = 0;
7459 old_act
->sa_mask
.sig
[2] = 0;
7460 old_act
->sa_mask
.sig
[3] = 0;
7461 unlock_user_struct(old_act
, arg3
, 1);
7464 struct target_old_sigaction
*old_act
;
7465 struct target_sigaction act
, oact
, *pact
;
7467 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
7469 act
._sa_handler
= old_act
->_sa_handler
;
7470 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
7471 act
.sa_flags
= old_act
->sa_flags
;
7472 act
.sa_restorer
= old_act
->sa_restorer
;
7473 unlock_user_struct(old_act
, arg2
, 0);
7478 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7479 if (!is_error(ret
) && arg3
) {
7480 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
7482 old_act
->_sa_handler
= oact
._sa_handler
;
7483 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
7484 old_act
->sa_flags
= oact
.sa_flags
;
7485 old_act
->sa_restorer
= oact
.sa_restorer
;
7486 unlock_user_struct(old_act
, arg3
, 1);
7492 case TARGET_NR_rt_sigaction
:
7494 #if defined(TARGET_ALPHA)
7495 struct target_sigaction act
, oact
, *pact
= 0;
7496 struct target_rt_sigaction
*rt_act
;
7497 /* ??? arg4 == sizeof(sigset_t). */
7499 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
7501 act
._sa_handler
= rt_act
->_sa_handler
;
7502 act
.sa_mask
= rt_act
->sa_mask
;
7503 act
.sa_flags
= rt_act
->sa_flags
;
7504 act
.sa_restorer
= arg5
;
7505 unlock_user_struct(rt_act
, arg2
, 0);
7508 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7509 if (!is_error(ret
) && arg3
) {
7510 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
7512 rt_act
->_sa_handler
= oact
._sa_handler
;
7513 rt_act
->sa_mask
= oact
.sa_mask
;
7514 rt_act
->sa_flags
= oact
.sa_flags
;
7515 unlock_user_struct(rt_act
, arg3
, 1);
7518 struct target_sigaction
*act
;
7519 struct target_sigaction
*oact
;
7522 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
7527 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
7528 ret
= -TARGET_EFAULT
;
7529 goto rt_sigaction_fail
;
7533 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
7536 unlock_user_struct(act
, arg2
, 0);
7538 unlock_user_struct(oact
, arg3
, 1);
7542 #ifdef TARGET_NR_sgetmask /* not on alpha */
7543 case TARGET_NR_sgetmask
:
7546 abi_ulong target_set
;
7547 ret
= do_sigprocmask(0, NULL
, &cur_set
);
7549 host_to_target_old_sigset(&target_set
, &cur_set
);
7555 #ifdef TARGET_NR_ssetmask /* not on alpha */
7556 case TARGET_NR_ssetmask
:
7558 sigset_t set
, oset
, cur_set
;
7559 abi_ulong target_set
= arg1
;
7560 /* We only have one word of the new mask so we must read
7561 * the rest of it with do_sigprocmask() and OR in this word.
7562 * We are guaranteed that a do_sigprocmask() that only queries
7563 * the signal mask will not fail.
7565 ret
= do_sigprocmask(0, NULL
, &cur_set
);
7567 target_to_host_old_sigset(&set
, &target_set
);
7568 sigorset(&set
, &set
, &cur_set
);
7569 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
7571 host_to_target_old_sigset(&target_set
, &oset
);
7577 #ifdef TARGET_NR_sigprocmask
7578 case TARGET_NR_sigprocmask
:
7580 #if defined(TARGET_ALPHA)
7581 sigset_t set
, oldset
;
7586 case TARGET_SIG_BLOCK
:
7589 case TARGET_SIG_UNBLOCK
:
7592 case TARGET_SIG_SETMASK
:
7596 ret
= -TARGET_EINVAL
;
7600 target_to_host_old_sigset(&set
, &mask
);
7602 ret
= do_sigprocmask(how
, &set
, &oldset
);
7603 if (!is_error(ret
)) {
7604 host_to_target_old_sigset(&mask
, &oldset
);
7606 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
7609 sigset_t set
, oldset
, *set_ptr
;
7614 case TARGET_SIG_BLOCK
:
7617 case TARGET_SIG_UNBLOCK
:
7620 case TARGET_SIG_SETMASK
:
7624 ret
= -TARGET_EINVAL
;
7627 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
7629 target_to_host_old_sigset(&set
, p
);
7630 unlock_user(p
, arg2
, 0);
7636 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
7637 if (!is_error(ret
) && arg3
) {
7638 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
7640 host_to_target_old_sigset(p
, &oldset
);
7641 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
7647 case TARGET_NR_rt_sigprocmask
:
7650 sigset_t set
, oldset
, *set_ptr
;
7654 case TARGET_SIG_BLOCK
:
7657 case TARGET_SIG_UNBLOCK
:
7660 case TARGET_SIG_SETMASK
:
7664 ret
= -TARGET_EINVAL
;
7667 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
7669 target_to_host_sigset(&set
, p
);
7670 unlock_user(p
, arg2
, 0);
7676 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
7677 if (!is_error(ret
) && arg3
) {
7678 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
7680 host_to_target_sigset(p
, &oldset
);
7681 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
7685 #ifdef TARGET_NR_sigpending
7686 case TARGET_NR_sigpending
:
7689 ret
= get_errno(sigpending(&set
));
7690 if (!is_error(ret
)) {
7691 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
7693 host_to_target_old_sigset(p
, &set
);
7694 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
7699 case TARGET_NR_rt_sigpending
:
7702 ret
= get_errno(sigpending(&set
));
7703 if (!is_error(ret
)) {
7704 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
7706 host_to_target_sigset(p
, &set
);
7707 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
7711 #ifdef TARGET_NR_sigsuspend
7712 case TARGET_NR_sigsuspend
:
7714 TaskState
*ts
= cpu
->opaque
;
7715 #if defined(TARGET_ALPHA)
7716 abi_ulong mask
= arg1
;
7717 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
7719 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
7721 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
7722 unlock_user(p
, arg1
, 0);
7724 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
7726 if (ret
!= -TARGET_ERESTARTSYS
) {
7727 ts
->in_sigsuspend
= 1;
7732 case TARGET_NR_rt_sigsuspend
:
7734 TaskState
*ts
= cpu
->opaque
;
7735 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
7737 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
7738 unlock_user(p
, arg1
, 0);
7739 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
7741 if (ret
!= -TARGET_ERESTARTSYS
) {
7742 ts
->in_sigsuspend
= 1;
7746 case TARGET_NR_rt_sigtimedwait
:
7749 struct timespec uts
, *puts
;
7752 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
7754 target_to_host_sigset(&set
, p
);
7755 unlock_user(p
, arg1
, 0);
7758 target_to_host_timespec(puts
, arg3
);
7762 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
7764 if (!is_error(ret
)) {
7766 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
7771 host_to_target_siginfo(p
, &uinfo
);
7772 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
7774 ret
= host_to_target_signal(ret
);
7778 case TARGET_NR_rt_sigqueueinfo
:
7781 if (!(p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_sigset_t
), 1)))
7783 target_to_host_siginfo(&uinfo
, p
);
7784 unlock_user(p
, arg1
, 0);
7785 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
7788 #ifdef TARGET_NR_sigreturn
7789 case TARGET_NR_sigreturn
:
7790 if (block_signals()) {
7791 ret
= -TARGET_ERESTARTSYS
;
7793 ret
= do_sigreturn(cpu_env
);
7797 case TARGET_NR_rt_sigreturn
:
7798 if (block_signals()) {
7799 ret
= -TARGET_ERESTARTSYS
;
7801 ret
= do_rt_sigreturn(cpu_env
);
7804 case TARGET_NR_sethostname
:
7805 if (!(p
= lock_user_string(arg1
)))
7807 ret
= get_errno(sethostname(p
, arg2
));
7808 unlock_user(p
, arg1
, 0);
7810 case TARGET_NR_setrlimit
:
7812 int resource
= target_to_host_resource(arg1
);
7813 struct target_rlimit
*target_rlim
;
7815 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
7817 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
7818 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
7819 unlock_user_struct(target_rlim
, arg2
, 0);
7820 ret
= get_errno(setrlimit(resource
, &rlim
));
7823 case TARGET_NR_getrlimit
:
7825 int resource
= target_to_host_resource(arg1
);
7826 struct target_rlimit
*target_rlim
;
7829 ret
= get_errno(getrlimit(resource
, &rlim
));
7830 if (!is_error(ret
)) {
7831 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
7833 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
7834 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
7835 unlock_user_struct(target_rlim
, arg2
, 1);
7839 case TARGET_NR_getrusage
:
7841 struct rusage rusage
;
7842 ret
= get_errno(getrusage(arg1
, &rusage
));
7843 if (!is_error(ret
)) {
7844 ret
= host_to_target_rusage(arg2
, &rusage
);
7848 case TARGET_NR_gettimeofday
:
7851 ret
= get_errno(gettimeofday(&tv
, NULL
));
7852 if (!is_error(ret
)) {
7853 if (copy_to_user_timeval(arg1
, &tv
))
7858 case TARGET_NR_settimeofday
:
7860 struct timeval tv
, *ptv
= NULL
;
7861 struct timezone tz
, *ptz
= NULL
;
7864 if (copy_from_user_timeval(&tv
, arg1
)) {
7871 if (copy_from_user_timezone(&tz
, arg2
)) {
7877 ret
= get_errno(settimeofday(ptv
, ptz
));
7880 #if defined(TARGET_NR_select)
7881 case TARGET_NR_select
:
7882 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
7883 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
7886 struct target_sel_arg_struct
*sel
;
7887 abi_ulong inp
, outp
, exp
, tvp
;
7890 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1))
7892 nsel
= tswapal(sel
->n
);
7893 inp
= tswapal(sel
->inp
);
7894 outp
= tswapal(sel
->outp
);
7895 exp
= tswapal(sel
->exp
);
7896 tvp
= tswapal(sel
->tvp
);
7897 unlock_user_struct(sel
, arg1
, 0);
7898 ret
= do_select(nsel
, inp
, outp
, exp
, tvp
);
7903 #ifdef TARGET_NR_pselect6
7904 case TARGET_NR_pselect6
:
7906 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
7907 fd_set rfds
, wfds
, efds
;
7908 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
7909 struct timespec ts
, *ts_ptr
;
7912 * The 6th arg is actually two args smashed together,
7913 * so we cannot use the C library.
7921 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
7922 target_sigset_t
*target_sigset
;
7930 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
7934 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
7938 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
7944 * This takes a timespec, and not a timeval, so we cannot
7945 * use the do_select() helper ...
7948 if (target_to_host_timespec(&ts
, ts_addr
)) {
7956 /* Extract the two packed args for the sigset */
7959 sig
.size
= SIGSET_T_SIZE
;
7961 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
7965 arg_sigset
= tswapal(arg7
[0]);
7966 arg_sigsize
= tswapal(arg7
[1]);
7967 unlock_user(arg7
, arg6
, 0);
7971 if (arg_sigsize
!= sizeof(*target_sigset
)) {
7972 /* Like the kernel, we enforce correct size sigsets */
7973 ret
= -TARGET_EINVAL
;
7976 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
7977 sizeof(*target_sigset
), 1);
7978 if (!target_sigset
) {
7981 target_to_host_sigset(&set
, target_sigset
);
7982 unlock_user(target_sigset
, arg_sigset
, 0);
7990 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
7993 if (!is_error(ret
)) {
7994 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
7996 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
7998 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
8001 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
8007 #ifdef TARGET_NR_symlink
8008 case TARGET_NR_symlink
:
8011 p
= lock_user_string(arg1
);
8012 p2
= lock_user_string(arg2
);
8014 ret
= -TARGET_EFAULT
;
8016 ret
= get_errno(symlink(p
, p2
));
8017 unlock_user(p2
, arg2
, 0);
8018 unlock_user(p
, arg1
, 0);
8022 #if defined(TARGET_NR_symlinkat)
8023 case TARGET_NR_symlinkat
:
8026 p
= lock_user_string(arg1
);
8027 p2
= lock_user_string(arg3
);
8029 ret
= -TARGET_EFAULT
;
8031 ret
= get_errno(symlinkat(p
, arg2
, p2
));
8032 unlock_user(p2
, arg3
, 0);
8033 unlock_user(p
, arg1
, 0);
8037 #ifdef TARGET_NR_oldlstat
8038 case TARGET_NR_oldlstat
:
8041 #ifdef TARGET_NR_readlink
8042 case TARGET_NR_readlink
:
8045 p
= lock_user_string(arg1
);
8046 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8048 ret
= -TARGET_EFAULT
;
8050 /* Short circuit this for the magic exe check. */
8051 ret
= -TARGET_EINVAL
;
8052 } else if (is_proc_myself((const char *)p
, "exe")) {
8053 char real
[PATH_MAX
], *temp
;
8054 temp
= realpath(exec_path
, real
);
8055 /* Return value is # of bytes that we wrote to the buffer. */
8057 ret
= get_errno(-1);
8059 /* Don't worry about sign mismatch as earlier mapping
8060 * logic would have thrown a bad address error. */
8061 ret
= MIN(strlen(real
), arg3
);
8062 /* We cannot NUL terminate the string. */
8063 memcpy(p2
, real
, ret
);
8066 ret
= get_errno(readlink(path(p
), p2
, arg3
));
8068 unlock_user(p2
, arg2
, ret
);
8069 unlock_user(p
, arg1
, 0);
8073 #if defined(TARGET_NR_readlinkat)
8074 case TARGET_NR_readlinkat
:
8077 p
= lock_user_string(arg2
);
8078 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
8080 ret
= -TARGET_EFAULT
;
8081 } else if (is_proc_myself((const char *)p
, "exe")) {
8082 char real
[PATH_MAX
], *temp
;
8083 temp
= realpath(exec_path
, real
);
8084 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
8085 snprintf((char *)p2
, arg4
, "%s", real
);
8087 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
8089 unlock_user(p2
, arg3
, ret
);
8090 unlock_user(p
, arg2
, 0);
8094 #ifdef TARGET_NR_uselib
8095 case TARGET_NR_uselib
:
8098 #ifdef TARGET_NR_swapon
8099 case TARGET_NR_swapon
:
8100 if (!(p
= lock_user_string(arg1
)))
8102 ret
= get_errno(swapon(p
, arg2
));
8103 unlock_user(p
, arg1
, 0);
8106 case TARGET_NR_reboot
:
8107 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
8108 /* arg4 must be ignored in all other cases */
8109 p
= lock_user_string(arg4
);
8113 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
8114 unlock_user(p
, arg4
, 0);
8116 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
8119 #ifdef TARGET_NR_readdir
8120 case TARGET_NR_readdir
:
8123 #ifdef TARGET_NR_mmap
8124 case TARGET_NR_mmap
:
8125 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8126 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8127 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8128 || defined(TARGET_S390X)
8131 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
8132 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
8140 unlock_user(v
, arg1
, 0);
8141 ret
= get_errno(target_mmap(v1
, v2
, v3
,
8142 target_to_host_bitmask(v4
, mmap_flags_tbl
),
8146 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
8147 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
8153 #ifdef TARGET_NR_mmap2
8154 case TARGET_NR_mmap2
:
8156 #define MMAP_SHIFT 12
8158 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
8159 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
8161 arg6
<< MMAP_SHIFT
));
8164 case TARGET_NR_munmap
:
8165 ret
= get_errno(target_munmap(arg1
, arg2
));
8167 case TARGET_NR_mprotect
:
8169 TaskState
*ts
= cpu
->opaque
;
8170 /* Special hack to detect libc making the stack executable. */
8171 if ((arg3
& PROT_GROWSDOWN
)
8172 && arg1
>= ts
->info
->stack_limit
8173 && arg1
<= ts
->info
->start_stack
) {
8174 arg3
&= ~PROT_GROWSDOWN
;
8175 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
8176 arg1
= ts
->info
->stack_limit
;
8179 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
8181 #ifdef TARGET_NR_mremap
8182 case TARGET_NR_mremap
:
8183 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
8186 /* ??? msync/mlock/munlock are broken for softmmu. */
8187 #ifdef TARGET_NR_msync
8188 case TARGET_NR_msync
:
8189 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
8192 #ifdef TARGET_NR_mlock
8193 case TARGET_NR_mlock
:
8194 ret
= get_errno(mlock(g2h(arg1
), arg2
));
8197 #ifdef TARGET_NR_munlock
8198 case TARGET_NR_munlock
:
8199 ret
= get_errno(munlock(g2h(arg1
), arg2
));
8202 #ifdef TARGET_NR_mlockall
8203 case TARGET_NR_mlockall
:
8204 ret
= get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
8207 #ifdef TARGET_NR_munlockall
8208 case TARGET_NR_munlockall
:
8209 ret
= get_errno(munlockall());
8212 case TARGET_NR_truncate
:
8213 if (!(p
= lock_user_string(arg1
)))
8215 ret
= get_errno(truncate(p
, arg2
));
8216 unlock_user(p
, arg1
, 0);
8218 case TARGET_NR_ftruncate
:
8219 ret
= get_errno(ftruncate(arg1
, arg2
));
8221 case TARGET_NR_fchmod
:
8222 ret
= get_errno(fchmod(arg1
, arg2
));
8224 #if defined(TARGET_NR_fchmodat)
8225 case TARGET_NR_fchmodat
:
8226 if (!(p
= lock_user_string(arg2
)))
8228 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
8229 unlock_user(p
, arg2
, 0);
8232 case TARGET_NR_getpriority
:
8233 /* Note that negative values are valid for getpriority, so we must
8234 differentiate based on errno settings. */
8236 ret
= getpriority(arg1
, arg2
);
8237 if (ret
== -1 && errno
!= 0) {
8238 ret
= -host_to_target_errno(errno
);
8242 /* Return value is the unbiased priority. Signal no error. */
8243 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
8245 /* Return value is a biased priority to avoid negative numbers. */
8249 case TARGET_NR_setpriority
:
8250 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
8252 #ifdef TARGET_NR_profil
8253 case TARGET_NR_profil
:
8256 case TARGET_NR_statfs
:
8257 if (!(p
= lock_user_string(arg1
)))
8259 ret
= get_errno(statfs(path(p
), &stfs
));
8260 unlock_user(p
, arg1
, 0);
8262 if (!is_error(ret
)) {
8263 struct target_statfs
*target_stfs
;
8265 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
8267 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
8268 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
8269 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
8270 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
8271 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
8272 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
8273 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
8274 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
8275 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
8276 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
8277 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
8278 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
8279 unlock_user_struct(target_stfs
, arg2
, 1);
8282 case TARGET_NR_fstatfs
:
8283 ret
= get_errno(fstatfs(arg1
, &stfs
));
8284 goto convert_statfs
;
8285 #ifdef TARGET_NR_statfs64
8286 case TARGET_NR_statfs64
:
8287 if (!(p
= lock_user_string(arg1
)))
8289 ret
= get_errno(statfs(path(p
), &stfs
));
8290 unlock_user(p
, arg1
, 0);
8292 if (!is_error(ret
)) {
8293 struct target_statfs64
*target_stfs
;
8295 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
8297 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
8298 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
8299 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
8300 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
8301 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
8302 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
8303 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
8304 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
8305 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
8306 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
8307 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
8308 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
8309 unlock_user_struct(target_stfs
, arg3
, 1);
8312 case TARGET_NR_fstatfs64
:
8313 ret
= get_errno(fstatfs(arg1
, &stfs
));
8314 goto convert_statfs64
;
8316 #ifdef TARGET_NR_ioperm
8317 case TARGET_NR_ioperm
:
8320 #ifdef TARGET_NR_socketcall
8321 case TARGET_NR_socketcall
:
8322 ret
= do_socketcall(arg1
, arg2
);
8325 #ifdef TARGET_NR_accept
8326 case TARGET_NR_accept
:
8327 ret
= do_accept4(arg1
, arg2
, arg3
, 0);
8330 #ifdef TARGET_NR_accept4
8331 case TARGET_NR_accept4
:
8332 #ifdef CONFIG_ACCEPT4
8333 ret
= do_accept4(arg1
, arg2
, arg3
, arg4
);
8339 #ifdef TARGET_NR_bind
8340 case TARGET_NR_bind
:
8341 ret
= do_bind(arg1
, arg2
, arg3
);
8344 #ifdef TARGET_NR_connect
8345 case TARGET_NR_connect
:
8346 ret
= do_connect(arg1
, arg2
, arg3
);
8349 #ifdef TARGET_NR_getpeername
8350 case TARGET_NR_getpeername
:
8351 ret
= do_getpeername(arg1
, arg2
, arg3
);
8354 #ifdef TARGET_NR_getsockname
8355 case TARGET_NR_getsockname
:
8356 ret
= do_getsockname(arg1
, arg2
, arg3
);
8359 #ifdef TARGET_NR_getsockopt
8360 case TARGET_NR_getsockopt
:
8361 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
8364 #ifdef TARGET_NR_listen
8365 case TARGET_NR_listen
:
8366 ret
= get_errno(listen(arg1
, arg2
));
8369 #ifdef TARGET_NR_recv
8370 case TARGET_NR_recv
:
8371 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
8374 #ifdef TARGET_NR_recvfrom
8375 case TARGET_NR_recvfrom
:
8376 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8379 #ifdef TARGET_NR_recvmsg
8380 case TARGET_NR_recvmsg
:
8381 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
8384 #ifdef TARGET_NR_send
8385 case TARGET_NR_send
:
8386 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
8389 #ifdef TARGET_NR_sendmsg
8390 case TARGET_NR_sendmsg
:
8391 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
8394 #ifdef TARGET_NR_sendmmsg
8395 case TARGET_NR_sendmmsg
:
8396 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
8398 case TARGET_NR_recvmmsg
:
8399 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
8402 #ifdef TARGET_NR_sendto
8403 case TARGET_NR_sendto
:
8404 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8407 #ifdef TARGET_NR_shutdown
8408 case TARGET_NR_shutdown
:
8409 ret
= get_errno(shutdown(arg1
, arg2
));
8412 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8413 case TARGET_NR_getrandom
:
8414 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
8418 ret
= get_errno(getrandom(p
, arg2
, arg3
));
8419 unlock_user(p
, arg1
, ret
);
8422 #ifdef TARGET_NR_socket
8423 case TARGET_NR_socket
:
8424 ret
= do_socket(arg1
, arg2
, arg3
);
8425 fd_trans_unregister(ret
);
8428 #ifdef TARGET_NR_socketpair
8429 case TARGET_NR_socketpair
:
8430 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
8433 #ifdef TARGET_NR_setsockopt
8434 case TARGET_NR_setsockopt
:
8435 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
8439 case TARGET_NR_syslog
:
8440 if (!(p
= lock_user_string(arg2
)))
8442 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
8443 unlock_user(p
, arg2
, 0);
8446 case TARGET_NR_setitimer
:
8448 struct itimerval value
, ovalue
, *pvalue
;
8452 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
8453 || copy_from_user_timeval(&pvalue
->it_value
,
8454 arg2
+ sizeof(struct target_timeval
)))
8459 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
8460 if (!is_error(ret
) && arg3
) {
8461 if (copy_to_user_timeval(arg3
,
8462 &ovalue
.it_interval
)
8463 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
8469 case TARGET_NR_getitimer
:
8471 struct itimerval value
;
8473 ret
= get_errno(getitimer(arg1
, &value
));
8474 if (!is_error(ret
) && arg2
) {
8475 if (copy_to_user_timeval(arg2
,
8477 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
8483 #ifdef TARGET_NR_stat
8484 case TARGET_NR_stat
:
8485 if (!(p
= lock_user_string(arg1
)))
8487 ret
= get_errno(stat(path(p
), &st
));
8488 unlock_user(p
, arg1
, 0);
8491 #ifdef TARGET_NR_lstat
8492 case TARGET_NR_lstat
:
8493 if (!(p
= lock_user_string(arg1
)))
8495 ret
= get_errno(lstat(path(p
), &st
));
8496 unlock_user(p
, arg1
, 0);
8499 case TARGET_NR_fstat
:
8501 ret
= get_errno(fstat(arg1
, &st
));
8502 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
8505 if (!is_error(ret
)) {
8506 struct target_stat
*target_st
;
8508 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
8510 memset(target_st
, 0, sizeof(*target_st
));
8511 __put_user(st
.st_dev
, &target_st
->st_dev
);
8512 __put_user(st
.st_ino
, &target_st
->st_ino
);
8513 __put_user(st
.st_mode
, &target_st
->st_mode
);
8514 __put_user(st
.st_uid
, &target_st
->st_uid
);
8515 __put_user(st
.st_gid
, &target_st
->st_gid
);
8516 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
8517 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
8518 __put_user(st
.st_size
, &target_st
->st_size
);
8519 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
8520 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
8521 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
8522 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
8523 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
8524 unlock_user_struct(target_st
, arg2
, 1);
8528 #ifdef TARGET_NR_olduname
8529 case TARGET_NR_olduname
:
8532 #ifdef TARGET_NR_iopl
8533 case TARGET_NR_iopl
:
8536 case TARGET_NR_vhangup
:
8537 ret
= get_errno(vhangup());
8539 #ifdef TARGET_NR_idle
8540 case TARGET_NR_idle
:
8543 #ifdef TARGET_NR_syscall
8544 case TARGET_NR_syscall
:
8545 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
8546 arg6
, arg7
, arg8
, 0);
8549 case TARGET_NR_wait4
:
8552 abi_long status_ptr
= arg2
;
8553 struct rusage rusage
, *rusage_ptr
;
8554 abi_ulong target_rusage
= arg4
;
8555 abi_long rusage_err
;
8557 rusage_ptr
= &rusage
;
8560 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
8561 if (!is_error(ret
)) {
8562 if (status_ptr
&& ret
) {
8563 status
= host_to_target_waitstatus(status
);
8564 if (put_user_s32(status
, status_ptr
))
8567 if (target_rusage
) {
8568 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
8576 #ifdef TARGET_NR_swapoff
8577 case TARGET_NR_swapoff
:
8578 if (!(p
= lock_user_string(arg1
)))
8580 ret
= get_errno(swapoff(p
));
8581 unlock_user(p
, arg1
, 0);
8584 case TARGET_NR_sysinfo
:
8586 struct target_sysinfo
*target_value
;
8587 struct sysinfo value
;
8588 ret
= get_errno(sysinfo(&value
));
8589 if (!is_error(ret
) && arg1
)
8591 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
8593 __put_user(value
.uptime
, &target_value
->uptime
);
8594 __put_user(value
.loads
[0], &target_value
->loads
[0]);
8595 __put_user(value
.loads
[1], &target_value
->loads
[1]);
8596 __put_user(value
.loads
[2], &target_value
->loads
[2]);
8597 __put_user(value
.totalram
, &target_value
->totalram
);
8598 __put_user(value
.freeram
, &target_value
->freeram
);
8599 __put_user(value
.sharedram
, &target_value
->sharedram
);
8600 __put_user(value
.bufferram
, &target_value
->bufferram
);
8601 __put_user(value
.totalswap
, &target_value
->totalswap
);
8602 __put_user(value
.freeswap
, &target_value
->freeswap
);
8603 __put_user(value
.procs
, &target_value
->procs
);
8604 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
8605 __put_user(value
.freehigh
, &target_value
->freehigh
);
8606 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
8607 unlock_user_struct(target_value
, arg1
, 1);
8611 #ifdef TARGET_NR_ipc
8613 ret
= do_ipc(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8616 #ifdef TARGET_NR_semget
8617 case TARGET_NR_semget
:
8618 ret
= get_errno(semget(arg1
, arg2
, arg3
));
8621 #ifdef TARGET_NR_semop
8622 case TARGET_NR_semop
:
8623 ret
= do_semop(arg1
, arg2
, arg3
);
8626 #ifdef TARGET_NR_semctl
8627 case TARGET_NR_semctl
:
8628 ret
= do_semctl(arg1
, arg2
, arg3
, arg4
);
8631 #ifdef TARGET_NR_msgctl
8632 case TARGET_NR_msgctl
:
8633 ret
= do_msgctl(arg1
, arg2
, arg3
);
8636 #ifdef TARGET_NR_msgget
8637 case TARGET_NR_msgget
:
8638 ret
= get_errno(msgget(arg1
, arg2
));
8641 #ifdef TARGET_NR_msgrcv
8642 case TARGET_NR_msgrcv
:
8643 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
8646 #ifdef TARGET_NR_msgsnd
8647 case TARGET_NR_msgsnd
:
8648 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
8651 #ifdef TARGET_NR_shmget
8652 case TARGET_NR_shmget
:
8653 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
8656 #ifdef TARGET_NR_shmctl
8657 case TARGET_NR_shmctl
:
8658 ret
= do_shmctl(arg1
, arg2
, arg3
);
8661 #ifdef TARGET_NR_shmat
8662 case TARGET_NR_shmat
:
8663 ret
= do_shmat(arg1
, arg2
, arg3
);
8666 #ifdef TARGET_NR_shmdt
8667 case TARGET_NR_shmdt
:
8668 ret
= do_shmdt(arg1
);
8671 case TARGET_NR_fsync
:
8672 ret
= get_errno(fsync(arg1
));
8674 case TARGET_NR_clone
:
8675 /* Linux manages to have three different orderings for its
8676 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
8677 * match the kernel's CONFIG_CLONE_* settings.
8678 * Microblaze is further special in that it uses a sixth
8679 * implicit argument to clone for the TLS pointer.
8681 #if defined(TARGET_MICROBLAZE)
8682 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
8683 #elif defined(TARGET_CLONE_BACKWARDS)
8684 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
8685 #elif defined(TARGET_CLONE_BACKWARDS2)
8686 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
8688 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
8691 #ifdef __NR_exit_group
8692 /* new thread calls */
8693 case TARGET_NR_exit_group
:
8697 gdb_exit(cpu_env
, arg1
);
8698 ret
= get_errno(exit_group(arg1
));
8701 case TARGET_NR_setdomainname
:
8702 if (!(p
= lock_user_string(arg1
)))
8704 ret
= get_errno(setdomainname(p
, arg2
));
8705 unlock_user(p
, arg1
, 0);
8707 case TARGET_NR_uname
:
8708 /* no need to transcode because we use the linux syscall */
8710 struct new_utsname
* buf
;
8712 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
8714 ret
= get_errno(sys_uname(buf
));
8715 if (!is_error(ret
)) {
8716 /* Overrite the native machine name with whatever is being
8718 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
8719 /* Allow the user to override the reported release. */
8720 if (qemu_uname_release
&& *qemu_uname_release
)
8721 strcpy (buf
->release
, qemu_uname_release
);
8723 unlock_user_struct(buf
, arg1
, 1);
8727 case TARGET_NR_modify_ldt
:
8728 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
8730 #if !defined(TARGET_X86_64)
8731 case TARGET_NR_vm86old
:
8733 case TARGET_NR_vm86
:
8734 ret
= do_vm86(cpu_env
, arg1
, arg2
);
8738 case TARGET_NR_adjtimex
:
8740 #ifdef TARGET_NR_create_module
8741 case TARGET_NR_create_module
:
8743 case TARGET_NR_init_module
:
8744 case TARGET_NR_delete_module
:
8745 #ifdef TARGET_NR_get_kernel_syms
8746 case TARGET_NR_get_kernel_syms
:
8749 case TARGET_NR_quotactl
:
8751 case TARGET_NR_getpgid
:
8752 ret
= get_errno(getpgid(arg1
));
8754 case TARGET_NR_fchdir
:
8755 ret
= get_errno(fchdir(arg1
));
8757 #ifdef TARGET_NR_bdflush /* not on x86_64 */
8758 case TARGET_NR_bdflush
:
8761 #ifdef TARGET_NR_sysfs
8762 case TARGET_NR_sysfs
:
8765 case TARGET_NR_personality
:
8766 ret
= get_errno(personality(arg1
));
8768 #ifdef TARGET_NR_afs_syscall
8769 case TARGET_NR_afs_syscall
:
8772 #ifdef TARGET_NR__llseek /* Not on alpha */
8773 case TARGET_NR__llseek
:
8776 #if !defined(__NR_llseek)
8777 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | arg3
, arg5
);
8779 ret
= get_errno(res
);
8784 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
8786 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
8792 #ifdef TARGET_NR_getdents
8793 case TARGET_NR_getdents
:
8794 #ifdef __NR_getdents
8795 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
8797 struct target_dirent
*target_dirp
;
8798 struct linux_dirent
*dirp
;
8799 abi_long count
= arg3
;
8801 dirp
= g_try_malloc(count
);
8803 ret
= -TARGET_ENOMEM
;
8807 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
8808 if (!is_error(ret
)) {
8809 struct linux_dirent
*de
;
8810 struct target_dirent
*tde
;
8812 int reclen
, treclen
;
8813 int count1
, tnamelen
;
8817 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
8821 reclen
= de
->d_reclen
;
8822 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
8823 assert(tnamelen
>= 0);
8824 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
8825 assert(count1
+ treclen
<= count
);
8826 tde
->d_reclen
= tswap16(treclen
);
8827 tde
->d_ino
= tswapal(de
->d_ino
);
8828 tde
->d_off
= tswapal(de
->d_off
);
8829 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
8830 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
8832 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
8836 unlock_user(target_dirp
, arg2
, ret
);
8842 struct linux_dirent
*dirp
;
8843 abi_long count
= arg3
;
8845 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
8847 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
8848 if (!is_error(ret
)) {
8849 struct linux_dirent
*de
;
8854 reclen
= de
->d_reclen
;
8857 de
->d_reclen
= tswap16(reclen
);
8858 tswapls(&de
->d_ino
);
8859 tswapls(&de
->d_off
);
8860 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
8864 unlock_user(dirp
, arg2
, ret
);
8868 /* Implement getdents in terms of getdents64 */
8870 struct linux_dirent64
*dirp
;
8871 abi_long count
= arg3
;
8873 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
8877 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
8878 if (!is_error(ret
)) {
8879 /* Convert the dirent64 structs to target dirent. We do this
8880 * in-place, since we can guarantee that a target_dirent is no
8881 * larger than a dirent64; however this means we have to be
8882 * careful to read everything before writing in the new format.
8884 struct linux_dirent64
*de
;
8885 struct target_dirent
*tde
;
8890 tde
= (struct target_dirent
*)dirp
;
8892 int namelen
, treclen
;
8893 int reclen
= de
->d_reclen
;
8894 uint64_t ino
= de
->d_ino
;
8895 int64_t off
= de
->d_off
;
8896 uint8_t type
= de
->d_type
;
8898 namelen
= strlen(de
->d_name
);
8899 treclen
= offsetof(struct target_dirent
, d_name
)
8901 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
8903 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
8904 tde
->d_ino
= tswapal(ino
);
8905 tde
->d_off
= tswapal(off
);
8906 tde
->d_reclen
= tswap16(treclen
);
8907 /* The target_dirent type is in what was formerly a padding
8908 * byte at the end of the structure:
8910 *(((char *)tde
) + treclen
- 1) = type
;
8912 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
8913 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
8919 unlock_user(dirp
, arg2
, ret
);
8923 #endif /* TARGET_NR_getdents */
8924 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8925 case TARGET_NR_getdents64
:
8927 struct linux_dirent64
*dirp
;
8928 abi_long count
= arg3
;
8929 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
8931 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
8932 if (!is_error(ret
)) {
8933 struct linux_dirent64
*de
;
8938 reclen
= de
->d_reclen
;
8941 de
->d_reclen
= tswap16(reclen
);
8942 tswap64s((uint64_t *)&de
->d_ino
);
8943 tswap64s((uint64_t *)&de
->d_off
);
8944 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
8948 unlock_user(dirp
, arg2
, ret
);
8951 #endif /* TARGET_NR_getdents64 */
8952 #if defined(TARGET_NR__newselect)
8953 case TARGET_NR__newselect
:
8954 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
8957 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
8958 # ifdef TARGET_NR_poll
8959 case TARGET_NR_poll
:
8961 # ifdef TARGET_NR_ppoll
8962 case TARGET_NR_ppoll
:
8965 struct target_pollfd
*target_pfd
;
8966 unsigned int nfds
= arg2
;
8974 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
8975 sizeof(struct target_pollfd
) * nfds
, 1);
8980 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
8981 for (i
= 0; i
< nfds
; i
++) {
8982 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
8983 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
8987 # ifdef TARGET_NR_ppoll
8988 if (num
== TARGET_NR_ppoll
) {
8989 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
8990 target_sigset_t
*target_set
;
8991 sigset_t _set
, *set
= &_set
;
8994 if (target_to_host_timespec(timeout_ts
, arg3
)) {
8995 unlock_user(target_pfd
, arg1
, 0);
9003 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
9005 unlock_user(target_pfd
, arg1
, 0);
9008 target_to_host_sigset(set
, target_set
);
9013 ret
= get_errno(sys_ppoll(pfd
, nfds
, timeout_ts
,
9014 set
, SIGSET_T_SIZE
));
9016 if (!is_error(ret
) && arg3
) {
9017 host_to_target_timespec(arg3
, timeout_ts
);
9020 unlock_user(target_set
, arg4
, 0);
9024 ret
= get_errno(poll(pfd
, nfds
, timeout
));
9026 if (!is_error(ret
)) {
9027 for(i
= 0; i
< nfds
; i
++) {
9028 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
9031 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
9035 case TARGET_NR_flock
:
9036 /* NOTE: the flock constant seems to be the same for every
9038 ret
= get_errno(safe_flock(arg1
, arg2
));
9040 case TARGET_NR_readv
:
9042 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
9044 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
9045 unlock_iovec(vec
, arg2
, arg3
, 1);
9047 ret
= -host_to_target_errno(errno
);
9051 case TARGET_NR_writev
:
9053 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
9055 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
9056 unlock_iovec(vec
, arg2
, arg3
, 0);
9058 ret
= -host_to_target_errno(errno
);
9062 case TARGET_NR_getsid
:
9063 ret
= get_errno(getsid(arg1
));
9065 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9066 case TARGET_NR_fdatasync
:
9067 ret
= get_errno(fdatasync(arg1
));
9070 #ifdef TARGET_NR__sysctl
9071 case TARGET_NR__sysctl
:
9072 /* We don't implement this, but ENOTDIR is always a safe
9074 ret
= -TARGET_ENOTDIR
;
9077 case TARGET_NR_sched_getaffinity
:
9079 unsigned int mask_size
;
9080 unsigned long *mask
;
9083 * sched_getaffinity needs multiples of ulong, so need to take
9084 * care of mismatches between target ulong and host ulong sizes.
9086 if (arg2
& (sizeof(abi_ulong
) - 1)) {
9087 ret
= -TARGET_EINVAL
;
9090 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
9092 mask
= alloca(mask_size
);
9093 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
9095 if (!is_error(ret
)) {
9097 /* More data returned than the caller's buffer will fit.
9098 * This only happens if sizeof(abi_long) < sizeof(long)
9099 * and the caller passed us a buffer holding an odd number
9100 * of abi_longs. If the host kernel is actually using the
9101 * extra 4 bytes then fail EINVAL; otherwise we can just
9102 * ignore them and only copy the interesting part.
9104 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
9105 if (numcpus
> arg2
* 8) {
9106 ret
= -TARGET_EINVAL
;
9112 if (copy_to_user(arg3
, mask
, ret
)) {
9118 case TARGET_NR_sched_setaffinity
:
9120 unsigned int mask_size
;
9121 unsigned long *mask
;
9124 * sched_setaffinity needs multiples of ulong, so need to take
9125 * care of mismatches between target ulong and host ulong sizes.
9127 if (arg2
& (sizeof(abi_ulong
) - 1)) {
9128 ret
= -TARGET_EINVAL
;
9131 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
9133 mask
= alloca(mask_size
);
9134 if (!lock_user_struct(VERIFY_READ
, p
, arg3
, 1)) {
9137 memcpy(mask
, p
, arg2
);
9138 unlock_user_struct(p
, arg2
, 0);
9140 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
9143 case TARGET_NR_sched_setparam
:
9145 struct sched_param
*target_schp
;
9146 struct sched_param schp
;
9149 return -TARGET_EINVAL
;
9151 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
9153 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
9154 unlock_user_struct(target_schp
, arg2
, 0);
9155 ret
= get_errno(sched_setparam(arg1
, &schp
));
9158 case TARGET_NR_sched_getparam
:
9160 struct sched_param
*target_schp
;
9161 struct sched_param schp
;
9164 return -TARGET_EINVAL
;
9166 ret
= get_errno(sched_getparam(arg1
, &schp
));
9167 if (!is_error(ret
)) {
9168 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
9170 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
9171 unlock_user_struct(target_schp
, arg2
, 1);
9175 case TARGET_NR_sched_setscheduler
:
9177 struct sched_param
*target_schp
;
9178 struct sched_param schp
;
9180 return -TARGET_EINVAL
;
9182 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
9184 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
9185 unlock_user_struct(target_schp
, arg3
, 0);
9186 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
9189 case TARGET_NR_sched_getscheduler
:
9190 ret
= get_errno(sched_getscheduler(arg1
));
9192 case TARGET_NR_sched_yield
:
9193 ret
= get_errno(sched_yield());
9195 case TARGET_NR_sched_get_priority_max
:
9196 ret
= get_errno(sched_get_priority_max(arg1
));
9198 case TARGET_NR_sched_get_priority_min
:
9199 ret
= get_errno(sched_get_priority_min(arg1
));
9201 case TARGET_NR_sched_rr_get_interval
:
9204 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
9205 if (!is_error(ret
)) {
9206 ret
= host_to_target_timespec(arg2
, &ts
);
9210 case TARGET_NR_nanosleep
:
9212 struct timespec req
, rem
;
9213 target_to_host_timespec(&req
, arg1
);
9214 ret
= get_errno(safe_nanosleep(&req
, &rem
));
9215 if (is_error(ret
) && arg2
) {
9216 host_to_target_timespec(arg2
, &rem
);
9220 #ifdef TARGET_NR_query_module
9221 case TARGET_NR_query_module
:
9224 #ifdef TARGET_NR_nfsservctl
9225 case TARGET_NR_nfsservctl
:
9228 case TARGET_NR_prctl
:
9230 case PR_GET_PDEATHSIG
:
9233 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
9234 if (!is_error(ret
) && arg2
9235 && put_user_ual(deathsig
, arg2
)) {
9243 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
9247 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
9249 unlock_user(name
, arg2
, 16);
9254 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
9258 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
9260 unlock_user(name
, arg2
, 0);
9265 /* Most prctl options have no pointer arguments */
9266 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
9270 #ifdef TARGET_NR_arch_prctl
9271 case TARGET_NR_arch_prctl
:
9272 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9273 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
9279 #ifdef TARGET_NR_pread64
9280 case TARGET_NR_pread64
:
9281 if (regpairs_aligned(cpu_env
)) {
9285 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
9287 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
9288 unlock_user(p
, arg2
, ret
);
9290 case TARGET_NR_pwrite64
:
9291 if (regpairs_aligned(cpu_env
)) {
9295 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
9297 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
9298 unlock_user(p
, arg2
, 0);
9301 case TARGET_NR_getcwd
:
9302 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
9304 ret
= get_errno(sys_getcwd1(p
, arg2
));
9305 unlock_user(p
, arg1
, ret
);
9307 case TARGET_NR_capget
:
9308 case TARGET_NR_capset
:
9310 struct target_user_cap_header
*target_header
;
9311 struct target_user_cap_data
*target_data
= NULL
;
9312 struct __user_cap_header_struct header
;
9313 struct __user_cap_data_struct data
[2];
9314 struct __user_cap_data_struct
*dataptr
= NULL
;
9315 int i
, target_datalen
;
9318 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
9321 header
.version
= tswap32(target_header
->version
);
9322 header
.pid
= tswap32(target_header
->pid
);
9324 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
9325 /* Version 2 and up takes pointer to two user_data structs */
9329 target_datalen
= sizeof(*target_data
) * data_items
;
9332 if (num
== TARGET_NR_capget
) {
9333 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
9335 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
9338 unlock_user_struct(target_header
, arg1
, 0);
9342 if (num
== TARGET_NR_capset
) {
9343 for (i
= 0; i
< data_items
; i
++) {
9344 data
[i
].effective
= tswap32(target_data
[i
].effective
);
9345 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
9346 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
9353 if (num
== TARGET_NR_capget
) {
9354 ret
= get_errno(capget(&header
, dataptr
));
9356 ret
= get_errno(capset(&header
, dataptr
));
9359 /* The kernel always updates version for both capget and capset */
9360 target_header
->version
= tswap32(header
.version
);
9361 unlock_user_struct(target_header
, arg1
, 1);
9364 if (num
== TARGET_NR_capget
) {
9365 for (i
= 0; i
< data_items
; i
++) {
9366 target_data
[i
].effective
= tswap32(data
[i
].effective
);
9367 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
9368 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
9370 unlock_user(target_data
, arg2
, target_datalen
);
9372 unlock_user(target_data
, arg2
, 0);
9377 case TARGET_NR_sigaltstack
:
9378 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUArchState
*)cpu_env
));
9381 #ifdef CONFIG_SENDFILE
9382 case TARGET_NR_sendfile
:
9387 ret
= get_user_sal(off
, arg3
);
9388 if (is_error(ret
)) {
9393 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
9394 if (!is_error(ret
) && arg3
) {
9395 abi_long ret2
= put_user_sal(off
, arg3
);
9396 if (is_error(ret2
)) {
9402 #ifdef TARGET_NR_sendfile64
9403 case TARGET_NR_sendfile64
:
9408 ret
= get_user_s64(off
, arg3
);
9409 if (is_error(ret
)) {
9414 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
9415 if (!is_error(ret
) && arg3
) {
9416 abi_long ret2
= put_user_s64(off
, arg3
);
9417 if (is_error(ret2
)) {
9425 case TARGET_NR_sendfile
:
9426 #ifdef TARGET_NR_sendfile64
9427 case TARGET_NR_sendfile64
:
9432 #ifdef TARGET_NR_getpmsg
9433 case TARGET_NR_getpmsg
:
9436 #ifdef TARGET_NR_putpmsg
9437 case TARGET_NR_putpmsg
:
9440 #ifdef TARGET_NR_vfork
9441 case TARGET_NR_vfork
:
9442 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
9446 #ifdef TARGET_NR_ugetrlimit
9447 case TARGET_NR_ugetrlimit
:
9450 int resource
= target_to_host_resource(arg1
);
9451 ret
= get_errno(getrlimit(resource
, &rlim
));
9452 if (!is_error(ret
)) {
9453 struct target_rlimit
*target_rlim
;
9454 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
9456 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
9457 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
9458 unlock_user_struct(target_rlim
, arg2
, 1);
9463 #ifdef TARGET_NR_truncate64
9464 case TARGET_NR_truncate64
:
9465 if (!(p
= lock_user_string(arg1
)))
9467 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
9468 unlock_user(p
, arg1
, 0);
9471 #ifdef TARGET_NR_ftruncate64
9472 case TARGET_NR_ftruncate64
:
9473 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
9476 #ifdef TARGET_NR_stat64
9477 case TARGET_NR_stat64
:
9478 if (!(p
= lock_user_string(arg1
)))
9480 ret
= get_errno(stat(path(p
), &st
));
9481 unlock_user(p
, arg1
, 0);
9483 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
9486 #ifdef TARGET_NR_lstat64
9487 case TARGET_NR_lstat64
:
9488 if (!(p
= lock_user_string(arg1
)))
9490 ret
= get_errno(lstat(path(p
), &st
));
9491 unlock_user(p
, arg1
, 0);
9493 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
9496 #ifdef TARGET_NR_fstat64
9497 case TARGET_NR_fstat64
:
9498 ret
= get_errno(fstat(arg1
, &st
));
9500 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
9503 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
9504 #ifdef TARGET_NR_fstatat64
9505 case TARGET_NR_fstatat64
:
9507 #ifdef TARGET_NR_newfstatat
9508 case TARGET_NR_newfstatat
:
9510 if (!(p
= lock_user_string(arg2
)))
9512 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
9514 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
9517 #ifdef TARGET_NR_lchown
9518 case TARGET_NR_lchown
:
9519 if (!(p
= lock_user_string(arg1
)))
9521 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
9522 unlock_user(p
, arg1
, 0);
9525 #ifdef TARGET_NR_getuid
9526 case TARGET_NR_getuid
:
9527 ret
= get_errno(high2lowuid(getuid()));
9530 #ifdef TARGET_NR_getgid
9531 case TARGET_NR_getgid
:
9532 ret
= get_errno(high2lowgid(getgid()));
9535 #ifdef TARGET_NR_geteuid
9536 case TARGET_NR_geteuid
:
9537 ret
= get_errno(high2lowuid(geteuid()));
9540 #ifdef TARGET_NR_getegid
9541 case TARGET_NR_getegid
:
9542 ret
= get_errno(high2lowgid(getegid()));
9545 case TARGET_NR_setreuid
:
9546 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
9548 case TARGET_NR_setregid
:
9549 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
9551 case TARGET_NR_getgroups
:
9553 int gidsetsize
= arg1
;
9554 target_id
*target_grouplist
;
9558 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
9559 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
9560 if (gidsetsize
== 0)
9562 if (!is_error(ret
)) {
9563 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
9564 if (!target_grouplist
)
9566 for(i
= 0;i
< ret
; i
++)
9567 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
9568 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
9572 case TARGET_NR_setgroups
:
9574 int gidsetsize
= arg1
;
9575 target_id
*target_grouplist
;
9576 gid_t
*grouplist
= NULL
;
9579 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
9580 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
9581 if (!target_grouplist
) {
9582 ret
= -TARGET_EFAULT
;
9585 for (i
= 0; i
< gidsetsize
; i
++) {
9586 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
9588 unlock_user(target_grouplist
, arg2
, 0);
9590 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
9593 case TARGET_NR_fchown
:
9594 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
9596 #if defined(TARGET_NR_fchownat)
9597 case TARGET_NR_fchownat
:
9598 if (!(p
= lock_user_string(arg2
)))
9600 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
9601 low2highgid(arg4
), arg5
));
9602 unlock_user(p
, arg2
, 0);
9605 #ifdef TARGET_NR_setresuid
9606 case TARGET_NR_setresuid
:
9607 ret
= get_errno(sys_setresuid(low2highuid(arg1
),
9609 low2highuid(arg3
)));
9612 #ifdef TARGET_NR_getresuid
9613 case TARGET_NR_getresuid
:
9615 uid_t ruid
, euid
, suid
;
9616 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
9617 if (!is_error(ret
)) {
9618 if (put_user_id(high2lowuid(ruid
), arg1
)
9619 || put_user_id(high2lowuid(euid
), arg2
)
9620 || put_user_id(high2lowuid(suid
), arg3
))
9626 #ifdef TARGET_NR_getresgid
9627 case TARGET_NR_setresgid
:
9628 ret
= get_errno(sys_setresgid(low2highgid(arg1
),
9630 low2highgid(arg3
)));
9633 #ifdef TARGET_NR_getresgid
9634 case TARGET_NR_getresgid
:
9636 gid_t rgid
, egid
, sgid
;
9637 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
9638 if (!is_error(ret
)) {
9639 if (put_user_id(high2lowgid(rgid
), arg1
)
9640 || put_user_id(high2lowgid(egid
), arg2
)
9641 || put_user_id(high2lowgid(sgid
), arg3
))
9647 #ifdef TARGET_NR_chown
9648 case TARGET_NR_chown
:
9649 if (!(p
= lock_user_string(arg1
)))
9651 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
9652 unlock_user(p
, arg1
, 0);
9655 case TARGET_NR_setuid
:
9656 ret
= get_errno(sys_setuid(low2highuid(arg1
)));
9658 case TARGET_NR_setgid
:
9659 ret
= get_errno(sys_setgid(low2highgid(arg1
)));
9661 case TARGET_NR_setfsuid
:
9662 ret
= get_errno(setfsuid(arg1
));
9664 case TARGET_NR_setfsgid
:
9665 ret
= get_errno(setfsgid(arg1
));
9668 #ifdef TARGET_NR_lchown32
9669 case TARGET_NR_lchown32
:
9670 if (!(p
= lock_user_string(arg1
)))
9672 ret
= get_errno(lchown(p
, arg2
, arg3
));
9673 unlock_user(p
, arg1
, 0);
9676 #ifdef TARGET_NR_getuid32
9677 case TARGET_NR_getuid32
:
9678 ret
= get_errno(getuid());
9682 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
9683 /* Alpha specific */
9684 case TARGET_NR_getxuid
:
9688 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
9690 ret
= get_errno(getuid());
9693 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
9694 /* Alpha specific */
9695 case TARGET_NR_getxgid
:
9699 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
9701 ret
= get_errno(getgid());
9704 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
9705 /* Alpha specific */
9706 case TARGET_NR_osf_getsysinfo
:
9707 ret
= -TARGET_EOPNOTSUPP
;
9709 case TARGET_GSI_IEEE_FP_CONTROL
:
9711 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
9713 /* Copied from linux ieee_fpcr_to_swcr. */
9714 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
9715 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
9716 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
9717 | SWCR_TRAP_ENABLE_DZE
9718 | SWCR_TRAP_ENABLE_OVF
);
9719 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
9720 | SWCR_TRAP_ENABLE_INE
);
9721 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
9722 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
9724 if (put_user_u64 (swcr
, arg2
))
9730 /* case GSI_IEEE_STATE_AT_SIGNAL:
9731 -- Not implemented in linux kernel.
9733 -- Retrieves current unaligned access state; not much used.
9735 -- Retrieves implver information; surely not used.
9737 -- Grabs a copy of the HWRPB; surely not used.
9742 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
9743 /* Alpha specific */
9744 case TARGET_NR_osf_setsysinfo
:
9745 ret
= -TARGET_EOPNOTSUPP
;
9747 case TARGET_SSI_IEEE_FP_CONTROL
:
9749 uint64_t swcr
, fpcr
, orig_fpcr
;
9751 if (get_user_u64 (swcr
, arg2
)) {
9754 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
9755 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
9757 /* Copied from linux ieee_swcr_to_fpcr. */
9758 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
9759 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
9760 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
9761 | SWCR_TRAP_ENABLE_DZE
9762 | SWCR_TRAP_ENABLE_OVF
)) << 48;
9763 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
9764 | SWCR_TRAP_ENABLE_INE
)) << 57;
9765 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
9766 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
9768 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
9773 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
9775 uint64_t exc
, fpcr
, orig_fpcr
;
9778 if (get_user_u64(exc
, arg2
)) {
9782 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
9784 /* We only add to the exception status here. */
9785 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
9787 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
9790 /* Old exceptions are not signaled. */
9791 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
9793 /* If any exceptions set by this call,
9794 and are unmasked, send a signal. */
9796 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
9797 si_code
= TARGET_FPE_FLTRES
;
9799 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
9800 si_code
= TARGET_FPE_FLTUND
;
9802 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
9803 si_code
= TARGET_FPE_FLTOVF
;
9805 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
9806 si_code
= TARGET_FPE_FLTDIV
;
9808 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
9809 si_code
= TARGET_FPE_FLTINV
;
9812 target_siginfo_t info
;
9813 info
.si_signo
= SIGFPE
;
9815 info
.si_code
= si_code
;
9816 info
._sifields
._sigfault
._addr
9817 = ((CPUArchState
*)cpu_env
)->pc
;
9818 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
9823 /* case SSI_NVPAIRS:
9824 -- Used with SSIN_UACPROC to enable unaligned accesses.
9825 case SSI_IEEE_STATE_AT_SIGNAL:
9826 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
9827 -- Not implemented in linux kernel
9832 #ifdef TARGET_NR_osf_sigprocmask
9833 /* Alpha specific. */
9834 case TARGET_NR_osf_sigprocmask
:
9838 sigset_t set
, oldset
;
9841 case TARGET_SIG_BLOCK
:
9844 case TARGET_SIG_UNBLOCK
:
9847 case TARGET_SIG_SETMASK
:
9851 ret
= -TARGET_EINVAL
;
9855 target_to_host_old_sigset(&set
, &mask
);
9856 ret
= do_sigprocmask(how
, &set
, &oldset
);
9858 host_to_target_old_sigset(&mask
, &oldset
);
9865 #ifdef TARGET_NR_getgid32
9866 case TARGET_NR_getgid32
:
9867 ret
= get_errno(getgid());
9870 #ifdef TARGET_NR_geteuid32
9871 case TARGET_NR_geteuid32
:
9872 ret
= get_errno(geteuid());
9875 #ifdef TARGET_NR_getegid32
9876 case TARGET_NR_getegid32
:
9877 ret
= get_errno(getegid());
9880 #ifdef TARGET_NR_setreuid32
9881 case TARGET_NR_setreuid32
:
9882 ret
= get_errno(setreuid(arg1
, arg2
));
9885 #ifdef TARGET_NR_setregid32
9886 case TARGET_NR_setregid32
:
9887 ret
= get_errno(setregid(arg1
, arg2
));
9890 #ifdef TARGET_NR_getgroups32
9891 case TARGET_NR_getgroups32
:
9893 int gidsetsize
= arg1
;
9894 uint32_t *target_grouplist
;
9898 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
9899 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
9900 if (gidsetsize
== 0)
9902 if (!is_error(ret
)) {
9903 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
9904 if (!target_grouplist
) {
9905 ret
= -TARGET_EFAULT
;
9908 for(i
= 0;i
< ret
; i
++)
9909 target_grouplist
[i
] = tswap32(grouplist
[i
]);
9910 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
9915 #ifdef TARGET_NR_setgroups32
9916 case TARGET_NR_setgroups32
:
9918 int gidsetsize
= arg1
;
9919 uint32_t *target_grouplist
;
9923 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
9924 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
9925 if (!target_grouplist
) {
9926 ret
= -TARGET_EFAULT
;
9929 for(i
= 0;i
< gidsetsize
; i
++)
9930 grouplist
[i
] = tswap32(target_grouplist
[i
]);
9931 unlock_user(target_grouplist
, arg2
, 0);
9932 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
9936 #ifdef TARGET_NR_fchown32
9937 case TARGET_NR_fchown32
:
9938 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
9941 #ifdef TARGET_NR_setresuid32
9942 case TARGET_NR_setresuid32
:
9943 ret
= get_errno(sys_setresuid(arg1
, arg2
, arg3
));
9946 #ifdef TARGET_NR_getresuid32
9947 case TARGET_NR_getresuid32
:
9949 uid_t ruid
, euid
, suid
;
9950 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
9951 if (!is_error(ret
)) {
9952 if (put_user_u32(ruid
, arg1
)
9953 || put_user_u32(euid
, arg2
)
9954 || put_user_u32(suid
, arg3
))
9960 #ifdef TARGET_NR_setresgid32
9961 case TARGET_NR_setresgid32
:
9962 ret
= get_errno(sys_setresgid(arg1
, arg2
, arg3
));
9965 #ifdef TARGET_NR_getresgid32
9966 case TARGET_NR_getresgid32
:
9968 gid_t rgid
, egid
, sgid
;
9969 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
9970 if (!is_error(ret
)) {
9971 if (put_user_u32(rgid
, arg1
)
9972 || put_user_u32(egid
, arg2
)
9973 || put_user_u32(sgid
, arg3
))
9979 #ifdef TARGET_NR_chown32
9980 case TARGET_NR_chown32
:
9981 if (!(p
= lock_user_string(arg1
)))
9983 ret
= get_errno(chown(p
, arg2
, arg3
));
9984 unlock_user(p
, arg1
, 0);
9987 #ifdef TARGET_NR_setuid32
9988 case TARGET_NR_setuid32
:
9989 ret
= get_errno(sys_setuid(arg1
));
9992 #ifdef TARGET_NR_setgid32
9993 case TARGET_NR_setgid32
:
9994 ret
= get_errno(sys_setgid(arg1
));
9997 #ifdef TARGET_NR_setfsuid32
9998 case TARGET_NR_setfsuid32
:
9999 ret
= get_errno(setfsuid(arg1
));
10002 #ifdef TARGET_NR_setfsgid32
10003 case TARGET_NR_setfsgid32
:
10004 ret
= get_errno(setfsgid(arg1
));
10008 case TARGET_NR_pivot_root
:
10009 goto unimplemented
;
10010 #ifdef TARGET_NR_mincore
10011 case TARGET_NR_mincore
:
10014 ret
= -TARGET_EFAULT
;
10015 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
10017 if (!(p
= lock_user_string(arg3
)))
10019 ret
= get_errno(mincore(a
, arg2
, p
));
10020 unlock_user(p
, arg3
, ret
);
10022 unlock_user(a
, arg1
, 0);
10026 #ifdef TARGET_NR_arm_fadvise64_64
10027 case TARGET_NR_arm_fadvise64_64
:
10028 /* arm_fadvise64_64 looks like fadvise64_64 but
10029 * with different argument order: fd, advice, offset, len
10030 * rather than the usual fd, offset, len, advice.
10031 * Note that offset and len are both 64-bit so appear as
10032 * pairs of 32-bit registers.
10034 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
10035 target_offset64(arg5
, arg6
), arg2
);
10036 ret
= -host_to_target_errno(ret
);
10040 #if TARGET_ABI_BITS == 32
10042 #ifdef TARGET_NR_fadvise64_64
10043 case TARGET_NR_fadvise64_64
:
10044 /* 6 args: fd, offset (high, low), len (high, low), advice */
10045 if (regpairs_aligned(cpu_env
)) {
10046 /* offset is in (3,4), len in (5,6) and advice in 7 */
10053 ret
= -host_to_target_errno(posix_fadvise(arg1
,
10054 target_offset64(arg2
, arg3
),
10055 target_offset64(arg4
, arg5
),
10060 #ifdef TARGET_NR_fadvise64
10061 case TARGET_NR_fadvise64
:
10062 /* 5 args: fd, offset (high, low), len, advice */
10063 if (regpairs_aligned(cpu_env
)) {
10064 /* offset is in (3,4), len in 5 and advice in 6 */
10070 ret
= -host_to_target_errno(posix_fadvise(arg1
,
10071 target_offset64(arg2
, arg3
),
10076 #else /* not a 32-bit ABI */
10077 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10078 #ifdef TARGET_NR_fadvise64_64
10079 case TARGET_NR_fadvise64_64
:
10081 #ifdef TARGET_NR_fadvise64
10082 case TARGET_NR_fadvise64
:
10084 #ifdef TARGET_S390X
10086 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
10087 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
10088 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
10089 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
10093 ret
= -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
10096 #endif /* end of 64-bit ABI fadvise handling */
10098 #ifdef TARGET_NR_madvise
10099 case TARGET_NR_madvise
:
10100 /* A straight passthrough may not be safe because qemu sometimes
10101 turns private file-backed mappings into anonymous mappings.
10102 This will break MADV_DONTNEED.
10103 This is a hint, so ignoring and returning success is ok. */
10104 ret
= get_errno(0);
10107 #if TARGET_ABI_BITS == 32
10108 case TARGET_NR_fcntl64
:
10112 struct target_flock64
*target_fl
;
10114 struct target_eabi_flock64
*target_efl
;
10117 cmd
= target_to_host_fcntl_cmd(arg2
);
10118 if (cmd
== -TARGET_EINVAL
) {
10124 case TARGET_F_GETLK64
:
10126 if (((CPUARMState
*)cpu_env
)->eabi
) {
10127 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
10129 fl
.l_type
= tswap16(target_efl
->l_type
);
10130 fl
.l_whence
= tswap16(target_efl
->l_whence
);
10131 fl
.l_start
= tswap64(target_efl
->l_start
);
10132 fl
.l_len
= tswap64(target_efl
->l_len
);
10133 fl
.l_pid
= tswap32(target_efl
->l_pid
);
10134 unlock_user_struct(target_efl
, arg3
, 0);
10138 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
10140 fl
.l_type
= tswap16(target_fl
->l_type
);
10141 fl
.l_whence
= tswap16(target_fl
->l_whence
);
10142 fl
.l_start
= tswap64(target_fl
->l_start
);
10143 fl
.l_len
= tswap64(target_fl
->l_len
);
10144 fl
.l_pid
= tswap32(target_fl
->l_pid
);
10145 unlock_user_struct(target_fl
, arg3
, 0);
10147 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
10150 if (((CPUARMState
*)cpu_env
)->eabi
) {
10151 if (!lock_user_struct(VERIFY_WRITE
, target_efl
, arg3
, 0))
10153 target_efl
->l_type
= tswap16(fl
.l_type
);
10154 target_efl
->l_whence
= tswap16(fl
.l_whence
);
10155 target_efl
->l_start
= tswap64(fl
.l_start
);
10156 target_efl
->l_len
= tswap64(fl
.l_len
);
10157 target_efl
->l_pid
= tswap32(fl
.l_pid
);
10158 unlock_user_struct(target_efl
, arg3
, 1);
10162 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg3
, 0))
10164 target_fl
->l_type
= tswap16(fl
.l_type
);
10165 target_fl
->l_whence
= tswap16(fl
.l_whence
);
10166 target_fl
->l_start
= tswap64(fl
.l_start
);
10167 target_fl
->l_len
= tswap64(fl
.l_len
);
10168 target_fl
->l_pid
= tswap32(fl
.l_pid
);
10169 unlock_user_struct(target_fl
, arg3
, 1);
10174 case TARGET_F_SETLK64
:
10175 case TARGET_F_SETLKW64
:
10177 if (((CPUARMState
*)cpu_env
)->eabi
) {
10178 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
10180 fl
.l_type
= tswap16(target_efl
->l_type
);
10181 fl
.l_whence
= tswap16(target_efl
->l_whence
);
10182 fl
.l_start
= tswap64(target_efl
->l_start
);
10183 fl
.l_len
= tswap64(target_efl
->l_len
);
10184 fl
.l_pid
= tswap32(target_efl
->l_pid
);
10185 unlock_user_struct(target_efl
, arg3
, 0);
10189 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
10191 fl
.l_type
= tswap16(target_fl
->l_type
);
10192 fl
.l_whence
= tswap16(target_fl
->l_whence
);
10193 fl
.l_start
= tswap64(target_fl
->l_start
);
10194 fl
.l_len
= tswap64(target_fl
->l_len
);
10195 fl
.l_pid
= tswap32(target_fl
->l_pid
);
10196 unlock_user_struct(target_fl
, arg3
, 0);
10198 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
10201 ret
= do_fcntl(arg1
, arg2
, arg3
);
10207 #ifdef TARGET_NR_cacheflush
10208 case TARGET_NR_cacheflush
:
10209 /* self-modifying code is handled automatically, so nothing needed */
10213 #ifdef TARGET_NR_security
10214 case TARGET_NR_security
:
10215 goto unimplemented
;
10217 #ifdef TARGET_NR_getpagesize
10218 case TARGET_NR_getpagesize
:
10219 ret
= TARGET_PAGE_SIZE
;
10222 case TARGET_NR_gettid
:
10223 ret
= get_errno(gettid());
10225 #ifdef TARGET_NR_readahead
10226 case TARGET_NR_readahead
:
10227 #if TARGET_ABI_BITS == 32
10228 if (regpairs_aligned(cpu_env
)) {
10233 ret
= get_errno(readahead(arg1
, ((off64_t
)arg3
<< 32) | arg2
, arg4
));
10235 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
10240 #ifdef TARGET_NR_setxattr
10241 case TARGET_NR_listxattr
:
10242 case TARGET_NR_llistxattr
:
10246 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10248 ret
= -TARGET_EFAULT
;
10252 p
= lock_user_string(arg1
);
10254 if (num
== TARGET_NR_listxattr
) {
10255 ret
= get_errno(listxattr(p
, b
, arg3
));
10257 ret
= get_errno(llistxattr(p
, b
, arg3
));
10260 ret
= -TARGET_EFAULT
;
10262 unlock_user(p
, arg1
, 0);
10263 unlock_user(b
, arg2
, arg3
);
10266 case TARGET_NR_flistxattr
:
10270 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10272 ret
= -TARGET_EFAULT
;
10276 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
10277 unlock_user(b
, arg2
, arg3
);
10280 case TARGET_NR_setxattr
:
10281 case TARGET_NR_lsetxattr
:
10283 void *p
, *n
, *v
= 0;
10285 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
10287 ret
= -TARGET_EFAULT
;
10291 p
= lock_user_string(arg1
);
10292 n
= lock_user_string(arg2
);
10294 if (num
== TARGET_NR_setxattr
) {
10295 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
10297 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
10300 ret
= -TARGET_EFAULT
;
10302 unlock_user(p
, arg1
, 0);
10303 unlock_user(n
, arg2
, 0);
10304 unlock_user(v
, arg3
, 0);
10307 case TARGET_NR_fsetxattr
:
10311 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
10313 ret
= -TARGET_EFAULT
;
10317 n
= lock_user_string(arg2
);
10319 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
10321 ret
= -TARGET_EFAULT
;
10323 unlock_user(n
, arg2
, 0);
10324 unlock_user(v
, arg3
, 0);
10327 case TARGET_NR_getxattr
:
10328 case TARGET_NR_lgetxattr
:
10330 void *p
, *n
, *v
= 0;
10332 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
10334 ret
= -TARGET_EFAULT
;
10338 p
= lock_user_string(arg1
);
10339 n
= lock_user_string(arg2
);
10341 if (num
== TARGET_NR_getxattr
) {
10342 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
10344 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
10347 ret
= -TARGET_EFAULT
;
10349 unlock_user(p
, arg1
, 0);
10350 unlock_user(n
, arg2
, 0);
10351 unlock_user(v
, arg3
, arg4
);
10354 case TARGET_NR_fgetxattr
:
10358 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
10360 ret
= -TARGET_EFAULT
;
10364 n
= lock_user_string(arg2
);
10366 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
10368 ret
= -TARGET_EFAULT
;
10370 unlock_user(n
, arg2
, 0);
10371 unlock_user(v
, arg3
, arg4
);
10374 case TARGET_NR_removexattr
:
10375 case TARGET_NR_lremovexattr
:
10378 p
= lock_user_string(arg1
);
10379 n
= lock_user_string(arg2
);
10381 if (num
== TARGET_NR_removexattr
) {
10382 ret
= get_errno(removexattr(p
, n
));
10384 ret
= get_errno(lremovexattr(p
, n
));
10387 ret
= -TARGET_EFAULT
;
10389 unlock_user(p
, arg1
, 0);
10390 unlock_user(n
, arg2
, 0);
10393 case TARGET_NR_fremovexattr
:
10396 n
= lock_user_string(arg2
);
10398 ret
= get_errno(fremovexattr(arg1
, n
));
10400 ret
= -TARGET_EFAULT
;
10402 unlock_user(n
, arg2
, 0);
10406 #endif /* CONFIG_ATTR */
10407 #ifdef TARGET_NR_set_thread_area
10408 case TARGET_NR_set_thread_area
:
10409 #if defined(TARGET_MIPS)
10410 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
10413 #elif defined(TARGET_CRIS)
10415 ret
= -TARGET_EINVAL
;
10417 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
10421 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
10422 ret
= do_set_thread_area(cpu_env
, arg1
);
10424 #elif defined(TARGET_M68K)
10426 TaskState
*ts
= cpu
->opaque
;
10427 ts
->tp_value
= arg1
;
10432 goto unimplemented_nowarn
;
10435 #ifdef TARGET_NR_get_thread_area
10436 case TARGET_NR_get_thread_area
:
10437 #if defined(TARGET_I386) && defined(TARGET_ABI32)
10438 ret
= do_get_thread_area(cpu_env
, arg1
);
10440 #elif defined(TARGET_M68K)
10442 TaskState
*ts
= cpu
->opaque
;
10443 ret
= ts
->tp_value
;
10447 goto unimplemented_nowarn
;
10450 #ifdef TARGET_NR_getdomainname
10451 case TARGET_NR_getdomainname
:
10452 goto unimplemented_nowarn
;
10455 #ifdef TARGET_NR_clock_gettime
10456 case TARGET_NR_clock_gettime
:
10458 struct timespec ts
;
10459 ret
= get_errno(clock_gettime(arg1
, &ts
));
10460 if (!is_error(ret
)) {
10461 host_to_target_timespec(arg2
, &ts
);
10466 #ifdef TARGET_NR_clock_getres
10467 case TARGET_NR_clock_getres
:
10469 struct timespec ts
;
10470 ret
= get_errno(clock_getres(arg1
, &ts
));
10471 if (!is_error(ret
)) {
10472 host_to_target_timespec(arg2
, &ts
);
10477 #ifdef TARGET_NR_clock_nanosleep
10478 case TARGET_NR_clock_nanosleep
:
10480 struct timespec ts
;
10481 target_to_host_timespec(&ts
, arg3
);
10482 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
10483 &ts
, arg4
? &ts
: NULL
));
10485 host_to_target_timespec(arg4
, &ts
);
10487 #if defined(TARGET_PPC)
10488 /* clock_nanosleep is odd in that it returns positive errno values.
10489 * On PPC, CR0 bit 3 should be set in such a situation. */
10490 if (ret
&& ret
!= -TARGET_ERESTARTSYS
) {
10491 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
10498 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
10499 case TARGET_NR_set_tid_address
:
10500 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
10504 case TARGET_NR_tkill
:
10505 ret
= get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
10508 case TARGET_NR_tgkill
:
10509 ret
= get_errno(safe_tgkill((int)arg1
, (int)arg2
,
10510 target_to_host_signal(arg3
)));
10513 #ifdef TARGET_NR_set_robust_list
10514 case TARGET_NR_set_robust_list
:
10515 case TARGET_NR_get_robust_list
:
10516 /* The ABI for supporting robust futexes has userspace pass
10517 * the kernel a pointer to a linked list which is updated by
10518 * userspace after the syscall; the list is walked by the kernel
10519 * when the thread exits. Since the linked list in QEMU guest
10520 * memory isn't a valid linked list for the host and we have
10521 * no way to reliably intercept the thread-death event, we can't
10522 * support these. Silently return ENOSYS so that guest userspace
10523 * falls back to a non-robust futex implementation (which should
10524 * be OK except in the corner case of the guest crashing while
10525 * holding a mutex that is shared with another process via
10528 goto unimplemented_nowarn
;
10531 #if defined(TARGET_NR_utimensat)
10532 case TARGET_NR_utimensat
:
10534 struct timespec
*tsp
, ts
[2];
10538 target_to_host_timespec(ts
, arg3
);
10539 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
10543 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
10545 if (!(p
= lock_user_string(arg2
))) {
10546 ret
= -TARGET_EFAULT
;
10549 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
10550 unlock_user(p
, arg2
, 0);
10555 case TARGET_NR_futex
:
10556 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
10558 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
10559 case TARGET_NR_inotify_init
:
10560 ret
= get_errno(sys_inotify_init());
10563 #ifdef CONFIG_INOTIFY1
10564 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
10565 case TARGET_NR_inotify_init1
:
10566 ret
= get_errno(sys_inotify_init1(arg1
));
10570 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
10571 case TARGET_NR_inotify_add_watch
:
10572 p
= lock_user_string(arg2
);
10573 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
10574 unlock_user(p
, arg2
, 0);
10577 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
10578 case TARGET_NR_inotify_rm_watch
:
10579 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
10583 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
10584 case TARGET_NR_mq_open
:
10586 struct mq_attr posix_mq_attr
, *attrp
;
10588 p
= lock_user_string(arg1
- 1);
10590 copy_from_user_mq_attr (&posix_mq_attr
, arg4
);
10591 attrp
= &posix_mq_attr
;
10595 ret
= get_errno(mq_open(p
, arg2
, arg3
, attrp
));
10596 unlock_user (p
, arg1
, 0);
10600 case TARGET_NR_mq_unlink
:
10601 p
= lock_user_string(arg1
- 1);
10602 ret
= get_errno(mq_unlink(p
));
10603 unlock_user (p
, arg1
, 0);
10606 case TARGET_NR_mq_timedsend
:
10608 struct timespec ts
;
10610 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
10612 target_to_host_timespec(&ts
, arg5
);
10613 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
10614 host_to_target_timespec(arg5
, &ts
);
10616 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
10618 unlock_user (p
, arg2
, arg3
);
10622 case TARGET_NR_mq_timedreceive
:
10624 struct timespec ts
;
10627 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
10629 target_to_host_timespec(&ts
, arg5
);
10630 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
10632 host_to_target_timespec(arg5
, &ts
);
10634 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
10637 unlock_user (p
, arg2
, arg3
);
10639 put_user_u32(prio
, arg4
);
10643 /* Not implemented for now... */
10644 /* case TARGET_NR_mq_notify: */
10647 case TARGET_NR_mq_getsetattr
:
10649 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
10652 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
10653 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
10656 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
10657 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
10664 #ifdef CONFIG_SPLICE
10665 #ifdef TARGET_NR_tee
10666 case TARGET_NR_tee
:
10668 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
10672 #ifdef TARGET_NR_splice
10673 case TARGET_NR_splice
:
10675 loff_t loff_in
, loff_out
;
10676 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
10678 if (get_user_u64(loff_in
, arg2
)) {
10681 ploff_in
= &loff_in
;
10684 if (get_user_u64(loff_out
, arg4
)) {
10687 ploff_out
= &loff_out
;
10689 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
10691 if (put_user_u64(loff_in
, arg2
)) {
10696 if (put_user_u64(loff_out
, arg4
)) {
10703 #ifdef TARGET_NR_vmsplice
10704 case TARGET_NR_vmsplice
:
10706 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10708 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
10709 unlock_iovec(vec
, arg2
, arg3
, 0);
10711 ret
= -host_to_target_errno(errno
);
10716 #endif /* CONFIG_SPLICE */
10717 #ifdef CONFIG_EVENTFD
10718 #if defined(TARGET_NR_eventfd)
10719 case TARGET_NR_eventfd
:
10720 ret
= get_errno(eventfd(arg1
, 0));
10721 fd_trans_unregister(ret
);
10724 #if defined(TARGET_NR_eventfd2)
10725 case TARGET_NR_eventfd2
:
10727 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
10728 if (arg2
& TARGET_O_NONBLOCK
) {
10729 host_flags
|= O_NONBLOCK
;
10731 if (arg2
& TARGET_O_CLOEXEC
) {
10732 host_flags
|= O_CLOEXEC
;
10734 ret
= get_errno(eventfd(arg1
, host_flags
));
10735 fd_trans_unregister(ret
);
10739 #endif /* CONFIG_EVENTFD */
10740 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
10741 case TARGET_NR_fallocate
:
10742 #if TARGET_ABI_BITS == 32
10743 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
10744 target_offset64(arg5
, arg6
)));
10746 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
10750 #if defined(CONFIG_SYNC_FILE_RANGE)
10751 #if defined(TARGET_NR_sync_file_range)
10752 case TARGET_NR_sync_file_range
:
10753 #if TARGET_ABI_BITS == 32
10754 #if defined(TARGET_MIPS)
10755 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
10756 target_offset64(arg5
, arg6
), arg7
));
10758 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
10759 target_offset64(arg4
, arg5
), arg6
));
10760 #endif /* !TARGET_MIPS */
10762 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
10766 #if defined(TARGET_NR_sync_file_range2)
10767 case TARGET_NR_sync_file_range2
:
10768 /* This is like sync_file_range but the arguments are reordered */
10769 #if TARGET_ABI_BITS == 32
10770 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
10771 target_offset64(arg5
, arg6
), arg2
));
10773 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
10778 #if defined(TARGET_NR_signalfd4)
10779 case TARGET_NR_signalfd4
:
10780 ret
= do_signalfd4(arg1
, arg2
, arg4
);
10783 #if defined(TARGET_NR_signalfd)
10784 case TARGET_NR_signalfd
:
10785 ret
= do_signalfd4(arg1
, arg2
, 0);
10788 #if defined(CONFIG_EPOLL)
10789 #if defined(TARGET_NR_epoll_create)
10790 case TARGET_NR_epoll_create
:
10791 ret
= get_errno(epoll_create(arg1
));
10794 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
10795 case TARGET_NR_epoll_create1
:
10796 ret
= get_errno(epoll_create1(arg1
));
10799 #if defined(TARGET_NR_epoll_ctl)
10800 case TARGET_NR_epoll_ctl
:
10802 struct epoll_event ep
;
10803 struct epoll_event
*epp
= 0;
10805 struct target_epoll_event
*target_ep
;
10806 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
10809 ep
.events
= tswap32(target_ep
->events
);
10810 /* The epoll_data_t union is just opaque data to the kernel,
10811 * so we transfer all 64 bits across and need not worry what
10812 * actual data type it is.
10814 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
10815 unlock_user_struct(target_ep
, arg4
, 0);
10818 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
10823 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
10824 #define IMPLEMENT_EPOLL_PWAIT
10826 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
10827 #if defined(TARGET_NR_epoll_wait)
10828 case TARGET_NR_epoll_wait
:
10830 #if defined(IMPLEMENT_EPOLL_PWAIT)
10831 case TARGET_NR_epoll_pwait
:
10834 struct target_epoll_event
*target_ep
;
10835 struct epoll_event
*ep
;
10837 int maxevents
= arg3
;
10838 int timeout
= arg4
;
10840 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
10841 maxevents
* sizeof(struct target_epoll_event
), 1);
10846 ep
= alloca(maxevents
* sizeof(struct epoll_event
));
10849 #if defined(IMPLEMENT_EPOLL_PWAIT)
10850 case TARGET_NR_epoll_pwait
:
10852 target_sigset_t
*target_set
;
10853 sigset_t _set
, *set
= &_set
;
10856 target_set
= lock_user(VERIFY_READ
, arg5
,
10857 sizeof(target_sigset_t
), 1);
10859 unlock_user(target_ep
, arg2
, 0);
10862 target_to_host_sigset(set
, target_set
);
10863 unlock_user(target_set
, arg5
, 0);
10868 ret
= get_errno(epoll_pwait(epfd
, ep
, maxevents
, timeout
, set
));
10872 #if defined(TARGET_NR_epoll_wait)
10873 case TARGET_NR_epoll_wait
:
10874 ret
= get_errno(epoll_wait(epfd
, ep
, maxevents
, timeout
));
10878 ret
= -TARGET_ENOSYS
;
10880 if (!is_error(ret
)) {
10882 for (i
= 0; i
< ret
; i
++) {
10883 target_ep
[i
].events
= tswap32(ep
[i
].events
);
10884 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
10887 unlock_user(target_ep
, arg2
, ret
* sizeof(struct target_epoll_event
));
10892 #ifdef TARGET_NR_prlimit64
10893 case TARGET_NR_prlimit64
:
10895 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
10896 struct target_rlimit64
*target_rnew
, *target_rold
;
10897 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
10898 int resource
= target_to_host_resource(arg2
);
10900 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
10903 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
10904 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
10905 unlock_user_struct(target_rnew
, arg3
, 0);
10909 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
10910 if (!is_error(ret
) && arg4
) {
10911 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
10914 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
10915 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
10916 unlock_user_struct(target_rold
, arg4
, 1);
10921 #ifdef TARGET_NR_gethostname
10922 case TARGET_NR_gethostname
:
10924 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
10926 ret
= get_errno(gethostname(name
, arg2
));
10927 unlock_user(name
, arg1
, arg2
);
10929 ret
= -TARGET_EFAULT
;
10934 #ifdef TARGET_NR_atomic_cmpxchg_32
10935 case TARGET_NR_atomic_cmpxchg_32
:
10937 /* should use start_exclusive from main.c */
10938 abi_ulong mem_value
;
10939 if (get_user_u32(mem_value
, arg6
)) {
10940 target_siginfo_t info
;
10941 info
.si_signo
= SIGSEGV
;
10943 info
.si_code
= TARGET_SEGV_MAPERR
;
10944 info
._sifields
._sigfault
._addr
= arg6
;
10945 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
10949 if (mem_value
== arg2
)
10950 put_user_u32(arg1
, arg6
);
10955 #ifdef TARGET_NR_atomic_barrier
10956 case TARGET_NR_atomic_barrier
:
10958 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
10964 #ifdef TARGET_NR_timer_create
10965 case TARGET_NR_timer_create
:
10967 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
10969 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
10972 int timer_index
= next_free_host_timer();
10974 if (timer_index
< 0) {
10975 ret
= -TARGET_EAGAIN
;
10977 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
10980 phost_sevp
= &host_sevp
;
10981 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
10987 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
10991 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
11000 #ifdef TARGET_NR_timer_settime
11001 case TARGET_NR_timer_settime
:
11003 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11004 * struct itimerspec * old_value */
11005 target_timer_t timerid
= get_timer_id(arg1
);
11009 } else if (arg3
== 0) {
11010 ret
= -TARGET_EINVAL
;
11012 timer_t htimer
= g_posix_timers
[timerid
];
11013 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
11015 target_to_host_itimerspec(&hspec_new
, arg3
);
11017 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
11018 host_to_target_itimerspec(arg2
, &hspec_old
);
11024 #ifdef TARGET_NR_timer_gettime
11025 case TARGET_NR_timer_gettime
:
11027 /* args: timer_t timerid, struct itimerspec *curr_value */
11028 target_timer_t timerid
= get_timer_id(arg1
);
11032 } else if (!arg2
) {
11033 ret
= -TARGET_EFAULT
;
11035 timer_t htimer
= g_posix_timers
[timerid
];
11036 struct itimerspec hspec
;
11037 ret
= get_errno(timer_gettime(htimer
, &hspec
));
11039 if (host_to_target_itimerspec(arg2
, &hspec
)) {
11040 ret
= -TARGET_EFAULT
;
11047 #ifdef TARGET_NR_timer_getoverrun
11048 case TARGET_NR_timer_getoverrun
:
11050 /* args: timer_t timerid */
11051 target_timer_t timerid
= get_timer_id(arg1
);
11056 timer_t htimer
= g_posix_timers
[timerid
];
11057 ret
= get_errno(timer_getoverrun(htimer
));
11059 fd_trans_unregister(ret
);
11064 #ifdef TARGET_NR_timer_delete
11065 case TARGET_NR_timer_delete
:
11067 /* args: timer_t timerid */
11068 target_timer_t timerid
= get_timer_id(arg1
);
11073 timer_t htimer
= g_posix_timers
[timerid
];
11074 ret
= get_errno(timer_delete(htimer
));
11075 g_posix_timers
[timerid
] = 0;
11081 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11082 case TARGET_NR_timerfd_create
:
11083 ret
= get_errno(timerfd_create(arg1
,
11084 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
11088 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11089 case TARGET_NR_timerfd_gettime
:
11091 struct itimerspec its_curr
;
11093 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
11095 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
11102 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11103 case TARGET_NR_timerfd_settime
:
11105 struct itimerspec its_new
, its_old
, *p_new
;
11108 if (target_to_host_itimerspec(&its_new
, arg3
)) {
11116 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
11118 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
11125 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11126 case TARGET_NR_ioprio_get
:
11127 ret
= get_errno(ioprio_get(arg1
, arg2
));
11131 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11132 case TARGET_NR_ioprio_set
:
11133 ret
= get_errno(ioprio_set(arg1
, arg2
, arg3
));
11137 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11138 case TARGET_NR_setns
:
11139 ret
= get_errno(setns(arg1
, arg2
));
11142 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11143 case TARGET_NR_unshare
:
11144 ret
= get_errno(unshare(arg1
));
11150 gemu_log("qemu: Unsupported syscall: %d\n", num
);
11151 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
11152 unimplemented_nowarn
:
11154 ret
= -TARGET_ENOSYS
;
11159 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
11162 print_syscall_ret(num
, ret
);
11165 ret
= -TARGET_EFAULT
;