/*
 *  Linux syscalls
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/path.h"
#include <sys/mount.h>
#include <sys/fsuid.h>
#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <linux/capability.h>
#ifdef __ia64__
int __clone2(int (*fn)(void *), void *child_stack_base,
             size_t stack_size, int flags, void *arg, ...);
#endif
#include <sys/socket.h>
#include <sys/times.h>
#include <sys/statfs.h>
#include <sys/sysinfo.h>
#include <sys/signalfd.h>
//#include <sys/user.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
#include "qemu-common.h"
#ifdef CONFIG_TIMERFD
#include <sys/timerfd.h>
#endif
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif
#ifdef CONFIG_EPOLL
#include <sys/epoll.h>
#endif
#ifdef CONFIG_ATTR
#include "qemu/xattr.h"
#endif
#ifdef CONFIG_SENDFILE
#include <sys/sendfile.h>
#endif

/* Rename the host termios types so they do not clash with the
 * target-prefixed definitions pulled in from the QEMU headers. */
#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/mtio.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include <linux/dm-ioctl.h>
#include <linux/reboot.h>
#include <linux/route.h>
#include <linux/filter.h>
#include <linux/blkpg.h>
#include <linux/netlink.h>
#ifdef CONFIG_RTNETLINK
#include <linux/rtnetlink.h>
#endif
#include <linux/audit.h>
#include "linux_loop.h"
114 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
115 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
118 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
119 * once. This exercises the codepaths for restart.
121 //#define DEBUG_ERESTARTSYS
123 //#include <linux/msdos_fs.h>
124 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
125 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
127 /* This is the size of the host kernel's sigset_t, needed where we make
128 * direct system calls that take a sigset_t pointer and a size.
130 #define SIGSET_T_SIZE (_NSIG / 8)
140 #define _syscall0(type,name) \
141 static type name (void) \
143 return syscall(__NR_##name); \
146 #define _syscall1(type,name,type1,arg1) \
147 static type name (type1 arg1) \
149 return syscall(__NR_##name, arg1); \
152 #define _syscall2(type,name,type1,arg1,type2,arg2) \
153 static type name (type1 arg1,type2 arg2) \
155 return syscall(__NR_##name, arg1, arg2); \
158 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
159 static type name (type1 arg1,type2 arg2,type3 arg3) \
161 return syscall(__NR_##name, arg1, arg2, arg3); \
164 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
165 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
167 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
170 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
172 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
174 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
178 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
179 type5,arg5,type6,arg6) \
180 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
183 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
187 #define __NR_sys_uname __NR_uname
188 #define __NR_sys_getcwd1 __NR_getcwd
189 #define __NR_sys_getdents __NR_getdents
190 #define __NR_sys_getdents64 __NR_getdents64
191 #define __NR_sys_getpriority __NR_getpriority
192 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
193 #define __NR_sys_syslog __NR_syslog
194 #define __NR_sys_tgkill __NR_tgkill
195 #define __NR_sys_tkill __NR_tkill
196 #define __NR_sys_futex __NR_futex
197 #define __NR_sys_inotify_init __NR_inotify_init
198 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
199 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
201 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
203 #define __NR__llseek __NR_lseek
206 /* Newer kernel ports have llseek() instead of _llseek() */
207 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
208 #define TARGET_NR__llseek TARGET_NR_llseek
212 _syscall0(int, gettid
)
214 /* This is a replacement for the host gettid() and must return a host
216 static int gettid(void) {
220 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
221 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
223 #if !defined(__NR_getdents) || \
224 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
225 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
227 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
228 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
229 loff_t
*, res
, uint
, wh
);
231 _syscall3(int,sys_rt_sigqueueinfo
,int,pid
,int,sig
,siginfo_t
*,uinfo
)
232 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
233 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
234 _syscall3(int,sys_tgkill
,int,tgid
,int,pid
,int,sig
)
236 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
237 _syscall2(int,sys_tkill
,int,tid
,int,sig
)
239 #ifdef __NR_exit_group
240 _syscall1(int,exit_group
,int,error_code
)
242 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
243 _syscall1(int,set_tid_address
,int *,tidptr
)
245 #if defined(TARGET_NR_futex) && defined(__NR_futex)
246 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
247 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
249 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
250 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
251 unsigned long *, user_mask_ptr
);
252 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
253 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
254 unsigned long *, user_mask_ptr
);
255 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
257 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
258 struct __user_cap_data_struct
*, data
);
259 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
260 struct __user_cap_data_struct
*, data
);
261 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
262 _syscall2(int, ioprio_get
, int, which
, int, who
)
264 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
265 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
267 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
268 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
271 static bitmask_transtbl fcntl_flags_tbl
[] = {
272 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
273 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
274 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
275 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
276 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
277 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
278 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
279 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
280 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
281 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
282 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
283 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
284 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
285 #if defined(O_DIRECT)
286 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
288 #if defined(O_NOATIME)
289 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
291 #if defined(O_CLOEXEC)
292 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
295 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
297 /* Don't terminate the list prematurely on 64-bit host+guest. */
298 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
299 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
304 typedef abi_long (*TargetFdDataFunc
)(void *, size_t);
305 typedef abi_long (*TargetFdAddrFunc
)(void *, abi_ulong
, socklen_t
);
306 typedef struct TargetFdTrans
{
307 TargetFdDataFunc host_to_target_data
;
308 TargetFdDataFunc target_to_host_data
;
309 TargetFdAddrFunc target_to_host_addr
;
312 static TargetFdTrans
**target_fd_trans
;
314 static unsigned int target_fd_max
;
316 static TargetFdDataFunc
fd_trans_target_to_host_data(int fd
)
318 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
319 return target_fd_trans
[fd
]->target_to_host_data
;
324 static TargetFdDataFunc
fd_trans_host_to_target_data(int fd
)
326 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
327 return target_fd_trans
[fd
]->host_to_target_data
;
332 static TargetFdAddrFunc
fd_trans_target_to_host_addr(int fd
)
334 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
335 return target_fd_trans
[fd
]->target_to_host_addr
;
340 static void fd_trans_register(int fd
, TargetFdTrans
*trans
)
344 if (fd
>= target_fd_max
) {
345 oldmax
= target_fd_max
;
346 target_fd_max
= ((fd
>> 6) + 1) << 6; /* by slice of 64 entries */
347 target_fd_trans
= g_renew(TargetFdTrans
*,
348 target_fd_trans
, target_fd_max
);
349 memset((void *)(target_fd_trans
+ oldmax
), 0,
350 (target_fd_max
- oldmax
) * sizeof(TargetFdTrans
*));
352 target_fd_trans
[fd
] = trans
;
355 static void fd_trans_unregister(int fd
)
357 if (fd
>= 0 && fd
< target_fd_max
) {
358 target_fd_trans
[fd
] = NULL
;
362 static void fd_trans_dup(int oldfd
, int newfd
)
364 fd_trans_unregister(newfd
);
365 if (oldfd
< target_fd_max
&& target_fd_trans
[oldfd
]) {
366 fd_trans_register(newfd
, target_fd_trans
[oldfd
]);
/* getcwd(2)-style helper: fill buf with the current directory and return
 * the string length including the NUL terminator, or -1 on failure
 * (errno is set by getcwd()). */
static int sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }
  return strlen(buf)+1;
}
#ifdef TARGET_NR_utimensat
#ifdef CONFIG_UTIMENSAT
/* Use the libc wrapper: NULL pathname means "operate on dirfd itself",
 * which maps to futimens(). */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    if (pathname == NULL)
        return futimens(dirfd, times);
    else
        return utimensat(dirfd, pathname, times, flags);
}
#elif defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
/* Neither libc nor kernel support: report ENOSYS like a missing syscall. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

/* Thin wrappers over the libc inotify API, emitted only when both the
 * target ABI number and the host syscall exist. */
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY  */
#if defined(TARGET_NR_ppoll)
#ifndef __NR_ppoll
# define __NR_ppoll -1
#endif
#define __NR_sys_ppoll __NR_ppoll
_syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
          struct timespec *, timeout, const sigset_t *, sigmask,
          size_t, sigsetsize)
#endif

#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};

_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

/* Claim the first free slot in g_posix_timers (marking it with the
 * placeholder value 1) and return its index, or -1 if all 32 are busy. */
static inline int next_free_host_timer(void)
{
    int k ;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
/* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
500 #define ERRNO_TABLE_SIZE 1200
502 /* target_to_host_errno_table[] is initialized from
503 * host_to_target_errno_table[] in syscall_init(). */
504 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
508 * This list is the union of errno values overridden in asm-<arch>/errno.h
509 * minus the errnos that are not actually generic to all archs.
511 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
512 [EAGAIN
] = TARGET_EAGAIN
,
513 [EIDRM
] = TARGET_EIDRM
,
514 [ECHRNG
] = TARGET_ECHRNG
,
515 [EL2NSYNC
] = TARGET_EL2NSYNC
,
516 [EL3HLT
] = TARGET_EL3HLT
,
517 [EL3RST
] = TARGET_EL3RST
,
518 [ELNRNG
] = TARGET_ELNRNG
,
519 [EUNATCH
] = TARGET_EUNATCH
,
520 [ENOCSI
] = TARGET_ENOCSI
,
521 [EL2HLT
] = TARGET_EL2HLT
,
522 [EDEADLK
] = TARGET_EDEADLK
,
523 [ENOLCK
] = TARGET_ENOLCK
,
524 [EBADE
] = TARGET_EBADE
,
525 [EBADR
] = TARGET_EBADR
,
526 [EXFULL
] = TARGET_EXFULL
,
527 [ENOANO
] = TARGET_ENOANO
,
528 [EBADRQC
] = TARGET_EBADRQC
,
529 [EBADSLT
] = TARGET_EBADSLT
,
530 [EBFONT
] = TARGET_EBFONT
,
531 [ENOSTR
] = TARGET_ENOSTR
,
532 [ENODATA
] = TARGET_ENODATA
,
533 [ETIME
] = TARGET_ETIME
,
534 [ENOSR
] = TARGET_ENOSR
,
535 [ENONET
] = TARGET_ENONET
,
536 [ENOPKG
] = TARGET_ENOPKG
,
537 [EREMOTE
] = TARGET_EREMOTE
,
538 [ENOLINK
] = TARGET_ENOLINK
,
539 [EADV
] = TARGET_EADV
,
540 [ESRMNT
] = TARGET_ESRMNT
,
541 [ECOMM
] = TARGET_ECOMM
,
542 [EPROTO
] = TARGET_EPROTO
,
543 [EDOTDOT
] = TARGET_EDOTDOT
,
544 [EMULTIHOP
] = TARGET_EMULTIHOP
,
545 [EBADMSG
] = TARGET_EBADMSG
,
546 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
547 [EOVERFLOW
] = TARGET_EOVERFLOW
,
548 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
549 [EBADFD
] = TARGET_EBADFD
,
550 [EREMCHG
] = TARGET_EREMCHG
,
551 [ELIBACC
] = TARGET_ELIBACC
,
552 [ELIBBAD
] = TARGET_ELIBBAD
,
553 [ELIBSCN
] = TARGET_ELIBSCN
,
554 [ELIBMAX
] = TARGET_ELIBMAX
,
555 [ELIBEXEC
] = TARGET_ELIBEXEC
,
556 [EILSEQ
] = TARGET_EILSEQ
,
557 [ENOSYS
] = TARGET_ENOSYS
,
558 [ELOOP
] = TARGET_ELOOP
,
559 [ERESTART
] = TARGET_ERESTART
,
560 [ESTRPIPE
] = TARGET_ESTRPIPE
,
561 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
562 [EUSERS
] = TARGET_EUSERS
,
563 [ENOTSOCK
] = TARGET_ENOTSOCK
,
564 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
565 [EMSGSIZE
] = TARGET_EMSGSIZE
,
566 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
567 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
568 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
569 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
570 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
571 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
572 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
573 [EADDRINUSE
] = TARGET_EADDRINUSE
,
574 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
575 [ENETDOWN
] = TARGET_ENETDOWN
,
576 [ENETUNREACH
] = TARGET_ENETUNREACH
,
577 [ENETRESET
] = TARGET_ENETRESET
,
578 [ECONNABORTED
] = TARGET_ECONNABORTED
,
579 [ECONNRESET
] = TARGET_ECONNRESET
,
580 [ENOBUFS
] = TARGET_ENOBUFS
,
581 [EISCONN
] = TARGET_EISCONN
,
582 [ENOTCONN
] = TARGET_ENOTCONN
,
583 [EUCLEAN
] = TARGET_EUCLEAN
,
584 [ENOTNAM
] = TARGET_ENOTNAM
,
585 [ENAVAIL
] = TARGET_ENAVAIL
,
586 [EISNAM
] = TARGET_EISNAM
,
587 [EREMOTEIO
] = TARGET_EREMOTEIO
,
588 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
589 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
590 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
591 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
592 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
593 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
594 [EALREADY
] = TARGET_EALREADY
,
595 [EINPROGRESS
] = TARGET_EINPROGRESS
,
596 [ESTALE
] = TARGET_ESTALE
,
597 [ECANCELED
] = TARGET_ECANCELED
,
598 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
599 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
601 [ENOKEY
] = TARGET_ENOKEY
,
604 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
607 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
610 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
613 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
615 #ifdef ENOTRECOVERABLE
616 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
620 static inline int host_to_target_errno(int err
)
622 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
623 host_to_target_errno_table
[err
]) {
624 return host_to_target_errno_table
[err
];
629 static inline int target_to_host_errno(int err
)
631 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
632 target_to_host_errno_table
[err
]) {
633 return target_to_host_errno_table
[err
];
638 static inline abi_long
get_errno(abi_long ret
)
641 return -host_to_target_errno(errno
);
646 static inline int is_error(abi_long ret
)
648 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
651 char *target_strerror(int err
)
653 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
656 return strerror(target_to_host_errno(err
));
/* safe_syscallN(type, name, ...) defines safe_name(), which issues the
 * raw host syscall through safe_syscall() so that guest signals arriving
 * mid-call restart or interrupt it correctly. */
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
706 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
707 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
708 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
709 int, flags
, mode_t
, mode
)
710 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
711 struct rusage
*, rusage
)
712 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
713 int, options
, struct rusage
*, rusage
)
714 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
715 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
716 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
717 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
718 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
719 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
721 static inline int host_to_target_sock_type(int host_type
)
725 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
727 target_type
= TARGET_SOCK_DGRAM
;
730 target_type
= TARGET_SOCK_STREAM
;
733 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
737 #if defined(SOCK_CLOEXEC)
738 if (host_type
& SOCK_CLOEXEC
) {
739 target_type
|= TARGET_SOCK_CLOEXEC
;
743 #if defined(SOCK_NONBLOCK)
744 if (host_type
& SOCK_NONBLOCK
) {
745 target_type
|= TARGET_SOCK_NONBLOCK
;
752 static abi_ulong target_brk
;
753 static abi_ulong target_original_brk
;
754 static abi_ulong brk_page
;
756 void target_set_brk(abi_ulong new_brk
)
758 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
759 brk_page
= HOST_PAGE_ALIGN(target_brk
);
762 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
763 #define DEBUGF_BRK(message, args...)
765 /* do_brk() must return target values and target errnos. */
766 abi_long
do_brk(abi_ulong new_brk
)
768 abi_long mapped_addr
;
771 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
774 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
777 if (new_brk
< target_original_brk
) {
778 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
783 /* If the new brk is less than the highest page reserved to the
784 * target heap allocation, set it and we're almost done... */
785 if (new_brk
<= brk_page
) {
786 /* Heap contents are initialized to zero, as for anonymous
788 if (new_brk
> target_brk
) {
789 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
791 target_brk
= new_brk
;
792 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
796 /* We need to allocate more memory after the brk... Note that
797 * we don't use MAP_FIXED because that will map over the top of
798 * any existing mapping (like the one with the host libc or qemu
799 * itself); instead we treat "mapped but at wrong address" as
800 * a failure and unmap again.
802 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
803 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
804 PROT_READ
|PROT_WRITE
,
805 MAP_ANON
|MAP_PRIVATE
, 0, 0));
807 if (mapped_addr
== brk_page
) {
808 /* Heap contents are initialized to zero, as for anonymous
809 * mapped pages. Technically the new pages are already
810 * initialized to zero since they *are* anonymous mapped
811 * pages, however we have to take care with the contents that
812 * come from the remaining part of the previous page: it may
813 * contains garbage data due to a previous heap usage (grown
815 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
817 target_brk
= new_brk
;
818 brk_page
= HOST_PAGE_ALIGN(target_brk
);
819 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
822 } else if (mapped_addr
!= -1) {
823 /* Mapped but at wrong address, meaning there wasn't actually
824 * enough space for this brk.
826 target_munmap(mapped_addr
, new_alloc_size
);
828 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
831 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
834 #if defined(TARGET_ALPHA)
835 /* We (partially) emulate OSF/1 on Alpha, which requires we
836 return a proper errno, not an unchanged brk value. */
837 return -TARGET_ENOMEM
;
839 /* For everything else, return the previous break. */
843 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
844 abi_ulong target_fds_addr
,
848 abi_ulong b
, *target_fds
;
850 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
851 if (!(target_fds
= lock_user(VERIFY_READ
,
853 sizeof(abi_ulong
) * nw
,
855 return -TARGET_EFAULT
;
859 for (i
= 0; i
< nw
; i
++) {
860 /* grab the abi_ulong */
861 __get_user(b
, &target_fds
[i
]);
862 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
863 /* check the bit inside the abi_ulong */
870 unlock_user(target_fds
, target_fds_addr
, 0);
875 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
876 abi_ulong target_fds_addr
,
879 if (target_fds_addr
) {
880 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
881 return -TARGET_EFAULT
;
889 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
895 abi_ulong
*target_fds
;
897 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
898 if (!(target_fds
= lock_user(VERIFY_WRITE
,
900 sizeof(abi_ulong
) * nw
,
902 return -TARGET_EFAULT
;
905 for (i
= 0; i
< nw
; i
++) {
907 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
908 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
911 __put_user(v
, &target_fds
[i
]);
914 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
919 #if defined(__alpha__)
925 static inline abi_long
host_to_target_clock_t(long ticks
)
927 #if HOST_HZ == TARGET_HZ
930 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
934 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
935 const struct rusage
*rusage
)
937 struct target_rusage
*target_rusage
;
939 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
940 return -TARGET_EFAULT
;
941 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
942 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
943 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
944 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
945 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
946 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
947 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
948 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
949 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
950 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
951 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
952 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
953 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
954 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
955 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
956 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
957 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
958 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
959 unlock_user_struct(target_rusage
, target_addr
, 1);
964 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
966 abi_ulong target_rlim_swap
;
969 target_rlim_swap
= tswapal(target_rlim
);
970 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
971 return RLIM_INFINITY
;
973 result
= target_rlim_swap
;
974 if (target_rlim_swap
!= (rlim_t
)result
)
975 return RLIM_INFINITY
;
980 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
982 abi_ulong target_rlim_swap
;
985 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
986 target_rlim_swap
= TARGET_RLIM_INFINITY
;
988 target_rlim_swap
= rlim
;
989 result
= tswapal(target_rlim_swap
);
994 static inline int target_to_host_resource(int code
)
997 case TARGET_RLIMIT_AS
:
999 case TARGET_RLIMIT_CORE
:
1001 case TARGET_RLIMIT_CPU
:
1003 case TARGET_RLIMIT_DATA
:
1005 case TARGET_RLIMIT_FSIZE
:
1006 return RLIMIT_FSIZE
;
1007 case TARGET_RLIMIT_LOCKS
:
1008 return RLIMIT_LOCKS
;
1009 case TARGET_RLIMIT_MEMLOCK
:
1010 return RLIMIT_MEMLOCK
;
1011 case TARGET_RLIMIT_MSGQUEUE
:
1012 return RLIMIT_MSGQUEUE
;
1013 case TARGET_RLIMIT_NICE
:
1015 case TARGET_RLIMIT_NOFILE
:
1016 return RLIMIT_NOFILE
;
1017 case TARGET_RLIMIT_NPROC
:
1018 return RLIMIT_NPROC
;
1019 case TARGET_RLIMIT_RSS
:
1021 case TARGET_RLIMIT_RTPRIO
:
1022 return RLIMIT_RTPRIO
;
1023 case TARGET_RLIMIT_SIGPENDING
:
1024 return RLIMIT_SIGPENDING
;
1025 case TARGET_RLIMIT_STACK
:
1026 return RLIMIT_STACK
;
1032 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1033 abi_ulong target_tv_addr
)
1035 struct target_timeval
*target_tv
;
1037 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
1038 return -TARGET_EFAULT
;
1040 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1041 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1043 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1048 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1049 const struct timeval
*tv
)
1051 struct target_timeval
*target_tv
;
1053 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
1054 return -TARGET_EFAULT
;
1056 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1057 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1059 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1064 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1065 abi_ulong target_tz_addr
)
1067 struct target_timezone
*target_tz
;
1069 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1070 return -TARGET_EFAULT
;
1073 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1074 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1076 unlock_user_struct(target_tz
, target_tz_addr
, 0);
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

/* Read a struct mq_attr from guest memory.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

/* Write a struct mq_attr into guest memory.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /* Pull the three fd sets (any of which may be absent) from the guest. */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /* select() takes a timeval but we implement via pselect6, which
     * wants a timespec. */
    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        /* Linux select() updates the timeout with the time remaining. */
        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
#endif
1182 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1185 return pipe2(host_pipe
, flags
);
1191 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1192 int flags
, int is_pipe2
)
1196 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1199 return get_errno(ret
);
1201 /* Several targets have special calling conventions for the original
1202 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1204 #if defined(TARGET_ALPHA)
1205 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1206 return host_pipe
[0];
1207 #elif defined(TARGET_MIPS)
1208 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1209 return host_pipe
[0];
1210 #elif defined(TARGET_SH4)
1211 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1212 return host_pipe
[0];
1213 #elif defined(TARGET_SPARC)
1214 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1215 return host_pipe
[0];
1219 if (put_user_s32(host_pipe
[0], pipedes
)
1220 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1221 return -TARGET_EFAULT
;
1222 return get_errno(ret
);
1225 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1226 abi_ulong target_addr
,
1229 struct target_ip_mreqn
*target_smreqn
;
1231 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1233 return -TARGET_EFAULT
;
1234 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1235 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1236 if (len
== sizeof(struct target_ip_mreqn
))
1237 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1238 unlock_user(target_smreqn
, target_addr
, 0);
1243 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1244 abi_ulong target_addr
,
1247 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1248 sa_family_t sa_family
;
1249 struct target_sockaddr
*target_saddr
;
1251 if (fd_trans_target_to_host_addr(fd
)) {
1252 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1255 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1257 return -TARGET_EFAULT
;
1259 sa_family
= tswap16(target_saddr
->sa_family
);
1261 /* Oops. The caller might send a incomplete sun_path; sun_path
1262 * must be terminated by \0 (see the manual page), but
1263 * unfortunately it is quite common to specify sockaddr_un
1264 * length as "strlen(x->sun_path)" while it should be
1265 * "strlen(...) + 1". We'll fix that here if needed.
1266 * Linux kernel has a similar feature.
1269 if (sa_family
== AF_UNIX
) {
1270 if (len
< unix_maxlen
&& len
> 0) {
1271 char *cp
= (char*)target_saddr
;
1273 if ( cp
[len
-1] && !cp
[len
] )
1276 if (len
> unix_maxlen
)
1280 memcpy(addr
, target_saddr
, len
);
1281 addr
->sa_family
= sa_family
;
1282 if (sa_family
== AF_NETLINK
) {
1283 struct sockaddr_nl
*nladdr
;
1285 nladdr
= (struct sockaddr_nl
*)addr
;
1286 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1287 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1288 } else if (sa_family
== AF_PACKET
) {
1289 struct target_sockaddr_ll
*lladdr
;
1291 lladdr
= (struct target_sockaddr_ll
*)addr
;
1292 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1293 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1295 unlock_user(target_saddr
, target_addr
, 0);
1300 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1301 struct sockaddr
*addr
,
1304 struct target_sockaddr
*target_saddr
;
1306 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1308 return -TARGET_EFAULT
;
1309 memcpy(target_saddr
, addr
, len
);
1310 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1311 if (addr
->sa_family
== AF_NETLINK
) {
1312 struct sockaddr_nl
*target_nl
= (struct sockaddr_nl
*)target_saddr
;
1313 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1314 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1316 unlock_user(target_saddr
, target_addr
, len
);
1321 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1322 struct target_msghdr
*target_msgh
)
1324 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1325 abi_long msg_controllen
;
1326 abi_ulong target_cmsg_addr
;
1327 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1328 socklen_t space
= 0;
1330 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1331 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1333 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1334 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1335 target_cmsg_start
= target_cmsg
;
1337 return -TARGET_EFAULT
;
1339 while (cmsg
&& target_cmsg
) {
1340 void *data
= CMSG_DATA(cmsg
);
1341 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1343 int len
= tswapal(target_cmsg
->cmsg_len
)
1344 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
1346 space
+= CMSG_SPACE(len
);
1347 if (space
> msgh
->msg_controllen
) {
1348 space
-= CMSG_SPACE(len
);
1349 /* This is a QEMU bug, since we allocated the payload
1350 * area ourselves (unlike overflow in host-to-target
1351 * conversion, which is just the guest giving us a buffer
1352 * that's too small). It can't happen for the payload types
1353 * we currently support; if it becomes an issue in future
1354 * we would need to improve our allocation strategy to
1355 * something more intelligent than "twice the size of the
1356 * target buffer we're reading from".
1358 gemu_log("Host cmsg overflow\n");
1362 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1363 cmsg
->cmsg_level
= SOL_SOCKET
;
1365 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1367 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1368 cmsg
->cmsg_len
= CMSG_LEN(len
);
1370 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1371 int *fd
= (int *)data
;
1372 int *target_fd
= (int *)target_data
;
1373 int i
, numfds
= len
/ sizeof(int);
1375 for (i
= 0; i
< numfds
; i
++) {
1376 __get_user(fd
[i
], target_fd
+ i
);
1378 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1379 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1380 struct ucred
*cred
= (struct ucred
*)data
;
1381 struct target_ucred
*target_cred
=
1382 (struct target_ucred
*)target_data
;
1384 __get_user(cred
->pid
, &target_cred
->pid
);
1385 __get_user(cred
->uid
, &target_cred
->uid
);
1386 __get_user(cred
->gid
, &target_cred
->gid
);
1388 gemu_log("Unsupported ancillary data: %d/%d\n",
1389 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1390 memcpy(data
, target_data
, len
);
1393 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1394 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1397 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1399 msgh
->msg_controllen
= space
;
1403 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1404 struct msghdr
*msgh
)
1406 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1407 abi_long msg_controllen
;
1408 abi_ulong target_cmsg_addr
;
1409 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1410 socklen_t space
= 0;
1412 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1413 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1415 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1416 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1417 target_cmsg_start
= target_cmsg
;
1419 return -TARGET_EFAULT
;
1421 while (cmsg
&& target_cmsg
) {
1422 void *data
= CMSG_DATA(cmsg
);
1423 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1425 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
1426 int tgt_len
, tgt_space
;
1428 /* We never copy a half-header but may copy half-data;
1429 * this is Linux's behaviour in put_cmsg(). Note that
1430 * truncation here is a guest problem (which we report
1431 * to the guest via the CTRUNC bit), unlike truncation
1432 * in target_to_host_cmsg, which is a QEMU bug.
1434 if (msg_controllen
< sizeof(struct cmsghdr
)) {
1435 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1439 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1440 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1442 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1444 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1446 tgt_len
= TARGET_CMSG_LEN(len
);
1448 /* Payload types which need a different size of payload on
1449 * the target must adjust tgt_len here.
1451 switch (cmsg
->cmsg_level
) {
1453 switch (cmsg
->cmsg_type
) {
1455 tgt_len
= sizeof(struct target_timeval
);
1464 if (msg_controllen
< tgt_len
) {
1465 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1466 tgt_len
= msg_controllen
;
1469 /* We must now copy-and-convert len bytes of payload
1470 * into tgt_len bytes of destination space. Bear in mind
1471 * that in both source and destination we may be dealing
1472 * with a truncated value!
1474 switch (cmsg
->cmsg_level
) {
1476 switch (cmsg
->cmsg_type
) {
1479 int *fd
= (int *)data
;
1480 int *target_fd
= (int *)target_data
;
1481 int i
, numfds
= tgt_len
/ sizeof(int);
1483 for (i
= 0; i
< numfds
; i
++) {
1484 __put_user(fd
[i
], target_fd
+ i
);
1490 struct timeval
*tv
= (struct timeval
*)data
;
1491 struct target_timeval
*target_tv
=
1492 (struct target_timeval
*)target_data
;
1494 if (len
!= sizeof(struct timeval
) ||
1495 tgt_len
!= sizeof(struct target_timeval
)) {
1499 /* copy struct timeval to target */
1500 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1501 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1504 case SCM_CREDENTIALS
:
1506 struct ucred
*cred
= (struct ucred
*)data
;
1507 struct target_ucred
*target_cred
=
1508 (struct target_ucred
*)target_data
;
1510 __put_user(cred
->pid
, &target_cred
->pid
);
1511 __put_user(cred
->uid
, &target_cred
->uid
);
1512 __put_user(cred
->gid
, &target_cred
->gid
);
1522 gemu_log("Unsupported ancillary data: %d/%d\n",
1523 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1524 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1525 if (tgt_len
> len
) {
1526 memset(target_data
+ len
, 0, tgt_len
- len
);
1530 target_cmsg
->cmsg_len
= tswapal(tgt_len
);
1531 tgt_space
= TARGET_CMSG_SPACE(len
);
1532 if (msg_controllen
< tgt_space
) {
1533 tgt_space
= msg_controllen
;
1535 msg_controllen
-= tgt_space
;
1537 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1538 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1541 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1543 target_msgh
->msg_controllen
= tswapal(space
);
1547 static void tswap_nlmsghdr(struct nlmsghdr
*nlh
)
1549 nlh
->nlmsg_len
= tswap32(nlh
->nlmsg_len
);
1550 nlh
->nlmsg_type
= tswap16(nlh
->nlmsg_type
);
1551 nlh
->nlmsg_flags
= tswap16(nlh
->nlmsg_flags
);
1552 nlh
->nlmsg_seq
= tswap32(nlh
->nlmsg_seq
);
1553 nlh
->nlmsg_pid
= tswap32(nlh
->nlmsg_pid
);
1556 static abi_long
host_to_target_for_each_nlmsg(struct nlmsghdr
*nlh
,
1558 abi_long (*host_to_target_nlmsg
)
1559 (struct nlmsghdr
*))
1564 while (len
> sizeof(struct nlmsghdr
)) {
1566 nlmsg_len
= nlh
->nlmsg_len
;
1567 if (nlmsg_len
< sizeof(struct nlmsghdr
) ||
1572 switch (nlh
->nlmsg_type
) {
1574 tswap_nlmsghdr(nlh
);
1580 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
1581 e
->error
= tswap32(e
->error
);
1582 tswap_nlmsghdr(&e
->msg
);
1583 tswap_nlmsghdr(nlh
);
1587 ret
= host_to_target_nlmsg(nlh
);
1589 tswap_nlmsghdr(nlh
);
1594 tswap_nlmsghdr(nlh
);
1595 len
-= NLMSG_ALIGN(nlmsg_len
);
1596 nlh
= (struct nlmsghdr
*)(((char*)nlh
) + NLMSG_ALIGN(nlmsg_len
));
1601 static abi_long
target_to_host_for_each_nlmsg(struct nlmsghdr
*nlh
,
1603 abi_long (*target_to_host_nlmsg
)
1604 (struct nlmsghdr
*))
1608 while (len
> sizeof(struct nlmsghdr
)) {
1609 if (tswap32(nlh
->nlmsg_len
) < sizeof(struct nlmsghdr
) ||
1610 tswap32(nlh
->nlmsg_len
) > len
) {
1613 tswap_nlmsghdr(nlh
);
1614 switch (nlh
->nlmsg_type
) {
1621 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
1622 e
->error
= tswap32(e
->error
);
1623 tswap_nlmsghdr(&e
->msg
);
1626 ret
= target_to_host_nlmsg(nlh
);
1631 len
-= NLMSG_ALIGN(nlh
->nlmsg_len
);
1632 nlh
= (struct nlmsghdr
*)(((char *)nlh
) + NLMSG_ALIGN(nlh
->nlmsg_len
));
1637 #ifdef CONFIG_RTNETLINK
1638 static abi_long
host_to_target_for_each_rtattr(struct rtattr
*rtattr
,
1640 abi_long (*host_to_target_rtattr
)
1643 unsigned short rta_len
;
1646 while (len
> sizeof(struct rtattr
)) {
1647 rta_len
= rtattr
->rta_len
;
1648 if (rta_len
< sizeof(struct rtattr
) ||
1652 ret
= host_to_target_rtattr(rtattr
);
1653 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
1654 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
1658 len
-= RTA_ALIGN(rta_len
);
1659 rtattr
= (struct rtattr
*)(((char *)rtattr
) + RTA_ALIGN(rta_len
));
1664 static abi_long
host_to_target_data_link_rtattr(struct rtattr
*rtattr
)
1667 struct rtnl_link_stats
*st
;
1668 struct rtnl_link_stats64
*st64
;
1669 struct rtnl_link_ifmap
*map
;
1671 switch (rtattr
->rta_type
) {
1674 case IFLA_BROADCAST
:
1680 case IFLA_OPERSTATE
:
1683 case IFLA_PROTO_DOWN
:
1690 case IFLA_CARRIER_CHANGES
:
1691 case IFLA_NUM_RX_QUEUES
:
1692 case IFLA_NUM_TX_QUEUES
:
1693 case IFLA_PROMISCUITY
:
1695 case IFLA_LINK_NETNSID
:
1699 u32
= RTA_DATA(rtattr
);
1700 *u32
= tswap32(*u32
);
1702 /* struct rtnl_link_stats */
1704 st
= RTA_DATA(rtattr
);
1705 st
->rx_packets
= tswap32(st
->rx_packets
);
1706 st
->tx_packets
= tswap32(st
->tx_packets
);
1707 st
->rx_bytes
= tswap32(st
->rx_bytes
);
1708 st
->tx_bytes
= tswap32(st
->tx_bytes
);
1709 st
->rx_errors
= tswap32(st
->rx_errors
);
1710 st
->tx_errors
= tswap32(st
->tx_errors
);
1711 st
->rx_dropped
= tswap32(st
->rx_dropped
);
1712 st
->tx_dropped
= tswap32(st
->tx_dropped
);
1713 st
->multicast
= tswap32(st
->multicast
);
1714 st
->collisions
= tswap32(st
->collisions
);
1716 /* detailed rx_errors: */
1717 st
->rx_length_errors
= tswap32(st
->rx_length_errors
);
1718 st
->rx_over_errors
= tswap32(st
->rx_over_errors
);
1719 st
->rx_crc_errors
= tswap32(st
->rx_crc_errors
);
1720 st
->rx_frame_errors
= tswap32(st
->rx_frame_errors
);
1721 st
->rx_fifo_errors
= tswap32(st
->rx_fifo_errors
);
1722 st
->rx_missed_errors
= tswap32(st
->rx_missed_errors
);
1724 /* detailed tx_errors */
1725 st
->tx_aborted_errors
= tswap32(st
->tx_aborted_errors
);
1726 st
->tx_carrier_errors
= tswap32(st
->tx_carrier_errors
);
1727 st
->tx_fifo_errors
= tswap32(st
->tx_fifo_errors
);
1728 st
->tx_heartbeat_errors
= tswap32(st
->tx_heartbeat_errors
);
1729 st
->tx_window_errors
= tswap32(st
->tx_window_errors
);
1732 st
->rx_compressed
= tswap32(st
->rx_compressed
);
1733 st
->tx_compressed
= tswap32(st
->tx_compressed
);
1735 /* struct rtnl_link_stats64 */
1737 st64
= RTA_DATA(rtattr
);
1738 st64
->rx_packets
= tswap64(st64
->rx_packets
);
1739 st64
->tx_packets
= tswap64(st64
->tx_packets
);
1740 st64
->rx_bytes
= tswap64(st64
->rx_bytes
);
1741 st64
->tx_bytes
= tswap64(st64
->tx_bytes
);
1742 st64
->rx_errors
= tswap64(st64
->rx_errors
);
1743 st64
->tx_errors
= tswap64(st64
->tx_errors
);
1744 st64
->rx_dropped
= tswap64(st64
->rx_dropped
);
1745 st64
->tx_dropped
= tswap64(st64
->tx_dropped
);
1746 st64
->multicast
= tswap64(st64
->multicast
);
1747 st64
->collisions
= tswap64(st64
->collisions
);
1749 /* detailed rx_errors: */
1750 st64
->rx_length_errors
= tswap64(st64
->rx_length_errors
);
1751 st64
->rx_over_errors
= tswap64(st64
->rx_over_errors
);
1752 st64
->rx_crc_errors
= tswap64(st64
->rx_crc_errors
);
1753 st64
->rx_frame_errors
= tswap64(st64
->rx_frame_errors
);
1754 st64
->rx_fifo_errors
= tswap64(st64
->rx_fifo_errors
);
1755 st64
->rx_missed_errors
= tswap64(st64
->rx_missed_errors
);
1757 /* detailed tx_errors */
1758 st64
->tx_aborted_errors
= tswap64(st64
->tx_aborted_errors
);
1759 st64
->tx_carrier_errors
= tswap64(st64
->tx_carrier_errors
);
1760 st64
->tx_fifo_errors
= tswap64(st64
->tx_fifo_errors
);
1761 st64
->tx_heartbeat_errors
= tswap64(st64
->tx_heartbeat_errors
);
1762 st64
->tx_window_errors
= tswap64(st64
->tx_window_errors
);
1765 st64
->rx_compressed
= tswap64(st64
->rx_compressed
);
1766 st64
->tx_compressed
= tswap64(st64
->tx_compressed
);
1768 /* struct rtnl_link_ifmap */
1770 map
= RTA_DATA(rtattr
);
1771 map
->mem_start
= tswap64(map
->mem_start
);
1772 map
->mem_end
= tswap64(map
->mem_end
);
1773 map
->base_addr
= tswap64(map
->base_addr
);
1774 map
->irq
= tswap16(map
->irq
);
1779 /* FIXME: implement nested type */
1780 gemu_log("Unimplemented nested type %d\n", rtattr
->rta_type
);
1783 gemu_log("Unknown host IFLA type: %d\n", rtattr
->rta_type
);
1789 static abi_long
host_to_target_data_addr_rtattr(struct rtattr
*rtattr
)
1792 struct ifa_cacheinfo
*ci
;
1794 switch (rtattr
->rta_type
) {
1795 /* binary: depends on family type */
1805 u32
= RTA_DATA(rtattr
);
1806 *u32
= tswap32(*u32
);
1808 /* struct ifa_cacheinfo */
1810 ci
= RTA_DATA(rtattr
);
1811 ci
->ifa_prefered
= tswap32(ci
->ifa_prefered
);
1812 ci
->ifa_valid
= tswap32(ci
->ifa_valid
);
1813 ci
->cstamp
= tswap32(ci
->cstamp
);
1814 ci
->tstamp
= tswap32(ci
->tstamp
);
1817 gemu_log("Unknown host IFA type: %d\n", rtattr
->rta_type
);
1823 static abi_long
host_to_target_data_route_rtattr(struct rtattr
*rtattr
)
1826 switch (rtattr
->rta_type
) {
1827 /* binary: depends on family type */
1836 u32
= RTA_DATA(rtattr
);
1837 *u32
= tswap32(*u32
);
1840 gemu_log("Unknown host RTA type: %d\n", rtattr
->rta_type
);
1846 static abi_long
host_to_target_link_rtattr(struct rtattr
*rtattr
,
1847 uint32_t rtattr_len
)
1849 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
1850 host_to_target_data_link_rtattr
);
1853 static abi_long
host_to_target_addr_rtattr(struct rtattr
*rtattr
,
1854 uint32_t rtattr_len
)
1856 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
1857 host_to_target_data_addr_rtattr
);
1860 static abi_long
host_to_target_route_rtattr(struct rtattr
*rtattr
,
1861 uint32_t rtattr_len
)
1863 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
1864 host_to_target_data_route_rtattr
);
1867 static abi_long
host_to_target_data_route(struct nlmsghdr
*nlh
)
1870 struct ifinfomsg
*ifi
;
1871 struct ifaddrmsg
*ifa
;
1874 nlmsg_len
= nlh
->nlmsg_len
;
1875 switch (nlh
->nlmsg_type
) {
1879 ifi
= NLMSG_DATA(nlh
);
1880 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
1881 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
1882 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
1883 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
1884 host_to_target_link_rtattr(IFLA_RTA(ifi
),
1885 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifi
)));
1890 ifa
= NLMSG_DATA(nlh
);
1891 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
1892 host_to_target_addr_rtattr(IFA_RTA(ifa
),
1893 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifa
)));
1898 rtm
= NLMSG_DATA(nlh
);
1899 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
1900 host_to_target_route_rtattr(RTM_RTA(rtm
),
1901 nlmsg_len
- NLMSG_LENGTH(sizeof(*rtm
)));
1904 return -TARGET_EINVAL
;
1909 static inline abi_long
host_to_target_nlmsg_route(struct nlmsghdr
*nlh
,
1912 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_route
);
1915 static abi_long
target_to_host_for_each_rtattr(struct rtattr
*rtattr
,
1917 abi_long (*target_to_host_rtattr
)
1922 while (len
>= sizeof(struct rtattr
)) {
1923 if (tswap16(rtattr
->rta_len
) < sizeof(struct rtattr
) ||
1924 tswap16(rtattr
->rta_len
) > len
) {
1927 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
1928 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
1929 ret
= target_to_host_rtattr(rtattr
);
1933 len
-= RTA_ALIGN(rtattr
->rta_len
);
1934 rtattr
= (struct rtattr
*)(((char *)rtattr
) +
1935 RTA_ALIGN(rtattr
->rta_len
));
1940 static abi_long
target_to_host_data_link_rtattr(struct rtattr
*rtattr
)
1942 switch (rtattr
->rta_type
) {
1944 gemu_log("Unknown target IFLA type: %d\n", rtattr
->rta_type
);
1950 static abi_long
target_to_host_data_addr_rtattr(struct rtattr
*rtattr
)
1952 switch (rtattr
->rta_type
) {
1953 /* binary: depends on family type */
1958 gemu_log("Unknown target IFA type: %d\n", rtattr
->rta_type
);
1964 static abi_long
target_to_host_data_route_rtattr(struct rtattr
*rtattr
)
1967 switch (rtattr
->rta_type
) {
1968 /* binary: depends on family type */
1975 u32
= RTA_DATA(rtattr
);
1976 *u32
= tswap32(*u32
);
1979 gemu_log("Unknown target RTA type: %d\n", rtattr
->rta_type
);
1985 static void target_to_host_link_rtattr(struct rtattr
*rtattr
,
1986 uint32_t rtattr_len
)
1988 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
1989 target_to_host_data_link_rtattr
);
1992 static void target_to_host_addr_rtattr(struct rtattr
*rtattr
,
1993 uint32_t rtattr_len
)
1995 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
1996 target_to_host_data_addr_rtattr
);
1999 static void target_to_host_route_rtattr(struct rtattr
*rtattr
,
2000 uint32_t rtattr_len
)
2002 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2003 target_to_host_data_route_rtattr
);
2006 static abi_long
target_to_host_data_route(struct nlmsghdr
*nlh
)
2008 struct ifinfomsg
*ifi
;
2009 struct ifaddrmsg
*ifa
;
2012 switch (nlh
->nlmsg_type
) {
2017 ifi
= NLMSG_DATA(nlh
);
2018 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2019 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2020 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2021 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2022 target_to_host_link_rtattr(IFLA_RTA(ifi
), nlh
->nlmsg_len
-
2023 NLMSG_LENGTH(sizeof(*ifi
)));
2028 ifa
= NLMSG_DATA(nlh
);
2029 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2030 target_to_host_addr_rtattr(IFA_RTA(ifa
), nlh
->nlmsg_len
-
2031 NLMSG_LENGTH(sizeof(*ifa
)));
2037 rtm
= NLMSG_DATA(nlh
);
2038 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2039 target_to_host_route_rtattr(RTM_RTA(rtm
), nlh
->nlmsg_len
-
2040 NLMSG_LENGTH(sizeof(*rtm
)));
2043 return -TARGET_EOPNOTSUPP
;
2048 static abi_long
target_to_host_nlmsg_route(struct nlmsghdr
*nlh
, size_t len
)
2050 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_route
);
2052 #endif /* CONFIG_RTNETLINK */
2054 static abi_long
host_to_target_data_audit(struct nlmsghdr
*nlh
)
2056 switch (nlh
->nlmsg_type
) {
2058 gemu_log("Unknown host audit message type %d\n",
2060 return -TARGET_EINVAL
;
2065 static inline abi_long
host_to_target_nlmsg_audit(struct nlmsghdr
*nlh
,
2068 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_audit
);
2071 static abi_long
target_to_host_data_audit(struct nlmsghdr
*nlh
)
2073 switch (nlh
->nlmsg_type
) {
2075 case AUDIT_FIRST_USER_MSG
... AUDIT_LAST_USER_MSG
:
2076 case AUDIT_FIRST_USER_MSG2
... AUDIT_LAST_USER_MSG2
:
2079 gemu_log("Unknown target audit message type %d\n",
2081 return -TARGET_EINVAL
;
2087 static abi_long
target_to_host_nlmsg_audit(struct nlmsghdr
*nlh
, size_t len
)
2089 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_audit
);
2092 /* do_setsockopt() Must return target values and target errnos. */
2093 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
2094 abi_ulong optval_addr
, socklen_t optlen
)
2098 struct ip_mreqn
*ip_mreq
;
2099 struct ip_mreq_source
*ip_mreq_source
;
2103 /* TCP options all take an 'int' value. */
2104 if (optlen
< sizeof(uint32_t))
2105 return -TARGET_EINVAL
;
2107 if (get_user_u32(val
, optval_addr
))
2108 return -TARGET_EFAULT
;
2109 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2116 case IP_ROUTER_ALERT
:
2120 case IP_MTU_DISCOVER
:
2126 case IP_MULTICAST_TTL
:
2127 case IP_MULTICAST_LOOP
:
2129 if (optlen
>= sizeof(uint32_t)) {
2130 if (get_user_u32(val
, optval_addr
))
2131 return -TARGET_EFAULT
;
2132 } else if (optlen
>= 1) {
2133 if (get_user_u8(val
, optval_addr
))
2134 return -TARGET_EFAULT
;
2136 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2138 case IP_ADD_MEMBERSHIP
:
2139 case IP_DROP_MEMBERSHIP
:
2140 if (optlen
< sizeof (struct target_ip_mreq
) ||
2141 optlen
> sizeof (struct target_ip_mreqn
))
2142 return -TARGET_EINVAL
;
2144 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
2145 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
2146 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
2149 case IP_BLOCK_SOURCE
:
2150 case IP_UNBLOCK_SOURCE
:
2151 case IP_ADD_SOURCE_MEMBERSHIP
:
2152 case IP_DROP_SOURCE_MEMBERSHIP
:
2153 if (optlen
!= sizeof (struct target_ip_mreq_source
))
2154 return -TARGET_EINVAL
;
2156 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2157 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
2158 unlock_user (ip_mreq_source
, optval_addr
, 0);
2167 case IPV6_MTU_DISCOVER
:
2170 case IPV6_RECVPKTINFO
:
2172 if (optlen
< sizeof(uint32_t)) {
2173 return -TARGET_EINVAL
;
2175 if (get_user_u32(val
, optval_addr
)) {
2176 return -TARGET_EFAULT
;
2178 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2179 &val
, sizeof(val
)));
2188 /* struct icmp_filter takes an u32 value */
2189 if (optlen
< sizeof(uint32_t)) {
2190 return -TARGET_EINVAL
;
2193 if (get_user_u32(val
, optval_addr
)) {
2194 return -TARGET_EFAULT
;
2196 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2197 &val
, sizeof(val
)));
2204 case TARGET_SOL_SOCKET
:
2206 case TARGET_SO_RCVTIMEO
:
2210 optname
= SO_RCVTIMEO
;
2213 if (optlen
!= sizeof(struct target_timeval
)) {
2214 return -TARGET_EINVAL
;
2217 if (copy_from_user_timeval(&tv
, optval_addr
)) {
2218 return -TARGET_EFAULT
;
2221 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2225 case TARGET_SO_SNDTIMEO
:
2226 optname
= SO_SNDTIMEO
;
2228 case TARGET_SO_ATTACH_FILTER
:
2230 struct target_sock_fprog
*tfprog
;
2231 struct target_sock_filter
*tfilter
;
2232 struct sock_fprog fprog
;
2233 struct sock_filter
*filter
;
2236 if (optlen
!= sizeof(*tfprog
)) {
2237 return -TARGET_EINVAL
;
2239 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
2240 return -TARGET_EFAULT
;
2242 if (!lock_user_struct(VERIFY_READ
, tfilter
,
2243 tswapal(tfprog
->filter
), 0)) {
2244 unlock_user_struct(tfprog
, optval_addr
, 1);
2245 return -TARGET_EFAULT
;
2248 fprog
.len
= tswap16(tfprog
->len
);
2249 filter
= g_try_new(struct sock_filter
, fprog
.len
);
2250 if (filter
== NULL
) {
2251 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2252 unlock_user_struct(tfprog
, optval_addr
, 1);
2253 return -TARGET_ENOMEM
;
2255 for (i
= 0; i
< fprog
.len
; i
++) {
2256 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2257 filter
[i
].jt
= tfilter
[i
].jt
;
2258 filter
[i
].jf
= tfilter
[i
].jf
;
2259 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2261 fprog
.filter
= filter
;
2263 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2264 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2267 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2268 unlock_user_struct(tfprog
, optval_addr
, 1);
2271 case TARGET_SO_BINDTODEVICE
:
2273 char *dev_ifname
, *addr_ifname
;
2275 if (optlen
> IFNAMSIZ
- 1) {
2276 optlen
= IFNAMSIZ
- 1;
2278 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2280 return -TARGET_EFAULT
;
2282 optname
= SO_BINDTODEVICE
;
2283 addr_ifname
= alloca(IFNAMSIZ
);
2284 memcpy(addr_ifname
, dev_ifname
, optlen
);
2285 addr_ifname
[optlen
] = 0;
2286 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2287 addr_ifname
, optlen
));
2288 unlock_user (dev_ifname
, optval_addr
, 0);
2291 /* Options with 'int' argument. */
2292 case TARGET_SO_DEBUG
:
2295 case TARGET_SO_REUSEADDR
:
2296 optname
= SO_REUSEADDR
;
2298 case TARGET_SO_TYPE
:
2301 case TARGET_SO_ERROR
:
2304 case TARGET_SO_DONTROUTE
:
2305 optname
= SO_DONTROUTE
;
2307 case TARGET_SO_BROADCAST
:
2308 optname
= SO_BROADCAST
;
2310 case TARGET_SO_SNDBUF
:
2311 optname
= SO_SNDBUF
;
2313 case TARGET_SO_SNDBUFFORCE
:
2314 optname
= SO_SNDBUFFORCE
;
2316 case TARGET_SO_RCVBUF
:
2317 optname
= SO_RCVBUF
;
2319 case TARGET_SO_RCVBUFFORCE
:
2320 optname
= SO_RCVBUFFORCE
;
2322 case TARGET_SO_KEEPALIVE
:
2323 optname
= SO_KEEPALIVE
;
2325 case TARGET_SO_OOBINLINE
:
2326 optname
= SO_OOBINLINE
;
2328 case TARGET_SO_NO_CHECK
:
2329 optname
= SO_NO_CHECK
;
2331 case TARGET_SO_PRIORITY
:
2332 optname
= SO_PRIORITY
;
2335 case TARGET_SO_BSDCOMPAT
:
2336 optname
= SO_BSDCOMPAT
;
2339 case TARGET_SO_PASSCRED
:
2340 optname
= SO_PASSCRED
;
2342 case TARGET_SO_PASSSEC
:
2343 optname
= SO_PASSSEC
;
2345 case TARGET_SO_TIMESTAMP
:
2346 optname
= SO_TIMESTAMP
;
2348 case TARGET_SO_RCVLOWAT
:
2349 optname
= SO_RCVLOWAT
;
2355 if (optlen
< sizeof(uint32_t))
2356 return -TARGET_EINVAL
;
2358 if (get_user_u32(val
, optval_addr
))
2359 return -TARGET_EFAULT
;
2360 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
2364 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
2365 ret
= -TARGET_ENOPROTOOPT
;
2370 /* do_getsockopt() Must return target values and target errnos. */
2371 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
2372 abi_ulong optval_addr
, abi_ulong optlen
)
2379 case TARGET_SOL_SOCKET
:
2382 /* These don't just return a single integer */
2383 case TARGET_SO_LINGER
:
2384 case TARGET_SO_RCVTIMEO
:
2385 case TARGET_SO_SNDTIMEO
:
2386 case TARGET_SO_PEERNAME
:
2388 case TARGET_SO_PEERCRED
: {
2391 struct target_ucred
*tcr
;
2393 if (get_user_u32(len
, optlen
)) {
2394 return -TARGET_EFAULT
;
2397 return -TARGET_EINVAL
;
2401 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
2409 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
2410 return -TARGET_EFAULT
;
2412 __put_user(cr
.pid
, &tcr
->pid
);
2413 __put_user(cr
.uid
, &tcr
->uid
);
2414 __put_user(cr
.gid
, &tcr
->gid
);
2415 unlock_user_struct(tcr
, optval_addr
, 1);
2416 if (put_user_u32(len
, optlen
)) {
2417 return -TARGET_EFAULT
;
2421 /* Options with 'int' argument. */
2422 case TARGET_SO_DEBUG
:
2425 case TARGET_SO_REUSEADDR
:
2426 optname
= SO_REUSEADDR
;
2428 case TARGET_SO_TYPE
:
2431 case TARGET_SO_ERROR
:
2434 case TARGET_SO_DONTROUTE
:
2435 optname
= SO_DONTROUTE
;
2437 case TARGET_SO_BROADCAST
:
2438 optname
= SO_BROADCAST
;
2440 case TARGET_SO_SNDBUF
:
2441 optname
= SO_SNDBUF
;
2443 case TARGET_SO_RCVBUF
:
2444 optname
= SO_RCVBUF
;
2446 case TARGET_SO_KEEPALIVE
:
2447 optname
= SO_KEEPALIVE
;
2449 case TARGET_SO_OOBINLINE
:
2450 optname
= SO_OOBINLINE
;
2452 case TARGET_SO_NO_CHECK
:
2453 optname
= SO_NO_CHECK
;
2455 case TARGET_SO_PRIORITY
:
2456 optname
= SO_PRIORITY
;
2459 case TARGET_SO_BSDCOMPAT
:
2460 optname
= SO_BSDCOMPAT
;
2463 case TARGET_SO_PASSCRED
:
2464 optname
= SO_PASSCRED
;
2466 case TARGET_SO_TIMESTAMP
:
2467 optname
= SO_TIMESTAMP
;
2469 case TARGET_SO_RCVLOWAT
:
2470 optname
= SO_RCVLOWAT
;
2472 case TARGET_SO_ACCEPTCONN
:
2473 optname
= SO_ACCEPTCONN
;
2480 /* TCP options all take an 'int' value. */
2482 if (get_user_u32(len
, optlen
))
2483 return -TARGET_EFAULT
;
2485 return -TARGET_EINVAL
;
2487 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2490 if (optname
== SO_TYPE
) {
2491 val
= host_to_target_sock_type(val
);
2496 if (put_user_u32(val
, optval_addr
))
2497 return -TARGET_EFAULT
;
2499 if (put_user_u8(val
, optval_addr
))
2500 return -TARGET_EFAULT
;
2502 if (put_user_u32(len
, optlen
))
2503 return -TARGET_EFAULT
;
2510 case IP_ROUTER_ALERT
:
2514 case IP_MTU_DISCOVER
:
2520 case IP_MULTICAST_TTL
:
2521 case IP_MULTICAST_LOOP
:
2522 if (get_user_u32(len
, optlen
))
2523 return -TARGET_EFAULT
;
2525 return -TARGET_EINVAL
;
2527 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2530 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2532 if (put_user_u32(len
, optlen
)
2533 || put_user_u8(val
, optval_addr
))
2534 return -TARGET_EFAULT
;
2536 if (len
> sizeof(int))
2538 if (put_user_u32(len
, optlen
)
2539 || put_user_u32(val
, optval_addr
))
2540 return -TARGET_EFAULT
;
2544 ret
= -TARGET_ENOPROTOOPT
;
2550 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2552 ret
= -TARGET_EOPNOTSUPP
;
2558 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
2559 int count
, int copy
)
2561 struct target_iovec
*target_vec
;
2563 abi_ulong total_len
, max_len
;
2566 bool bad_address
= false;
2572 if (count
< 0 || count
> IOV_MAX
) {
2577 vec
= g_try_new0(struct iovec
, count
);
2583 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2584 count
* sizeof(struct target_iovec
), 1);
2585 if (target_vec
== NULL
) {
2590 /* ??? If host page size > target page size, this will result in a
2591 value larger than what we can actually support. */
2592 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
2595 for (i
= 0; i
< count
; i
++) {
2596 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2597 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2602 } else if (len
== 0) {
2603 /* Zero length pointer is ignored. */
2604 vec
[i
].iov_base
= 0;
2606 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
2607 /* If the first buffer pointer is bad, this is a fault. But
2608 * subsequent bad buffers will result in a partial write; this
2609 * is realized by filling the vector with null pointers and
2611 if (!vec
[i
].iov_base
) {
2622 if (len
> max_len
- total_len
) {
2623 len
= max_len
- total_len
;
2626 vec
[i
].iov_len
= len
;
2630 unlock_user(target_vec
, target_addr
, 0);
2635 if (tswapal(target_vec
[i
].iov_len
) > 0) {
2636 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
2639 unlock_user(target_vec
, target_addr
, 0);
2646 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
2647 int count
, int copy
)
2649 struct target_iovec
*target_vec
;
2652 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2653 count
* sizeof(struct target_iovec
), 1);
2655 for (i
= 0; i
< count
; i
++) {
2656 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2657 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2661 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
2663 unlock_user(target_vec
, target_addr
, 0);
2669 static inline int target_to_host_sock_type(int *type
)
2672 int target_type
= *type
;
2674 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
2675 case TARGET_SOCK_DGRAM
:
2676 host_type
= SOCK_DGRAM
;
2678 case TARGET_SOCK_STREAM
:
2679 host_type
= SOCK_STREAM
;
2682 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
2685 if (target_type
& TARGET_SOCK_CLOEXEC
) {
2686 #if defined(SOCK_CLOEXEC)
2687 host_type
|= SOCK_CLOEXEC
;
2689 return -TARGET_EINVAL
;
2692 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2693 #if defined(SOCK_NONBLOCK)
2694 host_type
|= SOCK_NONBLOCK
;
2695 #elif !defined(O_NONBLOCK)
2696 return -TARGET_EINVAL
;
2703 /* Try to emulate socket type flags after socket creation. */
2704 static int sock_flags_fixup(int fd
, int target_type
)
2706 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2707 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2708 int flags
= fcntl(fd
, F_GETFL
);
2709 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
2711 return -TARGET_EINVAL
;
2718 static abi_long
packet_target_to_host_sockaddr(void *host_addr
,
2719 abi_ulong target_addr
,
2722 struct sockaddr
*addr
= host_addr
;
2723 struct target_sockaddr
*target_saddr
;
2725 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
2726 if (!target_saddr
) {
2727 return -TARGET_EFAULT
;
2730 memcpy(addr
, target_saddr
, len
);
2731 addr
->sa_family
= tswap16(target_saddr
->sa_family
);
2732 /* spkt_protocol is big-endian */
2734 unlock_user(target_saddr
, target_addr
, 0);
2738 static TargetFdTrans target_packet_trans
= {
2739 .target_to_host_addr
= packet_target_to_host_sockaddr
,
2742 #ifdef CONFIG_RTNETLINK
2743 static abi_long
netlink_route_target_to_host(void *buf
, size_t len
)
2745 return target_to_host_nlmsg_route(buf
, len
);
2748 static abi_long
netlink_route_host_to_target(void *buf
, size_t len
)
2750 return host_to_target_nlmsg_route(buf
, len
);
2753 static TargetFdTrans target_netlink_route_trans
= {
2754 .target_to_host_data
= netlink_route_target_to_host
,
2755 .host_to_target_data
= netlink_route_host_to_target
,
2757 #endif /* CONFIG_RTNETLINK */
2759 static abi_long
netlink_audit_target_to_host(void *buf
, size_t len
)
2761 return target_to_host_nlmsg_audit(buf
, len
);
2764 static abi_long
netlink_audit_host_to_target(void *buf
, size_t len
)
2766 return host_to_target_nlmsg_audit(buf
, len
);
2769 static TargetFdTrans target_netlink_audit_trans
= {
2770 .target_to_host_data
= netlink_audit_target_to_host
,
2771 .host_to_target_data
= netlink_audit_host_to_target
,
2774 /* do_socket() Must return target values and target errnos. */
2775 static abi_long
do_socket(int domain
, int type
, int protocol
)
2777 int target_type
= type
;
2780 ret
= target_to_host_sock_type(&type
);
2785 if (domain
== PF_NETLINK
&& !(
2786 #ifdef CONFIG_RTNETLINK
2787 protocol
== NETLINK_ROUTE
||
2789 protocol
== NETLINK_KOBJECT_UEVENT
||
2790 protocol
== NETLINK_AUDIT
)) {
2791 return -EPFNOSUPPORT
;
2794 if (domain
== AF_PACKET
||
2795 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
2796 protocol
= tswap16(protocol
);
2799 ret
= get_errno(socket(domain
, type
, protocol
));
2801 ret
= sock_flags_fixup(ret
, target_type
);
2802 if (type
== SOCK_PACKET
) {
2803 /* Manage an obsolete case :
2804 * if socket type is SOCK_PACKET, bind by name
2806 fd_trans_register(ret
, &target_packet_trans
);
2807 } else if (domain
== PF_NETLINK
) {
2809 #ifdef CONFIG_RTNETLINK
2811 fd_trans_register(ret
, &target_netlink_route_trans
);
2814 case NETLINK_KOBJECT_UEVENT
:
2815 /* nothing to do: messages are strings */
2818 fd_trans_register(ret
, &target_netlink_audit_trans
);
2821 g_assert_not_reached();
2828 /* do_bind() Must return target values and target errnos. */
2829 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
2835 if ((int)addrlen
< 0) {
2836 return -TARGET_EINVAL
;
2839 addr
= alloca(addrlen
+1);
2841 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
2845 return get_errno(bind(sockfd
, addr
, addrlen
));
2848 /* do_connect() Must return target values and target errnos. */
2849 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
2855 if ((int)addrlen
< 0) {
2856 return -TARGET_EINVAL
;
2859 addr
= alloca(addrlen
+1);
2861 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
2865 return get_errno(connect(sockfd
, addr
, addrlen
));
2868 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2869 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
2870 int flags
, int send
)
2876 abi_ulong target_vec
;
2878 if (msgp
->msg_name
) {
2879 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
2880 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
2881 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
2882 tswapal(msgp
->msg_name
),
2888 msg
.msg_name
= NULL
;
2889 msg
.msg_namelen
= 0;
2891 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
2892 msg
.msg_control
= alloca(msg
.msg_controllen
);
2893 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
2895 count
= tswapal(msgp
->msg_iovlen
);
2896 target_vec
= tswapal(msgp
->msg_iov
);
2897 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
2898 target_vec
, count
, send
);
2900 ret
= -host_to_target_errno(errno
);
2903 msg
.msg_iovlen
= count
;
2907 if (fd_trans_target_to_host_data(fd
)) {
2908 ret
= fd_trans_target_to_host_data(fd
)(msg
.msg_iov
->iov_base
,
2909 msg
.msg_iov
->iov_len
);
2911 ret
= target_to_host_cmsg(&msg
, msgp
);
2914 ret
= get_errno(sendmsg(fd
, &msg
, flags
));
2917 ret
= get_errno(recvmsg(fd
, &msg
, flags
));
2918 if (!is_error(ret
)) {
2920 if (fd_trans_host_to_target_data(fd
)) {
2921 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
2922 msg
.msg_iov
->iov_len
);
2924 ret
= host_to_target_cmsg(msgp
, &msg
);
2926 if (!is_error(ret
)) {
2927 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
2928 if (msg
.msg_name
!= NULL
) {
2929 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
2930 msg
.msg_name
, msg
.msg_namelen
);
2942 unlock_iovec(vec
, target_vec
, count
, !send
);
2947 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
2948 int flags
, int send
)
2951 struct target_msghdr
*msgp
;
2953 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
2957 return -TARGET_EFAULT
;
2959 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
2960 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
2964 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2965 * so it might not have this *mmsg-specific flag either.
2967 #ifndef MSG_WAITFORONE
2968 #define MSG_WAITFORONE 0x10000
2971 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
2972 unsigned int vlen
, unsigned int flags
,
2975 struct target_mmsghdr
*mmsgp
;
2979 if (vlen
> UIO_MAXIOV
) {
2983 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
2985 return -TARGET_EFAULT
;
2988 for (i
= 0; i
< vlen
; i
++) {
2989 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
2990 if (is_error(ret
)) {
2993 mmsgp
[i
].msg_len
= tswap32(ret
);
2994 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2995 if (flags
& MSG_WAITFORONE
) {
2996 flags
|= MSG_DONTWAIT
;
3000 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3002 /* Return number of datagrams sent if we sent any at all;
3003 * otherwise return the error.
3011 /* If we don't have a system accept4() then just call accept.
3012 * The callsites to do_accept4() will ensure that they don't
3013 * pass a non-zero flags argument in this config.
3015 #ifndef CONFIG_ACCEPT4
3016 static inline int accept4(int sockfd
, struct sockaddr
*addr
,
3017 socklen_t
*addrlen
, int flags
)
3020 return accept(sockfd
, addr
, addrlen
);
3024 /* do_accept4() Must return target values and target errnos. */
3025 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3026 abi_ulong target_addrlen_addr
, int flags
)
3033 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3035 if (target_addr
== 0) {
3036 return get_errno(accept4(fd
, NULL
, NULL
, host_flags
));
3039 /* linux returns EINVAL if addrlen pointer is invalid */
3040 if (get_user_u32(addrlen
, target_addrlen_addr
))
3041 return -TARGET_EINVAL
;
3043 if ((int)addrlen
< 0) {
3044 return -TARGET_EINVAL
;
3047 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3048 return -TARGET_EINVAL
;
3050 addr
= alloca(addrlen
);
3052 ret
= get_errno(accept4(fd
, addr
, &addrlen
, host_flags
));
3053 if (!is_error(ret
)) {
3054 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3055 if (put_user_u32(addrlen
, target_addrlen_addr
))
3056 ret
= -TARGET_EFAULT
;
3061 /* do_getpeername() Must return target values and target errnos. */
3062 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3063 abi_ulong target_addrlen_addr
)
3069 if (get_user_u32(addrlen
, target_addrlen_addr
))
3070 return -TARGET_EFAULT
;
3072 if ((int)addrlen
< 0) {
3073 return -TARGET_EINVAL
;
3076 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3077 return -TARGET_EFAULT
;
3079 addr
= alloca(addrlen
);
3081 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
3082 if (!is_error(ret
)) {
3083 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3084 if (put_user_u32(addrlen
, target_addrlen_addr
))
3085 ret
= -TARGET_EFAULT
;
3090 /* do_getsockname() Must return target values and target errnos. */
3091 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3092 abi_ulong target_addrlen_addr
)
3098 if (get_user_u32(addrlen
, target_addrlen_addr
))
3099 return -TARGET_EFAULT
;
3101 if ((int)addrlen
< 0) {
3102 return -TARGET_EINVAL
;
3105 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3106 return -TARGET_EFAULT
;
3108 addr
= alloca(addrlen
);
3110 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
3111 if (!is_error(ret
)) {
3112 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3113 if (put_user_u32(addrlen
, target_addrlen_addr
))
3114 ret
= -TARGET_EFAULT
;
3119 /* do_socketpair() Must return target values and target errnos. */
3120 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3121 abi_ulong target_tab_addr
)
3126 target_to_host_sock_type(&type
);
3128 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3129 if (!is_error(ret
)) {
3130 if (put_user_s32(tab
[0], target_tab_addr
)
3131 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3132 ret
= -TARGET_EFAULT
;
3137 /* do_sendto() Must return target values and target errnos. */
3138 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3139 abi_ulong target_addr
, socklen_t addrlen
)
3145 if ((int)addrlen
< 0) {
3146 return -TARGET_EINVAL
;
3149 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3151 return -TARGET_EFAULT
;
3152 if (fd_trans_target_to_host_data(fd
)) {
3153 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
3155 unlock_user(host_msg
, msg
, 0);
3160 addr
= alloca(addrlen
+1);
3161 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
3163 unlock_user(host_msg
, msg
, 0);
3166 ret
= get_errno(sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
3168 ret
= get_errno(send(fd
, host_msg
, len
, flags
));
3170 unlock_user(host_msg
, msg
, 0);
3174 /* do_recvfrom() Must return target values and target errnos. */
3175 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
3176 abi_ulong target_addr
,
3177 abi_ulong target_addrlen
)
3184 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
3186 return -TARGET_EFAULT
;
3188 if (get_user_u32(addrlen
, target_addrlen
)) {
3189 ret
= -TARGET_EFAULT
;
3192 if ((int)addrlen
< 0) {
3193 ret
= -TARGET_EINVAL
;
3196 addr
= alloca(addrlen
);
3197 ret
= get_errno(recvfrom(fd
, host_msg
, len
, flags
, addr
, &addrlen
));
3199 addr
= NULL
; /* To keep compiler quiet. */
3200 ret
= get_errno(qemu_recv(fd
, host_msg
, len
, flags
));
3202 if (!is_error(ret
)) {
3204 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3205 if (put_user_u32(addrlen
, target_addrlen
)) {
3206 ret
= -TARGET_EFAULT
;
3210 unlock_user(host_msg
, msg
, len
);
3213 unlock_user(host_msg
, msg
, 0);
3218 #ifdef TARGET_NR_socketcall
3219 /* do_socketcall() Must return target values and target errnos. */
3220 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
3222 static const unsigned ac
[] = { /* number of arguments per call */
3223 [SOCKOP_socket
] = 3, /* domain, type, protocol */
3224 [SOCKOP_bind
] = 3, /* sockfd, addr, addrlen */
3225 [SOCKOP_connect
] = 3, /* sockfd, addr, addrlen */
3226 [SOCKOP_listen
] = 2, /* sockfd, backlog */
3227 [SOCKOP_accept
] = 3, /* sockfd, addr, addrlen */
3228 [SOCKOP_accept4
] = 4, /* sockfd, addr, addrlen, flags */
3229 [SOCKOP_getsockname
] = 3, /* sockfd, addr, addrlen */
3230 [SOCKOP_getpeername
] = 3, /* sockfd, addr, addrlen */
3231 [SOCKOP_socketpair
] = 4, /* domain, type, protocol, tab */
3232 [SOCKOP_send
] = 4, /* sockfd, msg, len, flags */
3233 [SOCKOP_recv
] = 4, /* sockfd, msg, len, flags */
3234 [SOCKOP_sendto
] = 6, /* sockfd, msg, len, flags, addr, addrlen */
3235 [SOCKOP_recvfrom
] = 6, /* sockfd, msg, len, flags, addr, addrlen */
3236 [SOCKOP_shutdown
] = 2, /* sockfd, how */
3237 [SOCKOP_sendmsg
] = 3, /* sockfd, msg, flags */
3238 [SOCKOP_recvmsg
] = 3, /* sockfd, msg, flags */
3239 [SOCKOP_sendmmsg
] = 4, /* sockfd, msgvec, vlen, flags */
3240 [SOCKOP_recvmmsg
] = 4, /* sockfd, msgvec, vlen, flags */
3241 [SOCKOP_setsockopt
] = 5, /* sockfd, level, optname, optval, optlen */
3242 [SOCKOP_getsockopt
] = 5, /* sockfd, level, optname, optval, optlen */
3244 abi_long a
[6]; /* max 6 args */
3246 /* first, collect the arguments in a[] according to ac[] */
3247 if (num
>= 0 && num
< ARRAY_SIZE(ac
)) {
3249 assert(ARRAY_SIZE(a
) >= ac
[num
]); /* ensure we have space for args */
3250 for (i
= 0; i
< ac
[num
]; ++i
) {
3251 if (get_user_ual(a
[i
], vptr
+ i
* sizeof(abi_long
)) != 0) {
3252 return -TARGET_EFAULT
;
3257 /* now when we have the args, actually handle the call */
3259 case SOCKOP_socket
: /* domain, type, protocol */
3260 return do_socket(a
[0], a
[1], a
[2]);
3261 case SOCKOP_bind
: /* sockfd, addr, addrlen */
3262 return do_bind(a
[0], a
[1], a
[2]);
3263 case SOCKOP_connect
: /* sockfd, addr, addrlen */
3264 return do_connect(a
[0], a
[1], a
[2]);
3265 case SOCKOP_listen
: /* sockfd, backlog */
3266 return get_errno(listen(a
[0], a
[1]));
3267 case SOCKOP_accept
: /* sockfd, addr, addrlen */
3268 return do_accept4(a
[0], a
[1], a
[2], 0);
3269 case SOCKOP_accept4
: /* sockfd, addr, addrlen, flags */
3270 return do_accept4(a
[0], a
[1], a
[2], a
[3]);
3271 case SOCKOP_getsockname
: /* sockfd, addr, addrlen */
3272 return do_getsockname(a
[0], a
[1], a
[2]);
3273 case SOCKOP_getpeername
: /* sockfd, addr, addrlen */
3274 return do_getpeername(a
[0], a
[1], a
[2]);
3275 case SOCKOP_socketpair
: /* domain, type, protocol, tab */
3276 return do_socketpair(a
[0], a
[1], a
[2], a
[3]);
3277 case SOCKOP_send
: /* sockfd, msg, len, flags */
3278 return do_sendto(a
[0], a
[1], a
[2], a
[3], 0, 0);
3279 case SOCKOP_recv
: /* sockfd, msg, len, flags */
3280 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], 0, 0);
3281 case SOCKOP_sendto
: /* sockfd, msg, len, flags, addr, addrlen */
3282 return do_sendto(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3283 case SOCKOP_recvfrom
: /* sockfd, msg, len, flags, addr, addrlen */
3284 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3285 case SOCKOP_shutdown
: /* sockfd, how */
3286 return get_errno(shutdown(a
[0], a
[1]));
3287 case SOCKOP_sendmsg
: /* sockfd, msg, flags */
3288 return do_sendrecvmsg(a
[0], a
[1], a
[2], 1);
3289 case SOCKOP_recvmsg
: /* sockfd, msg, flags */
3290 return do_sendrecvmsg(a
[0], a
[1], a
[2], 0);
3291 case SOCKOP_sendmmsg
: /* sockfd, msgvec, vlen, flags */
3292 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 1);
3293 case SOCKOP_recvmmsg
: /* sockfd, msgvec, vlen, flags */
3294 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 0);
3295 case SOCKOP_setsockopt
: /* sockfd, level, optname, optval, optlen */
3296 return do_setsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3297 case SOCKOP_getsockopt
: /* sockfd, level, optname, optval, optlen */
3298 return do_getsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3300 gemu_log("Unsupported socketcall: %d\n", num
);
3301 return -TARGET_ENOSYS
;
3306 #define N_SHM_REGIONS 32
3308 static struct shm_region
{
3312 } shm_regions
[N_SHM_REGIONS
];
3314 struct target_semid_ds
3316 struct target_ipc_perm sem_perm
;
3317 abi_ulong sem_otime
;
3318 #if !defined(TARGET_PPC64)
3319 abi_ulong __unused1
;
3321 abi_ulong sem_ctime
;
3322 #if !defined(TARGET_PPC64)
3323 abi_ulong __unused2
;
3325 abi_ulong sem_nsems
;
3326 abi_ulong __unused3
;
3327 abi_ulong __unused4
;
3330 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
3331 abi_ulong target_addr
)
3333 struct target_ipc_perm
*target_ip
;
3334 struct target_semid_ds
*target_sd
;
3336 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3337 return -TARGET_EFAULT
;
3338 target_ip
= &(target_sd
->sem_perm
);
3339 host_ip
->__key
= tswap32(target_ip
->__key
);
3340 host_ip
->uid
= tswap32(target_ip
->uid
);
3341 host_ip
->gid
= tswap32(target_ip
->gid
);
3342 host_ip
->cuid
= tswap32(target_ip
->cuid
);
3343 host_ip
->cgid
= tswap32(target_ip
->cgid
);
3344 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3345 host_ip
->mode
= tswap32(target_ip
->mode
);
3347 host_ip
->mode
= tswap16(target_ip
->mode
);
3349 #if defined(TARGET_PPC)
3350 host_ip
->__seq
= tswap32(target_ip
->__seq
);
3352 host_ip
->__seq
= tswap16(target_ip
->__seq
);
3354 unlock_user_struct(target_sd
, target_addr
, 0);
3358 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
3359 struct ipc_perm
*host_ip
)
3361 struct target_ipc_perm
*target_ip
;
3362 struct target_semid_ds
*target_sd
;
3364 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3365 return -TARGET_EFAULT
;
3366 target_ip
= &(target_sd
->sem_perm
);
3367 target_ip
->__key
= tswap32(host_ip
->__key
);
3368 target_ip
->uid
= tswap32(host_ip
->uid
);
3369 target_ip
->gid
= tswap32(host_ip
->gid
);
3370 target_ip
->cuid
= tswap32(host_ip
->cuid
);
3371 target_ip
->cgid
= tswap32(host_ip
->cgid
);
3372 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3373 target_ip
->mode
= tswap32(host_ip
->mode
);
3375 target_ip
->mode
= tswap16(host_ip
->mode
);
3377 #if defined(TARGET_PPC)
3378 target_ip
->__seq
= tswap32(host_ip
->__seq
);
3380 target_ip
->__seq
= tswap16(host_ip
->__seq
);
3382 unlock_user_struct(target_sd
, target_addr
, 1);
3386 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
3387 abi_ulong target_addr
)
3389 struct target_semid_ds
*target_sd
;
3391 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3392 return -TARGET_EFAULT
;
3393 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
3394 return -TARGET_EFAULT
;
3395 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
3396 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
3397 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
3398 unlock_user_struct(target_sd
, target_addr
, 0);
3402 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
3403 struct semid_ds
*host_sd
)
3405 struct target_semid_ds
*target_sd
;
3407 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3408 return -TARGET_EFAULT
;
3409 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
3410 return -TARGET_EFAULT
;
3411 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
3412 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
3413 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
3414 unlock_user_struct(target_sd
, target_addr
, 1);
3418 struct target_seminfo
{
3431 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
3432 struct seminfo
*host_seminfo
)
3434 struct target_seminfo
*target_seminfo
;
3435 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
3436 return -TARGET_EFAULT
;
3437 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
3438 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
3439 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
3440 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
3441 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
3442 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
3443 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
3444 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
3445 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
3446 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
3447 unlock_user_struct(target_seminfo
, target_addr
, 1);
3453 struct semid_ds
*buf
;
3454 unsigned short *array
;
3455 struct seminfo
*__buf
;
3458 union target_semun
{
3465 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
3466 abi_ulong target_addr
)
3469 unsigned short *array
;
3471 struct semid_ds semid_ds
;
3474 semun
.buf
= &semid_ds
;
3476 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3478 return get_errno(ret
);
3480 nsems
= semid_ds
.sem_nsems
;
3482 *host_array
= g_try_new(unsigned short, nsems
);
3484 return -TARGET_ENOMEM
;
3486 array
= lock_user(VERIFY_READ
, target_addr
,
3487 nsems
*sizeof(unsigned short), 1);
3489 g_free(*host_array
);
3490 return -TARGET_EFAULT
;
3493 for(i
=0; i
<nsems
; i
++) {
3494 __get_user((*host_array
)[i
], &array
[i
]);
3496 unlock_user(array
, target_addr
, 0);
3501 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
3502 unsigned short **host_array
)
3505 unsigned short *array
;
3507 struct semid_ds semid_ds
;
3510 semun
.buf
= &semid_ds
;
3512 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3514 return get_errno(ret
);
3516 nsems
= semid_ds
.sem_nsems
;
3518 array
= lock_user(VERIFY_WRITE
, target_addr
,
3519 nsems
*sizeof(unsigned short), 0);
3521 return -TARGET_EFAULT
;
3523 for(i
=0; i
<nsems
; i
++) {
3524 __put_user((*host_array
)[i
], &array
[i
]);
3526 g_free(*host_array
);
3527 unlock_user(array
, target_addr
, 1);
3532 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
3533 abi_ulong target_arg
)
3535 union target_semun target_su
= { .buf
= target_arg
};
3537 struct semid_ds dsarg
;
3538 unsigned short *array
= NULL
;
3539 struct seminfo seminfo
;
3540 abi_long ret
= -TARGET_EINVAL
;
3547 /* In 64 bit cross-endian situations, we will erroneously pick up
3548 * the wrong half of the union for the "val" element. To rectify
3549 * this, the entire 8-byte structure is byteswapped, followed by
3550 * a swap of the 4 byte val field. In other cases, the data is
3551 * already in proper host byte order. */
3552 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
3553 target_su
.buf
= tswapal(target_su
.buf
);
3554 arg
.val
= tswap32(target_su
.val
);
3556 arg
.val
= target_su
.val
;
3558 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3562 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
3566 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3567 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
3574 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
3578 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3579 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
3585 arg
.__buf
= &seminfo
;
3586 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3587 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
3595 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
3602 struct target_sembuf
{
3603 unsigned short sem_num
;
3608 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
3609 abi_ulong target_addr
,
3612 struct target_sembuf
*target_sembuf
;
3615 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
3616 nsops
*sizeof(struct target_sembuf
), 1);
3618 return -TARGET_EFAULT
;
3620 for(i
=0; i
<nsops
; i
++) {
3621 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
3622 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
3623 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
3626 unlock_user(target_sembuf
, target_addr
, 0);
3631 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
3633 struct sembuf sops
[nsops
];
3635 if (target_to_host_sembuf(sops
, ptr
, nsops
))
3636 return -TARGET_EFAULT
;
3638 return get_errno(semop(semid
, sops
, nsops
));
3641 struct target_msqid_ds
3643 struct target_ipc_perm msg_perm
;
3644 abi_ulong msg_stime
;
3645 #if TARGET_ABI_BITS == 32
3646 abi_ulong __unused1
;
3648 abi_ulong msg_rtime
;
3649 #if TARGET_ABI_BITS == 32
3650 abi_ulong __unused2
;
3652 abi_ulong msg_ctime
;
3653 #if TARGET_ABI_BITS == 32
3654 abi_ulong __unused3
;
3656 abi_ulong __msg_cbytes
;
3658 abi_ulong msg_qbytes
;
3659 abi_ulong msg_lspid
;
3660 abi_ulong msg_lrpid
;
3661 abi_ulong __unused4
;
3662 abi_ulong __unused5
;
3665 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
3666 abi_ulong target_addr
)
3668 struct target_msqid_ds
*target_md
;
3670 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
3671 return -TARGET_EFAULT
;
3672 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
3673 return -TARGET_EFAULT
;
3674 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
3675 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
3676 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
3677 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
3678 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
3679 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
3680 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
3681 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
3682 unlock_user_struct(target_md
, target_addr
, 0);
3686 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
3687 struct msqid_ds
*host_md
)
3689 struct target_msqid_ds
*target_md
;
3691 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
3692 return -TARGET_EFAULT
;
3693 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
3694 return -TARGET_EFAULT
;
3695 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
3696 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
3697 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
3698 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
3699 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
3700 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
3701 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
3702 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
3703 unlock_user_struct(target_md
, target_addr
, 1);
3707 struct target_msginfo
{
3715 unsigned short int msgseg
;
3718 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
3719 struct msginfo
*host_msginfo
)
3721 struct target_msginfo
*target_msginfo
;
3722 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
3723 return -TARGET_EFAULT
;
3724 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
3725 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
3726 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
3727 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
3728 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
3729 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
3730 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
3731 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
3732 unlock_user_struct(target_msginfo
, target_addr
, 1);
3736 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
3738 struct msqid_ds dsarg
;
3739 struct msginfo msginfo
;
3740 abi_long ret
= -TARGET_EINVAL
;
3748 if (target_to_host_msqid_ds(&dsarg
,ptr
))
3749 return -TARGET_EFAULT
;
3750 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
3751 if (host_to_target_msqid_ds(ptr
,&dsarg
))
3752 return -TARGET_EFAULT
;
3755 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
3759 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
3760 if (host_to_target_msginfo(ptr
, &msginfo
))
3761 return -TARGET_EFAULT
;
3768 struct target_msgbuf
{
3773 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
3774 ssize_t msgsz
, int msgflg
)
3776 struct target_msgbuf
*target_mb
;
3777 struct msgbuf
*host_mb
;
3781 return -TARGET_EINVAL
;
3784 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
3785 return -TARGET_EFAULT
;
3786 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
3788 unlock_user_struct(target_mb
, msgp
, 0);
3789 return -TARGET_ENOMEM
;
3791 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
3792 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
3793 ret
= get_errno(msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
3795 unlock_user_struct(target_mb
, msgp
, 0);
3800 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
3801 ssize_t msgsz
, abi_long msgtyp
,
3804 struct target_msgbuf
*target_mb
;
3806 struct msgbuf
*host_mb
;
3810 return -TARGET_EINVAL
;
3813 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
3814 return -TARGET_EFAULT
;
3816 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
3818 ret
= -TARGET_ENOMEM
;
3821 ret
= get_errno(msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
3824 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
3825 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
3826 if (!target_mtext
) {
3827 ret
= -TARGET_EFAULT
;
3830 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
3831 unlock_user(target_mtext
, target_mtext_addr
, ret
);
3834 target_mb
->mtype
= tswapal(host_mb
->mtype
);
3838 unlock_user_struct(target_mb
, msgp
, 1);
3843 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
3844 abi_ulong target_addr
)
3846 struct target_shmid_ds
*target_sd
;
3848 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3849 return -TARGET_EFAULT
;
3850 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
3851 return -TARGET_EFAULT
;
3852 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
3853 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
3854 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
3855 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
3856 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
3857 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
3858 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
3859 unlock_user_struct(target_sd
, target_addr
, 0);
3863 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
3864 struct shmid_ds
*host_sd
)
3866 struct target_shmid_ds
*target_sd
;
3868 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3869 return -TARGET_EFAULT
;
3870 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
3871 return -TARGET_EFAULT
;
3872 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
3873 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
3874 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
3875 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
3876 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
3877 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
3878 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
3879 unlock_user_struct(target_sd
, target_addr
, 1);
3883 struct target_shminfo
{
3891 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
3892 struct shminfo
*host_shminfo
)
3894 struct target_shminfo
*target_shminfo
;
3895 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
3896 return -TARGET_EFAULT
;
3897 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
3898 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
3899 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
3900 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
3901 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
3902 unlock_user_struct(target_shminfo
, target_addr
, 1);
3906 struct target_shm_info
{
3911 abi_ulong swap_attempts
;
3912 abi_ulong swap_successes
;
3915 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
3916 struct shm_info
*host_shm_info
)
3918 struct target_shm_info
*target_shm_info
;
3919 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
3920 return -TARGET_EFAULT
;
3921 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
3922 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
3923 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
3924 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
3925 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
3926 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
3927 unlock_user_struct(target_shm_info
, target_addr
, 1);
3931 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
3933 struct shmid_ds dsarg
;
3934 struct shminfo shminfo
;
3935 struct shm_info shm_info
;
3936 abi_long ret
= -TARGET_EINVAL
;
3944 if (target_to_host_shmid_ds(&dsarg
, buf
))
3945 return -TARGET_EFAULT
;
3946 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
3947 if (host_to_target_shmid_ds(buf
, &dsarg
))
3948 return -TARGET_EFAULT
;
3951 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
3952 if (host_to_target_shminfo(buf
, &shminfo
))
3953 return -TARGET_EFAULT
;
3956 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
3957 if (host_to_target_shm_info(buf
, &shm_info
))
3958 return -TARGET_EFAULT
;
3963 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
3970 static inline abi_ulong
do_shmat(int shmid
, abi_ulong shmaddr
, int shmflg
)
3974 struct shmid_ds shm_info
;
3977 /* find out the length of the shared memory segment */
3978 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
3979 if (is_error(ret
)) {
3980 /* can't get length, bail out */
3987 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
3989 abi_ulong mmap_start
;
3991 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
3993 if (mmap_start
== -1) {
3995 host_raddr
= (void *)-1;
3997 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
4000 if (host_raddr
== (void *)-1) {
4002 return get_errno((long)host_raddr
);
4004 raddr
=h2g((unsigned long)host_raddr
);
4006 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
4007 PAGE_VALID
| PAGE_READ
|
4008 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
4010 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
4011 if (!shm_regions
[i
].in_use
) {
4012 shm_regions
[i
].in_use
= true;
4013 shm_regions
[i
].start
= raddr
;
4014 shm_regions
[i
].size
= shm_info
.shm_segsz
;
4024 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
4028 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
4029 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
4030 shm_regions
[i
].in_use
= false;
4031 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
4036 return get_errno(shmdt(g2h(shmaddr
)));
4039 #ifdef TARGET_NR_ipc
4040 /* ??? This only works with linear mappings. */
4041 /* do_ipc() must return target values and target errnos. */
4042 static abi_long
do_ipc(unsigned int call
, abi_long first
,
4043 abi_long second
, abi_long third
,
4044 abi_long ptr
, abi_long fifth
)
4049 version
= call
>> 16;
4054 ret
= do_semop(first
, ptr
, second
);
4058 ret
= get_errno(semget(first
, second
, third
));
4061 case IPCOP_semctl
: {
4062 /* The semun argument to semctl is passed by value, so dereference the
4065 get_user_ual(atptr
, ptr
);
4066 ret
= do_semctl(first
, second
, third
, atptr
);
4071 ret
= get_errno(msgget(first
, second
));
4075 ret
= do_msgsnd(first
, ptr
, second
, third
);
4079 ret
= do_msgctl(first
, second
, ptr
);
4086 struct target_ipc_kludge
{
4091 if (!lock_user_struct(VERIFY_READ
, tmp
, ptr
, 1)) {
4092 ret
= -TARGET_EFAULT
;
4096 ret
= do_msgrcv(first
, tswapal(tmp
->msgp
), second
, tswapal(tmp
->msgtyp
), third
);
4098 unlock_user_struct(tmp
, ptr
, 0);
4102 ret
= do_msgrcv(first
, ptr
, second
, fifth
, third
);
4111 raddr
= do_shmat(first
, ptr
, second
);
4112 if (is_error(raddr
))
4113 return get_errno(raddr
);
4114 if (put_user_ual(raddr
, third
))
4115 return -TARGET_EFAULT
;
4119 ret
= -TARGET_EINVAL
;
4124 ret
= do_shmdt(ptr
);
4128 /* IPC_* flag values are the same on all linux platforms */
4129 ret
= get_errno(shmget(first
, second
, third
));
4132 /* IPC_* and SHM_* command values are the same on all linux platforms */
4134 ret
= do_shmctl(first
, second
, ptr
);
4137 gemu_log("Unsupported ipc call: %d (version %d)\n", call
, version
);
4138 ret
= -TARGET_ENOSYS
;
4145 /* kernel structure types definitions */
4147 #define STRUCT(name, ...) STRUCT_ ## name,
4148 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4150 #include "syscall_types.h"
4154 #undef STRUCT_SPECIAL
4156 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4157 #define STRUCT_SPECIAL(name)
4158 #include "syscall_types.h"
4160 #undef STRUCT_SPECIAL
4162 typedef struct IOCTLEntry IOCTLEntry
;
4164 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4165 int fd
, int cmd
, abi_long arg
);
4169 unsigned int host_cmd
;
4172 do_ioctl_fn
*do_ioctl
;
4173 const argtype arg_type
[5];
4176 #define IOC_R 0x0001
4177 #define IOC_W 0x0002
4178 #define IOC_RW (IOC_R | IOC_W)
4180 #define MAX_STRUCT_SIZE 4096
4182 #ifdef CONFIG_FIEMAP
4183 /* So fiemap access checks don't overflow on 32 bit systems.
4184 * This is very slightly smaller than the limit imposed by
4185 * the underlying kernel.
4187 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4188 / sizeof(struct fiemap_extent))
4190 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4191 int fd
, int cmd
, abi_long arg
)
4193 /* The parameter for this ioctl is a struct fiemap followed
4194 * by an array of struct fiemap_extent whose size is set
4195 * in fiemap->fm_extent_count. The array is filled in by the
4198 int target_size_in
, target_size_out
;
4200 const argtype
*arg_type
= ie
->arg_type
;
4201 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
4204 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
4208 assert(arg_type
[0] == TYPE_PTR
);
4209 assert(ie
->access
== IOC_RW
);
4211 target_size_in
= thunk_type_size(arg_type
, 0);
4212 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
4214 return -TARGET_EFAULT
;
4216 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4217 unlock_user(argptr
, arg
, 0);
4218 fm
= (struct fiemap
*)buf_temp
;
4219 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
4220 return -TARGET_EINVAL
;
4223 outbufsz
= sizeof (*fm
) +
4224 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
4226 if (outbufsz
> MAX_STRUCT_SIZE
) {
4227 /* We can't fit all the extents into the fixed size buffer.
4228 * Allocate one that is large enough and use it instead.
4230 fm
= g_try_malloc(outbufsz
);
4232 return -TARGET_ENOMEM
;
4234 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
4237 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, fm
));
4238 if (!is_error(ret
)) {
4239 target_size_out
= target_size_in
;
4240 /* An extent_count of 0 means we were only counting the extents
4241 * so there are no structs to copy
4243 if (fm
->fm_extent_count
!= 0) {
4244 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
4246 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
4248 ret
= -TARGET_EFAULT
;
4250 /* Convert the struct fiemap */
4251 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
4252 if (fm
->fm_extent_count
!= 0) {
4253 p
= argptr
+ target_size_in
;
4254 /* ...and then all the struct fiemap_extents */
4255 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
4256 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
4261 unlock_user(argptr
, arg
, target_size_out
);
4271 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4272 int fd
, int cmd
, abi_long arg
)
4274 const argtype
*arg_type
= ie
->arg_type
;
4278 struct ifconf
*host_ifconf
;
4280 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
4281 int target_ifreq_size
;
4286 abi_long target_ifc_buf
;
4290 assert(arg_type
[0] == TYPE_PTR
);
4291 assert(ie
->access
== IOC_RW
);
4294 target_size
= thunk_type_size(arg_type
, 0);
4296 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4298 return -TARGET_EFAULT
;
4299 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4300 unlock_user(argptr
, arg
, 0);
4302 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
4303 target_ifc_len
= host_ifconf
->ifc_len
;
4304 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
4306 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
4307 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
4308 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
4310 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
4311 if (outbufsz
> MAX_STRUCT_SIZE
) {
4312 /* We can't fit all the extents into the fixed size buffer.
4313 * Allocate one that is large enough and use it instead.
4315 host_ifconf
= malloc(outbufsz
);
4317 return -TARGET_ENOMEM
;
4319 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
4322 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
4324 host_ifconf
->ifc_len
= host_ifc_len
;
4325 host_ifconf
->ifc_buf
= host_ifc_buf
;
4327 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, host_ifconf
));
4328 if (!is_error(ret
)) {
4329 /* convert host ifc_len to target ifc_len */
4331 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
4332 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
4333 host_ifconf
->ifc_len
= target_ifc_len
;
4335 /* restore target ifc_buf */
4337 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
4339 /* copy struct ifconf to target user */
4341 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4343 return -TARGET_EFAULT
;
4344 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
4345 unlock_user(argptr
, arg
, target_size
);
4347 /* copy ifreq[] to target user */
4349 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
4350 for (i
= 0; i
< nb_ifreq
; i
++) {
4351 thunk_convert(argptr
+ i
* target_ifreq_size
,
4352 host_ifc_buf
+ i
* sizeof(struct ifreq
),
4353 ifreq_arg_type
, THUNK_TARGET
);
4355 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
4365 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
4366 int cmd
, abi_long arg
)
4369 struct dm_ioctl
*host_dm
;
4370 abi_long guest_data
;
4371 uint32_t guest_data_size
;
4373 const argtype
*arg_type
= ie
->arg_type
;
4375 void *big_buf
= NULL
;
4379 target_size
= thunk_type_size(arg_type
, 0);
4380 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4382 ret
= -TARGET_EFAULT
;
4385 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4386 unlock_user(argptr
, arg
, 0);
4388 /* buf_temp is too small, so fetch things into a bigger buffer */
4389 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
4390 memcpy(big_buf
, buf_temp
, target_size
);
4394 guest_data
= arg
+ host_dm
->data_start
;
4395 if ((guest_data
- arg
) < 0) {
4399 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
4400 host_data
= (char*)host_dm
+ host_dm
->data_start
;
4402 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
4403 switch (ie
->host_cmd
) {
4405 case DM_LIST_DEVICES
:
4408 case DM_DEV_SUSPEND
:
4411 case DM_TABLE_STATUS
:
4412 case DM_TABLE_CLEAR
:
4414 case DM_LIST_VERSIONS
:
4418 case DM_DEV_SET_GEOMETRY
:
4419 /* data contains only strings */
4420 memcpy(host_data
, argptr
, guest_data_size
);
4423 memcpy(host_data
, argptr
, guest_data_size
);
4424 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
4428 void *gspec
= argptr
;
4429 void *cur_data
= host_data
;
4430 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
4431 int spec_size
= thunk_type_size(arg_type
, 0);
4434 for (i
= 0; i
< host_dm
->target_count
; i
++) {
4435 struct dm_target_spec
*spec
= cur_data
;
4439 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
4440 slen
= strlen((char*)gspec
+ spec_size
) + 1;
4442 spec
->next
= sizeof(*spec
) + slen
;
4443 strcpy((char*)&spec
[1], gspec
+ spec_size
);
4445 cur_data
+= spec
->next
;
4450 ret
= -TARGET_EINVAL
;
4451 unlock_user(argptr
, guest_data
, 0);
4454 unlock_user(argptr
, guest_data
, 0);
4456 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
4457 if (!is_error(ret
)) {
4458 guest_data
= arg
+ host_dm
->data_start
;
4459 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
4460 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
4461 switch (ie
->host_cmd
) {
4466 case DM_DEV_SUSPEND
:
4469 case DM_TABLE_CLEAR
:
4471 case DM_DEV_SET_GEOMETRY
:
4472 /* no return data */
4474 case DM_LIST_DEVICES
:
4476 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
4477 uint32_t remaining_data
= guest_data_size
;
4478 void *cur_data
= argptr
;
4479 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
4480 int nl_size
= 12; /* can't use thunk_size due to alignment */
4483 uint32_t next
= nl
->next
;
4485 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
4487 if (remaining_data
< nl
->next
) {
4488 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
4491 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
4492 strcpy(cur_data
+ nl_size
, nl
->name
);
4493 cur_data
+= nl
->next
;
4494 remaining_data
-= nl
->next
;
4498 nl
= (void*)nl
+ next
;
4503 case DM_TABLE_STATUS
:
4505 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
4506 void *cur_data
= argptr
;
4507 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
4508 int spec_size
= thunk_type_size(arg_type
, 0);
4511 for (i
= 0; i
< host_dm
->target_count
; i
++) {
4512 uint32_t next
= spec
->next
;
4513 int slen
= strlen((char*)&spec
[1]) + 1;
4514 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
4515 if (guest_data_size
< spec
->next
) {
4516 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
4519 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
4520 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
4521 cur_data
= argptr
+ spec
->next
;
4522 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
4528 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
4529 int count
= *(uint32_t*)hdata
;
4530 uint64_t *hdev
= hdata
+ 8;
4531 uint64_t *gdev
= argptr
+ 8;
4534 *(uint32_t*)argptr
= tswap32(count
);
4535 for (i
= 0; i
< count
; i
++) {
4536 *gdev
= tswap64(*hdev
);
4542 case DM_LIST_VERSIONS
:
4544 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
4545 uint32_t remaining_data
= guest_data_size
;
4546 void *cur_data
= argptr
;
4547 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
4548 int vers_size
= thunk_type_size(arg_type
, 0);
4551 uint32_t next
= vers
->next
;
4553 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
4555 if (remaining_data
< vers
->next
) {
4556 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
4559 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
4560 strcpy(cur_data
+ vers_size
, vers
->name
);
4561 cur_data
+= vers
->next
;
4562 remaining_data
-= vers
->next
;
4566 vers
= (void*)vers
+ next
;
4571 unlock_user(argptr
, guest_data
, 0);
4572 ret
= -TARGET_EINVAL
;
4575 unlock_user(argptr
, guest_data
, guest_data_size
);
4577 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4579 ret
= -TARGET_EFAULT
;
4582 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
4583 unlock_user(argptr
, arg
, target_size
);
4590 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
4591 int cmd
, abi_long arg
)
4595 const argtype
*arg_type
= ie
->arg_type
;
4596 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
4599 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
4600 struct blkpg_partition host_part
;
4602 /* Read and convert blkpg */
4604 target_size
= thunk_type_size(arg_type
, 0);
4605 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4607 ret
= -TARGET_EFAULT
;
4610 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4611 unlock_user(argptr
, arg
, 0);
4613 switch (host_blkpg
->op
) {
4614 case BLKPG_ADD_PARTITION
:
4615 case BLKPG_DEL_PARTITION
:
4616 /* payload is struct blkpg_partition */
4619 /* Unknown opcode */
4620 ret
= -TARGET_EINVAL
;
4624 /* Read and convert blkpg->data */
4625 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
4626 target_size
= thunk_type_size(part_arg_type
, 0);
4627 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4629 ret
= -TARGET_EFAULT
;
4632 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
4633 unlock_user(argptr
, arg
, 0);
4635 /* Swizzle the data pointer to our local copy and call! */
4636 host_blkpg
->data
= &host_part
;
4637 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, host_blkpg
));
4643 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4644 int fd
, int cmd
, abi_long arg
)
4646 const argtype
*arg_type
= ie
->arg_type
;
4647 const StructEntry
*se
;
4648 const argtype
*field_types
;
4649 const int *dst_offsets
, *src_offsets
;
4652 abi_ulong
*target_rt_dev_ptr
;
4653 unsigned long *host_rt_dev_ptr
;
4657 assert(ie
->access
== IOC_W
);
4658 assert(*arg_type
== TYPE_PTR
);
4660 assert(*arg_type
== TYPE_STRUCT
);
4661 target_size
= thunk_type_size(arg_type
, 0);
4662 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4664 return -TARGET_EFAULT
;
4667 assert(*arg_type
== (int)STRUCT_rtentry
);
4668 se
= struct_entries
+ *arg_type
++;
4669 assert(se
->convert
[0] == NULL
);
4670 /* convert struct here to be able to catch rt_dev string */
4671 field_types
= se
->field_types
;
4672 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
4673 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
4674 for (i
= 0; i
< se
->nb_fields
; i
++) {
4675 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
4676 assert(*field_types
== TYPE_PTRVOID
);
4677 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
4678 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
4679 if (*target_rt_dev_ptr
!= 0) {
4680 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
4681 tswapal(*target_rt_dev_ptr
));
4682 if (!*host_rt_dev_ptr
) {
4683 unlock_user(argptr
, arg
, 0);
4684 return -TARGET_EFAULT
;
4687 *host_rt_dev_ptr
= 0;
4692 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
4693 argptr
+ src_offsets
[i
],
4694 field_types
, THUNK_HOST
);
4696 unlock_user(argptr
, arg
, 0);
4698 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
4699 if (*host_rt_dev_ptr
!= 0) {
4700 unlock_user((void *)*host_rt_dev_ptr
,
4701 *target_rt_dev_ptr
, 0);
4706 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4707 int fd
, int cmd
, abi_long arg
)
4709 int sig
= target_to_host_signal(arg
);
4710 return get_errno(ioctl(fd
, ie
->host_cmd
, sig
));
4713 static IOCTLEntry ioctl_entries
[] = {
4714 #define IOCTL(cmd, access, ...) \
4715 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
4716 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
4717 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
4722 /* ??? Implement proper locking for ioctls. */
4723 /* do_ioctl() Must return target values and target errnos. */
4724 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
4726 const IOCTLEntry
*ie
;
4727 const argtype
*arg_type
;
4729 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
4735 if (ie
->target_cmd
== 0) {
4736 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
4737 return -TARGET_ENOSYS
;
4739 if (ie
->target_cmd
== cmd
)
4743 arg_type
= ie
->arg_type
;
4745 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
4748 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
4751 switch(arg_type
[0]) {
4754 ret
= get_errno(ioctl(fd
, ie
->host_cmd
));
4758 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, arg
));
4762 target_size
= thunk_type_size(arg_type
, 0);
4763 switch(ie
->access
) {
4765 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
4766 if (!is_error(ret
)) {
4767 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4769 return -TARGET_EFAULT
;
4770 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
4771 unlock_user(argptr
, arg
, target_size
);
4775 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4777 return -TARGET_EFAULT
;
4778 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4779 unlock_user(argptr
, arg
, 0);
4780 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
4784 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4786 return -TARGET_EFAULT
;
4787 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4788 unlock_user(argptr
, arg
, 0);
4789 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
4790 if (!is_error(ret
)) {
4791 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4793 return -TARGET_EFAULT
;
4794 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
4795 unlock_user(argptr
, arg
, target_size
);
4801 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
4802 (long)cmd
, arg_type
[0]);
4803 ret
= -TARGET_ENOSYS
;
4809 static const bitmask_transtbl iflag_tbl
[] = {
4810 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
4811 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
4812 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
4813 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
4814 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
4815 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
4816 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
4817 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
4818 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
4819 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
4820 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
4821 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
4822 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
4823 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
4827 static const bitmask_transtbl oflag_tbl
[] = {
4828 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
4829 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
4830 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
4831 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
4832 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
4833 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
4834 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
4835 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
4836 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
4837 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
4838 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
4839 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
4840 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
4841 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
4842 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
4843 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
4844 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
4845 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
4846 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
4847 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
4848 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
4849 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
4850 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
4851 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
4855 static const bitmask_transtbl cflag_tbl
[] = {
4856 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
4857 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
4858 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
4859 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
4860 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
4861 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
4862 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
4863 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
4864 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
4865 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
4866 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
4867 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
4868 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
4869 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
4870 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
4871 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
4872 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
4873 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
4874 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
4875 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
4876 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
4877 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
4878 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
4879 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
4880 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
4881 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
4882 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
4883 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
4884 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
4885 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
4886 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
4890 static const bitmask_transtbl lflag_tbl
[] = {
4891 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
4892 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
4893 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
4894 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
4895 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
4896 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
4897 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
4898 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
4899 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
4900 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
4901 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
4902 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
4903 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
4904 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
4905 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
4909 static void target_to_host_termios (void *dst
, const void *src
)
4911 struct host_termios
*host
= dst
;
4912 const struct target_termios
*target
= src
;
4915 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
4917 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
4919 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
4921 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
4922 host
->c_line
= target
->c_line
;
4924 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
4925 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
4926 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
4927 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
4928 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
4929 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
4930 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
4931 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
4932 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
4933 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
4934 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
4935 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
4936 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
4937 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
4938 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
4939 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
4940 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
4941 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
4944 static void host_to_target_termios (void *dst
, const void *src
)
4946 struct target_termios
*target
= dst
;
4947 const struct host_termios
*host
= src
;
4950 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
4952 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
4954 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
4956 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
4957 target
->c_line
= host
->c_line
;
4959 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
4960 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
4961 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
4962 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
4963 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
4964 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
4965 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
4966 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
4967 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
4968 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
4969 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
4970 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
4971 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
4972 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
4973 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
4974 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
4975 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
4976 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
4979 static const StructEntry struct_termios_def
= {
4980 .convert
= { host_to_target_termios
, target_to_host_termios
},
4981 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
4982 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
4985 static bitmask_transtbl mmap_flags_tbl
[] = {
4986 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
4987 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
4988 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
4989 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
, MAP_ANONYMOUS
, MAP_ANONYMOUS
},
4990 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
, MAP_GROWSDOWN
, MAP_GROWSDOWN
},
4991 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
, MAP_DENYWRITE
, MAP_DENYWRITE
},
4992 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
, MAP_EXECUTABLE
, MAP_EXECUTABLE
},
4993 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
4994 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
, MAP_NORESERVE
,
4999 #if defined(TARGET_I386)
5001 /* NOTE: there is really one LDT for all the threads */
5002 static uint8_t *ldt_table
;
5004 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
5011 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
5012 if (size
> bytecount
)
5014 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
5016 return -TARGET_EFAULT
;
5017 /* ??? Should this by byteswapped? */
5018 memcpy(p
, ldt_table
, size
);
5019 unlock_user(p
, ptr
, size
);
5023 /* XXX: add locking support */
5024 static abi_long
write_ldt(CPUX86State
*env
,
5025 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
5027 struct target_modify_ldt_ldt_s ldt_info
;
5028 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5029 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5030 int seg_not_present
, useable
, lm
;
5031 uint32_t *lp
, entry_1
, entry_2
;
5033 if (bytecount
!= sizeof(ldt_info
))
5034 return -TARGET_EINVAL
;
5035 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
5036 return -TARGET_EFAULT
;
5037 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5038 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5039 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5040 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5041 unlock_user_struct(target_ldt_info
, ptr
, 0);
5043 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
5044 return -TARGET_EINVAL
;
5045 seg_32bit
= ldt_info
.flags
& 1;
5046 contents
= (ldt_info
.flags
>> 1) & 3;
5047 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5048 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5049 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5050 useable
= (ldt_info
.flags
>> 6) & 1;
5054 lm
= (ldt_info
.flags
>> 7) & 1;
5056 if (contents
== 3) {
5058 return -TARGET_EINVAL
;
5059 if (seg_not_present
== 0)
5060 return -TARGET_EINVAL
;
5062 /* allocate the LDT */
5064 env
->ldt
.base
= target_mmap(0,
5065 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
5066 PROT_READ
|PROT_WRITE
,
5067 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
5068 if (env
->ldt
.base
== -1)
5069 return -TARGET_ENOMEM
;
5070 memset(g2h(env
->ldt
.base
), 0,
5071 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
5072 env
->ldt
.limit
= 0xffff;
5073 ldt_table
= g2h(env
->ldt
.base
);
5076 /* NOTE: same code as Linux kernel */
5077 /* Allow LDTs to be cleared by the user. */
5078 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5081 read_exec_only
== 1 &&
5083 limit_in_pages
== 0 &&
5084 seg_not_present
== 1 &&
5092 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5093 (ldt_info
.limit
& 0x0ffff);
5094 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5095 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5096 (ldt_info
.limit
& 0xf0000) |
5097 ((read_exec_only
^ 1) << 9) |
5099 ((seg_not_present
^ 1) << 15) |
5101 (limit_in_pages
<< 23) |
5105 entry_2
|= (useable
<< 20);
5107 /* Install the new entry ... */
5109 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
5110 lp
[0] = tswap32(entry_1
);
5111 lp
[1] = tswap32(entry_2
);
5115 /* specific and weird i386 syscalls */
5116 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
5117 unsigned long bytecount
)
5123 ret
= read_ldt(ptr
, bytecount
);
5126 ret
= write_ldt(env
, ptr
, bytecount
, 1);
5129 ret
= write_ldt(env
, ptr
, bytecount
, 0);
5132 ret
= -TARGET_ENOSYS
;
5138 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5139 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5141 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5142 struct target_modify_ldt_ldt_s ldt_info
;
5143 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5144 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5145 int seg_not_present
, useable
, lm
;
5146 uint32_t *lp
, entry_1
, entry_2
;
5149 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5150 if (!target_ldt_info
)
5151 return -TARGET_EFAULT
;
5152 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5153 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5154 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5155 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5156 if (ldt_info
.entry_number
== -1) {
5157 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
5158 if (gdt_table
[i
] == 0) {
5159 ldt_info
.entry_number
= i
;
5160 target_ldt_info
->entry_number
= tswap32(i
);
5165 unlock_user_struct(target_ldt_info
, ptr
, 1);
5167 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
5168 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
5169 return -TARGET_EINVAL
;
5170 seg_32bit
= ldt_info
.flags
& 1;
5171 contents
= (ldt_info
.flags
>> 1) & 3;
5172 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5173 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5174 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5175 useable
= (ldt_info
.flags
>> 6) & 1;
5179 lm
= (ldt_info
.flags
>> 7) & 1;
5182 if (contents
== 3) {
5183 if (seg_not_present
== 0)
5184 return -TARGET_EINVAL
;
5187 /* NOTE: same code as Linux kernel */
5188 /* Allow LDTs to be cleared by the user. */
5189 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5190 if ((contents
== 0 &&
5191 read_exec_only
== 1 &&
5193 limit_in_pages
== 0 &&
5194 seg_not_present
== 1 &&
5202 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5203 (ldt_info
.limit
& 0x0ffff);
5204 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5205 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5206 (ldt_info
.limit
& 0xf0000) |
5207 ((read_exec_only
^ 1) << 9) |
5209 ((seg_not_present
^ 1) << 15) |
5211 (limit_in_pages
<< 23) |
5216 /* Install the new entry ... */
5218 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
5219 lp
[0] = tswap32(entry_1
);
5220 lp
[1] = tswap32(entry_2
);
5224 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5226 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5227 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5228 uint32_t base_addr
, limit
, flags
;
5229 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
5230 int seg_not_present
, useable
, lm
;
5231 uint32_t *lp
, entry_1
, entry_2
;
5233 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5234 if (!target_ldt_info
)
5235 return -TARGET_EFAULT
;
5236 idx
= tswap32(target_ldt_info
->entry_number
);
5237 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
5238 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
5239 unlock_user_struct(target_ldt_info
, ptr
, 1);
5240 return -TARGET_EINVAL
;
5242 lp
= (uint32_t *)(gdt_table
+ idx
);
5243 entry_1
= tswap32(lp
[0]);
5244 entry_2
= tswap32(lp
[1]);
5246 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
5247 contents
= (entry_2
>> 10) & 3;
5248 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
5249 seg_32bit
= (entry_2
>> 22) & 1;
5250 limit_in_pages
= (entry_2
>> 23) & 1;
5251 useable
= (entry_2
>> 20) & 1;
5255 lm
= (entry_2
>> 21) & 1;
5257 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
5258 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
5259 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
5260 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
5261 base_addr
= (entry_1
>> 16) |
5262 (entry_2
& 0xff000000) |
5263 ((entry_2
& 0xff) << 16);
5264 target_ldt_info
->base_addr
= tswapal(base_addr
);
5265 target_ldt_info
->limit
= tswap32(limit
);
5266 target_ldt_info
->flags
= tswap32(flags
);
5267 unlock_user_struct(target_ldt_info
, ptr
, 1);
5270 #endif /* TARGET_I386 && TARGET_ABI32 */
5272 #ifndef TARGET_ABI32
5273 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
5280 case TARGET_ARCH_SET_GS
:
5281 case TARGET_ARCH_SET_FS
:
5282 if (code
== TARGET_ARCH_SET_GS
)
5286 cpu_x86_load_seg(env
, idx
, 0);
5287 env
->segs
[idx
].base
= addr
;
5289 case TARGET_ARCH_GET_GS
:
5290 case TARGET_ARCH_GET_FS
:
5291 if (code
== TARGET_ARCH_GET_GS
)
5295 val
= env
->segs
[idx
].base
;
5296 if (put_user(val
, addr
, abi_ulong
))
5297 ret
= -TARGET_EFAULT
;
5300 ret
= -TARGET_EINVAL
;
5307 #endif /* defined(TARGET_I386) */
5309 #define NEW_STACK_SIZE 0x40000
5312 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
5315 pthread_mutex_t mutex
;
5316 pthread_cond_t cond
;
5319 abi_ulong child_tidptr
;
5320 abi_ulong parent_tidptr
;
5324 static void *clone_func(void *arg
)
5326 new_thread_info
*info
= arg
;
5331 rcu_register_thread();
5333 cpu
= ENV_GET_CPU(env
);
5335 ts
= (TaskState
*)cpu
->opaque
;
5336 info
->tid
= gettid();
5337 cpu
->host_tid
= info
->tid
;
5339 if (info
->child_tidptr
)
5340 put_user_u32(info
->tid
, info
->child_tidptr
);
5341 if (info
->parent_tidptr
)
5342 put_user_u32(info
->tid
, info
->parent_tidptr
);
5343 /* Enable signals. */
5344 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
5345 /* Signal to the parent that we're ready. */
5346 pthread_mutex_lock(&info
->mutex
);
5347 pthread_cond_broadcast(&info
->cond
);
5348 pthread_mutex_unlock(&info
->mutex
);
5349 /* Wait until the parent has finshed initializing the tls state. */
5350 pthread_mutex_lock(&clone_lock
);
5351 pthread_mutex_unlock(&clone_lock
);
5357 /* do_fork() Must return host values and target errnos (unlike most
5358 do_*() functions). */
5359 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
5360 abi_ulong parent_tidptr
, target_ulong newtls
,
5361 abi_ulong child_tidptr
)
5363 CPUState
*cpu
= ENV_GET_CPU(env
);
5367 CPUArchState
*new_env
;
5368 unsigned int nptl_flags
;
5371 /* Emulate vfork() with fork() */
5372 if (flags
& CLONE_VFORK
)
5373 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
5375 if (flags
& CLONE_VM
) {
5376 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
5377 new_thread_info info
;
5378 pthread_attr_t attr
;
5380 ts
= g_new0(TaskState
, 1);
5381 init_task_state(ts
);
5382 /* we create a new CPU instance. */
5383 new_env
= cpu_copy(env
);
5384 /* Init regs that differ from the parent. */
5385 cpu_clone_regs(new_env
, newsp
);
5386 new_cpu
= ENV_GET_CPU(new_env
);
5387 new_cpu
->opaque
= ts
;
5388 ts
->bprm
= parent_ts
->bprm
;
5389 ts
->info
= parent_ts
->info
;
5391 flags
&= ~CLONE_NPTL_FLAGS2
;
5393 if (nptl_flags
& CLONE_CHILD_CLEARTID
) {
5394 ts
->child_tidptr
= child_tidptr
;
5397 if (nptl_flags
& CLONE_SETTLS
)
5398 cpu_set_tls (new_env
, newtls
);
5400 /* Grab a mutex so that thread setup appears atomic. */
5401 pthread_mutex_lock(&clone_lock
);
5403 memset(&info
, 0, sizeof(info
));
5404 pthread_mutex_init(&info
.mutex
, NULL
);
5405 pthread_mutex_lock(&info
.mutex
);
5406 pthread_cond_init(&info
.cond
, NULL
);
5408 if (nptl_flags
& CLONE_CHILD_SETTID
)
5409 info
.child_tidptr
= child_tidptr
;
5410 if (nptl_flags
& CLONE_PARENT_SETTID
)
5411 info
.parent_tidptr
= parent_tidptr
;
5413 ret
= pthread_attr_init(&attr
);
5414 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
5415 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
5416 /* It is not safe to deliver signals until the child has finished
5417 initializing, so temporarily block all signals. */
5418 sigfillset(&sigmask
);
5419 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
5421 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
5422 /* TODO: Free new CPU state if thread creation failed. */
5424 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
5425 pthread_attr_destroy(&attr
);
5427 /* Wait for the child to initialize. */
5428 pthread_cond_wait(&info
.cond
, &info
.mutex
);
5430 if (flags
& CLONE_PARENT_SETTID
)
5431 put_user_u32(ret
, parent_tidptr
);
5435 pthread_mutex_unlock(&info
.mutex
);
5436 pthread_cond_destroy(&info
.cond
);
5437 pthread_mutex_destroy(&info
.mutex
);
5438 pthread_mutex_unlock(&clone_lock
);
5440 /* if no CLONE_VM, we consider it is a fork */
5441 if ((flags
& ~(CSIGNAL
| CLONE_NPTL_FLAGS2
)) != 0) {
5442 return -TARGET_EINVAL
;
5447 /* Child Process. */
5449 cpu_clone_regs(env
, newsp
);
5451 /* There is a race condition here. The parent process could
5452 theoretically read the TID in the child process before the child
5453 tid is set. This would require using either ptrace
5454 (not implemented) or having *_tidptr to point at a shared memory
5455 mapping. We can't repeat the spinlock hack used above because
5456 the child process gets its own copy of the lock. */
5457 if (flags
& CLONE_CHILD_SETTID
)
5458 put_user_u32(gettid(), child_tidptr
);
5459 if (flags
& CLONE_PARENT_SETTID
)
5460 put_user_u32(gettid(), parent_tidptr
);
5461 ts
= (TaskState
*)cpu
->opaque
;
5462 if (flags
& CLONE_SETTLS
)
5463 cpu_set_tls (env
, newtls
);
5464 if (flags
& CLONE_CHILD_CLEARTID
)
5465 ts
->child_tidptr
= child_tidptr
;
5473 /* warning : doesn't handle linux specific flags... */
5474 static int target_to_host_fcntl_cmd(int cmd
)
5477 case TARGET_F_DUPFD
:
5478 case TARGET_F_GETFD
:
5479 case TARGET_F_SETFD
:
5480 case TARGET_F_GETFL
:
5481 case TARGET_F_SETFL
:
5483 case TARGET_F_GETLK
:
5485 case TARGET_F_SETLK
:
5487 case TARGET_F_SETLKW
:
5489 case TARGET_F_GETOWN
:
5491 case TARGET_F_SETOWN
:
5493 case TARGET_F_GETSIG
:
5495 case TARGET_F_SETSIG
:
5497 #if TARGET_ABI_BITS == 32
5498 case TARGET_F_GETLK64
:
5500 case TARGET_F_SETLK64
:
5502 case TARGET_F_SETLKW64
:
5505 case TARGET_F_SETLEASE
:
5507 case TARGET_F_GETLEASE
:
5509 #ifdef F_DUPFD_CLOEXEC
5510 case TARGET_F_DUPFD_CLOEXEC
:
5511 return F_DUPFD_CLOEXEC
;
5513 case TARGET_F_NOTIFY
:
5516 case TARGET_F_GETOWN_EX
:
5520 case TARGET_F_SETOWN_EX
:
5524 return -TARGET_EINVAL
;
5526 return -TARGET_EINVAL
;
5529 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
5530 static const bitmask_transtbl flock_tbl
[] = {
5531 TRANSTBL_CONVERT(F_RDLCK
),
5532 TRANSTBL_CONVERT(F_WRLCK
),
5533 TRANSTBL_CONVERT(F_UNLCK
),
5534 TRANSTBL_CONVERT(F_EXLCK
),
5535 TRANSTBL_CONVERT(F_SHLCK
),
5539 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
5542 struct target_flock
*target_fl
;
5543 struct flock64 fl64
;
5544 struct target_flock64
*target_fl64
;
5546 struct f_owner_ex fox
;
5547 struct target_f_owner_ex
*target_fox
;
5550 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
5552 if (host_cmd
== -TARGET_EINVAL
)
5556 case TARGET_F_GETLK
:
5557 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
5558 return -TARGET_EFAULT
;
5560 target_to_host_bitmask(tswap16(target_fl
->l_type
), flock_tbl
);
5561 fl
.l_whence
= tswap16(target_fl
->l_whence
);
5562 fl
.l_start
= tswapal(target_fl
->l_start
);
5563 fl
.l_len
= tswapal(target_fl
->l_len
);
5564 fl
.l_pid
= tswap32(target_fl
->l_pid
);
5565 unlock_user_struct(target_fl
, arg
, 0);
5566 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
5568 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg
, 0))
5569 return -TARGET_EFAULT
;
5571 host_to_target_bitmask(tswap16(fl
.l_type
), flock_tbl
);
5572 target_fl
->l_whence
= tswap16(fl
.l_whence
);
5573 target_fl
->l_start
= tswapal(fl
.l_start
);
5574 target_fl
->l_len
= tswapal(fl
.l_len
);
5575 target_fl
->l_pid
= tswap32(fl
.l_pid
);
5576 unlock_user_struct(target_fl
, arg
, 1);
5580 case TARGET_F_SETLK
:
5581 case TARGET_F_SETLKW
:
5582 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
5583 return -TARGET_EFAULT
;
5585 target_to_host_bitmask(tswap16(target_fl
->l_type
), flock_tbl
);
5586 fl
.l_whence
= tswap16(target_fl
->l_whence
);
5587 fl
.l_start
= tswapal(target_fl
->l_start
);
5588 fl
.l_len
= tswapal(target_fl
->l_len
);
5589 fl
.l_pid
= tswap32(target_fl
->l_pid
);
5590 unlock_user_struct(target_fl
, arg
, 0);
5591 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
5594 case TARGET_F_GETLK64
:
5595 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
5596 return -TARGET_EFAULT
;
5598 target_to_host_bitmask(tswap16(target_fl64
->l_type
), flock_tbl
) >> 1;
5599 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
5600 fl64
.l_start
= tswap64(target_fl64
->l_start
);
5601 fl64
.l_len
= tswap64(target_fl64
->l_len
);
5602 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
5603 unlock_user_struct(target_fl64
, arg
, 0);
5604 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
5606 if (!lock_user_struct(VERIFY_WRITE
, target_fl64
, arg
, 0))
5607 return -TARGET_EFAULT
;
5608 target_fl64
->l_type
=
5609 host_to_target_bitmask(tswap16(fl64
.l_type
), flock_tbl
) >> 1;
5610 target_fl64
->l_whence
= tswap16(fl64
.l_whence
);
5611 target_fl64
->l_start
= tswap64(fl64
.l_start
);
5612 target_fl64
->l_len
= tswap64(fl64
.l_len
);
5613 target_fl64
->l_pid
= tswap32(fl64
.l_pid
);
5614 unlock_user_struct(target_fl64
, arg
, 1);
5617 case TARGET_F_SETLK64
:
5618 case TARGET_F_SETLKW64
:
5619 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
5620 return -TARGET_EFAULT
;
5622 target_to_host_bitmask(tswap16(target_fl64
->l_type
), flock_tbl
) >> 1;
5623 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
5624 fl64
.l_start
= tswap64(target_fl64
->l_start
);
5625 fl64
.l_len
= tswap64(target_fl64
->l_len
);
5626 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
5627 unlock_user_struct(target_fl64
, arg
, 0);
5628 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
5631 case TARGET_F_GETFL
:
5632 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
5634 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
5638 case TARGET_F_SETFL
:
5639 ret
= get_errno(fcntl(fd
, host_cmd
, target_to_host_bitmask(arg
, fcntl_flags_tbl
)));
5643 case TARGET_F_GETOWN_EX
:
5644 ret
= get_errno(fcntl(fd
, host_cmd
, &fox
));
5646 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
5647 return -TARGET_EFAULT
;
5648 target_fox
->type
= tswap32(fox
.type
);
5649 target_fox
->pid
= tswap32(fox
.pid
);
5650 unlock_user_struct(target_fox
, arg
, 1);
5656 case TARGET_F_SETOWN_EX
:
5657 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
5658 return -TARGET_EFAULT
;
5659 fox
.type
= tswap32(target_fox
->type
);
5660 fox
.pid
= tswap32(target_fox
->pid
);
5661 unlock_user_struct(target_fox
, arg
, 0);
5662 ret
= get_errno(fcntl(fd
, host_cmd
, &fox
));
5666 case TARGET_F_SETOWN
:
5667 case TARGET_F_GETOWN
:
5668 case TARGET_F_SETSIG
:
5669 case TARGET_F_GETSIG
:
5670 case TARGET_F_SETLEASE
:
5671 case TARGET_F_GETLEASE
:
5672 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
5676 ret
= get_errno(fcntl(fd
, cmd
, arg
));
5684 static inline int high2lowuid(int uid
)
5692 static inline int high2lowgid(int gid
)
/* Widen a 16-bit uid (UID16 ABI) to a full-width uid, preserving the
 * special "no change" value -1 (0xffff in 16-bit form). */
static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}
/* Widen a 16-bit gid (UID16 ABI) to a full-width gid, preserving the
 * special "no change" value -1 (0xffff in 16-bit form). */
static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}
5715 static inline int tswapid(int id
)
5720 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
5722 #else /* !USE_UID16 */
5723 static inline int high2lowuid(int uid
)
5727 static inline int high2lowgid(int gid
)
5731 static inline int low2highuid(int uid
)
5735 static inline int low2highgid(int gid
)
5739 static inline int tswapid(int id
)
5744 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
5746 #endif /* USE_UID16 */
5748 /* We must do direct syscalls for setting UID/GID, because we want to
5749 * implement the Linux system call semantics of "change only for this thread",
5750 * not the libc/POSIX semantics of "change for all threads in process".
5751 * (See http://ewontfix.com/17/ for more details.)
5752 * We use the 32-bit version of the syscalls if present; if it is not
5753 * then either the host architecture supports 32-bit UIDs natively with
5754 * the standard syscall, or the 16-bit UID is the best we can do.
5756 #ifdef __NR_setuid32
5757 #define __NR_sys_setuid __NR_setuid32
5759 #define __NR_sys_setuid __NR_setuid
5761 #ifdef __NR_setgid32
5762 #define __NR_sys_setgid __NR_setgid32
5764 #define __NR_sys_setgid __NR_setgid
5766 #ifdef __NR_setresuid32
5767 #define __NR_sys_setresuid __NR_setresuid32
5769 #define __NR_sys_setresuid __NR_setresuid
5771 #ifdef __NR_setresgid32
5772 #define __NR_sys_setresgid __NR_setresgid32
5774 #define __NR_sys_setresgid __NR_setresgid
5777 _syscall1(int, sys_setuid
, uid_t
, uid
)
5778 _syscall1(int, sys_setgid
, gid_t
, gid
)
5779 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
5780 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
5782 void syscall_init(void)
5785 const argtype
*arg_type
;
5789 thunk_init(STRUCT_MAX
);
5791 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
5792 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
5793 #include "syscall_types.h"
5795 #undef STRUCT_SPECIAL
5797 /* Build target_to_host_errno_table[] table from
5798 * host_to_target_errno_table[]. */
5799 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
5800 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
5803 /* we patch the ioctl size if necessary. We rely on the fact that
5804 no ioctl has all the bits at '1' in the size field */
5806 while (ie
->target_cmd
!= 0) {
5807 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
5808 TARGET_IOC_SIZEMASK
) {
5809 arg_type
= ie
->arg_type
;
5810 if (arg_type
[0] != TYPE_PTR
) {
5811 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
5816 size
= thunk_type_size(arg_type
, 0);
5817 ie
->target_cmd
= (ie
->target_cmd
&
5818 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
5819 (size
<< TARGET_IOC_SIZESHIFT
);
5822 /* automatic consistency check if same arch */
5823 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
5824 (defined(__x86_64__) && defined(TARGET_X86_64))
5825 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
5826 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
5827 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
5834 #if TARGET_ABI_BITS == 32
/* Combine two 32-bit syscall register arguments into a 64-bit file offset.
 * Which word holds the high half depends on guest endianness. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
5843 #else /* TARGET_ABI_BITS == 32 */
5844 static inline uint64_t target_offset64(uint64_t word0
, uint64_t word1
)
5848 #endif /* TARGET_ABI_BITS != 32 */
5850 #ifdef TARGET_NR_truncate64
5851 static inline abi_long
target_truncate64(void *cpu_env
, const char *arg1
,
5856 if (regpairs_aligned(cpu_env
)) {
5860 return get_errno(truncate64(arg1
, target_offset64(arg2
, arg3
)));
5864 #ifdef TARGET_NR_ftruncate64
5865 static inline abi_long
target_ftruncate64(void *cpu_env
, abi_long arg1
,
5870 if (regpairs_aligned(cpu_env
)) {
5874 return get_errno(ftruncate64(arg1
, target_offset64(arg2
, arg3
)));
5878 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
5879 abi_ulong target_addr
)
5881 struct target_timespec
*target_ts
;
5883 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
5884 return -TARGET_EFAULT
;
5885 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
5886 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
5887 unlock_user_struct(target_ts
, target_addr
, 0);
5891 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
5892 struct timespec
*host_ts
)
5894 struct target_timespec
*target_ts
;
5896 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
5897 return -TARGET_EFAULT
;
5898 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
5899 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
5900 unlock_user_struct(target_ts
, target_addr
, 1);
5904 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
5905 abi_ulong target_addr
)
5907 struct target_itimerspec
*target_itspec
;
5909 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
5910 return -TARGET_EFAULT
;
5913 host_itspec
->it_interval
.tv_sec
=
5914 tswapal(target_itspec
->it_interval
.tv_sec
);
5915 host_itspec
->it_interval
.tv_nsec
=
5916 tswapal(target_itspec
->it_interval
.tv_nsec
);
5917 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
5918 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
5920 unlock_user_struct(target_itspec
, target_addr
, 1);
5924 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
5925 struct itimerspec
*host_its
)
5927 struct target_itimerspec
*target_itspec
;
5929 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
5930 return -TARGET_EFAULT
;
5933 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
5934 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
5936 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
5937 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
5939 unlock_user_struct(target_itspec
, target_addr
, 0);
5943 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
5944 abi_ulong target_addr
)
5946 struct target_sigevent
*target_sevp
;
5948 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
5949 return -TARGET_EFAULT
;
5952 /* This union is awkward on 64 bit systems because it has a 32 bit
5953 * integer and a pointer in it; we follow the conversion approach
5954 * used for handling sigval types in signal.c so the guest should get
5955 * the correct value back even if we did a 64 bit byteswap and it's
5956 * using the 32 bit integer.
5958 host_sevp
->sigev_value
.sival_ptr
=
5959 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
5960 host_sevp
->sigev_signo
=
5961 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
5962 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
5963 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
5965 unlock_user_struct(target_sevp
, target_addr
, 1);
5969 #if defined(TARGET_NR_mlockall)
5970 static inline int target_to_host_mlockall_arg(int arg
)
5974 if (arg
& TARGET_MLOCKALL_MCL_CURRENT
) {
5975 result
|= MCL_CURRENT
;
5977 if (arg
& TARGET_MLOCKALL_MCL_FUTURE
) {
5978 result
|= MCL_FUTURE
;
5984 static inline abi_long
host_to_target_stat64(void *cpu_env
,
5985 abi_ulong target_addr
,
5986 struct stat
*host_st
)
5988 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
5989 if (((CPUARMState
*)cpu_env
)->eabi
) {
5990 struct target_eabi_stat64
*target_st
;
5992 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
5993 return -TARGET_EFAULT
;
5994 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
5995 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
5996 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
5997 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
5998 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
6000 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
6001 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
6002 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
6003 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
6004 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
6005 __put_user(host_st
->st_size
, &target_st
->st_size
);
6006 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
6007 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
6008 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
6009 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
6010 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
6011 unlock_user_struct(target_st
, target_addr
, 1);
6015 #if defined(TARGET_HAS_STRUCT_STAT64)
6016 struct target_stat64
*target_st
;
6018 struct target_stat
*target_st
;
6021 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
6022 return -TARGET_EFAULT
;
6023 memset(target_st
, 0, sizeof(*target_st
));
6024 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
6025 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
6026 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6027 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
6029 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
6030 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
6031 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
6032 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
6033 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
6034 /* XXX: better use of kernel struct */
6035 __put_user(host_st
->st_size
, &target_st
->st_size
);
6036 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
6037 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
6038 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
6039 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
6040 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
6041 unlock_user_struct(target_st
, target_addr
, 1);
6047 /* ??? Using host futex calls even when target atomic operations
6048 are not really atomic probably breaks things. However implementing
6049 futexes locally would make futexes shared between multiple processes
6050 tricky. However they're probably useless because guest atomic
6051 operations won't work either. */
6052 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
6053 target_ulong uaddr2
, int val3
)
6055 struct timespec ts
, *pts
;
6058 /* ??? We assume FUTEX_* constants are the same on both host
6060 #ifdef FUTEX_CMD_MASK
6061 base_op
= op
& FUTEX_CMD_MASK
;
6067 case FUTEX_WAIT_BITSET
:
6070 target_to_host_timespec(pts
, timeout
);
6074 return get_errno(safe_futex(g2h(uaddr
), op
, tswap32(val
),
6077 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
6079 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
6081 case FUTEX_CMP_REQUEUE
:
6083 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6084 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6085 But the prototype takes a `struct timespec *'; insert casts
6086 to satisfy the compiler. We do not need to tswap TIMEOUT
6087 since it's not compared to guest memory. */
6088 pts
= (struct timespec
*)(uintptr_t) timeout
;
6089 return get_errno(safe_futex(g2h(uaddr
), op
, val
, pts
,
6091 (base_op
== FUTEX_CMP_REQUEUE
6095 return -TARGET_ENOSYS
;
6098 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6099 static abi_long
do_name_to_handle_at(abi_long dirfd
, abi_long pathname
,
6100 abi_long handle
, abi_long mount_id
,
6103 struct file_handle
*target_fh
;
6104 struct file_handle
*fh
;
6108 unsigned int size
, total_size
;
6110 if (get_user_s32(size
, handle
)) {
6111 return -TARGET_EFAULT
;
6114 name
= lock_user_string(pathname
);
6116 return -TARGET_EFAULT
;
6119 total_size
= sizeof(struct file_handle
) + size
;
6120 target_fh
= lock_user(VERIFY_WRITE
, handle
, total_size
, 0);
6122 unlock_user(name
, pathname
, 0);
6123 return -TARGET_EFAULT
;
6126 fh
= g_malloc0(total_size
);
6127 fh
->handle_bytes
= size
;
6129 ret
= get_errno(name_to_handle_at(dirfd
, path(name
), fh
, &mid
, flags
));
6130 unlock_user(name
, pathname
, 0);
6132 /* man name_to_handle_at(2):
6133 * Other than the use of the handle_bytes field, the caller should treat
6134 * the file_handle structure as an opaque data type
6137 memcpy(target_fh
, fh
, total_size
);
6138 target_fh
->handle_bytes
= tswap32(fh
->handle_bytes
);
6139 target_fh
->handle_type
= tswap32(fh
->handle_type
);
6141 unlock_user(target_fh
, handle
, total_size
);
6143 if (put_user_s32(mid
, mount_id
)) {
6144 return -TARGET_EFAULT
;
6152 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate open_by_handle_at(2) for the guest.
 *
 * Duplicates the guest's file_handle into host memory, fixes up the
 * byte order of handle_type (payload is opaque and copied as-is), and
 * invokes the host syscall with flags translated through fcntl_flags_tbl.
 * Returns the new fd or a -TARGET_* errno.
 *
 * NOTE(review): cleanup lines (g_free of fh, final return) are elided in
 * this extraction.
 */
6153 static abi_long
do_open_by_handle_at(abi_long mount_fd
, abi_long handle
,
6156 struct file_handle
*target_fh
;
6157 struct file_handle
*fh
;
6158 unsigned int size
, total_size
;
/* size <- guest file_handle.handle_bytes. */
6161 if (get_user_s32(size
, handle
)) {
6162 return -TARGET_EFAULT
;
6165 total_size
= sizeof(struct file_handle
) + size
;
6166 target_fh
= lock_user(VERIFY_READ
, handle
, total_size
, 1);
6168 return -TARGET_EFAULT
;
/* Host-side copy: handle_bytes/handle_type in host byte order. */
6171 fh
= g_memdup(target_fh
, total_size
);
6172 fh
->handle_bytes
= size
;
6173 fh
->handle_type
= tswap32(target_fh
->handle_type
);
6175 ret
= get_errno(open_by_handle_at(mount_fd
, fh
,
6176 target_to_host_bitmask(flags
, fcntl_flags_tbl
)));
6180 unlock_user(target_fh
, handle
, total_size
);
6186 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6188 /* signalfd siginfo conversion */
/*
 * Convert one struct signalfd_siginfo from host to target representation:
 * remap the signal number and byte-swap every field.  The only caller
 * (host_to_target_data_signalfd) passes the same buffer for tinfo and
 * info, so the conversion is effectively in-place.
 */
6191 host_to_target_signalfd_siginfo(struct signalfd_siginfo
*tinfo
,
6192 const struct signalfd_siginfo
*info
)
6194 int sig
= host_to_target_signal(info
->ssi_signo
);
6196 /* linux/signalfd.h defines a ssi_addr_lsb
6197 * not defined in sys/signalfd.h but used by some kernels
6200 #ifdef BUS_MCEERR_AO
/* Machine-check SIGBUS carries an extra 16-bit LSB just past ssi_addr;
 * swap it too.  (Reads pre-swap fields, which is fine for the in-place
 * call pattern noted above.) */
6201 if (tinfo
->ssi_signo
== SIGBUS
&&
6202 (tinfo
->ssi_code
== BUS_MCEERR_AR
||
6203 tinfo
->ssi_code
== BUS_MCEERR_AO
)) {
6204 uint16_t *ssi_addr_lsb
= (uint16_t *)(&info
->ssi_addr
+ 1);
6205 uint16_t *tssi_addr_lsb
= (uint16_t *)(&tinfo
->ssi_addr
+ 1);
6206 *tssi_addr_lsb
= tswap16(*ssi_addr_lsb
);
/* Field-by-field byte swap; ssi_signo uses the remapped signal number. */
6210 tinfo
->ssi_signo
= tswap32(sig
);
/* NOTE(review): reads tinfo->ssi_errno rather than info->ssi_errno —
 * equivalent only because the caller converts in place; verify. */
6211 tinfo
->ssi_errno
= tswap32(tinfo
->ssi_errno
);
6212 tinfo
->ssi_code
= tswap32(info
->ssi_code
);
6213 tinfo
->ssi_pid
= tswap32(info
->ssi_pid
);
6214 tinfo
->ssi_uid
= tswap32(info
->ssi_uid
);
6215 tinfo
->ssi_fd
= tswap32(info
->ssi_fd
);
6216 tinfo
->ssi_tid
= tswap32(info
->ssi_tid
);
6217 tinfo
->ssi_band
= tswap32(info
->ssi_band
);
6218 tinfo
->ssi_overrun
= tswap32(info
->ssi_overrun
);
6219 tinfo
->ssi_trapno
= tswap32(info
->ssi_trapno
);
6220 tinfo
->ssi_status
= tswap32(info
->ssi_status
);
6221 tinfo
->ssi_int
= tswap32(info
->ssi_int
);
6222 tinfo
->ssi_ptr
= tswap64(info
->ssi_ptr
);
6223 tinfo
->ssi_utime
= tswap64(info
->ssi_utime
);
6224 tinfo
->ssi_stime
= tswap64(info
->ssi_stime
);
6225 tinfo
->ssi_addr
= tswap64(info
->ssi_addr
);
/*
 * fd_trans hook for signalfd descriptors: convert, in place, each
 * signalfd_siginfo record in a buffer just read from the host fd.
 */
6228 static abi_long
host_to_target_data_signalfd(void *buf
, size_t len
)
6232 for (i
= 0; i
< len
; i
+= sizeof(struct signalfd_siginfo
)) {
/* Same pointer for src and dst: conversion is in place. */
6233 host_to_target_signalfd_siginfo(buf
+ i
, buf
+ i
);
/* fd translator registered on signalfd fds: only read data needs fixup. */
6239 static TargetFdTrans target_signalfd_trans
= {
6240 .host_to_target_data
= host_to_target_data_signalfd
,
/*
 * Emulate signalfd4(2): validate flags, translate the guest sigset and
 * flag bits to host form, perform the host signalfd() call, and register
 * the resulting fd for siginfo byte-order translation on reads.
 */
6243 static abi_long
do_signalfd4(int fd
, abi_long mask
, int flags
)
6246 target_sigset_t
*target_mask
;
/* Only NONBLOCK and CLOEXEC are valid signalfd4 flags. */
6250 if (flags
& ~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
)) {
6251 return -TARGET_EINVAL
;
6253 if (!lock_user_struct(VERIFY_READ
, target_mask
, mask
, 1)) {
6254 return -TARGET_EFAULT
;
6257 target_to_host_sigset(&host_mask
, target_mask
);
6259 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
6261 ret
= get_errno(signalfd(fd
, &host_mask
, host_flags
));
/* On success, reads from this fd must byte-swap signalfd_siginfo. */
6263 fd_trans_register(ret
, &target_signalfd_trans
);
6266 unlock_user_struct(target_mask
, mask
, 0);
6272 /* Map host to target signal numbers for the wait family of syscalls.
6273 Assume all other status bits are the same. */
6274 int host_to_target_waitstatus(int status
)
/* Terminated by signal: remap the low 7 signal bits, keep the rest. */
6276 if (WIFSIGNALED(status
)) {
6277 return host_to_target_signal(WTERMSIG(status
)) | (status
& ~0x7f);
/* Stopped: the stop signal lives in bits 8-15 of the status word. */
6279 if (WIFSTOPPED(status
)) {
6280 return (host_to_target_signal(WSTOPSIG(status
)) << 8)
/*
 * Back a fake /proc/self/cmdline for the guest: stream the host's
 * /proc/self/cmdline into fd, but drop the first NUL-terminated word
 * (the qemu binary path) so the guest sees its own argv[0] first.
 * Returns 0 on success, -1 on error (via close()).
 */
6286 static int open_self_cmdline(void *cpu_env
, int fd
)
6289 bool word_skipped
= false;
6291 fd_orig
= open("/proc/self/cmdline", O_RDONLY
);
6301 nb_read
= read(fd_orig
, buf
, sizeof(buf
));
6304 fd_orig
= close(fd_orig
);
6307 } else if (nb_read
== 0) {
6311 if (!word_skipped
) {
6312 /* Skip the first string, which is the path to qemu-*-static
6313 instead of the actual command. */
6314 cp_buf
= memchr(buf
, 0, sizeof(buf
));
6316 /* Null byte found, skip one string */
6318 nb_read
-= cp_buf
- buf
;
6319 word_skipped
= true;
/* Forward the (possibly trimmed) chunk; short write is an error. */
6324 if (write(fd
, cp_buf
, nb_read
) != nb_read
) {
6333 return close(fd_orig
);
/*
 * Back a fake /proc/self/maps for the guest: parse each line of the
 * host's /proc/self/maps, keep only ranges that map to valid guest
 * addresses, rewrite the addresses into guest terms (h2g), and label the
 * guest stack region " [stack]".  Output is written to fd via dprintf.
 */
6336 static int open_self_maps(void *cpu_env
, int fd
)
6338 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
6339 TaskState
*ts
= cpu
->opaque
;
6345 fp
= fopen("/proc/self/maps", "r");
6350 while ((read
= getline(&line
, &len
, fp
)) != -1) {
6351 int fields
, dev_maj
, dev_min
, inode
;
6352 uint64_t min
, max
, offset
;
6353 char flag_r
, flag_w
, flag_x
, flag_p
;
6354 char path
[512] = "";
/* Standard maps line: start-end perms offset dev:inode [path]. */
6355 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
6356 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
6357 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
/* path is optional: expect 10 or 11 converted fields. */
6359 if ((fields
< 10) || (fields
> 11)) {
6362 if (h2g_valid(min
)) {
6363 int flags
= page_get_flags(h2g(min
));
/* Clamp max to the top of the guest address space if needed. */
6364 max
= h2g_valid(max
- 1) ? max
: (uintptr_t)g2h(GUEST_ADDR_MAX
);
6365 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
6368 if (h2g(min
) == ts
->info
->stack_limit
) {
6369 pstrcpy(path
, sizeof(path
), " [stack]");
/* Re-emit the line with guest-translated addresses. */
6371 dprintf(fd
, TARGET_ABI_FMT_lx
"-" TARGET_ABI_FMT_lx
6372 " %c%c%c%c %08" PRIx64
" %02x:%02x %d %s%s\n",
6373 h2g(min
), h2g(max
- 1) + 1, flag_r
, flag_w
,
6374 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
6375 path
[0] ? " " : "", path
);
/*
 * Back a fake /proc/self/stat for the guest: emit the 44 space-separated
 * fields, filling in only the pid (field 0), comm (field 1, from the
 * guest argv[0]) and start-of-stack (field 27); every other field is
 * reported as 0.
 */
6385 static int open_self_stat(void *cpu_env
, int fd
)
6387 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
6388 TaskState
*ts
= cpu
->opaque
;
6389 abi_ulong start_stack
= ts
->info
->start_stack
;
6392 for (i
= 0; i
< 44; i
++) {
6400 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
6401 } else if (i
== 1) {
/* Field 1 is "comm", conventionally in parentheses. */
6403 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
6404 } else if (i
== 27) {
6407 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
6409 /* for the rest, there is MasterCard */
/* Zero-fill remaining fields; last one ends the line with '\n'. */
6410 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
6414 if (write(fd
, buf
, len
) != len
) {
/*
 * Back a fake /proc/self/auxv for the guest: copy the auxiliary vector
 * saved on the guest stack at exec time out to fd, then rewind fd so the
 * guest reads it from the start.
 */
6422 static int open_self_auxv(void *cpu_env
, int fd
)
6424 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
6425 TaskState
*ts
= cpu
->opaque
;
6426 abi_ulong auxv
= ts
->info
->saved_auxv
;
6427 abi_ulong len
= ts
->info
->auxv_len
;
6431 * Auxiliary vector is stored in target process stack.
6432 * read in whole auxv vector and copy it to file
6434 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
6438 r
= write(fd
, ptr
, len
);
/* Leave the fd positioned at offset 0 for the guest's first read. */
6445 lseek(fd
, 0, SEEK_SET
);
6446 unlock_user(ptr
, auxv
, len
);
/*
 * Return non-zero when filename refers to this process's own proc entry:
 * "/proc/self/<entry>" or "/proc/<our-pid>/<entry>".  Used to decide
 * which opens must be redirected to the fake /proc backends above.
 */
6452 static int is_proc_myself(const char *filename
, const char *entry
)
6454 if (!strncmp(filename
, "/proc/", strlen("/proc/"))) {
6455 filename
+= strlen("/proc/");
6456 if (!strncmp(filename
, "self/", strlen("self/"))) {
6457 filename
+= strlen("self/");
/* Numeric pid path: match it against our own getpid(). */
6458 } else if (*filename
>= '1' && *filename
<= '9') {
6460 snprintf(myself
, sizeof(myself
), "%d/", getpid());
6461 if (!strncmp(filename
, myself
, strlen(myself
))) {
6462 filename
+= strlen(myself
);
/* What remains after the prefix must equal the requested entry. */
6469 if (!strcmp(filename
, entry
)) {
6476 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Predicate for the fake_open table: does FILENAME name exactly the
 * /proc entry ENTRY?  Non-zero on an exact match, zero otherwise. */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
/*
 * Back a fake /proc/net/route for the guest on cross-endian setups:
 * pass the header line through unchanged, then byte-swap the address
 * fields (dest, gateway, mask) of every route entry so the guest sees
 * them in its own byte order.
 */
6482 static int open_net_route(void *cpu_env
, int fd
)
6489 fp
= fopen("/proc/net/route", "r");
/* First line is the column header: forward verbatim. */
6496 read
= getline(&line
, &len
, fp
);
6497 dprintf(fd
, "%s", line
);
6501 while ((read
= getline(&line
, &len
, fp
)) != -1) {
6503 uint32_t dest
, gw
, mask
;
6504 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
6505 sscanf(line
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6506 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
6507 &mask
, &mtu
, &window
, &irtt
);
/* Only the three 32-bit addresses are byte-order sensitive. */
6508 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6509 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
6510 metric
, tswap32(mask
), mtu
, window
, irtt
);
/*
 * Open a path on behalf of the guest, intercepting the /proc entries
 * that must be faked: /proc/self/{maps,stat,auxv,cmdline}, /proc/self/exe
 * (redirected to the guest executable), and on cross-endian hosts
 * /proc/net/route.  Faked entries are materialized into a mkstemp()
 * temporary filled by the matching open_self_*/open_net_* generator;
 * everything else falls through to safe_openat() on the real path.
 */
6520 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
6523 const char *filename
;
6524 int (*fill
)(void *cpu_env
, int fd
);
6525 int (*cmp
)(const char *s1
, const char *s2
);
6527 const struct fake_open
*fake_open
;
/* Table of intercepted names: entry name, content generator, matcher. */
6528 static const struct fake_open fakes
[] = {
6529 { "maps", open_self_maps
, is_proc_myself
},
6530 { "stat", open_self_stat
, is_proc_myself
},
6531 { "auxv", open_self_auxv
, is_proc_myself
},
6532 { "cmdline", open_self_cmdline
, is_proc_myself
},
6533 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6534 { "/proc/net/route", open_net_route
, is_proc
},
6536 { NULL
, NULL
, NULL
}
/* /proc/self/exe must name the guest binary, not qemu itself. */
6539 if (is_proc_myself(pathname
, "exe")) {
6540 int execfd
= qemu_getauxval(AT_EXECFD
);
6541 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
6544 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
6545 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
6550 if (fake_open
->filename
) {
6552 char filename
[PATH_MAX
];
6555 /* create temporary file to map stat to */
6556 tmpdir
= getenv("TMPDIR");
6559 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
6560 fd
= mkstemp(filename
);
/* Let the generator write the fake contents into the temp file. */
6566 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
6572 lseek(fd
, 0, SEEK_SET
);
/* Not an intercepted name: open the real path. */
6577 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
6580 #define TIMER_MAGIC 0x0caf0000
6581 #define TIMER_MAGIC_MASK 0xffff0000
6583 /* Convert QEMU provided timer ID back to internal 16bit index format */
/*
 * Guest-visible timer ids are the internal table index tagged with
 * TIMER_MAGIC in the upper 16 bits.  Reject anything without the tag or
 * whose index falls outside g_posix_timers; otherwise yield the index.
 */
6584 static target_timer_t
get_timer_id(abi_long arg
)
6586 target_timer_t timerid
= arg
;
6588 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
6589 return -TARGET_EINVAL
;
6594 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
6595 return -TARGET_EINVAL
;
6601 /* do_syscall() should always have a single exit point at the end so
6602 that actions, such as logging of syscall results, can be performed.
6603 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
6604 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
6605 abi_long arg2
, abi_long arg3
, abi_long arg4
,
6606 abi_long arg5
, abi_long arg6
, abi_long arg7
,
6609 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
6615 #if defined(DEBUG_ERESTARTSYS)
6616 /* Debug-only code for exercising the syscall-restart code paths
6617 * in the per-architecture cpu main loops: restart every syscall
6618 * the guest makes once before letting it through.
6625 return -TARGET_ERESTARTSYS
;
6631 gemu_log("syscall %d", num
);
6634 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6637 case TARGET_NR_exit
:
6638 /* In old applications this may be used to implement _exit(2).
6639 However in threaded applictions it is used for thread termination,
6640 and _exit_group is used for application termination.
6641 Do thread termination if we have more then one thread. */
6642 /* FIXME: This probably breaks if a signal arrives. We should probably
6643 be disabling signals. */
6644 if (CPU_NEXT(first_cpu
)) {
6648 /* Remove the CPU from the list. */
6649 QTAILQ_REMOVE(&cpus
, cpu
, node
);
6652 if (ts
->child_tidptr
) {
6653 put_user_u32(0, ts
->child_tidptr
);
6654 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
6658 object_unref(OBJECT(cpu
));
6660 rcu_unregister_thread();
6666 gdb_exit(cpu_env
, arg1
);
6668 ret
= 0; /* avoid warning */
6670 case TARGET_NR_read
:
6674 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
6676 ret
= get_errno(safe_read(arg1
, p
, arg3
));
6678 fd_trans_host_to_target_data(arg1
)) {
6679 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
6681 unlock_user(p
, arg2
, ret
);
6684 case TARGET_NR_write
:
6685 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
6687 ret
= get_errno(safe_write(arg1
, p
, arg3
));
6688 unlock_user(p
, arg2
, 0);
6690 #ifdef TARGET_NR_open
6691 case TARGET_NR_open
:
6692 if (!(p
= lock_user_string(arg1
)))
6694 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
6695 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
6697 fd_trans_unregister(ret
);
6698 unlock_user(p
, arg1
, 0);
6701 case TARGET_NR_openat
:
6702 if (!(p
= lock_user_string(arg2
)))
6704 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
6705 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
6707 fd_trans_unregister(ret
);
6708 unlock_user(p
, arg2
, 0);
6710 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6711 case TARGET_NR_name_to_handle_at
:
6712 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
6715 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6716 case TARGET_NR_open_by_handle_at
:
6717 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
6718 fd_trans_unregister(ret
);
6721 case TARGET_NR_close
:
6722 fd_trans_unregister(arg1
);
6723 ret
= get_errno(close(arg1
));
6728 #ifdef TARGET_NR_fork
6729 case TARGET_NR_fork
:
6730 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
6733 #ifdef TARGET_NR_waitpid
6734 case TARGET_NR_waitpid
:
6737 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
6738 if (!is_error(ret
) && arg2
&& ret
6739 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
6744 #ifdef TARGET_NR_waitid
6745 case TARGET_NR_waitid
:
6749 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
6750 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
6751 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
6753 host_to_target_siginfo(p
, &info
);
6754 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
6759 #ifdef TARGET_NR_creat /* not on alpha */
6760 case TARGET_NR_creat
:
6761 if (!(p
= lock_user_string(arg1
)))
6763 ret
= get_errno(creat(p
, arg2
));
6764 fd_trans_unregister(ret
);
6765 unlock_user(p
, arg1
, 0);
6768 #ifdef TARGET_NR_link
6769 case TARGET_NR_link
:
6772 p
= lock_user_string(arg1
);
6773 p2
= lock_user_string(arg2
);
6775 ret
= -TARGET_EFAULT
;
6777 ret
= get_errno(link(p
, p2
));
6778 unlock_user(p2
, arg2
, 0);
6779 unlock_user(p
, arg1
, 0);
6783 #if defined(TARGET_NR_linkat)
6784 case TARGET_NR_linkat
:
6789 p
= lock_user_string(arg2
);
6790 p2
= lock_user_string(arg4
);
6792 ret
= -TARGET_EFAULT
;
6794 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
6795 unlock_user(p
, arg2
, 0);
6796 unlock_user(p2
, arg4
, 0);
6800 #ifdef TARGET_NR_unlink
6801 case TARGET_NR_unlink
:
6802 if (!(p
= lock_user_string(arg1
)))
6804 ret
= get_errno(unlink(p
));
6805 unlock_user(p
, arg1
, 0);
6808 #if defined(TARGET_NR_unlinkat)
6809 case TARGET_NR_unlinkat
:
6810 if (!(p
= lock_user_string(arg2
)))
6812 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
6813 unlock_user(p
, arg2
, 0);
6816 case TARGET_NR_execve
:
6818 char **argp
, **envp
;
6821 abi_ulong guest_argp
;
6822 abi_ulong guest_envp
;
6829 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
6830 if (get_user_ual(addr
, gp
))
6838 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
6839 if (get_user_ual(addr
, gp
))
6846 argp
= alloca((argc
+ 1) * sizeof(void *));
6847 envp
= alloca((envc
+ 1) * sizeof(void *));
6849 for (gp
= guest_argp
, q
= argp
; gp
;
6850 gp
+= sizeof(abi_ulong
), q
++) {
6851 if (get_user_ual(addr
, gp
))
6855 if (!(*q
= lock_user_string(addr
)))
6857 total_size
+= strlen(*q
) + 1;
6861 for (gp
= guest_envp
, q
= envp
; gp
;
6862 gp
+= sizeof(abi_ulong
), q
++) {
6863 if (get_user_ual(addr
, gp
))
6867 if (!(*q
= lock_user_string(addr
)))
6869 total_size
+= strlen(*q
) + 1;
6873 if (!(p
= lock_user_string(arg1
)))
6875 /* Although execve() is not an interruptible syscall it is
6876 * a special case where we must use the safe_syscall wrapper:
6877 * if we allow a signal to happen before we make the host
6878 * syscall then we will 'lose' it, because at the point of
6879 * execve the process leaves QEMU's control. So we use the
6880 * safe syscall wrapper to ensure that we either take the
6881 * signal as a guest signal, or else it does not happen
6882 * before the execve completes and makes it the other
6883 * program's problem.
6885 ret
= get_errno(safe_execve(p
, argp
, envp
));
6886 unlock_user(p
, arg1
, 0);
6891 ret
= -TARGET_EFAULT
;
6894 for (gp
= guest_argp
, q
= argp
; *q
;
6895 gp
+= sizeof(abi_ulong
), q
++) {
6896 if (get_user_ual(addr
, gp
)
6899 unlock_user(*q
, addr
, 0);
6901 for (gp
= guest_envp
, q
= envp
; *q
;
6902 gp
+= sizeof(abi_ulong
), q
++) {
6903 if (get_user_ual(addr
, gp
)
6906 unlock_user(*q
, addr
, 0);
6910 case TARGET_NR_chdir
:
6911 if (!(p
= lock_user_string(arg1
)))
6913 ret
= get_errno(chdir(p
));
6914 unlock_user(p
, arg1
, 0);
6916 #ifdef TARGET_NR_time
6917 case TARGET_NR_time
:
6920 ret
= get_errno(time(&host_time
));
6923 && put_user_sal(host_time
, arg1
))
6928 #ifdef TARGET_NR_mknod
6929 case TARGET_NR_mknod
:
6930 if (!(p
= lock_user_string(arg1
)))
6932 ret
= get_errno(mknod(p
, arg2
, arg3
));
6933 unlock_user(p
, arg1
, 0);
6936 #if defined(TARGET_NR_mknodat)
6937 case TARGET_NR_mknodat
:
6938 if (!(p
= lock_user_string(arg2
)))
6940 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
6941 unlock_user(p
, arg2
, 0);
6944 #ifdef TARGET_NR_chmod
6945 case TARGET_NR_chmod
:
6946 if (!(p
= lock_user_string(arg1
)))
6948 ret
= get_errno(chmod(p
, arg2
));
6949 unlock_user(p
, arg1
, 0);
6952 #ifdef TARGET_NR_break
6953 case TARGET_NR_break
:
6956 #ifdef TARGET_NR_oldstat
6957 case TARGET_NR_oldstat
:
6960 case TARGET_NR_lseek
:
6961 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
6963 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
6964 /* Alpha specific */
6965 case TARGET_NR_getxpid
:
6966 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
6967 ret
= get_errno(getpid());
6970 #ifdef TARGET_NR_getpid
6971 case TARGET_NR_getpid
:
6972 ret
= get_errno(getpid());
6975 case TARGET_NR_mount
:
6977 /* need to look at the data field */
6981 p
= lock_user_string(arg1
);
6989 p2
= lock_user_string(arg2
);
6992 unlock_user(p
, arg1
, 0);
6998 p3
= lock_user_string(arg3
);
7001 unlock_user(p
, arg1
, 0);
7003 unlock_user(p2
, arg2
, 0);
7010 /* FIXME - arg5 should be locked, but it isn't clear how to
7011 * do that since it's not guaranteed to be a NULL-terminated
7015 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
7017 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
7019 ret
= get_errno(ret
);
7022 unlock_user(p
, arg1
, 0);
7024 unlock_user(p2
, arg2
, 0);
7026 unlock_user(p3
, arg3
, 0);
7030 #ifdef TARGET_NR_umount
7031 case TARGET_NR_umount
:
7032 if (!(p
= lock_user_string(arg1
)))
7034 ret
= get_errno(umount(p
));
7035 unlock_user(p
, arg1
, 0);
7038 #ifdef TARGET_NR_stime /* not on alpha */
7039 case TARGET_NR_stime
:
7042 if (get_user_sal(host_time
, arg1
))
7044 ret
= get_errno(stime(&host_time
));
7048 case TARGET_NR_ptrace
:
7050 #ifdef TARGET_NR_alarm /* not on alpha */
7051 case TARGET_NR_alarm
:
7055 #ifdef TARGET_NR_oldfstat
7056 case TARGET_NR_oldfstat
:
7059 #ifdef TARGET_NR_pause /* not on alpha */
7060 case TARGET_NR_pause
:
7061 ret
= get_errno(pause());
7064 #ifdef TARGET_NR_utime
7065 case TARGET_NR_utime
:
7067 struct utimbuf tbuf
, *host_tbuf
;
7068 struct target_utimbuf
*target_tbuf
;
7070 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
7072 tbuf
.actime
= tswapal(target_tbuf
->actime
);
7073 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
7074 unlock_user_struct(target_tbuf
, arg2
, 0);
7079 if (!(p
= lock_user_string(arg1
)))
7081 ret
= get_errno(utime(p
, host_tbuf
));
7082 unlock_user(p
, arg1
, 0);
7086 #ifdef TARGET_NR_utimes
7087 case TARGET_NR_utimes
:
7089 struct timeval
*tvp
, tv
[2];
7091 if (copy_from_user_timeval(&tv
[0], arg2
)
7092 || copy_from_user_timeval(&tv
[1],
7093 arg2
+ sizeof(struct target_timeval
)))
7099 if (!(p
= lock_user_string(arg1
)))
7101 ret
= get_errno(utimes(p
, tvp
));
7102 unlock_user(p
, arg1
, 0);
7106 #if defined(TARGET_NR_futimesat)
7107 case TARGET_NR_futimesat
:
7109 struct timeval
*tvp
, tv
[2];
7111 if (copy_from_user_timeval(&tv
[0], arg3
)
7112 || copy_from_user_timeval(&tv
[1],
7113 arg3
+ sizeof(struct target_timeval
)))
7119 if (!(p
= lock_user_string(arg2
)))
7121 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
7122 unlock_user(p
, arg2
, 0);
7126 #ifdef TARGET_NR_stty
7127 case TARGET_NR_stty
:
7130 #ifdef TARGET_NR_gtty
7131 case TARGET_NR_gtty
:
7134 #ifdef TARGET_NR_access
7135 case TARGET_NR_access
:
7136 if (!(p
= lock_user_string(arg1
)))
7138 ret
= get_errno(access(path(p
), arg2
));
7139 unlock_user(p
, arg1
, 0);
7142 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7143 case TARGET_NR_faccessat
:
7144 if (!(p
= lock_user_string(arg2
)))
7146 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
7147 unlock_user(p
, arg2
, 0);
7150 #ifdef TARGET_NR_nice /* not on alpha */
7151 case TARGET_NR_nice
:
7152 ret
= get_errno(nice(arg1
));
7155 #ifdef TARGET_NR_ftime
7156 case TARGET_NR_ftime
:
7159 case TARGET_NR_sync
:
7163 case TARGET_NR_kill
:
7164 ret
= get_errno(kill(arg1
, target_to_host_signal(arg2
)));
7166 #ifdef TARGET_NR_rename
7167 case TARGET_NR_rename
:
7170 p
= lock_user_string(arg1
);
7171 p2
= lock_user_string(arg2
);
7173 ret
= -TARGET_EFAULT
;
7175 ret
= get_errno(rename(p
, p2
));
7176 unlock_user(p2
, arg2
, 0);
7177 unlock_user(p
, arg1
, 0);
7181 #if defined(TARGET_NR_renameat)
7182 case TARGET_NR_renameat
:
7185 p
= lock_user_string(arg2
);
7186 p2
= lock_user_string(arg4
);
7188 ret
= -TARGET_EFAULT
;
7190 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
7191 unlock_user(p2
, arg4
, 0);
7192 unlock_user(p
, arg2
, 0);
7196 #ifdef TARGET_NR_mkdir
7197 case TARGET_NR_mkdir
:
7198 if (!(p
= lock_user_string(arg1
)))
7200 ret
= get_errno(mkdir(p
, arg2
));
7201 unlock_user(p
, arg1
, 0);
7204 #if defined(TARGET_NR_mkdirat)
7205 case TARGET_NR_mkdirat
:
7206 if (!(p
= lock_user_string(arg2
)))
7208 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
7209 unlock_user(p
, arg2
, 0);
7212 #ifdef TARGET_NR_rmdir
7213 case TARGET_NR_rmdir
:
7214 if (!(p
= lock_user_string(arg1
)))
7216 ret
= get_errno(rmdir(p
));
7217 unlock_user(p
, arg1
, 0);
7221 ret
= get_errno(dup(arg1
));
7223 fd_trans_dup(arg1
, ret
);
7226 #ifdef TARGET_NR_pipe
7227 case TARGET_NR_pipe
:
7228 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
7231 #ifdef TARGET_NR_pipe2
7232 case TARGET_NR_pipe2
:
7233 ret
= do_pipe(cpu_env
, arg1
,
7234 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
7237 case TARGET_NR_times
:
7239 struct target_tms
*tmsp
;
7241 ret
= get_errno(times(&tms
));
7243 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
7246 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
7247 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
7248 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
7249 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
7252 ret
= host_to_target_clock_t(ret
);
7255 #ifdef TARGET_NR_prof
7256 case TARGET_NR_prof
:
7259 #ifdef TARGET_NR_signal
7260 case TARGET_NR_signal
:
7263 case TARGET_NR_acct
:
7265 ret
= get_errno(acct(NULL
));
7267 if (!(p
= lock_user_string(arg1
)))
7269 ret
= get_errno(acct(path(p
)));
7270 unlock_user(p
, arg1
, 0);
7273 #ifdef TARGET_NR_umount2
7274 case TARGET_NR_umount2
:
7275 if (!(p
= lock_user_string(arg1
)))
7277 ret
= get_errno(umount2(p
, arg2
));
7278 unlock_user(p
, arg1
, 0);
7281 #ifdef TARGET_NR_lock
7282 case TARGET_NR_lock
:
7285 case TARGET_NR_ioctl
:
7286 ret
= do_ioctl(arg1
, arg2
, arg3
);
7288 case TARGET_NR_fcntl
:
7289 ret
= do_fcntl(arg1
, arg2
, arg3
);
7291 #ifdef TARGET_NR_mpx
7295 case TARGET_NR_setpgid
:
7296 ret
= get_errno(setpgid(arg1
, arg2
));
7298 #ifdef TARGET_NR_ulimit
7299 case TARGET_NR_ulimit
:
7302 #ifdef TARGET_NR_oldolduname
7303 case TARGET_NR_oldolduname
:
7306 case TARGET_NR_umask
:
7307 ret
= get_errno(umask(arg1
));
7309 case TARGET_NR_chroot
:
7310 if (!(p
= lock_user_string(arg1
)))
7312 ret
= get_errno(chroot(p
));
7313 unlock_user(p
, arg1
, 0);
7315 #ifdef TARGET_NR_ustat
7316 case TARGET_NR_ustat
:
7319 #ifdef TARGET_NR_dup2
7320 case TARGET_NR_dup2
:
7321 ret
= get_errno(dup2(arg1
, arg2
));
7323 fd_trans_dup(arg1
, arg2
);
7327 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7328 case TARGET_NR_dup3
:
7329 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
7331 fd_trans_dup(arg1
, arg2
);
7335 #ifdef TARGET_NR_getppid /* not on alpha */
7336 case TARGET_NR_getppid
:
7337 ret
= get_errno(getppid());
7340 #ifdef TARGET_NR_getpgrp
7341 case TARGET_NR_getpgrp
:
7342 ret
= get_errno(getpgrp());
7345 case TARGET_NR_setsid
:
7346 ret
= get_errno(setsid());
7348 #ifdef TARGET_NR_sigaction
7349 case TARGET_NR_sigaction
:
7351 #if defined(TARGET_ALPHA)
7352 struct target_sigaction act
, oact
, *pact
= 0;
7353 struct target_old_sigaction
*old_act
;
7355 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
7357 act
._sa_handler
= old_act
->_sa_handler
;
7358 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
7359 act
.sa_flags
= old_act
->sa_flags
;
7360 act
.sa_restorer
= 0;
7361 unlock_user_struct(old_act
, arg2
, 0);
7364 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7365 if (!is_error(ret
) && arg3
) {
7366 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
7368 old_act
->_sa_handler
= oact
._sa_handler
;
7369 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
7370 old_act
->sa_flags
= oact
.sa_flags
;
7371 unlock_user_struct(old_act
, arg3
, 1);
7373 #elif defined(TARGET_MIPS)
7374 struct target_sigaction act
, oact
, *pact
, *old_act
;
7377 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
7379 act
._sa_handler
= old_act
->_sa_handler
;
7380 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
7381 act
.sa_flags
= old_act
->sa_flags
;
7382 unlock_user_struct(old_act
, arg2
, 0);
7388 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7390 if (!is_error(ret
) && arg3
) {
7391 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
7393 old_act
->_sa_handler
= oact
._sa_handler
;
7394 old_act
->sa_flags
= oact
.sa_flags
;
7395 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
7396 old_act
->sa_mask
.sig
[1] = 0;
7397 old_act
->sa_mask
.sig
[2] = 0;
7398 old_act
->sa_mask
.sig
[3] = 0;
7399 unlock_user_struct(old_act
, arg3
, 1);
7402 struct target_old_sigaction
*old_act
;
7403 struct target_sigaction act
, oact
, *pact
;
7405 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
7407 act
._sa_handler
= old_act
->_sa_handler
;
7408 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
7409 act
.sa_flags
= old_act
->sa_flags
;
7410 act
.sa_restorer
= old_act
->sa_restorer
;
7411 unlock_user_struct(old_act
, arg2
, 0);
7416 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7417 if (!is_error(ret
) && arg3
) {
7418 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
7420 old_act
->_sa_handler
= oact
._sa_handler
;
7421 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
7422 old_act
->sa_flags
= oact
.sa_flags
;
7423 old_act
->sa_restorer
= oact
.sa_restorer
;
7424 unlock_user_struct(old_act
, arg3
, 1);
7430 case TARGET_NR_rt_sigaction
:
7432 #if defined(TARGET_ALPHA)
7433 struct target_sigaction act
, oact
, *pact
= 0;
7434 struct target_rt_sigaction
*rt_act
;
7435 /* ??? arg4 == sizeof(sigset_t). */
7437 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
7439 act
._sa_handler
= rt_act
->_sa_handler
;
7440 act
.sa_mask
= rt_act
->sa_mask
;
7441 act
.sa_flags
= rt_act
->sa_flags
;
7442 act
.sa_restorer
= arg5
;
7443 unlock_user_struct(rt_act
, arg2
, 0);
7446 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7447 if (!is_error(ret
) && arg3
) {
7448 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
7450 rt_act
->_sa_handler
= oact
._sa_handler
;
7451 rt_act
->sa_mask
= oact
.sa_mask
;
7452 rt_act
->sa_flags
= oact
.sa_flags
;
7453 unlock_user_struct(rt_act
, arg3
, 1);
7456 struct target_sigaction
*act
;
7457 struct target_sigaction
*oact
;
7460 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
7465 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
7466 ret
= -TARGET_EFAULT
;
7467 goto rt_sigaction_fail
;
7471 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
7474 unlock_user_struct(act
, arg2
, 0);
7476 unlock_user_struct(oact
, arg3
, 1);
7480 #ifdef TARGET_NR_sgetmask /* not on alpha */
7481 case TARGET_NR_sgetmask
:
7484 abi_ulong target_set
;
7485 do_sigprocmask(0, NULL
, &cur_set
);
7486 host_to_target_old_sigset(&target_set
, &cur_set
);
7491 #ifdef TARGET_NR_ssetmask /* not on alpha */
7492 case TARGET_NR_ssetmask
:
7494 sigset_t set
, oset
, cur_set
;
7495 abi_ulong target_set
= arg1
;
7496 do_sigprocmask(0, NULL
, &cur_set
);
7497 target_to_host_old_sigset(&set
, &target_set
);
7498 sigorset(&set
, &set
, &cur_set
);
7499 do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
7500 host_to_target_old_sigset(&target_set
, &oset
);
7505 #ifdef TARGET_NR_sigprocmask
7506 case TARGET_NR_sigprocmask
:
7508 #if defined(TARGET_ALPHA)
7509 sigset_t set
, oldset
;
7514 case TARGET_SIG_BLOCK
:
7517 case TARGET_SIG_UNBLOCK
:
7520 case TARGET_SIG_SETMASK
:
7524 ret
= -TARGET_EINVAL
;
7528 target_to_host_old_sigset(&set
, &mask
);
7530 ret
= get_errno(do_sigprocmask(how
, &set
, &oldset
));
7531 if (!is_error(ret
)) {
7532 host_to_target_old_sigset(&mask
, &oldset
);
7534 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
7537 sigset_t set
, oldset
, *set_ptr
;
7542 case TARGET_SIG_BLOCK
:
7545 case TARGET_SIG_UNBLOCK
:
7548 case TARGET_SIG_SETMASK
:
7552 ret
= -TARGET_EINVAL
;
7555 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
7557 target_to_host_old_sigset(&set
, p
);
7558 unlock_user(p
, arg2
, 0);
7564 ret
= get_errno(do_sigprocmask(how
, set_ptr
, &oldset
));
7565 if (!is_error(ret
) && arg3
) {
7566 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
7568 host_to_target_old_sigset(p
, &oldset
);
7569 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
7575 case TARGET_NR_rt_sigprocmask
:
7578 sigset_t set
, oldset
, *set_ptr
;
7582 case TARGET_SIG_BLOCK
:
7585 case TARGET_SIG_UNBLOCK
:
7588 case TARGET_SIG_SETMASK
:
7592 ret
= -TARGET_EINVAL
;
7595 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
7597 target_to_host_sigset(&set
, p
);
7598 unlock_user(p
, arg2
, 0);
7604 ret
= get_errno(do_sigprocmask(how
, set_ptr
, &oldset
));
7605 if (!is_error(ret
) && arg3
) {
7606 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
7608 host_to_target_sigset(p
, &oldset
);
7609 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
7613 #ifdef TARGET_NR_sigpending
7614 case TARGET_NR_sigpending
:
7617 ret
= get_errno(sigpending(&set
));
7618 if (!is_error(ret
)) {
7619 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
7621 host_to_target_old_sigset(p
, &set
);
7622 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
7627 case TARGET_NR_rt_sigpending
:
7630 ret
= get_errno(sigpending(&set
));
7631 if (!is_error(ret
)) {
7632 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
7634 host_to_target_sigset(p
, &set
);
7635 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
7639 #ifdef TARGET_NR_sigsuspend
7640 case TARGET_NR_sigsuspend
:
7643 #if defined(TARGET_ALPHA)
7644 abi_ulong mask
= arg1
;
7645 target_to_host_old_sigset(&set
, &mask
);
7647 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
7649 target_to_host_old_sigset(&set
, p
);
7650 unlock_user(p
, arg1
, 0);
7652 ret
= get_errno(safe_rt_sigsuspend(&set
, SIGSET_T_SIZE
));
7656 case TARGET_NR_rt_sigsuspend
:
7659 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
7661 target_to_host_sigset(&set
, p
);
7662 unlock_user(p
, arg1
, 0);
7663 ret
= get_errno(safe_rt_sigsuspend(&set
, SIGSET_T_SIZE
));
7666 case TARGET_NR_rt_sigtimedwait
:
7669 struct timespec uts
, *puts
;
7672 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
7674 target_to_host_sigset(&set
, p
);
7675 unlock_user(p
, arg1
, 0);
7678 target_to_host_timespec(puts
, arg3
);
7682 ret
= get_errno(sigtimedwait(&set
, &uinfo
, puts
));
7683 if (!is_error(ret
)) {
7685 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
7690 host_to_target_siginfo(p
, &uinfo
);
7691 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
7693 ret
= host_to_target_signal(ret
);
7697 case TARGET_NR_rt_sigqueueinfo
:
7700 if (!(p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_sigset_t
), 1)))
7702 target_to_host_siginfo(&uinfo
, p
);
7703 unlock_user(p
, arg1
, 0);
7704 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
7707 #ifdef TARGET_NR_sigreturn
7708 case TARGET_NR_sigreturn
:
7709 ret
= do_sigreturn(cpu_env
);
7712 case TARGET_NR_rt_sigreturn
:
7713 ret
= do_rt_sigreturn(cpu_env
);
7715 case TARGET_NR_sethostname
:
7716 if (!(p
= lock_user_string(arg1
)))
7718 ret
= get_errno(sethostname(p
, arg2
));
7719 unlock_user(p
, arg1
, 0);
7721 case TARGET_NR_setrlimit
:
7723 int resource
= target_to_host_resource(arg1
);
7724 struct target_rlimit
*target_rlim
;
7726 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
7728 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
7729 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
7730 unlock_user_struct(target_rlim
, arg2
, 0);
7731 ret
= get_errno(setrlimit(resource
, &rlim
));
7734 case TARGET_NR_getrlimit
:
7736 int resource
= target_to_host_resource(arg1
);
7737 struct target_rlimit
*target_rlim
;
7740 ret
= get_errno(getrlimit(resource
, &rlim
));
7741 if (!is_error(ret
)) {
7742 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
7744 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
7745 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
7746 unlock_user_struct(target_rlim
, arg2
, 1);
7750 case TARGET_NR_getrusage
:
7752 struct rusage rusage
;
7753 ret
= get_errno(getrusage(arg1
, &rusage
));
7754 if (!is_error(ret
)) {
7755 ret
= host_to_target_rusage(arg2
, &rusage
);
7759 case TARGET_NR_gettimeofday
:
7762 ret
= get_errno(gettimeofday(&tv
, NULL
));
7763 if (!is_error(ret
)) {
7764 if (copy_to_user_timeval(arg1
, &tv
))
7769 case TARGET_NR_settimeofday
:
7771 struct timeval tv
, *ptv
= NULL
;
7772 struct timezone tz
, *ptz
= NULL
;
7775 if (copy_from_user_timeval(&tv
, arg1
)) {
7782 if (copy_from_user_timezone(&tz
, arg2
)) {
7788 ret
= get_errno(settimeofday(ptv
, ptz
));
7791 #if defined(TARGET_NR_select)
7792 case TARGET_NR_select
:
7793 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
7794 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
7797 struct target_sel_arg_struct
*sel
;
7798 abi_ulong inp
, outp
, exp
, tvp
;
7801 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1))
7803 nsel
= tswapal(sel
->n
);
7804 inp
= tswapal(sel
->inp
);
7805 outp
= tswapal(sel
->outp
);
7806 exp
= tswapal(sel
->exp
);
7807 tvp
= tswapal(sel
->tvp
);
7808 unlock_user_struct(sel
, arg1
, 0);
7809 ret
= do_select(nsel
, inp
, outp
, exp
, tvp
);
7814 #ifdef TARGET_NR_pselect6
7815 case TARGET_NR_pselect6
:
7817 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
7818 fd_set rfds
, wfds
, efds
;
7819 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
7820 struct timespec ts
, *ts_ptr
;
7823 * The 6th arg is actually two args smashed together,
7824 * so we cannot use the C library.
7832 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
7833 target_sigset_t
*target_sigset
;
7841 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
7845 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
7849 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
7855 * This takes a timespec, and not a timeval, so we cannot
7856 * use the do_select() helper ...
7859 if (target_to_host_timespec(&ts
, ts_addr
)) {
7867 /* Extract the two packed args for the sigset */
7870 sig
.size
= SIGSET_T_SIZE
;
7872 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
7876 arg_sigset
= tswapal(arg7
[0]);
7877 arg_sigsize
= tswapal(arg7
[1]);
7878 unlock_user(arg7
, arg6
, 0);
7882 if (arg_sigsize
!= sizeof(*target_sigset
)) {
7883 /* Like the kernel, we enforce correct size sigsets */
7884 ret
= -TARGET_EINVAL
;
7887 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
7888 sizeof(*target_sigset
), 1);
7889 if (!target_sigset
) {
7892 target_to_host_sigset(&set
, target_sigset
);
7893 unlock_user(target_sigset
, arg_sigset
, 0);
7901 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
7904 if (!is_error(ret
)) {
7905 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
7907 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
7909 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
7912 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
7918 #ifdef TARGET_NR_symlink
7919 case TARGET_NR_symlink
:
7922 p
= lock_user_string(arg1
);
7923 p2
= lock_user_string(arg2
);
7925 ret
= -TARGET_EFAULT
;
7927 ret
= get_errno(symlink(p
, p2
));
7928 unlock_user(p2
, arg2
, 0);
7929 unlock_user(p
, arg1
, 0);
7933 #if defined(TARGET_NR_symlinkat)
7934 case TARGET_NR_symlinkat
:
7937 p
= lock_user_string(arg1
);
7938 p2
= lock_user_string(arg3
);
7940 ret
= -TARGET_EFAULT
;
7942 ret
= get_errno(symlinkat(p
, arg2
, p2
));
7943 unlock_user(p2
, arg3
, 0);
7944 unlock_user(p
, arg1
, 0);
7948 #ifdef TARGET_NR_oldlstat
7949 case TARGET_NR_oldlstat
:
7952 #ifdef TARGET_NR_readlink
7953 case TARGET_NR_readlink
:
7956 p
= lock_user_string(arg1
);
7957 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
7959 ret
= -TARGET_EFAULT
;
7961 /* Short circuit this for the magic exe check. */
7962 ret
= -TARGET_EINVAL
;
7963 } else if (is_proc_myself((const char *)p
, "exe")) {
7964 char real
[PATH_MAX
], *temp
;
7965 temp
= realpath(exec_path
, real
);
7966 /* Return value is # of bytes that we wrote to the buffer. */
7968 ret
= get_errno(-1);
7970 /* Don't worry about sign mismatch as earlier mapping
7971 * logic would have thrown a bad address error. */
7972 ret
= MIN(strlen(real
), arg3
);
7973 /* We cannot NUL terminate the string. */
7974 memcpy(p2
, real
, ret
);
7977 ret
= get_errno(readlink(path(p
), p2
, arg3
));
7979 unlock_user(p2
, arg2
, ret
);
7980 unlock_user(p
, arg1
, 0);
7984 #if defined(TARGET_NR_readlinkat)
7985 case TARGET_NR_readlinkat
:
7988 p
= lock_user_string(arg2
);
7989 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
7991 ret
= -TARGET_EFAULT
;
7992 } else if (is_proc_myself((const char *)p
, "exe")) {
7993 char real
[PATH_MAX
], *temp
;
7994 temp
= realpath(exec_path
, real
);
7995 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
7996 snprintf((char *)p2
, arg4
, "%s", real
);
7998 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
8000 unlock_user(p2
, arg3
, ret
);
8001 unlock_user(p
, arg2
, 0);
8005 #ifdef TARGET_NR_uselib
8006 case TARGET_NR_uselib
:
8009 #ifdef TARGET_NR_swapon
8010 case TARGET_NR_swapon
:
8011 if (!(p
= lock_user_string(arg1
)))
8013 ret
= get_errno(swapon(p
, arg2
));
8014 unlock_user(p
, arg1
, 0);
8017 case TARGET_NR_reboot
:
8018 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
8019 /* arg4 must be ignored in all other cases */
8020 p
= lock_user_string(arg4
);
8024 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
8025 unlock_user(p
, arg4
, 0);
8027 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
8030 #ifdef TARGET_NR_readdir
8031 case TARGET_NR_readdir
:
8034 #ifdef TARGET_NR_mmap
8035 case TARGET_NR_mmap
:
8036 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8037 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8038 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8039 || defined(TARGET_S390X)
8042 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
8043 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
8051 unlock_user(v
, arg1
, 0);
8052 ret
= get_errno(target_mmap(v1
, v2
, v3
,
8053 target_to_host_bitmask(v4
, mmap_flags_tbl
),
8057 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
8058 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
8064 #ifdef TARGET_NR_mmap2
8065 case TARGET_NR_mmap2
:
8067 #define MMAP_SHIFT 12
8069 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
8070 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
8072 arg6
<< MMAP_SHIFT
));
8075 case TARGET_NR_munmap
:
8076 ret
= get_errno(target_munmap(arg1
, arg2
));
8078 case TARGET_NR_mprotect
:
8080 TaskState
*ts
= cpu
->opaque
;
8081 /* Special hack to detect libc making the stack executable. */
8082 if ((arg3
& PROT_GROWSDOWN
)
8083 && arg1
>= ts
->info
->stack_limit
8084 && arg1
<= ts
->info
->start_stack
) {
8085 arg3
&= ~PROT_GROWSDOWN
;
8086 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
8087 arg1
= ts
->info
->stack_limit
;
8090 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
8092 #ifdef TARGET_NR_mremap
8093 case TARGET_NR_mremap
:
8094 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
8097 /* ??? msync/mlock/munlock are broken for softmmu. */
8098 #ifdef TARGET_NR_msync
8099 case TARGET_NR_msync
:
8100 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
8103 #ifdef TARGET_NR_mlock
8104 case TARGET_NR_mlock
:
8105 ret
= get_errno(mlock(g2h(arg1
), arg2
));
8108 #ifdef TARGET_NR_munlock
8109 case TARGET_NR_munlock
:
8110 ret
= get_errno(munlock(g2h(arg1
), arg2
));
8113 #ifdef TARGET_NR_mlockall
8114 case TARGET_NR_mlockall
:
8115 ret
= get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
8118 #ifdef TARGET_NR_munlockall
8119 case TARGET_NR_munlockall
:
8120 ret
= get_errno(munlockall());
8123 case TARGET_NR_truncate
:
8124 if (!(p
= lock_user_string(arg1
)))
8126 ret
= get_errno(truncate(p
, arg2
));
8127 unlock_user(p
, arg1
, 0);
8129 case TARGET_NR_ftruncate
:
8130 ret
= get_errno(ftruncate(arg1
, arg2
));
8132 case TARGET_NR_fchmod
:
8133 ret
= get_errno(fchmod(arg1
, arg2
));
8135 #if defined(TARGET_NR_fchmodat)
8136 case TARGET_NR_fchmodat
:
8137 if (!(p
= lock_user_string(arg2
)))
8139 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
8140 unlock_user(p
, arg2
, 0);
8143 case TARGET_NR_getpriority
:
8144 /* Note that negative values are valid for getpriority, so we must
8145 differentiate based on errno settings. */
8147 ret
= getpriority(arg1
, arg2
);
8148 if (ret
== -1 && errno
!= 0) {
8149 ret
= -host_to_target_errno(errno
);
8153 /* Return value is the unbiased priority. Signal no error. */
8154 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
8156 /* Return value is a biased priority to avoid negative numbers. */
8160 case TARGET_NR_setpriority
:
8161 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
8163 #ifdef TARGET_NR_profil
8164 case TARGET_NR_profil
:
8167 case TARGET_NR_statfs
:
8168 if (!(p
= lock_user_string(arg1
)))
8170 ret
= get_errno(statfs(path(p
), &stfs
));
8171 unlock_user(p
, arg1
, 0);
8173 if (!is_error(ret
)) {
8174 struct target_statfs
*target_stfs
;
8176 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
8178 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
8179 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
8180 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
8181 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
8182 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
8183 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
8184 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
8185 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
8186 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
8187 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
8188 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
8189 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
8190 unlock_user_struct(target_stfs
, arg2
, 1);
8193 case TARGET_NR_fstatfs
:
8194 ret
= get_errno(fstatfs(arg1
, &stfs
));
8195 goto convert_statfs
;
8196 #ifdef TARGET_NR_statfs64
8197 case TARGET_NR_statfs64
:
8198 if (!(p
= lock_user_string(arg1
)))
8200 ret
= get_errno(statfs(path(p
), &stfs
));
8201 unlock_user(p
, arg1
, 0);
8203 if (!is_error(ret
)) {
8204 struct target_statfs64
*target_stfs
;
8206 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
8208 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
8209 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
8210 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
8211 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
8212 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
8213 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
8214 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
8215 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
8216 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
8217 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
8218 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
8219 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
8220 unlock_user_struct(target_stfs
, arg3
, 1);
8223 case TARGET_NR_fstatfs64
:
8224 ret
= get_errno(fstatfs(arg1
, &stfs
));
8225 goto convert_statfs64
;
8227 #ifdef TARGET_NR_ioperm
8228 case TARGET_NR_ioperm
:
8231 #ifdef TARGET_NR_socketcall
8232 case TARGET_NR_socketcall
:
8233 ret
= do_socketcall(arg1
, arg2
);
8236 #ifdef TARGET_NR_accept
8237 case TARGET_NR_accept
:
8238 ret
= do_accept4(arg1
, arg2
, arg3
, 0);
8241 #ifdef TARGET_NR_accept4
8242 case TARGET_NR_accept4
:
8243 #ifdef CONFIG_ACCEPT4
8244 ret
= do_accept4(arg1
, arg2
, arg3
, arg4
);
8250 #ifdef TARGET_NR_bind
8251 case TARGET_NR_bind
:
8252 ret
= do_bind(arg1
, arg2
, arg3
);
8255 #ifdef TARGET_NR_connect
8256 case TARGET_NR_connect
:
8257 ret
= do_connect(arg1
, arg2
, arg3
);
8260 #ifdef TARGET_NR_getpeername
8261 case TARGET_NR_getpeername
:
8262 ret
= do_getpeername(arg1
, arg2
, arg3
);
8265 #ifdef TARGET_NR_getsockname
8266 case TARGET_NR_getsockname
:
8267 ret
= do_getsockname(arg1
, arg2
, arg3
);
8270 #ifdef TARGET_NR_getsockopt
8271 case TARGET_NR_getsockopt
:
8272 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
8275 #ifdef TARGET_NR_listen
8276 case TARGET_NR_listen
:
8277 ret
= get_errno(listen(arg1
, arg2
));
8280 #ifdef TARGET_NR_recv
8281 case TARGET_NR_recv
:
8282 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
8285 #ifdef TARGET_NR_recvfrom
8286 case TARGET_NR_recvfrom
:
8287 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8290 #ifdef TARGET_NR_recvmsg
8291 case TARGET_NR_recvmsg
:
8292 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
8295 #ifdef TARGET_NR_send
8296 case TARGET_NR_send
:
8297 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
8300 #ifdef TARGET_NR_sendmsg
8301 case TARGET_NR_sendmsg
:
8302 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
8305 #ifdef TARGET_NR_sendmmsg
8306 case TARGET_NR_sendmmsg
:
8307 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
8309 case TARGET_NR_recvmmsg
:
8310 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
8313 #ifdef TARGET_NR_sendto
8314 case TARGET_NR_sendto
:
8315 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8318 #ifdef TARGET_NR_shutdown
8319 case TARGET_NR_shutdown
:
8320 ret
= get_errno(shutdown(arg1
, arg2
));
8323 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8324 case TARGET_NR_getrandom
:
8325 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
8329 ret
= get_errno(getrandom(p
, arg2
, arg3
));
8330 unlock_user(p
, arg1
, ret
);
8333 #ifdef TARGET_NR_socket
8334 case TARGET_NR_socket
:
8335 ret
= do_socket(arg1
, arg2
, arg3
);
8336 fd_trans_unregister(ret
);
8339 #ifdef TARGET_NR_socketpair
8340 case TARGET_NR_socketpair
:
8341 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
8344 #ifdef TARGET_NR_setsockopt
8345 case TARGET_NR_setsockopt
:
8346 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
8350 case TARGET_NR_syslog
:
8351 if (!(p
= lock_user_string(arg2
)))
8353 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
8354 unlock_user(p
, arg2
, 0);
8357 case TARGET_NR_setitimer
:
8359 struct itimerval value
, ovalue
, *pvalue
;
8363 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
8364 || copy_from_user_timeval(&pvalue
->it_value
,
8365 arg2
+ sizeof(struct target_timeval
)))
8370 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
8371 if (!is_error(ret
) && arg3
) {
8372 if (copy_to_user_timeval(arg3
,
8373 &ovalue
.it_interval
)
8374 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
8380 case TARGET_NR_getitimer
:
8382 struct itimerval value
;
8384 ret
= get_errno(getitimer(arg1
, &value
));
8385 if (!is_error(ret
) && arg2
) {
8386 if (copy_to_user_timeval(arg2
,
8388 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
8394 #ifdef TARGET_NR_stat
8395 case TARGET_NR_stat
:
8396 if (!(p
= lock_user_string(arg1
)))
8398 ret
= get_errno(stat(path(p
), &st
));
8399 unlock_user(p
, arg1
, 0);
8402 #ifdef TARGET_NR_lstat
8403 case TARGET_NR_lstat
:
8404 if (!(p
= lock_user_string(arg1
)))
8406 ret
= get_errno(lstat(path(p
), &st
));
8407 unlock_user(p
, arg1
, 0);
8410 case TARGET_NR_fstat
:
8412 ret
= get_errno(fstat(arg1
, &st
));
8413 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
8416 if (!is_error(ret
)) {
8417 struct target_stat
*target_st
;
8419 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
8421 memset(target_st
, 0, sizeof(*target_st
));
8422 __put_user(st
.st_dev
, &target_st
->st_dev
);
8423 __put_user(st
.st_ino
, &target_st
->st_ino
);
8424 __put_user(st
.st_mode
, &target_st
->st_mode
);
8425 __put_user(st
.st_uid
, &target_st
->st_uid
);
8426 __put_user(st
.st_gid
, &target_st
->st_gid
);
8427 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
8428 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
8429 __put_user(st
.st_size
, &target_st
->st_size
);
8430 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
8431 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
8432 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
8433 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
8434 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
8435 unlock_user_struct(target_st
, arg2
, 1);
8439 #ifdef TARGET_NR_olduname
8440 case TARGET_NR_olduname
:
8443 #ifdef TARGET_NR_iopl
8444 case TARGET_NR_iopl
:
8447 case TARGET_NR_vhangup
:
8448 ret
= get_errno(vhangup());
8450 #ifdef TARGET_NR_idle
8451 case TARGET_NR_idle
:
8454 #ifdef TARGET_NR_syscall
8455 case TARGET_NR_syscall
:
8456 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
8457 arg6
, arg7
, arg8
, 0);
8460 case TARGET_NR_wait4
:
8463 abi_long status_ptr
= arg2
;
8464 struct rusage rusage
, *rusage_ptr
;
8465 abi_ulong target_rusage
= arg4
;
8466 abi_long rusage_err
;
8468 rusage_ptr
= &rusage
;
8471 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
8472 if (!is_error(ret
)) {
8473 if (status_ptr
&& ret
) {
8474 status
= host_to_target_waitstatus(status
);
8475 if (put_user_s32(status
, status_ptr
))
8478 if (target_rusage
) {
8479 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
8487 #ifdef TARGET_NR_swapoff
8488 case TARGET_NR_swapoff
:
8489 if (!(p
= lock_user_string(arg1
)))
8491 ret
= get_errno(swapoff(p
));
8492 unlock_user(p
, arg1
, 0);
8495 case TARGET_NR_sysinfo
:
8497 struct target_sysinfo
*target_value
;
8498 struct sysinfo value
;
8499 ret
= get_errno(sysinfo(&value
));
8500 if (!is_error(ret
) && arg1
)
8502 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
8504 __put_user(value
.uptime
, &target_value
->uptime
);
8505 __put_user(value
.loads
[0], &target_value
->loads
[0]);
8506 __put_user(value
.loads
[1], &target_value
->loads
[1]);
8507 __put_user(value
.loads
[2], &target_value
->loads
[2]);
8508 __put_user(value
.totalram
, &target_value
->totalram
);
8509 __put_user(value
.freeram
, &target_value
->freeram
);
8510 __put_user(value
.sharedram
, &target_value
->sharedram
);
8511 __put_user(value
.bufferram
, &target_value
->bufferram
);
8512 __put_user(value
.totalswap
, &target_value
->totalswap
);
8513 __put_user(value
.freeswap
, &target_value
->freeswap
);
8514 __put_user(value
.procs
, &target_value
->procs
);
8515 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
8516 __put_user(value
.freehigh
, &target_value
->freehigh
);
8517 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
8518 unlock_user_struct(target_value
, arg1
, 1);
8522 #ifdef TARGET_NR_ipc
8524 ret
= do_ipc(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8527 #ifdef TARGET_NR_semget
8528 case TARGET_NR_semget
:
8529 ret
= get_errno(semget(arg1
, arg2
, arg3
));
8532 #ifdef TARGET_NR_semop
8533 case TARGET_NR_semop
:
8534 ret
= do_semop(arg1
, arg2
, arg3
);
8537 #ifdef TARGET_NR_semctl
8538 case TARGET_NR_semctl
:
8539 ret
= do_semctl(arg1
, arg2
, arg3
, arg4
);
8542 #ifdef TARGET_NR_msgctl
8543 case TARGET_NR_msgctl
:
8544 ret
= do_msgctl(arg1
, arg2
, arg3
);
8547 #ifdef TARGET_NR_msgget
8548 case TARGET_NR_msgget
:
8549 ret
= get_errno(msgget(arg1
, arg2
));
8552 #ifdef TARGET_NR_msgrcv
8553 case TARGET_NR_msgrcv
:
8554 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
8557 #ifdef TARGET_NR_msgsnd
8558 case TARGET_NR_msgsnd
:
8559 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
8562 #ifdef TARGET_NR_shmget
8563 case TARGET_NR_shmget
:
8564 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
8567 #ifdef TARGET_NR_shmctl
8568 case TARGET_NR_shmctl
:
8569 ret
= do_shmctl(arg1
, arg2
, arg3
);
8572 #ifdef TARGET_NR_shmat
8573 case TARGET_NR_shmat
:
8574 ret
= do_shmat(arg1
, arg2
, arg3
);
8577 #ifdef TARGET_NR_shmdt
8578 case TARGET_NR_shmdt
:
8579 ret
= do_shmdt(arg1
);
8582 case TARGET_NR_fsync
:
8583 ret
= get_errno(fsync(arg1
));
8585 case TARGET_NR_clone
:
8586 /* Linux manages to have three different orderings for its
8587 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
8588 * match the kernel's CONFIG_CLONE_* settings.
8589 * Microblaze is further special in that it uses a sixth
8590 * implicit argument to clone for the TLS pointer.
8592 #if defined(TARGET_MICROBLAZE)
8593 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
8594 #elif defined(TARGET_CLONE_BACKWARDS)
8595 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
8596 #elif defined(TARGET_CLONE_BACKWARDS2)
8597 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
8599 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
8602 #ifdef __NR_exit_group
8603 /* new thread calls */
8604 case TARGET_NR_exit_group
:
8608 gdb_exit(cpu_env
, arg1
);
8609 ret
= get_errno(exit_group(arg1
));
8612 case TARGET_NR_setdomainname
:
8613 if (!(p
= lock_user_string(arg1
)))
8615 ret
= get_errno(setdomainname(p
, arg2
));
8616 unlock_user(p
, arg1
, 0);
8618 case TARGET_NR_uname
:
8619 /* no need to transcode because we use the linux syscall */
8621 struct new_utsname
* buf
;
8623 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
8625 ret
= get_errno(sys_uname(buf
));
8626 if (!is_error(ret
)) {
8627 /* Overrite the native machine name with whatever is being
8629 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
8630 /* Allow the user to override the reported release. */
8631 if (qemu_uname_release
&& *qemu_uname_release
)
8632 strcpy (buf
->release
, qemu_uname_release
);
8634 unlock_user_struct(buf
, arg1
, 1);
8638 case TARGET_NR_modify_ldt
:
8639 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
8641 #if !defined(TARGET_X86_64)
8642 case TARGET_NR_vm86old
:
8644 case TARGET_NR_vm86
:
8645 ret
= do_vm86(cpu_env
, arg1
, arg2
);
8649 case TARGET_NR_adjtimex
:
8651 #ifdef TARGET_NR_create_module
8652 case TARGET_NR_create_module
:
8654 case TARGET_NR_init_module
:
8655 case TARGET_NR_delete_module
:
8656 #ifdef TARGET_NR_get_kernel_syms
8657 case TARGET_NR_get_kernel_syms
:
8660 case TARGET_NR_quotactl
:
8662 case TARGET_NR_getpgid
:
8663 ret
= get_errno(getpgid(arg1
));
8665 case TARGET_NR_fchdir
:
8666 ret
= get_errno(fchdir(arg1
));
8668 #ifdef TARGET_NR_bdflush /* not on x86_64 */
8669 case TARGET_NR_bdflush
:
8672 #ifdef TARGET_NR_sysfs
8673 case TARGET_NR_sysfs
:
8676 case TARGET_NR_personality
:
8677 ret
= get_errno(personality(arg1
));
8679 #ifdef TARGET_NR_afs_syscall
8680 case TARGET_NR_afs_syscall
:
8683 #ifdef TARGET_NR__llseek /* Not on alpha */
8684 case TARGET_NR__llseek
:
8687 #if !defined(__NR_llseek)
8688 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | arg3
, arg5
);
8690 ret
= get_errno(res
);
8695 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
8697 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
8703 #ifdef TARGET_NR_getdents
8704 case TARGET_NR_getdents
:
8705 #ifdef __NR_getdents
8706 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
8708 struct target_dirent
*target_dirp
;
8709 struct linux_dirent
*dirp
;
8710 abi_long count
= arg3
;
8712 dirp
= g_try_malloc(count
);
8714 ret
= -TARGET_ENOMEM
;
8718 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
8719 if (!is_error(ret
)) {
8720 struct linux_dirent
*de
;
8721 struct target_dirent
*tde
;
8723 int reclen
, treclen
;
8724 int count1
, tnamelen
;
8728 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
8732 reclen
= de
->d_reclen
;
8733 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
8734 assert(tnamelen
>= 0);
8735 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
8736 assert(count1
+ treclen
<= count
);
8737 tde
->d_reclen
= tswap16(treclen
);
8738 tde
->d_ino
= tswapal(de
->d_ino
);
8739 tde
->d_off
= tswapal(de
->d_off
);
8740 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
8741 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
8743 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
8747 unlock_user(target_dirp
, arg2
, ret
);
8753 struct linux_dirent
*dirp
;
8754 abi_long count
= arg3
;
8756 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
8758 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
8759 if (!is_error(ret
)) {
8760 struct linux_dirent
*de
;
8765 reclen
= de
->d_reclen
;
8768 de
->d_reclen
= tswap16(reclen
);
8769 tswapls(&de
->d_ino
);
8770 tswapls(&de
->d_off
);
8771 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
8775 unlock_user(dirp
, arg2
, ret
);
8779 /* Implement getdents in terms of getdents64 */
8781 struct linux_dirent64
*dirp
;
8782 abi_long count
= arg3
;
8784 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
8788 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
8789 if (!is_error(ret
)) {
8790 /* Convert the dirent64 structs to target dirent. We do this
8791 * in-place, since we can guarantee that a target_dirent is no
8792 * larger than a dirent64; however this means we have to be
8793 * careful to read everything before writing in the new format.
8795 struct linux_dirent64
*de
;
8796 struct target_dirent
*tde
;
8801 tde
= (struct target_dirent
*)dirp
;
8803 int namelen
, treclen
;
8804 int reclen
= de
->d_reclen
;
8805 uint64_t ino
= de
->d_ino
;
8806 int64_t off
= de
->d_off
;
8807 uint8_t type
= de
->d_type
;
8809 namelen
= strlen(de
->d_name
);
8810 treclen
= offsetof(struct target_dirent
, d_name
)
8812 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
8814 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
8815 tde
->d_ino
= tswapal(ino
);
8816 tde
->d_off
= tswapal(off
);
8817 tde
->d_reclen
= tswap16(treclen
);
8818 /* The target_dirent type is in what was formerly a padding
8819 * byte at the end of the structure:
8821 *(((char *)tde
) + treclen
- 1) = type
;
8823 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
8824 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
8830 unlock_user(dirp
, arg2
, ret
);
8834 #endif /* TARGET_NR_getdents */
8835 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8836 case TARGET_NR_getdents64
:
8838 struct linux_dirent64
*dirp
;
8839 abi_long count
= arg3
;
8840 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
8842 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
8843 if (!is_error(ret
)) {
8844 struct linux_dirent64
*de
;
8849 reclen
= de
->d_reclen
;
8852 de
->d_reclen
= tswap16(reclen
);
8853 tswap64s((uint64_t *)&de
->d_ino
);
8854 tswap64s((uint64_t *)&de
->d_off
);
8855 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
8859 unlock_user(dirp
, arg2
, ret
);
8862 #endif /* TARGET_NR_getdents64 */
8863 #if defined(TARGET_NR__newselect)
8864 case TARGET_NR__newselect
:
8865 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
8868 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
8869 # ifdef TARGET_NR_poll
8870 case TARGET_NR_poll
:
8872 # ifdef TARGET_NR_ppoll
8873 case TARGET_NR_ppoll
:
8876 struct target_pollfd
*target_pfd
;
8877 unsigned int nfds
= arg2
;
8885 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
8886 sizeof(struct target_pollfd
) * nfds
, 1);
8891 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
8892 for (i
= 0; i
< nfds
; i
++) {
8893 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
8894 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
8898 # ifdef TARGET_NR_ppoll
8899 if (num
== TARGET_NR_ppoll
) {
8900 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
8901 target_sigset_t
*target_set
;
8902 sigset_t _set
, *set
= &_set
;
8905 if (target_to_host_timespec(timeout_ts
, arg3
)) {
8906 unlock_user(target_pfd
, arg1
, 0);
8914 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
8916 unlock_user(target_pfd
, arg1
, 0);
8919 target_to_host_sigset(set
, target_set
);
8924 ret
= get_errno(sys_ppoll(pfd
, nfds
, timeout_ts
,
8925 set
, SIGSET_T_SIZE
));
8927 if (!is_error(ret
) && arg3
) {
8928 host_to_target_timespec(arg3
, timeout_ts
);
8931 unlock_user(target_set
, arg4
, 0);
8935 ret
= get_errno(poll(pfd
, nfds
, timeout
));
8937 if (!is_error(ret
)) {
8938 for(i
= 0; i
< nfds
; i
++) {
8939 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
8942 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
8946 case TARGET_NR_flock
:
8947 /* NOTE: the flock constant seems to be the same for every
8949 ret
= get_errno(flock(arg1
, arg2
));
8951 case TARGET_NR_readv
:
8953 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
8955 ret
= get_errno(readv(arg1
, vec
, arg3
));
8956 unlock_iovec(vec
, arg2
, arg3
, 1);
8958 ret
= -host_to_target_errno(errno
);
8962 case TARGET_NR_writev
:
8964 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
8966 ret
= get_errno(writev(arg1
, vec
, arg3
));
8967 unlock_iovec(vec
, arg2
, arg3
, 0);
8969 ret
= -host_to_target_errno(errno
);
8973 case TARGET_NR_getsid
:
8974 ret
= get_errno(getsid(arg1
));
8976 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
8977 case TARGET_NR_fdatasync
:
8978 ret
= get_errno(fdatasync(arg1
));
8981 #ifdef TARGET_NR__sysctl
8982 case TARGET_NR__sysctl
:
8983 /* We don't implement this, but ENOTDIR is always a safe
8985 ret
= -TARGET_ENOTDIR
;
8988 case TARGET_NR_sched_getaffinity
:
8990 unsigned int mask_size
;
8991 unsigned long *mask
;
8994 * sched_getaffinity needs multiples of ulong, so need to take
8995 * care of mismatches between target ulong and host ulong sizes.
8997 if (arg2
& (sizeof(abi_ulong
) - 1)) {
8998 ret
= -TARGET_EINVAL
;
9001 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
9003 mask
= alloca(mask_size
);
9004 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
9006 if (!is_error(ret
)) {
9008 /* More data returned than the caller's buffer will fit.
9009 * This only happens if sizeof(abi_long) < sizeof(long)
9010 * and the caller passed us a buffer holding an odd number
9011 * of abi_longs. If the host kernel is actually using the
9012 * extra 4 bytes then fail EINVAL; otherwise we can just
9013 * ignore them and only copy the interesting part.
9015 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
9016 if (numcpus
> arg2
* 8) {
9017 ret
= -TARGET_EINVAL
;
9023 if (copy_to_user(arg3
, mask
, ret
)) {
9029 case TARGET_NR_sched_setaffinity
:
9031 unsigned int mask_size
;
9032 unsigned long *mask
;
9035 * sched_setaffinity needs multiples of ulong, so need to take
9036 * care of mismatches between target ulong and host ulong sizes.
9038 if (arg2
& (sizeof(abi_ulong
) - 1)) {
9039 ret
= -TARGET_EINVAL
;
9042 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
9044 mask
= alloca(mask_size
);
9045 if (!lock_user_struct(VERIFY_READ
, p
, arg3
, 1)) {
9048 memcpy(mask
, p
, arg2
);
9049 unlock_user_struct(p
, arg2
, 0);
9051 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
9054 case TARGET_NR_sched_setparam
:
9056 struct sched_param
*target_schp
;
9057 struct sched_param schp
;
9060 return -TARGET_EINVAL
;
9062 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
9064 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
9065 unlock_user_struct(target_schp
, arg2
, 0);
9066 ret
= get_errno(sched_setparam(arg1
, &schp
));
9069 case TARGET_NR_sched_getparam
:
9071 struct sched_param
*target_schp
;
9072 struct sched_param schp
;
9075 return -TARGET_EINVAL
;
9077 ret
= get_errno(sched_getparam(arg1
, &schp
));
9078 if (!is_error(ret
)) {
9079 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
9081 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
9082 unlock_user_struct(target_schp
, arg2
, 1);
9086 case TARGET_NR_sched_setscheduler
:
9088 struct sched_param
*target_schp
;
9089 struct sched_param schp
;
9091 return -TARGET_EINVAL
;
9093 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
9095 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
9096 unlock_user_struct(target_schp
, arg3
, 0);
9097 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
9100 case TARGET_NR_sched_getscheduler
:
9101 ret
= get_errno(sched_getscheduler(arg1
));
9103 case TARGET_NR_sched_yield
:
9104 ret
= get_errno(sched_yield());
9106 case TARGET_NR_sched_get_priority_max
:
9107 ret
= get_errno(sched_get_priority_max(arg1
));
9109 case TARGET_NR_sched_get_priority_min
:
9110 ret
= get_errno(sched_get_priority_min(arg1
));
9112 case TARGET_NR_sched_rr_get_interval
:
9115 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
9116 if (!is_error(ret
)) {
9117 ret
= host_to_target_timespec(arg2
, &ts
);
9121 case TARGET_NR_nanosleep
:
9123 struct timespec req
, rem
;
9124 target_to_host_timespec(&req
, arg1
);
9125 ret
= get_errno(nanosleep(&req
, &rem
));
9126 if (is_error(ret
) && arg2
) {
9127 host_to_target_timespec(arg2
, &rem
);
9131 #ifdef TARGET_NR_query_module
9132 case TARGET_NR_query_module
:
9135 #ifdef TARGET_NR_nfsservctl
9136 case TARGET_NR_nfsservctl
:
9139 case TARGET_NR_prctl
:
9141 case PR_GET_PDEATHSIG
:
9144 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
9145 if (!is_error(ret
) && arg2
9146 && put_user_ual(deathsig
, arg2
)) {
9154 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
9158 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
9160 unlock_user(name
, arg2
, 16);
9165 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
9169 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
9171 unlock_user(name
, arg2
, 0);
9176 /* Most prctl options have no pointer arguments */
9177 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
9181 #ifdef TARGET_NR_arch_prctl
9182 case TARGET_NR_arch_prctl
:
9183 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9184 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
9190 #ifdef TARGET_NR_pread64
9191 case TARGET_NR_pread64
:
9192 if (regpairs_aligned(cpu_env
)) {
9196 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
9198 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
9199 unlock_user(p
, arg2
, ret
);
9201 case TARGET_NR_pwrite64
:
9202 if (regpairs_aligned(cpu_env
)) {
9206 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
9208 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
9209 unlock_user(p
, arg2
, 0);
9212 case TARGET_NR_getcwd
:
9213 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
9215 ret
= get_errno(sys_getcwd1(p
, arg2
));
9216 unlock_user(p
, arg1
, ret
);
9218 case TARGET_NR_capget
:
9219 case TARGET_NR_capset
:
9221 struct target_user_cap_header
*target_header
;
9222 struct target_user_cap_data
*target_data
= NULL
;
9223 struct __user_cap_header_struct header
;
9224 struct __user_cap_data_struct data
[2];
9225 struct __user_cap_data_struct
*dataptr
= NULL
;
9226 int i
, target_datalen
;
9229 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
9232 header
.version
= tswap32(target_header
->version
);
9233 header
.pid
= tswap32(target_header
->pid
);
9235 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
9236 /* Version 2 and up takes pointer to two user_data structs */
9240 target_datalen
= sizeof(*target_data
) * data_items
;
9243 if (num
== TARGET_NR_capget
) {
9244 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
9246 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
9249 unlock_user_struct(target_header
, arg1
, 0);
9253 if (num
== TARGET_NR_capset
) {
9254 for (i
= 0; i
< data_items
; i
++) {
9255 data
[i
].effective
= tswap32(target_data
[i
].effective
);
9256 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
9257 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
9264 if (num
== TARGET_NR_capget
) {
9265 ret
= get_errno(capget(&header
, dataptr
));
9267 ret
= get_errno(capset(&header
, dataptr
));
9270 /* The kernel always updates version for both capget and capset */
9271 target_header
->version
= tswap32(header
.version
);
9272 unlock_user_struct(target_header
, arg1
, 1);
9275 if (num
== TARGET_NR_capget
) {
9276 for (i
= 0; i
< data_items
; i
++) {
9277 target_data
[i
].effective
= tswap32(data
[i
].effective
);
9278 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
9279 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
9281 unlock_user(target_data
, arg2
, target_datalen
);
9283 unlock_user(target_data
, arg2
, 0);
9288 case TARGET_NR_sigaltstack
:
9289 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUArchState
*)cpu_env
));
9292 #ifdef CONFIG_SENDFILE
9293 case TARGET_NR_sendfile
:
9298 ret
= get_user_sal(off
, arg3
);
9299 if (is_error(ret
)) {
9304 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
9305 if (!is_error(ret
) && arg3
) {
9306 abi_long ret2
= put_user_sal(off
, arg3
);
9307 if (is_error(ret2
)) {
9313 #ifdef TARGET_NR_sendfile64
9314 case TARGET_NR_sendfile64
:
9319 ret
= get_user_s64(off
, arg3
);
9320 if (is_error(ret
)) {
9325 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
9326 if (!is_error(ret
) && arg3
) {
9327 abi_long ret2
= put_user_s64(off
, arg3
);
9328 if (is_error(ret2
)) {
9336 case TARGET_NR_sendfile
:
9337 #ifdef TARGET_NR_sendfile64
9338 case TARGET_NR_sendfile64
:
9343 #ifdef TARGET_NR_getpmsg
9344 case TARGET_NR_getpmsg
:
9347 #ifdef TARGET_NR_putpmsg
9348 case TARGET_NR_putpmsg
:
9351 #ifdef TARGET_NR_vfork
9352 case TARGET_NR_vfork
:
9353 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
9357 #ifdef TARGET_NR_ugetrlimit
9358 case TARGET_NR_ugetrlimit
:
9361 int resource
= target_to_host_resource(arg1
);
9362 ret
= get_errno(getrlimit(resource
, &rlim
));
9363 if (!is_error(ret
)) {
9364 struct target_rlimit
*target_rlim
;
9365 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
9367 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
9368 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
9369 unlock_user_struct(target_rlim
, arg2
, 1);
9374 #ifdef TARGET_NR_truncate64
9375 case TARGET_NR_truncate64
:
9376 if (!(p
= lock_user_string(arg1
)))
9378 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
9379 unlock_user(p
, arg1
, 0);
9382 #ifdef TARGET_NR_ftruncate64
9383 case TARGET_NR_ftruncate64
:
9384 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
9387 #ifdef TARGET_NR_stat64
9388 case TARGET_NR_stat64
:
9389 if (!(p
= lock_user_string(arg1
)))
9391 ret
= get_errno(stat(path(p
), &st
));
9392 unlock_user(p
, arg1
, 0);
9394 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
9397 #ifdef TARGET_NR_lstat64
9398 case TARGET_NR_lstat64
:
9399 if (!(p
= lock_user_string(arg1
)))
9401 ret
= get_errno(lstat(path(p
), &st
));
9402 unlock_user(p
, arg1
, 0);
9404 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
9407 #ifdef TARGET_NR_fstat64
9408 case TARGET_NR_fstat64
:
9409 ret
= get_errno(fstat(arg1
, &st
));
9411 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
9414 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
9415 #ifdef TARGET_NR_fstatat64
9416 case TARGET_NR_fstatat64
:
9418 #ifdef TARGET_NR_newfstatat
9419 case TARGET_NR_newfstatat
:
9421 if (!(p
= lock_user_string(arg2
)))
9423 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
9425 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
9428 #ifdef TARGET_NR_lchown
9429 case TARGET_NR_lchown
:
9430 if (!(p
= lock_user_string(arg1
)))
9432 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
9433 unlock_user(p
, arg1
, 0);
9436 #ifdef TARGET_NR_getuid
9437 case TARGET_NR_getuid
:
9438 ret
= get_errno(high2lowuid(getuid()));
9441 #ifdef TARGET_NR_getgid
9442 case TARGET_NR_getgid
:
9443 ret
= get_errno(high2lowgid(getgid()));
9446 #ifdef TARGET_NR_geteuid
9447 case TARGET_NR_geteuid
:
9448 ret
= get_errno(high2lowuid(geteuid()));
9451 #ifdef TARGET_NR_getegid
9452 case TARGET_NR_getegid
:
9453 ret
= get_errno(high2lowgid(getegid()));
9456 case TARGET_NR_setreuid
:
9457 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
9459 case TARGET_NR_setregid
:
9460 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
9462 case TARGET_NR_getgroups
:
9464 int gidsetsize
= arg1
;
9465 target_id
*target_grouplist
;
9469 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
9470 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
9471 if (gidsetsize
== 0)
9473 if (!is_error(ret
)) {
9474 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
9475 if (!target_grouplist
)
9477 for(i
= 0;i
< ret
; i
++)
9478 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
9479 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
9483 case TARGET_NR_setgroups
:
9485 int gidsetsize
= arg1
;
9486 target_id
*target_grouplist
;
9487 gid_t
*grouplist
= NULL
;
9490 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
9491 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
9492 if (!target_grouplist
) {
9493 ret
= -TARGET_EFAULT
;
9496 for (i
= 0; i
< gidsetsize
; i
++) {
9497 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
9499 unlock_user(target_grouplist
, arg2
, 0);
9501 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
9504 case TARGET_NR_fchown
:
9505 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
9507 #if defined(TARGET_NR_fchownat)
9508 case TARGET_NR_fchownat
:
9509 if (!(p
= lock_user_string(arg2
)))
9511 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
9512 low2highgid(arg4
), arg5
));
9513 unlock_user(p
, arg2
, 0);
9516 #ifdef TARGET_NR_setresuid
9517 case TARGET_NR_setresuid
:
9518 ret
= get_errno(sys_setresuid(low2highuid(arg1
),
9520 low2highuid(arg3
)));
9523 #ifdef TARGET_NR_getresuid
9524 case TARGET_NR_getresuid
:
9526 uid_t ruid
, euid
, suid
;
9527 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
9528 if (!is_error(ret
)) {
9529 if (put_user_id(high2lowuid(ruid
), arg1
)
9530 || put_user_id(high2lowuid(euid
), arg2
)
9531 || put_user_id(high2lowuid(suid
), arg3
))
9537 #ifdef TARGET_NR_getresgid
9538 case TARGET_NR_setresgid
:
9539 ret
= get_errno(sys_setresgid(low2highgid(arg1
),
9541 low2highgid(arg3
)));
9544 #ifdef TARGET_NR_getresgid
9545 case TARGET_NR_getresgid
:
9547 gid_t rgid
, egid
, sgid
;
9548 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
9549 if (!is_error(ret
)) {
9550 if (put_user_id(high2lowgid(rgid
), arg1
)
9551 || put_user_id(high2lowgid(egid
), arg2
)
9552 || put_user_id(high2lowgid(sgid
), arg3
))
9558 #ifdef TARGET_NR_chown
9559 case TARGET_NR_chown
:
9560 if (!(p
= lock_user_string(arg1
)))
9562 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
9563 unlock_user(p
, arg1
, 0);
9566 case TARGET_NR_setuid
:
9567 ret
= get_errno(sys_setuid(low2highuid(arg1
)));
9569 case TARGET_NR_setgid
:
9570 ret
= get_errno(sys_setgid(low2highgid(arg1
)));
9572 case TARGET_NR_setfsuid
:
9573 ret
= get_errno(setfsuid(arg1
));
9575 case TARGET_NR_setfsgid
:
9576 ret
= get_errno(setfsgid(arg1
));
9579 #ifdef TARGET_NR_lchown32
9580 case TARGET_NR_lchown32
:
9581 if (!(p
= lock_user_string(arg1
)))
9583 ret
= get_errno(lchown(p
, arg2
, arg3
));
9584 unlock_user(p
, arg1
, 0);
9587 #ifdef TARGET_NR_getuid32
9588 case TARGET_NR_getuid32
:
9589 ret
= get_errno(getuid());
9593 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
9594 /* Alpha specific */
9595 case TARGET_NR_getxuid
:
9599 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
9601 ret
= get_errno(getuid());
9604 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
9605 /* Alpha specific */
9606 case TARGET_NR_getxgid
:
9610 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
9612 ret
= get_errno(getgid());
9615 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
9616 /* Alpha specific */
9617 case TARGET_NR_osf_getsysinfo
:
9618 ret
= -TARGET_EOPNOTSUPP
;
9620 case TARGET_GSI_IEEE_FP_CONTROL
:
9622 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
9624 /* Copied from linux ieee_fpcr_to_swcr. */
9625 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
9626 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
9627 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
9628 | SWCR_TRAP_ENABLE_DZE
9629 | SWCR_TRAP_ENABLE_OVF
);
9630 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
9631 | SWCR_TRAP_ENABLE_INE
);
9632 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
9633 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
9635 if (put_user_u64 (swcr
, arg2
))
9641 /* case GSI_IEEE_STATE_AT_SIGNAL:
9642 -- Not implemented in linux kernel.
9644 -- Retrieves current unaligned access state; not much used.
9646 -- Retrieves implver information; surely not used.
9648 -- Grabs a copy of the HWRPB; surely not used.
9653 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
9654 /* Alpha specific */
9655 case TARGET_NR_osf_setsysinfo
:
9656 ret
= -TARGET_EOPNOTSUPP
;
9658 case TARGET_SSI_IEEE_FP_CONTROL
:
9660 uint64_t swcr
, fpcr
, orig_fpcr
;
9662 if (get_user_u64 (swcr
, arg2
)) {
9665 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
9666 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
9668 /* Copied from linux ieee_swcr_to_fpcr. */
9669 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
9670 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
9671 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
9672 | SWCR_TRAP_ENABLE_DZE
9673 | SWCR_TRAP_ENABLE_OVF
)) << 48;
9674 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
9675 | SWCR_TRAP_ENABLE_INE
)) << 57;
9676 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
9677 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
9679 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
9684 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
9686 uint64_t exc
, fpcr
, orig_fpcr
;
9689 if (get_user_u64(exc
, arg2
)) {
9693 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
9695 /* We only add to the exception status here. */
9696 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
9698 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
9701 /* Old exceptions are not signaled. */
9702 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
9704 /* If any exceptions set by this call,
9705 and are unmasked, send a signal. */
9707 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
9708 si_code
= TARGET_FPE_FLTRES
;
9710 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
9711 si_code
= TARGET_FPE_FLTUND
;
9713 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
9714 si_code
= TARGET_FPE_FLTOVF
;
9716 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
9717 si_code
= TARGET_FPE_FLTDIV
;
9719 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
9720 si_code
= TARGET_FPE_FLTINV
;
9723 target_siginfo_t info
;
9724 info
.si_signo
= SIGFPE
;
9726 info
.si_code
= si_code
;
9727 info
._sifields
._sigfault
._addr
9728 = ((CPUArchState
*)cpu_env
)->pc
;
9729 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
9734 /* case SSI_NVPAIRS:
9735 -- Used with SSIN_UACPROC to enable unaligned accesses.
9736 case SSI_IEEE_STATE_AT_SIGNAL:
9737 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
9738 -- Not implemented in linux kernel
9743 #ifdef TARGET_NR_osf_sigprocmask
9744 /* Alpha specific. */
9745 case TARGET_NR_osf_sigprocmask
:
9749 sigset_t set
, oldset
;
9752 case TARGET_SIG_BLOCK
:
9755 case TARGET_SIG_UNBLOCK
:
9758 case TARGET_SIG_SETMASK
:
9762 ret
= -TARGET_EINVAL
;
9766 target_to_host_old_sigset(&set
, &mask
);
9767 do_sigprocmask(how
, &set
, &oldset
);
9768 host_to_target_old_sigset(&mask
, &oldset
);
9774 #ifdef TARGET_NR_getgid32
9775 case TARGET_NR_getgid32
:
9776 ret
= get_errno(getgid());
9779 #ifdef TARGET_NR_geteuid32
9780 case TARGET_NR_geteuid32
:
9781 ret
= get_errno(geteuid());
9784 #ifdef TARGET_NR_getegid32
9785 case TARGET_NR_getegid32
:
9786 ret
= get_errno(getegid());
9789 #ifdef TARGET_NR_setreuid32
9790 case TARGET_NR_setreuid32
:
9791 ret
= get_errno(setreuid(arg1
, arg2
));
9794 #ifdef TARGET_NR_setregid32
9795 case TARGET_NR_setregid32
:
9796 ret
= get_errno(setregid(arg1
, arg2
));
9799 #ifdef TARGET_NR_getgroups32
9800 case TARGET_NR_getgroups32
:
9802 int gidsetsize
= arg1
;
9803 uint32_t *target_grouplist
;
9807 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
9808 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
9809 if (gidsetsize
== 0)
9811 if (!is_error(ret
)) {
9812 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
9813 if (!target_grouplist
) {
9814 ret
= -TARGET_EFAULT
;
9817 for(i
= 0;i
< ret
; i
++)
9818 target_grouplist
[i
] = tswap32(grouplist
[i
]);
9819 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
9824 #ifdef TARGET_NR_setgroups32
9825 case TARGET_NR_setgroups32
:
9827 int gidsetsize
= arg1
;
9828 uint32_t *target_grouplist
;
9832 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
9833 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
9834 if (!target_grouplist
) {
9835 ret
= -TARGET_EFAULT
;
9838 for(i
= 0;i
< gidsetsize
; i
++)
9839 grouplist
[i
] = tswap32(target_grouplist
[i
]);
9840 unlock_user(target_grouplist
, arg2
, 0);
9841 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
9845 #ifdef TARGET_NR_fchown32
9846 case TARGET_NR_fchown32
:
9847 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
9850 #ifdef TARGET_NR_setresuid32
9851 case TARGET_NR_setresuid32
:
9852 ret
= get_errno(sys_setresuid(arg1
, arg2
, arg3
));
9855 #ifdef TARGET_NR_getresuid32
9856 case TARGET_NR_getresuid32
:
9858 uid_t ruid
, euid
, suid
;
9859 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
9860 if (!is_error(ret
)) {
9861 if (put_user_u32(ruid
, arg1
)
9862 || put_user_u32(euid
, arg2
)
9863 || put_user_u32(suid
, arg3
))
9869 #ifdef TARGET_NR_setresgid32
9870 case TARGET_NR_setresgid32
:
9871 ret
= get_errno(sys_setresgid(arg1
, arg2
, arg3
));
9874 #ifdef TARGET_NR_getresgid32
9875 case TARGET_NR_getresgid32
:
9877 gid_t rgid
, egid
, sgid
;
9878 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
9879 if (!is_error(ret
)) {
9880 if (put_user_u32(rgid
, arg1
)
9881 || put_user_u32(egid
, arg2
)
9882 || put_user_u32(sgid
, arg3
))
9888 #ifdef TARGET_NR_chown32
9889 case TARGET_NR_chown32
:
9890 if (!(p
= lock_user_string(arg1
)))
9892 ret
= get_errno(chown(p
, arg2
, arg3
));
9893 unlock_user(p
, arg1
, 0);
9896 #ifdef TARGET_NR_setuid32
9897 case TARGET_NR_setuid32
:
9898 ret
= get_errno(sys_setuid(arg1
));
9901 #ifdef TARGET_NR_setgid32
9902 case TARGET_NR_setgid32
:
9903 ret
= get_errno(sys_setgid(arg1
));
9906 #ifdef TARGET_NR_setfsuid32
9907 case TARGET_NR_setfsuid32
:
9908 ret
= get_errno(setfsuid(arg1
));
9911 #ifdef TARGET_NR_setfsgid32
9912 case TARGET_NR_setfsgid32
:
9913 ret
= get_errno(setfsgid(arg1
));
9917 case TARGET_NR_pivot_root
:
9919 #ifdef TARGET_NR_mincore
9920 case TARGET_NR_mincore
:
9923 ret
= -TARGET_EFAULT
;
9924 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
9926 if (!(p
= lock_user_string(arg3
)))
9928 ret
= get_errno(mincore(a
, arg2
, p
));
9929 unlock_user(p
, arg3
, ret
);
9931 unlock_user(a
, arg1
, 0);
9935 #ifdef TARGET_NR_arm_fadvise64_64
9936 case TARGET_NR_arm_fadvise64_64
:
9939 * arm_fadvise64_64 looks like fadvise64_64 but
9940 * with different argument order
9948 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
9949 #ifdef TARGET_NR_fadvise64_64
9950 case TARGET_NR_fadvise64_64
:
9952 #ifdef TARGET_NR_fadvise64
9953 case TARGET_NR_fadvise64
:
9957 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
9958 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
9959 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
9960 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
9964 ret
= -posix_fadvise(arg1
, arg2
, arg3
, arg4
);
9967 #ifdef TARGET_NR_madvise
9968 case TARGET_NR_madvise
:
9969 /* A straight passthrough may not be safe because qemu sometimes
9970 turns private file-backed mappings into anonymous mappings.
9971 This will break MADV_DONTNEED.
9972 This is a hint, so ignoring and returning success is ok. */
9976 #if TARGET_ABI_BITS == 32
9977 case TARGET_NR_fcntl64
:
9981 struct target_flock64
*target_fl
;
9983 struct target_eabi_flock64
*target_efl
;
9986 cmd
= target_to_host_fcntl_cmd(arg2
);
9987 if (cmd
== -TARGET_EINVAL
) {
9993 case TARGET_F_GETLK64
:
9995 if (((CPUARMState
*)cpu_env
)->eabi
) {
9996 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
9998 fl
.l_type
= tswap16(target_efl
->l_type
);
9999 fl
.l_whence
= tswap16(target_efl
->l_whence
);
10000 fl
.l_start
= tswap64(target_efl
->l_start
);
10001 fl
.l_len
= tswap64(target_efl
->l_len
);
10002 fl
.l_pid
= tswap32(target_efl
->l_pid
);
10003 unlock_user_struct(target_efl
, arg3
, 0);
10007 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
10009 fl
.l_type
= tswap16(target_fl
->l_type
);
10010 fl
.l_whence
= tswap16(target_fl
->l_whence
);
10011 fl
.l_start
= tswap64(target_fl
->l_start
);
10012 fl
.l_len
= tswap64(target_fl
->l_len
);
10013 fl
.l_pid
= tswap32(target_fl
->l_pid
);
10014 unlock_user_struct(target_fl
, arg3
, 0);
10016 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
10019 if (((CPUARMState
*)cpu_env
)->eabi
) {
10020 if (!lock_user_struct(VERIFY_WRITE
, target_efl
, arg3
, 0))
10022 target_efl
->l_type
= tswap16(fl
.l_type
);
10023 target_efl
->l_whence
= tswap16(fl
.l_whence
);
10024 target_efl
->l_start
= tswap64(fl
.l_start
);
10025 target_efl
->l_len
= tswap64(fl
.l_len
);
10026 target_efl
->l_pid
= tswap32(fl
.l_pid
);
10027 unlock_user_struct(target_efl
, arg3
, 1);
10031 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg3
, 0))
10033 target_fl
->l_type
= tswap16(fl
.l_type
);
10034 target_fl
->l_whence
= tswap16(fl
.l_whence
);
10035 target_fl
->l_start
= tswap64(fl
.l_start
);
10036 target_fl
->l_len
= tswap64(fl
.l_len
);
10037 target_fl
->l_pid
= tswap32(fl
.l_pid
);
10038 unlock_user_struct(target_fl
, arg3
, 1);
10043 case TARGET_F_SETLK64
:
10044 case TARGET_F_SETLKW64
:
10046 if (((CPUARMState
*)cpu_env
)->eabi
) {
10047 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
10049 fl
.l_type
= tswap16(target_efl
->l_type
);
10050 fl
.l_whence
= tswap16(target_efl
->l_whence
);
10051 fl
.l_start
= tswap64(target_efl
->l_start
);
10052 fl
.l_len
= tswap64(target_efl
->l_len
);
10053 fl
.l_pid
= tswap32(target_efl
->l_pid
);
10054 unlock_user_struct(target_efl
, arg3
, 0);
10058 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
10060 fl
.l_type
= tswap16(target_fl
->l_type
);
10061 fl
.l_whence
= tswap16(target_fl
->l_whence
);
10062 fl
.l_start
= tswap64(target_fl
->l_start
);
10063 fl
.l_len
= tswap64(target_fl
->l_len
);
10064 fl
.l_pid
= tswap32(target_fl
->l_pid
);
10065 unlock_user_struct(target_fl
, arg3
, 0);
10067 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
10070 ret
= do_fcntl(arg1
, arg2
, arg3
);
10076 #ifdef TARGET_NR_cacheflush
10077 case TARGET_NR_cacheflush
:
10078 /* self-modifying code is handled automatically, so nothing needed */
10082 #ifdef TARGET_NR_security
10083 case TARGET_NR_security
:
10084 goto unimplemented
;
10086 #ifdef TARGET_NR_getpagesize
10087 case TARGET_NR_getpagesize
:
10088 ret
= TARGET_PAGE_SIZE
;
10091 case TARGET_NR_gettid
:
10092 ret
= get_errno(gettid());
10094 #ifdef TARGET_NR_readahead
10095 case TARGET_NR_readahead
:
10096 #if TARGET_ABI_BITS == 32
10097 if (regpairs_aligned(cpu_env
)) {
10102 ret
= get_errno(readahead(arg1
, ((off64_t
)arg3
<< 32) | arg2
, arg4
));
10104 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
10109 #ifdef TARGET_NR_setxattr
10110 case TARGET_NR_listxattr
:
10111 case TARGET_NR_llistxattr
:
10115 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10117 ret
= -TARGET_EFAULT
;
10121 p
= lock_user_string(arg1
);
10123 if (num
== TARGET_NR_listxattr
) {
10124 ret
= get_errno(listxattr(p
, b
, arg3
));
10126 ret
= get_errno(llistxattr(p
, b
, arg3
));
10129 ret
= -TARGET_EFAULT
;
10131 unlock_user(p
, arg1
, 0);
10132 unlock_user(b
, arg2
, arg3
);
10135 case TARGET_NR_flistxattr
:
10139 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10141 ret
= -TARGET_EFAULT
;
10145 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
10146 unlock_user(b
, arg2
, arg3
);
10149 case TARGET_NR_setxattr
:
10150 case TARGET_NR_lsetxattr
:
10152 void *p
, *n
, *v
= 0;
10154 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
10156 ret
= -TARGET_EFAULT
;
10160 p
= lock_user_string(arg1
);
10161 n
= lock_user_string(arg2
);
10163 if (num
== TARGET_NR_setxattr
) {
10164 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
10166 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
10169 ret
= -TARGET_EFAULT
;
10171 unlock_user(p
, arg1
, 0);
10172 unlock_user(n
, arg2
, 0);
10173 unlock_user(v
, arg3
, 0);
10176 case TARGET_NR_fsetxattr
:
10180 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
10182 ret
= -TARGET_EFAULT
;
10186 n
= lock_user_string(arg2
);
10188 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
10190 ret
= -TARGET_EFAULT
;
10192 unlock_user(n
, arg2
, 0);
10193 unlock_user(v
, arg3
, 0);
10196 case TARGET_NR_getxattr
:
10197 case TARGET_NR_lgetxattr
:
10199 void *p
, *n
, *v
= 0;
10201 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
10203 ret
= -TARGET_EFAULT
;
10207 p
= lock_user_string(arg1
);
10208 n
= lock_user_string(arg2
);
10210 if (num
== TARGET_NR_getxattr
) {
10211 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
10213 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
10216 ret
= -TARGET_EFAULT
;
10218 unlock_user(p
, arg1
, 0);
10219 unlock_user(n
, arg2
, 0);
10220 unlock_user(v
, arg3
, arg4
);
10223 case TARGET_NR_fgetxattr
:
10227 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
10229 ret
= -TARGET_EFAULT
;
10233 n
= lock_user_string(arg2
);
10235 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
10237 ret
= -TARGET_EFAULT
;
10239 unlock_user(n
, arg2
, 0);
10240 unlock_user(v
, arg3
, arg4
);
10243 case TARGET_NR_removexattr
:
10244 case TARGET_NR_lremovexattr
:
10247 p
= lock_user_string(arg1
);
10248 n
= lock_user_string(arg2
);
10250 if (num
== TARGET_NR_removexattr
) {
10251 ret
= get_errno(removexattr(p
, n
));
10253 ret
= get_errno(lremovexattr(p
, n
));
10256 ret
= -TARGET_EFAULT
;
10258 unlock_user(p
, arg1
, 0);
10259 unlock_user(n
, arg2
, 0);
10262 case TARGET_NR_fremovexattr
:
10265 n
= lock_user_string(arg2
);
10267 ret
= get_errno(fremovexattr(arg1
, n
));
10269 ret
= -TARGET_EFAULT
;
10271 unlock_user(n
, arg2
, 0);
10275 #endif /* CONFIG_ATTR */
10276 #ifdef TARGET_NR_set_thread_area
10277 case TARGET_NR_set_thread_area
:
10278 #if defined(TARGET_MIPS)
10279 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
10282 #elif defined(TARGET_CRIS)
10284 ret
= -TARGET_EINVAL
;
10286 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
10290 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
10291 ret
= do_set_thread_area(cpu_env
, arg1
);
10293 #elif defined(TARGET_M68K)
10295 TaskState
*ts
= cpu
->opaque
;
10296 ts
->tp_value
= arg1
;
10301 goto unimplemented_nowarn
;
10304 #ifdef TARGET_NR_get_thread_area
10305 case TARGET_NR_get_thread_area
:
10306 #if defined(TARGET_I386) && defined(TARGET_ABI32)
10307 ret
= do_get_thread_area(cpu_env
, arg1
);
10309 #elif defined(TARGET_M68K)
10311 TaskState
*ts
= cpu
->opaque
;
10312 ret
= ts
->tp_value
;
10316 goto unimplemented_nowarn
;
10319 #ifdef TARGET_NR_getdomainname
10320 case TARGET_NR_getdomainname
:
10321 goto unimplemented_nowarn
;
10324 #ifdef TARGET_NR_clock_gettime
10325 case TARGET_NR_clock_gettime
:
10327 struct timespec ts
;
10328 ret
= get_errno(clock_gettime(arg1
, &ts
));
10329 if (!is_error(ret
)) {
10330 host_to_target_timespec(arg2
, &ts
);
10335 #ifdef TARGET_NR_clock_getres
10336 case TARGET_NR_clock_getres
:
10338 struct timespec ts
;
10339 ret
= get_errno(clock_getres(arg1
, &ts
));
10340 if (!is_error(ret
)) {
10341 host_to_target_timespec(arg2
, &ts
);
10346 #ifdef TARGET_NR_clock_nanosleep
10347 case TARGET_NR_clock_nanosleep
:
10349 struct timespec ts
;
10350 target_to_host_timespec(&ts
, arg3
);
10351 ret
= get_errno(clock_nanosleep(arg1
, arg2
, &ts
, arg4
? &ts
: NULL
));
10353 host_to_target_timespec(arg4
, &ts
);
10355 #if defined(TARGET_PPC)
10356 /* clock_nanosleep is odd in that it returns positive errno values.
10357 * On PPC, CR0 bit 3 should be set in such a situation. */
10359 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
10366 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
10367 case TARGET_NR_set_tid_address
:
10368 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
10372 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
10373 case TARGET_NR_tkill
:
10374 ret
= get_errno(sys_tkill((int)arg1
, target_to_host_signal(arg2
)));
10378 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
10379 case TARGET_NR_tgkill
:
10380 ret
= get_errno(sys_tgkill((int)arg1
, (int)arg2
,
10381 target_to_host_signal(arg3
)));
10385 #ifdef TARGET_NR_set_robust_list
10386 case TARGET_NR_set_robust_list
:
10387 case TARGET_NR_get_robust_list
:
10388 /* The ABI for supporting robust futexes has userspace pass
10389 * the kernel a pointer to a linked list which is updated by
10390 * userspace after the syscall; the list is walked by the kernel
10391 * when the thread exits. Since the linked list in QEMU guest
10392 * memory isn't a valid linked list for the host and we have
10393 * no way to reliably intercept the thread-death event, we can't
10394 * support these. Silently return ENOSYS so that guest userspace
10395 * falls back to a non-robust futex implementation (which should
10396 * be OK except in the corner case of the guest crashing while
10397 * holding a mutex that is shared with another process via
10400 goto unimplemented_nowarn
;
10403 #if defined(TARGET_NR_utimensat)
10404 case TARGET_NR_utimensat
:
10406 struct timespec
*tsp
, ts
[2];
10410 target_to_host_timespec(ts
, arg3
);
10411 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
10415 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
10417 if (!(p
= lock_user_string(arg2
))) {
10418 ret
= -TARGET_EFAULT
;
10421 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
10422 unlock_user(p
, arg2
, 0);
10427 case TARGET_NR_futex
:
10428 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
10430 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
10431 case TARGET_NR_inotify_init
:
10432 ret
= get_errno(sys_inotify_init());
10435 #ifdef CONFIG_INOTIFY1
10436 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
10437 case TARGET_NR_inotify_init1
:
10438 ret
= get_errno(sys_inotify_init1(arg1
));
10442 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
10443 case TARGET_NR_inotify_add_watch
:
10444 p
= lock_user_string(arg2
);
10445 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
10446 unlock_user(p
, arg2
, 0);
10449 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
10450 case TARGET_NR_inotify_rm_watch
:
10451 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
10455 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
10456 case TARGET_NR_mq_open
:
10458 struct mq_attr posix_mq_attr
, *attrp
;
10460 p
= lock_user_string(arg1
- 1);
10462 copy_from_user_mq_attr (&posix_mq_attr
, arg4
);
10463 attrp
= &posix_mq_attr
;
10467 ret
= get_errno(mq_open(p
, arg2
, arg3
, attrp
));
10468 unlock_user (p
, arg1
, 0);
10472 case TARGET_NR_mq_unlink
:
10473 p
= lock_user_string(arg1
- 1);
10474 ret
= get_errno(mq_unlink(p
));
10475 unlock_user (p
, arg1
, 0);
10478 case TARGET_NR_mq_timedsend
:
10480 struct timespec ts
;
10482 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
10484 target_to_host_timespec(&ts
, arg5
);
10485 ret
= get_errno(mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
10486 host_to_target_timespec(arg5
, &ts
);
10489 ret
= get_errno(mq_send(arg1
, p
, arg3
, arg4
));
10490 unlock_user (p
, arg2
, arg3
);
10494 case TARGET_NR_mq_timedreceive
:
10496 struct timespec ts
;
10499 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
10501 target_to_host_timespec(&ts
, arg5
);
10502 ret
= get_errno(mq_timedreceive(arg1
, p
, arg3
, &prio
, &ts
));
10503 host_to_target_timespec(arg5
, &ts
);
10506 ret
= get_errno(mq_receive(arg1
, p
, arg3
, &prio
));
10507 unlock_user (p
, arg2
, arg3
);
10509 put_user_u32(prio
, arg4
);
10513 /* Not implemented for now... */
10514 /* case TARGET_NR_mq_notify: */
10517 case TARGET_NR_mq_getsetattr
:
10519 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
10522 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
10523 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
10526 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
10527 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
10534 #ifdef CONFIG_SPLICE
10535 #ifdef TARGET_NR_tee
10536 case TARGET_NR_tee
:
10538 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
10542 #ifdef TARGET_NR_splice
10543 case TARGET_NR_splice
:
10545 loff_t loff_in
, loff_out
;
10546 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
10548 if (get_user_u64(loff_in
, arg2
)) {
10551 ploff_in
= &loff_in
;
10554 if (get_user_u64(loff_out
, arg4
)) {
10557 ploff_out
= &loff_out
;
10559 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
10561 if (put_user_u64(loff_in
, arg2
)) {
10566 if (put_user_u64(loff_out
, arg4
)) {
10573 #ifdef TARGET_NR_vmsplice
10574 case TARGET_NR_vmsplice
:
10576 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10578 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
10579 unlock_iovec(vec
, arg2
, arg3
, 0);
10581 ret
= -host_to_target_errno(errno
);
10586 #endif /* CONFIG_SPLICE */
10587 #ifdef CONFIG_EVENTFD
10588 #if defined(TARGET_NR_eventfd)
10589 case TARGET_NR_eventfd
:
10590 ret
= get_errno(eventfd(arg1
, 0));
10591 fd_trans_unregister(ret
);
10594 #if defined(TARGET_NR_eventfd2)
10595 case TARGET_NR_eventfd2
:
10597 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
10598 if (arg2
& TARGET_O_NONBLOCK
) {
10599 host_flags
|= O_NONBLOCK
;
10601 if (arg2
& TARGET_O_CLOEXEC
) {
10602 host_flags
|= O_CLOEXEC
;
10604 ret
= get_errno(eventfd(arg1
, host_flags
));
10605 fd_trans_unregister(ret
);
10609 #endif /* CONFIG_EVENTFD */
10610 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
10611 case TARGET_NR_fallocate
:
10612 #if TARGET_ABI_BITS == 32
10613 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
10614 target_offset64(arg5
, arg6
)));
10616 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
10620 #if defined(CONFIG_SYNC_FILE_RANGE)
10621 #if defined(TARGET_NR_sync_file_range)
10622 case TARGET_NR_sync_file_range
:
10623 #if TARGET_ABI_BITS == 32
10624 #if defined(TARGET_MIPS)
10625 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
10626 target_offset64(arg5
, arg6
), arg7
));
10628 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
10629 target_offset64(arg4
, arg5
), arg6
));
10630 #endif /* !TARGET_MIPS */
10632 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
10636 #if defined(TARGET_NR_sync_file_range2)
10637 case TARGET_NR_sync_file_range2
:
10638 /* This is like sync_file_range but the arguments are reordered */
10639 #if TARGET_ABI_BITS == 32
10640 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
10641 target_offset64(arg5
, arg6
), arg2
));
10643 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
10648 #if defined(TARGET_NR_signalfd4)
10649 case TARGET_NR_signalfd4
:
10650 ret
= do_signalfd4(arg1
, arg2
, arg4
);
10653 #if defined(TARGET_NR_signalfd)
10654 case TARGET_NR_signalfd
:
10655 ret
= do_signalfd4(arg1
, arg2
, 0);
10658 #if defined(CONFIG_EPOLL)
10659 #if defined(TARGET_NR_epoll_create)
10660 case TARGET_NR_epoll_create
:
10661 ret
= get_errno(epoll_create(arg1
));
10664 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
10665 case TARGET_NR_epoll_create1
:
10666 ret
= get_errno(epoll_create1(arg1
));
10669 #if defined(TARGET_NR_epoll_ctl)
10670 case TARGET_NR_epoll_ctl
:
10672 struct epoll_event ep
;
10673 struct epoll_event
*epp
= 0;
10675 struct target_epoll_event
*target_ep
;
10676 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
10679 ep
.events
= tswap32(target_ep
->events
);
10680 /* The epoll_data_t union is just opaque data to the kernel,
10681 * so we transfer all 64 bits across and need not worry what
10682 * actual data type it is.
10684 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
10685 unlock_user_struct(target_ep
, arg4
, 0);
10688 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
10693 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
10694 #define IMPLEMENT_EPOLL_PWAIT
10696 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
10697 #if defined(TARGET_NR_epoll_wait)
10698 case TARGET_NR_epoll_wait
:
10700 #if defined(IMPLEMENT_EPOLL_PWAIT)
10701 case TARGET_NR_epoll_pwait
:
10704 struct target_epoll_event
*target_ep
;
10705 struct epoll_event
*ep
;
10707 int maxevents
= arg3
;
10708 int timeout
= arg4
;
10710 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
10711 maxevents
* sizeof(struct target_epoll_event
), 1);
10716 ep
= alloca(maxevents
* sizeof(struct epoll_event
));
10719 #if defined(IMPLEMENT_EPOLL_PWAIT)
10720 case TARGET_NR_epoll_pwait
:
10722 target_sigset_t
*target_set
;
10723 sigset_t _set
, *set
= &_set
;
10726 target_set
= lock_user(VERIFY_READ
, arg5
,
10727 sizeof(target_sigset_t
), 1);
10729 unlock_user(target_ep
, arg2
, 0);
10732 target_to_host_sigset(set
, target_set
);
10733 unlock_user(target_set
, arg5
, 0);
10738 ret
= get_errno(epoll_pwait(epfd
, ep
, maxevents
, timeout
, set
));
10742 #if defined(TARGET_NR_epoll_wait)
10743 case TARGET_NR_epoll_wait
:
10744 ret
= get_errno(epoll_wait(epfd
, ep
, maxevents
, timeout
));
10748 ret
= -TARGET_ENOSYS
;
10750 if (!is_error(ret
)) {
10752 for (i
= 0; i
< ret
; i
++) {
10753 target_ep
[i
].events
= tswap32(ep
[i
].events
);
10754 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
10757 unlock_user(target_ep
, arg2
, ret
* sizeof(struct target_epoll_event
));
10762 #ifdef TARGET_NR_prlimit64
10763 case TARGET_NR_prlimit64
:
10765 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
10766 struct target_rlimit64
*target_rnew
, *target_rold
;
10767 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
10768 int resource
= target_to_host_resource(arg2
);
10770 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
10773 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
10774 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
10775 unlock_user_struct(target_rnew
, arg3
, 0);
10779 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
10780 if (!is_error(ret
) && arg4
) {
10781 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
10784 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
10785 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
10786 unlock_user_struct(target_rold
, arg4
, 1);
10791 #ifdef TARGET_NR_gethostname
10792 case TARGET_NR_gethostname
:
10794 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
10796 ret
= get_errno(gethostname(name
, arg2
));
10797 unlock_user(name
, arg1
, arg2
);
10799 ret
= -TARGET_EFAULT
;
10804 #ifdef TARGET_NR_atomic_cmpxchg_32
10805 case TARGET_NR_atomic_cmpxchg_32
:
10807 /* should use start_exclusive from main.c */
10808 abi_ulong mem_value
;
10809 if (get_user_u32(mem_value
, arg6
)) {
10810 target_siginfo_t info
;
10811 info
.si_signo
= SIGSEGV
;
10813 info
.si_code
= TARGET_SEGV_MAPERR
;
10814 info
._sifields
._sigfault
._addr
= arg6
;
10815 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
10819 if (mem_value
== arg2
)
10820 put_user_u32(arg1
, arg6
);
10825 #ifdef TARGET_NR_atomic_barrier
10826 case TARGET_NR_atomic_barrier
:
10828 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
10834 #ifdef TARGET_NR_timer_create
10835 case TARGET_NR_timer_create
:
10837 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
10839 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
10842 int timer_index
= next_free_host_timer();
10844 if (timer_index
< 0) {
10845 ret
= -TARGET_EAGAIN
;
10847 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
10850 phost_sevp
= &host_sevp
;
10851 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
10857 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
10861 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
10870 #ifdef TARGET_NR_timer_settime
10871 case TARGET_NR_timer_settime
:
10873 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
10874 * struct itimerspec * old_value */
10875 target_timer_t timerid
= get_timer_id(arg1
);
10879 } else if (arg3
== 0) {
10880 ret
= -TARGET_EINVAL
;
10882 timer_t htimer
= g_posix_timers
[timerid
];
10883 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
10885 target_to_host_itimerspec(&hspec_new
, arg3
);
10887 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
10888 host_to_target_itimerspec(arg2
, &hspec_old
);
10894 #ifdef TARGET_NR_timer_gettime
10895 case TARGET_NR_timer_gettime
:
10897 /* args: timer_t timerid, struct itimerspec *curr_value */
10898 target_timer_t timerid
= get_timer_id(arg1
);
10902 } else if (!arg2
) {
10903 ret
= -TARGET_EFAULT
;
10905 timer_t htimer
= g_posix_timers
[timerid
];
10906 struct itimerspec hspec
;
10907 ret
= get_errno(timer_gettime(htimer
, &hspec
));
10909 if (host_to_target_itimerspec(arg2
, &hspec
)) {
10910 ret
= -TARGET_EFAULT
;
10917 #ifdef TARGET_NR_timer_getoverrun
10918 case TARGET_NR_timer_getoverrun
:
10920 /* args: timer_t timerid */
10921 target_timer_t timerid
= get_timer_id(arg1
);
10926 timer_t htimer
= g_posix_timers
[timerid
];
10927 ret
= get_errno(timer_getoverrun(htimer
));
10929 fd_trans_unregister(ret
);
10934 #ifdef TARGET_NR_timer_delete
10935 case TARGET_NR_timer_delete
:
10937 /* args: timer_t timerid */
10938 target_timer_t timerid
= get_timer_id(arg1
);
10943 timer_t htimer
= g_posix_timers
[timerid
];
10944 ret
= get_errno(timer_delete(htimer
));
10945 g_posix_timers
[timerid
] = 0;
10951 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
10952 case TARGET_NR_timerfd_create
:
10953 ret
= get_errno(timerfd_create(arg1
,
10954 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
10958 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
10959 case TARGET_NR_timerfd_gettime
:
10961 struct itimerspec its_curr
;
10963 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
10965 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
10972 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
10973 case TARGET_NR_timerfd_settime
:
10975 struct itimerspec its_new
, its_old
, *p_new
;
10978 if (target_to_host_itimerspec(&its_new
, arg3
)) {
10986 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
10988 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
10995 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
10996 case TARGET_NR_ioprio_get
:
10997 ret
= get_errno(ioprio_get(arg1
, arg2
));
11001 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11002 case TARGET_NR_ioprio_set
:
11003 ret
= get_errno(ioprio_set(arg1
, arg2
, arg3
));
11007 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11008 case TARGET_NR_setns
:
11009 ret
= get_errno(setns(arg1
, arg2
));
11012 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11013 case TARGET_NR_unshare
:
11014 ret
= get_errno(unshare(arg1
));
11020 gemu_log("qemu: Unsupported syscall: %d\n", num
);
11021 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
11022 unimplemented_nowarn
:
11024 ret
= -TARGET_ENOSYS
;
11029 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
11032 print_syscall_ret(num
, ret
);
11035 ret
= -TARGET_EFAULT
;