4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
29 #include <sys/mount.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
36 #include <linux/capability.h>
39 int __clone2(int (*fn
)(void *), void *child_stack_base
,
40 size_t stack_size
, int flags
, void *arg
, ...);
42 #include <sys/socket.h>
46 #include <sys/times.h>
49 #include <sys/statfs.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include "qemu-common.h"
60 #include <sys/timerfd.h>
66 #include <sys/eventfd.h>
69 #include <sys/epoll.h>
72 #include "qemu/xattr.h"
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
91 #include <linux/mtio.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <netpacket/packet.h>
104 #include <linux/netlink.h>
105 #ifdef CONFIG_RTNETLINK
106 #include <linux/rtnetlink.h>
107 #include <linux/if_bridge.h>
109 #include <linux/audit.h>
110 #include "linux_loop.h"
115 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
116 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
119 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
120 * once. This exercises the codepaths for restart.
122 //#define DEBUG_ERESTARTSYS
124 //#include <linux/msdos_fs.h>
125 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
126 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
136 #define _syscall0(type,name) \
137 static type name (void) \
139 return syscall(__NR_##name); \
142 #define _syscall1(type,name,type1,arg1) \
143 static type name (type1 arg1) \
145 return syscall(__NR_##name, arg1); \
148 #define _syscall2(type,name,type1,arg1,type2,arg2) \
149 static type name (type1 arg1,type2 arg2) \
151 return syscall(__NR_##name, arg1, arg2); \
154 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
155 static type name (type1 arg1,type2 arg2,type3 arg3) \
157 return syscall(__NR_##name, arg1, arg2, arg3); \
160 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
161 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
163 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
166 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
168 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
170 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
174 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
175 type5,arg5,type6,arg6) \
176 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
179 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
183 #define __NR_sys_uname __NR_uname
184 #define __NR_sys_getcwd1 __NR_getcwd
185 #define __NR_sys_getdents __NR_getdents
186 #define __NR_sys_getdents64 __NR_getdents64
187 #define __NR_sys_getpriority __NR_getpriority
188 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
189 #define __NR_sys_syslog __NR_syslog
190 #define __NR_sys_futex __NR_futex
191 #define __NR_sys_inotify_init __NR_inotify_init
192 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
193 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
195 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
197 #define __NR__llseek __NR_lseek
200 /* Newer kernel ports have llseek() instead of _llseek() */
201 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
202 #define TARGET_NR__llseek TARGET_NR_llseek
206 _syscall0(int, gettid
)
208 /* This is a replacement for the host gettid() and must return a host
210 static int gettid(void) {
214 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
/* Raw getdents(2) via syscall(): reads up to 'count' bytes of host
 * struct linux_dirent records for 'fd' into 'dirp', preserving the
 * kernel record layout (no libc readdir translation). */
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
217 #if !defined(__NR_getdents) || \
218 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
/* Raw getdents64(2) via syscall(): as sys_getdents above, but fills
 * 64-bit struct linux_dirent64 records. */
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
221 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
/* Raw _llseek(2): 64-bit seek on 32-bit hosts; offset is passed as a
 * hi/lo pair and the resulting position is written to *res. */
_syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
/* Raw rt_sigqueueinfo(2): queue 'sig' with caller-supplied siginfo to 'pid'. */
_syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
/* Raw syslog(2) (kernel log control), not the libc syslog(3). */
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
227 #ifdef __NR_exit_group
/* Raw exit_group(2): terminate all threads in the process. */
_syscall1(int,exit_group,int,error_code)
230 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
/* Raw set_tid_address(2): set the clear_child_tid pointer for this thread. */
_syscall1(int,set_tid_address,int *,tidptr)
233 #if defined(TARGET_NR_futex) && defined(__NR_futex)
/* Raw futex(2): full six-argument form; glibc exposes no wrapper. */
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
/* Raw sched_getaffinity(2): unlike the glibc wrapper this returns the
 * kernel's actual mask size, which the emulation needs. */
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
/* Raw sched_setaffinity(2), bypassing the glibc wrapper (see getaffinity). */
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
243 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
/* Raw capget(2)/capset(2): process capability get/set using the kernel's
 * __user_cap_header/__user_cap_data structures directly. */
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
249 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
/* Raw ioprio_get(2): read I/O scheduling priority. */
_syscall2(int, ioprio_get, int, which, int, who)
252 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
/* Raw ioprio_set(2): set I/O scheduling class/priority. */
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
255 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
/* Raw getrandom(2): fill 'buf' with kernel-sourced random bytes. */
_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
259 static bitmask_transtbl fcntl_flags_tbl
[] = {
260 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
261 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
262 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
263 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
264 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
265 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
266 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
267 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
268 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
269 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
270 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
271 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
272 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
273 #if defined(O_DIRECT)
274 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
276 #if defined(O_NOATIME)
277 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
279 #if defined(O_CLOEXEC)
280 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
283 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
285 /* Don't terminate the list prematurely on 64-bit host+guest. */
286 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
287 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
292 typedef abi_long (*TargetFdDataFunc
)(void *, size_t);
293 typedef abi_long (*TargetFdAddrFunc
)(void *, abi_ulong
, socklen_t
);
294 typedef struct TargetFdTrans
{
295 TargetFdDataFunc host_to_target_data
;
296 TargetFdDataFunc target_to_host_data
;
297 TargetFdAddrFunc target_to_host_addr
;
300 static TargetFdTrans
**target_fd_trans
;
302 static unsigned int target_fd_max
;
304 static TargetFdDataFunc
fd_trans_target_to_host_data(int fd
)
306 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
307 return target_fd_trans
[fd
]->target_to_host_data
;
312 static TargetFdDataFunc
fd_trans_host_to_target_data(int fd
)
314 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
315 return target_fd_trans
[fd
]->host_to_target_data
;
320 static TargetFdAddrFunc
fd_trans_target_to_host_addr(int fd
)
322 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
323 return target_fd_trans
[fd
]->target_to_host_addr
;
328 static void fd_trans_register(int fd
, TargetFdTrans
*trans
)
332 if (fd
>= target_fd_max
) {
333 oldmax
= target_fd_max
;
334 target_fd_max
= ((fd
>> 6) + 1) << 6; /* by slice of 64 entries */
335 target_fd_trans
= g_renew(TargetFdTrans
*,
336 target_fd_trans
, target_fd_max
);
337 memset((void *)(target_fd_trans
+ oldmax
), 0,
338 (target_fd_max
- oldmax
) * sizeof(TargetFdTrans
*));
340 target_fd_trans
[fd
] = trans
;
343 static void fd_trans_unregister(int fd
)
345 if (fd
>= 0 && fd
< target_fd_max
) {
346 target_fd_trans
[fd
] = NULL
;
350 static void fd_trans_dup(int oldfd
, int newfd
)
352 fd_trans_unregister(newfd
);
353 if (oldfd
< target_fd_max
&& target_fd_trans
[oldfd
]) {
354 fd_trans_register(newfd
, target_fd_trans
[oldfd
]);
358 static int sys_getcwd1(char *buf
, size_t size
)
360 if (getcwd(buf
, size
) == NULL
) {
361 /* getcwd() sets errno */
364 return strlen(buf
)+1;
367 #ifdef TARGET_NR_utimensat
368 #ifdef CONFIG_UTIMENSAT
369 static int sys_utimensat(int dirfd
, const char *pathname
,
370 const struct timespec times
[2], int flags
)
372 if (pathname
== NULL
)
373 return futimens(dirfd
, times
);
375 return utimensat(dirfd
, pathname
, times
, flags
);
377 #elif defined(__NR_utimensat)
378 #define __NR_sys_utimensat __NR_utimensat
/* Raw utimensat(2) via syscall(): fallback used when the host libc does
 * not provide utimensat() (the !CONFIG_UTIMENSAT branch). */
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
382 static int sys_utimensat(int dirfd
, const char *pathname
,
383 const struct timespec times
[2], int flags
)
389 #endif /* TARGET_NR_utimensat */
391 #ifdef CONFIG_INOTIFY
392 #include <sys/inotify.h>
394 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
395 static int sys_inotify_init(void)
397 return (inotify_init());
400 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
401 static int sys_inotify_add_watch(int fd
,const char *pathname
, int32_t mask
)
403 return (inotify_add_watch(fd
, pathname
, mask
));
406 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
407 static int sys_inotify_rm_watch(int fd
, int32_t wd
)
409 return (inotify_rm_watch(fd
, wd
));
412 #ifdef CONFIG_INOTIFY1
413 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
414 static int sys_inotify_init1(int flags
)
416 return (inotify_init1(flags
));
421 /* Userspace can usually survive runtime without inotify */
422 #undef TARGET_NR_inotify_init
423 #undef TARGET_NR_inotify_init1
424 #undef TARGET_NR_inotify_add_watch
425 #undef TARGET_NR_inotify_rm_watch
426 #endif /* CONFIG_INOTIFY */
428 #if defined(TARGET_NR_prlimit64)
429 #ifndef __NR_prlimit64
430 # define __NR_prlimit64 -1
432 #define __NR_sys_prlimit64 __NR_prlimit64
433 /* The glibc rlimit structure may not be that used by the underlying syscall */
434 struct host_rlimit64
{
/* Raw prlimit64(2) using host_rlimit64, since the glibc rlimit structure
 * may not match the one the underlying syscall expects (see above). */
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
444 #if defined(TARGET_NR_timer_create)
445 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
446 static timer_t g_posix_timers
[32] = { 0, } ;
448 static inline int next_free_host_timer(void)
451 /* FIXME: Does finding the next free slot require a lock? */
452 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
453 if (g_posix_timers
[k
] == 0) {
454 g_posix_timers
[k
] = (timer_t
) 1;
462 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
464 static inline int regpairs_aligned(void *cpu_env
) {
465 return ((((CPUARMState
*)cpu_env
)->eabi
) == 1) ;
467 #elif defined(TARGET_MIPS)
468 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
469 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
470 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
471 * of registers which translates to the same as ARM/MIPS, because we start with
473 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
475 static inline int regpairs_aligned(void *cpu_env
) { return 0; }
478 #define ERRNO_TABLE_SIZE 1200
480 /* target_to_host_errno_table[] is initialized from
481 * host_to_target_errno_table[] in syscall_init(). */
482 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
486 * This list is the union of errno values overridden in asm-<arch>/errno.h
487 * minus the errnos that are not actually generic to all archs.
489 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
490 [EAGAIN
] = TARGET_EAGAIN
,
491 [EIDRM
] = TARGET_EIDRM
,
492 [ECHRNG
] = TARGET_ECHRNG
,
493 [EL2NSYNC
] = TARGET_EL2NSYNC
,
494 [EL3HLT
] = TARGET_EL3HLT
,
495 [EL3RST
] = TARGET_EL3RST
,
496 [ELNRNG
] = TARGET_ELNRNG
,
497 [EUNATCH
] = TARGET_EUNATCH
,
498 [ENOCSI
] = TARGET_ENOCSI
,
499 [EL2HLT
] = TARGET_EL2HLT
,
500 [EDEADLK
] = TARGET_EDEADLK
,
501 [ENOLCK
] = TARGET_ENOLCK
,
502 [EBADE
] = TARGET_EBADE
,
503 [EBADR
] = TARGET_EBADR
,
504 [EXFULL
] = TARGET_EXFULL
,
505 [ENOANO
] = TARGET_ENOANO
,
506 [EBADRQC
] = TARGET_EBADRQC
,
507 [EBADSLT
] = TARGET_EBADSLT
,
508 [EBFONT
] = TARGET_EBFONT
,
509 [ENOSTR
] = TARGET_ENOSTR
,
510 [ENODATA
] = TARGET_ENODATA
,
511 [ETIME
] = TARGET_ETIME
,
512 [ENOSR
] = TARGET_ENOSR
,
513 [ENONET
] = TARGET_ENONET
,
514 [ENOPKG
] = TARGET_ENOPKG
,
515 [EREMOTE
] = TARGET_EREMOTE
,
516 [ENOLINK
] = TARGET_ENOLINK
,
517 [EADV
] = TARGET_EADV
,
518 [ESRMNT
] = TARGET_ESRMNT
,
519 [ECOMM
] = TARGET_ECOMM
,
520 [EPROTO
] = TARGET_EPROTO
,
521 [EDOTDOT
] = TARGET_EDOTDOT
,
522 [EMULTIHOP
] = TARGET_EMULTIHOP
,
523 [EBADMSG
] = TARGET_EBADMSG
,
524 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
525 [EOVERFLOW
] = TARGET_EOVERFLOW
,
526 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
527 [EBADFD
] = TARGET_EBADFD
,
528 [EREMCHG
] = TARGET_EREMCHG
,
529 [ELIBACC
] = TARGET_ELIBACC
,
530 [ELIBBAD
] = TARGET_ELIBBAD
,
531 [ELIBSCN
] = TARGET_ELIBSCN
,
532 [ELIBMAX
] = TARGET_ELIBMAX
,
533 [ELIBEXEC
] = TARGET_ELIBEXEC
,
534 [EILSEQ
] = TARGET_EILSEQ
,
535 [ENOSYS
] = TARGET_ENOSYS
,
536 [ELOOP
] = TARGET_ELOOP
,
537 [ERESTART
] = TARGET_ERESTART
,
538 [ESTRPIPE
] = TARGET_ESTRPIPE
,
539 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
540 [EUSERS
] = TARGET_EUSERS
,
541 [ENOTSOCK
] = TARGET_ENOTSOCK
,
542 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
543 [EMSGSIZE
] = TARGET_EMSGSIZE
,
544 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
545 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
546 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
547 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
548 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
549 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
550 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
551 [EADDRINUSE
] = TARGET_EADDRINUSE
,
552 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
553 [ENETDOWN
] = TARGET_ENETDOWN
,
554 [ENETUNREACH
] = TARGET_ENETUNREACH
,
555 [ENETRESET
] = TARGET_ENETRESET
,
556 [ECONNABORTED
] = TARGET_ECONNABORTED
,
557 [ECONNRESET
] = TARGET_ECONNRESET
,
558 [ENOBUFS
] = TARGET_ENOBUFS
,
559 [EISCONN
] = TARGET_EISCONN
,
560 [ENOTCONN
] = TARGET_ENOTCONN
,
561 [EUCLEAN
] = TARGET_EUCLEAN
,
562 [ENOTNAM
] = TARGET_ENOTNAM
,
563 [ENAVAIL
] = TARGET_ENAVAIL
,
564 [EISNAM
] = TARGET_EISNAM
,
565 [EREMOTEIO
] = TARGET_EREMOTEIO
,
566 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
567 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
568 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
569 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
570 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
571 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
572 [EALREADY
] = TARGET_EALREADY
,
573 [EINPROGRESS
] = TARGET_EINPROGRESS
,
574 [ESTALE
] = TARGET_ESTALE
,
575 [ECANCELED
] = TARGET_ECANCELED
,
576 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
577 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
579 [ENOKEY
] = TARGET_ENOKEY
,
582 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
585 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
588 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
591 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
593 #ifdef ENOTRECOVERABLE
594 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
598 static inline int host_to_target_errno(int err
)
600 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
601 host_to_target_errno_table
[err
]) {
602 return host_to_target_errno_table
[err
];
607 static inline int target_to_host_errno(int err
)
609 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
610 target_to_host_errno_table
[err
]) {
611 return target_to_host_errno_table
[err
];
616 static inline abi_long
get_errno(abi_long ret
)
619 return -host_to_target_errno(errno
);
624 static inline int is_error(abi_long ret
)
626 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
629 const char *target_strerror(int err
)
631 if (err
== TARGET_ERESTARTSYS
) {
632 return "To be restarted";
634 if (err
== TARGET_QEMU_ESIGRETURN
) {
635 return "Successful exit from sigreturn";
638 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
641 return strerror(target_to_host_errno(err
));
644 #define safe_syscall0(type, name) \
645 static type safe_##name(void) \
647 return safe_syscall(__NR_##name); \
650 #define safe_syscall1(type, name, type1, arg1) \
651 static type safe_##name(type1 arg1) \
653 return safe_syscall(__NR_##name, arg1); \
656 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
657 static type safe_##name(type1 arg1, type2 arg2) \
659 return safe_syscall(__NR_##name, arg1, arg2); \
662 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
663 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
665 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
668 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
670 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
672 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
675 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
676 type4, arg4, type5, arg5) \
677 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
680 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
683 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
684 type4, arg4, type5, arg5, type6, arg6) \
685 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
686 type5 arg5, type6 arg6) \
688 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
/* Restart-safe wrappers (built on the safe_syscallN macros above) for the
 * basic blocking I/O and process-wait syscalls. */
safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
              int, flags, mode_t, mode)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
              struct rusage *, rusage)
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
              int, options, struct rusage *, rusage)
safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
702 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
703 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
705 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
706 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
708 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
709 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
/* Restart-safe wrappers for signal delivery/suspension and vectored I/O. */
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
716 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
/* Restart-safe wrappers for the blocking socket syscalls and
 * rt_sigtimedwait. */
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
727 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
/* Restart-safe nanosleep(2); 'rem' receives the unslept time on interrupt. */
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
731 #ifdef TARGET_NR_clock_nanosleep
/* Restart-safe clock_nanosleep(2) against an explicit clock. */
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
736 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
/* Restart-safe SysV IPC wrappers (hosts with individual msgrcv/semtimedop
 * syscalls; the single-ipc-syscall case is handled separately below). */
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
743 /* This host kernel architecture uses a single ipc syscall; fake up
744 * wrappers for the sub-operations to hide this implementation detail.
745 * Annoyingly we can't include linux/ipc.h to get the constant definitions
746 * for the call parameter because some structs in there conflict with the
747 * sys/ipc.h ones. So we just define them here, and rely on them being
748 * the same for all host architectures.
750 #define Q_SEMTIMEDOP 4
753 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
/* Restart-safe multiplexed ipc(2) syscall, for host architectures that
 * funnel all SysV IPC operations through a single entry point. */
safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
757 static int safe_msgsnd(int msgid
, const void *msgp
, size_t sz
, int flags
)
759 return safe_ipc(Q_IPCCALL(0, Q_MSGSND
), msgid
, sz
, flags
, (void *)msgp
, 0);
761 static int safe_msgrcv(int msgid
, void *msgp
, size_t sz
, long type
, int flags
)
763 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV
), msgid
, sz
, flags
, msgp
, type
);
765 static int safe_semtimedop(int semid
, struct sembuf
*tsops
, unsigned nsops
,
766 const struct timespec
*timeout
)
768 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP
), semid
, nsops
, 0, tsops
,
772 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
/* Restart-safe POSIX message-queue timed send/receive wrappers. */
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
778 /* We do ioctl like this rather than via safe_syscall3 to preserve the
779 * "third argument might be integer or pointer or not present" behaviour of
782 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
783 /* Similarly for fcntl. Note that callers must always:
784 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
785 * use the flock64 struct rather than unsuffixed flock
786 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
789 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
791 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
794 static inline int host_to_target_sock_type(int host_type
)
798 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
800 target_type
= TARGET_SOCK_DGRAM
;
803 target_type
= TARGET_SOCK_STREAM
;
806 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
810 #if defined(SOCK_CLOEXEC)
811 if (host_type
& SOCK_CLOEXEC
) {
812 target_type
|= TARGET_SOCK_CLOEXEC
;
816 #if defined(SOCK_NONBLOCK)
817 if (host_type
& SOCK_NONBLOCK
) {
818 target_type
|= TARGET_SOCK_NONBLOCK
;
825 static abi_ulong target_brk
;
826 static abi_ulong target_original_brk
;
827 static abi_ulong brk_page
;
829 void target_set_brk(abi_ulong new_brk
)
831 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
832 brk_page
= HOST_PAGE_ALIGN(target_brk
);
835 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
836 #define DEBUGF_BRK(message, args...)
838 /* do_brk() must return target values and target errnos. */
839 abi_long
do_brk(abi_ulong new_brk
)
841 abi_long mapped_addr
;
844 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
847 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
850 if (new_brk
< target_original_brk
) {
851 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
856 /* If the new brk is less than the highest page reserved to the
857 * target heap allocation, set it and we're almost done... */
858 if (new_brk
<= brk_page
) {
859 /* Heap contents are initialized to zero, as for anonymous
861 if (new_brk
> target_brk
) {
862 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
864 target_brk
= new_brk
;
865 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
869 /* We need to allocate more memory after the brk... Note that
870 * we don't use MAP_FIXED because that will map over the top of
871 * any existing mapping (like the one with the host libc or qemu
872 * itself); instead we treat "mapped but at wrong address" as
873 * a failure and unmap again.
875 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
876 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
877 PROT_READ
|PROT_WRITE
,
878 MAP_ANON
|MAP_PRIVATE
, 0, 0));
880 if (mapped_addr
== brk_page
) {
881 /* Heap contents are initialized to zero, as for anonymous
882 * mapped pages. Technically the new pages are already
883 * initialized to zero since they *are* anonymous mapped
884 * pages, however we have to take care with the contents that
885 * come from the remaining part of the previous page: it may
886 * contains garbage data due to a previous heap usage (grown
888 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
890 target_brk
= new_brk
;
891 brk_page
= HOST_PAGE_ALIGN(target_brk
);
892 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
895 } else if (mapped_addr
!= -1) {
896 /* Mapped but at wrong address, meaning there wasn't actually
897 * enough space for this brk.
899 target_munmap(mapped_addr
, new_alloc_size
);
901 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
904 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
907 #if defined(TARGET_ALPHA)
908 /* We (partially) emulate OSF/1 on Alpha, which requires we
909 return a proper errno, not an unchanged brk value. */
910 return -TARGET_ENOMEM
;
912 /* For everything else, return the previous break. */
916 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
917 abi_ulong target_fds_addr
,
921 abi_ulong b
, *target_fds
;
923 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
924 if (!(target_fds
= lock_user(VERIFY_READ
,
926 sizeof(abi_ulong
) * nw
,
928 return -TARGET_EFAULT
;
932 for (i
= 0; i
< nw
; i
++) {
933 /* grab the abi_ulong */
934 __get_user(b
, &target_fds
[i
]);
935 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
936 /* check the bit inside the abi_ulong */
943 unlock_user(target_fds
, target_fds_addr
, 0);
948 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
949 abi_ulong target_fds_addr
,
952 if (target_fds_addr
) {
953 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
954 return -TARGET_EFAULT
;
962 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
968 abi_ulong
*target_fds
;
970 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
971 if (!(target_fds
= lock_user(VERIFY_WRITE
,
973 sizeof(abi_ulong
) * nw
,
975 return -TARGET_EFAULT
;
978 for (i
= 0; i
< nw
; i
++) {
980 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
981 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
984 __put_user(v
, &target_fds
[i
]);
987 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
992 #if defined(__alpha__)
998 static inline abi_long
host_to_target_clock_t(long ticks
)
1000 #if HOST_HZ == TARGET_HZ
1003 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
1007 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
1008 const struct rusage
*rusage
)
1010 struct target_rusage
*target_rusage
;
1012 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
1013 return -TARGET_EFAULT
;
1014 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
1015 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
1016 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
1017 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
1018 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
1019 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
1020 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
1021 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
1022 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
1023 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
1024 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1025 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1026 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1027 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1028 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1029 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1030 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1031 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1032 unlock_user_struct(target_rusage
, target_addr
, 1);
1037 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1039 abi_ulong target_rlim_swap
;
1042 target_rlim_swap
= tswapal(target_rlim
);
1043 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1044 return RLIM_INFINITY
;
1046 result
= target_rlim_swap
;
1047 if (target_rlim_swap
!= (rlim_t
)result
)
1048 return RLIM_INFINITY
;
1053 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1055 abi_ulong target_rlim_swap
;
1058 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1059 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1061 target_rlim_swap
= rlim
;
1062 result
= tswapal(target_rlim_swap
);
1067 static inline int target_to_host_resource(int code
)
1070 case TARGET_RLIMIT_AS
:
1072 case TARGET_RLIMIT_CORE
:
1074 case TARGET_RLIMIT_CPU
:
1076 case TARGET_RLIMIT_DATA
:
1078 case TARGET_RLIMIT_FSIZE
:
1079 return RLIMIT_FSIZE
;
1080 case TARGET_RLIMIT_LOCKS
:
1081 return RLIMIT_LOCKS
;
1082 case TARGET_RLIMIT_MEMLOCK
:
1083 return RLIMIT_MEMLOCK
;
1084 case TARGET_RLIMIT_MSGQUEUE
:
1085 return RLIMIT_MSGQUEUE
;
1086 case TARGET_RLIMIT_NICE
:
1088 case TARGET_RLIMIT_NOFILE
:
1089 return RLIMIT_NOFILE
;
1090 case TARGET_RLIMIT_NPROC
:
1091 return RLIMIT_NPROC
;
1092 case TARGET_RLIMIT_RSS
:
1094 case TARGET_RLIMIT_RTPRIO
:
1095 return RLIMIT_RTPRIO
;
1096 case TARGET_RLIMIT_SIGPENDING
:
1097 return RLIMIT_SIGPENDING
;
1098 case TARGET_RLIMIT_STACK
:
1099 return RLIMIT_STACK
;
1105 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1106 abi_ulong target_tv_addr
)
1108 struct target_timeval
*target_tv
;
1110 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
1111 return -TARGET_EFAULT
;
1113 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1114 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1116 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1121 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1122 const struct timeval
*tv
)
1124 struct target_timeval
*target_tv
;
1126 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
1127 return -TARGET_EFAULT
;
1129 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1130 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1132 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1137 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1138 abi_ulong target_tz_addr
)
1140 struct target_timezone
*target_tz
;
1142 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1143 return -TARGET_EFAULT
;
1146 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1147 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1149 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1154 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1157 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1158 abi_ulong target_mq_attr_addr
)
1160 struct target_mq_attr
*target_mq_attr
;
1162 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1163 target_mq_attr_addr
, 1))
1164 return -TARGET_EFAULT
;
1166 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1167 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1168 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1169 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1171 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1176 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1177 const struct mq_attr
*attr
)
1179 struct target_mq_attr
*target_mq_attr
;
1181 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1182 target_mq_attr_addr
, 0))
1183 return -TARGET_EFAULT
;
1185 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1186 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1187 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1188 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1190 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1196 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1197 /* do_select() must return target values and target errnos. */
1198 static abi_long
do_select(int n
,
1199 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1200 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1202 fd_set rfds
, wfds
, efds
;
1203 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1205 struct timespec ts
, *ts_ptr
;
1208 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1212 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1216 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1221 if (target_tv_addr
) {
1222 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1223 return -TARGET_EFAULT
;
1224 ts
.tv_sec
= tv
.tv_sec
;
1225 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1231 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1234 if (!is_error(ret
)) {
1235 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1236 return -TARGET_EFAULT
;
1237 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1238 return -TARGET_EFAULT
;
1239 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1240 return -TARGET_EFAULT
;
1242 if (target_tv_addr
) {
1243 tv
.tv_sec
= ts
.tv_sec
;
1244 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1245 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1246 return -TARGET_EFAULT
;
1255 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1258 return pipe2(host_pipe
, flags
);
1264 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1265 int flags
, int is_pipe2
)
1269 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1272 return get_errno(ret
);
1274 /* Several targets have special calling conventions for the original
1275 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1277 #if defined(TARGET_ALPHA)
1278 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1279 return host_pipe
[0];
1280 #elif defined(TARGET_MIPS)
1281 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1282 return host_pipe
[0];
1283 #elif defined(TARGET_SH4)
1284 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1285 return host_pipe
[0];
1286 #elif defined(TARGET_SPARC)
1287 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1288 return host_pipe
[0];
1292 if (put_user_s32(host_pipe
[0], pipedes
)
1293 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1294 return -TARGET_EFAULT
;
1295 return get_errno(ret
);
1298 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1299 abi_ulong target_addr
,
1302 struct target_ip_mreqn
*target_smreqn
;
1304 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1306 return -TARGET_EFAULT
;
1307 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1308 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1309 if (len
== sizeof(struct target_ip_mreqn
))
1310 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1311 unlock_user(target_smreqn
, target_addr
, 0);
1316 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1317 abi_ulong target_addr
,
1320 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1321 sa_family_t sa_family
;
1322 struct target_sockaddr
*target_saddr
;
1324 if (fd_trans_target_to_host_addr(fd
)) {
1325 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1328 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1330 return -TARGET_EFAULT
;
1332 sa_family
= tswap16(target_saddr
->sa_family
);
1334 /* Oops. The caller might send a incomplete sun_path; sun_path
1335 * must be terminated by \0 (see the manual page), but
1336 * unfortunately it is quite common to specify sockaddr_un
1337 * length as "strlen(x->sun_path)" while it should be
1338 * "strlen(...) + 1". We'll fix that here if needed.
1339 * Linux kernel has a similar feature.
1342 if (sa_family
== AF_UNIX
) {
1343 if (len
< unix_maxlen
&& len
> 0) {
1344 char *cp
= (char*)target_saddr
;
1346 if ( cp
[len
-1] && !cp
[len
] )
1349 if (len
> unix_maxlen
)
1353 memcpy(addr
, target_saddr
, len
);
1354 addr
->sa_family
= sa_family
;
1355 if (sa_family
== AF_NETLINK
) {
1356 struct sockaddr_nl
*nladdr
;
1358 nladdr
= (struct sockaddr_nl
*)addr
;
1359 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1360 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1361 } else if (sa_family
== AF_PACKET
) {
1362 struct target_sockaddr_ll
*lladdr
;
1364 lladdr
= (struct target_sockaddr_ll
*)addr
;
1365 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1366 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1368 unlock_user(target_saddr
, target_addr
, 0);
1373 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1374 struct sockaddr
*addr
,
1377 struct target_sockaddr
*target_saddr
;
1379 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1381 return -TARGET_EFAULT
;
1382 memcpy(target_saddr
, addr
, len
);
1383 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1384 if (addr
->sa_family
== AF_NETLINK
) {
1385 struct sockaddr_nl
*target_nl
= (struct sockaddr_nl
*)target_saddr
;
1386 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1387 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1388 } else if (addr
->sa_family
== AF_PACKET
) {
1389 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1390 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1391 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1393 unlock_user(target_saddr
, target_addr
, len
);
1398 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1399 struct target_msghdr
*target_msgh
)
1401 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1402 abi_long msg_controllen
;
1403 abi_ulong target_cmsg_addr
;
1404 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1405 socklen_t space
= 0;
1407 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1408 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1410 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1411 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1412 target_cmsg_start
= target_cmsg
;
1414 return -TARGET_EFAULT
;
1416 while (cmsg
&& target_cmsg
) {
1417 void *data
= CMSG_DATA(cmsg
);
1418 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1420 int len
= tswapal(target_cmsg
->cmsg_len
)
1421 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
1423 space
+= CMSG_SPACE(len
);
1424 if (space
> msgh
->msg_controllen
) {
1425 space
-= CMSG_SPACE(len
);
1426 /* This is a QEMU bug, since we allocated the payload
1427 * area ourselves (unlike overflow in host-to-target
1428 * conversion, which is just the guest giving us a buffer
1429 * that's too small). It can't happen for the payload types
1430 * we currently support; if it becomes an issue in future
1431 * we would need to improve our allocation strategy to
1432 * something more intelligent than "twice the size of the
1433 * target buffer we're reading from".
1435 gemu_log("Host cmsg overflow\n");
1439 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1440 cmsg
->cmsg_level
= SOL_SOCKET
;
1442 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1444 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1445 cmsg
->cmsg_len
= CMSG_LEN(len
);
1447 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1448 int *fd
= (int *)data
;
1449 int *target_fd
= (int *)target_data
;
1450 int i
, numfds
= len
/ sizeof(int);
1452 for (i
= 0; i
< numfds
; i
++) {
1453 __get_user(fd
[i
], target_fd
+ i
);
1455 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1456 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1457 struct ucred
*cred
= (struct ucred
*)data
;
1458 struct target_ucred
*target_cred
=
1459 (struct target_ucred
*)target_data
;
1461 __get_user(cred
->pid
, &target_cred
->pid
);
1462 __get_user(cred
->uid
, &target_cred
->uid
);
1463 __get_user(cred
->gid
, &target_cred
->gid
);
1465 gemu_log("Unsupported ancillary data: %d/%d\n",
1466 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1467 memcpy(data
, target_data
, len
);
1470 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1471 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1474 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1476 msgh
->msg_controllen
= space
;
1480 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1481 struct msghdr
*msgh
)
1483 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1484 abi_long msg_controllen
;
1485 abi_ulong target_cmsg_addr
;
1486 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1487 socklen_t space
= 0;
1489 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1490 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1492 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1493 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1494 target_cmsg_start
= target_cmsg
;
1496 return -TARGET_EFAULT
;
1498 while (cmsg
&& target_cmsg
) {
1499 void *data
= CMSG_DATA(cmsg
);
1500 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1502 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
1503 int tgt_len
, tgt_space
;
1505 /* We never copy a half-header but may copy half-data;
1506 * this is Linux's behaviour in put_cmsg(). Note that
1507 * truncation here is a guest problem (which we report
1508 * to the guest via the CTRUNC bit), unlike truncation
1509 * in target_to_host_cmsg, which is a QEMU bug.
1511 if (msg_controllen
< sizeof(struct cmsghdr
)) {
1512 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1516 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1517 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1519 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1521 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1523 tgt_len
= TARGET_CMSG_LEN(len
);
1525 /* Payload types which need a different size of payload on
1526 * the target must adjust tgt_len here.
1528 switch (cmsg
->cmsg_level
) {
1530 switch (cmsg
->cmsg_type
) {
1532 tgt_len
= sizeof(struct target_timeval
);
1541 if (msg_controllen
< tgt_len
) {
1542 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1543 tgt_len
= msg_controllen
;
1546 /* We must now copy-and-convert len bytes of payload
1547 * into tgt_len bytes of destination space. Bear in mind
1548 * that in both source and destination we may be dealing
1549 * with a truncated value!
1551 switch (cmsg
->cmsg_level
) {
1553 switch (cmsg
->cmsg_type
) {
1556 int *fd
= (int *)data
;
1557 int *target_fd
= (int *)target_data
;
1558 int i
, numfds
= tgt_len
/ sizeof(int);
1560 for (i
= 0; i
< numfds
; i
++) {
1561 __put_user(fd
[i
], target_fd
+ i
);
1567 struct timeval
*tv
= (struct timeval
*)data
;
1568 struct target_timeval
*target_tv
=
1569 (struct target_timeval
*)target_data
;
1571 if (len
!= sizeof(struct timeval
) ||
1572 tgt_len
!= sizeof(struct target_timeval
)) {
1576 /* copy struct timeval to target */
1577 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1578 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1581 case SCM_CREDENTIALS
:
1583 struct ucred
*cred
= (struct ucred
*)data
;
1584 struct target_ucred
*target_cred
=
1585 (struct target_ucred
*)target_data
;
1587 __put_user(cred
->pid
, &target_cred
->pid
);
1588 __put_user(cred
->uid
, &target_cred
->uid
);
1589 __put_user(cred
->gid
, &target_cred
->gid
);
1599 gemu_log("Unsupported ancillary data: %d/%d\n",
1600 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1601 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1602 if (tgt_len
> len
) {
1603 memset(target_data
+ len
, 0, tgt_len
- len
);
1607 target_cmsg
->cmsg_len
= tswapal(tgt_len
);
1608 tgt_space
= TARGET_CMSG_SPACE(len
);
1609 if (msg_controllen
< tgt_space
) {
1610 tgt_space
= msg_controllen
;
1612 msg_controllen
-= tgt_space
;
1614 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1615 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1618 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1620 target_msgh
->msg_controllen
= tswapal(space
);
1624 static void tswap_nlmsghdr(struct nlmsghdr
*nlh
)
1626 nlh
->nlmsg_len
= tswap32(nlh
->nlmsg_len
);
1627 nlh
->nlmsg_type
= tswap16(nlh
->nlmsg_type
);
1628 nlh
->nlmsg_flags
= tswap16(nlh
->nlmsg_flags
);
1629 nlh
->nlmsg_seq
= tswap32(nlh
->nlmsg_seq
);
1630 nlh
->nlmsg_pid
= tswap32(nlh
->nlmsg_pid
);
1633 static abi_long
host_to_target_for_each_nlmsg(struct nlmsghdr
*nlh
,
1635 abi_long (*host_to_target_nlmsg
)
1636 (struct nlmsghdr
*))
1641 while (len
> sizeof(struct nlmsghdr
)) {
1643 nlmsg_len
= nlh
->nlmsg_len
;
1644 if (nlmsg_len
< sizeof(struct nlmsghdr
) ||
1649 switch (nlh
->nlmsg_type
) {
1651 tswap_nlmsghdr(nlh
);
1657 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
1658 e
->error
= tswap32(e
->error
);
1659 tswap_nlmsghdr(&e
->msg
);
1660 tswap_nlmsghdr(nlh
);
1664 ret
= host_to_target_nlmsg(nlh
);
1666 tswap_nlmsghdr(nlh
);
1671 tswap_nlmsghdr(nlh
);
1672 len
-= NLMSG_ALIGN(nlmsg_len
);
1673 nlh
= (struct nlmsghdr
*)(((char*)nlh
) + NLMSG_ALIGN(nlmsg_len
));
1678 static abi_long
target_to_host_for_each_nlmsg(struct nlmsghdr
*nlh
,
1680 abi_long (*target_to_host_nlmsg
)
1681 (struct nlmsghdr
*))
1685 while (len
> sizeof(struct nlmsghdr
)) {
1686 if (tswap32(nlh
->nlmsg_len
) < sizeof(struct nlmsghdr
) ||
1687 tswap32(nlh
->nlmsg_len
) > len
) {
1690 tswap_nlmsghdr(nlh
);
1691 switch (nlh
->nlmsg_type
) {
1698 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
1699 e
->error
= tswap32(e
->error
);
1700 tswap_nlmsghdr(&e
->msg
);
1704 ret
= target_to_host_nlmsg(nlh
);
1709 len
-= NLMSG_ALIGN(nlh
->nlmsg_len
);
1710 nlh
= (struct nlmsghdr
*)(((char *)nlh
) + NLMSG_ALIGN(nlh
->nlmsg_len
));
1715 #ifdef CONFIG_RTNETLINK
1716 static abi_long
host_to_target_for_each_nlattr(struct nlattr
*nlattr
,
1717 size_t len
, void *context
,
1718 abi_long (*host_to_target_nlattr
)
1722 unsigned short nla_len
;
1725 while (len
> sizeof(struct nlattr
)) {
1726 nla_len
= nlattr
->nla_len
;
1727 if (nla_len
< sizeof(struct nlattr
) ||
1731 ret
= host_to_target_nlattr(nlattr
, context
);
1732 nlattr
->nla_len
= tswap16(nlattr
->nla_len
);
1733 nlattr
->nla_type
= tswap16(nlattr
->nla_type
);
1737 len
-= NLA_ALIGN(nla_len
);
1738 nlattr
= (struct nlattr
*)(((char *)nlattr
) + NLA_ALIGN(nla_len
));
1743 static abi_long
host_to_target_for_each_rtattr(struct rtattr
*rtattr
,
1745 abi_long (*host_to_target_rtattr
)
1748 unsigned short rta_len
;
1751 while (len
> sizeof(struct rtattr
)) {
1752 rta_len
= rtattr
->rta_len
;
1753 if (rta_len
< sizeof(struct rtattr
) ||
1757 ret
= host_to_target_rtattr(rtattr
);
1758 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
1759 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
1763 len
-= RTA_ALIGN(rta_len
);
1764 rtattr
= (struct rtattr
*)(((char *)rtattr
) + RTA_ALIGN(rta_len
));
1769 #define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)
1771 static abi_long
host_to_target_data_bridge_nlattr(struct nlattr
*nlattr
,
1778 switch (nlattr
->nla_type
) {
1780 case IFLA_BR_FDB_FLUSH
:
1783 case IFLA_BR_GROUP_ADDR
:
1786 case IFLA_BR_VLAN_FILTERING
:
1787 case IFLA_BR_TOPOLOGY_CHANGE
:
1788 case IFLA_BR_TOPOLOGY_CHANGE_DETECTED
:
1789 case IFLA_BR_MCAST_ROUTER
:
1790 case IFLA_BR_MCAST_SNOOPING
:
1791 case IFLA_BR_MCAST_QUERY_USE_IFADDR
:
1792 case IFLA_BR_MCAST_QUERIER
:
1793 case IFLA_BR_NF_CALL_IPTABLES
:
1794 case IFLA_BR_NF_CALL_IP6TABLES
:
1795 case IFLA_BR_NF_CALL_ARPTABLES
:
1798 case IFLA_BR_PRIORITY
:
1799 case IFLA_BR_VLAN_PROTOCOL
:
1800 case IFLA_BR_GROUP_FWD_MASK
:
1801 case IFLA_BR_ROOT_PORT
:
1802 case IFLA_BR_VLAN_DEFAULT_PVID
:
1803 u16
= NLA_DATA(nlattr
);
1804 *u16
= tswap16(*u16
);
1807 case IFLA_BR_FORWARD_DELAY
:
1808 case IFLA_BR_HELLO_TIME
:
1809 case IFLA_BR_MAX_AGE
:
1810 case IFLA_BR_AGEING_TIME
:
1811 case IFLA_BR_STP_STATE
:
1812 case IFLA_BR_ROOT_PATH_COST
:
1813 case IFLA_BR_MCAST_HASH_ELASTICITY
:
1814 case IFLA_BR_MCAST_HASH_MAX
:
1815 case IFLA_BR_MCAST_LAST_MEMBER_CNT
:
1816 case IFLA_BR_MCAST_STARTUP_QUERY_CNT
:
1817 u32
= NLA_DATA(nlattr
);
1818 *u32
= tswap32(*u32
);
1821 case IFLA_BR_HELLO_TIMER
:
1822 case IFLA_BR_TCN_TIMER
:
1823 case IFLA_BR_GC_TIMER
:
1824 case IFLA_BR_TOPOLOGY_CHANGE_TIMER
:
1825 case IFLA_BR_MCAST_LAST_MEMBER_INTVL
:
1826 case IFLA_BR_MCAST_MEMBERSHIP_INTVL
:
1827 case IFLA_BR_MCAST_QUERIER_INTVL
:
1828 case IFLA_BR_MCAST_QUERY_INTVL
:
1829 case IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
:
1830 case IFLA_BR_MCAST_STARTUP_QUERY_INTVL
:
1831 u64
= NLA_DATA(nlattr
);
1832 *u64
= tswap64(*u64
);
1834 /* ifla_bridge_id: uin8_t[] */
1835 case IFLA_BR_ROOT_ID
:
1836 case IFLA_BR_BRIDGE_ID
:
1839 gemu_log("Unknown IFLA_BR type %d\n", nlattr
->nla_type
);
1845 static abi_long
host_to_target_slave_data_bridge_nlattr(struct nlattr
*nlattr
,
1852 switch (nlattr
->nla_type
) {
1854 case IFLA_BRPORT_STATE
:
1855 case IFLA_BRPORT_MODE
:
1856 case IFLA_BRPORT_GUARD
:
1857 case IFLA_BRPORT_PROTECT
:
1858 case IFLA_BRPORT_FAST_LEAVE
:
1859 case IFLA_BRPORT_LEARNING
:
1860 case IFLA_BRPORT_UNICAST_FLOOD
:
1861 case IFLA_BRPORT_PROXYARP
:
1862 case IFLA_BRPORT_LEARNING_SYNC
:
1863 case IFLA_BRPORT_PROXYARP_WIFI
:
1864 case IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
:
1865 case IFLA_BRPORT_CONFIG_PENDING
:
1866 case IFLA_BRPORT_MULTICAST_ROUTER
:
1869 case IFLA_BRPORT_PRIORITY
:
1870 case IFLA_BRPORT_DESIGNATED_PORT
:
1871 case IFLA_BRPORT_DESIGNATED_COST
:
1872 case IFLA_BRPORT_ID
:
1873 case IFLA_BRPORT_NO
:
1874 u16
= NLA_DATA(nlattr
);
1875 *u16
= tswap16(*u16
);
1878 case IFLA_BRPORT_COST
:
1879 u32
= NLA_DATA(nlattr
);
1880 *u32
= tswap32(*u32
);
1883 case IFLA_BRPORT_MESSAGE_AGE_TIMER
:
1884 case IFLA_BRPORT_FORWARD_DELAY_TIMER
:
1885 case IFLA_BRPORT_HOLD_TIMER
:
1886 u64
= NLA_DATA(nlattr
);
1887 *u64
= tswap64(*u64
);
1889 /* ifla_bridge_id: uint8_t[] */
1890 case IFLA_BRPORT_ROOT_ID
:
1891 case IFLA_BRPORT_BRIDGE_ID
:
1894 gemu_log("Unknown IFLA_BRPORT type %d\n", nlattr
->nla_type
);
1900 struct linkinfo_context
{
1907 static abi_long
host_to_target_data_linkinfo_nlattr(struct nlattr
*nlattr
,
1910 struct linkinfo_context
*li_context
= context
;
1912 switch (nlattr
->nla_type
) {
1914 case IFLA_INFO_KIND
:
1915 li_context
->name
= NLA_DATA(nlattr
);
1916 li_context
->len
= nlattr
->nla_len
- NLA_HDRLEN
;
1918 case IFLA_INFO_SLAVE_KIND
:
1919 li_context
->slave_name
= NLA_DATA(nlattr
);
1920 li_context
->slave_len
= nlattr
->nla_len
- NLA_HDRLEN
;
1923 case IFLA_INFO_XSTATS
:
1924 /* FIXME: only used by CAN */
1927 case IFLA_INFO_DATA
:
1928 if (strncmp(li_context
->name
, "bridge",
1929 li_context
->len
) == 0) {
1930 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
),
1933 host_to_target_data_bridge_nlattr
);
1935 gemu_log("Unknown IFLA_INFO_KIND %s\n", li_context
->name
);
1938 case IFLA_INFO_SLAVE_DATA
:
1939 if (strncmp(li_context
->slave_name
, "bridge",
1940 li_context
->slave_len
) == 0) {
1941 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
),
1944 host_to_target_slave_data_bridge_nlattr
);
1946 gemu_log("Unknown IFLA_INFO_SLAVE_KIND %s\n",
1947 li_context
->slave_name
);
1951 gemu_log("Unknown host IFLA_INFO type: %d\n", nlattr
->nla_type
);
1958 static abi_long
host_to_target_data_inet_nlattr(struct nlattr
*nlattr
,
1964 switch (nlattr
->nla_type
) {
1965 case IFLA_INET_CONF
:
1966 u32
= NLA_DATA(nlattr
);
1967 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u32
);
1969 u32
[i
] = tswap32(u32
[i
]);
1973 gemu_log("Unknown host AF_INET type: %d\n", nlattr
->nla_type
);
1978 static abi_long
host_to_target_data_inet6_nlattr(struct nlattr
*nlattr
,
1983 struct ifla_cacheinfo
*ci
;
1986 switch (nlattr
->nla_type
) {
1988 case IFLA_INET6_TOKEN
:
1991 case IFLA_INET6_ADDR_GEN_MODE
:
1994 case IFLA_INET6_FLAGS
:
1995 u32
= NLA_DATA(nlattr
);
1996 *u32
= tswap32(*u32
);
1999 case IFLA_INET6_CONF
:
2000 u32
= NLA_DATA(nlattr
);
2001 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u32
);
2003 u32
[i
] = tswap32(u32
[i
]);
2006 /* ifla_cacheinfo */
2007 case IFLA_INET6_CACHEINFO
:
2008 ci
= NLA_DATA(nlattr
);
2009 ci
->max_reasm_len
= tswap32(ci
->max_reasm_len
);
2010 ci
->tstamp
= tswap32(ci
->tstamp
);
2011 ci
->reachable_time
= tswap32(ci
->reachable_time
);
2012 ci
->retrans_time
= tswap32(ci
->retrans_time
);
2015 case IFLA_INET6_STATS
:
2016 case IFLA_INET6_ICMP6STATS
:
2017 u64
= NLA_DATA(nlattr
);
2018 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u64
);
2020 u64
[i
] = tswap64(u64
[i
]);
2024 gemu_log("Unknown host AF_INET6 type: %d\n", nlattr
->nla_type
);
2029 static abi_long
host_to_target_data_spec_nlattr(struct nlattr
*nlattr
,
2032 switch (nlattr
->nla_type
) {
2034 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
), nlattr
->nla_len
,
2036 host_to_target_data_inet_nlattr
);
2038 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
), nlattr
->nla_len
,
2040 host_to_target_data_inet6_nlattr
);
2042 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr
->nla_type
);
2048 static abi_long
host_to_target_data_link_rtattr(struct rtattr
*rtattr
)
2051 struct rtnl_link_stats
*st
;
2052 struct rtnl_link_stats64
*st64
;
2053 struct rtnl_link_ifmap
*map
;
2054 struct linkinfo_context li_context
;
2056 switch (rtattr
->rta_type
) {
2059 case IFLA_BROADCAST
:
2065 case IFLA_OPERSTATE
:
2068 case IFLA_PROTO_DOWN
:
2075 case IFLA_CARRIER_CHANGES
:
2076 case IFLA_NUM_RX_QUEUES
:
2077 case IFLA_NUM_TX_QUEUES
:
2078 case IFLA_PROMISCUITY
:
2080 case IFLA_LINK_NETNSID
:
2084 u32
= RTA_DATA(rtattr
);
2085 *u32
= tswap32(*u32
);
2087 /* struct rtnl_link_stats */
2089 st
= RTA_DATA(rtattr
);
2090 st
->rx_packets
= tswap32(st
->rx_packets
);
2091 st
->tx_packets
= tswap32(st
->tx_packets
);
2092 st
->rx_bytes
= tswap32(st
->rx_bytes
);
2093 st
->tx_bytes
= tswap32(st
->tx_bytes
);
2094 st
->rx_errors
= tswap32(st
->rx_errors
);
2095 st
->tx_errors
= tswap32(st
->tx_errors
);
2096 st
->rx_dropped
= tswap32(st
->rx_dropped
);
2097 st
->tx_dropped
= tswap32(st
->tx_dropped
);
2098 st
->multicast
= tswap32(st
->multicast
);
2099 st
->collisions
= tswap32(st
->collisions
);
2101 /* detailed rx_errors: */
2102 st
->rx_length_errors
= tswap32(st
->rx_length_errors
);
2103 st
->rx_over_errors
= tswap32(st
->rx_over_errors
);
2104 st
->rx_crc_errors
= tswap32(st
->rx_crc_errors
);
2105 st
->rx_frame_errors
= tswap32(st
->rx_frame_errors
);
2106 st
->rx_fifo_errors
= tswap32(st
->rx_fifo_errors
);
2107 st
->rx_missed_errors
= tswap32(st
->rx_missed_errors
);
2109 /* detailed tx_errors */
2110 st
->tx_aborted_errors
= tswap32(st
->tx_aborted_errors
);
2111 st
->tx_carrier_errors
= tswap32(st
->tx_carrier_errors
);
2112 st
->tx_fifo_errors
= tswap32(st
->tx_fifo_errors
);
2113 st
->tx_heartbeat_errors
= tswap32(st
->tx_heartbeat_errors
);
2114 st
->tx_window_errors
= tswap32(st
->tx_window_errors
);
2117 st
->rx_compressed
= tswap32(st
->rx_compressed
);
2118 st
->tx_compressed
= tswap32(st
->tx_compressed
);
2120 /* struct rtnl_link_stats64 */
2122 st64
= RTA_DATA(rtattr
);
2123 st64
->rx_packets
= tswap64(st64
->rx_packets
);
2124 st64
->tx_packets
= tswap64(st64
->tx_packets
);
2125 st64
->rx_bytes
= tswap64(st64
->rx_bytes
);
2126 st64
->tx_bytes
= tswap64(st64
->tx_bytes
);
2127 st64
->rx_errors
= tswap64(st64
->rx_errors
);
2128 st64
->tx_errors
= tswap64(st64
->tx_errors
);
2129 st64
->rx_dropped
= tswap64(st64
->rx_dropped
);
2130 st64
->tx_dropped
= tswap64(st64
->tx_dropped
);
2131 st64
->multicast
= tswap64(st64
->multicast
);
2132 st64
->collisions
= tswap64(st64
->collisions
);
2134 /* detailed rx_errors: */
2135 st64
->rx_length_errors
= tswap64(st64
->rx_length_errors
);
2136 st64
->rx_over_errors
= tswap64(st64
->rx_over_errors
);
2137 st64
->rx_crc_errors
= tswap64(st64
->rx_crc_errors
);
2138 st64
->rx_frame_errors
= tswap64(st64
->rx_frame_errors
);
2139 st64
->rx_fifo_errors
= tswap64(st64
->rx_fifo_errors
);
2140 st64
->rx_missed_errors
= tswap64(st64
->rx_missed_errors
);
2142 /* detailed tx_errors */
2143 st64
->tx_aborted_errors
= tswap64(st64
->tx_aborted_errors
);
2144 st64
->tx_carrier_errors
= tswap64(st64
->tx_carrier_errors
);
2145 st64
->tx_fifo_errors
= tswap64(st64
->tx_fifo_errors
);
2146 st64
->tx_heartbeat_errors
= tswap64(st64
->tx_heartbeat_errors
);
2147 st64
->tx_window_errors
= tswap64(st64
->tx_window_errors
);
2150 st64
->rx_compressed
= tswap64(st64
->rx_compressed
);
2151 st64
->tx_compressed
= tswap64(st64
->tx_compressed
);
2153 /* struct rtnl_link_ifmap */
2155 map
= RTA_DATA(rtattr
);
2156 map
->mem_start
= tswap64(map
->mem_start
);
2157 map
->mem_end
= tswap64(map
->mem_end
);
2158 map
->base_addr
= tswap64(map
->base_addr
);
2159 map
->irq
= tswap16(map
->irq
);
2163 memset(&li_context
, 0, sizeof(li_context
));
2164 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2166 host_to_target_data_linkinfo_nlattr
);
2168 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2170 host_to_target_data_spec_nlattr
);
2172 gemu_log("Unknown host IFLA type: %d\n", rtattr
->rta_type
);
2178 static abi_long
host_to_target_data_addr_rtattr(struct rtattr
*rtattr
)
2181 struct ifa_cacheinfo
*ci
;
2183 switch (rtattr
->rta_type
) {
2184 /* binary: depends on family type */
2194 u32
= RTA_DATA(rtattr
);
2195 *u32
= tswap32(*u32
);
2197 /* struct ifa_cacheinfo */
2199 ci
= RTA_DATA(rtattr
);
2200 ci
->ifa_prefered
= tswap32(ci
->ifa_prefered
);
2201 ci
->ifa_valid
= tswap32(ci
->ifa_valid
);
2202 ci
->cstamp
= tswap32(ci
->cstamp
);
2203 ci
->tstamp
= tswap32(ci
->tstamp
);
2206 gemu_log("Unknown host IFA type: %d\n", rtattr
->rta_type
);
2212 static abi_long
host_to_target_data_route_rtattr(struct rtattr
*rtattr
)
2215 switch (rtattr
->rta_type
) {
2216 /* binary: depends on family type */
2225 u32
= RTA_DATA(rtattr
);
2226 *u32
= tswap32(*u32
);
2229 gemu_log("Unknown host RTA type: %d\n", rtattr
->rta_type
);
2235 static abi_long
host_to_target_link_rtattr(struct rtattr
*rtattr
,
2236 uint32_t rtattr_len
)
2238 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2239 host_to_target_data_link_rtattr
);
2242 static abi_long
host_to_target_addr_rtattr(struct rtattr
*rtattr
,
2243 uint32_t rtattr_len
)
2245 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2246 host_to_target_data_addr_rtattr
);
2249 static abi_long
host_to_target_route_rtattr(struct rtattr
*rtattr
,
2250 uint32_t rtattr_len
)
2252 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2253 host_to_target_data_route_rtattr
);
2256 static abi_long
host_to_target_data_route(struct nlmsghdr
*nlh
)
2259 struct ifinfomsg
*ifi
;
2260 struct ifaddrmsg
*ifa
;
2263 nlmsg_len
= nlh
->nlmsg_len
;
2264 switch (nlh
->nlmsg_type
) {
2268 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
2269 ifi
= NLMSG_DATA(nlh
);
2270 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2271 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2272 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2273 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2274 host_to_target_link_rtattr(IFLA_RTA(ifi
),
2275 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifi
)));
2281 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
2282 ifa
= NLMSG_DATA(nlh
);
2283 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2284 host_to_target_addr_rtattr(IFA_RTA(ifa
),
2285 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifa
)));
2291 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
2292 rtm
= NLMSG_DATA(nlh
);
2293 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2294 host_to_target_route_rtattr(RTM_RTA(rtm
),
2295 nlmsg_len
- NLMSG_LENGTH(sizeof(*rtm
)));
2299 return -TARGET_EINVAL
;
2304 static inline abi_long
host_to_target_nlmsg_route(struct nlmsghdr
*nlh
,
2307 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_route
);
2310 static abi_long
target_to_host_for_each_rtattr(struct rtattr
*rtattr
,
2312 abi_long (*target_to_host_rtattr
)
2317 while (len
>= sizeof(struct rtattr
)) {
2318 if (tswap16(rtattr
->rta_len
) < sizeof(struct rtattr
) ||
2319 tswap16(rtattr
->rta_len
) > len
) {
2322 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
2323 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
2324 ret
= target_to_host_rtattr(rtattr
);
2328 len
-= RTA_ALIGN(rtattr
->rta_len
);
2329 rtattr
= (struct rtattr
*)(((char *)rtattr
) +
2330 RTA_ALIGN(rtattr
->rta_len
));
2335 static abi_long
target_to_host_data_link_rtattr(struct rtattr
*rtattr
)
2337 switch (rtattr
->rta_type
) {
2339 gemu_log("Unknown target IFLA type: %d\n", rtattr
->rta_type
);
2345 static abi_long
target_to_host_data_addr_rtattr(struct rtattr
*rtattr
)
2347 switch (rtattr
->rta_type
) {
2348 /* binary: depends on family type */
2353 gemu_log("Unknown target IFA type: %d\n", rtattr
->rta_type
);
2359 static abi_long
target_to_host_data_route_rtattr(struct rtattr
*rtattr
)
2362 switch (rtattr
->rta_type
) {
2363 /* binary: depends on family type */
2370 u32
= RTA_DATA(rtattr
);
2371 *u32
= tswap32(*u32
);
2374 gemu_log("Unknown target RTA type: %d\n", rtattr
->rta_type
);
2380 static void target_to_host_link_rtattr(struct rtattr
*rtattr
,
2381 uint32_t rtattr_len
)
2383 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2384 target_to_host_data_link_rtattr
);
2387 static void target_to_host_addr_rtattr(struct rtattr
*rtattr
,
2388 uint32_t rtattr_len
)
2390 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2391 target_to_host_data_addr_rtattr
);
2394 static void target_to_host_route_rtattr(struct rtattr
*rtattr
,
2395 uint32_t rtattr_len
)
2397 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2398 target_to_host_data_route_rtattr
);
2401 static abi_long
target_to_host_data_route(struct nlmsghdr
*nlh
)
2403 struct ifinfomsg
*ifi
;
2404 struct ifaddrmsg
*ifa
;
2407 switch (nlh
->nlmsg_type
) {
2412 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
2413 ifi
= NLMSG_DATA(nlh
);
2414 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2415 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2416 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2417 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2418 target_to_host_link_rtattr(IFLA_RTA(ifi
), nlh
->nlmsg_len
-
2419 NLMSG_LENGTH(sizeof(*ifi
)));
2425 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
2426 ifa
= NLMSG_DATA(nlh
);
2427 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2428 target_to_host_addr_rtattr(IFA_RTA(ifa
), nlh
->nlmsg_len
-
2429 NLMSG_LENGTH(sizeof(*ifa
)));
2436 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
2437 rtm
= NLMSG_DATA(nlh
);
2438 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2439 target_to_host_route_rtattr(RTM_RTA(rtm
), nlh
->nlmsg_len
-
2440 NLMSG_LENGTH(sizeof(*rtm
)));
2444 return -TARGET_EOPNOTSUPP
;
2449 static abi_long
target_to_host_nlmsg_route(struct nlmsghdr
*nlh
, size_t len
)
2451 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_route
);
2453 #endif /* CONFIG_RTNETLINK */
2455 static abi_long
host_to_target_data_audit(struct nlmsghdr
*nlh
)
2457 switch (nlh
->nlmsg_type
) {
2459 gemu_log("Unknown host audit message type %d\n",
2461 return -TARGET_EINVAL
;
2466 static inline abi_long
host_to_target_nlmsg_audit(struct nlmsghdr
*nlh
,
2469 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_audit
);
2472 static abi_long
target_to_host_data_audit(struct nlmsghdr
*nlh
)
2474 switch (nlh
->nlmsg_type
) {
2476 case AUDIT_FIRST_USER_MSG
... AUDIT_LAST_USER_MSG
:
2477 case AUDIT_FIRST_USER_MSG2
... AUDIT_LAST_USER_MSG2
:
2480 gemu_log("Unknown target audit message type %d\n",
2482 return -TARGET_EINVAL
;
2488 static abi_long
target_to_host_nlmsg_audit(struct nlmsghdr
*nlh
, size_t len
)
2490 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_audit
);
2493 /* do_setsockopt() Must return target values and target errnos. */
2494 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
2495 abi_ulong optval_addr
, socklen_t optlen
)
2499 struct ip_mreqn
*ip_mreq
;
2500 struct ip_mreq_source
*ip_mreq_source
;
2504 /* TCP options all take an 'int' value. */
2505 if (optlen
< sizeof(uint32_t))
2506 return -TARGET_EINVAL
;
2508 if (get_user_u32(val
, optval_addr
))
2509 return -TARGET_EFAULT
;
2510 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2517 case IP_ROUTER_ALERT
:
2521 case IP_MTU_DISCOVER
:
2527 case IP_MULTICAST_TTL
:
2528 case IP_MULTICAST_LOOP
:
2530 if (optlen
>= sizeof(uint32_t)) {
2531 if (get_user_u32(val
, optval_addr
))
2532 return -TARGET_EFAULT
;
2533 } else if (optlen
>= 1) {
2534 if (get_user_u8(val
, optval_addr
))
2535 return -TARGET_EFAULT
;
2537 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2539 case IP_ADD_MEMBERSHIP
:
2540 case IP_DROP_MEMBERSHIP
:
2541 if (optlen
< sizeof (struct target_ip_mreq
) ||
2542 optlen
> sizeof (struct target_ip_mreqn
))
2543 return -TARGET_EINVAL
;
2545 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
2546 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
2547 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
2550 case IP_BLOCK_SOURCE
:
2551 case IP_UNBLOCK_SOURCE
:
2552 case IP_ADD_SOURCE_MEMBERSHIP
:
2553 case IP_DROP_SOURCE_MEMBERSHIP
:
2554 if (optlen
!= sizeof (struct target_ip_mreq_source
))
2555 return -TARGET_EINVAL
;
2557 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2558 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
2559 unlock_user (ip_mreq_source
, optval_addr
, 0);
2568 case IPV6_MTU_DISCOVER
:
2571 case IPV6_RECVPKTINFO
:
2573 if (optlen
< sizeof(uint32_t)) {
2574 return -TARGET_EINVAL
;
2576 if (get_user_u32(val
, optval_addr
)) {
2577 return -TARGET_EFAULT
;
2579 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2580 &val
, sizeof(val
)));
2589 /* struct icmp_filter takes an u32 value */
2590 if (optlen
< sizeof(uint32_t)) {
2591 return -TARGET_EINVAL
;
2594 if (get_user_u32(val
, optval_addr
)) {
2595 return -TARGET_EFAULT
;
2597 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2598 &val
, sizeof(val
)));
2605 case TARGET_SOL_SOCKET
:
2607 case TARGET_SO_RCVTIMEO
:
2611 optname
= SO_RCVTIMEO
;
2614 if (optlen
!= sizeof(struct target_timeval
)) {
2615 return -TARGET_EINVAL
;
2618 if (copy_from_user_timeval(&tv
, optval_addr
)) {
2619 return -TARGET_EFAULT
;
2622 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2626 case TARGET_SO_SNDTIMEO
:
2627 optname
= SO_SNDTIMEO
;
2629 case TARGET_SO_ATTACH_FILTER
:
2631 struct target_sock_fprog
*tfprog
;
2632 struct target_sock_filter
*tfilter
;
2633 struct sock_fprog fprog
;
2634 struct sock_filter
*filter
;
2637 if (optlen
!= sizeof(*tfprog
)) {
2638 return -TARGET_EINVAL
;
2640 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
2641 return -TARGET_EFAULT
;
2643 if (!lock_user_struct(VERIFY_READ
, tfilter
,
2644 tswapal(tfprog
->filter
), 0)) {
2645 unlock_user_struct(tfprog
, optval_addr
, 1);
2646 return -TARGET_EFAULT
;
2649 fprog
.len
= tswap16(tfprog
->len
);
2650 filter
= g_try_new(struct sock_filter
, fprog
.len
);
2651 if (filter
== NULL
) {
2652 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2653 unlock_user_struct(tfprog
, optval_addr
, 1);
2654 return -TARGET_ENOMEM
;
2656 for (i
= 0; i
< fprog
.len
; i
++) {
2657 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2658 filter
[i
].jt
= tfilter
[i
].jt
;
2659 filter
[i
].jf
= tfilter
[i
].jf
;
2660 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2662 fprog
.filter
= filter
;
2664 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2665 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2668 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2669 unlock_user_struct(tfprog
, optval_addr
, 1);
2672 case TARGET_SO_BINDTODEVICE
:
2674 char *dev_ifname
, *addr_ifname
;
2676 if (optlen
> IFNAMSIZ
- 1) {
2677 optlen
= IFNAMSIZ
- 1;
2679 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2681 return -TARGET_EFAULT
;
2683 optname
= SO_BINDTODEVICE
;
2684 addr_ifname
= alloca(IFNAMSIZ
);
2685 memcpy(addr_ifname
, dev_ifname
, optlen
);
2686 addr_ifname
[optlen
] = 0;
2687 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2688 addr_ifname
, optlen
));
2689 unlock_user (dev_ifname
, optval_addr
, 0);
2692 /* Options with 'int' argument. */
2693 case TARGET_SO_DEBUG
:
2696 case TARGET_SO_REUSEADDR
:
2697 optname
= SO_REUSEADDR
;
2699 case TARGET_SO_TYPE
:
2702 case TARGET_SO_ERROR
:
2705 case TARGET_SO_DONTROUTE
:
2706 optname
= SO_DONTROUTE
;
2708 case TARGET_SO_BROADCAST
:
2709 optname
= SO_BROADCAST
;
2711 case TARGET_SO_SNDBUF
:
2712 optname
= SO_SNDBUF
;
2714 case TARGET_SO_SNDBUFFORCE
:
2715 optname
= SO_SNDBUFFORCE
;
2717 case TARGET_SO_RCVBUF
:
2718 optname
= SO_RCVBUF
;
2720 case TARGET_SO_RCVBUFFORCE
:
2721 optname
= SO_RCVBUFFORCE
;
2723 case TARGET_SO_KEEPALIVE
:
2724 optname
= SO_KEEPALIVE
;
2726 case TARGET_SO_OOBINLINE
:
2727 optname
= SO_OOBINLINE
;
2729 case TARGET_SO_NO_CHECK
:
2730 optname
= SO_NO_CHECK
;
2732 case TARGET_SO_PRIORITY
:
2733 optname
= SO_PRIORITY
;
2736 case TARGET_SO_BSDCOMPAT
:
2737 optname
= SO_BSDCOMPAT
;
2740 case TARGET_SO_PASSCRED
:
2741 optname
= SO_PASSCRED
;
2743 case TARGET_SO_PASSSEC
:
2744 optname
= SO_PASSSEC
;
2746 case TARGET_SO_TIMESTAMP
:
2747 optname
= SO_TIMESTAMP
;
2749 case TARGET_SO_RCVLOWAT
:
2750 optname
= SO_RCVLOWAT
;
2756 if (optlen
< sizeof(uint32_t))
2757 return -TARGET_EINVAL
;
2759 if (get_user_u32(val
, optval_addr
))
2760 return -TARGET_EFAULT
;
2761 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
2765 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
2766 ret
= -TARGET_ENOPROTOOPT
;
2771 /* do_getsockopt() Must return target values and target errnos. */
2772 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
2773 abi_ulong optval_addr
, abi_ulong optlen
)
2780 case TARGET_SOL_SOCKET
:
2783 /* These don't just return a single integer */
2784 case TARGET_SO_LINGER
:
2785 case TARGET_SO_RCVTIMEO
:
2786 case TARGET_SO_SNDTIMEO
:
2787 case TARGET_SO_PEERNAME
:
2789 case TARGET_SO_PEERCRED
: {
2792 struct target_ucred
*tcr
;
2794 if (get_user_u32(len
, optlen
)) {
2795 return -TARGET_EFAULT
;
2798 return -TARGET_EINVAL
;
2802 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
2810 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
2811 return -TARGET_EFAULT
;
2813 __put_user(cr
.pid
, &tcr
->pid
);
2814 __put_user(cr
.uid
, &tcr
->uid
);
2815 __put_user(cr
.gid
, &tcr
->gid
);
2816 unlock_user_struct(tcr
, optval_addr
, 1);
2817 if (put_user_u32(len
, optlen
)) {
2818 return -TARGET_EFAULT
;
2822 /* Options with 'int' argument. */
2823 case TARGET_SO_DEBUG
:
2826 case TARGET_SO_REUSEADDR
:
2827 optname
= SO_REUSEADDR
;
2829 case TARGET_SO_TYPE
:
2832 case TARGET_SO_ERROR
:
2835 case TARGET_SO_DONTROUTE
:
2836 optname
= SO_DONTROUTE
;
2838 case TARGET_SO_BROADCAST
:
2839 optname
= SO_BROADCAST
;
2841 case TARGET_SO_SNDBUF
:
2842 optname
= SO_SNDBUF
;
2844 case TARGET_SO_RCVBUF
:
2845 optname
= SO_RCVBUF
;
2847 case TARGET_SO_KEEPALIVE
:
2848 optname
= SO_KEEPALIVE
;
2850 case TARGET_SO_OOBINLINE
:
2851 optname
= SO_OOBINLINE
;
2853 case TARGET_SO_NO_CHECK
:
2854 optname
= SO_NO_CHECK
;
2856 case TARGET_SO_PRIORITY
:
2857 optname
= SO_PRIORITY
;
2860 case TARGET_SO_BSDCOMPAT
:
2861 optname
= SO_BSDCOMPAT
;
2864 case TARGET_SO_PASSCRED
:
2865 optname
= SO_PASSCRED
;
2867 case TARGET_SO_TIMESTAMP
:
2868 optname
= SO_TIMESTAMP
;
2870 case TARGET_SO_RCVLOWAT
:
2871 optname
= SO_RCVLOWAT
;
2873 case TARGET_SO_ACCEPTCONN
:
2874 optname
= SO_ACCEPTCONN
;
2881 /* TCP options all take an 'int' value. */
2883 if (get_user_u32(len
, optlen
))
2884 return -TARGET_EFAULT
;
2886 return -TARGET_EINVAL
;
2888 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2891 if (optname
== SO_TYPE
) {
2892 val
= host_to_target_sock_type(val
);
2897 if (put_user_u32(val
, optval_addr
))
2898 return -TARGET_EFAULT
;
2900 if (put_user_u8(val
, optval_addr
))
2901 return -TARGET_EFAULT
;
2903 if (put_user_u32(len
, optlen
))
2904 return -TARGET_EFAULT
;
2911 case IP_ROUTER_ALERT
:
2915 case IP_MTU_DISCOVER
:
2921 case IP_MULTICAST_TTL
:
2922 case IP_MULTICAST_LOOP
:
2923 if (get_user_u32(len
, optlen
))
2924 return -TARGET_EFAULT
;
2926 return -TARGET_EINVAL
;
2928 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2931 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2933 if (put_user_u32(len
, optlen
)
2934 || put_user_u8(val
, optval_addr
))
2935 return -TARGET_EFAULT
;
2937 if (len
> sizeof(int))
2939 if (put_user_u32(len
, optlen
)
2940 || put_user_u32(val
, optval_addr
))
2941 return -TARGET_EFAULT
;
2945 ret
= -TARGET_ENOPROTOOPT
;
2951 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2953 ret
= -TARGET_EOPNOTSUPP
;
2959 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
2960 int count
, int copy
)
2962 struct target_iovec
*target_vec
;
2964 abi_ulong total_len
, max_len
;
2967 bool bad_address
= false;
2973 if (count
< 0 || count
> IOV_MAX
) {
2978 vec
= g_try_new0(struct iovec
, count
);
2984 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2985 count
* sizeof(struct target_iovec
), 1);
2986 if (target_vec
== NULL
) {
2991 /* ??? If host page size > target page size, this will result in a
2992 value larger than what we can actually support. */
2993 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
2996 for (i
= 0; i
< count
; i
++) {
2997 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2998 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3003 } else if (len
== 0) {
3004 /* Zero length pointer is ignored. */
3005 vec
[i
].iov_base
= 0;
3007 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
3008 /* If the first buffer pointer is bad, this is a fault. But
3009 * subsequent bad buffers will result in a partial write; this
3010 * is realized by filling the vector with null pointers and
3012 if (!vec
[i
].iov_base
) {
3023 if (len
> max_len
- total_len
) {
3024 len
= max_len
- total_len
;
3027 vec
[i
].iov_len
= len
;
3031 unlock_user(target_vec
, target_addr
, 0);
3036 if (tswapal(target_vec
[i
].iov_len
) > 0) {
3037 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
3040 unlock_user(target_vec
, target_addr
, 0);
3047 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
3048 int count
, int copy
)
3050 struct target_iovec
*target_vec
;
3053 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3054 count
* sizeof(struct target_iovec
), 1);
3056 for (i
= 0; i
< count
; i
++) {
3057 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3058 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3062 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
3064 unlock_user(target_vec
, target_addr
, 0);
3070 static inline int target_to_host_sock_type(int *type
)
3073 int target_type
= *type
;
3075 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
3076 case TARGET_SOCK_DGRAM
:
3077 host_type
= SOCK_DGRAM
;
3079 case TARGET_SOCK_STREAM
:
3080 host_type
= SOCK_STREAM
;
3083 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
3086 if (target_type
& TARGET_SOCK_CLOEXEC
) {
3087 #if defined(SOCK_CLOEXEC)
3088 host_type
|= SOCK_CLOEXEC
;
3090 return -TARGET_EINVAL
;
3093 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3094 #if defined(SOCK_NONBLOCK)
3095 host_type
|= SOCK_NONBLOCK
;
3096 #elif !defined(O_NONBLOCK)
3097 return -TARGET_EINVAL
;
3104 /* Try to emulate socket type flags after socket creation. */
3105 static int sock_flags_fixup(int fd
, int target_type
)
3107 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3108 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3109 int flags
= fcntl(fd
, F_GETFL
);
3110 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
3112 return -TARGET_EINVAL
;
3119 static abi_long
packet_target_to_host_sockaddr(void *host_addr
,
3120 abi_ulong target_addr
,
3123 struct sockaddr
*addr
= host_addr
;
3124 struct target_sockaddr
*target_saddr
;
3126 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
3127 if (!target_saddr
) {
3128 return -TARGET_EFAULT
;
3131 memcpy(addr
, target_saddr
, len
);
3132 addr
->sa_family
= tswap16(target_saddr
->sa_family
);
3133 /* spkt_protocol is big-endian */
3135 unlock_user(target_saddr
, target_addr
, 0);
3139 static TargetFdTrans target_packet_trans
= {
3140 .target_to_host_addr
= packet_target_to_host_sockaddr
,
3143 #ifdef CONFIG_RTNETLINK
3144 static abi_long
netlink_route_target_to_host(void *buf
, size_t len
)
3148 ret
= target_to_host_nlmsg_route(buf
, len
);
3156 static abi_long
netlink_route_host_to_target(void *buf
, size_t len
)
3160 ret
= host_to_target_nlmsg_route(buf
, len
);
3168 static TargetFdTrans target_netlink_route_trans
= {
3169 .target_to_host_data
= netlink_route_target_to_host
,
3170 .host_to_target_data
= netlink_route_host_to_target
,
3172 #endif /* CONFIG_RTNETLINK */
3174 static abi_long
netlink_audit_target_to_host(void *buf
, size_t len
)
3178 ret
= target_to_host_nlmsg_audit(buf
, len
);
3186 static abi_long
netlink_audit_host_to_target(void *buf
, size_t len
)
3190 ret
= host_to_target_nlmsg_audit(buf
, len
);
3198 static TargetFdTrans target_netlink_audit_trans
= {
3199 .target_to_host_data
= netlink_audit_target_to_host
,
3200 .host_to_target_data
= netlink_audit_host_to_target
,
3203 /* do_socket() Must return target values and target errnos. */
3204 static abi_long
do_socket(int domain
, int type
, int protocol
)
3206 int target_type
= type
;
3209 ret
= target_to_host_sock_type(&type
);
3214 if (domain
== PF_NETLINK
&& !(
3215 #ifdef CONFIG_RTNETLINK
3216 protocol
== NETLINK_ROUTE
||
3218 protocol
== NETLINK_KOBJECT_UEVENT
||
3219 protocol
== NETLINK_AUDIT
)) {
3220 return -EPFNOSUPPORT
;
3223 if (domain
== AF_PACKET
||
3224 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
3225 protocol
= tswap16(protocol
);
3228 ret
= get_errno(socket(domain
, type
, protocol
));
3230 ret
= sock_flags_fixup(ret
, target_type
);
3231 if (type
== SOCK_PACKET
) {
3232 /* Manage an obsolete case :
3233 * if socket type is SOCK_PACKET, bind by name
3235 fd_trans_register(ret
, &target_packet_trans
);
3236 } else if (domain
== PF_NETLINK
) {
3238 #ifdef CONFIG_RTNETLINK
3240 fd_trans_register(ret
, &target_netlink_route_trans
);
3243 case NETLINK_KOBJECT_UEVENT
:
3244 /* nothing to do: messages are strings */
3247 fd_trans_register(ret
, &target_netlink_audit_trans
);
3250 g_assert_not_reached();
3257 /* do_bind() Must return target values and target errnos. */
3258 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
3264 if ((int)addrlen
< 0) {
3265 return -TARGET_EINVAL
;
3268 addr
= alloca(addrlen
+1);
3270 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3274 return get_errno(bind(sockfd
, addr
, addrlen
));
3277 /* do_connect() Must return target values and target errnos. */
3278 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
3284 if ((int)addrlen
< 0) {
3285 return -TARGET_EINVAL
;
3288 addr
= alloca(addrlen
+1);
3290 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3294 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
3297 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3298 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
3299 int flags
, int send
)
3305 abi_ulong target_vec
;
3307 if (msgp
->msg_name
) {
3308 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
3309 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
3310 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
3311 tswapal(msgp
->msg_name
),
3317 msg
.msg_name
= NULL
;
3318 msg
.msg_namelen
= 0;
3320 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
3321 msg
.msg_control
= alloca(msg
.msg_controllen
);
3322 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
3324 count
= tswapal(msgp
->msg_iovlen
);
3325 target_vec
= tswapal(msgp
->msg_iov
);
3326 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
3327 target_vec
, count
, send
);
3329 ret
= -host_to_target_errno(errno
);
3332 msg
.msg_iovlen
= count
;
3336 if (fd_trans_target_to_host_data(fd
)) {
3339 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
3340 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
3341 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
3342 msg
.msg_iov
->iov_len
);
3344 msg
.msg_iov
->iov_base
= host_msg
;
3345 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3349 ret
= target_to_host_cmsg(&msg
, msgp
);
3351 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3355 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
3356 if (!is_error(ret
)) {
3358 if (fd_trans_host_to_target_data(fd
)) {
3359 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
3362 ret
= host_to_target_cmsg(msgp
, &msg
);
3364 if (!is_error(ret
)) {
3365 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
3366 if (msg
.msg_name
!= NULL
) {
3367 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
3368 msg
.msg_name
, msg
.msg_namelen
);
3380 unlock_iovec(vec
, target_vec
, count
, !send
);
3385 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
3386 int flags
, int send
)
3389 struct target_msghdr
*msgp
;
3391 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
3395 return -TARGET_EFAULT
;
3397 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
3398 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
3402 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3403 * so it might not have this *mmsg-specific flag either.
3405 #ifndef MSG_WAITFORONE
3406 #define MSG_WAITFORONE 0x10000
3409 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
3410 unsigned int vlen
, unsigned int flags
,
3413 struct target_mmsghdr
*mmsgp
;
3417 if (vlen
> UIO_MAXIOV
) {
3421 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
3423 return -TARGET_EFAULT
;
3426 for (i
= 0; i
< vlen
; i
++) {
3427 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
3428 if (is_error(ret
)) {
3431 mmsgp
[i
].msg_len
= tswap32(ret
);
3432 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3433 if (flags
& MSG_WAITFORONE
) {
3434 flags
|= MSG_DONTWAIT
;
3438 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3440 /* Return number of datagrams sent if we sent any at all;
3441 * otherwise return the error.
3449 /* do_accept4() Must return target values and target errnos. */
3450 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3451 abi_ulong target_addrlen_addr
, int flags
)
3458 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3460 if (target_addr
== 0) {
3461 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
3464 /* linux returns EINVAL if addrlen pointer is invalid */
3465 if (get_user_u32(addrlen
, target_addrlen_addr
))
3466 return -TARGET_EINVAL
;
3468 if ((int)addrlen
< 0) {
3469 return -TARGET_EINVAL
;
3472 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3473 return -TARGET_EINVAL
;
3475 addr
= alloca(addrlen
);
3477 ret
= get_errno(safe_accept4(fd
, addr
, &addrlen
, host_flags
));
3478 if (!is_error(ret
)) {
3479 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3480 if (put_user_u32(addrlen
, target_addrlen_addr
))
3481 ret
= -TARGET_EFAULT
;
3486 /* do_getpeername() Must return target values and target errnos. */
3487 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3488 abi_ulong target_addrlen_addr
)
3494 if (get_user_u32(addrlen
, target_addrlen_addr
))
3495 return -TARGET_EFAULT
;
3497 if ((int)addrlen
< 0) {
3498 return -TARGET_EINVAL
;
3501 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3502 return -TARGET_EFAULT
;
3504 addr
= alloca(addrlen
);
3506 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
3507 if (!is_error(ret
)) {
3508 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3509 if (put_user_u32(addrlen
, target_addrlen_addr
))
3510 ret
= -TARGET_EFAULT
;
3515 /* do_getsockname() Must return target values and target errnos. */
3516 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3517 abi_ulong target_addrlen_addr
)
3523 if (get_user_u32(addrlen
, target_addrlen_addr
))
3524 return -TARGET_EFAULT
;
3526 if ((int)addrlen
< 0) {
3527 return -TARGET_EINVAL
;
3530 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3531 return -TARGET_EFAULT
;
3533 addr
= alloca(addrlen
);
3535 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
3536 if (!is_error(ret
)) {
3537 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3538 if (put_user_u32(addrlen
, target_addrlen_addr
))
3539 ret
= -TARGET_EFAULT
;
3544 /* do_socketpair() Must return target values and target errnos. */
3545 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3546 abi_ulong target_tab_addr
)
3551 target_to_host_sock_type(&type
);
3553 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3554 if (!is_error(ret
)) {
3555 if (put_user_s32(tab
[0], target_tab_addr
)
3556 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3557 ret
= -TARGET_EFAULT
;
3562 /* do_sendto() Must return target values and target errnos. */
3563 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3564 abi_ulong target_addr
, socklen_t addrlen
)
3568 void *copy_msg
= NULL
;
3571 if ((int)addrlen
< 0) {
3572 return -TARGET_EINVAL
;
3575 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3577 return -TARGET_EFAULT
;
3578 if (fd_trans_target_to_host_data(fd
)) {
3579 copy_msg
= host_msg
;
3580 host_msg
= g_malloc(len
);
3581 memcpy(host_msg
, copy_msg
, len
);
3582 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
3588 addr
= alloca(addrlen
+1);
3589 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
3593 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
3595 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
3600 host_msg
= copy_msg
;
3602 unlock_user(host_msg
, msg
, 0);
3606 /* do_recvfrom() Must return target values and target errnos. */
3607 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
3608 abi_ulong target_addr
,
3609 abi_ulong target_addrlen
)
3616 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
3618 return -TARGET_EFAULT
;
3620 if (get_user_u32(addrlen
, target_addrlen
)) {
3621 ret
= -TARGET_EFAULT
;
3624 if ((int)addrlen
< 0) {
3625 ret
= -TARGET_EINVAL
;
3628 addr
= alloca(addrlen
);
3629 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
3632 addr
= NULL
; /* To keep compiler quiet. */
3633 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
3635 if (!is_error(ret
)) {
3636 if (fd_trans_host_to_target_data(fd
)) {
3637 ret
= fd_trans_host_to_target_data(fd
)(host_msg
, ret
);
3640 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3641 if (put_user_u32(addrlen
, target_addrlen
)) {
3642 ret
= -TARGET_EFAULT
;
3646 unlock_user(host_msg
, msg
, len
);
3649 unlock_user(host_msg
, msg
, 0);
3654 #ifdef TARGET_NR_socketcall
3655 /* do_socketcall() Must return target values and target errnos. */
3656 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
3658 static const unsigned ac
[] = { /* number of arguments per call */
3659 [SOCKOP_socket
] = 3, /* domain, type, protocol */
3660 [SOCKOP_bind
] = 3, /* sockfd, addr, addrlen */
3661 [SOCKOP_connect
] = 3, /* sockfd, addr, addrlen */
3662 [SOCKOP_listen
] = 2, /* sockfd, backlog */
3663 [SOCKOP_accept
] = 3, /* sockfd, addr, addrlen */
3664 [SOCKOP_accept4
] = 4, /* sockfd, addr, addrlen, flags */
3665 [SOCKOP_getsockname
] = 3, /* sockfd, addr, addrlen */
3666 [SOCKOP_getpeername
] = 3, /* sockfd, addr, addrlen */
3667 [SOCKOP_socketpair
] = 4, /* domain, type, protocol, tab */
3668 [SOCKOP_send
] = 4, /* sockfd, msg, len, flags */
3669 [SOCKOP_recv
] = 4, /* sockfd, msg, len, flags */
3670 [SOCKOP_sendto
] = 6, /* sockfd, msg, len, flags, addr, addrlen */
3671 [SOCKOP_recvfrom
] = 6, /* sockfd, msg, len, flags, addr, addrlen */
3672 [SOCKOP_shutdown
] = 2, /* sockfd, how */
3673 [SOCKOP_sendmsg
] = 3, /* sockfd, msg, flags */
3674 [SOCKOP_recvmsg
] = 3, /* sockfd, msg, flags */
3675 [SOCKOP_sendmmsg
] = 4, /* sockfd, msgvec, vlen, flags */
3676 [SOCKOP_recvmmsg
] = 4, /* sockfd, msgvec, vlen, flags */
3677 [SOCKOP_setsockopt
] = 5, /* sockfd, level, optname, optval, optlen */
3678 [SOCKOP_getsockopt
] = 5, /* sockfd, level, optname, optval, optlen */
3680 abi_long a
[6]; /* max 6 args */
3682 /* first, collect the arguments in a[] according to ac[] */
3683 if (num
>= 0 && num
< ARRAY_SIZE(ac
)) {
3685 assert(ARRAY_SIZE(a
) >= ac
[num
]); /* ensure we have space for args */
3686 for (i
= 0; i
< ac
[num
]; ++i
) {
3687 if (get_user_ual(a
[i
], vptr
+ i
* sizeof(abi_long
)) != 0) {
3688 return -TARGET_EFAULT
;
3693 /* now when we have the args, actually handle the call */
3695 case SOCKOP_socket
: /* domain, type, protocol */
3696 return do_socket(a
[0], a
[1], a
[2]);
3697 case SOCKOP_bind
: /* sockfd, addr, addrlen */
3698 return do_bind(a
[0], a
[1], a
[2]);
3699 case SOCKOP_connect
: /* sockfd, addr, addrlen */
3700 return do_connect(a
[0], a
[1], a
[2]);
3701 case SOCKOP_listen
: /* sockfd, backlog */
3702 return get_errno(listen(a
[0], a
[1]));
3703 case SOCKOP_accept
: /* sockfd, addr, addrlen */
3704 return do_accept4(a
[0], a
[1], a
[2], 0);
3705 case SOCKOP_accept4
: /* sockfd, addr, addrlen, flags */
3706 return do_accept4(a
[0], a
[1], a
[2], a
[3]);
3707 case SOCKOP_getsockname
: /* sockfd, addr, addrlen */
3708 return do_getsockname(a
[0], a
[1], a
[2]);
3709 case SOCKOP_getpeername
: /* sockfd, addr, addrlen */
3710 return do_getpeername(a
[0], a
[1], a
[2]);
3711 case SOCKOP_socketpair
: /* domain, type, protocol, tab */
3712 return do_socketpair(a
[0], a
[1], a
[2], a
[3]);
3713 case SOCKOP_send
: /* sockfd, msg, len, flags */
3714 return do_sendto(a
[0], a
[1], a
[2], a
[3], 0, 0);
3715 case SOCKOP_recv
: /* sockfd, msg, len, flags */
3716 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], 0, 0);
3717 case SOCKOP_sendto
: /* sockfd, msg, len, flags, addr, addrlen */
3718 return do_sendto(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3719 case SOCKOP_recvfrom
: /* sockfd, msg, len, flags, addr, addrlen */
3720 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3721 case SOCKOP_shutdown
: /* sockfd, how */
3722 return get_errno(shutdown(a
[0], a
[1]));
3723 case SOCKOP_sendmsg
: /* sockfd, msg, flags */
3724 return do_sendrecvmsg(a
[0], a
[1], a
[2], 1);
3725 case SOCKOP_recvmsg
: /* sockfd, msg, flags */
3726 return do_sendrecvmsg(a
[0], a
[1], a
[2], 0);
3727 case SOCKOP_sendmmsg
: /* sockfd, msgvec, vlen, flags */
3728 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 1);
3729 case SOCKOP_recvmmsg
: /* sockfd, msgvec, vlen, flags */
3730 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 0);
3731 case SOCKOP_setsockopt
: /* sockfd, level, optname, optval, optlen */
3732 return do_setsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3733 case SOCKOP_getsockopt
: /* sockfd, level, optname, optval, optlen */
3734 return do_getsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3736 gemu_log("Unsupported socketcall: %d\n", num
);
3737 return -TARGET_ENOSYS
;
3742 #define N_SHM_REGIONS 32
3744 static struct shm_region
{
3748 } shm_regions
[N_SHM_REGIONS
];
3750 struct target_semid_ds
3752 struct target_ipc_perm sem_perm
;
3753 abi_ulong sem_otime
;
3754 #if !defined(TARGET_PPC64)
3755 abi_ulong __unused1
;
3757 abi_ulong sem_ctime
;
3758 #if !defined(TARGET_PPC64)
3759 abi_ulong __unused2
;
3761 abi_ulong sem_nsems
;
3762 abi_ulong __unused3
;
3763 abi_ulong __unused4
;
3766 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
3767 abi_ulong target_addr
)
3769 struct target_ipc_perm
*target_ip
;
3770 struct target_semid_ds
*target_sd
;
3772 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3773 return -TARGET_EFAULT
;
3774 target_ip
= &(target_sd
->sem_perm
);
3775 host_ip
->__key
= tswap32(target_ip
->__key
);
3776 host_ip
->uid
= tswap32(target_ip
->uid
);
3777 host_ip
->gid
= tswap32(target_ip
->gid
);
3778 host_ip
->cuid
= tswap32(target_ip
->cuid
);
3779 host_ip
->cgid
= tswap32(target_ip
->cgid
);
3780 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3781 host_ip
->mode
= tswap32(target_ip
->mode
);
3783 host_ip
->mode
= tswap16(target_ip
->mode
);
3785 #if defined(TARGET_PPC)
3786 host_ip
->__seq
= tswap32(target_ip
->__seq
);
3788 host_ip
->__seq
= tswap16(target_ip
->__seq
);
3790 unlock_user_struct(target_sd
, target_addr
, 0);
3794 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
3795 struct ipc_perm
*host_ip
)
3797 struct target_ipc_perm
*target_ip
;
3798 struct target_semid_ds
*target_sd
;
3800 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3801 return -TARGET_EFAULT
;
3802 target_ip
= &(target_sd
->sem_perm
);
3803 target_ip
->__key
= tswap32(host_ip
->__key
);
3804 target_ip
->uid
= tswap32(host_ip
->uid
);
3805 target_ip
->gid
= tswap32(host_ip
->gid
);
3806 target_ip
->cuid
= tswap32(host_ip
->cuid
);
3807 target_ip
->cgid
= tswap32(host_ip
->cgid
);
3808 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3809 target_ip
->mode
= tswap32(host_ip
->mode
);
3811 target_ip
->mode
= tswap16(host_ip
->mode
);
3813 #if defined(TARGET_PPC)
3814 target_ip
->__seq
= tswap32(host_ip
->__seq
);
3816 target_ip
->__seq
= tswap16(host_ip
->__seq
);
3818 unlock_user_struct(target_sd
, target_addr
, 1);
3822 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
3823 abi_ulong target_addr
)
3825 struct target_semid_ds
*target_sd
;
3827 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3828 return -TARGET_EFAULT
;
3829 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
3830 return -TARGET_EFAULT
;
3831 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
3832 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
3833 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
3834 unlock_user_struct(target_sd
, target_addr
, 0);
3838 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
3839 struct semid_ds
*host_sd
)
3841 struct target_semid_ds
*target_sd
;
3843 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3844 return -TARGET_EFAULT
;
3845 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
3846 return -TARGET_EFAULT
;
3847 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
3848 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
3849 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
3850 unlock_user_struct(target_sd
, target_addr
, 1);
/* Guest-ABI layout of struct seminfo (for semctl IPC_INFO/SEM_INFO).
 * Field list mirrors the host struct; all members copied by
 * host_to_target_seminfo() below.  NOTE(review): member types reconstructed
 * as int to match the kernel's struct seminfo — confirm against
 * syscall_defs.h. */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
3867 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
3868 struct seminfo
*host_seminfo
)
3870 struct target_seminfo
*target_seminfo
;
3871 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
3872 return -TARGET_EFAULT
;
3873 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
3874 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
3875 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
3876 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
3877 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
3878 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
3879 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
3880 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
3881 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
3882 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
3883 unlock_user_struct(target_seminfo
, target_addr
, 1);
3889 struct semid_ds
*buf
;
3890 unsigned short *array
;
3891 struct seminfo
*__buf
;
3894 union target_semun
{
3901 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
3902 abi_ulong target_addr
)
3905 unsigned short *array
;
3907 struct semid_ds semid_ds
;
3910 semun
.buf
= &semid_ds
;
3912 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3914 return get_errno(ret
);
3916 nsems
= semid_ds
.sem_nsems
;
3918 *host_array
= g_try_new(unsigned short, nsems
);
3920 return -TARGET_ENOMEM
;
3922 array
= lock_user(VERIFY_READ
, target_addr
,
3923 nsems
*sizeof(unsigned short), 1);
3925 g_free(*host_array
);
3926 return -TARGET_EFAULT
;
3929 for(i
=0; i
<nsems
; i
++) {
3930 __get_user((*host_array
)[i
], &array
[i
]);
3932 unlock_user(array
, target_addr
, 0);
3937 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
3938 unsigned short **host_array
)
3941 unsigned short *array
;
3943 struct semid_ds semid_ds
;
3946 semun
.buf
= &semid_ds
;
3948 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3950 return get_errno(ret
);
3952 nsems
= semid_ds
.sem_nsems
;
3954 array
= lock_user(VERIFY_WRITE
, target_addr
,
3955 nsems
*sizeof(unsigned short), 0);
3957 return -TARGET_EFAULT
;
3959 for(i
=0; i
<nsems
; i
++) {
3960 __put_user((*host_array
)[i
], &array
[i
]);
3962 g_free(*host_array
);
3963 unlock_user(array
, target_addr
, 1);
3968 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
3969 abi_ulong target_arg
)
3971 union target_semun target_su
= { .buf
= target_arg
};
3973 struct semid_ds dsarg
;
3974 unsigned short *array
= NULL
;
3975 struct seminfo seminfo
;
3976 abi_long ret
= -TARGET_EINVAL
;
3983 /* In 64 bit cross-endian situations, we will erroneously pick up
3984 * the wrong half of the union for the "val" element. To rectify
3985 * this, the entire 8-byte structure is byteswapped, followed by
3986 * a swap of the 4 byte val field. In other cases, the data is
3987 * already in proper host byte order. */
3988 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
3989 target_su
.buf
= tswapal(target_su
.buf
);
3990 arg
.val
= tswap32(target_su
.val
);
3992 arg
.val
= target_su
.val
;
3994 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3998 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
4002 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4003 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
4010 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
4014 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4015 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
4021 arg
.__buf
= &seminfo
;
4022 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4023 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
4031 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
/* Guest-ABI layout of struct sembuf (operand of semop(2)). */
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};
4044 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
4045 abi_ulong target_addr
,
4048 struct target_sembuf
*target_sembuf
;
4051 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
4052 nsops
*sizeof(struct target_sembuf
), 1);
4054 return -TARGET_EFAULT
;
4056 for(i
=0; i
<nsops
; i
++) {
4057 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
4058 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
4059 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
4062 unlock_user(target_sembuf
, target_addr
, 0);
4067 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
4069 struct sembuf sops
[nsops
];
4071 if (target_to_host_sembuf(sops
, ptr
, nsops
))
4072 return -TARGET_EFAULT
;
4074 return get_errno(safe_semtimedop(semid
, sops
, nsops
, NULL
));
4077 struct target_msqid_ds
4079 struct target_ipc_perm msg_perm
;
4080 abi_ulong msg_stime
;
4081 #if TARGET_ABI_BITS == 32
4082 abi_ulong __unused1
;
4084 abi_ulong msg_rtime
;
4085 #if TARGET_ABI_BITS == 32
4086 abi_ulong __unused2
;
4088 abi_ulong msg_ctime
;
4089 #if TARGET_ABI_BITS == 32
4090 abi_ulong __unused3
;
4092 abi_ulong __msg_cbytes
;
4094 abi_ulong msg_qbytes
;
4095 abi_ulong msg_lspid
;
4096 abi_ulong msg_lrpid
;
4097 abi_ulong __unused4
;
4098 abi_ulong __unused5
;
4101 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
4102 abi_ulong target_addr
)
4104 struct target_msqid_ds
*target_md
;
4106 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
4107 return -TARGET_EFAULT
;
4108 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
4109 return -TARGET_EFAULT
;
4110 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
4111 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
4112 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
4113 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
4114 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
4115 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
4116 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
4117 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
4118 unlock_user_struct(target_md
, target_addr
, 0);
4122 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
4123 struct msqid_ds
*host_md
)
4125 struct target_msqid_ds
*target_md
;
4127 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
4128 return -TARGET_EFAULT
;
4129 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
4130 return -TARGET_EFAULT
;
4131 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
4132 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
4133 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
4134 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
4135 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
4136 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
4137 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
4138 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
4139 unlock_user_struct(target_md
, target_addr
, 1);
/* Guest-ABI layout of struct msginfo (msgctl IPC_INFO/MSG_INFO). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
4154 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
4155 struct msginfo
*host_msginfo
)
4157 struct target_msginfo
*target_msginfo
;
4158 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
4159 return -TARGET_EFAULT
;
4160 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
4161 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
4162 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
4163 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
4164 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
4165 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
4166 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
4167 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
4168 unlock_user_struct(target_msginfo
, target_addr
, 1);
4172 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
4174 struct msqid_ds dsarg
;
4175 struct msginfo msginfo
;
4176 abi_long ret
= -TARGET_EINVAL
;
4184 if (target_to_host_msqid_ds(&dsarg
,ptr
))
4185 return -TARGET_EFAULT
;
4186 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
4187 if (host_to_target_msqid_ds(ptr
,&dsarg
))
4188 return -TARGET_EFAULT
;
4191 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
4195 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
4196 if (host_to_target_msginfo(ptr
, &msginfo
))
4197 return -TARGET_EFAULT
;
4204 struct target_msgbuf
{
4209 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
4210 ssize_t msgsz
, int msgflg
)
4212 struct target_msgbuf
*target_mb
;
4213 struct msgbuf
*host_mb
;
4217 return -TARGET_EINVAL
;
4220 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
4221 return -TARGET_EFAULT
;
4222 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4224 unlock_user_struct(target_mb
, msgp
, 0);
4225 return -TARGET_ENOMEM
;
4227 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
4228 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
4229 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
4231 unlock_user_struct(target_mb
, msgp
, 0);
4236 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
4237 ssize_t msgsz
, abi_long msgtyp
,
4240 struct target_msgbuf
*target_mb
;
4242 struct msgbuf
*host_mb
;
4246 return -TARGET_EINVAL
;
4249 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
4250 return -TARGET_EFAULT
;
4252 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4254 ret
= -TARGET_ENOMEM
;
4257 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
4260 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
4261 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
4262 if (!target_mtext
) {
4263 ret
= -TARGET_EFAULT
;
4266 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
4267 unlock_user(target_mtext
, target_mtext_addr
, ret
);
4270 target_mb
->mtype
= tswapal(host_mb
->mtype
);
4274 unlock_user_struct(target_mb
, msgp
, 1);
4279 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
4280 abi_ulong target_addr
)
4282 struct target_shmid_ds
*target_sd
;
4284 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4285 return -TARGET_EFAULT
;
4286 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
4287 return -TARGET_EFAULT
;
4288 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4289 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4290 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4291 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4292 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4293 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4294 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4295 unlock_user_struct(target_sd
, target_addr
, 0);
4299 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
4300 struct shmid_ds
*host_sd
)
4302 struct target_shmid_ds
*target_sd
;
4304 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4305 return -TARGET_EFAULT
;
4306 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
4307 return -TARGET_EFAULT
;
4308 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4309 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4310 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4311 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4312 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4313 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4314 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4315 unlock_user_struct(target_sd
, target_addr
, 1);
4319 struct target_shminfo
{
4327 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
4328 struct shminfo
*host_shminfo
)
4330 struct target_shminfo
*target_shminfo
;
4331 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
4332 return -TARGET_EFAULT
;
4333 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
4334 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
4335 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
4336 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
4337 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
4338 unlock_user_struct(target_shminfo
, target_addr
, 1);
4342 struct target_shm_info
{
4347 abi_ulong swap_attempts
;
4348 abi_ulong swap_successes
;
4351 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
4352 struct shm_info
*host_shm_info
)
4354 struct target_shm_info
*target_shm_info
;
4355 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
4356 return -TARGET_EFAULT
;
4357 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
4358 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
4359 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
4360 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
4361 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
4362 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
4363 unlock_user_struct(target_shm_info
, target_addr
, 1);
4367 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
4369 struct shmid_ds dsarg
;
4370 struct shminfo shminfo
;
4371 struct shm_info shm_info
;
4372 abi_long ret
= -TARGET_EINVAL
;
4380 if (target_to_host_shmid_ds(&dsarg
, buf
))
4381 return -TARGET_EFAULT
;
4382 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
4383 if (host_to_target_shmid_ds(buf
, &dsarg
))
4384 return -TARGET_EFAULT
;
4387 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
4388 if (host_to_target_shminfo(buf
, &shminfo
))
4389 return -TARGET_EFAULT
;
4392 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
4393 if (host_to_target_shm_info(buf
, &shm_info
))
4394 return -TARGET_EFAULT
;
4399 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
4406 static inline abi_ulong
do_shmat(int shmid
, abi_ulong shmaddr
, int shmflg
)
4410 struct shmid_ds shm_info
;
4413 /* find out the length of the shared memory segment */
4414 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
4415 if (is_error(ret
)) {
4416 /* can't get length, bail out */
4423 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
4425 abi_ulong mmap_start
;
4427 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
4429 if (mmap_start
== -1) {
4431 host_raddr
= (void *)-1;
4433 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
4436 if (host_raddr
== (void *)-1) {
4438 return get_errno((long)host_raddr
);
4440 raddr
=h2g((unsigned long)host_raddr
);
4442 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
4443 PAGE_VALID
| PAGE_READ
|
4444 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
4446 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
4447 if (!shm_regions
[i
].in_use
) {
4448 shm_regions
[i
].in_use
= true;
4449 shm_regions
[i
].start
= raddr
;
4450 shm_regions
[i
].size
= shm_info
.shm_segsz
;
4460 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
4464 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
4465 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
4466 shm_regions
[i
].in_use
= false;
4467 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
4472 return get_errno(shmdt(g2h(shmaddr
)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
static abi_long do_ipc(unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* old-style msgrcv packs msgp and msgtyp into a kludge
                 * struct pointed to by ptr */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
4581 /* kernel structure types definitions */
4583 #define STRUCT(name, ...) STRUCT_ ## name,
4584 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4586 #include "syscall_types.h"
4590 #undef STRUCT_SPECIAL
4592 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4593 #define STRUCT_SPECIAL(name)
4594 #include "syscall_types.h"
4596 #undef STRUCT_SPECIAL
4598 typedef struct IOCTLEntry IOCTLEntry
;
4600 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4601 int fd
, int cmd
, abi_long arg
);
4605 unsigned int host_cmd
;
4608 do_ioctl_fn
*do_ioctl
;
4609 const argtype arg_type
[5];
4612 #define IOC_R 0x0001
4613 #define IOC_W 0x0002
4614 #define IOC_RW (IOC_R | IOC_W)
4616 #define MAX_STRUCT_SIZE 4096
4618 #ifdef CONFIG_FIEMAP
4619 /* So fiemap access checks don't overflow on 32 bit systems.
4620 * This is very slightly smaller than the limit imposed by
4621 * the underlying kernel.
4623 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4624 / sizeof(struct fiemap_extent))
4626 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4627 int fd
, int cmd
, abi_long arg
)
4629 /* The parameter for this ioctl is a struct fiemap followed
4630 * by an array of struct fiemap_extent whose size is set
4631 * in fiemap->fm_extent_count. The array is filled in by the
4634 int target_size_in
, target_size_out
;
4636 const argtype
*arg_type
= ie
->arg_type
;
4637 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
4640 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
4644 assert(arg_type
[0] == TYPE_PTR
);
4645 assert(ie
->access
== IOC_RW
);
4647 target_size_in
= thunk_type_size(arg_type
, 0);
4648 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
4650 return -TARGET_EFAULT
;
4652 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4653 unlock_user(argptr
, arg
, 0);
4654 fm
= (struct fiemap
*)buf_temp
;
4655 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
4656 return -TARGET_EINVAL
;
4659 outbufsz
= sizeof (*fm
) +
4660 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
4662 if (outbufsz
> MAX_STRUCT_SIZE
) {
4663 /* We can't fit all the extents into the fixed size buffer.
4664 * Allocate one that is large enough and use it instead.
4666 fm
= g_try_malloc(outbufsz
);
4668 return -TARGET_ENOMEM
;
4670 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
4673 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, fm
));
4674 if (!is_error(ret
)) {
4675 target_size_out
= target_size_in
;
4676 /* An extent_count of 0 means we were only counting the extents
4677 * so there are no structs to copy
4679 if (fm
->fm_extent_count
!= 0) {
4680 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
4682 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
4684 ret
= -TARGET_EFAULT
;
4686 /* Convert the struct fiemap */
4687 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
4688 if (fm
->fm_extent_count
!= 0) {
4689 p
= argptr
+ target_size_in
;
4690 /* ...and then all the struct fiemap_extents */
4691 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
4692 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
4697 unlock_user(argptr
, arg
, target_size_out
);
4707 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4708 int fd
, int cmd
, abi_long arg
)
4710 const argtype
*arg_type
= ie
->arg_type
;
4714 struct ifconf
*host_ifconf
;
4716 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
4717 int target_ifreq_size
;
4722 abi_long target_ifc_buf
;
4726 assert(arg_type
[0] == TYPE_PTR
);
4727 assert(ie
->access
== IOC_RW
);
4730 target_size
= thunk_type_size(arg_type
, 0);
4732 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4734 return -TARGET_EFAULT
;
4735 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4736 unlock_user(argptr
, arg
, 0);
4738 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
4739 target_ifc_len
= host_ifconf
->ifc_len
;
4740 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
4742 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
4743 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
4744 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
4746 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
4747 if (outbufsz
> MAX_STRUCT_SIZE
) {
4748 /* We can't fit all the extents into the fixed size buffer.
4749 * Allocate one that is large enough and use it instead.
4751 host_ifconf
= malloc(outbufsz
);
4753 return -TARGET_ENOMEM
;
4755 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
4758 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
4760 host_ifconf
->ifc_len
= host_ifc_len
;
4761 host_ifconf
->ifc_buf
= host_ifc_buf
;
4763 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
4764 if (!is_error(ret
)) {
4765 /* convert host ifc_len to target ifc_len */
4767 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
4768 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
4769 host_ifconf
->ifc_len
= target_ifc_len
;
4771 /* restore target ifc_buf */
4773 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
4775 /* copy struct ifconf to target user */
4777 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4779 return -TARGET_EFAULT
;
4780 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
4781 unlock_user(argptr
, arg
, target_size
);
4783 /* copy ifreq[] to target user */
4785 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
4786 for (i
= 0; i
< nb_ifreq
; i
++) {
4787 thunk_convert(argptr
+ i
* target_ifreq_size
,
4788 host_ifc_buf
+ i
* sizeof(struct ifreq
),
4789 ifreq_arg_type
, THUNK_TARGET
);
4791 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
4801 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
4802 int cmd
, abi_long arg
)
4805 struct dm_ioctl
*host_dm
;
4806 abi_long guest_data
;
4807 uint32_t guest_data_size
;
4809 const argtype
*arg_type
= ie
->arg_type
;
4811 void *big_buf
= NULL
;
4815 target_size
= thunk_type_size(arg_type
, 0);
4816 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4818 ret
= -TARGET_EFAULT
;
4821 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4822 unlock_user(argptr
, arg
, 0);
4824 /* buf_temp is too small, so fetch things into a bigger buffer */
4825 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
4826 memcpy(big_buf
, buf_temp
, target_size
);
4830 guest_data
= arg
+ host_dm
->data_start
;
4831 if ((guest_data
- arg
) < 0) {
4835 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
4836 host_data
= (char*)host_dm
+ host_dm
->data_start
;
4838 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
4839 switch (ie
->host_cmd
) {
4841 case DM_LIST_DEVICES
:
4844 case DM_DEV_SUSPEND
:
4847 case DM_TABLE_STATUS
:
4848 case DM_TABLE_CLEAR
:
4850 case DM_LIST_VERSIONS
:
4854 case DM_DEV_SET_GEOMETRY
:
4855 /* data contains only strings */
4856 memcpy(host_data
, argptr
, guest_data_size
);
4859 memcpy(host_data
, argptr
, guest_data_size
);
4860 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
4864 void *gspec
= argptr
;
4865 void *cur_data
= host_data
;
4866 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
4867 int spec_size
= thunk_type_size(arg_type
, 0);
4870 for (i
= 0; i
< host_dm
->target_count
; i
++) {
4871 struct dm_target_spec
*spec
= cur_data
;
4875 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
4876 slen
= strlen((char*)gspec
+ spec_size
) + 1;
4878 spec
->next
= sizeof(*spec
) + slen
;
4879 strcpy((char*)&spec
[1], gspec
+ spec_size
);
4881 cur_data
+= spec
->next
;
4886 ret
= -TARGET_EINVAL
;
4887 unlock_user(argptr
, guest_data
, 0);
4890 unlock_user(argptr
, guest_data
, 0);
4892 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4893 if (!is_error(ret
)) {
4894 guest_data
= arg
+ host_dm
->data_start
;
4895 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
4896 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
4897 switch (ie
->host_cmd
) {
4902 case DM_DEV_SUSPEND
:
4905 case DM_TABLE_CLEAR
:
4907 case DM_DEV_SET_GEOMETRY
:
4908 /* no return data */
4910 case DM_LIST_DEVICES
:
4912 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
4913 uint32_t remaining_data
= guest_data_size
;
4914 void *cur_data
= argptr
;
4915 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
4916 int nl_size
= 12; /* can't use thunk_size due to alignment */
4919 uint32_t next
= nl
->next
;
4921 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
4923 if (remaining_data
< nl
->next
) {
4924 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
4927 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
4928 strcpy(cur_data
+ nl_size
, nl
->name
);
4929 cur_data
+= nl
->next
;
4930 remaining_data
-= nl
->next
;
4934 nl
= (void*)nl
+ next
;
4939 case DM_TABLE_STATUS
:
4941 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
4942 void *cur_data
= argptr
;
4943 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
4944 int spec_size
= thunk_type_size(arg_type
, 0);
4947 for (i
= 0; i
< host_dm
->target_count
; i
++) {
4948 uint32_t next
= spec
->next
;
4949 int slen
= strlen((char*)&spec
[1]) + 1;
4950 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
4951 if (guest_data_size
< spec
->next
) {
4952 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
4955 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
4956 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
4957 cur_data
= argptr
+ spec
->next
;
4958 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
4964 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
4965 int count
= *(uint32_t*)hdata
;
4966 uint64_t *hdev
= hdata
+ 8;
4967 uint64_t *gdev
= argptr
+ 8;
4970 *(uint32_t*)argptr
= tswap32(count
);
4971 for (i
= 0; i
< count
; i
++) {
4972 *gdev
= tswap64(*hdev
);
4978 case DM_LIST_VERSIONS
:
4980 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
4981 uint32_t remaining_data
= guest_data_size
;
4982 void *cur_data
= argptr
;
4983 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
4984 int vers_size
= thunk_type_size(arg_type
, 0);
4987 uint32_t next
= vers
->next
;
4989 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
4991 if (remaining_data
< vers
->next
) {
4992 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
4995 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
4996 strcpy(cur_data
+ vers_size
, vers
->name
);
4997 cur_data
+= vers
->next
;
4998 remaining_data
-= vers
->next
;
5002 vers
= (void*)vers
+ next
;
5007 unlock_user(argptr
, guest_data
, 0);
5008 ret
= -TARGET_EINVAL
;
5011 unlock_user(argptr
, guest_data
, guest_data_size
);
5013 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5015 ret
= -TARGET_EFAULT
;
5018 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5019 unlock_user(argptr
, arg
, target_size
);
5026 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5027 int cmd
, abi_long arg
)
5031 const argtype
*arg_type
= ie
->arg_type
;
5032 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
5035 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
5036 struct blkpg_partition host_part
;
5038 /* Read and convert blkpg */
5040 target_size
= thunk_type_size(arg_type
, 0);
5041 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5043 ret
= -TARGET_EFAULT
;
5046 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5047 unlock_user(argptr
, arg
, 0);
5049 switch (host_blkpg
->op
) {
5050 case BLKPG_ADD_PARTITION
:
5051 case BLKPG_DEL_PARTITION
:
5052 /* payload is struct blkpg_partition */
5055 /* Unknown opcode */
5056 ret
= -TARGET_EINVAL
;
5060 /* Read and convert blkpg->data */
5061 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
5062 target_size
= thunk_type_size(part_arg_type
, 0);
5063 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5065 ret
= -TARGET_EFAULT
;
5068 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
5069 unlock_user(argptr
, arg
, 0);
5071 /* Swizzle the data pointer to our local copy and call! */
5072 host_blkpg
->data
= &host_part
;
5073 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
5079 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5080 int fd
, int cmd
, abi_long arg
)
5082 const argtype
*arg_type
= ie
->arg_type
;
5083 const StructEntry
*se
;
5084 const argtype
*field_types
;
5085 const int *dst_offsets
, *src_offsets
;
5088 abi_ulong
*target_rt_dev_ptr
;
5089 unsigned long *host_rt_dev_ptr
;
5093 assert(ie
->access
== IOC_W
);
5094 assert(*arg_type
== TYPE_PTR
);
5096 assert(*arg_type
== TYPE_STRUCT
);
5097 target_size
= thunk_type_size(arg_type
, 0);
5098 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5100 return -TARGET_EFAULT
;
5103 assert(*arg_type
== (int)STRUCT_rtentry
);
5104 se
= struct_entries
+ *arg_type
++;
5105 assert(se
->convert
[0] == NULL
);
5106 /* convert struct here to be able to catch rt_dev string */
5107 field_types
= se
->field_types
;
5108 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
5109 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
5110 for (i
= 0; i
< se
->nb_fields
; i
++) {
5111 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
5112 assert(*field_types
== TYPE_PTRVOID
);
5113 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
5114 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
5115 if (*target_rt_dev_ptr
!= 0) {
5116 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
5117 tswapal(*target_rt_dev_ptr
));
5118 if (!*host_rt_dev_ptr
) {
5119 unlock_user(argptr
, arg
, 0);
5120 return -TARGET_EFAULT
;
5123 *host_rt_dev_ptr
= 0;
5128 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
5129 argptr
+ src_offsets
[i
],
5130 field_types
, THUNK_HOST
);
5132 unlock_user(argptr
, arg
, 0);
5134 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5135 if (*host_rt_dev_ptr
!= 0) {
5136 unlock_user((void *)*host_rt_dev_ptr
,
5137 *target_rt_dev_ptr
, 0);
5142 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5143 int fd
, int cmd
, abi_long arg
)
5145 int sig
= target_to_host_signal(arg
);
5146 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
5149 static IOCTLEntry ioctl_entries
[] = {
5150 #define IOCTL(cmd, access, ...) \
5151 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5152 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5153 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5158 /* ??? Implement proper locking for ioctls. */
5159 /* do_ioctl() Must return target values and target errnos. */
5160 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
5162 const IOCTLEntry
*ie
;
5163 const argtype
*arg_type
;
5165 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
5171 if (ie
->target_cmd
== 0) {
5172 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
5173 return -TARGET_ENOSYS
;
5175 if (ie
->target_cmd
== cmd
)
5179 arg_type
= ie
->arg_type
;
5181 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
5184 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
5187 switch(arg_type
[0]) {
5190 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
5194 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
5198 target_size
= thunk_type_size(arg_type
, 0);
5199 switch(ie
->access
) {
5201 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5202 if (!is_error(ret
)) {
5203 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5205 return -TARGET_EFAULT
;
5206 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5207 unlock_user(argptr
, arg
, target_size
);
5211 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5213 return -TARGET_EFAULT
;
5214 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5215 unlock_user(argptr
, arg
, 0);
5216 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5220 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5222 return -TARGET_EFAULT
;
5223 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5224 unlock_user(argptr
, arg
, 0);
5225 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5226 if (!is_error(ret
)) {
5227 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5229 return -TARGET_EFAULT
;
5230 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5231 unlock_user(argptr
, arg
, target_size
);
5237 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5238 (long)cmd
, arg_type
[0]);
5239 ret
= -TARGET_ENOSYS
;
5245 static const bitmask_transtbl iflag_tbl
[] = {
5246 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
5247 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
5248 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
5249 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
5250 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
5251 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
5252 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
5253 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
5254 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
5255 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
5256 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
5257 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
5258 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
5259 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
5263 static const bitmask_transtbl oflag_tbl
[] = {
5264 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
5265 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
5266 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
5267 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
5268 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
5269 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
5270 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
5271 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
5272 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
5273 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
5274 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
5275 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
5276 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
5277 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
5278 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
5279 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
5280 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
5281 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
5282 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
5283 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
5284 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
5285 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
5286 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
5287 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
5291 static const bitmask_transtbl cflag_tbl
[] = {
5292 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
5293 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
5294 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
5295 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
5296 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
5297 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
5298 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
5299 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
5300 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
5301 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
5302 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
5303 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
5304 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
5305 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
5306 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
5307 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
5308 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
5309 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
5310 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
5311 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
5312 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
5313 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
5314 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
5315 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
5316 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
5317 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
5318 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
5319 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
5320 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
5321 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
5322 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
5326 static const bitmask_transtbl lflag_tbl
[] = {
5327 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
5328 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
5329 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
5330 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
5331 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
5332 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
5333 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
5334 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
5335 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
5336 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
5337 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
5338 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
5339 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
5340 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
5341 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
5345 static void target_to_host_termios (void *dst
, const void *src
)
5347 struct host_termios
*host
= dst
;
5348 const struct target_termios
*target
= src
;
5351 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
5353 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
5355 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
5357 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
5358 host
->c_line
= target
->c_line
;
5360 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
5361 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
5362 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
5363 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
5364 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
5365 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
5366 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
5367 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
5368 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
5369 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
5370 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
5371 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
5372 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
5373 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
5374 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
5375 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
5376 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
5377 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
5380 static void host_to_target_termios (void *dst
, const void *src
)
5382 struct target_termios
*target
= dst
;
5383 const struct host_termios
*host
= src
;
5386 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
5388 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
5390 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
5392 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
5393 target
->c_line
= host
->c_line
;
5395 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
5396 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
5397 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
5398 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
5399 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
5400 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
5401 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
5402 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
5403 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
5404 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
5405 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
5406 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
5407 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
5408 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
5409 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
5410 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
5411 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
5412 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
5415 static const StructEntry struct_termios_def
= {
5416 .convert
= { host_to_target_termios
, target_to_host_termios
},
5417 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
5418 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
5421 static bitmask_transtbl mmap_flags_tbl
[] = {
5422 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
5423 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
5424 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
5425 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
, MAP_ANONYMOUS
, MAP_ANONYMOUS
},
5426 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
, MAP_GROWSDOWN
, MAP_GROWSDOWN
},
5427 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
, MAP_DENYWRITE
, MAP_DENYWRITE
},
5428 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
, MAP_EXECUTABLE
, MAP_EXECUTABLE
},
5429 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
5430 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
, MAP_NORESERVE
,
5435 #if defined(TARGET_I386)
5437 /* NOTE: there is really one LDT for all the threads */
5438 static uint8_t *ldt_table
;
5440 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
5447 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
5448 if (size
> bytecount
)
5450 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
5452 return -TARGET_EFAULT
;
5453 /* ??? Should this by byteswapped? */
5454 memcpy(p
, ldt_table
, size
);
5455 unlock_user(p
, ptr
, size
);
5459 /* XXX: add locking support */
5460 static abi_long
write_ldt(CPUX86State
*env
,
5461 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
5463 struct target_modify_ldt_ldt_s ldt_info
;
5464 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5465 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5466 int seg_not_present
, useable
, lm
;
5467 uint32_t *lp
, entry_1
, entry_2
;
5469 if (bytecount
!= sizeof(ldt_info
))
5470 return -TARGET_EINVAL
;
5471 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
5472 return -TARGET_EFAULT
;
5473 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5474 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5475 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5476 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5477 unlock_user_struct(target_ldt_info
, ptr
, 0);
5479 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
5480 return -TARGET_EINVAL
;
5481 seg_32bit
= ldt_info
.flags
& 1;
5482 contents
= (ldt_info
.flags
>> 1) & 3;
5483 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5484 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5485 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5486 useable
= (ldt_info
.flags
>> 6) & 1;
5490 lm
= (ldt_info
.flags
>> 7) & 1;
5492 if (contents
== 3) {
5494 return -TARGET_EINVAL
;
5495 if (seg_not_present
== 0)
5496 return -TARGET_EINVAL
;
5498 /* allocate the LDT */
5500 env
->ldt
.base
= target_mmap(0,
5501 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
5502 PROT_READ
|PROT_WRITE
,
5503 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
5504 if (env
->ldt
.base
== -1)
5505 return -TARGET_ENOMEM
;
5506 memset(g2h(env
->ldt
.base
), 0,
5507 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
5508 env
->ldt
.limit
= 0xffff;
5509 ldt_table
= g2h(env
->ldt
.base
);
5512 /* NOTE: same code as Linux kernel */
5513 /* Allow LDTs to be cleared by the user. */
5514 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5517 read_exec_only
== 1 &&
5519 limit_in_pages
== 0 &&
5520 seg_not_present
== 1 &&
5528 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5529 (ldt_info
.limit
& 0x0ffff);
5530 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5531 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5532 (ldt_info
.limit
& 0xf0000) |
5533 ((read_exec_only
^ 1) << 9) |
5535 ((seg_not_present
^ 1) << 15) |
5537 (limit_in_pages
<< 23) |
5541 entry_2
|= (useable
<< 20);
5543 /* Install the new entry ... */
5545 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
5546 lp
[0] = tswap32(entry_1
);
5547 lp
[1] = tswap32(entry_2
);
5551 /* specific and weird i386 syscalls */
5552 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
5553 unsigned long bytecount
)
5559 ret
= read_ldt(ptr
, bytecount
);
5562 ret
= write_ldt(env
, ptr
, bytecount
, 1);
5565 ret
= write_ldt(env
, ptr
, bytecount
, 0);
5568 ret
= -TARGET_ENOSYS
;
5574 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5575 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5577 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5578 struct target_modify_ldt_ldt_s ldt_info
;
5579 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5580 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5581 int seg_not_present
, useable
, lm
;
5582 uint32_t *lp
, entry_1
, entry_2
;
5585 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5586 if (!target_ldt_info
)
5587 return -TARGET_EFAULT
;
5588 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5589 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5590 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5591 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5592 if (ldt_info
.entry_number
== -1) {
5593 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
5594 if (gdt_table
[i
] == 0) {
5595 ldt_info
.entry_number
= i
;
5596 target_ldt_info
->entry_number
= tswap32(i
);
5601 unlock_user_struct(target_ldt_info
, ptr
, 1);
5603 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
5604 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
5605 return -TARGET_EINVAL
;
5606 seg_32bit
= ldt_info
.flags
& 1;
5607 contents
= (ldt_info
.flags
>> 1) & 3;
5608 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5609 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5610 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5611 useable
= (ldt_info
.flags
>> 6) & 1;
5615 lm
= (ldt_info
.flags
>> 7) & 1;
5618 if (contents
== 3) {
5619 if (seg_not_present
== 0)
5620 return -TARGET_EINVAL
;
5623 /* NOTE: same code as Linux kernel */
5624 /* Allow LDTs to be cleared by the user. */
5625 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5626 if ((contents
== 0 &&
5627 read_exec_only
== 1 &&
5629 limit_in_pages
== 0 &&
5630 seg_not_present
== 1 &&
5638 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5639 (ldt_info
.limit
& 0x0ffff);
5640 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5641 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5642 (ldt_info
.limit
& 0xf0000) |
5643 ((read_exec_only
^ 1) << 9) |
5645 ((seg_not_present
^ 1) << 15) |
5647 (limit_in_pages
<< 23) |
5652 /* Install the new entry ... */
5654 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
5655 lp
[0] = tswap32(entry_1
);
5656 lp
[1] = tswap32(entry_2
);
5660 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5662 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5663 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5664 uint32_t base_addr
, limit
, flags
;
5665 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
5666 int seg_not_present
, useable
, lm
;
5667 uint32_t *lp
, entry_1
, entry_2
;
5669 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5670 if (!target_ldt_info
)
5671 return -TARGET_EFAULT
;
5672 idx
= tswap32(target_ldt_info
->entry_number
);
5673 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
5674 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
5675 unlock_user_struct(target_ldt_info
, ptr
, 1);
5676 return -TARGET_EINVAL
;
5678 lp
= (uint32_t *)(gdt_table
+ idx
);
5679 entry_1
= tswap32(lp
[0]);
5680 entry_2
= tswap32(lp
[1]);
5682 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
5683 contents
= (entry_2
>> 10) & 3;
5684 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
5685 seg_32bit
= (entry_2
>> 22) & 1;
5686 limit_in_pages
= (entry_2
>> 23) & 1;
5687 useable
= (entry_2
>> 20) & 1;
5691 lm
= (entry_2
>> 21) & 1;
5693 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
5694 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
5695 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
5696 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
5697 base_addr
= (entry_1
>> 16) |
5698 (entry_2
& 0xff000000) |
5699 ((entry_2
& 0xff) << 16);
5700 target_ldt_info
->base_addr
= tswapal(base_addr
);
5701 target_ldt_info
->limit
= tswap32(limit
);
5702 target_ldt_info
->flags
= tswap32(flags
);
5703 unlock_user_struct(target_ldt_info
, ptr
, 1);
5706 #endif /* TARGET_I386 && TARGET_ABI32 */
5708 #ifndef TARGET_ABI32
5709 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
5716 case TARGET_ARCH_SET_GS
:
5717 case TARGET_ARCH_SET_FS
:
5718 if (code
== TARGET_ARCH_SET_GS
)
5722 cpu_x86_load_seg(env
, idx
, 0);
5723 env
->segs
[idx
].base
= addr
;
5725 case TARGET_ARCH_GET_GS
:
5726 case TARGET_ARCH_GET_FS
:
5727 if (code
== TARGET_ARCH_GET_GS
)
5731 val
= env
->segs
[idx
].base
;
5732 if (put_user(val
, addr
, abi_ulong
))
5733 ret
= -TARGET_EFAULT
;
5736 ret
= -TARGET_EINVAL
;
5743 #endif /* defined(TARGET_I386) */
5745 #define NEW_STACK_SIZE 0x40000
5748 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
5751 pthread_mutex_t mutex
;
5752 pthread_cond_t cond
;
5755 abi_ulong child_tidptr
;
5756 abi_ulong parent_tidptr
;
5760 static void *clone_func(void *arg
)
5762 new_thread_info
*info
= arg
;
5767 rcu_register_thread();
5769 cpu
= ENV_GET_CPU(env
);
5771 ts
= (TaskState
*)cpu
->opaque
;
5772 info
->tid
= gettid();
5773 cpu
->host_tid
= info
->tid
;
5775 if (info
->child_tidptr
)
5776 put_user_u32(info
->tid
, info
->child_tidptr
);
5777 if (info
->parent_tidptr
)
5778 put_user_u32(info
->tid
, info
->parent_tidptr
);
5779 /* Enable signals. */
5780 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
5781 /* Signal to the parent that we're ready. */
5782 pthread_mutex_lock(&info
->mutex
);
5783 pthread_cond_broadcast(&info
->cond
);
5784 pthread_mutex_unlock(&info
->mutex
);
5785 /* Wait until the parent has finshed initializing the tls state. */
5786 pthread_mutex_lock(&clone_lock
);
5787 pthread_mutex_unlock(&clone_lock
);
5793 /* do_fork() Must return host values and target errnos (unlike most
5794 do_*() functions). */
5795 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
5796 abi_ulong parent_tidptr
, target_ulong newtls
,
5797 abi_ulong child_tidptr
)
5799 CPUState
*cpu
= ENV_GET_CPU(env
);
5803 CPUArchState
*new_env
;
5804 unsigned int nptl_flags
;
5807 /* Emulate vfork() with fork() */
5808 if (flags
& CLONE_VFORK
)
5809 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
5811 if (flags
& CLONE_VM
) {
5812 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
5813 new_thread_info info
;
5814 pthread_attr_t attr
;
5816 ts
= g_new0(TaskState
, 1);
5817 init_task_state(ts
);
5818 /* we create a new CPU instance. */
5819 new_env
= cpu_copy(env
);
5820 /* Init regs that differ from the parent. */
5821 cpu_clone_regs(new_env
, newsp
);
5822 new_cpu
= ENV_GET_CPU(new_env
);
5823 new_cpu
->opaque
= ts
;
5824 ts
->bprm
= parent_ts
->bprm
;
5825 ts
->info
= parent_ts
->info
;
5826 ts
->signal_mask
= parent_ts
->signal_mask
;
5828 flags
&= ~CLONE_NPTL_FLAGS2
;
5830 if (nptl_flags
& CLONE_CHILD_CLEARTID
) {
5831 ts
->child_tidptr
= child_tidptr
;
5834 if (nptl_flags
& CLONE_SETTLS
)
5835 cpu_set_tls (new_env
, newtls
);
5837 /* Grab a mutex so that thread setup appears atomic. */
5838 pthread_mutex_lock(&clone_lock
);
5840 memset(&info
, 0, sizeof(info
));
5841 pthread_mutex_init(&info
.mutex
, NULL
);
5842 pthread_mutex_lock(&info
.mutex
);
5843 pthread_cond_init(&info
.cond
, NULL
);
5845 if (nptl_flags
& CLONE_CHILD_SETTID
)
5846 info
.child_tidptr
= child_tidptr
;
5847 if (nptl_flags
& CLONE_PARENT_SETTID
)
5848 info
.parent_tidptr
= parent_tidptr
;
5850 ret
= pthread_attr_init(&attr
);
5851 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
5852 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
5853 /* It is not safe to deliver signals until the child has finished
5854 initializing, so temporarily block all signals. */
5855 sigfillset(&sigmask
);
5856 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
5858 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
5859 /* TODO: Free new CPU state if thread creation failed. */
5861 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
5862 pthread_attr_destroy(&attr
);
5864 /* Wait for the child to initialize. */
5865 pthread_cond_wait(&info
.cond
, &info
.mutex
);
5867 if (flags
& CLONE_PARENT_SETTID
)
5868 put_user_u32(ret
, parent_tidptr
);
5872 pthread_mutex_unlock(&info
.mutex
);
5873 pthread_cond_destroy(&info
.cond
);
5874 pthread_mutex_destroy(&info
.mutex
);
5875 pthread_mutex_unlock(&clone_lock
);
5877 /* if no CLONE_VM, we consider it is a fork */
5878 if ((flags
& ~(CSIGNAL
| CLONE_NPTL_FLAGS2
)) != 0) {
5879 return -TARGET_EINVAL
;
5882 if (block_signals()) {
5883 return -TARGET_ERESTARTSYS
;
5889 /* Child Process. */
5891 cpu_clone_regs(env
, newsp
);
5893 /* There is a race condition here. The parent process could
5894 theoretically read the TID in the child process before the child
5895 tid is set. This would require using either ptrace
5896 (not implemented) or having *_tidptr to point at a shared memory
5897 mapping. We can't repeat the spinlock hack used above because
5898 the child process gets its own copy of the lock. */
5899 if (flags
& CLONE_CHILD_SETTID
)
5900 put_user_u32(gettid(), child_tidptr
);
5901 if (flags
& CLONE_PARENT_SETTID
)
5902 put_user_u32(gettid(), parent_tidptr
);
5903 ts
= (TaskState
*)cpu
->opaque
;
5904 if (flags
& CLONE_SETTLS
)
5905 cpu_set_tls (env
, newtls
);
5906 if (flags
& CLONE_CHILD_CLEARTID
)
5907 ts
->child_tidptr
= child_tidptr
;
5915 /* warning : doesn't handle linux specific flags... */
5916 static int target_to_host_fcntl_cmd(int cmd
)
5919 case TARGET_F_DUPFD
:
5920 case TARGET_F_GETFD
:
5921 case TARGET_F_SETFD
:
5922 case TARGET_F_GETFL
:
5923 case TARGET_F_SETFL
:
5925 case TARGET_F_GETLK
:
5927 case TARGET_F_SETLK
:
5929 case TARGET_F_SETLKW
:
5931 case TARGET_F_GETOWN
:
5933 case TARGET_F_SETOWN
:
5935 case TARGET_F_GETSIG
:
5937 case TARGET_F_SETSIG
:
5939 #if TARGET_ABI_BITS == 32
5940 case TARGET_F_GETLK64
:
5942 case TARGET_F_SETLK64
:
5944 case TARGET_F_SETLKW64
:
5947 case TARGET_F_SETLEASE
:
5949 case TARGET_F_GETLEASE
:
5951 #ifdef F_DUPFD_CLOEXEC
5952 case TARGET_F_DUPFD_CLOEXEC
:
5953 return F_DUPFD_CLOEXEC
;
5955 case TARGET_F_NOTIFY
:
5958 case TARGET_F_GETOWN_EX
:
5962 case TARGET_F_SETOWN_EX
:
5966 case TARGET_F_SETPIPE_SZ
:
5967 return F_SETPIPE_SZ
;
5968 case TARGET_F_GETPIPE_SZ
:
5969 return F_GETPIPE_SZ
;
5972 return -TARGET_EINVAL
;
5974 return -TARGET_EINVAL
;
5977 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
5978 static const bitmask_transtbl flock_tbl
[] = {
5979 TRANSTBL_CONVERT(F_RDLCK
),
5980 TRANSTBL_CONVERT(F_WRLCK
),
5981 TRANSTBL_CONVERT(F_UNLCK
),
5982 TRANSTBL_CONVERT(F_EXLCK
),
5983 TRANSTBL_CONVERT(F_SHLCK
),
5987 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
5988 abi_ulong target_flock_addr
)
5990 struct target_flock
*target_fl
;
5993 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
5994 return -TARGET_EFAULT
;
5997 __get_user(l_type
, &target_fl
->l_type
);
5998 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
5999 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6000 __get_user(fl
->l_start
, &target_fl
->l_start
);
6001 __get_user(fl
->l_len
, &target_fl
->l_len
);
6002 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6003 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6007 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
6008 const struct flock64
*fl
)
6010 struct target_flock
*target_fl
;
6013 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6014 return -TARGET_EFAULT
;
6017 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6018 __put_user(l_type
, &target_fl
->l_type
);
6019 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6020 __put_user(fl
->l_start
, &target_fl
->l_start
);
6021 __put_user(fl
->l_len
, &target_fl
->l_len
);
6022 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6023 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6027 typedef abi_long
from_flock64_fn(struct flock64
*fl
, abi_ulong target_addr
);
6028 typedef abi_long
to_flock64_fn(abi_ulong target_addr
, const struct flock64
*fl
);
6030 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6031 static inline abi_long
copy_from_user_eabi_flock64(struct flock64
*fl
,
6032 abi_ulong target_flock_addr
)
6034 struct target_eabi_flock64
*target_fl
;
6037 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6038 return -TARGET_EFAULT
;
6041 __get_user(l_type
, &target_fl
->l_type
);
6042 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6043 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6044 __get_user(fl
->l_start
, &target_fl
->l_start
);
6045 __get_user(fl
->l_len
, &target_fl
->l_len
);
6046 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6047 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6051 static inline abi_long
copy_to_user_eabi_flock64(abi_ulong target_flock_addr
,
6052 const struct flock64
*fl
)
6054 struct target_eabi_flock64
*target_fl
;
6057 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6058 return -TARGET_EFAULT
;
6061 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6062 __put_user(l_type
, &target_fl
->l_type
);
6063 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6064 __put_user(fl
->l_start
, &target_fl
->l_start
);
6065 __put_user(fl
->l_len
, &target_fl
->l_len
);
6066 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6067 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6072 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
6073 abi_ulong target_flock_addr
)
6075 struct target_flock64
*target_fl
;
6078 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6079 return -TARGET_EFAULT
;
6082 __get_user(l_type
, &target_fl
->l_type
);
6083 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6084 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6085 __get_user(fl
->l_start
, &target_fl
->l_start
);
6086 __get_user(fl
->l_len
, &target_fl
->l_len
);
6087 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6088 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6092 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
6093 const struct flock64
*fl
)
6095 struct target_flock64
*target_fl
;
6098 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6099 return -TARGET_EFAULT
;
6102 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6103 __put_user(l_type
, &target_fl
->l_type
);
6104 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6105 __put_user(fl
->l_start
, &target_fl
->l_start
);
6106 __put_user(fl
->l_len
, &target_fl
->l_len
);
6107 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6108 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6112 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
6114 struct flock64 fl64
;
6116 struct f_owner_ex fox
;
6117 struct target_f_owner_ex
*target_fox
;
6120 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
6122 if (host_cmd
== -TARGET_EINVAL
)
6126 case TARGET_F_GETLK
:
6127 ret
= copy_from_user_flock(&fl64
, arg
);
6131 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6133 ret
= copy_to_user_flock(arg
, &fl64
);
6137 case TARGET_F_SETLK
:
6138 case TARGET_F_SETLKW
:
6139 ret
= copy_from_user_flock(&fl64
, arg
);
6143 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6146 case TARGET_F_GETLK64
:
6147 ret
= copy_from_user_flock64(&fl64
, arg
);
6151 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6153 ret
= copy_to_user_flock64(arg
, &fl64
);
6156 case TARGET_F_SETLK64
:
6157 case TARGET_F_SETLKW64
:
6158 ret
= copy_from_user_flock64(&fl64
, arg
);
6162 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6165 case TARGET_F_GETFL
:
6166 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6168 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
6172 case TARGET_F_SETFL
:
6173 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
6174 target_to_host_bitmask(arg
,
6179 case TARGET_F_GETOWN_EX
:
6180 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6182 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
6183 return -TARGET_EFAULT
;
6184 target_fox
->type
= tswap32(fox
.type
);
6185 target_fox
->pid
= tswap32(fox
.pid
);
6186 unlock_user_struct(target_fox
, arg
, 1);
6192 case TARGET_F_SETOWN_EX
:
6193 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
6194 return -TARGET_EFAULT
;
6195 fox
.type
= tswap32(target_fox
->type
);
6196 fox
.pid
= tswap32(target_fox
->pid
);
6197 unlock_user_struct(target_fox
, arg
, 0);
6198 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6202 case TARGET_F_SETOWN
:
6203 case TARGET_F_GETOWN
:
6204 case TARGET_F_SETSIG
:
6205 case TARGET_F_GETSIG
:
6206 case TARGET_F_SETLEASE
:
6207 case TARGET_F_GETLEASE
:
6208 case TARGET_F_SETPIPE_SZ
:
6209 case TARGET_F_GETPIPE_SZ
:
6210 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6214 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
6222 static inline int high2lowuid(int uid
)
6230 static inline int high2lowgid(int gid
)
6238 static inline int low2highuid(int uid
)
6240 if ((int16_t)uid
== -1)
6246 static inline int low2highgid(int gid
)
6248 if ((int16_t)gid
== -1)
6253 static inline int tswapid(int id
)
6258 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6260 #else /* !USE_UID16 */
6261 static inline int high2lowuid(int uid
)
6265 static inline int high2lowgid(int gid
)
6269 static inline int low2highuid(int uid
)
6273 static inline int low2highgid(int gid
)
6277 static inline int tswapid(int id
)
6282 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6284 #endif /* USE_UID16 */
6286 /* We must do direct syscalls for setting UID/GID, because we want to
6287 * implement the Linux system call semantics of "change only for this thread",
6288 * not the libc/POSIX semantics of "change for all threads in process".
6289 * (See http://ewontfix.com/17/ for more details.)
6290 * We use the 32-bit version of the syscalls if present; if it is not
6291 * then either the host architecture supports 32-bit UIDs natively with
6292 * the standard syscall, or the 16-bit UID is the best we can do.
6294 #ifdef __NR_setuid32
6295 #define __NR_sys_setuid __NR_setuid32
6297 #define __NR_sys_setuid __NR_setuid
6299 #ifdef __NR_setgid32
6300 #define __NR_sys_setgid __NR_setgid32
6302 #define __NR_sys_setgid __NR_setgid
6304 #ifdef __NR_setresuid32
6305 #define __NR_sys_setresuid __NR_setresuid32
6307 #define __NR_sys_setresuid __NR_setresuid
6309 #ifdef __NR_setresgid32
6310 #define __NR_sys_setresgid __NR_setresgid32
6312 #define __NR_sys_setresgid __NR_setresgid
6315 _syscall1(int, sys_setuid
, uid_t
, uid
)
6316 _syscall1(int, sys_setgid
, gid_t
, gid
)
6317 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
6318 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
6320 void syscall_init(void)
6323 const argtype
*arg_type
;
6327 thunk_init(STRUCT_MAX
);
6329 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6330 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6331 #include "syscall_types.h"
6333 #undef STRUCT_SPECIAL
6335 /* Build target_to_host_errno_table[] table from
6336 * host_to_target_errno_table[]. */
6337 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
6338 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
6341 /* we patch the ioctl size if necessary. We rely on the fact that
6342 no ioctl has all the bits at '1' in the size field */
6344 while (ie
->target_cmd
!= 0) {
6345 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
6346 TARGET_IOC_SIZEMASK
) {
6347 arg_type
= ie
->arg_type
;
6348 if (arg_type
[0] != TYPE_PTR
) {
6349 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
6354 size
= thunk_type_size(arg_type
, 0);
6355 ie
->target_cmd
= (ie
->target_cmd
&
6356 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
6357 (size
<< TARGET_IOC_SIZESHIFT
);
6360 /* automatic consistency check if same arch */
6361 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6362 (defined(__x86_64__) && defined(TARGET_X86_64))
6363 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
6364 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6365 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#if TARGET_ABI_BITS == 32
/* Combine a register pair into a 64-bit file offset.
 * Which register carries the high half depends on guest endianness. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABI: the first register already holds the whole offset. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
6388 #ifdef TARGET_NR_truncate64
6389 static inline abi_long
target_truncate64(void *cpu_env
, const char *arg1
,
6394 if (regpairs_aligned(cpu_env
)) {
6398 return get_errno(truncate64(arg1
, target_offset64(arg2
, arg3
)));
6402 #ifdef TARGET_NR_ftruncate64
6403 static inline abi_long
target_ftruncate64(void *cpu_env
, abi_long arg1
,
6408 if (regpairs_aligned(cpu_env
)) {
6412 return get_errno(ftruncate64(arg1
, target_offset64(arg2
, arg3
)));
6416 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
6417 abi_ulong target_addr
)
6419 struct target_timespec
*target_ts
;
6421 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
6422 return -TARGET_EFAULT
;
6423 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6424 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6425 unlock_user_struct(target_ts
, target_addr
, 0);
6429 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
6430 struct timespec
*host_ts
)
6432 struct target_timespec
*target_ts
;
6434 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
6435 return -TARGET_EFAULT
;
6436 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6437 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6438 unlock_user_struct(target_ts
, target_addr
, 1);
6442 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
6443 abi_ulong target_addr
)
6445 struct target_itimerspec
*target_itspec
;
6447 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
6448 return -TARGET_EFAULT
;
6451 host_itspec
->it_interval
.tv_sec
=
6452 tswapal(target_itspec
->it_interval
.tv_sec
);
6453 host_itspec
->it_interval
.tv_nsec
=
6454 tswapal(target_itspec
->it_interval
.tv_nsec
);
6455 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
6456 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
6458 unlock_user_struct(target_itspec
, target_addr
, 1);
6462 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
6463 struct itimerspec
*host_its
)
6465 struct target_itimerspec
*target_itspec
;
6467 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
6468 return -TARGET_EFAULT
;
6471 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
6472 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
6474 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
6475 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
6477 unlock_user_struct(target_itspec
, target_addr
, 0);
6481 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
6482 abi_ulong target_addr
)
6484 struct target_sigevent
*target_sevp
;
6486 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
6487 return -TARGET_EFAULT
;
6490 /* This union is awkward on 64 bit systems because it has a 32 bit
6491 * integer and a pointer in it; we follow the conversion approach
6492 * used for handling sigval types in signal.c so the guest should get
6493 * the correct value back even if we did a 64 bit byteswap and it's
6494 * using the 32 bit integer.
6496 host_sevp
->sigev_value
.sival_ptr
=
6497 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
6498 host_sevp
->sigev_signo
=
6499 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
6500 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
6501 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
6503 unlock_user_struct(target_sevp
, target_addr
, 1);
6507 #if defined(TARGET_NR_mlockall)
6508 static inline int target_to_host_mlockall_arg(int arg
)
6512 if (arg
& TARGET_MLOCKALL_MCL_CURRENT
) {
6513 result
|= MCL_CURRENT
;
6515 if (arg
& TARGET_MLOCKALL_MCL_FUTURE
) {
6516 result
|= MCL_FUTURE
;
6522 static inline abi_long
host_to_target_stat64(void *cpu_env
,
6523 abi_ulong target_addr
,
6524 struct stat
*host_st
)
6526 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6527 if (((CPUARMState
*)cpu_env
)->eabi
) {
6528 struct target_eabi_stat64
*target_st
;
6530 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
6531 return -TARGET_EFAULT
;
6532 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
6533 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
6534 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
6535 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6536 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
6538 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
6539 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
6540 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
6541 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
6542 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
6543 __put_user(host_st
->st_size
, &target_st
->st_size
);
6544 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
6545 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
6546 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
6547 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
6548 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
6549 unlock_user_struct(target_st
, target_addr
, 1);
6553 #if defined(TARGET_HAS_STRUCT_STAT64)
6554 struct target_stat64
*target_st
;
6556 struct target_stat
*target_st
;
6559 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
6560 return -TARGET_EFAULT
;
6561 memset(target_st
, 0, sizeof(*target_st
));
6562 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
6563 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
6564 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6565 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
6567 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
6568 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
6569 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
6570 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
6571 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
6572 /* XXX: better use of kernel struct */
6573 __put_user(host_st
->st_size
, &target_st
->st_size
);
6574 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
6575 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
6576 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
6577 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
6578 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
6579 unlock_user_struct(target_st
, target_addr
, 1);
6585 /* ??? Using host futex calls even when target atomic operations
6586 are not really atomic probably breaks things. However implementing
6587 futexes locally would make futexes shared between multiple processes
6588 tricky. However they're probably useless because guest atomic
6589 operations won't work either. */
6590 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
6591 target_ulong uaddr2
, int val3
)
6593 struct timespec ts
, *pts
;
6596 /* ??? We assume FUTEX_* constants are the same on both host
6598 #ifdef FUTEX_CMD_MASK
6599 base_op
= op
& FUTEX_CMD_MASK
;
6605 case FUTEX_WAIT_BITSET
:
6608 target_to_host_timespec(pts
, timeout
);
6612 return get_errno(safe_futex(g2h(uaddr
), op
, tswap32(val
),
6615 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
6617 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
6619 case FUTEX_CMP_REQUEUE
:
6621 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6622 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6623 But the prototype takes a `struct timespec *'; insert casts
6624 to satisfy the compiler. We do not need to tswap TIMEOUT
6625 since it's not compared to guest memory. */
6626 pts
= (struct timespec
*)(uintptr_t) timeout
;
6627 return get_errno(safe_futex(g2h(uaddr
), op
, val
, pts
,
6629 (base_op
== FUTEX_CMP_REQUEUE
6633 return -TARGET_ENOSYS
;
6636 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6637 static abi_long
do_name_to_handle_at(abi_long dirfd
, abi_long pathname
,
6638 abi_long handle
, abi_long mount_id
,
6641 struct file_handle
*target_fh
;
6642 struct file_handle
*fh
;
6646 unsigned int size
, total_size
;
6648 if (get_user_s32(size
, handle
)) {
6649 return -TARGET_EFAULT
;
6652 name
= lock_user_string(pathname
);
6654 return -TARGET_EFAULT
;
6657 total_size
= sizeof(struct file_handle
) + size
;
6658 target_fh
= lock_user(VERIFY_WRITE
, handle
, total_size
, 0);
6660 unlock_user(name
, pathname
, 0);
6661 return -TARGET_EFAULT
;
6664 fh
= g_malloc0(total_size
);
6665 fh
->handle_bytes
= size
;
6667 ret
= get_errno(name_to_handle_at(dirfd
, path(name
), fh
, &mid
, flags
));
6668 unlock_user(name
, pathname
, 0);
6670 /* man name_to_handle_at(2):
6671 * Other than the use of the handle_bytes field, the caller should treat
6672 * the file_handle structure as an opaque data type
6675 memcpy(target_fh
, fh
, total_size
);
6676 target_fh
->handle_bytes
= tswap32(fh
->handle_bytes
);
6677 target_fh
->handle_type
= tswap32(fh
->handle_type
);
6679 unlock_user(target_fh
, handle
, total_size
);
6681 if (put_user_s32(mid
, mount_id
)) {
6682 return -TARGET_EFAULT
;
6690 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6691 static abi_long
do_open_by_handle_at(abi_long mount_fd
, abi_long handle
,
6694 struct file_handle
*target_fh
;
6695 struct file_handle
*fh
;
6696 unsigned int size
, total_size
;
6699 if (get_user_s32(size
, handle
)) {
6700 return -TARGET_EFAULT
;
6703 total_size
= sizeof(struct file_handle
) + size
;
6704 target_fh
= lock_user(VERIFY_READ
, handle
, total_size
, 1);
6706 return -TARGET_EFAULT
;
6709 fh
= g_memdup(target_fh
, total_size
);
6710 fh
->handle_bytes
= size
;
6711 fh
->handle_type
= tswap32(target_fh
->handle_type
);
6713 ret
= get_errno(open_by_handle_at(mount_fd
, fh
,
6714 target_to_host_bitmask(flags
, fcntl_flags_tbl
)));
6718 unlock_user(target_fh
, handle
, total_size
);
6724 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
/* signalfd siginfo conversion */

/* Convert one host signalfd_siginfo record into guest byte order.
 *
 * NOTE(review): the only caller passes tinfo == info (in-place
 * conversion); the statement order below depends on that aliasing
 * (e.g. ssi_signo is inspected before it is overwritten), so keep it.
 */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */

#ifdef BUS_MCEERR_AO
    /* Memory-error SIGBUS records carry an extra LSB field just past
     * ssi_addr; swap it as well. */
    if (tinfo->ssi_signo == SIGBUS &&
        (tinfo->ssi_code == BUS_MCEERR_AR ||
         tinfo->ssi_code == BUS_MCEERR_AO)) {
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    tinfo->ssi_errno = tswap32(tinfo->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
6766 static abi_long
host_to_target_data_signalfd(void *buf
, size_t len
)
6770 for (i
= 0; i
< len
; i
+= sizeof(struct signalfd_siginfo
)) {
6771 host_to_target_signalfd_siginfo(buf
+ i
, buf
+ i
);
6777 static TargetFdTrans target_signalfd_trans
= {
6778 .host_to_target_data
= host_to_target_data_signalfd
,
6781 static abi_long
do_signalfd4(int fd
, abi_long mask
, int flags
)
6784 target_sigset_t
*target_mask
;
6788 if (flags
& ~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
)) {
6789 return -TARGET_EINVAL
;
6791 if (!lock_user_struct(VERIFY_READ
, target_mask
, mask
, 1)) {
6792 return -TARGET_EFAULT
;
6795 target_to_host_sigset(&host_mask
, target_mask
);
6797 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
6799 ret
= get_errno(signalfd(fd
, &host_mask
, host_flags
));
6801 fd_trans_register(ret
, &target_signalfd_trans
);
6804 unlock_user_struct(target_mask
, mask
, 0);
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same. */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        int tsig = host_to_target_signal(WTERMSIG(status));
        return tsig | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        int tsig = host_to_target_signal(WSTOPSIG(status));
        return (tsig << 8) | (status & 0xff);
    }
    /* Exited / continued statuses carry no signal number. */
    return status;
}
6824 static int open_self_cmdline(void *cpu_env
, int fd
)
6827 bool word_skipped
= false;
6829 fd_orig
= open("/proc/self/cmdline", O_RDONLY
);
6839 nb_read
= read(fd_orig
, buf
, sizeof(buf
));
6842 fd_orig
= close(fd_orig
);
6845 } else if (nb_read
== 0) {
6849 if (!word_skipped
) {
6850 /* Skip the first string, which is the path to qemu-*-static
6851 instead of the actual command. */
6852 cp_buf
= memchr(buf
, 0, sizeof(buf
));
6854 /* Null byte found, skip one string */
6856 nb_read
-= cp_buf
- buf
;
6857 word_skipped
= true;
6862 if (write(fd
, cp_buf
, nb_read
) != nb_read
) {
6871 return close(fd_orig
);
6874 static int open_self_maps(void *cpu_env
, int fd
)
6876 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
6877 TaskState
*ts
= cpu
->opaque
;
6883 fp
= fopen("/proc/self/maps", "r");
6888 while ((read
= getline(&line
, &len
, fp
)) != -1) {
6889 int fields
, dev_maj
, dev_min
, inode
;
6890 uint64_t min
, max
, offset
;
6891 char flag_r
, flag_w
, flag_x
, flag_p
;
6892 char path
[512] = "";
6893 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
6894 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
6895 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
6897 if ((fields
< 10) || (fields
> 11)) {
6900 if (h2g_valid(min
)) {
6901 int flags
= page_get_flags(h2g(min
));
6902 max
= h2g_valid(max
- 1) ? max
: (uintptr_t)g2h(GUEST_ADDR_MAX
);
6903 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
6906 if (h2g(min
) == ts
->info
->stack_limit
) {
6907 pstrcpy(path
, sizeof(path
), " [stack]");
6909 dprintf(fd
, TARGET_ABI_FMT_lx
"-" TARGET_ABI_FMT_lx
6910 " %c%c%c%c %08" PRIx64
" %02x:%02x %d %s%s\n",
6911 h2g(min
), h2g(max
- 1) + 1, flag_r
, flag_w
,
6912 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
6913 path
[0] ? " " : "", path
);
6923 static int open_self_stat(void *cpu_env
, int fd
)
6925 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
6926 TaskState
*ts
= cpu
->opaque
;
6927 abi_ulong start_stack
= ts
->info
->start_stack
;
6930 for (i
= 0; i
< 44; i
++) {
6938 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
6939 } else if (i
== 1) {
6941 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
6942 } else if (i
== 27) {
6945 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
6947 /* for the rest, there is MasterCard */
6948 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
6952 if (write(fd
, buf
, len
) != len
) {
6960 static int open_self_auxv(void *cpu_env
, int fd
)
6962 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
6963 TaskState
*ts
= cpu
->opaque
;
6964 abi_ulong auxv
= ts
->info
->saved_auxv
;
6965 abi_ulong len
= ts
->info
->auxv_len
;
6969 * Auxiliary vector is stored in target process stack.
6970 * read in whole auxv vector and copy it to file
6972 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
6976 r
= write(fd
, ptr
, len
);
6983 lseek(fd
, 0, SEEK_SET
);
6984 unlock_user(ptr
, auxv
, len
);
/* Return 1 if filename names this process's own /proc entry `entry`
 * (via /proc/self/<entry> or /proc/<ourpid>/<entry>), else 0. */
static int is_proc_myself(const char *filename, const char *entry)
{
    const char *rest = filename;

    if (strncmp(rest, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    rest += strlen("/proc/");

    if (strncmp(rest, "self/", strlen("self/")) == 0) {
        rest += strlen("self/");
    } else if (*rest >= '1' && *rest <= '9') {
        /* Numeric form: only our own pid counts as "myself". */
        char pid_prefix[80];

        snprintf(pid_prefix, sizeof(pid_prefix), "%d/", getpid());
        if (strncmp(rest, pid_prefix, strlen(pid_prefix)) != 0) {
            return 0;
        }
        rest += strlen(pid_prefix);
    } else {
        return 0;
    }

    return strcmp(rest, entry) == 0;
}
7014 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-path match: fake /proc files handled by full pathname. */
static int is_proc(const char *filename, const char *entry)
{
    if (strcmp(filename, entry) != 0) {
        return 0;
    }
    return 1;
}
7020 static int open_net_route(void *cpu_env
, int fd
)
7027 fp
= fopen("/proc/net/route", "r");
7034 read
= getline(&line
, &len
, fp
);
7035 dprintf(fd
, "%s", line
);
7039 while ((read
= getline(&line
, &len
, fp
)) != -1) {
7041 uint32_t dest
, gw
, mask
;
7042 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
7043 sscanf(line
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7044 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
7045 &mask
, &mtu
, &window
, &irtt
);
7046 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7047 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
7048 metric
, tswap32(mask
), mtu
, window
, irtt
);
7058 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
7061 const char *filename
;
7062 int (*fill
)(void *cpu_env
, int fd
);
7063 int (*cmp
)(const char *s1
, const char *s2
);
7065 const struct fake_open
*fake_open
;
7066 static const struct fake_open fakes
[] = {
7067 { "maps", open_self_maps
, is_proc_myself
},
7068 { "stat", open_self_stat
, is_proc_myself
},
7069 { "auxv", open_self_auxv
, is_proc_myself
},
7070 { "cmdline", open_self_cmdline
, is_proc_myself
},
7071 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7072 { "/proc/net/route", open_net_route
, is_proc
},
7074 { NULL
, NULL
, NULL
}
7077 if (is_proc_myself(pathname
, "exe")) {
7078 int execfd
= qemu_getauxval(AT_EXECFD
);
7079 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
7082 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
7083 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
7088 if (fake_open
->filename
) {
7090 char filename
[PATH_MAX
];
7093 /* create temporary file to map stat to */
7094 tmpdir
= getenv("TMPDIR");
7097 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
7098 fd
= mkstemp(filename
);
7104 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
7110 lseek(fd
, 0, SEEK_SET
);
7115 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
7118 #define TIMER_MAGIC 0x0caf0000
7119 #define TIMER_MAGIC_MASK 0xffff0000
7121 /* Convert QEMU provided timer ID back to internal 16bit index format */
7122 static target_timer_t
get_timer_id(abi_long arg
)
7124 target_timer_t timerid
= arg
;
7126 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
7127 return -TARGET_EINVAL
;
7132 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
7133 return -TARGET_EINVAL
;
7139 /* do_syscall() should always have a single exit point at the end so
7140 that actions, such as logging of syscall results, can be performed.
7141 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7142 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
7143 abi_long arg2
, abi_long arg3
, abi_long arg4
,
7144 abi_long arg5
, abi_long arg6
, abi_long arg7
,
7147 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
7153 #if defined(DEBUG_ERESTARTSYS)
7154 /* Debug-only code for exercising the syscall-restart code paths
7155 * in the per-architecture cpu main loops: restart every syscall
7156 * the guest makes once before letting it through.
7163 return -TARGET_ERESTARTSYS
;
7169 gemu_log("syscall %d", num
);
7171 trace_guest_user_syscall(cpu
, num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, arg7
, arg8
);
7173 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7176 case TARGET_NR_exit
:
7177 /* In old applications this may be used to implement _exit(2).
7178 However in threaded applictions it is used for thread termination,
7179 and _exit_group is used for application termination.
7180 Do thread termination if we have more then one thread. */
7182 if (block_signals()) {
7183 ret
= -TARGET_ERESTARTSYS
;
7187 if (CPU_NEXT(first_cpu
)) {
7191 /* Remove the CPU from the list. */
7192 QTAILQ_REMOVE(&cpus
, cpu
, node
);
7195 if (ts
->child_tidptr
) {
7196 put_user_u32(0, ts
->child_tidptr
);
7197 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
7201 object_unref(OBJECT(cpu
));
7203 rcu_unregister_thread();
7209 gdb_exit(cpu_env
, arg1
);
7211 ret
= 0; /* avoid warning */
7213 case TARGET_NR_read
:
7217 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
7219 ret
= get_errno(safe_read(arg1
, p
, arg3
));
7221 fd_trans_host_to_target_data(arg1
)) {
7222 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
7224 unlock_user(p
, arg2
, ret
);
7227 case TARGET_NR_write
:
7228 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
7230 ret
= get_errno(safe_write(arg1
, p
, arg3
));
7231 unlock_user(p
, arg2
, 0);
7233 #ifdef TARGET_NR_open
7234 case TARGET_NR_open
:
7235 if (!(p
= lock_user_string(arg1
)))
7237 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
7238 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
7240 fd_trans_unregister(ret
);
7241 unlock_user(p
, arg1
, 0);
7244 case TARGET_NR_openat
:
7245 if (!(p
= lock_user_string(arg2
)))
7247 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
7248 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
7250 fd_trans_unregister(ret
);
7251 unlock_user(p
, arg2
, 0);
7253 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7254 case TARGET_NR_name_to_handle_at
:
7255 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
7258 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7259 case TARGET_NR_open_by_handle_at
:
7260 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
7261 fd_trans_unregister(ret
);
7264 case TARGET_NR_close
:
7265 fd_trans_unregister(arg1
);
7266 ret
= get_errno(close(arg1
));
7271 #ifdef TARGET_NR_fork
7272 case TARGET_NR_fork
:
7273 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
7276 #ifdef TARGET_NR_waitpid
7277 case TARGET_NR_waitpid
:
7280 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
7281 if (!is_error(ret
) && arg2
&& ret
7282 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
7287 #ifdef TARGET_NR_waitid
7288 case TARGET_NR_waitid
:
7292 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
7293 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
7294 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
7296 host_to_target_siginfo(p
, &info
);
7297 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
7302 #ifdef TARGET_NR_creat /* not on alpha */
7303 case TARGET_NR_creat
:
7304 if (!(p
= lock_user_string(arg1
)))
7306 ret
= get_errno(creat(p
, arg2
));
7307 fd_trans_unregister(ret
);
7308 unlock_user(p
, arg1
, 0);
7311 #ifdef TARGET_NR_link
7312 case TARGET_NR_link
:
7315 p
= lock_user_string(arg1
);
7316 p2
= lock_user_string(arg2
);
7318 ret
= -TARGET_EFAULT
;
7320 ret
= get_errno(link(p
, p2
));
7321 unlock_user(p2
, arg2
, 0);
7322 unlock_user(p
, arg1
, 0);
7326 #if defined(TARGET_NR_linkat)
7327 case TARGET_NR_linkat
:
7332 p
= lock_user_string(arg2
);
7333 p2
= lock_user_string(arg4
);
7335 ret
= -TARGET_EFAULT
;
7337 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
7338 unlock_user(p
, arg2
, 0);
7339 unlock_user(p2
, arg4
, 0);
7343 #ifdef TARGET_NR_unlink
7344 case TARGET_NR_unlink
:
7345 if (!(p
= lock_user_string(arg1
)))
7347 ret
= get_errno(unlink(p
));
7348 unlock_user(p
, arg1
, 0);
7351 #if defined(TARGET_NR_unlinkat)
7352 case TARGET_NR_unlinkat
:
7353 if (!(p
= lock_user_string(arg2
)))
7355 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
7356 unlock_user(p
, arg2
, 0);
7359 case TARGET_NR_execve
:
7361 char **argp
, **envp
;
7364 abi_ulong guest_argp
;
7365 abi_ulong guest_envp
;
7372 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
7373 if (get_user_ual(addr
, gp
))
7381 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
7382 if (get_user_ual(addr
, gp
))
7389 argp
= alloca((argc
+ 1) * sizeof(void *));
7390 envp
= alloca((envc
+ 1) * sizeof(void *));
7392 for (gp
= guest_argp
, q
= argp
; gp
;
7393 gp
+= sizeof(abi_ulong
), q
++) {
7394 if (get_user_ual(addr
, gp
))
7398 if (!(*q
= lock_user_string(addr
)))
7400 total_size
+= strlen(*q
) + 1;
7404 for (gp
= guest_envp
, q
= envp
; gp
;
7405 gp
+= sizeof(abi_ulong
), q
++) {
7406 if (get_user_ual(addr
, gp
))
7410 if (!(*q
= lock_user_string(addr
)))
7412 total_size
+= strlen(*q
) + 1;
7416 if (!(p
= lock_user_string(arg1
)))
7418 /* Although execve() is not an interruptible syscall it is
7419 * a special case where we must use the safe_syscall wrapper:
7420 * if we allow a signal to happen before we make the host
7421 * syscall then we will 'lose' it, because at the point of
7422 * execve the process leaves QEMU's control. So we use the
7423 * safe syscall wrapper to ensure that we either take the
7424 * signal as a guest signal, or else it does not happen
7425 * before the execve completes and makes it the other
7426 * program's problem.
7428 ret
= get_errno(safe_execve(p
, argp
, envp
));
7429 unlock_user(p
, arg1
, 0);
7434 ret
= -TARGET_EFAULT
;
7437 for (gp
= guest_argp
, q
= argp
; *q
;
7438 gp
+= sizeof(abi_ulong
), q
++) {
7439 if (get_user_ual(addr
, gp
)
7442 unlock_user(*q
, addr
, 0);
7444 for (gp
= guest_envp
, q
= envp
; *q
;
7445 gp
+= sizeof(abi_ulong
), q
++) {
7446 if (get_user_ual(addr
, gp
)
7449 unlock_user(*q
, addr
, 0);
7453 case TARGET_NR_chdir
:
7454 if (!(p
= lock_user_string(arg1
)))
7456 ret
= get_errno(chdir(p
));
7457 unlock_user(p
, arg1
, 0);
7459 #ifdef TARGET_NR_time
7460 case TARGET_NR_time
:
7463 ret
= get_errno(time(&host_time
));
7466 && put_user_sal(host_time
, arg1
))
7471 #ifdef TARGET_NR_mknod
7472 case TARGET_NR_mknod
:
7473 if (!(p
= lock_user_string(arg1
)))
7475 ret
= get_errno(mknod(p
, arg2
, arg3
));
7476 unlock_user(p
, arg1
, 0);
7479 #if defined(TARGET_NR_mknodat)
7480 case TARGET_NR_mknodat
:
7481 if (!(p
= lock_user_string(arg2
)))
7483 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
7484 unlock_user(p
, arg2
, 0);
7487 #ifdef TARGET_NR_chmod
7488 case TARGET_NR_chmod
:
7489 if (!(p
= lock_user_string(arg1
)))
7491 ret
= get_errno(chmod(p
, arg2
));
7492 unlock_user(p
, arg1
, 0);
7495 #ifdef TARGET_NR_break
7496 case TARGET_NR_break
:
7499 #ifdef TARGET_NR_oldstat
7500 case TARGET_NR_oldstat
:
7503 case TARGET_NR_lseek
:
7504 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
7506 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7507 /* Alpha specific */
7508 case TARGET_NR_getxpid
:
7509 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
7510 ret
= get_errno(getpid());
7513 #ifdef TARGET_NR_getpid
7514 case TARGET_NR_getpid
:
7515 ret
= get_errno(getpid());
7518 case TARGET_NR_mount
:
7520 /* need to look at the data field */
7524 p
= lock_user_string(arg1
);
7532 p2
= lock_user_string(arg2
);
7535 unlock_user(p
, arg1
, 0);
7541 p3
= lock_user_string(arg3
);
7544 unlock_user(p
, arg1
, 0);
7546 unlock_user(p2
, arg2
, 0);
7553 /* FIXME - arg5 should be locked, but it isn't clear how to
7554 * do that since it's not guaranteed to be a NULL-terminated
7558 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
7560 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
7562 ret
= get_errno(ret
);
7565 unlock_user(p
, arg1
, 0);
7567 unlock_user(p2
, arg2
, 0);
7569 unlock_user(p3
, arg3
, 0);
7573 #ifdef TARGET_NR_umount
7574 case TARGET_NR_umount
:
7575 if (!(p
= lock_user_string(arg1
)))
7577 ret
= get_errno(umount(p
));
7578 unlock_user(p
, arg1
, 0);
7581 #ifdef TARGET_NR_stime /* not on alpha */
7582 case TARGET_NR_stime
:
7585 if (get_user_sal(host_time
, arg1
))
7587 ret
= get_errno(stime(&host_time
));
7591 case TARGET_NR_ptrace
:
7593 #ifdef TARGET_NR_alarm /* not on alpha */
7594 case TARGET_NR_alarm
:
7598 #ifdef TARGET_NR_oldfstat
7599 case TARGET_NR_oldfstat
:
7602 #ifdef TARGET_NR_pause /* not on alpha */
7603 case TARGET_NR_pause
:
7604 if (!block_signals()) {
7605 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
7607 ret
= -TARGET_EINTR
;
7610 #ifdef TARGET_NR_utime
7611 case TARGET_NR_utime
:
7613 struct utimbuf tbuf
, *host_tbuf
;
7614 struct target_utimbuf
*target_tbuf
;
7616 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
7618 tbuf
.actime
= tswapal(target_tbuf
->actime
);
7619 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
7620 unlock_user_struct(target_tbuf
, arg2
, 0);
7625 if (!(p
= lock_user_string(arg1
)))
7627 ret
= get_errno(utime(p
, host_tbuf
));
7628 unlock_user(p
, arg1
, 0);
7632 #ifdef TARGET_NR_utimes
7633 case TARGET_NR_utimes
:
7635 struct timeval
*tvp
, tv
[2];
7637 if (copy_from_user_timeval(&tv
[0], arg2
)
7638 || copy_from_user_timeval(&tv
[1],
7639 arg2
+ sizeof(struct target_timeval
)))
7645 if (!(p
= lock_user_string(arg1
)))
7647 ret
= get_errno(utimes(p
, tvp
));
7648 unlock_user(p
, arg1
, 0);
7652 #if defined(TARGET_NR_futimesat)
7653 case TARGET_NR_futimesat
:
7655 struct timeval
*tvp
, tv
[2];
7657 if (copy_from_user_timeval(&tv
[0], arg3
)
7658 || copy_from_user_timeval(&tv
[1],
7659 arg3
+ sizeof(struct target_timeval
)))
7665 if (!(p
= lock_user_string(arg2
)))
7667 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
7668 unlock_user(p
, arg2
, 0);
7672 #ifdef TARGET_NR_stty
7673 case TARGET_NR_stty
:
7676 #ifdef TARGET_NR_gtty
7677 case TARGET_NR_gtty
:
7680 #ifdef TARGET_NR_access
7681 case TARGET_NR_access
:
7682 if (!(p
= lock_user_string(arg1
)))
7684 ret
= get_errno(access(path(p
), arg2
));
7685 unlock_user(p
, arg1
, 0);
7688 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7689 case TARGET_NR_faccessat
:
7690 if (!(p
= lock_user_string(arg2
)))
7692 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
7693 unlock_user(p
, arg2
, 0);
7696 #ifdef TARGET_NR_nice /* not on alpha */
7697 case TARGET_NR_nice
:
7698 ret
= get_errno(nice(arg1
));
7701 #ifdef TARGET_NR_ftime
7702 case TARGET_NR_ftime
:
7705 case TARGET_NR_sync
:
7709 case TARGET_NR_kill
:
7710 ret
= get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
7712 #ifdef TARGET_NR_rename
7713 case TARGET_NR_rename
:
7716 p
= lock_user_string(arg1
);
7717 p2
= lock_user_string(arg2
);
7719 ret
= -TARGET_EFAULT
;
7721 ret
= get_errno(rename(p
, p2
));
7722 unlock_user(p2
, arg2
, 0);
7723 unlock_user(p
, arg1
, 0);
7727 #if defined(TARGET_NR_renameat)
7728 case TARGET_NR_renameat
:
7731 p
= lock_user_string(arg2
);
7732 p2
= lock_user_string(arg4
);
7734 ret
= -TARGET_EFAULT
;
7736 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
7737 unlock_user(p2
, arg4
, 0);
7738 unlock_user(p
, arg2
, 0);
7742 #ifdef TARGET_NR_mkdir
7743 case TARGET_NR_mkdir
:
7744 if (!(p
= lock_user_string(arg1
)))
7746 ret
= get_errno(mkdir(p
, arg2
));
7747 unlock_user(p
, arg1
, 0);
7750 #if defined(TARGET_NR_mkdirat)
7751 case TARGET_NR_mkdirat
:
7752 if (!(p
= lock_user_string(arg2
)))
7754 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
7755 unlock_user(p
, arg2
, 0);
7758 #ifdef TARGET_NR_rmdir
7759 case TARGET_NR_rmdir
:
7760 if (!(p
= lock_user_string(arg1
)))
7762 ret
= get_errno(rmdir(p
));
7763 unlock_user(p
, arg1
, 0);
7767 ret
= get_errno(dup(arg1
));
7769 fd_trans_dup(arg1
, ret
);
7772 #ifdef TARGET_NR_pipe
7773 case TARGET_NR_pipe
:
7774 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
7777 #ifdef TARGET_NR_pipe2
7778 case TARGET_NR_pipe2
:
7779 ret
= do_pipe(cpu_env
, arg1
,
7780 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
7783 case TARGET_NR_times
:
7785 struct target_tms
*tmsp
;
7787 ret
= get_errno(times(&tms
));
7789 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
7792 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
7793 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
7794 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
7795 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
7798 ret
= host_to_target_clock_t(ret
);
7801 #ifdef TARGET_NR_prof
7802 case TARGET_NR_prof
:
7805 #ifdef TARGET_NR_signal
7806 case TARGET_NR_signal
:
7809 case TARGET_NR_acct
:
7811 ret
= get_errno(acct(NULL
));
7813 if (!(p
= lock_user_string(arg1
)))
7815 ret
= get_errno(acct(path(p
)));
7816 unlock_user(p
, arg1
, 0);
7819 #ifdef TARGET_NR_umount2
7820 case TARGET_NR_umount2
:
7821 if (!(p
= lock_user_string(arg1
)))
7823 ret
= get_errno(umount2(p
, arg2
));
7824 unlock_user(p
, arg1
, 0);
7827 #ifdef TARGET_NR_lock
7828 case TARGET_NR_lock
:
7831 case TARGET_NR_ioctl
:
7832 ret
= do_ioctl(arg1
, arg2
, arg3
);
7834 case TARGET_NR_fcntl
:
7835 ret
= do_fcntl(arg1
, arg2
, arg3
);
7837 #ifdef TARGET_NR_mpx
7841 case TARGET_NR_setpgid
:
7842 ret
= get_errno(setpgid(arg1
, arg2
));
7844 #ifdef TARGET_NR_ulimit
7845 case TARGET_NR_ulimit
:
7848 #ifdef TARGET_NR_oldolduname
7849 case TARGET_NR_oldolduname
:
7852 case TARGET_NR_umask
:
7853 ret
= get_errno(umask(arg1
));
7855 case TARGET_NR_chroot
:
7856 if (!(p
= lock_user_string(arg1
)))
7858 ret
= get_errno(chroot(p
));
7859 unlock_user(p
, arg1
, 0);
7861 #ifdef TARGET_NR_ustat
7862 case TARGET_NR_ustat
:
7865 #ifdef TARGET_NR_dup2
7866 case TARGET_NR_dup2
:
7867 ret
= get_errno(dup2(arg1
, arg2
));
7869 fd_trans_dup(arg1
, arg2
);
7873 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7874 case TARGET_NR_dup3
:
7875 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
7877 fd_trans_dup(arg1
, arg2
);
7881 #ifdef TARGET_NR_getppid /* not on alpha */
7882 case TARGET_NR_getppid
:
7883 ret
= get_errno(getppid());
7886 #ifdef TARGET_NR_getpgrp
7887 case TARGET_NR_getpgrp
:
7888 ret
= get_errno(getpgrp());
7891 case TARGET_NR_setsid
:
7892 ret
= get_errno(setsid());
7894 #ifdef TARGET_NR_sigaction
7895 case TARGET_NR_sigaction
:
7897 #if defined(TARGET_ALPHA)
7898 struct target_sigaction act
, oact
, *pact
= 0;
7899 struct target_old_sigaction
*old_act
;
7901 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
7903 act
._sa_handler
= old_act
->_sa_handler
;
7904 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
7905 act
.sa_flags
= old_act
->sa_flags
;
7906 act
.sa_restorer
= 0;
7907 unlock_user_struct(old_act
, arg2
, 0);
7910 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7911 if (!is_error(ret
) && arg3
) {
7912 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
7914 old_act
->_sa_handler
= oact
._sa_handler
;
7915 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
7916 old_act
->sa_flags
= oact
.sa_flags
;
7917 unlock_user_struct(old_act
, arg3
, 1);
7919 #elif defined(TARGET_MIPS)
7920 struct target_sigaction act
, oact
, *pact
, *old_act
;
7923 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
7925 act
._sa_handler
= old_act
->_sa_handler
;
7926 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
7927 act
.sa_flags
= old_act
->sa_flags
;
7928 unlock_user_struct(old_act
, arg2
, 0);
7934 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7936 if (!is_error(ret
) && arg3
) {
7937 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
7939 old_act
->_sa_handler
= oact
._sa_handler
;
7940 old_act
->sa_flags
= oact
.sa_flags
;
7941 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
7942 old_act
->sa_mask
.sig
[1] = 0;
7943 old_act
->sa_mask
.sig
[2] = 0;
7944 old_act
->sa_mask
.sig
[3] = 0;
7945 unlock_user_struct(old_act
, arg3
, 1);
7948 struct target_old_sigaction
*old_act
;
7949 struct target_sigaction act
, oact
, *pact
;
7951 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
7953 act
._sa_handler
= old_act
->_sa_handler
;
7954 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
7955 act
.sa_flags
= old_act
->sa_flags
;
7956 act
.sa_restorer
= old_act
->sa_restorer
;
7957 unlock_user_struct(old_act
, arg2
, 0);
7962 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7963 if (!is_error(ret
) && arg3
) {
7964 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
7966 old_act
->_sa_handler
= oact
._sa_handler
;
7967 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
7968 old_act
->sa_flags
= oact
.sa_flags
;
7969 old_act
->sa_restorer
= oact
.sa_restorer
;
7970 unlock_user_struct(old_act
, arg3
, 1);
7976 case TARGET_NR_rt_sigaction
:
7978 #if defined(TARGET_ALPHA)
7979 struct target_sigaction act
, oact
, *pact
= 0;
7980 struct target_rt_sigaction
*rt_act
;
7982 if (arg4
!= sizeof(target_sigset_t
)) {
7983 ret
= -TARGET_EINVAL
;
7987 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
7989 act
._sa_handler
= rt_act
->_sa_handler
;
7990 act
.sa_mask
= rt_act
->sa_mask
;
7991 act
.sa_flags
= rt_act
->sa_flags
;
7992 act
.sa_restorer
= arg5
;
7993 unlock_user_struct(rt_act
, arg2
, 0);
7996 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7997 if (!is_error(ret
) && arg3
) {
7998 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
8000 rt_act
->_sa_handler
= oact
._sa_handler
;
8001 rt_act
->sa_mask
= oact
.sa_mask
;
8002 rt_act
->sa_flags
= oact
.sa_flags
;
8003 unlock_user_struct(rt_act
, arg3
, 1);
8006 struct target_sigaction
*act
;
8007 struct target_sigaction
*oact
;
8009 if (arg4
!= sizeof(target_sigset_t
)) {
8010 ret
= -TARGET_EINVAL
;
8014 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
8019 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
8020 ret
= -TARGET_EFAULT
;
8021 goto rt_sigaction_fail
;
8025 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
8028 unlock_user_struct(act
, arg2
, 0);
8030 unlock_user_struct(oact
, arg3
, 1);
8034 #ifdef TARGET_NR_sgetmask /* not on alpha */
8035 case TARGET_NR_sgetmask
:
8038 abi_ulong target_set
;
8039 ret
= do_sigprocmask(0, NULL
, &cur_set
);
8041 host_to_target_old_sigset(&target_set
, &cur_set
);
8047 #ifdef TARGET_NR_ssetmask /* not on alpha */
8048 case TARGET_NR_ssetmask
:
8050 sigset_t set
, oset
, cur_set
;
8051 abi_ulong target_set
= arg1
;
8052 /* We only have one word of the new mask so we must read
8053 * the rest of it with do_sigprocmask() and OR in this word.
8054 * We are guaranteed that a do_sigprocmask() that only queries
8055 * the signal mask will not fail.
8057 ret
= do_sigprocmask(0, NULL
, &cur_set
);
8059 target_to_host_old_sigset(&set
, &target_set
);
8060 sigorset(&set
, &set
, &cur_set
);
8061 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
8063 host_to_target_old_sigset(&target_set
, &oset
);
8069 #ifdef TARGET_NR_sigprocmask
8070 case TARGET_NR_sigprocmask
:
8072 #if defined(TARGET_ALPHA)
8073 sigset_t set
, oldset
;
8078 case TARGET_SIG_BLOCK
:
8081 case TARGET_SIG_UNBLOCK
:
8084 case TARGET_SIG_SETMASK
:
8088 ret
= -TARGET_EINVAL
;
8092 target_to_host_old_sigset(&set
, &mask
);
8094 ret
= do_sigprocmask(how
, &set
, &oldset
);
8095 if (!is_error(ret
)) {
8096 host_to_target_old_sigset(&mask
, &oldset
);
8098 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
8101 sigset_t set
, oldset
, *set_ptr
;
8106 case TARGET_SIG_BLOCK
:
8109 case TARGET_SIG_UNBLOCK
:
8112 case TARGET_SIG_SETMASK
:
8116 ret
= -TARGET_EINVAL
;
8119 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8121 target_to_host_old_sigset(&set
, p
);
8122 unlock_user(p
, arg2
, 0);
8128 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8129 if (!is_error(ret
) && arg3
) {
8130 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8132 host_to_target_old_sigset(p
, &oldset
);
8133 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8139 case TARGET_NR_rt_sigprocmask
:
8142 sigset_t set
, oldset
, *set_ptr
;
8144 if (arg4
!= sizeof(target_sigset_t
)) {
8145 ret
= -TARGET_EINVAL
;
8151 case TARGET_SIG_BLOCK
:
8154 case TARGET_SIG_UNBLOCK
:
8157 case TARGET_SIG_SETMASK
:
8161 ret
= -TARGET_EINVAL
;
8164 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8166 target_to_host_sigset(&set
, p
);
8167 unlock_user(p
, arg2
, 0);
8173 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8174 if (!is_error(ret
) && arg3
) {
8175 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8177 host_to_target_sigset(p
, &oldset
);
8178 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8182 #ifdef TARGET_NR_sigpending
8183 case TARGET_NR_sigpending
:
8186 ret
= get_errno(sigpending(&set
));
8187 if (!is_error(ret
)) {
8188 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8190 host_to_target_old_sigset(p
, &set
);
8191 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8196 case TARGET_NR_rt_sigpending
:
8200 /* Yes, this check is >, not != like most. We follow the kernel's
8201 * logic and it does it like this because it implements
8202 * NR_sigpending through the same code path, and in that case
8203 * the old_sigset_t is smaller in size.
8205 if (arg2
> sizeof(target_sigset_t
)) {
8206 ret
= -TARGET_EINVAL
;
8210 ret
= get_errno(sigpending(&set
));
8211 if (!is_error(ret
)) {
8212 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8214 host_to_target_sigset(p
, &set
);
8215 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8219 #ifdef TARGET_NR_sigsuspend
8220 case TARGET_NR_sigsuspend
:
8222 TaskState
*ts
= cpu
->opaque
;
8223 #if defined(TARGET_ALPHA)
8224 abi_ulong mask
= arg1
;
8225 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
8227 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8229 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
8230 unlock_user(p
, arg1
, 0);
8232 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8234 if (ret
!= -TARGET_ERESTARTSYS
) {
8235 ts
->in_sigsuspend
= 1;
8240 case TARGET_NR_rt_sigsuspend
:
8242 TaskState
*ts
= cpu
->opaque
;
8244 if (arg2
!= sizeof(target_sigset_t
)) {
8245 ret
= -TARGET_EINVAL
;
8248 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8250 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
8251 unlock_user(p
, arg1
, 0);
8252 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8254 if (ret
!= -TARGET_ERESTARTSYS
) {
8255 ts
->in_sigsuspend
= 1;
8259 case TARGET_NR_rt_sigtimedwait
:
8262 struct timespec uts
, *puts
;
8265 if (arg4
!= sizeof(target_sigset_t
)) {
8266 ret
= -TARGET_EINVAL
;
8270 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8272 target_to_host_sigset(&set
, p
);
8273 unlock_user(p
, arg1
, 0);
8276 target_to_host_timespec(puts
, arg3
);
8280 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
8282 if (!is_error(ret
)) {
8284 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
8289 host_to_target_siginfo(p
, &uinfo
);
8290 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
8292 ret
= host_to_target_signal(ret
);
8296 case TARGET_NR_rt_sigqueueinfo
:
8300 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
8304 target_to_host_siginfo(&uinfo
, p
);
8305 unlock_user(p
, arg1
, 0);
8306 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
8309 #ifdef TARGET_NR_sigreturn
8310 case TARGET_NR_sigreturn
:
8311 if (block_signals()) {
8312 ret
= -TARGET_ERESTARTSYS
;
8314 ret
= do_sigreturn(cpu_env
);
8318 case TARGET_NR_rt_sigreturn
:
8319 if (block_signals()) {
8320 ret
= -TARGET_ERESTARTSYS
;
8322 ret
= do_rt_sigreturn(cpu_env
);
8325 case TARGET_NR_sethostname
:
8326 if (!(p
= lock_user_string(arg1
)))
8328 ret
= get_errno(sethostname(p
, arg2
));
8329 unlock_user(p
, arg1
, 0);
8331 case TARGET_NR_setrlimit
:
8333 int resource
= target_to_host_resource(arg1
);
8334 struct target_rlimit
*target_rlim
;
8336 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
8338 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
8339 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
8340 unlock_user_struct(target_rlim
, arg2
, 0);
8341 ret
= get_errno(setrlimit(resource
, &rlim
));
8344 case TARGET_NR_getrlimit
:
8346 int resource
= target_to_host_resource(arg1
);
8347 struct target_rlimit
*target_rlim
;
8350 ret
= get_errno(getrlimit(resource
, &rlim
));
8351 if (!is_error(ret
)) {
8352 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
8354 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
8355 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
8356 unlock_user_struct(target_rlim
, arg2
, 1);
8360 case TARGET_NR_getrusage
:
8362 struct rusage rusage
;
8363 ret
= get_errno(getrusage(arg1
, &rusage
));
8364 if (!is_error(ret
)) {
8365 ret
= host_to_target_rusage(arg2
, &rusage
);
8369 case TARGET_NR_gettimeofday
:
8372 ret
= get_errno(gettimeofday(&tv
, NULL
));
8373 if (!is_error(ret
)) {
8374 if (copy_to_user_timeval(arg1
, &tv
))
8379 case TARGET_NR_settimeofday
:
8381 struct timeval tv
, *ptv
= NULL
;
8382 struct timezone tz
, *ptz
= NULL
;
8385 if (copy_from_user_timeval(&tv
, arg1
)) {
8392 if (copy_from_user_timezone(&tz
, arg2
)) {
8398 ret
= get_errno(settimeofday(ptv
, ptz
));
8401 #if defined(TARGET_NR_select)
8402 case TARGET_NR_select
:
8403 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
8404 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
8407 struct target_sel_arg_struct
*sel
;
8408 abi_ulong inp
, outp
, exp
, tvp
;
8411 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1))
8413 nsel
= tswapal(sel
->n
);
8414 inp
= tswapal(sel
->inp
);
8415 outp
= tswapal(sel
->outp
);
8416 exp
= tswapal(sel
->exp
);
8417 tvp
= tswapal(sel
->tvp
);
8418 unlock_user_struct(sel
, arg1
, 0);
8419 ret
= do_select(nsel
, inp
, outp
, exp
, tvp
);
8424 #ifdef TARGET_NR_pselect6
8425 case TARGET_NR_pselect6
:
8427 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
8428 fd_set rfds
, wfds
, efds
;
8429 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
8430 struct timespec ts
, *ts_ptr
;
8433 * The 6th arg is actually two args smashed together,
8434 * so we cannot use the C library.
8442 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
8443 target_sigset_t
*target_sigset
;
8451 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
8455 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
8459 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
8465 * This takes a timespec, and not a timeval, so we cannot
8466 * use the do_select() helper ...
8469 if (target_to_host_timespec(&ts
, ts_addr
)) {
8477 /* Extract the two packed args for the sigset */
8480 sig
.size
= SIGSET_T_SIZE
;
8482 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
8486 arg_sigset
= tswapal(arg7
[0]);
8487 arg_sigsize
= tswapal(arg7
[1]);
8488 unlock_user(arg7
, arg6
, 0);
8492 if (arg_sigsize
!= sizeof(*target_sigset
)) {
8493 /* Like the kernel, we enforce correct size sigsets */
8494 ret
= -TARGET_EINVAL
;
8497 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
8498 sizeof(*target_sigset
), 1);
8499 if (!target_sigset
) {
8502 target_to_host_sigset(&set
, target_sigset
);
8503 unlock_user(target_sigset
, arg_sigset
, 0);
8511 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
8514 if (!is_error(ret
)) {
8515 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
8517 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
8519 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
8522 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
8528 #ifdef TARGET_NR_symlink
8529 case TARGET_NR_symlink
:
8532 p
= lock_user_string(arg1
);
8533 p2
= lock_user_string(arg2
);
8535 ret
= -TARGET_EFAULT
;
8537 ret
= get_errno(symlink(p
, p2
));
8538 unlock_user(p2
, arg2
, 0);
8539 unlock_user(p
, arg1
, 0);
8543 #if defined(TARGET_NR_symlinkat)
8544 case TARGET_NR_symlinkat
:
8547 p
= lock_user_string(arg1
);
8548 p2
= lock_user_string(arg3
);
8550 ret
= -TARGET_EFAULT
;
8552 ret
= get_errno(symlinkat(p
, arg2
, p2
));
8553 unlock_user(p2
, arg3
, 0);
8554 unlock_user(p
, arg1
, 0);
8558 #ifdef TARGET_NR_oldlstat
8559 case TARGET_NR_oldlstat
:
8562 #ifdef TARGET_NR_readlink
8563 case TARGET_NR_readlink
:
8566 p
= lock_user_string(arg1
);
8567 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8569 ret
= -TARGET_EFAULT
;
8571 /* Short circuit this for the magic exe check. */
8572 ret
= -TARGET_EINVAL
;
8573 } else if (is_proc_myself((const char *)p
, "exe")) {
8574 char real
[PATH_MAX
], *temp
;
8575 temp
= realpath(exec_path
, real
);
8576 /* Return value is # of bytes that we wrote to the buffer. */
8578 ret
= get_errno(-1);
8580 /* Don't worry about sign mismatch as earlier mapping
8581 * logic would have thrown a bad address error. */
8582 ret
= MIN(strlen(real
), arg3
);
8583 /* We cannot NUL terminate the string. */
8584 memcpy(p2
, real
, ret
);
8587 ret
= get_errno(readlink(path(p
), p2
, arg3
));
8589 unlock_user(p2
, arg2
, ret
);
8590 unlock_user(p
, arg1
, 0);
8594 #if defined(TARGET_NR_readlinkat)
8595 case TARGET_NR_readlinkat
:
8598 p
= lock_user_string(arg2
);
8599 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
8601 ret
= -TARGET_EFAULT
;
8602 } else if (is_proc_myself((const char *)p
, "exe")) {
8603 char real
[PATH_MAX
], *temp
;
8604 temp
= realpath(exec_path
, real
);
8605 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
8606 snprintf((char *)p2
, arg4
, "%s", real
);
8608 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
8610 unlock_user(p2
, arg3
, ret
);
8611 unlock_user(p
, arg2
, 0);
8615 #ifdef TARGET_NR_uselib
8616 case TARGET_NR_uselib
:
8619 #ifdef TARGET_NR_swapon
8620 case TARGET_NR_swapon
:
8621 if (!(p
= lock_user_string(arg1
)))
8623 ret
= get_errno(swapon(p
, arg2
));
8624 unlock_user(p
, arg1
, 0);
8627 case TARGET_NR_reboot
:
8628 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
8629 /* arg4 must be ignored in all other cases */
8630 p
= lock_user_string(arg4
);
8634 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
8635 unlock_user(p
, arg4
, 0);
8637 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
8640 #ifdef TARGET_NR_readdir
8641 case TARGET_NR_readdir
:
8644 #ifdef TARGET_NR_mmap
8645 case TARGET_NR_mmap
:
8646 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8647 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8648 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8649 || defined(TARGET_S390X)
8652 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
8653 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
8661 unlock_user(v
, arg1
, 0);
8662 ret
= get_errno(target_mmap(v1
, v2
, v3
,
8663 target_to_host_bitmask(v4
, mmap_flags_tbl
),
8667 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
8668 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
8674 #ifdef TARGET_NR_mmap2
8675 case TARGET_NR_mmap2
:
8677 #define MMAP_SHIFT 12
8679 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
8680 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
8682 arg6
<< MMAP_SHIFT
));
8685 case TARGET_NR_munmap
:
8686 ret
= get_errno(target_munmap(arg1
, arg2
));
8688 case TARGET_NR_mprotect
:
8690 TaskState
*ts
= cpu
->opaque
;
8691 /* Special hack to detect libc making the stack executable. */
8692 if ((arg3
& PROT_GROWSDOWN
)
8693 && arg1
>= ts
->info
->stack_limit
8694 && arg1
<= ts
->info
->start_stack
) {
8695 arg3
&= ~PROT_GROWSDOWN
;
8696 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
8697 arg1
= ts
->info
->stack_limit
;
8700 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
8702 #ifdef TARGET_NR_mremap
8703 case TARGET_NR_mremap
:
8704 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
8707 /* ??? msync/mlock/munlock are broken for softmmu. */
8708 #ifdef TARGET_NR_msync
8709 case TARGET_NR_msync
:
8710 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
8713 #ifdef TARGET_NR_mlock
8714 case TARGET_NR_mlock
:
8715 ret
= get_errno(mlock(g2h(arg1
), arg2
));
8718 #ifdef TARGET_NR_munlock
8719 case TARGET_NR_munlock
:
8720 ret
= get_errno(munlock(g2h(arg1
), arg2
));
8723 #ifdef TARGET_NR_mlockall
8724 case TARGET_NR_mlockall
:
8725 ret
= get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
8728 #ifdef TARGET_NR_munlockall
8729 case TARGET_NR_munlockall
:
8730 ret
= get_errno(munlockall());
8733 case TARGET_NR_truncate
:
8734 if (!(p
= lock_user_string(arg1
)))
8736 ret
= get_errno(truncate(p
, arg2
));
8737 unlock_user(p
, arg1
, 0);
8739 case TARGET_NR_ftruncate
:
8740 ret
= get_errno(ftruncate(arg1
, arg2
));
8742 case TARGET_NR_fchmod
:
8743 ret
= get_errno(fchmod(arg1
, arg2
));
8745 #if defined(TARGET_NR_fchmodat)
8746 case TARGET_NR_fchmodat
:
8747 if (!(p
= lock_user_string(arg2
)))
8749 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
8750 unlock_user(p
, arg2
, 0);
8753 case TARGET_NR_getpriority
:
8754 /* Note that negative values are valid for getpriority, so we must
8755 differentiate based on errno settings. */
8757 ret
= getpriority(arg1
, arg2
);
8758 if (ret
== -1 && errno
!= 0) {
8759 ret
= -host_to_target_errno(errno
);
8763 /* Return value is the unbiased priority. Signal no error. */
8764 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
8766 /* Return value is a biased priority to avoid negative numbers. */
8770 case TARGET_NR_setpriority
:
8771 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
8773 #ifdef TARGET_NR_profil
8774 case TARGET_NR_profil
:
8777 case TARGET_NR_statfs
:
8778 if (!(p
= lock_user_string(arg1
)))
8780 ret
= get_errno(statfs(path(p
), &stfs
));
8781 unlock_user(p
, arg1
, 0);
8783 if (!is_error(ret
)) {
8784 struct target_statfs
*target_stfs
;
8786 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
8788 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
8789 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
8790 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
8791 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
8792 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
8793 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
8794 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
8795 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
8796 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
8797 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
8798 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
8799 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
8800 unlock_user_struct(target_stfs
, arg2
, 1);
8803 case TARGET_NR_fstatfs
:
8804 ret
= get_errno(fstatfs(arg1
, &stfs
));
8805 goto convert_statfs
;
8806 #ifdef TARGET_NR_statfs64
8807 case TARGET_NR_statfs64
:
8808 if (!(p
= lock_user_string(arg1
)))
8810 ret
= get_errno(statfs(path(p
), &stfs
));
8811 unlock_user(p
, arg1
, 0);
8813 if (!is_error(ret
)) {
8814 struct target_statfs64
*target_stfs
;
8816 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
8818 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
8819 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
8820 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
8821 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
8822 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
8823 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
8824 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
8825 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
8826 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
8827 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
8828 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
8829 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
8830 unlock_user_struct(target_stfs
, arg3
, 1);
8833 case TARGET_NR_fstatfs64
:
8834 ret
= get_errno(fstatfs(arg1
, &stfs
));
8835 goto convert_statfs64
;
8837 #ifdef TARGET_NR_ioperm
8838 case TARGET_NR_ioperm
:
8841 #ifdef TARGET_NR_socketcall
8842 case TARGET_NR_socketcall
:
8843 ret
= do_socketcall(arg1
, arg2
);
8846 #ifdef TARGET_NR_accept
8847 case TARGET_NR_accept
:
8848 ret
= do_accept4(arg1
, arg2
, arg3
, 0);
8851 #ifdef TARGET_NR_accept4
8852 case TARGET_NR_accept4
:
8853 ret
= do_accept4(arg1
, arg2
, arg3
, arg4
);
8856 #ifdef TARGET_NR_bind
8857 case TARGET_NR_bind
:
8858 ret
= do_bind(arg1
, arg2
, arg3
);
8861 #ifdef TARGET_NR_connect
8862 case TARGET_NR_connect
:
8863 ret
= do_connect(arg1
, arg2
, arg3
);
8866 #ifdef TARGET_NR_getpeername
8867 case TARGET_NR_getpeername
:
8868 ret
= do_getpeername(arg1
, arg2
, arg3
);
8871 #ifdef TARGET_NR_getsockname
8872 case TARGET_NR_getsockname
:
8873 ret
= do_getsockname(arg1
, arg2
, arg3
);
8876 #ifdef TARGET_NR_getsockopt
8877 case TARGET_NR_getsockopt
:
8878 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
8881 #ifdef TARGET_NR_listen
8882 case TARGET_NR_listen
:
8883 ret
= get_errno(listen(arg1
, arg2
));
8886 #ifdef TARGET_NR_recv
8887 case TARGET_NR_recv
:
8888 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
8891 #ifdef TARGET_NR_recvfrom
8892 case TARGET_NR_recvfrom
:
8893 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8896 #ifdef TARGET_NR_recvmsg
8897 case TARGET_NR_recvmsg
:
8898 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
8901 #ifdef TARGET_NR_send
8902 case TARGET_NR_send
:
8903 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
8906 #ifdef TARGET_NR_sendmsg
8907 case TARGET_NR_sendmsg
:
8908 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
8911 #ifdef TARGET_NR_sendmmsg
8912 case TARGET_NR_sendmmsg
:
8913 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
8915 case TARGET_NR_recvmmsg
:
8916 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
8919 #ifdef TARGET_NR_sendto
8920 case TARGET_NR_sendto
:
8921 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8924 #ifdef TARGET_NR_shutdown
8925 case TARGET_NR_shutdown
:
8926 ret
= get_errno(shutdown(arg1
, arg2
));
8929 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8930 case TARGET_NR_getrandom
:
8931 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
8935 ret
= get_errno(getrandom(p
, arg2
, arg3
));
8936 unlock_user(p
, arg1
, ret
);
8939 #ifdef TARGET_NR_socket
8940 case TARGET_NR_socket
:
8941 ret
= do_socket(arg1
, arg2
, arg3
);
8942 fd_trans_unregister(ret
);
8945 #ifdef TARGET_NR_socketpair
8946 case TARGET_NR_socketpair
:
8947 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
8950 #ifdef TARGET_NR_setsockopt
8951 case TARGET_NR_setsockopt
:
8952 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
8956 case TARGET_NR_syslog
:
8957 if (!(p
= lock_user_string(arg2
)))
8959 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
8960 unlock_user(p
, arg2
, 0);
8963 case TARGET_NR_setitimer
:
8965 struct itimerval value
, ovalue
, *pvalue
;
8969 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
8970 || copy_from_user_timeval(&pvalue
->it_value
,
8971 arg2
+ sizeof(struct target_timeval
)))
8976 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
8977 if (!is_error(ret
) && arg3
) {
8978 if (copy_to_user_timeval(arg3
,
8979 &ovalue
.it_interval
)
8980 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
8986 case TARGET_NR_getitimer
:
8988 struct itimerval value
;
8990 ret
= get_errno(getitimer(arg1
, &value
));
8991 if (!is_error(ret
) && arg2
) {
8992 if (copy_to_user_timeval(arg2
,
8994 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
9000 #ifdef TARGET_NR_stat
9001 case TARGET_NR_stat
:
9002 if (!(p
= lock_user_string(arg1
)))
9004 ret
= get_errno(stat(path(p
), &st
));
9005 unlock_user(p
, arg1
, 0);
9008 #ifdef TARGET_NR_lstat
9009 case TARGET_NR_lstat
:
9010 if (!(p
= lock_user_string(arg1
)))
9012 ret
= get_errno(lstat(path(p
), &st
));
9013 unlock_user(p
, arg1
, 0);
9016 case TARGET_NR_fstat
:
9018 ret
= get_errno(fstat(arg1
, &st
));
9019 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9022 if (!is_error(ret
)) {
9023 struct target_stat
*target_st
;
9025 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
9027 memset(target_st
, 0, sizeof(*target_st
));
9028 __put_user(st
.st_dev
, &target_st
->st_dev
);
9029 __put_user(st
.st_ino
, &target_st
->st_ino
);
9030 __put_user(st
.st_mode
, &target_st
->st_mode
);
9031 __put_user(st
.st_uid
, &target_st
->st_uid
);
9032 __put_user(st
.st_gid
, &target_st
->st_gid
);
9033 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
9034 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
9035 __put_user(st
.st_size
, &target_st
->st_size
);
9036 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
9037 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
9038 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
9039 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
9040 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
9041 unlock_user_struct(target_st
, arg2
, 1);
9045 #ifdef TARGET_NR_olduname
9046 case TARGET_NR_olduname
:
9049 #ifdef TARGET_NR_iopl
9050 case TARGET_NR_iopl
:
9053 case TARGET_NR_vhangup
:
9054 ret
= get_errno(vhangup());
9056 #ifdef TARGET_NR_idle
9057 case TARGET_NR_idle
:
9060 #ifdef TARGET_NR_syscall
9061 case TARGET_NR_syscall
:
9062 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
9063 arg6
, arg7
, arg8
, 0);
9066 case TARGET_NR_wait4
:
9069 abi_long status_ptr
= arg2
;
9070 struct rusage rusage
, *rusage_ptr
;
9071 abi_ulong target_rusage
= arg4
;
9072 abi_long rusage_err
;
9074 rusage_ptr
= &rusage
;
9077 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
9078 if (!is_error(ret
)) {
9079 if (status_ptr
&& ret
) {
9080 status
= host_to_target_waitstatus(status
);
9081 if (put_user_s32(status
, status_ptr
))
9084 if (target_rusage
) {
9085 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
9093 #ifdef TARGET_NR_swapoff
9094 case TARGET_NR_swapoff
:
9095 if (!(p
= lock_user_string(arg1
)))
9097 ret
= get_errno(swapoff(p
));
9098 unlock_user(p
, arg1
, 0);
9101 case TARGET_NR_sysinfo
:
9103 struct target_sysinfo
*target_value
;
9104 struct sysinfo value
;
9105 ret
= get_errno(sysinfo(&value
));
9106 if (!is_error(ret
) && arg1
)
9108 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
9110 __put_user(value
.uptime
, &target_value
->uptime
);
9111 __put_user(value
.loads
[0], &target_value
->loads
[0]);
9112 __put_user(value
.loads
[1], &target_value
->loads
[1]);
9113 __put_user(value
.loads
[2], &target_value
->loads
[2]);
9114 __put_user(value
.totalram
, &target_value
->totalram
);
9115 __put_user(value
.freeram
, &target_value
->freeram
);
9116 __put_user(value
.sharedram
, &target_value
->sharedram
);
9117 __put_user(value
.bufferram
, &target_value
->bufferram
);
9118 __put_user(value
.totalswap
, &target_value
->totalswap
);
9119 __put_user(value
.freeswap
, &target_value
->freeswap
);
9120 __put_user(value
.procs
, &target_value
->procs
);
9121 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
9122 __put_user(value
.freehigh
, &target_value
->freehigh
);
9123 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
9124 unlock_user_struct(target_value
, arg1
, 1);
9128 #ifdef TARGET_NR_ipc
9130 ret
= do_ipc(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9133 #ifdef TARGET_NR_semget
9134 case TARGET_NR_semget
:
9135 ret
= get_errno(semget(arg1
, arg2
, arg3
));
9138 #ifdef TARGET_NR_semop
9139 case TARGET_NR_semop
:
9140 ret
= do_semop(arg1
, arg2
, arg3
);
9143 #ifdef TARGET_NR_semctl
9144 case TARGET_NR_semctl
:
9145 ret
= do_semctl(arg1
, arg2
, arg3
, arg4
);
9148 #ifdef TARGET_NR_msgctl
9149 case TARGET_NR_msgctl
:
9150 ret
= do_msgctl(arg1
, arg2
, arg3
);
9153 #ifdef TARGET_NR_msgget
9154 case TARGET_NR_msgget
:
9155 ret
= get_errno(msgget(arg1
, arg2
));
9158 #ifdef TARGET_NR_msgrcv
9159 case TARGET_NR_msgrcv
:
9160 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
9163 #ifdef TARGET_NR_msgsnd
9164 case TARGET_NR_msgsnd
:
9165 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
9168 #ifdef TARGET_NR_shmget
9169 case TARGET_NR_shmget
:
9170 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
9173 #ifdef TARGET_NR_shmctl
9174 case TARGET_NR_shmctl
:
9175 ret
= do_shmctl(arg1
, arg2
, arg3
);
9178 #ifdef TARGET_NR_shmat
9179 case TARGET_NR_shmat
:
9180 ret
= do_shmat(arg1
, arg2
, arg3
);
9183 #ifdef TARGET_NR_shmdt
9184 case TARGET_NR_shmdt
:
9185 ret
= do_shmdt(arg1
);
9188 case TARGET_NR_fsync
:
9189 ret
= get_errno(fsync(arg1
));
9191 case TARGET_NR_clone
:
9192 /* Linux manages to have three different orderings for its
9193 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9194 * match the kernel's CONFIG_CLONE_* settings.
9195 * Microblaze is further special in that it uses a sixth
9196 * implicit argument to clone for the TLS pointer.
9198 #if defined(TARGET_MICROBLAZE)
9199 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
9200 #elif defined(TARGET_CLONE_BACKWARDS)
9201 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
9202 #elif defined(TARGET_CLONE_BACKWARDS2)
9203 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
9205 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
9208 #ifdef __NR_exit_group
9209 /* new thread calls */
9210 case TARGET_NR_exit_group
:
9214 gdb_exit(cpu_env
, arg1
);
9215 ret
= get_errno(exit_group(arg1
));
9218 case TARGET_NR_setdomainname
:
9219 if (!(p
= lock_user_string(arg1
)))
9221 ret
= get_errno(setdomainname(p
, arg2
));
9222 unlock_user(p
, arg1
, 0);
9224 case TARGET_NR_uname
:
9225 /* no need to transcode because we use the linux syscall */
9227 struct new_utsname
* buf
;
9229 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
9231 ret
= get_errno(sys_uname(buf
));
9232 if (!is_error(ret
)) {
9233 /* Overrite the native machine name with whatever is being
9235 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
9236 /* Allow the user to override the reported release. */
9237 if (qemu_uname_release
&& *qemu_uname_release
)
9238 strcpy (buf
->release
, qemu_uname_release
);
9240 unlock_user_struct(buf
, arg1
, 1);
9244 case TARGET_NR_modify_ldt
:
9245 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
9247 #if !defined(TARGET_X86_64)
9248 case TARGET_NR_vm86old
:
9250 case TARGET_NR_vm86
:
9251 ret
= do_vm86(cpu_env
, arg1
, arg2
);
9255 case TARGET_NR_adjtimex
:
9257 #ifdef TARGET_NR_create_module
9258 case TARGET_NR_create_module
:
9260 case TARGET_NR_init_module
:
9261 case TARGET_NR_delete_module
:
9262 #ifdef TARGET_NR_get_kernel_syms
9263 case TARGET_NR_get_kernel_syms
:
9266 case TARGET_NR_quotactl
:
9268 case TARGET_NR_getpgid
:
9269 ret
= get_errno(getpgid(arg1
));
9271 case TARGET_NR_fchdir
:
9272 ret
= get_errno(fchdir(arg1
));
9274 #ifdef TARGET_NR_bdflush /* not on x86_64 */
9275 case TARGET_NR_bdflush
:
9278 #ifdef TARGET_NR_sysfs
9279 case TARGET_NR_sysfs
:
9282 case TARGET_NR_personality
:
9283 ret
= get_errno(personality(arg1
));
9285 #ifdef TARGET_NR_afs_syscall
9286 case TARGET_NR_afs_syscall
:
9289 #ifdef TARGET_NR__llseek /* Not on alpha */
9290 case TARGET_NR__llseek
:
9293 #if !defined(__NR_llseek)
9294 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | arg3
, arg5
);
9296 ret
= get_errno(res
);
9301 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
9303 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
9309 #ifdef TARGET_NR_getdents
9310 case TARGET_NR_getdents
:
9311 #ifdef __NR_getdents
9312 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9314 struct target_dirent
*target_dirp
;
9315 struct linux_dirent
*dirp
;
9316 abi_long count
= arg3
;
9318 dirp
= g_try_malloc(count
);
9320 ret
= -TARGET_ENOMEM
;
9324 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9325 if (!is_error(ret
)) {
9326 struct linux_dirent
*de
;
9327 struct target_dirent
*tde
;
9329 int reclen
, treclen
;
9330 int count1
, tnamelen
;
9334 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9338 reclen
= de
->d_reclen
;
9339 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
9340 assert(tnamelen
>= 0);
9341 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
9342 assert(count1
+ treclen
<= count
);
9343 tde
->d_reclen
= tswap16(treclen
);
9344 tde
->d_ino
= tswapal(de
->d_ino
);
9345 tde
->d_off
= tswapal(de
->d_off
);
9346 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
9347 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9349 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9353 unlock_user(target_dirp
, arg2
, ret
);
9359 struct linux_dirent
*dirp
;
9360 abi_long count
= arg3
;
9362 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9364 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9365 if (!is_error(ret
)) {
9366 struct linux_dirent
*de
;
9371 reclen
= de
->d_reclen
;
9374 de
->d_reclen
= tswap16(reclen
);
9375 tswapls(&de
->d_ino
);
9376 tswapls(&de
->d_off
);
9377 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9381 unlock_user(dirp
, arg2
, ret
);
9385 /* Implement getdents in terms of getdents64 */
9387 struct linux_dirent64
*dirp
;
9388 abi_long count
= arg3
;
9390 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
9394 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9395 if (!is_error(ret
)) {
9396 /* Convert the dirent64 structs to target dirent. We do this
9397 * in-place, since we can guarantee that a target_dirent is no
9398 * larger than a dirent64; however this means we have to be
9399 * careful to read everything before writing in the new format.
9401 struct linux_dirent64
*de
;
9402 struct target_dirent
*tde
;
9407 tde
= (struct target_dirent
*)dirp
;
9409 int namelen
, treclen
;
9410 int reclen
= de
->d_reclen
;
9411 uint64_t ino
= de
->d_ino
;
9412 int64_t off
= de
->d_off
;
9413 uint8_t type
= de
->d_type
;
9415 namelen
= strlen(de
->d_name
);
9416 treclen
= offsetof(struct target_dirent
, d_name
)
9418 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
9420 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
9421 tde
->d_ino
= tswapal(ino
);
9422 tde
->d_off
= tswapal(off
);
9423 tde
->d_reclen
= tswap16(treclen
);
9424 /* The target_dirent type is in what was formerly a padding
9425 * byte at the end of the structure:
9427 *(((char *)tde
) + treclen
- 1) = type
;
9429 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9430 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9436 unlock_user(dirp
, arg2
, ret
);
9440 #endif /* TARGET_NR_getdents */
9441 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9442 case TARGET_NR_getdents64
:
9444 struct linux_dirent64
*dirp
;
9445 abi_long count
= arg3
;
9446 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9448 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9449 if (!is_error(ret
)) {
9450 struct linux_dirent64
*de
;
9455 reclen
= de
->d_reclen
;
9458 de
->d_reclen
= tswap16(reclen
);
9459 tswap64s((uint64_t *)&de
->d_ino
);
9460 tswap64s((uint64_t *)&de
->d_off
);
9461 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9465 unlock_user(dirp
, arg2
, ret
);
9468 #endif /* TARGET_NR_getdents64 */
9469 #if defined(TARGET_NR__newselect)
9470 case TARGET_NR__newselect
:
9471 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9474 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9475 # ifdef TARGET_NR_poll
9476 case TARGET_NR_poll
:
9478 # ifdef TARGET_NR_ppoll
9479 case TARGET_NR_ppoll
:
9482 struct target_pollfd
*target_pfd
;
9483 unsigned int nfds
= arg2
;
9490 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
9491 sizeof(struct target_pollfd
) * nfds
, 1);
9496 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
9497 for (i
= 0; i
< nfds
; i
++) {
9498 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
9499 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
9504 # ifdef TARGET_NR_ppoll
9505 case TARGET_NR_ppoll
:
9507 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
9508 target_sigset_t
*target_set
;
9509 sigset_t _set
, *set
= &_set
;
9512 if (target_to_host_timespec(timeout_ts
, arg3
)) {
9513 unlock_user(target_pfd
, arg1
, 0);
9521 if (arg5
!= sizeof(target_sigset_t
)) {
9522 unlock_user(target_pfd
, arg1
, 0);
9523 ret
= -TARGET_EINVAL
;
9527 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
9529 unlock_user(target_pfd
, arg1
, 0);
9532 target_to_host_sigset(set
, target_set
);
9537 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
9538 set
, SIGSET_T_SIZE
));
9540 if (!is_error(ret
) && arg3
) {
9541 host_to_target_timespec(arg3
, timeout_ts
);
9544 unlock_user(target_set
, arg4
, 0);
9549 # ifdef TARGET_NR_poll
9550 case TARGET_NR_poll
:
9552 struct timespec ts
, *pts
;
9555 /* Convert ms to secs, ns */
9556 ts
.tv_sec
= arg3
/ 1000;
9557 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
9560 /* -ve poll() timeout means "infinite" */
9563 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
9568 g_assert_not_reached();
9571 if (!is_error(ret
)) {
9572 for(i
= 0; i
< nfds
; i
++) {
9573 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
9576 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
9580 case TARGET_NR_flock
:
9581 /* NOTE: the flock constant seems to be the same for every
9583 ret
= get_errno(safe_flock(arg1
, arg2
));
9585 case TARGET_NR_readv
:
9587 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
9589 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
9590 unlock_iovec(vec
, arg2
, arg3
, 1);
9592 ret
= -host_to_target_errno(errno
);
9596 case TARGET_NR_writev
:
9598 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
9600 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
9601 unlock_iovec(vec
, arg2
, arg3
, 0);
9603 ret
= -host_to_target_errno(errno
);
9607 case TARGET_NR_getsid
:
9608 ret
= get_errno(getsid(arg1
));
9610 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9611 case TARGET_NR_fdatasync
:
9612 ret
= get_errno(fdatasync(arg1
));
9615 #ifdef TARGET_NR__sysctl
9616 case TARGET_NR__sysctl
:
9617 /* We don't implement this, but ENOTDIR is always a safe
9619 ret
= -TARGET_ENOTDIR
;
9622 case TARGET_NR_sched_getaffinity
:
9624 unsigned int mask_size
;
9625 unsigned long *mask
;
9628 * sched_getaffinity needs multiples of ulong, so need to take
9629 * care of mismatches between target ulong and host ulong sizes.
9631 if (arg2
& (sizeof(abi_ulong
) - 1)) {
9632 ret
= -TARGET_EINVAL
;
9635 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
9637 mask
= alloca(mask_size
);
9638 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
9640 if (!is_error(ret
)) {
9642 /* More data returned than the caller's buffer will fit.
9643 * This only happens if sizeof(abi_long) < sizeof(long)
9644 * and the caller passed us a buffer holding an odd number
9645 * of abi_longs. If the host kernel is actually using the
9646 * extra 4 bytes then fail EINVAL; otherwise we can just
9647 * ignore them and only copy the interesting part.
9649 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
9650 if (numcpus
> arg2
* 8) {
9651 ret
= -TARGET_EINVAL
;
9657 if (copy_to_user(arg3
, mask
, ret
)) {
9663 case TARGET_NR_sched_setaffinity
:
9665 unsigned int mask_size
;
9666 unsigned long *mask
;
9669 * sched_setaffinity needs multiples of ulong, so need to take
9670 * care of mismatches between target ulong and host ulong sizes.
9672 if (arg2
& (sizeof(abi_ulong
) - 1)) {
9673 ret
= -TARGET_EINVAL
;
9676 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
9678 mask
= alloca(mask_size
);
9679 if (!lock_user_struct(VERIFY_READ
, p
, arg3
, 1)) {
9682 memcpy(mask
, p
, arg2
);
9683 unlock_user_struct(p
, arg2
, 0);
9685 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
9688 case TARGET_NR_sched_setparam
:
9690 struct sched_param
*target_schp
;
9691 struct sched_param schp
;
9694 return -TARGET_EINVAL
;
9696 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
9698 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
9699 unlock_user_struct(target_schp
, arg2
, 0);
9700 ret
= get_errno(sched_setparam(arg1
, &schp
));
9703 case TARGET_NR_sched_getparam
:
9705 struct sched_param
*target_schp
;
9706 struct sched_param schp
;
9709 return -TARGET_EINVAL
;
9711 ret
= get_errno(sched_getparam(arg1
, &schp
));
9712 if (!is_error(ret
)) {
9713 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
9715 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
9716 unlock_user_struct(target_schp
, arg2
, 1);
9720 case TARGET_NR_sched_setscheduler
:
9722 struct sched_param
*target_schp
;
9723 struct sched_param schp
;
9725 return -TARGET_EINVAL
;
9727 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
9729 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
9730 unlock_user_struct(target_schp
, arg3
, 0);
9731 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
9734 case TARGET_NR_sched_getscheduler
:
9735 ret
= get_errno(sched_getscheduler(arg1
));
9737 case TARGET_NR_sched_yield
:
9738 ret
= get_errno(sched_yield());
9740 case TARGET_NR_sched_get_priority_max
:
9741 ret
= get_errno(sched_get_priority_max(arg1
));
9743 case TARGET_NR_sched_get_priority_min
:
9744 ret
= get_errno(sched_get_priority_min(arg1
));
9746 case TARGET_NR_sched_rr_get_interval
:
9749 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
9750 if (!is_error(ret
)) {
9751 ret
= host_to_target_timespec(arg2
, &ts
);
9755 case TARGET_NR_nanosleep
:
9757 struct timespec req
, rem
;
9758 target_to_host_timespec(&req
, arg1
);
9759 ret
= get_errno(safe_nanosleep(&req
, &rem
));
9760 if (is_error(ret
) && arg2
) {
9761 host_to_target_timespec(arg2
, &rem
);
9765 #ifdef TARGET_NR_query_module
9766 case TARGET_NR_query_module
:
9769 #ifdef TARGET_NR_nfsservctl
9770 case TARGET_NR_nfsservctl
:
9773 case TARGET_NR_prctl
:
9775 case PR_GET_PDEATHSIG
:
9778 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
9779 if (!is_error(ret
) && arg2
9780 && put_user_ual(deathsig
, arg2
)) {
9788 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
9792 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
9794 unlock_user(name
, arg2
, 16);
9799 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
9803 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
9805 unlock_user(name
, arg2
, 0);
9810 /* Most prctl options have no pointer arguments */
9811 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
9815 #ifdef TARGET_NR_arch_prctl
9816 case TARGET_NR_arch_prctl
:
9817 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9818 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
9824 #ifdef TARGET_NR_pread64
9825 case TARGET_NR_pread64
:
9826 if (regpairs_aligned(cpu_env
)) {
9830 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
9832 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
9833 unlock_user(p
, arg2
, ret
);
9835 case TARGET_NR_pwrite64
:
9836 if (regpairs_aligned(cpu_env
)) {
9840 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
9842 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
9843 unlock_user(p
, arg2
, 0);
9846 case TARGET_NR_getcwd
:
9847 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
9849 ret
= get_errno(sys_getcwd1(p
, arg2
));
9850 unlock_user(p
, arg1
, ret
);
9852 case TARGET_NR_capget
:
9853 case TARGET_NR_capset
:
9855 struct target_user_cap_header
*target_header
;
9856 struct target_user_cap_data
*target_data
= NULL
;
9857 struct __user_cap_header_struct header
;
9858 struct __user_cap_data_struct data
[2];
9859 struct __user_cap_data_struct
*dataptr
= NULL
;
9860 int i
, target_datalen
;
9863 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
9866 header
.version
= tswap32(target_header
->version
);
9867 header
.pid
= tswap32(target_header
->pid
);
9869 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
9870 /* Version 2 and up takes pointer to two user_data structs */
9874 target_datalen
= sizeof(*target_data
) * data_items
;
9877 if (num
== TARGET_NR_capget
) {
9878 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
9880 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
9883 unlock_user_struct(target_header
, arg1
, 0);
9887 if (num
== TARGET_NR_capset
) {
9888 for (i
= 0; i
< data_items
; i
++) {
9889 data
[i
].effective
= tswap32(target_data
[i
].effective
);
9890 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
9891 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
9898 if (num
== TARGET_NR_capget
) {
9899 ret
= get_errno(capget(&header
, dataptr
));
9901 ret
= get_errno(capset(&header
, dataptr
));
9904 /* The kernel always updates version for both capget and capset */
9905 target_header
->version
= tswap32(header
.version
);
9906 unlock_user_struct(target_header
, arg1
, 1);
9909 if (num
== TARGET_NR_capget
) {
9910 for (i
= 0; i
< data_items
; i
++) {
9911 target_data
[i
].effective
= tswap32(data
[i
].effective
);
9912 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
9913 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
9915 unlock_user(target_data
, arg2
, target_datalen
);
9917 unlock_user(target_data
, arg2
, 0);
9922 case TARGET_NR_sigaltstack
:
9923 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUArchState
*)cpu_env
));
9926 #ifdef CONFIG_SENDFILE
9927 case TARGET_NR_sendfile
:
9932 ret
= get_user_sal(off
, arg3
);
9933 if (is_error(ret
)) {
9938 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
9939 if (!is_error(ret
) && arg3
) {
9940 abi_long ret2
= put_user_sal(off
, arg3
);
9941 if (is_error(ret2
)) {
9947 #ifdef TARGET_NR_sendfile64
9948 case TARGET_NR_sendfile64
:
9953 ret
= get_user_s64(off
, arg3
);
9954 if (is_error(ret
)) {
9959 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
9960 if (!is_error(ret
) && arg3
) {
9961 abi_long ret2
= put_user_s64(off
, arg3
);
9962 if (is_error(ret2
)) {
9970 case TARGET_NR_sendfile
:
9971 #ifdef TARGET_NR_sendfile64
9972 case TARGET_NR_sendfile64
:
9977 #ifdef TARGET_NR_getpmsg
9978 case TARGET_NR_getpmsg
:
9981 #ifdef TARGET_NR_putpmsg
9982 case TARGET_NR_putpmsg
:
9985 #ifdef TARGET_NR_vfork
9986 case TARGET_NR_vfork
:
9987 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
9991 #ifdef TARGET_NR_ugetrlimit
9992 case TARGET_NR_ugetrlimit
:
9995 int resource
= target_to_host_resource(arg1
);
9996 ret
= get_errno(getrlimit(resource
, &rlim
));
9997 if (!is_error(ret
)) {
9998 struct target_rlimit
*target_rlim
;
9999 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
10001 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
10002 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
10003 unlock_user_struct(target_rlim
, arg2
, 1);
10008 #ifdef TARGET_NR_truncate64
10009 case TARGET_NR_truncate64
:
10010 if (!(p
= lock_user_string(arg1
)))
10012 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
10013 unlock_user(p
, arg1
, 0);
10016 #ifdef TARGET_NR_ftruncate64
10017 case TARGET_NR_ftruncate64
:
10018 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
10021 #ifdef TARGET_NR_stat64
10022 case TARGET_NR_stat64
:
10023 if (!(p
= lock_user_string(arg1
)))
10025 ret
= get_errno(stat(path(p
), &st
));
10026 unlock_user(p
, arg1
, 0);
10027 if (!is_error(ret
))
10028 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10031 #ifdef TARGET_NR_lstat64
10032 case TARGET_NR_lstat64
:
10033 if (!(p
= lock_user_string(arg1
)))
10035 ret
= get_errno(lstat(path(p
), &st
));
10036 unlock_user(p
, arg1
, 0);
10037 if (!is_error(ret
))
10038 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10041 #ifdef TARGET_NR_fstat64
10042 case TARGET_NR_fstat64
:
10043 ret
= get_errno(fstat(arg1
, &st
));
10044 if (!is_error(ret
))
10045 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10048 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10049 #ifdef TARGET_NR_fstatat64
10050 case TARGET_NR_fstatat64
:
10052 #ifdef TARGET_NR_newfstatat
10053 case TARGET_NR_newfstatat
:
10055 if (!(p
= lock_user_string(arg2
)))
10057 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
10058 if (!is_error(ret
))
10059 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
10062 #ifdef TARGET_NR_lchown
10063 case TARGET_NR_lchown
:
10064 if (!(p
= lock_user_string(arg1
)))
10066 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10067 unlock_user(p
, arg1
, 0);
10070 #ifdef TARGET_NR_getuid
10071 case TARGET_NR_getuid
:
10072 ret
= get_errno(high2lowuid(getuid()));
10075 #ifdef TARGET_NR_getgid
10076 case TARGET_NR_getgid
:
10077 ret
= get_errno(high2lowgid(getgid()));
10080 #ifdef TARGET_NR_geteuid
10081 case TARGET_NR_geteuid
:
10082 ret
= get_errno(high2lowuid(geteuid()));
10085 #ifdef TARGET_NR_getegid
10086 case TARGET_NR_getegid
:
10087 ret
= get_errno(high2lowgid(getegid()));
10090 case TARGET_NR_setreuid
:
10091 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
10093 case TARGET_NR_setregid
:
10094 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
10096 case TARGET_NR_getgroups
:
10098 int gidsetsize
= arg1
;
10099 target_id
*target_grouplist
;
10103 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10104 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10105 if (gidsetsize
== 0)
10107 if (!is_error(ret
)) {
10108 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
10109 if (!target_grouplist
)
10111 for(i
= 0;i
< ret
; i
++)
10112 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
10113 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
10117 case TARGET_NR_setgroups
:
10119 int gidsetsize
= arg1
;
10120 target_id
*target_grouplist
;
10121 gid_t
*grouplist
= NULL
;
10124 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10125 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
10126 if (!target_grouplist
) {
10127 ret
= -TARGET_EFAULT
;
10130 for (i
= 0; i
< gidsetsize
; i
++) {
10131 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
10133 unlock_user(target_grouplist
, arg2
, 0);
10135 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
10138 case TARGET_NR_fchown
:
10139 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
10141 #if defined(TARGET_NR_fchownat)
10142 case TARGET_NR_fchownat
:
10143 if (!(p
= lock_user_string(arg2
)))
10145 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
10146 low2highgid(arg4
), arg5
));
10147 unlock_user(p
, arg2
, 0);
10150 #ifdef TARGET_NR_setresuid
10151 case TARGET_NR_setresuid
:
10152 ret
= get_errno(sys_setresuid(low2highuid(arg1
),
10154 low2highuid(arg3
)));
10157 #ifdef TARGET_NR_getresuid
10158 case TARGET_NR_getresuid
:
10160 uid_t ruid
, euid
, suid
;
10161 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
10162 if (!is_error(ret
)) {
10163 if (put_user_id(high2lowuid(ruid
), arg1
)
10164 || put_user_id(high2lowuid(euid
), arg2
)
10165 || put_user_id(high2lowuid(suid
), arg3
))
10171 #ifdef TARGET_NR_getresgid
10172 case TARGET_NR_setresgid
:
10173 ret
= get_errno(sys_setresgid(low2highgid(arg1
),
10175 low2highgid(arg3
)));
10178 #ifdef TARGET_NR_getresgid
10179 case TARGET_NR_getresgid
:
10181 gid_t rgid
, egid
, sgid
;
10182 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
10183 if (!is_error(ret
)) {
10184 if (put_user_id(high2lowgid(rgid
), arg1
)
10185 || put_user_id(high2lowgid(egid
), arg2
)
10186 || put_user_id(high2lowgid(sgid
), arg3
))
10192 #ifdef TARGET_NR_chown
10193 case TARGET_NR_chown
:
10194 if (!(p
= lock_user_string(arg1
)))
10196 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10197 unlock_user(p
, arg1
, 0);
10200 case TARGET_NR_setuid
:
10201 ret
= get_errno(sys_setuid(low2highuid(arg1
)));
10203 case TARGET_NR_setgid
:
10204 ret
= get_errno(sys_setgid(low2highgid(arg1
)));
10206 case TARGET_NR_setfsuid
:
10207 ret
= get_errno(setfsuid(arg1
));
10209 case TARGET_NR_setfsgid
:
10210 ret
= get_errno(setfsgid(arg1
));
10213 #ifdef TARGET_NR_lchown32
10214 case TARGET_NR_lchown32
:
10215 if (!(p
= lock_user_string(arg1
)))
10217 ret
= get_errno(lchown(p
, arg2
, arg3
));
10218 unlock_user(p
, arg1
, 0);
10221 #ifdef TARGET_NR_getuid32
10222 case TARGET_NR_getuid32
:
10223 ret
= get_errno(getuid());
10227 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10228 /* Alpha specific */
10229 case TARGET_NR_getxuid
:
10233 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
10235 ret
= get_errno(getuid());
10238 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10239 /* Alpha specific */
10240 case TARGET_NR_getxgid
:
10244 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
10246 ret
= get_errno(getgid());
10249 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10250 /* Alpha specific */
10251 case TARGET_NR_osf_getsysinfo
:
10252 ret
= -TARGET_EOPNOTSUPP
;
10254 case TARGET_GSI_IEEE_FP_CONTROL
:
10256 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
10258 /* Copied from linux ieee_fpcr_to_swcr. */
10259 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
10260 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
10261 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
10262 | SWCR_TRAP_ENABLE_DZE
10263 | SWCR_TRAP_ENABLE_OVF
);
10264 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
10265 | SWCR_TRAP_ENABLE_INE
);
10266 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
10267 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
10269 if (put_user_u64 (swcr
, arg2
))
10275 /* case GSI_IEEE_STATE_AT_SIGNAL:
10276 -- Not implemented in linux kernel.
10278 -- Retrieves current unaligned access state; not much used.
10279 case GSI_PROC_TYPE:
10280 -- Retrieves implver information; surely not used.
10281 case GSI_GET_HWRPB:
10282 -- Grabs a copy of the HWRPB; surely not used.
10287 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10288 /* Alpha specific */
10289 case TARGET_NR_osf_setsysinfo
:
10290 ret
= -TARGET_EOPNOTSUPP
;
10292 case TARGET_SSI_IEEE_FP_CONTROL
:
10294 uint64_t swcr
, fpcr
, orig_fpcr
;
10296 if (get_user_u64 (swcr
, arg2
)) {
10299 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10300 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
10302 /* Copied from linux ieee_swcr_to_fpcr. */
10303 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
10304 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
10305 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
10306 | SWCR_TRAP_ENABLE_DZE
10307 | SWCR_TRAP_ENABLE_OVF
)) << 48;
10308 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
10309 | SWCR_TRAP_ENABLE_INE
)) << 57;
10310 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
10311 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
10313 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
10318 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
10320 uint64_t exc
, fpcr
, orig_fpcr
;
10323 if (get_user_u64(exc
, arg2
)) {
10327 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10329 /* We only add to the exception status here. */
10330 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
10332 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
10335 /* Old exceptions are not signaled. */
10336 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
10338 /* If any exceptions set by this call,
10339 and are unmasked, send a signal. */
10341 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
10342 si_code
= TARGET_FPE_FLTRES
;
10344 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
10345 si_code
= TARGET_FPE_FLTUND
;
10347 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
10348 si_code
= TARGET_FPE_FLTOVF
;
10350 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
10351 si_code
= TARGET_FPE_FLTDIV
;
10353 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
10354 si_code
= TARGET_FPE_FLTINV
;
10356 if (si_code
!= 0) {
10357 target_siginfo_t info
;
10358 info
.si_signo
= SIGFPE
;
10360 info
.si_code
= si_code
;
10361 info
._sifields
._sigfault
._addr
10362 = ((CPUArchState
*)cpu_env
)->pc
;
10363 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
10368 /* case SSI_NVPAIRS:
10369 -- Used with SSIN_UACPROC to enable unaligned accesses.
10370 case SSI_IEEE_STATE_AT_SIGNAL:
10371 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10372 -- Not implemented in linux kernel
10377 #ifdef TARGET_NR_osf_sigprocmask
10378 /* Alpha specific. */
10379 case TARGET_NR_osf_sigprocmask
:
10383 sigset_t set
, oldset
;
10386 case TARGET_SIG_BLOCK
:
10389 case TARGET_SIG_UNBLOCK
:
10392 case TARGET_SIG_SETMASK
:
10396 ret
= -TARGET_EINVAL
;
10400 target_to_host_old_sigset(&set
, &mask
);
10401 ret
= do_sigprocmask(how
, &set
, &oldset
);
10403 host_to_target_old_sigset(&mask
, &oldset
);
10410 #ifdef TARGET_NR_getgid32
10411 case TARGET_NR_getgid32
:
10412 ret
= get_errno(getgid());
10415 #ifdef TARGET_NR_geteuid32
10416 case TARGET_NR_geteuid32
:
10417 ret
= get_errno(geteuid());
10420 #ifdef TARGET_NR_getegid32
10421 case TARGET_NR_getegid32
:
10422 ret
= get_errno(getegid());
10425 #ifdef TARGET_NR_setreuid32
10426 case TARGET_NR_setreuid32
:
10427 ret
= get_errno(setreuid(arg1
, arg2
));
10430 #ifdef TARGET_NR_setregid32
10431 case TARGET_NR_setregid32
:
10432 ret
= get_errno(setregid(arg1
, arg2
));
10435 #ifdef TARGET_NR_getgroups32
10436 case TARGET_NR_getgroups32
:
10438 int gidsetsize
= arg1
;
10439 uint32_t *target_grouplist
;
10443 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10444 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10445 if (gidsetsize
== 0)
10447 if (!is_error(ret
)) {
10448 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
10449 if (!target_grouplist
) {
10450 ret
= -TARGET_EFAULT
;
10453 for(i
= 0;i
< ret
; i
++)
10454 target_grouplist
[i
] = tswap32(grouplist
[i
]);
10455 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
10460 #ifdef TARGET_NR_setgroups32
10461 case TARGET_NR_setgroups32
:
10463 int gidsetsize
= arg1
;
10464 uint32_t *target_grouplist
;
10468 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10469 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
10470 if (!target_grouplist
) {
10471 ret
= -TARGET_EFAULT
;
10474 for(i
= 0;i
< gidsetsize
; i
++)
10475 grouplist
[i
] = tswap32(target_grouplist
[i
]);
10476 unlock_user(target_grouplist
, arg2
, 0);
10477 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
10481 #ifdef TARGET_NR_fchown32
10482 case TARGET_NR_fchown32
:
10483 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
10486 #ifdef TARGET_NR_setresuid32
10487 case TARGET_NR_setresuid32
:
10488 ret
= get_errno(sys_setresuid(arg1
, arg2
, arg3
));
10491 #ifdef TARGET_NR_getresuid32
10492 case TARGET_NR_getresuid32
:
10494 uid_t ruid
, euid
, suid
;
10495 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
10496 if (!is_error(ret
)) {
10497 if (put_user_u32(ruid
, arg1
)
10498 || put_user_u32(euid
, arg2
)
10499 || put_user_u32(suid
, arg3
))
10505 #ifdef TARGET_NR_setresgid32
10506 case TARGET_NR_setresgid32
:
10507 ret
= get_errno(sys_setresgid(arg1
, arg2
, arg3
));
10510 #ifdef TARGET_NR_getresgid32
10511 case TARGET_NR_getresgid32
:
10513 gid_t rgid
, egid
, sgid
;
10514 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
10515 if (!is_error(ret
)) {
10516 if (put_user_u32(rgid
, arg1
)
10517 || put_user_u32(egid
, arg2
)
10518 || put_user_u32(sgid
, arg3
))
10524 #ifdef TARGET_NR_chown32
10525 case TARGET_NR_chown32
:
10526 if (!(p
= lock_user_string(arg1
)))
10528 ret
= get_errno(chown(p
, arg2
, arg3
));
10529 unlock_user(p
, arg1
, 0);
10532 #ifdef TARGET_NR_setuid32
10533 case TARGET_NR_setuid32
:
10534 ret
= get_errno(sys_setuid(arg1
));
10537 #ifdef TARGET_NR_setgid32
10538 case TARGET_NR_setgid32
:
10539 ret
= get_errno(sys_setgid(arg1
));
10542 #ifdef TARGET_NR_setfsuid32
10543 case TARGET_NR_setfsuid32
:
10544 ret
= get_errno(setfsuid(arg1
));
10547 #ifdef TARGET_NR_setfsgid32
10548 case TARGET_NR_setfsgid32
:
10549 ret
= get_errno(setfsgid(arg1
));
10553 case TARGET_NR_pivot_root
:
10554 goto unimplemented
;
10555 #ifdef TARGET_NR_mincore
10556 case TARGET_NR_mincore
:
10559 ret
= -TARGET_EFAULT
;
10560 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
10562 if (!(p
= lock_user_string(arg3
)))
10564 ret
= get_errno(mincore(a
, arg2
, p
));
10565 unlock_user(p
, arg3
, ret
);
10567 unlock_user(a
, arg1
, 0);
10571 #ifdef TARGET_NR_arm_fadvise64_64
10572 case TARGET_NR_arm_fadvise64_64
:
10573 /* arm_fadvise64_64 looks like fadvise64_64 but
10574 * with different argument order: fd, advice, offset, len
10575 * rather than the usual fd, offset, len, advice.
10576 * Note that offset and len are both 64-bit so appear as
10577 * pairs of 32-bit registers.
10579 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
10580 target_offset64(arg5
, arg6
), arg2
);
10581 ret
= -host_to_target_errno(ret
);
10585 #if TARGET_ABI_BITS == 32
10587 #ifdef TARGET_NR_fadvise64_64
10588 case TARGET_NR_fadvise64_64
:
10589 /* 6 args: fd, offset (high, low), len (high, low), advice */
10590 if (regpairs_aligned(cpu_env
)) {
10591 /* offset is in (3,4), len in (5,6) and advice in 7 */
10598 ret
= -host_to_target_errno(posix_fadvise(arg1
,
10599 target_offset64(arg2
, arg3
),
10600 target_offset64(arg4
, arg5
),
10605 #ifdef TARGET_NR_fadvise64
10606 case TARGET_NR_fadvise64
:
10607 /* 5 args: fd, offset (high, low), len, advice */
10608 if (regpairs_aligned(cpu_env
)) {
10609 /* offset is in (3,4), len in 5 and advice in 6 */
10615 ret
= -host_to_target_errno(posix_fadvise(arg1
,
10616 target_offset64(arg2
, arg3
),
10621 #else /* not a 32-bit ABI */
10622 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10623 #ifdef TARGET_NR_fadvise64_64
10624 case TARGET_NR_fadvise64_64
:
10626 #ifdef TARGET_NR_fadvise64
10627 case TARGET_NR_fadvise64
:
10629 #ifdef TARGET_S390X
10631 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
10632 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
10633 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
10634 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
10638 ret
= -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
10641 #endif /* end of 64-bit ABI fadvise handling */
10643 #ifdef TARGET_NR_madvise
10644 case TARGET_NR_madvise
:
10645 /* A straight passthrough may not be safe because qemu sometimes
10646 turns private file-backed mappings into anonymous mappings.
10647 This will break MADV_DONTNEED.
10648 This is a hint, so ignoring and returning success is ok. */
10649 ret
= get_errno(0);
10652 #if TARGET_ABI_BITS == 32
10653 case TARGET_NR_fcntl64
:
10657 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
10658 to_flock64_fn
*copyto
= copy_to_user_flock64
;
10661 if (((CPUARMState
*)cpu_env
)->eabi
) {
10662 copyfrom
= copy_from_user_eabi_flock64
;
10663 copyto
= copy_to_user_eabi_flock64
;
10667 cmd
= target_to_host_fcntl_cmd(arg2
);
10668 if (cmd
== -TARGET_EINVAL
) {
10674 case TARGET_F_GETLK64
:
10675 ret
= copyfrom(&fl
, arg3
);
10679 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
10681 ret
= copyto(arg3
, &fl
);
10685 case TARGET_F_SETLK64
:
10686 case TARGET_F_SETLKW64
:
10687 ret
= copyfrom(&fl
, arg3
);
10691 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
10694 ret
= do_fcntl(arg1
, arg2
, arg3
);
10700 #ifdef TARGET_NR_cacheflush
10701 case TARGET_NR_cacheflush
:
10702 /* self-modifying code is handled automatically, so nothing needed */
10706 #ifdef TARGET_NR_security
10707 case TARGET_NR_security
:
10708 goto unimplemented
;
10710 #ifdef TARGET_NR_getpagesize
10711 case TARGET_NR_getpagesize
:
10712 ret
= TARGET_PAGE_SIZE
;
10715 case TARGET_NR_gettid
:
10716 ret
= get_errno(gettid());
10718 #ifdef TARGET_NR_readahead
10719 case TARGET_NR_readahead
:
10720 #if TARGET_ABI_BITS == 32
10721 if (regpairs_aligned(cpu_env
)) {
10726 ret
= get_errno(readahead(arg1
, ((off64_t
)arg3
<< 32) | arg2
, arg4
));
10728 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
10733 #ifdef TARGET_NR_setxattr
10734 case TARGET_NR_listxattr
:
10735 case TARGET_NR_llistxattr
:
10739 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10741 ret
= -TARGET_EFAULT
;
10745 p
= lock_user_string(arg1
);
10747 if (num
== TARGET_NR_listxattr
) {
10748 ret
= get_errno(listxattr(p
, b
, arg3
));
10750 ret
= get_errno(llistxattr(p
, b
, arg3
));
10753 ret
= -TARGET_EFAULT
;
10755 unlock_user(p
, arg1
, 0);
10756 unlock_user(b
, arg2
, arg3
);
10759 case TARGET_NR_flistxattr
:
10763 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10765 ret
= -TARGET_EFAULT
;
10769 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
10770 unlock_user(b
, arg2
, arg3
);
10773 case TARGET_NR_setxattr
:
10774 case TARGET_NR_lsetxattr
:
10776 void *p
, *n
, *v
= 0;
10778 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
10780 ret
= -TARGET_EFAULT
;
10784 p
= lock_user_string(arg1
);
10785 n
= lock_user_string(arg2
);
10787 if (num
== TARGET_NR_setxattr
) {
10788 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
10790 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
10793 ret
= -TARGET_EFAULT
;
10795 unlock_user(p
, arg1
, 0);
10796 unlock_user(n
, arg2
, 0);
10797 unlock_user(v
, arg3
, 0);
10800 case TARGET_NR_fsetxattr
:
10804 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
10806 ret
= -TARGET_EFAULT
;
10810 n
= lock_user_string(arg2
);
10812 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
10814 ret
= -TARGET_EFAULT
;
10816 unlock_user(n
, arg2
, 0);
10817 unlock_user(v
, arg3
, 0);
10820 case TARGET_NR_getxattr
:
10821 case TARGET_NR_lgetxattr
:
10823 void *p
, *n
, *v
= 0;
10825 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
10827 ret
= -TARGET_EFAULT
;
10831 p
= lock_user_string(arg1
);
10832 n
= lock_user_string(arg2
);
10834 if (num
== TARGET_NR_getxattr
) {
10835 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
10837 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
10840 ret
= -TARGET_EFAULT
;
10842 unlock_user(p
, arg1
, 0);
10843 unlock_user(n
, arg2
, 0);
10844 unlock_user(v
, arg3
, arg4
);
10847 case TARGET_NR_fgetxattr
:
10851 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
10853 ret
= -TARGET_EFAULT
;
10857 n
= lock_user_string(arg2
);
10859 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
10861 ret
= -TARGET_EFAULT
;
10863 unlock_user(n
, arg2
, 0);
10864 unlock_user(v
, arg3
, arg4
);
10867 case TARGET_NR_removexattr
:
10868 case TARGET_NR_lremovexattr
:
10871 p
= lock_user_string(arg1
);
10872 n
= lock_user_string(arg2
);
10874 if (num
== TARGET_NR_removexattr
) {
10875 ret
= get_errno(removexattr(p
, n
));
10877 ret
= get_errno(lremovexattr(p
, n
));
10880 ret
= -TARGET_EFAULT
;
10882 unlock_user(p
, arg1
, 0);
10883 unlock_user(n
, arg2
, 0);
10886 case TARGET_NR_fremovexattr
:
10889 n
= lock_user_string(arg2
);
10891 ret
= get_errno(fremovexattr(arg1
, n
));
10893 ret
= -TARGET_EFAULT
;
10895 unlock_user(n
, arg2
, 0);
10899 #endif /* CONFIG_ATTR */
10900 #ifdef TARGET_NR_set_thread_area
10901 case TARGET_NR_set_thread_area
:
10902 #if defined(TARGET_MIPS)
10903 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
10906 #elif defined(TARGET_CRIS)
10908 ret
= -TARGET_EINVAL
;
10910 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
10914 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
10915 ret
= do_set_thread_area(cpu_env
, arg1
);
10917 #elif defined(TARGET_M68K)
10919 TaskState
*ts
= cpu
->opaque
;
10920 ts
->tp_value
= arg1
;
10925 goto unimplemented_nowarn
;
10928 #ifdef TARGET_NR_get_thread_area
10929 case TARGET_NR_get_thread_area
:
10930 #if defined(TARGET_I386) && defined(TARGET_ABI32)
10931 ret
= do_get_thread_area(cpu_env
, arg1
);
10933 #elif defined(TARGET_M68K)
10935 TaskState
*ts
= cpu
->opaque
;
10936 ret
= ts
->tp_value
;
10940 goto unimplemented_nowarn
;
10943 #ifdef TARGET_NR_getdomainname
10944 case TARGET_NR_getdomainname
:
10945 goto unimplemented_nowarn
;
10948 #ifdef TARGET_NR_clock_gettime
10949 case TARGET_NR_clock_gettime
:
10951 struct timespec ts
;
10952 ret
= get_errno(clock_gettime(arg1
, &ts
));
10953 if (!is_error(ret
)) {
10954 host_to_target_timespec(arg2
, &ts
);
10959 #ifdef TARGET_NR_clock_getres
10960 case TARGET_NR_clock_getres
:
10962 struct timespec ts
;
10963 ret
= get_errno(clock_getres(arg1
, &ts
));
10964 if (!is_error(ret
)) {
10965 host_to_target_timespec(arg2
, &ts
);
10970 #ifdef TARGET_NR_clock_nanosleep
10971 case TARGET_NR_clock_nanosleep
:
10973 struct timespec ts
;
10974 target_to_host_timespec(&ts
, arg3
);
10975 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
10976 &ts
, arg4
? &ts
: NULL
));
10978 host_to_target_timespec(arg4
, &ts
);
10980 #if defined(TARGET_PPC)
10981 /* clock_nanosleep is odd in that it returns positive errno values.
10982 * On PPC, CR0 bit 3 should be set in such a situation. */
10983 if (ret
&& ret
!= -TARGET_ERESTARTSYS
) {
10984 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
10991 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
10992 case TARGET_NR_set_tid_address
:
10993 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
10997 case TARGET_NR_tkill
:
10998 ret
= get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
11001 case TARGET_NR_tgkill
:
11002 ret
= get_errno(safe_tgkill((int)arg1
, (int)arg2
,
11003 target_to_host_signal(arg3
)));
11006 #ifdef TARGET_NR_set_robust_list
11007 case TARGET_NR_set_robust_list
:
11008 case TARGET_NR_get_robust_list
:
11009 /* The ABI for supporting robust futexes has userspace pass
11010 * the kernel a pointer to a linked list which is updated by
11011 * userspace after the syscall; the list is walked by the kernel
11012 * when the thread exits. Since the linked list in QEMU guest
11013 * memory isn't a valid linked list for the host and we have
11014 * no way to reliably intercept the thread-death event, we can't
11015 * support these. Silently return ENOSYS so that guest userspace
11016 * falls back to a non-robust futex implementation (which should
11017 * be OK except in the corner case of the guest crashing while
11018 * holding a mutex that is shared with another process via
11021 goto unimplemented_nowarn
;
11024 #if defined(TARGET_NR_utimensat)
11025 case TARGET_NR_utimensat
:
11027 struct timespec
*tsp
, ts
[2];
11031 target_to_host_timespec(ts
, arg3
);
11032 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
11036 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
11038 if (!(p
= lock_user_string(arg2
))) {
11039 ret
= -TARGET_EFAULT
;
11042 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
11043 unlock_user(p
, arg2
, 0);
11048 case TARGET_NR_futex
:
11049 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
11051 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11052 case TARGET_NR_inotify_init
:
11053 ret
= get_errno(sys_inotify_init());
11056 #ifdef CONFIG_INOTIFY1
11057 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11058 case TARGET_NR_inotify_init1
:
11059 ret
= get_errno(sys_inotify_init1(arg1
));
11063 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11064 case TARGET_NR_inotify_add_watch
:
11065 p
= lock_user_string(arg2
);
11066 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
11067 unlock_user(p
, arg2
, 0);
11070 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11071 case TARGET_NR_inotify_rm_watch
:
11072 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
11076 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11077 case TARGET_NR_mq_open
:
11079 struct mq_attr posix_mq_attr
, *attrp
;
11081 p
= lock_user_string(arg1
- 1);
11083 copy_from_user_mq_attr (&posix_mq_attr
, arg4
);
11084 attrp
= &posix_mq_attr
;
11088 ret
= get_errno(mq_open(p
, arg2
, arg3
, attrp
));
11089 unlock_user (p
, arg1
, 0);
11093 case TARGET_NR_mq_unlink
:
11094 p
= lock_user_string(arg1
- 1);
11095 ret
= get_errno(mq_unlink(p
));
11096 unlock_user (p
, arg1
, 0);
11099 case TARGET_NR_mq_timedsend
:
11101 struct timespec ts
;
11103 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11105 target_to_host_timespec(&ts
, arg5
);
11106 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
11107 host_to_target_timespec(arg5
, &ts
);
11109 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
11111 unlock_user (p
, arg2
, arg3
);
11115 case TARGET_NR_mq_timedreceive
:
11117 struct timespec ts
;
11120 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11122 target_to_host_timespec(&ts
, arg5
);
11123 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11125 host_to_target_timespec(arg5
, &ts
);
11127 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11130 unlock_user (p
, arg2
, arg3
);
11132 put_user_u32(prio
, arg4
);
11136 /* Not implemented for now... */
11137 /* case TARGET_NR_mq_notify: */
11140 case TARGET_NR_mq_getsetattr
:
11142 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
11145 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
11146 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
11149 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
11150 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
11157 #ifdef CONFIG_SPLICE
11158 #ifdef TARGET_NR_tee
11159 case TARGET_NR_tee
:
11161 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
11165 #ifdef TARGET_NR_splice
11166 case TARGET_NR_splice
:
11168 loff_t loff_in
, loff_out
;
11169 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
11171 if (get_user_u64(loff_in
, arg2
)) {
11174 ploff_in
= &loff_in
;
11177 if (get_user_u64(loff_out
, arg4
)) {
11180 ploff_out
= &loff_out
;
11182 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
11184 if (put_user_u64(loff_in
, arg2
)) {
11189 if (put_user_u64(loff_out
, arg4
)) {
11196 #ifdef TARGET_NR_vmsplice
11197 case TARGET_NR_vmsplice
:
11199 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
11201 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
11202 unlock_iovec(vec
, arg2
, arg3
, 0);
11204 ret
= -host_to_target_errno(errno
);
11209 #endif /* CONFIG_SPLICE */
11210 #ifdef CONFIG_EVENTFD
11211 #if defined(TARGET_NR_eventfd)
11212 case TARGET_NR_eventfd
:
11213 ret
= get_errno(eventfd(arg1
, 0));
11214 fd_trans_unregister(ret
);
11217 #if defined(TARGET_NR_eventfd2)
11218 case TARGET_NR_eventfd2
:
11220 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
11221 if (arg2
& TARGET_O_NONBLOCK
) {
11222 host_flags
|= O_NONBLOCK
;
11224 if (arg2
& TARGET_O_CLOEXEC
) {
11225 host_flags
|= O_CLOEXEC
;
11227 ret
= get_errno(eventfd(arg1
, host_flags
));
11228 fd_trans_unregister(ret
);
11232 #endif /* CONFIG_EVENTFD */
11233 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11234 case TARGET_NR_fallocate
:
11235 #if TARGET_ABI_BITS == 32
11236 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
11237 target_offset64(arg5
, arg6
)));
11239 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
11243 #if defined(CONFIG_SYNC_FILE_RANGE)
11244 #if defined(TARGET_NR_sync_file_range)
11245 case TARGET_NR_sync_file_range
:
11246 #if TARGET_ABI_BITS == 32
11247 #if defined(TARGET_MIPS)
11248 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11249 target_offset64(arg5
, arg6
), arg7
));
11251 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
11252 target_offset64(arg4
, arg5
), arg6
));
11253 #endif /* !TARGET_MIPS */
11255 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
11259 #if defined(TARGET_NR_sync_file_range2)
11260 case TARGET_NR_sync_file_range2
:
11261 /* This is like sync_file_range but the arguments are reordered */
11262 #if TARGET_ABI_BITS == 32
11263 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11264 target_offset64(arg5
, arg6
), arg2
));
11266 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
11271 #if defined(TARGET_NR_signalfd4)
11272 case TARGET_NR_signalfd4
:
11273 ret
= do_signalfd4(arg1
, arg2
, arg4
);
11276 #if defined(TARGET_NR_signalfd)
11277 case TARGET_NR_signalfd
:
11278 ret
= do_signalfd4(arg1
, arg2
, 0);
11281 #if defined(CONFIG_EPOLL)
11282 #if defined(TARGET_NR_epoll_create)
11283 case TARGET_NR_epoll_create
:
11284 ret
= get_errno(epoll_create(arg1
));
11287 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11288 case TARGET_NR_epoll_create1
:
11289 ret
= get_errno(epoll_create1(arg1
));
11292 #if defined(TARGET_NR_epoll_ctl)
11293 case TARGET_NR_epoll_ctl
:
11295 struct epoll_event ep
;
11296 struct epoll_event
*epp
= 0;
11298 struct target_epoll_event
*target_ep
;
11299 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
11302 ep
.events
= tswap32(target_ep
->events
);
11303 /* The epoll_data_t union is just opaque data to the kernel,
11304 * so we transfer all 64 bits across and need not worry what
11305 * actual data type it is.
11307 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
11308 unlock_user_struct(target_ep
, arg4
, 0);
11311 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
11316 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11317 #if defined(TARGET_NR_epoll_wait)
11318 case TARGET_NR_epoll_wait
:
11320 #if defined(TARGET_NR_epoll_pwait)
11321 case TARGET_NR_epoll_pwait
:
11324 struct target_epoll_event
*target_ep
;
11325 struct epoll_event
*ep
;
11327 int maxevents
= arg3
;
11328 int timeout
= arg4
;
11330 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
11331 maxevents
* sizeof(struct target_epoll_event
), 1);
11336 ep
= alloca(maxevents
* sizeof(struct epoll_event
));
11339 #if defined(TARGET_NR_epoll_pwait)
11340 case TARGET_NR_epoll_pwait
:
11342 target_sigset_t
*target_set
;
11343 sigset_t _set
, *set
= &_set
;
11346 if (arg6
!= sizeof(target_sigset_t
)) {
11347 ret
= -TARGET_EINVAL
;
11351 target_set
= lock_user(VERIFY_READ
, arg5
,
11352 sizeof(target_sigset_t
), 1);
11354 unlock_user(target_ep
, arg2
, 0);
11357 target_to_host_sigset(set
, target_set
);
11358 unlock_user(target_set
, arg5
, 0);
11363 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
11364 set
, SIGSET_T_SIZE
));
11368 #if defined(TARGET_NR_epoll_wait)
11369 case TARGET_NR_epoll_wait
:
11370 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
11375 ret
= -TARGET_ENOSYS
;
11377 if (!is_error(ret
)) {
11379 for (i
= 0; i
< ret
; i
++) {
11380 target_ep
[i
].events
= tswap32(ep
[i
].events
);
11381 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
11384 unlock_user(target_ep
, arg2
, ret
* sizeof(struct target_epoll_event
));
11389 #ifdef TARGET_NR_prlimit64
11390 case TARGET_NR_prlimit64
:
11392 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11393 struct target_rlimit64
*target_rnew
, *target_rold
;
11394 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
11395 int resource
= target_to_host_resource(arg2
);
11397 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
11400 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
11401 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
11402 unlock_user_struct(target_rnew
, arg3
, 0);
11406 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
11407 if (!is_error(ret
) && arg4
) {
11408 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
11411 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
11412 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
11413 unlock_user_struct(target_rold
, arg4
, 1);
11418 #ifdef TARGET_NR_gethostname
11419 case TARGET_NR_gethostname
:
11421 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
11423 ret
= get_errno(gethostname(name
, arg2
));
11424 unlock_user(name
, arg1
, arg2
);
11426 ret
= -TARGET_EFAULT
;
11431 #ifdef TARGET_NR_atomic_cmpxchg_32
11432 case TARGET_NR_atomic_cmpxchg_32
:
11434 /* should use start_exclusive from main.c */
11435 abi_ulong mem_value
;
11436 if (get_user_u32(mem_value
, arg6
)) {
11437 target_siginfo_t info
;
11438 info
.si_signo
= SIGSEGV
;
11440 info
.si_code
= TARGET_SEGV_MAPERR
;
11441 info
._sifields
._sigfault
._addr
= arg6
;
11442 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
11446 if (mem_value
== arg2
)
11447 put_user_u32(arg1
, arg6
);
11452 #ifdef TARGET_NR_atomic_barrier
11453 case TARGET_NR_atomic_barrier
:
11455 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
11461 #ifdef TARGET_NR_timer_create
11462 case TARGET_NR_timer_create
:
11464 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11466 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
11469 int timer_index
= next_free_host_timer();
11471 if (timer_index
< 0) {
11472 ret
= -TARGET_EAGAIN
;
11474 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
11477 phost_sevp
= &host_sevp
;
11478 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
11484 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
11488 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
11497 #ifdef TARGET_NR_timer_settime
11498 case TARGET_NR_timer_settime
:
11500 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11501 * struct itimerspec * old_value */
11502 target_timer_t timerid
= get_timer_id(arg1
);
11506 } else if (arg3
== 0) {
11507 ret
= -TARGET_EINVAL
;
11509 timer_t htimer
= g_posix_timers
[timerid
];
11510 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
11512 target_to_host_itimerspec(&hspec_new
, arg3
);
11514 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
11515 host_to_target_itimerspec(arg2
, &hspec_old
);
11521 #ifdef TARGET_NR_timer_gettime
11522 case TARGET_NR_timer_gettime
:
11524 /* args: timer_t timerid, struct itimerspec *curr_value */
11525 target_timer_t timerid
= get_timer_id(arg1
);
11529 } else if (!arg2
) {
11530 ret
= -TARGET_EFAULT
;
11532 timer_t htimer
= g_posix_timers
[timerid
];
11533 struct itimerspec hspec
;
11534 ret
= get_errno(timer_gettime(htimer
, &hspec
));
11536 if (host_to_target_itimerspec(arg2
, &hspec
)) {
11537 ret
= -TARGET_EFAULT
;
11544 #ifdef TARGET_NR_timer_getoverrun
11545 case TARGET_NR_timer_getoverrun
:
11547 /* args: timer_t timerid */
11548 target_timer_t timerid
= get_timer_id(arg1
);
11553 timer_t htimer
= g_posix_timers
[timerid
];
11554 ret
= get_errno(timer_getoverrun(htimer
));
11556 fd_trans_unregister(ret
);
11561 #ifdef TARGET_NR_timer_delete
11562 case TARGET_NR_timer_delete
:
11564 /* args: timer_t timerid */
11565 target_timer_t timerid
= get_timer_id(arg1
);
11570 timer_t htimer
= g_posix_timers
[timerid
];
11571 ret
= get_errno(timer_delete(htimer
));
11572 g_posix_timers
[timerid
] = 0;
11578 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11579 case TARGET_NR_timerfd_create
:
11580 ret
= get_errno(timerfd_create(arg1
,
11581 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
11585 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11586 case TARGET_NR_timerfd_gettime
:
11588 struct itimerspec its_curr
;
11590 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
11592 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
11599 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11600 case TARGET_NR_timerfd_settime
:
11602 struct itimerspec its_new
, its_old
, *p_new
;
11605 if (target_to_host_itimerspec(&its_new
, arg3
)) {
11613 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
11615 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
11622 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11623 case TARGET_NR_ioprio_get
:
11624 ret
= get_errno(ioprio_get(arg1
, arg2
));
11628 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11629 case TARGET_NR_ioprio_set
:
11630 ret
= get_errno(ioprio_set(arg1
, arg2
, arg3
));
11634 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11635 case TARGET_NR_setns
:
11636 ret
= get_errno(setns(arg1
, arg2
));
11639 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11640 case TARGET_NR_unshare
:
11641 ret
= get_errno(unshare(arg1
));
11647 gemu_log("qemu: Unsupported syscall: %d\n", num
);
11648 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
11649 unimplemented_nowarn
:
11651 ret
= -TARGET_ENOSYS
;
11656 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
11659 print_syscall_ret(num
, ret
);
11660 trace_guest_user_syscall_ret(cpu
, num
, ret
);
11663 ret
= -TARGET_EFAULT
;