4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
29 #include <sys/mount.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
36 #include <linux/capability.h>
39 int __clone2(int (*fn
)(void *), void *child_stack_base
,
40 size_t stack_size
, int flags
, void *arg
, ...);
42 #include <sys/socket.h>
46 #include <sys/times.h>
49 #include <sys/statfs.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include "qemu-common.h"
60 #include <sys/timerfd.h>
66 #include <sys/eventfd.h>
69 #include <sys/epoll.h>
72 #include "qemu/xattr.h"
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
91 #include <linux/mtio.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <netpacket/packet.h>
104 #include <linux/netlink.h>
105 #ifdef CONFIG_RTNETLINK
106 #include <linux/rtnetlink.h>
107 #include <linux/if_bridge.h>
109 #include <linux/audit.h>
110 #include "linux_loop.h"
115 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
116 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
119 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
120 * once. This exercises the codepaths for restart.
122 //#define DEBUG_ERESTARTSYS
124 //#include <linux/msdos_fs.h>
125 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
126 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
136 #define _syscall0(type,name) \
137 static type name (void) \
139 return syscall(__NR_##name); \
142 #define _syscall1(type,name,type1,arg1) \
143 static type name (type1 arg1) \
145 return syscall(__NR_##name, arg1); \
148 #define _syscall2(type,name,type1,arg1,type2,arg2) \
149 static type name (type1 arg1,type2 arg2) \
151 return syscall(__NR_##name, arg1, arg2); \
154 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
155 static type name (type1 arg1,type2 arg2,type3 arg3) \
157 return syscall(__NR_##name, arg1, arg2, arg3); \
160 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
161 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
163 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
166 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
168 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
170 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
174 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
175 type5,arg5,type6,arg6) \
176 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
179 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
183 #define __NR_sys_uname __NR_uname
184 #define __NR_sys_getcwd1 __NR_getcwd
185 #define __NR_sys_getdents __NR_getdents
186 #define __NR_sys_getdents64 __NR_getdents64
187 #define __NR_sys_getpriority __NR_getpriority
188 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
189 #define __NR_sys_syslog __NR_syslog
190 #define __NR_sys_futex __NR_futex
191 #define __NR_sys_inotify_init __NR_inotify_init
192 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
193 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
195 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
197 #define __NR__llseek __NR_lseek
200 /* Newer kernel ports have llseek() instead of _llseek() */
201 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
202 #define TARGET_NR__llseek TARGET_NR_llseek
206 _syscall0(int, gettid
)
208 /* This is a replacement for the host gettid() and must return a host
210 static int gettid(void) {
214 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
215 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
217 #if !defined(__NR_getdents) || \
218 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
219 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
221 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
222 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
223 loff_t
*, res
, uint
, wh
);
225 _syscall3(int,sys_rt_sigqueueinfo
,int,pid
,int,sig
,siginfo_t
*,uinfo
)
226 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
227 #ifdef __NR_exit_group
228 _syscall1(int,exit_group
,int,error_code
)
230 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
231 _syscall1(int,set_tid_address
,int *,tidptr
)
233 #if defined(TARGET_NR_futex) && defined(__NR_futex)
234 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
235 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
237 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
238 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
239 unsigned long *, user_mask_ptr
);
240 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
241 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
242 unsigned long *, user_mask_ptr
);
243 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
245 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
246 struct __user_cap_data_struct
*, data
);
247 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
248 struct __user_cap_data_struct
*, data
);
249 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
250 _syscall2(int, ioprio_get
, int, which
, int, who
)
252 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
253 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
255 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
256 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
259 static bitmask_transtbl fcntl_flags_tbl
[] = {
260 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
261 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
262 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
263 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
264 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
265 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
266 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
267 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
268 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
269 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
270 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
271 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
272 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
273 #if defined(O_DIRECT)
274 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
276 #if defined(O_NOATIME)
277 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
279 #if defined(O_CLOEXEC)
280 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
283 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
285 /* Don't terminate the list prematurely on 64-bit host+guest. */
286 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
287 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
292 typedef abi_long (*TargetFdDataFunc
)(void *, size_t);
293 typedef abi_long (*TargetFdAddrFunc
)(void *, abi_ulong
, socklen_t
);
294 typedef struct TargetFdTrans
{
295 TargetFdDataFunc host_to_target_data
;
296 TargetFdDataFunc target_to_host_data
;
297 TargetFdAddrFunc target_to_host_addr
;
300 static TargetFdTrans
**target_fd_trans
;
302 static unsigned int target_fd_max
;
/* Look up the target->host data translator registered for an fd.
 * Bounds-checks fd against target_fd_max and requires a non-NULL
 * table entry before dereferencing.
 * NOTE(review): the fallback return path (presumably NULL) is elided
 * from this extraction; code below left byte-identical. */
304 static TargetFdDataFunc
fd_trans_target_to_host_data(int fd
)
306 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
307 return target_fd_trans
[fd
]->target_to_host_data
;
/* Look up the host->target data translator registered for an fd.
 * Mirrors fd_trans_target_to_host_data but returns the
 * host_to_target_data member of the fd's translation entry.
 * NOTE(review): fallback return elided from this extraction. */
312 static TargetFdDataFunc
fd_trans_host_to_target_data(int fd
)
314 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
315 return target_fd_trans
[fd
]->host_to_target_data
;
/* Look up the target->host sockaddr translator registered for an fd.
 * Same bounds/NULL-entry check as the data-translator lookups above.
 * NOTE(review): fallback return elided from this extraction. */
320 static TargetFdAddrFunc
fd_trans_target_to_host_addr(int fd
)
322 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
323 return target_fd_trans
[fd
]->target_to_host_addr
;
/* Associate a TargetFdTrans entry with fd.  If fd is beyond the current
 * table size, grow target_fd_trans with g_renew() to the next multiple
 * of 64 entries and zero the newly added slots so unregistered fds read
 * as NULL.  Ownership of *trans stays with the caller (entries here are
 * pointers into statically-defined trans structs — TODO confirm).
 * NOTE(review): the declaration of 'oldmax' and the closing braces are
 * elided from this extraction; code left byte-identical. */
328 static void fd_trans_register(int fd
, TargetFdTrans
*trans
)
332 if (fd
>= target_fd_max
) {
333 oldmax
= target_fd_max
;
/* Round the table size up to the next slice of 64 entries. */
334 target_fd_max
= ((fd
>> 6) + 1) << 6; /* by slice of 64 entries */
335 target_fd_trans
= g_renew(TargetFdTrans
*,
336 target_fd_trans
, target_fd_max
);
/* Zero the freshly grown tail so lookups on unregistered fds fail. */
337 memset((void *)(target_fd_trans
+ oldmax
), 0,
338 (target_fd_max
- oldmax
) * sizeof(TargetFdTrans
*));
340 target_fd_trans
[fd
] = trans
;
/* Drop any translation entry for fd.  Safe to call for fds that were
 * never registered: out-of-range fds are ignored, in-range slots are
 * simply cleared to NULL.
 * NOTE(review): closing braces elided from this extraction. */
343 static void fd_trans_unregister(int fd
)
345 if (fd
>= 0 && fd
< target_fd_max
) {
346 target_fd_trans
[fd
] = NULL
;
/* Propagate fd translation state across a dup()/dup2()-style copy:
 * newfd first loses any stale entry, then inherits oldfd's entry if
 * oldfd has one.  The entry pointer is shared, not deep-copied.
 * NOTE(review): closing braces elided from this extraction. */
350 static void fd_trans_dup(int oldfd
, int newfd
)
352 fd_trans_unregister(newfd
);
353 if (oldfd
< target_fd_max
&& target_fd_trans
[oldfd
]) {
354 fd_trans_register(newfd
, target_fd_trans
[oldfd
]);
/* getcwd() wrapper with kernel-style semantics: on success return the
 * length of the path INCLUDING the trailing NUL (strlen+1), matching
 * what the getcwd syscall returns, rather than libc's buffer pointer.
 * NOTE(review): the failure-path return (presumably -1, with errno set
 * by getcwd) is elided from this extraction. */
358 static int sys_getcwd1(char *buf
, size_t size
)
360 if (getcwd(buf
, size
) == NULL
) {
361 /* getcwd() sets errno */
364 return strlen(buf
)+1;
367 #ifdef TARGET_NR_utimensat
368 #ifdef CONFIG_UTIMENSAT
369 static int sys_utimensat(int dirfd
, const char *pathname
,
370 const struct timespec times
[2], int flags
)
372 if (pathname
== NULL
)
373 return futimens(dirfd
, times
);
375 return utimensat(dirfd
, pathname
, times
, flags
);
377 #elif defined(__NR_utimensat)
378 #define __NR_sys_utimensat __NR_utimensat
379 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
380 const struct timespec
*,tsp
,int,flags
)
382 static int sys_utimensat(int dirfd
, const char *pathname
,
383 const struct timespec times
[2], int flags
)
389 #endif /* TARGET_NR_utimensat */
391 #ifdef CONFIG_INOTIFY
392 #include <sys/inotify.h>
394 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
395 static int sys_inotify_init(void)
397 return (inotify_init());
400 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
401 static int sys_inotify_add_watch(int fd
,const char *pathname
, int32_t mask
)
403 return (inotify_add_watch(fd
, pathname
, mask
));
406 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
407 static int sys_inotify_rm_watch(int fd
, int32_t wd
)
409 return (inotify_rm_watch(fd
, wd
));
412 #ifdef CONFIG_INOTIFY1
413 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
414 static int sys_inotify_init1(int flags
)
416 return (inotify_init1(flags
));
421 /* Userspace can usually survive runtime without inotify */
422 #undef TARGET_NR_inotify_init
423 #undef TARGET_NR_inotify_init1
424 #undef TARGET_NR_inotify_add_watch
425 #undef TARGET_NR_inotify_rm_watch
426 #endif /* CONFIG_INOTIFY */
428 #if defined(TARGET_NR_prlimit64)
429 #ifndef __NR_prlimit64
430 # define __NR_prlimit64 -1
432 #define __NR_sys_prlimit64 __NR_prlimit64
433 /* The glibc rlimit structure may not be that used by the underlying syscall */
434 struct host_rlimit64
{
438 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
439 const struct host_rlimit64
*, new_limit
,
440 struct host_rlimit64
*, old_limit
)
444 #if defined(TARGET_NR_timer_create)
445 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
446 static timer_t g_posix_timers
[32] = { 0, } ;
448 static inline int next_free_host_timer(void)
451 /* FIXME: Does finding the next free slot require a lock? */
452 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
453 if (g_posix_timers
[k
] == 0) {
454 g_posix_timers
[k
] = (timer_t
) 1;
462 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
464 static inline int regpairs_aligned(void *cpu_env
) {
465 return ((((CPUARMState
*)cpu_env
)->eabi
) == 1) ;
467 #elif defined(TARGET_MIPS)
468 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
469 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
470 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
471 * of registers which translates to the same as ARM/MIPS, because we start with
473 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
475 static inline int regpairs_aligned(void *cpu_env
) { return 0; }
478 #define ERRNO_TABLE_SIZE 1200
480 /* target_to_host_errno_table[] is initialized from
481 * host_to_target_errno_table[] in syscall_init(). */
482 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
486 * This list is the union of errno values overridden in asm-<arch>/errno.h
487 * minus the errnos that are not actually generic to all archs.
489 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
490 [EAGAIN
] = TARGET_EAGAIN
,
491 [EIDRM
] = TARGET_EIDRM
,
492 [ECHRNG
] = TARGET_ECHRNG
,
493 [EL2NSYNC
] = TARGET_EL2NSYNC
,
494 [EL3HLT
] = TARGET_EL3HLT
,
495 [EL3RST
] = TARGET_EL3RST
,
496 [ELNRNG
] = TARGET_ELNRNG
,
497 [EUNATCH
] = TARGET_EUNATCH
,
498 [ENOCSI
] = TARGET_ENOCSI
,
499 [EL2HLT
] = TARGET_EL2HLT
,
500 [EDEADLK
] = TARGET_EDEADLK
,
501 [ENOLCK
] = TARGET_ENOLCK
,
502 [EBADE
] = TARGET_EBADE
,
503 [EBADR
] = TARGET_EBADR
,
504 [EXFULL
] = TARGET_EXFULL
,
505 [ENOANO
] = TARGET_ENOANO
,
506 [EBADRQC
] = TARGET_EBADRQC
,
507 [EBADSLT
] = TARGET_EBADSLT
,
508 [EBFONT
] = TARGET_EBFONT
,
509 [ENOSTR
] = TARGET_ENOSTR
,
510 [ENODATA
] = TARGET_ENODATA
,
511 [ETIME
] = TARGET_ETIME
,
512 [ENOSR
] = TARGET_ENOSR
,
513 [ENONET
] = TARGET_ENONET
,
514 [ENOPKG
] = TARGET_ENOPKG
,
515 [EREMOTE
] = TARGET_EREMOTE
,
516 [ENOLINK
] = TARGET_ENOLINK
,
517 [EADV
] = TARGET_EADV
,
518 [ESRMNT
] = TARGET_ESRMNT
,
519 [ECOMM
] = TARGET_ECOMM
,
520 [EPROTO
] = TARGET_EPROTO
,
521 [EDOTDOT
] = TARGET_EDOTDOT
,
522 [EMULTIHOP
] = TARGET_EMULTIHOP
,
523 [EBADMSG
] = TARGET_EBADMSG
,
524 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
525 [EOVERFLOW
] = TARGET_EOVERFLOW
,
526 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
527 [EBADFD
] = TARGET_EBADFD
,
528 [EREMCHG
] = TARGET_EREMCHG
,
529 [ELIBACC
] = TARGET_ELIBACC
,
530 [ELIBBAD
] = TARGET_ELIBBAD
,
531 [ELIBSCN
] = TARGET_ELIBSCN
,
532 [ELIBMAX
] = TARGET_ELIBMAX
,
533 [ELIBEXEC
] = TARGET_ELIBEXEC
,
534 [EILSEQ
] = TARGET_EILSEQ
,
535 [ENOSYS
] = TARGET_ENOSYS
,
536 [ELOOP
] = TARGET_ELOOP
,
537 [ERESTART
] = TARGET_ERESTART
,
538 [ESTRPIPE
] = TARGET_ESTRPIPE
,
539 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
540 [EUSERS
] = TARGET_EUSERS
,
541 [ENOTSOCK
] = TARGET_ENOTSOCK
,
542 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
543 [EMSGSIZE
] = TARGET_EMSGSIZE
,
544 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
545 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
546 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
547 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
548 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
549 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
550 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
551 [EADDRINUSE
] = TARGET_EADDRINUSE
,
552 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
553 [ENETDOWN
] = TARGET_ENETDOWN
,
554 [ENETUNREACH
] = TARGET_ENETUNREACH
,
555 [ENETRESET
] = TARGET_ENETRESET
,
556 [ECONNABORTED
] = TARGET_ECONNABORTED
,
557 [ECONNRESET
] = TARGET_ECONNRESET
,
558 [ENOBUFS
] = TARGET_ENOBUFS
,
559 [EISCONN
] = TARGET_EISCONN
,
560 [ENOTCONN
] = TARGET_ENOTCONN
,
561 [EUCLEAN
] = TARGET_EUCLEAN
,
562 [ENOTNAM
] = TARGET_ENOTNAM
,
563 [ENAVAIL
] = TARGET_ENAVAIL
,
564 [EISNAM
] = TARGET_EISNAM
,
565 [EREMOTEIO
] = TARGET_EREMOTEIO
,
566 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
567 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
568 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
569 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
570 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
571 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
572 [EALREADY
] = TARGET_EALREADY
,
573 [EINPROGRESS
] = TARGET_EINPROGRESS
,
574 [ESTALE
] = TARGET_ESTALE
,
575 [ECANCELED
] = TARGET_ECANCELED
,
576 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
577 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
579 [ENOKEY
] = TARGET_ENOKEY
,
582 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
585 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
588 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
591 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
593 #ifdef ENOTRECOVERABLE
594 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
/* Translate a host errno value to the target's numbering via the
 * host_to_target_errno_table, falling through (elided here) for values
 * outside the table or with no override — presumably returning err
 * unchanged; TODO confirm against the full source. */
598 static inline int host_to_target_errno(int err
)
600 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
601 host_to_target_errno_table
[err
]) {
602 return host_to_target_errno_table
[err
];
/* Inverse mapping: target errno -> host errno, same table-with-identity
 * -fallback structure as above (fallback elided from this extraction). */
607 static inline int target_to_host_errno(int err
)
609 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
610 target_to_host_errno_table
[err
]) {
611 return target_to_host_errno_table
[err
];
/* Convert a host syscall result to a target-style return value: the
 * visible path maps the host errno to a negated target errno.  The
 * success path (ret >= 0 presumably returned as-is) is elided. */
616 static inline abi_long
get_errno(abi_long ret
)
619 return -host_to_target_errno(errno
);
/* Kernel-style error test: values in the top 4095 of the unsigned
 * range (i.e. -1..-4095 as signed) are error codes. */
624 static inline int is_error(abi_long ret
)
626 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
/* Human-readable description of a target errno, for logging/strace.
 * Two QEMU-internal pseudo-errnos get fixed strings; genuinely
 * out-of-range values hit an elided branch (presumably returning NULL
 * or a placeholder — TODO confirm); everything else is mapped back to
 * a host errno and described via strerror(). */
629 const char *target_strerror(int err
)
631 if (err
== TARGET_ERESTARTSYS
) {
632 return "To be restarted";
634 if (err
== TARGET_QEMU_ESIGRETURN
) {
635 return "Successful exit from sigreturn";
638 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
641 return strerror(target_to_host_errno(err
));
644 #define safe_syscall0(type, name) \
645 static type safe_##name(void) \
647 return safe_syscall(__NR_##name); \
650 #define safe_syscall1(type, name, type1, arg1) \
651 static type safe_##name(type1 arg1) \
653 return safe_syscall(__NR_##name, arg1); \
656 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
657 static type safe_##name(type1 arg1, type2 arg2) \
659 return safe_syscall(__NR_##name, arg1, arg2); \
662 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
663 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
665 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
668 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
670 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
672 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
675 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
676 type4, arg4, type5, arg5) \
677 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
680 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
683 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
684 type4, arg4, type5, arg5, type6, arg6) \
685 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
686 type5 arg5, type6 arg6) \
688 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
691 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
692 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
693 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
694 int, flags
, mode_t
, mode
)
695 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
696 struct rusage
*, rusage
)
697 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
698 int, options
, struct rusage
*, rusage
)
699 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
700 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
701 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
702 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
703 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
705 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
706 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
708 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
709 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
710 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
711 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
712 safe_syscall2(int, tkill
, int, tid
, int, sig
)
713 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
714 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
715 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
716 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
718 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
719 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
720 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
721 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
722 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
723 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
724 safe_syscall2(int, flock
, int, fd
, int, operation
)
725 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
726 const struct timespec
*, uts
, size_t, sigsetsize
)
727 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
729 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
730 struct timespec
*, rem
)
731 #ifdef TARGET_NR_clock_nanosleep
732 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
733 const struct timespec
*, req
, struct timespec
*, rem
)
736 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
738 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
739 long, msgtype
, int, flags
)
740 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
741 unsigned, nsops
, const struct timespec
*, timeout
)
743 /* This host kernel architecture uses a single ipc syscall; fake up
744 * wrappers for the sub-operations to hide this implementation detail.
745 * Annoyingly we can't include linux/ipc.h to get the constant definitions
746 * for the call parameter because some structs in there conflict with the
747 * sys/ipc.h ones. So we just define them here, and rely on them being
748 * the same for all host architectures.
750 #define Q_SEMTIMEDOP 4
753 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
755 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
756 void *, ptr
, long, fifth
)
757 static int safe_msgsnd(int msgid
, const void *msgp
, size_t sz
, int flags
)
759 return safe_ipc(Q_IPCCALL(0, Q_MSGSND
), msgid
, sz
, flags
, (void *)msgp
, 0);
761 static int safe_msgrcv(int msgid
, void *msgp
, size_t sz
, long type
, int flags
)
763 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV
), msgid
, sz
, flags
, msgp
, type
);
765 static int safe_semtimedop(int semid
, struct sembuf
*tsops
, unsigned nsops
,
766 const struct timespec
*timeout
)
768 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP
), semid
, nsops
, 0, tsops
,
772 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
773 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
774 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
775 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
776 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
778 /* We do ioctl like this rather than via safe_syscall3 to preserve the
779 * "third argument might be integer or pointer or not present" behaviour of
782 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
783 /* Similarly for fcntl. Note that callers must always:
784 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
785 * use the flock64 struct rather than unsuffixed flock
786 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
789 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
791 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
/* Convert a host socket type value to target encoding: the low nibble
 * (SOCK_TYPE_MASK) selects the base type — DGRAM and STREAM get their
 * TARGET_* equivalents, anything else passes through unchanged — and
 * the CLOEXEC/NONBLOCK flag bits are re-ORed in using the target's
 * flag values when the host defines them.
 * NOTE(review): the switch's case labels, closing braces, #endifs and
 * final return are elided from this extraction. */
794 static inline int host_to_target_sock_type(int host_type
)
798 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
800 target_type
= TARGET_SOCK_DGRAM
;
803 target_type
= TARGET_SOCK_STREAM
;
/* Default: pass the masked host value through unchanged. */
806 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
810 #if defined(SOCK_CLOEXEC)
811 if (host_type
& SOCK_CLOEXEC
) {
812 target_type
|= TARGET_SOCK_CLOEXEC
;
816 #if defined(SOCK_NONBLOCK)
817 if (host_type
& SOCK_NONBLOCK
) {
818 target_type
|= TARGET_SOCK_NONBLOCK
;
/* Guest program-break state: current brk, the initial brk (floor below
 * which the guest may not shrink), and the highest host page already
 * reserved for the guest heap. */
825 static abi_ulong target_brk
;
826 static abi_ulong target_original_brk
;
827 static abi_ulong brk_page
;
/* Record the guest's initial program break (called at image load time);
 * both the original and current brk start at the page-aligned value. */
829 void target_set_brk(abi_ulong new_brk
)
831 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
832 brk_page
= HOST_PAGE_ALIGN(target_brk
);
835 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
836 #define DEBUGF_BRK(message, args...)
/* Emulate the guest brk() syscall.  Cases handled, in order: query
 * (new_brk == 0, elided), shrink below the original brk (refused),
 * grow within already-reserved pages (just zero and bump target_brk),
 * and grow past brk_page (anonymous target_mmap of the extra pages).
 * Returns target values / target errnos per the comment below.
 * NOTE(review): several branches' closing braces and return statements
 * are elided from this extraction; code left byte-identical. */
838 /* do_brk() must return target values and target errnos. */
839 abi_long
do_brk(abi_ulong new_brk
)
841 abi_long mapped_addr
;
842 abi_ulong new_alloc_size
;
844 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
847 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
/* Refuse to shrink below the initial break. */
850 if (new_brk
< target_original_brk
) {
851 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
856 /* If the new brk is less than the highest page reserved to the
857 * target heap allocation, set it and we're almost done... */
858 if (new_brk
<= brk_page
) {
859 /* Heap contents are initialized to zero, as for anonymous
861 if (new_brk
> target_brk
) {
862 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
864 target_brk
= new_brk
;
865 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
869 /* We need to allocate more memory after the brk... Note that
870 * we don't use MAP_FIXED because that will map over the top of
871 * any existing mapping (like the one with the host libc or qemu
872 * itself); instead we treat "mapped but at wrong address" as
873 * a failure and unmap again.
875 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
876 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
877 PROT_READ
|PROT_WRITE
,
878 MAP_ANON
|MAP_PRIVATE
, 0, 0));
880 if (mapped_addr
== brk_page
) {
881 /* Heap contents are initialized to zero, as for anonymous
882 * mapped pages. Technically the new pages are already
883 * initialized to zero since they *are* anonymous mapped
884 * pages, however we have to take care with the contents that
885 * come from the remaining part of the previous page: it may
886 * contains garbage data due to a previous heap usage (grown
888 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
890 target_brk
= new_brk
;
891 brk_page
= HOST_PAGE_ALIGN(target_brk
);
892 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
895 } else if (mapped_addr
!= -1) {
896 /* Mapped but at wrong address, meaning there wasn't actually
897 * enough space for this brk.
899 target_munmap(mapped_addr
, new_alloc_size
);
901 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
904 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
907 #if defined(TARGET_ALPHA)
908 /* We (partially) emulate OSF/1 on Alpha, which requires we
909 return a proper errno, not an unchanged brk value. */
910 return -TARGET_ENOMEM
;
912 /* For everything else, return the previous break. */
916 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
917 abi_ulong target_fds_addr
,
921 abi_ulong b
, *target_fds
;
923 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
924 if (!(target_fds
= lock_user(VERIFY_READ
,
926 sizeof(abi_ulong
) * nw
,
928 return -TARGET_EFAULT
;
932 for (i
= 0; i
< nw
; i
++) {
933 /* grab the abi_ulong */
934 __get_user(b
, &target_fds
[i
]);
935 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
936 /* check the bit inside the abi_ulong */
943 unlock_user(target_fds
, target_fds_addr
, 0);
948 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
949 abi_ulong target_fds_addr
,
952 if (target_fds_addr
) {
953 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
954 return -TARGET_EFAULT
;
962 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
968 abi_ulong
*target_fds
;
970 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
971 if (!(target_fds
= lock_user(VERIFY_WRITE
,
973 sizeof(abi_ulong
) * nw
,
975 return -TARGET_EFAULT
;
978 for (i
= 0; i
< nw
; i
++) {
980 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
981 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
984 __put_user(v
, &target_fds
[i
]);
987 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
992 #if defined(__alpha__)
998 static inline abi_long
host_to_target_clock_t(long ticks
)
1000 #if HOST_HZ == TARGET_HZ
1003 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
1007 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
1008 const struct rusage
*rusage
)
1010 struct target_rusage
*target_rusage
;
1012 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
1013 return -TARGET_EFAULT
;
1014 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
1015 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
1016 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
1017 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
1018 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
1019 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
1020 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
1021 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
1022 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
1023 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
1024 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1025 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1026 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1027 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1028 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1029 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1030 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1031 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1032 unlock_user_struct(target_rusage
, target_addr
, 1);
/* Convert a target rlimit value to the host rlim_t: byte-swap to host
 * order, map the target's INFINITY sentinel to RLIM_INFINITY, and also
 * saturate to RLIM_INFINITY when the value does not round-trip through
 * rlim_t (i.e. it overflows the host type).
 * NOTE(review): the declaration of 'result' and the final return are
 * elided from this extraction. */
1037 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1039 abi_ulong target_rlim_swap
;
1042 target_rlim_swap
= tswapal(target_rlim
);
1043 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1044 return RLIM_INFINITY
;
1046 result
= target_rlim_swap
;
1047 if (target_rlim_swap
!= (rlim_t
)result
)
1048 return RLIM_INFINITY
;
/* Inverse conversion, host rlim_t -> target encoding: values that are
 * RLIM_INFINITY or do not fit in abi_long become the target INFINITY
 * sentinel; the result is byte-swapped into target order.
 * NOTE(review): 'result' declaration, else-branch brace and final
 * return elided from this extraction. */
1053 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1055 abi_ulong target_rlim_swap
;
1058 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1059 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1061 target_rlim_swap
= rlim
;
1062 result
= tswapal(target_rlim_swap
);
/* Map a target RLIMIT_* resource code to the host's constant so
 * getrlimit/setrlimit can be forwarded.  Several cases' return lines
 * (AS, CORE, CPU, DATA, NICE, RSS) and the default branch are elided
 * from this extraction — presumably they return the matching host
 * RLIMIT_* or pass 'code' through; TODO confirm. */
1067 static inline int target_to_host_resource(int code
)
1070 case TARGET_RLIMIT_AS
:
1072 case TARGET_RLIMIT_CORE
:
1074 case TARGET_RLIMIT_CPU
:
1076 case TARGET_RLIMIT_DATA
:
1078 case TARGET_RLIMIT_FSIZE
:
1079 return RLIMIT_FSIZE
;
1080 case TARGET_RLIMIT_LOCKS
:
1081 return RLIMIT_LOCKS
;
1082 case TARGET_RLIMIT_MEMLOCK
:
1083 return RLIMIT_MEMLOCK
;
1084 case TARGET_RLIMIT_MSGQUEUE
:
1085 return RLIMIT_MSGQUEUE
;
1086 case TARGET_RLIMIT_NICE
:
1088 case TARGET_RLIMIT_NOFILE
:
1089 return RLIMIT_NOFILE
;
1090 case TARGET_RLIMIT_NPROC
:
1091 return RLIMIT_NPROC
;
1092 case TARGET_RLIMIT_RSS
:
1094 case TARGET_RLIMIT_RTPRIO
:
1095 return RLIMIT_RTPRIO
;
1096 case TARGET_RLIMIT_SIGPENDING
:
1097 return RLIMIT_SIGPENDING
;
1098 case TARGET_RLIMIT_STACK
:
1099 return RLIMIT_STACK
;
/* Copy a struct timeval from guest memory at target_tv_addr into the
 * host struct *tv, byte-swapping each field.  Returns 0 on success or
 * -TARGET_EFAULT if the guest address cannot be accessed.
 */
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
        return -TARGET_EFAULT;

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    /* Read-only access: nothing to copy back (last arg 0). */
    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
/* Copy the host struct *tv out to guest memory at target_tv_addr,
 * byte-swapping each field.  Returns 0 on success or -TARGET_EFAULT if
 * the guest address cannot be accessed.
 */
static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
        return -TARGET_EFAULT;

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    /* Write access: flush the struct back to guest memory (last arg 1). */
    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
/* Copy a struct timezone from guest memory at target_tz_addr into the
 * host struct *tz, byte-swapping each field.  Returns 0 on success or
 * -TARGET_EFAULT if the guest address cannot be accessed.
 */
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    /* Read-only access: nothing to copy back. */
    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
1154 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
/* Copy a POSIX message-queue attribute struct from guest memory into
 * the host struct *attr, byte-swapping each field.  Returns 0 on
 * success or -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    /* Read-only access: nothing to copy back. */
    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}
/* Copy the host message-queue attribute struct *attr out to guest
 * memory, byte-swapping each field.  Returns 0 on success or
 * -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    /* Write access: flush the struct back to guest memory. */
    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
1196 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1197 /* do_select() must return target values and target errnos. */
/* Emulate the guest's select() syscall.
 * Copies the three fd_sets and the optional timeout in from guest
 * memory, runs the host pselect6 (via the signal-safe wrapper), then
 * copies the (possibly modified) fd_sets and remaining timeout back.
 * do_select() must return target values and target errnos.
 * NOTE(review): some elided lines (local declarations, the early-return
 * checks after each fdset copy, and the else branch setting ts_ptr)
 * are reconstructed from upstream — verify.
 */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /* A zero guest address yields a NULL fd_set pointer to the host call. */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        /* pselect6 takes a timespec; convert microseconds to nanoseconds. */
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        /* Report the remaining timeout back to the guest, as Linux does. */
        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
/* Create a pipe with flags via the host pipe2() when available.
 * NOTE(review): the surrounding #ifdef CONFIG_PIPE2 / -ENOSYS fallback
 * was elided by extraction and is reconstructed from upstream — verify.
 */
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}
/* Emulate pipe()/pipe2() for the guest.
 * Creates the host pipe, then delivers the two descriptors to the guest:
 * several targets return the second fd in a CPU register instead of
 * writing both into the guest array (see below); everyone else gets
 * both fds stored at 'pipedes'.
 * NOTE(review): a few elided lines (locals, the is_error early return,
 * the !is_pipe2 guard and #endif) are reconstructed from upstream.
 */
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall. */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    /* Generic path: store both fds into the guest's int[2] array. */
    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
/* Convert a guest ip_mreq/ip_mreqn (multicast membership request) into
 * the host struct.  The two IPv4 addresses are already in network byte
 * order and are copied verbatim; only the optional interface index
 * (present when the guest passed a full ip_mreqn) needs swapping.
 * Returns 0 on success or -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    /* Addresses are network byte order on both sides: no swap needed. */
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
/* Convert a guest sockaddr at target_addr (len bytes) into the host
 * struct sockaddr *addr for fd.  Handles the per-fd address translator
 * hook, AF_UNIX sun_path termination fix-up, and byte-swapping of the
 * AF_NETLINK / AF_PACKET family-specific fields.
 * Returns 0 on success or -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    /* Some fd types (e.g. netlink) install their own address converter. */
    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            /* Last byte non-NUL but the next one is: include the NUL. */
            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
/* Copy a host struct sockaddr out to guest memory at target_addr,
 * byte-swapping sa_family (when it fits in len) and the family-specific
 * fields for AF_NETLINK and AF_PACKET.  Returns 0 on success or
 * -TARGET_EFAULT on a bad guest address.
 * NOTE(review): an elided early-return for len == 0 exists upstream and
 * is reconstructed here — verify.
 */
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    /* Only swap sa_family if the caller's buffer actually covers it. */
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
        struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
/* Convert the ancillary-data (control message) chain of a guest msghdr
 * into host form, walking guest and host cmsg chains in lockstep.
 * SCM_RIGHTS fd arrays and SCM_CREDENTIALS are converted field by
 * field; unknown payload types are copied verbatim with a warning.
 * Returns 0 on success or -TARGET_EFAULT on a bad guest address.
 * NOTE(review): several structural lines (goto the_end, break, else
 * arms) were elided by extraction and are reconstructed from upstream.
 */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload length = total guest cmsg length minus its header. */
        int len = tswapal(target_cmsg->cmsg_len)
            - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            gemu_log("Host cmsg overflow\n");
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            /* Best effort: copy the payload bytes unconverted. */
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
/* Convert the ancillary-data (control message) chain of a host msghdr
 * back into guest form.  Truncation against the guest's buffer is
 * reported via MSG_CTRUNC (as Linux's put_cmsg() does); SCM_RIGHTS,
 * SO_TIMESTAMP and SCM_CREDENTIALS payloads are converted field by
 * field, anything else is copied raw with a warning.
 * Returns 0 on success or -TARGET_EFAULT on a bad guest address.
 * NOTE(review): the extraction elided most break/default/goto lines of
 * the nested switches; this structure is reconstructed from upstream —
 * verify carefully before relying on it.
 */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        tgt_len = TARGET_CMSG_LEN(len);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
        default:
            break;
        }

        if (msg_controllen < tgt_len) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen;
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                /* Zero-fill any extra space the target layout requires. */
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(tgt_len);
        tgt_space = TARGET_CMSG_SPACE(len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
/* Byte-swap every field of a netlink message header in place.
 * The swap is symmetric, so this works for both host-to-target and
 * target-to-host directions.
 */
static void tswap_nlmsghdr(struct nlmsghdr *nlh)
{
    nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
    nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
    nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
    nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
    nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
}
/* Walk a buffer of host-order netlink messages of total size len,
 * converting each payload via the supplied callback and then swapping
 * each header into guest order.  NLMSG_DONE and NLMSG_ERROR terminate
 * the walk.  Returns 0 or a negative error from the callback.
 * NOTE(review): the case labels (NLMSG_DONE/NOOP/ERROR) and some
 * control lines were elided; reconstructed from upstream.
 */
static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*host_to_target_nlmsg)
                                                       (struct nlmsghdr *))
{
    uint32_t nlmsg_len;
    abi_long ret;

    while (len > sizeof(struct nlmsghdr)) {

        /* Header is still in host order here; capture len before swap. */
        nlmsg_len = nlh->nlmsg_len;
        if (nlmsg_len < sizeof(struct nlmsghdr) ||
            nlmsg_len > len) {
            break;
        }

        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            tswap_nlmsghdr(nlh);
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            /* The error payload embeds the offending request's header. */
            tswap_nlmsghdr(&e->msg);
            tswap_nlmsghdr(nlh);
            return 0;
        }
        default:
            ret = host_to_target_nlmsg(nlh);
            if (ret < 0) {
                tswap_nlmsghdr(nlh);
                return ret;
            }
            break;
        }
        tswap_nlmsghdr(nlh);
        len -= NLMSG_ALIGN(nlmsg_len);
        nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
    }
    return 0;
}
/* Walk a buffer of guest-order netlink messages of total size len,
 * swapping each header to host order and converting each payload via
 * the supplied callback.  NLMSG_DONE and NLMSG_ERROR terminate the
 * walk.  Returns 0 or a negative error from the callback.
 * NOTE(review): case labels and some control lines were elided;
 * reconstructed from upstream.
 */
static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*target_to_host_nlmsg)
                                                       (struct nlmsghdr *))
{
    int ret;

    while (len > sizeof(struct nlmsghdr)) {
        /* Validate the guest-order length before touching the header. */
        if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
            tswap32(nlh->nlmsg_len) > len) {
            break;
        }
        tswap_nlmsghdr(nlh);
        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            tswap_nlmsghdr(&e->msg);
            return 0;
        }
        default:
            ret = target_to_host_nlmsg(nlh);
            if (ret < 0) {
                return ret;
            }
        }
        /* Header is host order now: use nlmsg_len directly. */
        len -= NLMSG_ALIGN(nlh->nlmsg_len);
        nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
    }
    return 0;
}
1722 #ifdef CONFIG_RTNETLINK
/* Walk a run of host-order netlink attributes of total size len,
 * converting each payload via the callback (which receives 'context')
 * and then swapping each attribute header into guest order.
 * Returns 0 or a negative error from the callback.
 */
static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
                                               size_t len, void *context,
                                               abi_long (*host_to_target_nlattr)
                                                        (struct nlattr *,
                                                         void *context))
{
    unsigned short nla_len;
    abi_long ret;

    while (len > sizeof(struct nlattr)) {
        /* Capture host-order length before the header is swapped. */
        nla_len = nlattr->nla_len;
        if (nla_len < sizeof(struct nlattr) ||
            nla_len > len) {
            break;
        }
        ret = host_to_target_nlattr(nlattr, context);
        /* Swap the header only after the payload converter has run. */
        nlattr->nla_len = tswap16(nlattr->nla_len);
        nlattr->nla_type = tswap16(nlattr->nla_type);
        if (ret < 0) {
            return ret;
        }
        len -= NLA_ALIGN(nla_len);
        nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
    }
    return 0;
}
/* Walk a run of host-order rtnetlink attributes of total size len,
 * converting each payload via the callback and then swapping each
 * attribute header into guest order.
 * Returns 0 or a negative error from the callback.
 */
static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
                                               uint32_t len,
                                               abi_long (*host_to_target_rtattr)
                                                        (struct rtattr *))
{
    unsigned short rta_len;
    abi_long ret;

    while (len > sizeof(struct rtattr)) {
        /* Capture host-order length before the header is swapped. */
        rta_len = rtattr->rta_len;
        if (rta_len < sizeof(struct rtattr) ||
            rta_len > len) {
            break;
        }
        ret = host_to_target_rtattr(rtattr);
        /* Swap the header only after the payload converter has run. */
        rtattr->rta_len = tswap16(rtattr->rta_len);
        rtattr->rta_type = tswap16(rtattr->rta_type);
        if (ret < 0) {
            return ret;
        }
        len -= RTA_ALIGN(rta_len);
        rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
    }
    return 0;
}
1776 #define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)
1778 static abi_long
host_to_target_data_bridge_nlattr(struct nlattr
*nlattr
,
1785 switch (nlattr
->nla_type
) {
1786 #ifdef IFLA_BR_FDB_FLUSH
1788 case IFLA_BR_FDB_FLUSH
:
1791 #ifdef IFLA_BR_GROUP_ADDR
1793 case IFLA_BR_GROUP_ADDR
:
1797 case IFLA_BR_VLAN_FILTERING
:
1798 #ifdef IFLA_BR_TOPOLOGY_CHANGE
1799 case IFLA_BR_TOPOLOGY_CHANGE
:
1801 #ifdef IFLA_BR_TOPOLOGY_CHANGE_DETECTED
1802 case IFLA_BR_TOPOLOGY_CHANGE_DETECTED
:
1804 #ifdef IFLA_BR_MCAST_ROUTER
1805 case IFLA_BR_MCAST_ROUTER
:
1807 #ifdef IFLA_BR_MCAST_SNOOPING
1808 case IFLA_BR_MCAST_SNOOPING
:
1810 #ifdef IFLA_BR_MCAST_QUERY_USE_IFADDR
1811 case IFLA_BR_MCAST_QUERY_USE_IFADDR
:
1813 #ifdef IFLA_BR_MCAST_QUERIER
1814 case IFLA_BR_MCAST_QUERIER
:
1816 #ifdef IFLA_BR_NF_CALL_IPTABLES
1817 case IFLA_BR_NF_CALL_IPTABLES
:
1819 #ifdef IFLA_BR_NF_CALL_IP6TABLES
1820 case IFLA_BR_NF_CALL_IP6TABLES
:
1822 #ifdef IFLA_BR_NF_CALL_ARPTABLES
1823 case IFLA_BR_NF_CALL_ARPTABLES
:
1827 case IFLA_BR_PRIORITY
:
1828 case IFLA_BR_VLAN_PROTOCOL
:
1829 #ifdef IFLA_BR_GROUP_FWD_MASK
1830 case IFLA_BR_GROUP_FWD_MASK
:
1832 #ifdef IFLA_BR_ROOT_PORT
1833 case IFLA_BR_ROOT_PORT
:
1835 #ifdef IFLA_BR_VLAN_DEFAULT_PVID
1836 case IFLA_BR_VLAN_DEFAULT_PVID
:
1838 u16
= NLA_DATA(nlattr
);
1839 *u16
= tswap16(*u16
);
1842 case IFLA_BR_FORWARD_DELAY
:
1843 case IFLA_BR_HELLO_TIME
:
1844 case IFLA_BR_MAX_AGE
:
1845 case IFLA_BR_AGEING_TIME
:
1846 case IFLA_BR_STP_STATE
:
1847 #ifdef IFLA_BR_ROOT_PATH_COST
1848 case IFLA_BR_ROOT_PATH_COST
:
1850 #ifdef IFLA_BR_MCAST_HASH_ELASTICITY
1851 case IFLA_BR_MCAST_HASH_ELASTICITY
:
1853 #ifdef IFLA_BR_MCAST_HASH_MAX
1854 case IFLA_BR_MCAST_HASH_MAX
:
1856 #ifdef IFLA_BR_MCAST_LAST_MEMBER_CNT
1857 case IFLA_BR_MCAST_LAST_MEMBER_CNT
:
1859 #ifdef IFLA_BR_MCAST_STARTUP_QUERY_CNT
1860 case IFLA_BR_MCAST_STARTUP_QUERY_CNT
:
1862 u32
= NLA_DATA(nlattr
);
1863 *u32
= tswap32(*u32
);
1866 #ifdef IFLA_BR_HELLO_TIMER
1867 case IFLA_BR_HELLO_TIMER
:
1869 #ifdef IFLA_BR_TCN_TIMER
1870 case IFLA_BR_TCN_TIMER
:
1872 #ifdef IFLA_BR_GC_TIMER
1873 case IFLA_BR_GC_TIMER
:
1875 #ifdef IFLA_BR_TOPOLOGY_CHANGE_TIMER
1876 case IFLA_BR_TOPOLOGY_CHANGE_TIMER
:
1878 #ifdef IFLA_BR_MCAST_LAST_MEMBER_INTVL
1879 case IFLA_BR_MCAST_LAST_MEMBER_INTVL
:
1881 #ifdef IFLA_BR_MCAST_MEMBERSHIP_INTVL
1882 case IFLA_BR_MCAST_MEMBERSHIP_INTVL
:
1884 #ifdef IFLA_BR_MCAST_QUERIER_INTVL
1885 case IFLA_BR_MCAST_QUERIER_INTVL
:
1887 #ifdef IFLA_BR_MCAST_QUERY_INTVL
1888 case IFLA_BR_MCAST_QUERY_INTVL
:
1890 #ifdef IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
1891 case IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
:
1893 #ifdef IFLA_BR_MCAST_STARTUP_QUERY_INTVL
1894 case IFLA_BR_MCAST_STARTUP_QUERY_INTVL
:
1896 u64
= NLA_DATA(nlattr
);
1897 *u64
= tswap64(*u64
);
1899 /* ifla_bridge_id: uin8_t[] */
1900 #ifdef IFLA_BR_ROOT_ID
1901 case IFLA_BR_ROOT_ID
:
1903 #ifdef IFLA_BR_BRIDGE_ID
1904 case IFLA_BR_BRIDGE_ID
:
1908 gemu_log("Unknown IFLA_BR type %d\n", nlattr
->nla_type
);
1914 static abi_long
host_to_target_slave_data_bridge_nlattr(struct nlattr
*nlattr
,
1921 switch (nlattr
->nla_type
) {
1923 case IFLA_BRPORT_STATE
:
1924 case IFLA_BRPORT_MODE
:
1925 case IFLA_BRPORT_GUARD
:
1926 case IFLA_BRPORT_PROTECT
:
1927 case IFLA_BRPORT_FAST_LEAVE
:
1928 case IFLA_BRPORT_LEARNING
:
1929 case IFLA_BRPORT_UNICAST_FLOOD
:
1930 case IFLA_BRPORT_PROXYARP
:
1931 case IFLA_BRPORT_LEARNING_SYNC
:
1932 case IFLA_BRPORT_PROXYARP_WIFI
:
1933 #ifdef IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
1934 case IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
:
1936 #ifdef IFLA_BRPORT_CONFIG_PENDING
1937 case IFLA_BRPORT_CONFIG_PENDING
:
1939 #ifdef IFLA_BRPORT_MULTICAST_ROUTER
1940 case IFLA_BRPORT_MULTICAST_ROUTER
:
1944 case IFLA_BRPORT_PRIORITY
:
1945 #ifdef IFLA_BRPORT_DESIGNATED_PORT
1946 case IFLA_BRPORT_DESIGNATED_PORT
:
1948 #ifdef IFLA_BRPORT_DESIGNATED_COST
1949 case IFLA_BRPORT_DESIGNATED_COST
:
1951 #ifdef IFLA_BRPORT_ID
1952 case IFLA_BRPORT_ID
:
1954 #ifdef IFLA_BRPORT_NO
1955 case IFLA_BRPORT_NO
:
1957 u16
= NLA_DATA(nlattr
);
1958 *u16
= tswap16(*u16
);
1961 case IFLA_BRPORT_COST
:
1962 u32
= NLA_DATA(nlattr
);
1963 *u32
= tswap32(*u32
);
1966 #ifdef IFLA_BRPORT_MESSAGE_AGE_TIMER
1967 case IFLA_BRPORT_MESSAGE_AGE_TIMER
:
1969 #ifdef IFLA_BRPORT_FORWARD_DELAY_TIMER
1970 case IFLA_BRPORT_FORWARD_DELAY_TIMER
:
1972 #ifdef IFLA_BRPORT_HOLD_TIMER
1973 case IFLA_BRPORT_HOLD_TIMER
:
1975 u64
= NLA_DATA(nlattr
);
1976 *u64
= tswap64(*u64
);
1978 /* ifla_bridge_id: uint8_t[] */
1979 #ifdef IFLA_BRPORT_ROOT_ID
1980 case IFLA_BRPORT_ROOT_ID
:
1982 #ifdef IFLA_BRPORT_BRIDGE_ID
1983 case IFLA_BRPORT_BRIDGE_ID
:
1987 gemu_log("Unknown IFLA_BRPORT type %d\n", nlattr
->nla_type
);
1993 struct linkinfo_context
{
/* Convert one IFLA_LINKINFO sub-attribute from host to guest order.
 * IFLA_INFO_KIND / IFLA_INFO_SLAVE_KIND record the kind string in the
 * linkinfo_context so the nested IFLA_INFO_DATA / IFLA_INFO_SLAVE_DATA
 * attributes can later be dispatched to the right converter (only
 * "bridge" is supported).
 * NOTE(review): break statements and NULL context arguments were
 * elided; reconstructed from upstream.
 */
static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
                                                    void *context)
{
    struct linkinfo_context *li_context = context;

    switch (nlattr->nla_type) {
    /* string */
    case IFLA_INFO_KIND:
        li_context->name = NLA_DATA(nlattr);
        li_context->len = nlattr->nla_len - NLA_HDRLEN;
        break;
    case IFLA_INFO_SLAVE_KIND:
        li_context->slave_name = NLA_DATA(nlattr);
        li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
        break;
    /* stats */
    case IFLA_INFO_XSTATS:
        /* FIXME: only used by CAN */
        break;
    /* nested */
    case IFLA_INFO_DATA:
        if (strncmp(li_context->name, "bridge",
                    li_context->len) == 0) {
            return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
                                                  nlattr->nla_len,
                                                  NULL,
                                             host_to_target_data_bridge_nlattr);
        } else {
            gemu_log("Unknown IFLA_INFO_KIND %s\n", li_context->name);
        }
        break;
    case IFLA_INFO_SLAVE_DATA:
        if (strncmp(li_context->slave_name, "bridge",
                    li_context->slave_len) == 0) {
            return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
                                                  nlattr->nla_len,
                                                  NULL,
                                       host_to_target_slave_data_bridge_nlattr);
        } else {
            gemu_log("Unknown IFLA_INFO_SLAVE_KIND %s\n",
                     li_context->slave_name);
        }
        break;
    default:
        gemu_log("Unknown host IFLA_INFO type: %d\n", nlattr->nla_type);
        break;
    }

    return 0;
}
/* Convert one AF_INET sub-attribute of IFLA_AF_SPEC from host to guest
 * order.  Only the IFLA_INET_CONF uint32 array is swapped; anything
 * else is logged and left untouched.
 */
static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
                                                void *context)
{
    uint32_t *u32;
    int i;

    switch (nlattr->nla_type) {
    /* binary: depends on family type */
    case IFLA_INET_CONF:
        u32 = NLA_DATA(nlattr);
        /* Swap every 32-bit entry in the conf array. */
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
             i++) {
            u32[i] = tswap32(u32[i]);
        }
        break;
    default:
        gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
    }
    return 0;
}
/* Convert one AF_INET6 sub-attribute of IFLA_AF_SPEC from host to
 * guest order: token and gen-mode bytes pass through, flags are a
 * single u32, conf is a u32 array, cacheinfo is swapped field by field,
 * and the stats attributes are u64 arrays.
 * NOTE(review): break statements and group comments were elided;
 * reconstructed from upstream.
 */
static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
                                                 void *context)
{
    uint32_t *u32;
    uint64_t *u64;
    struct ifla_cacheinfo *ci;
    int i;

    switch (nlattr->nla_type) {
    /* binaries */
    case IFLA_INET6_TOKEN:
        break;
    /* uint8_t */
    case IFLA_INET6_ADDR_GEN_MODE:
        break;
    /* uint32_t */
    case IFLA_INET6_FLAGS:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint32_t[] */
    case IFLA_INET6_CONF:
        u32 = NLA_DATA(nlattr);
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
             i++) {
            u32[i] = tswap32(u32[i]);
        }
        break;
    /* ifla_cacheinfo */
    case IFLA_INET6_CACHEINFO:
        ci = NLA_DATA(nlattr);
        ci->max_reasm_len = tswap32(ci->max_reasm_len);
        ci->tstamp = tswap32(ci->tstamp);
        ci->reachable_time = tswap32(ci->reachable_time);
        ci->retrans_time = tswap32(ci->retrans_time);
        break;
    /* uint64_t[] */
    case IFLA_INET6_STATS:
    case IFLA_INET6_ICMP6STATS:
        u64 = NLA_DATA(nlattr);
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
             i++) {
            u64[i] = tswap64(u64[i]);
        }
        break;
    default:
        gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
    }
    return 0;
}
/* Dispatch one IFLA_AF_SPEC sub-attribute (keyed by address family) to
 * the matching per-family nested-attribute converter.
 * NOTE(review): the AF_INET/AF_INET6 case labels and NULL context
 * arguments were elided; reconstructed from upstream.
 */
static abi_long host_to_target_data_spec_nlattr(struct nlattr *nlattr,
                                                void *context)
{
    switch (nlattr->nla_type) {
    case AF_INET:
        return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
                                              NULL,
                                             host_to_target_data_inet_nlattr);
    case AF_INET6:
        return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
                                              NULL,
                                             host_to_target_data_inet6_nlattr);
    default:
        gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}
/* Convert one RTM_*LINK rtattr payload from host to guest order.
 * Strings and MAC addresses pass through; scalar attributes are
 * swapped by width; IFLA_STATS / IFLA_STATS64 / IFLA_MAP are swapped
 * field by field; IFLA_LINKINFO and IFLA_AF_SPEC recurse into nested
 * attribute walks.
 * NOTE(review): the case-label preamble (binary/string/uint8/uint32
 * groups) was largely elided by extraction; the labels below follow
 * upstream QEMU and must be verified.  The stats field swaps are all
 * visible in the source.
 */
static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct rtnl_link_stats *st;
    struct rtnl_link_stats64 *st64;
    struct rtnl_link_ifmap *map;
    struct linkinfo_context li_context;

    switch (rtattr->rta_type) {
    /* binary stream */
    case IFLA_ADDRESS:
    case IFLA_BROADCAST:
    /* string */
    case IFLA_IFNAME:
    case IFLA_QDISC:
        break;
    /* uin8_t */
    case IFLA_OPERSTATE:
    case IFLA_LINKMODE:
    case IFLA_CARRIER:
    case IFLA_PROTO_DOWN:
        break;
    /* uint32_t */
    case IFLA_MTU:
    case IFLA_LINK:
    case IFLA_WEIGHT:
    case IFLA_TXQLEN:
    case IFLA_CARRIER_CHANGES:
    case IFLA_NUM_RX_QUEUES:
    case IFLA_NUM_TX_QUEUES:
    case IFLA_PROMISCUITY:
    case IFLA_EXT_MASK:
    case IFLA_LINK_NETNSID:
    case IFLA_GROUP:
    case IFLA_MASTER:
    case IFLA_NET_NS_FD:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct rtnl_link_stats */
    case IFLA_STATS:
        st = RTA_DATA(rtattr);
        st->rx_packets = tswap32(st->rx_packets);
        st->tx_packets = tswap32(st->tx_packets);
        st->rx_bytes = tswap32(st->rx_bytes);
        st->tx_bytes = tswap32(st->tx_bytes);
        st->rx_errors = tswap32(st->rx_errors);
        st->tx_errors = tswap32(st->tx_errors);
        st->rx_dropped = tswap32(st->rx_dropped);
        st->tx_dropped = tswap32(st->tx_dropped);
        st->multicast = tswap32(st->multicast);
        st->collisions = tswap32(st->collisions);

        /* detailed rx_errors: */
        st->rx_length_errors = tswap32(st->rx_length_errors);
        st->rx_over_errors = tswap32(st->rx_over_errors);
        st->rx_crc_errors = tswap32(st->rx_crc_errors);
        st->rx_frame_errors = tswap32(st->rx_frame_errors);
        st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
        st->rx_missed_errors = tswap32(st->rx_missed_errors);

        /* detailed tx_errors */
        st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
        st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
        st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
        st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
        st->tx_window_errors = tswap32(st->tx_window_errors);

        /* for cslip etc */
        st->rx_compressed = tswap32(st->rx_compressed);
        st->tx_compressed = tswap32(st->tx_compressed);
        break;
    /* struct rtnl_link_stats64 */
    case IFLA_STATS64:
        st64 = RTA_DATA(rtattr);
        st64->rx_packets = tswap64(st64->rx_packets);
        st64->tx_packets = tswap64(st64->tx_packets);
        st64->rx_bytes = tswap64(st64->rx_bytes);
        st64->tx_bytes = tswap64(st64->tx_bytes);
        st64->rx_errors = tswap64(st64->rx_errors);
        st64->tx_errors = tswap64(st64->tx_errors);
        st64->rx_dropped = tswap64(st64->rx_dropped);
        st64->tx_dropped = tswap64(st64->tx_dropped);
        st64->multicast = tswap64(st64->multicast);
        st64->collisions = tswap64(st64->collisions);

        /* detailed rx_errors: */
        st64->rx_length_errors = tswap64(st64->rx_length_errors);
        st64->rx_over_errors = tswap64(st64->rx_over_errors);
        st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
        st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
        st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
        st64->rx_missed_errors = tswap64(st64->rx_missed_errors);

        /* detailed tx_errors */
        st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
        st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
        st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
        st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
        st64->tx_window_errors = tswap64(st64->tx_window_errors);

        /* for cslip etc */
        st64->rx_compressed = tswap64(st64->rx_compressed);
        st64->tx_compressed = tswap64(st64->tx_compressed);
        break;
    /* struct rtnl_link_ifmap */
    case IFLA_MAP:
        map = RTA_DATA(rtattr);
        map->mem_start = tswap64(map->mem_start);
        map->mem_end = tswap64(map->mem_end);
        map->base_addr = tswap64(map->base_addr);
        map->irq = tswap16(map->irq);
        break;
    /* nested */
    case IFLA_LINKINFO:
        memset(&li_context, 0, sizeof(li_context));
        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
                                              &li_context,
                                           host_to_target_data_linkinfo_nlattr);
    case IFLA_AF_SPEC:
        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
                                              NULL,
                                             host_to_target_data_spec_nlattr);
    default:
        gemu_log("Unknown host IFLA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
/* Convert one RTM_*ADDR rtattr payload from host to guest order.
 * NOTE(review): the case labels before the visible u32 swap and
 * cacheinfo swap were elided; labels reconstructed from upstream —
 * verify.
 */
static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct ifa_cacheinfo *ci;

    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case IFA_ADDRESS:
    case IFA_LOCAL:
        break;
    /* string */
    case IFA_LABEL:
        break;
    /* u32 */
    case IFA_FLAGS:
    case IFA_BROADCAST:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct ifa_cacheinfo */
    case IFA_CACHEINFO:
        ci = RTA_DATA(rtattr);
        ci->ifa_prefered = tswap32(ci->ifa_prefered);
        ci->ifa_valid = tswap32(ci->ifa_valid);
        ci->cstamp = tswap32(ci->cstamp);
        ci->tstamp = tswap32(ci->tstamp);
        break;
    default:
        gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
/* Convert one RTM_*ROUTE rtattr payload from host to guest order.
 * NOTE(review): the case labels before the visible u32 swap were
 * elided; labels reconstructed from upstream — verify.
 */
static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;

    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case RTA_GATEWAY:
    case RTA_DST:
    case RTA_PREFSRC:
        break;
    /* u32 */
    case RTA_PRIORITY:
    case RTA_TABLE:
    case RTA_OIF:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    default:
        gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
/* Convert every rtattr of an RTM_*LINK message from host to guest order. */
static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
                                           uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_link_rtattr);
}
/* Convert every rtattr of an RTM_*ADDR message from host to guest order. */
static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
                                           uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_addr_rtattr);
}
/* Convert every rtattr of an RTM_*ROUTE message from host to guest order. */
static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
                                            uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_route_rtattr);
}
/* Convert the payload of one rtnetlink reply message (link, addr or
 * route family) from host to guest order: swap the fixed header
 * struct's fields, then walk its trailing rtattrs.  Unknown message
 * types yield -TARGET_EINVAL.
 * NOTE(review): the RTM_NEW*/RTM_DEL*/RTM_GET* case labels and the
 * struct rtmsg declaration were elided; reconstructed from upstream.
 */
static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
{
    uint32_t nlmsg_len;
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    nlmsg_len = nlh->nlmsg_len;
    switch (nlh->nlmsg_type) {
    case RTM_NEWLINK:
    case RTM_DELLINK:
    case RTM_GETLINK:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
            ifi = NLMSG_DATA(nlh);
            ifi->ifi_type = tswap16(ifi->ifi_type);
            ifi->ifi_index = tswap32(ifi->ifi_index);
            ifi->ifi_flags = tswap32(ifi->ifi_flags);
            ifi->ifi_change = tswap32(ifi->ifi_change);
            host_to_target_link_rtattr(IFLA_RTA(ifi),
                                       nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
        }
        break;
    case RTM_NEWADDR:
    case RTM_DELADDR:
    case RTM_GETADDR:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
            ifa = NLMSG_DATA(nlh);
            ifa->ifa_index = tswap32(ifa->ifa_index);
            host_to_target_addr_rtattr(IFA_RTA(ifa),
                                       nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
        }
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
    case RTM_GETROUTE:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
            rtm = NLMSG_DATA(nlh);
            rtm->rtm_flags = tswap32(rtm->rtm_flags);
            host_to_target_route_rtattr(RTM_RTA(rtm),
                                        nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
        }
        break;
    default:
        return -TARGET_EINVAL;
    }
    return 0;
}
2397 static inline abi_long
host_to_target_nlmsg_route(struct nlmsghdr
*nlh
,
2400 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_route
);
2403 static abi_long
target_to_host_for_each_rtattr(struct rtattr
*rtattr
,
2405 abi_long (*target_to_host_rtattr
)
2410 while (len
>= sizeof(struct rtattr
)) {
2411 if (tswap16(rtattr
->rta_len
) < sizeof(struct rtattr
) ||
2412 tswap16(rtattr
->rta_len
) > len
) {
2415 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
2416 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
2417 ret
= target_to_host_rtattr(rtattr
);
2421 len
-= RTA_ALIGN(rtattr
->rta_len
);
2422 rtattr
= (struct rtattr
*)(((char *)rtattr
) +
2423 RTA_ALIGN(rtattr
->rta_len
));
2428 static abi_long
target_to_host_data_link_rtattr(struct rtattr
*rtattr
)
2430 switch (rtattr
->rta_type
) {
2432 gemu_log("Unknown target IFLA type: %d\n", rtattr
->rta_type
);
2438 static abi_long
target_to_host_data_addr_rtattr(struct rtattr
*rtattr
)
2440 switch (rtattr
->rta_type
) {
2441 /* binary: depends on family type */
2446 gemu_log("Unknown target IFA type: %d\n", rtattr
->rta_type
);
2452 static abi_long
target_to_host_data_route_rtattr(struct rtattr
*rtattr
)
2455 switch (rtattr
->rta_type
) {
2456 /* binary: depends on family type */
2463 u32
= RTA_DATA(rtattr
);
2464 *u32
= tswap32(*u32
);
2467 gemu_log("Unknown target RTA type: %d\n", rtattr
->rta_type
);
2473 static void target_to_host_link_rtattr(struct rtattr
*rtattr
,
2474 uint32_t rtattr_len
)
2476 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2477 target_to_host_data_link_rtattr
);
2480 static void target_to_host_addr_rtattr(struct rtattr
*rtattr
,
2481 uint32_t rtattr_len
)
2483 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2484 target_to_host_data_addr_rtattr
);
2487 static void target_to_host_route_rtattr(struct rtattr
*rtattr
,
2488 uint32_t rtattr_len
)
2490 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2491 target_to_host_data_route_rtattr
);
2494 static abi_long
target_to_host_data_route(struct nlmsghdr
*nlh
)
2496 struct ifinfomsg
*ifi
;
2497 struct ifaddrmsg
*ifa
;
2500 switch (nlh
->nlmsg_type
) {
2505 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
2506 ifi
= NLMSG_DATA(nlh
);
2507 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2508 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2509 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2510 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2511 target_to_host_link_rtattr(IFLA_RTA(ifi
), nlh
->nlmsg_len
-
2512 NLMSG_LENGTH(sizeof(*ifi
)));
2518 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
2519 ifa
= NLMSG_DATA(nlh
);
2520 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2521 target_to_host_addr_rtattr(IFA_RTA(ifa
), nlh
->nlmsg_len
-
2522 NLMSG_LENGTH(sizeof(*ifa
)));
2529 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
2530 rtm
= NLMSG_DATA(nlh
);
2531 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2532 target_to_host_route_rtattr(RTM_RTA(rtm
), nlh
->nlmsg_len
-
2533 NLMSG_LENGTH(sizeof(*rtm
)));
2537 return -TARGET_EOPNOTSUPP
;
2542 static abi_long
target_to_host_nlmsg_route(struct nlmsghdr
*nlh
, size_t len
)
2544 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_route
);
2546 #endif /* CONFIG_RTNETLINK */
2548 static abi_long
host_to_target_data_audit(struct nlmsghdr
*nlh
)
2550 switch (nlh
->nlmsg_type
) {
2552 gemu_log("Unknown host audit message type %d\n",
2554 return -TARGET_EINVAL
;
2559 static inline abi_long
host_to_target_nlmsg_audit(struct nlmsghdr
*nlh
,
2562 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_audit
);
2565 static abi_long
target_to_host_data_audit(struct nlmsghdr
*nlh
)
2567 switch (nlh
->nlmsg_type
) {
2569 case AUDIT_FIRST_USER_MSG
... AUDIT_LAST_USER_MSG
:
2570 case AUDIT_FIRST_USER_MSG2
... AUDIT_LAST_USER_MSG2
:
2573 gemu_log("Unknown target audit message type %d\n",
2575 return -TARGET_EINVAL
;
2581 static abi_long
target_to_host_nlmsg_audit(struct nlmsghdr
*nlh
, size_t len
)
2583 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_audit
);
2586 /* do_setsockopt() Must return target values and target errnos. */
2587 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
2588 abi_ulong optval_addr
, socklen_t optlen
)
2592 struct ip_mreqn
*ip_mreq
;
2593 struct ip_mreq_source
*ip_mreq_source
;
2597 /* TCP options all take an 'int' value. */
2598 if (optlen
< sizeof(uint32_t))
2599 return -TARGET_EINVAL
;
2601 if (get_user_u32(val
, optval_addr
))
2602 return -TARGET_EFAULT
;
2603 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2610 case IP_ROUTER_ALERT
:
2614 case IP_MTU_DISCOVER
:
2620 case IP_MULTICAST_TTL
:
2621 case IP_MULTICAST_LOOP
:
2623 if (optlen
>= sizeof(uint32_t)) {
2624 if (get_user_u32(val
, optval_addr
))
2625 return -TARGET_EFAULT
;
2626 } else if (optlen
>= 1) {
2627 if (get_user_u8(val
, optval_addr
))
2628 return -TARGET_EFAULT
;
2630 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2632 case IP_ADD_MEMBERSHIP
:
2633 case IP_DROP_MEMBERSHIP
:
2634 if (optlen
< sizeof (struct target_ip_mreq
) ||
2635 optlen
> sizeof (struct target_ip_mreqn
))
2636 return -TARGET_EINVAL
;
2638 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
2639 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
2640 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
2643 case IP_BLOCK_SOURCE
:
2644 case IP_UNBLOCK_SOURCE
:
2645 case IP_ADD_SOURCE_MEMBERSHIP
:
2646 case IP_DROP_SOURCE_MEMBERSHIP
:
2647 if (optlen
!= sizeof (struct target_ip_mreq_source
))
2648 return -TARGET_EINVAL
;
2650 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2651 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
2652 unlock_user (ip_mreq_source
, optval_addr
, 0);
2661 case IPV6_MTU_DISCOVER
:
2664 case IPV6_RECVPKTINFO
:
2666 if (optlen
< sizeof(uint32_t)) {
2667 return -TARGET_EINVAL
;
2669 if (get_user_u32(val
, optval_addr
)) {
2670 return -TARGET_EFAULT
;
2672 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2673 &val
, sizeof(val
)));
2682 /* struct icmp_filter takes an u32 value */
2683 if (optlen
< sizeof(uint32_t)) {
2684 return -TARGET_EINVAL
;
2687 if (get_user_u32(val
, optval_addr
)) {
2688 return -TARGET_EFAULT
;
2690 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2691 &val
, sizeof(val
)));
2698 case TARGET_SOL_SOCKET
:
2700 case TARGET_SO_RCVTIMEO
:
2704 optname
= SO_RCVTIMEO
;
2707 if (optlen
!= sizeof(struct target_timeval
)) {
2708 return -TARGET_EINVAL
;
2711 if (copy_from_user_timeval(&tv
, optval_addr
)) {
2712 return -TARGET_EFAULT
;
2715 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2719 case TARGET_SO_SNDTIMEO
:
2720 optname
= SO_SNDTIMEO
;
2722 case TARGET_SO_ATTACH_FILTER
:
2724 struct target_sock_fprog
*tfprog
;
2725 struct target_sock_filter
*tfilter
;
2726 struct sock_fprog fprog
;
2727 struct sock_filter
*filter
;
2730 if (optlen
!= sizeof(*tfprog
)) {
2731 return -TARGET_EINVAL
;
2733 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
2734 return -TARGET_EFAULT
;
2736 if (!lock_user_struct(VERIFY_READ
, tfilter
,
2737 tswapal(tfprog
->filter
), 0)) {
2738 unlock_user_struct(tfprog
, optval_addr
, 1);
2739 return -TARGET_EFAULT
;
2742 fprog
.len
= tswap16(tfprog
->len
);
2743 filter
= g_try_new(struct sock_filter
, fprog
.len
);
2744 if (filter
== NULL
) {
2745 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2746 unlock_user_struct(tfprog
, optval_addr
, 1);
2747 return -TARGET_ENOMEM
;
2749 for (i
= 0; i
< fprog
.len
; i
++) {
2750 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2751 filter
[i
].jt
= tfilter
[i
].jt
;
2752 filter
[i
].jf
= tfilter
[i
].jf
;
2753 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2755 fprog
.filter
= filter
;
2757 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2758 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2761 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2762 unlock_user_struct(tfprog
, optval_addr
, 1);
2765 case TARGET_SO_BINDTODEVICE
:
2767 char *dev_ifname
, *addr_ifname
;
2769 if (optlen
> IFNAMSIZ
- 1) {
2770 optlen
= IFNAMSIZ
- 1;
2772 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2774 return -TARGET_EFAULT
;
2776 optname
= SO_BINDTODEVICE
;
2777 addr_ifname
= alloca(IFNAMSIZ
);
2778 memcpy(addr_ifname
, dev_ifname
, optlen
);
2779 addr_ifname
[optlen
] = 0;
2780 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2781 addr_ifname
, optlen
));
2782 unlock_user (dev_ifname
, optval_addr
, 0);
2785 /* Options with 'int' argument. */
2786 case TARGET_SO_DEBUG
:
2789 case TARGET_SO_REUSEADDR
:
2790 optname
= SO_REUSEADDR
;
2792 case TARGET_SO_TYPE
:
2795 case TARGET_SO_ERROR
:
2798 case TARGET_SO_DONTROUTE
:
2799 optname
= SO_DONTROUTE
;
2801 case TARGET_SO_BROADCAST
:
2802 optname
= SO_BROADCAST
;
2804 case TARGET_SO_SNDBUF
:
2805 optname
= SO_SNDBUF
;
2807 case TARGET_SO_SNDBUFFORCE
:
2808 optname
= SO_SNDBUFFORCE
;
2810 case TARGET_SO_RCVBUF
:
2811 optname
= SO_RCVBUF
;
2813 case TARGET_SO_RCVBUFFORCE
:
2814 optname
= SO_RCVBUFFORCE
;
2816 case TARGET_SO_KEEPALIVE
:
2817 optname
= SO_KEEPALIVE
;
2819 case TARGET_SO_OOBINLINE
:
2820 optname
= SO_OOBINLINE
;
2822 case TARGET_SO_NO_CHECK
:
2823 optname
= SO_NO_CHECK
;
2825 case TARGET_SO_PRIORITY
:
2826 optname
= SO_PRIORITY
;
2829 case TARGET_SO_BSDCOMPAT
:
2830 optname
= SO_BSDCOMPAT
;
2833 case TARGET_SO_PASSCRED
:
2834 optname
= SO_PASSCRED
;
2836 case TARGET_SO_PASSSEC
:
2837 optname
= SO_PASSSEC
;
2839 case TARGET_SO_TIMESTAMP
:
2840 optname
= SO_TIMESTAMP
;
2842 case TARGET_SO_RCVLOWAT
:
2843 optname
= SO_RCVLOWAT
;
2849 if (optlen
< sizeof(uint32_t))
2850 return -TARGET_EINVAL
;
2852 if (get_user_u32(val
, optval_addr
))
2853 return -TARGET_EFAULT
;
2854 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
2858 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
2859 ret
= -TARGET_ENOPROTOOPT
;
2864 /* do_getsockopt() Must return target values and target errnos. */
2865 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
2866 abi_ulong optval_addr
, abi_ulong optlen
)
2873 case TARGET_SOL_SOCKET
:
2876 /* These don't just return a single integer */
2877 case TARGET_SO_LINGER
:
2878 case TARGET_SO_RCVTIMEO
:
2879 case TARGET_SO_SNDTIMEO
:
2880 case TARGET_SO_PEERNAME
:
2882 case TARGET_SO_PEERCRED
: {
2885 struct target_ucred
*tcr
;
2887 if (get_user_u32(len
, optlen
)) {
2888 return -TARGET_EFAULT
;
2891 return -TARGET_EINVAL
;
2895 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
2903 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
2904 return -TARGET_EFAULT
;
2906 __put_user(cr
.pid
, &tcr
->pid
);
2907 __put_user(cr
.uid
, &tcr
->uid
);
2908 __put_user(cr
.gid
, &tcr
->gid
);
2909 unlock_user_struct(tcr
, optval_addr
, 1);
2910 if (put_user_u32(len
, optlen
)) {
2911 return -TARGET_EFAULT
;
2915 /* Options with 'int' argument. */
2916 case TARGET_SO_DEBUG
:
2919 case TARGET_SO_REUSEADDR
:
2920 optname
= SO_REUSEADDR
;
2922 case TARGET_SO_TYPE
:
2925 case TARGET_SO_ERROR
:
2928 case TARGET_SO_DONTROUTE
:
2929 optname
= SO_DONTROUTE
;
2931 case TARGET_SO_BROADCAST
:
2932 optname
= SO_BROADCAST
;
2934 case TARGET_SO_SNDBUF
:
2935 optname
= SO_SNDBUF
;
2937 case TARGET_SO_RCVBUF
:
2938 optname
= SO_RCVBUF
;
2940 case TARGET_SO_KEEPALIVE
:
2941 optname
= SO_KEEPALIVE
;
2943 case TARGET_SO_OOBINLINE
:
2944 optname
= SO_OOBINLINE
;
2946 case TARGET_SO_NO_CHECK
:
2947 optname
= SO_NO_CHECK
;
2949 case TARGET_SO_PRIORITY
:
2950 optname
= SO_PRIORITY
;
2953 case TARGET_SO_BSDCOMPAT
:
2954 optname
= SO_BSDCOMPAT
;
2957 case TARGET_SO_PASSCRED
:
2958 optname
= SO_PASSCRED
;
2960 case TARGET_SO_TIMESTAMP
:
2961 optname
= SO_TIMESTAMP
;
2963 case TARGET_SO_RCVLOWAT
:
2964 optname
= SO_RCVLOWAT
;
2966 case TARGET_SO_ACCEPTCONN
:
2967 optname
= SO_ACCEPTCONN
;
2974 /* TCP options all take an 'int' value. */
2976 if (get_user_u32(len
, optlen
))
2977 return -TARGET_EFAULT
;
2979 return -TARGET_EINVAL
;
2981 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2984 if (optname
== SO_TYPE
) {
2985 val
= host_to_target_sock_type(val
);
2990 if (put_user_u32(val
, optval_addr
))
2991 return -TARGET_EFAULT
;
2993 if (put_user_u8(val
, optval_addr
))
2994 return -TARGET_EFAULT
;
2996 if (put_user_u32(len
, optlen
))
2997 return -TARGET_EFAULT
;
3004 case IP_ROUTER_ALERT
:
3008 case IP_MTU_DISCOVER
:
3014 case IP_MULTICAST_TTL
:
3015 case IP_MULTICAST_LOOP
:
3016 if (get_user_u32(len
, optlen
))
3017 return -TARGET_EFAULT
;
3019 return -TARGET_EINVAL
;
3021 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
3024 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
3026 if (put_user_u32(len
, optlen
)
3027 || put_user_u8(val
, optval_addr
))
3028 return -TARGET_EFAULT
;
3030 if (len
> sizeof(int))
3032 if (put_user_u32(len
, optlen
)
3033 || put_user_u32(val
, optval_addr
))
3034 return -TARGET_EFAULT
;
3038 ret
= -TARGET_ENOPROTOOPT
;
3044 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
3046 ret
= -TARGET_EOPNOTSUPP
;
3052 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
3053 int count
, int copy
)
3055 struct target_iovec
*target_vec
;
3057 abi_ulong total_len
, max_len
;
3060 bool bad_address
= false;
3066 if (count
< 0 || count
> IOV_MAX
) {
3071 vec
= g_try_new0(struct iovec
, count
);
3077 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3078 count
* sizeof(struct target_iovec
), 1);
3079 if (target_vec
== NULL
) {
3084 /* ??? If host page size > target page size, this will result in a
3085 value larger than what we can actually support. */
3086 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
3089 for (i
= 0; i
< count
; i
++) {
3090 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3091 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3096 } else if (len
== 0) {
3097 /* Zero length pointer is ignored. */
3098 vec
[i
].iov_base
= 0;
3100 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
3101 /* If the first buffer pointer is bad, this is a fault. But
3102 * subsequent bad buffers will result in a partial write; this
3103 * is realized by filling the vector with null pointers and
3105 if (!vec
[i
].iov_base
) {
3116 if (len
> max_len
- total_len
) {
3117 len
= max_len
- total_len
;
3120 vec
[i
].iov_len
= len
;
3124 unlock_user(target_vec
, target_addr
, 0);
3129 if (tswapal(target_vec
[i
].iov_len
) > 0) {
3130 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
3133 unlock_user(target_vec
, target_addr
, 0);
3140 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
3141 int count
, int copy
)
3143 struct target_iovec
*target_vec
;
3146 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3147 count
* sizeof(struct target_iovec
), 1);
3149 for (i
= 0; i
< count
; i
++) {
3150 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3151 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3155 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
3157 unlock_user(target_vec
, target_addr
, 0);
3163 static inline int target_to_host_sock_type(int *type
)
3166 int target_type
= *type
;
3168 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
3169 case TARGET_SOCK_DGRAM
:
3170 host_type
= SOCK_DGRAM
;
3172 case TARGET_SOCK_STREAM
:
3173 host_type
= SOCK_STREAM
;
3176 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
3179 if (target_type
& TARGET_SOCK_CLOEXEC
) {
3180 #if defined(SOCK_CLOEXEC)
3181 host_type
|= SOCK_CLOEXEC
;
3183 return -TARGET_EINVAL
;
3186 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3187 #if defined(SOCK_NONBLOCK)
3188 host_type
|= SOCK_NONBLOCK
;
3189 #elif !defined(O_NONBLOCK)
3190 return -TARGET_EINVAL
;
3197 /* Try to emulate socket type flags after socket creation. */
3198 static int sock_flags_fixup(int fd
, int target_type
)
3200 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3201 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3202 int flags
= fcntl(fd
, F_GETFL
);
3203 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
3205 return -TARGET_EINVAL
;
3212 static abi_long
packet_target_to_host_sockaddr(void *host_addr
,
3213 abi_ulong target_addr
,
3216 struct sockaddr
*addr
= host_addr
;
3217 struct target_sockaddr
*target_saddr
;
3219 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
3220 if (!target_saddr
) {
3221 return -TARGET_EFAULT
;
3224 memcpy(addr
, target_saddr
, len
);
3225 addr
->sa_family
= tswap16(target_saddr
->sa_family
);
3226 /* spkt_protocol is big-endian */
3228 unlock_user(target_saddr
, target_addr
, 0);
3232 static TargetFdTrans target_packet_trans
= {
3233 .target_to_host_addr
= packet_target_to_host_sockaddr
,
3236 #ifdef CONFIG_RTNETLINK
3237 static abi_long
netlink_route_target_to_host(void *buf
, size_t len
)
3241 ret
= target_to_host_nlmsg_route(buf
, len
);
3249 static abi_long
netlink_route_host_to_target(void *buf
, size_t len
)
3253 ret
= host_to_target_nlmsg_route(buf
, len
);
3261 static TargetFdTrans target_netlink_route_trans
= {
3262 .target_to_host_data
= netlink_route_target_to_host
,
3263 .host_to_target_data
= netlink_route_host_to_target
,
3265 #endif /* CONFIG_RTNETLINK */
3267 static abi_long
netlink_audit_target_to_host(void *buf
, size_t len
)
3271 ret
= target_to_host_nlmsg_audit(buf
, len
);
3279 static abi_long
netlink_audit_host_to_target(void *buf
, size_t len
)
3283 ret
= host_to_target_nlmsg_audit(buf
, len
);
3291 static TargetFdTrans target_netlink_audit_trans
= {
3292 .target_to_host_data
= netlink_audit_target_to_host
,
3293 .host_to_target_data
= netlink_audit_host_to_target
,
3296 /* do_socket() Must return target values and target errnos. */
3297 static abi_long
do_socket(int domain
, int type
, int protocol
)
3299 int target_type
= type
;
3302 ret
= target_to_host_sock_type(&type
);
3307 if (domain
== PF_NETLINK
&& !(
3308 #ifdef CONFIG_RTNETLINK
3309 protocol
== NETLINK_ROUTE
||
3311 protocol
== NETLINK_KOBJECT_UEVENT
||
3312 protocol
== NETLINK_AUDIT
)) {
3313 return -EPFNOSUPPORT
;
3316 if (domain
== AF_PACKET
||
3317 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
3318 protocol
= tswap16(protocol
);
3321 ret
= get_errno(socket(domain
, type
, protocol
));
3323 ret
= sock_flags_fixup(ret
, target_type
);
3324 if (type
== SOCK_PACKET
) {
3325 /* Manage an obsolete case :
3326 * if socket type is SOCK_PACKET, bind by name
3328 fd_trans_register(ret
, &target_packet_trans
);
3329 } else if (domain
== PF_NETLINK
) {
3331 #ifdef CONFIG_RTNETLINK
3333 fd_trans_register(ret
, &target_netlink_route_trans
);
3336 case NETLINK_KOBJECT_UEVENT
:
3337 /* nothing to do: messages are strings */
3340 fd_trans_register(ret
, &target_netlink_audit_trans
);
3343 g_assert_not_reached();
3350 /* do_bind() Must return target values and target errnos. */
3351 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
3357 if ((int)addrlen
< 0) {
3358 return -TARGET_EINVAL
;
3361 addr
= alloca(addrlen
+1);
3363 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3367 return get_errno(bind(sockfd
, addr
, addrlen
));
3370 /* do_connect() Must return target values and target errnos. */
3371 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
3377 if ((int)addrlen
< 0) {
3378 return -TARGET_EINVAL
;
3381 addr
= alloca(addrlen
+1);
3383 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3387 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
3390 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3391 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
3392 int flags
, int send
)
3398 abi_ulong target_vec
;
3400 if (msgp
->msg_name
) {
3401 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
3402 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
3403 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
3404 tswapal(msgp
->msg_name
),
3410 msg
.msg_name
= NULL
;
3411 msg
.msg_namelen
= 0;
3413 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
3414 msg
.msg_control
= alloca(msg
.msg_controllen
);
3415 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
3417 count
= tswapal(msgp
->msg_iovlen
);
3418 target_vec
= tswapal(msgp
->msg_iov
);
3419 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
3420 target_vec
, count
, send
);
3422 ret
= -host_to_target_errno(errno
);
3425 msg
.msg_iovlen
= count
;
3429 if (fd_trans_target_to_host_data(fd
)) {
3432 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
3433 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
3434 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
3435 msg
.msg_iov
->iov_len
);
3437 msg
.msg_iov
->iov_base
= host_msg
;
3438 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3442 ret
= target_to_host_cmsg(&msg
, msgp
);
3444 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3448 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
3449 if (!is_error(ret
)) {
3451 if (fd_trans_host_to_target_data(fd
)) {
3452 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
3455 ret
= host_to_target_cmsg(msgp
, &msg
);
3457 if (!is_error(ret
)) {
3458 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
3459 if (msg
.msg_name
!= NULL
) {
3460 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
3461 msg
.msg_name
, msg
.msg_namelen
);
3473 unlock_iovec(vec
, target_vec
, count
, !send
);
3478 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
3479 int flags
, int send
)
3482 struct target_msghdr
*msgp
;
3484 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
3488 return -TARGET_EFAULT
;
3490 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
3491 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
3495 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3496 * so it might not have this *mmsg-specific flag either.
3498 #ifndef MSG_WAITFORONE
3499 #define MSG_WAITFORONE 0x10000
3502 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
3503 unsigned int vlen
, unsigned int flags
,
3506 struct target_mmsghdr
*mmsgp
;
3510 if (vlen
> UIO_MAXIOV
) {
3514 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
3516 return -TARGET_EFAULT
;
3519 for (i
= 0; i
< vlen
; i
++) {
3520 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
3521 if (is_error(ret
)) {
3524 mmsgp
[i
].msg_len
= tswap32(ret
);
3525 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3526 if (flags
& MSG_WAITFORONE
) {
3527 flags
|= MSG_DONTWAIT
;
3531 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3533 /* Return number of datagrams sent if we sent any at all;
3534 * otherwise return the error.
3542 /* do_accept4() Must return target values and target errnos. */
3543 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3544 abi_ulong target_addrlen_addr
, int flags
)
3551 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3553 if (target_addr
== 0) {
3554 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
3557 /* linux returns EINVAL if addrlen pointer is invalid */
3558 if (get_user_u32(addrlen
, target_addrlen_addr
))
3559 return -TARGET_EINVAL
;
3561 if ((int)addrlen
< 0) {
3562 return -TARGET_EINVAL
;
3565 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3566 return -TARGET_EINVAL
;
3568 addr
= alloca(addrlen
);
3570 ret
= get_errno(safe_accept4(fd
, addr
, &addrlen
, host_flags
));
3571 if (!is_error(ret
)) {
3572 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3573 if (put_user_u32(addrlen
, target_addrlen_addr
))
3574 ret
= -TARGET_EFAULT
;
3579 /* do_getpeername() Must return target values and target errnos. */
3580 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3581 abi_ulong target_addrlen_addr
)
3587 if (get_user_u32(addrlen
, target_addrlen_addr
))
3588 return -TARGET_EFAULT
;
3590 if ((int)addrlen
< 0) {
3591 return -TARGET_EINVAL
;
3594 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3595 return -TARGET_EFAULT
;
3597 addr
= alloca(addrlen
);
3599 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
3600 if (!is_error(ret
)) {
3601 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3602 if (put_user_u32(addrlen
, target_addrlen_addr
))
3603 ret
= -TARGET_EFAULT
;
3608 /* do_getsockname() Must return target values and target errnos. */
3609 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3610 abi_ulong target_addrlen_addr
)
3616 if (get_user_u32(addrlen
, target_addrlen_addr
))
3617 return -TARGET_EFAULT
;
3619 if ((int)addrlen
< 0) {
3620 return -TARGET_EINVAL
;
3623 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3624 return -TARGET_EFAULT
;
3626 addr
= alloca(addrlen
);
3628 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
3629 if (!is_error(ret
)) {
3630 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3631 if (put_user_u32(addrlen
, target_addrlen_addr
))
3632 ret
= -TARGET_EFAULT
;
3637 /* do_socketpair() Must return target values and target errnos. */
3638 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3639 abi_ulong target_tab_addr
)
3644 target_to_host_sock_type(&type
);
3646 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3647 if (!is_error(ret
)) {
3648 if (put_user_s32(tab
[0], target_tab_addr
)
3649 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3650 ret
= -TARGET_EFAULT
;
3655 /* do_sendto() Must return target values and target errnos. */
3656 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3657 abi_ulong target_addr
, socklen_t addrlen
)
3661 void *copy_msg
= NULL
;
3664 if ((int)addrlen
< 0) {
3665 return -TARGET_EINVAL
;
3668 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3670 return -TARGET_EFAULT
;
3671 if (fd_trans_target_to_host_data(fd
)) {
3672 copy_msg
= host_msg
;
3673 host_msg
= g_malloc(len
);
3674 memcpy(host_msg
, copy_msg
, len
);
3675 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
3681 addr
= alloca(addrlen
+1);
3682 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
3686 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
3688 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
3693 host_msg
= copy_msg
;
3695 unlock_user(host_msg
, msg
, 0);
3699 /* do_recvfrom() Must return target values and target errnos. */
3700 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
3701 abi_ulong target_addr
,
3702 abi_ulong target_addrlen
)
3709 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
3711 return -TARGET_EFAULT
;
3713 if (get_user_u32(addrlen
, target_addrlen
)) {
3714 ret
= -TARGET_EFAULT
;
3717 if ((int)addrlen
< 0) {
3718 ret
= -TARGET_EINVAL
;
3721 addr
= alloca(addrlen
);
3722 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
3725 addr
= NULL
; /* To keep compiler quiet. */
3726 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
3728 if (!is_error(ret
)) {
3729 if (fd_trans_host_to_target_data(fd
)) {
3730 ret
= fd_trans_host_to_target_data(fd
)(host_msg
, ret
);
3733 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3734 if (put_user_u32(addrlen
, target_addrlen
)) {
3735 ret
= -TARGET_EFAULT
;
3739 unlock_user(host_msg
, msg
, len
);
3742 unlock_user(host_msg
, msg
, 0);
3747 #ifdef TARGET_NR_socketcall
3748 /* do_socketcall() Must return target values and target errnos. */
3749 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
3751 static const unsigned ac
[] = { /* number of arguments per call */
3752 [SOCKOP_socket
] = 3, /* domain, type, protocol */
3753 [SOCKOP_bind
] = 3, /* sockfd, addr, addrlen */
3754 [SOCKOP_connect
] = 3, /* sockfd, addr, addrlen */
3755 [SOCKOP_listen
] = 2, /* sockfd, backlog */
3756 [SOCKOP_accept
] = 3, /* sockfd, addr, addrlen */
3757 [SOCKOP_accept4
] = 4, /* sockfd, addr, addrlen, flags */
3758 [SOCKOP_getsockname
] = 3, /* sockfd, addr, addrlen */
3759 [SOCKOP_getpeername
] = 3, /* sockfd, addr, addrlen */
3760 [SOCKOP_socketpair
] = 4, /* domain, type, protocol, tab */
3761 [SOCKOP_send
] = 4, /* sockfd, msg, len, flags */
3762 [SOCKOP_recv
] = 4, /* sockfd, msg, len, flags */
3763 [SOCKOP_sendto
] = 6, /* sockfd, msg, len, flags, addr, addrlen */
3764 [SOCKOP_recvfrom
] = 6, /* sockfd, msg, len, flags, addr, addrlen */
3765 [SOCKOP_shutdown
] = 2, /* sockfd, how */
3766 [SOCKOP_sendmsg
] = 3, /* sockfd, msg, flags */
3767 [SOCKOP_recvmsg
] = 3, /* sockfd, msg, flags */
3768 [SOCKOP_sendmmsg
] = 4, /* sockfd, msgvec, vlen, flags */
3769 [SOCKOP_recvmmsg
] = 4, /* sockfd, msgvec, vlen, flags */
3770 [SOCKOP_setsockopt
] = 5, /* sockfd, level, optname, optval, optlen */
3771 [SOCKOP_getsockopt
] = 5, /* sockfd, level, optname, optval, optlen */
3773 abi_long a
[6]; /* max 6 args */
3775 /* first, collect the arguments in a[] according to ac[] */
3776 if (num
>= 0 && num
< ARRAY_SIZE(ac
)) {
3778 assert(ARRAY_SIZE(a
) >= ac
[num
]); /* ensure we have space for args */
3779 for (i
= 0; i
< ac
[num
]; ++i
) {
3780 if (get_user_ual(a
[i
], vptr
+ i
* sizeof(abi_long
)) != 0) {
3781 return -TARGET_EFAULT
;
3786 /* now when we have the args, actually handle the call */
3788 case SOCKOP_socket
: /* domain, type, protocol */
3789 return do_socket(a
[0], a
[1], a
[2]);
3790 case SOCKOP_bind
: /* sockfd, addr, addrlen */
3791 return do_bind(a
[0], a
[1], a
[2]);
3792 case SOCKOP_connect
: /* sockfd, addr, addrlen */
3793 return do_connect(a
[0], a
[1], a
[2]);
3794 case SOCKOP_listen
: /* sockfd, backlog */
3795 return get_errno(listen(a
[0], a
[1]));
3796 case SOCKOP_accept
: /* sockfd, addr, addrlen */
3797 return do_accept4(a
[0], a
[1], a
[2], 0);
3798 case SOCKOP_accept4
: /* sockfd, addr, addrlen, flags */
3799 return do_accept4(a
[0], a
[1], a
[2], a
[3]);
3800 case SOCKOP_getsockname
: /* sockfd, addr, addrlen */
3801 return do_getsockname(a
[0], a
[1], a
[2]);
3802 case SOCKOP_getpeername
: /* sockfd, addr, addrlen */
3803 return do_getpeername(a
[0], a
[1], a
[2]);
3804 case SOCKOP_socketpair
: /* domain, type, protocol, tab */
3805 return do_socketpair(a
[0], a
[1], a
[2], a
[3]);
3806 case SOCKOP_send
: /* sockfd, msg, len, flags */
3807 return do_sendto(a
[0], a
[1], a
[2], a
[3], 0, 0);
3808 case SOCKOP_recv
: /* sockfd, msg, len, flags */
3809 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], 0, 0);
3810 case SOCKOP_sendto
: /* sockfd, msg, len, flags, addr, addrlen */
3811 return do_sendto(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3812 case SOCKOP_recvfrom
: /* sockfd, msg, len, flags, addr, addrlen */
3813 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3814 case SOCKOP_shutdown
: /* sockfd, how */
3815 return get_errno(shutdown(a
[0], a
[1]));
3816 case SOCKOP_sendmsg
: /* sockfd, msg, flags */
3817 return do_sendrecvmsg(a
[0], a
[1], a
[2], 1);
3818 case SOCKOP_recvmsg
: /* sockfd, msg, flags */
3819 return do_sendrecvmsg(a
[0], a
[1], a
[2], 0);
3820 case SOCKOP_sendmmsg
: /* sockfd, msgvec, vlen, flags */
3821 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 1);
3822 case SOCKOP_recvmmsg
: /* sockfd, msgvec, vlen, flags */
3823 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 0);
3824 case SOCKOP_setsockopt
: /* sockfd, level, optname, optval, optlen */
3825 return do_setsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3826 case SOCKOP_getsockopt
: /* sockfd, level, optname, optval, optlen */
3827 return do_getsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3829 gemu_log("Unsupported socketcall: %d\n", num
);
3830 return -TARGET_ENOSYS
;
3835 #define N_SHM_REGIONS 32
3837 static struct shm_region
{
3841 } shm_regions
[N_SHM_REGIONS
];
3843 #ifndef TARGET_SEMID64_DS
3844 /* asm-generic version of this struct */
3845 struct target_semid64_ds
3847 struct target_ipc_perm sem_perm
;
3848 abi_ulong sem_otime
;
3849 #if TARGET_ABI_BITS == 32
3850 abi_ulong __unused1
;
3852 abi_ulong sem_ctime
;
3853 #if TARGET_ABI_BITS == 32
3854 abi_ulong __unused2
;
3856 abi_ulong sem_nsems
;
3857 abi_ulong __unused3
;
3858 abi_ulong __unused4
;
3862 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
3863 abi_ulong target_addr
)
3865 struct target_ipc_perm
*target_ip
;
3866 struct target_semid64_ds
*target_sd
;
3868 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3869 return -TARGET_EFAULT
;
3870 target_ip
= &(target_sd
->sem_perm
);
3871 host_ip
->__key
= tswap32(target_ip
->__key
);
3872 host_ip
->uid
= tswap32(target_ip
->uid
);
3873 host_ip
->gid
= tswap32(target_ip
->gid
);
3874 host_ip
->cuid
= tswap32(target_ip
->cuid
);
3875 host_ip
->cgid
= tswap32(target_ip
->cgid
);
3876 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3877 host_ip
->mode
= tswap32(target_ip
->mode
);
3879 host_ip
->mode
= tswap16(target_ip
->mode
);
3881 #if defined(TARGET_PPC)
3882 host_ip
->__seq
= tswap32(target_ip
->__seq
);
3884 host_ip
->__seq
= tswap16(target_ip
->__seq
);
3886 unlock_user_struct(target_sd
, target_addr
, 0);
3890 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
3891 struct ipc_perm
*host_ip
)
3893 struct target_ipc_perm
*target_ip
;
3894 struct target_semid64_ds
*target_sd
;
3896 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3897 return -TARGET_EFAULT
;
3898 target_ip
= &(target_sd
->sem_perm
);
3899 target_ip
->__key
= tswap32(host_ip
->__key
);
3900 target_ip
->uid
= tswap32(host_ip
->uid
);
3901 target_ip
->gid
= tswap32(host_ip
->gid
);
3902 target_ip
->cuid
= tswap32(host_ip
->cuid
);
3903 target_ip
->cgid
= tswap32(host_ip
->cgid
);
3904 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3905 target_ip
->mode
= tswap32(host_ip
->mode
);
3907 target_ip
->mode
= tswap16(host_ip
->mode
);
3909 #if defined(TARGET_PPC)
3910 target_ip
->__seq
= tswap32(host_ip
->__seq
);
3912 target_ip
->__seq
= tswap16(host_ip
->__seq
);
3914 unlock_user_struct(target_sd
, target_addr
, 1);
3918 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
3919 abi_ulong target_addr
)
3921 struct target_semid64_ds
*target_sd
;
3923 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3924 return -TARGET_EFAULT
;
3925 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
3926 return -TARGET_EFAULT
;
3927 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
3928 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
3929 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
3930 unlock_user_struct(target_sd
, target_addr
, 0);
3934 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
3935 struct semid_ds
*host_sd
)
3937 struct target_semid64_ds
*target_sd
;
3939 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3940 return -TARGET_EFAULT
;
3941 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
3942 return -TARGET_EFAULT
;
3943 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
3944 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
3945 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
3946 unlock_user_struct(target_sd
, target_addr
, 1);
3950 struct target_seminfo
{
3963 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
3964 struct seminfo
*host_seminfo
)
3966 struct target_seminfo
*target_seminfo
;
3967 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
3968 return -TARGET_EFAULT
;
3969 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
3970 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
3971 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
3972 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
3973 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
3974 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
3975 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
3976 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
3977 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
3978 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
3979 unlock_user_struct(target_seminfo
, target_addr
, 1);
3985 struct semid_ds
*buf
;
3986 unsigned short *array
;
3987 struct seminfo
*__buf
;
3990 union target_semun
{
3997 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
3998 abi_ulong target_addr
)
4001 unsigned short *array
;
4003 struct semid_ds semid_ds
;
4006 semun
.buf
= &semid_ds
;
4008 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4010 return get_errno(ret
);
4012 nsems
= semid_ds
.sem_nsems
;
4014 *host_array
= g_try_new(unsigned short, nsems
);
4016 return -TARGET_ENOMEM
;
4018 array
= lock_user(VERIFY_READ
, target_addr
,
4019 nsems
*sizeof(unsigned short), 1);
4021 g_free(*host_array
);
4022 return -TARGET_EFAULT
;
4025 for(i
=0; i
<nsems
; i
++) {
4026 __get_user((*host_array
)[i
], &array
[i
]);
4028 unlock_user(array
, target_addr
, 0);
4033 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
4034 unsigned short **host_array
)
4037 unsigned short *array
;
4039 struct semid_ds semid_ds
;
4042 semun
.buf
= &semid_ds
;
4044 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4046 return get_errno(ret
);
4048 nsems
= semid_ds
.sem_nsems
;
4050 array
= lock_user(VERIFY_WRITE
, target_addr
,
4051 nsems
*sizeof(unsigned short), 0);
4053 return -TARGET_EFAULT
;
4055 for(i
=0; i
<nsems
; i
++) {
4056 __put_user((*host_array
)[i
], &array
[i
]);
4058 g_free(*host_array
);
4059 unlock_user(array
, target_addr
, 1);
4064 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
4065 abi_ulong target_arg
)
4067 union target_semun target_su
= { .buf
= target_arg
};
4069 struct semid_ds dsarg
;
4070 unsigned short *array
= NULL
;
4071 struct seminfo seminfo
;
4072 abi_long ret
= -TARGET_EINVAL
;
4079 /* In 64 bit cross-endian situations, we will erroneously pick up
4080 * the wrong half of the union for the "val" element. To rectify
4081 * this, the entire 8-byte structure is byteswapped, followed by
4082 * a swap of the 4 byte val field. In other cases, the data is
4083 * already in proper host byte order. */
4084 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
4085 target_su
.buf
= tswapal(target_su
.buf
);
4086 arg
.val
= tswap32(target_su
.val
);
4088 arg
.val
= target_su
.val
;
4090 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4094 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
4098 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4099 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
4106 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
4110 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4111 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
4117 arg
.__buf
= &seminfo
;
4118 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4119 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
4127 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
4134 struct target_sembuf
{
4135 unsigned short sem_num
;
4140 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
4141 abi_ulong target_addr
,
4144 struct target_sembuf
*target_sembuf
;
4147 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
4148 nsops
*sizeof(struct target_sembuf
), 1);
4150 return -TARGET_EFAULT
;
4152 for(i
=0; i
<nsops
; i
++) {
4153 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
4154 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
4155 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
4158 unlock_user(target_sembuf
, target_addr
, 0);
4163 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
4165 struct sembuf sops
[nsops
];
4167 if (target_to_host_sembuf(sops
, ptr
, nsops
))
4168 return -TARGET_EFAULT
;
4170 return get_errno(safe_semtimedop(semid
, sops
, nsops
, NULL
));
4173 struct target_msqid_ds
4175 struct target_ipc_perm msg_perm
;
4176 abi_ulong msg_stime
;
4177 #if TARGET_ABI_BITS == 32
4178 abi_ulong __unused1
;
4180 abi_ulong msg_rtime
;
4181 #if TARGET_ABI_BITS == 32
4182 abi_ulong __unused2
;
4184 abi_ulong msg_ctime
;
4185 #if TARGET_ABI_BITS == 32
4186 abi_ulong __unused3
;
4188 abi_ulong __msg_cbytes
;
4190 abi_ulong msg_qbytes
;
4191 abi_ulong msg_lspid
;
4192 abi_ulong msg_lrpid
;
4193 abi_ulong __unused4
;
4194 abi_ulong __unused5
;
4197 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
4198 abi_ulong target_addr
)
4200 struct target_msqid_ds
*target_md
;
4202 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
4203 return -TARGET_EFAULT
;
4204 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
4205 return -TARGET_EFAULT
;
4206 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
4207 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
4208 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
4209 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
4210 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
4211 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
4212 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
4213 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
4214 unlock_user_struct(target_md
, target_addr
, 0);
4218 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
4219 struct msqid_ds
*host_md
)
4221 struct target_msqid_ds
*target_md
;
4223 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
4224 return -TARGET_EFAULT
;
4225 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
4226 return -TARGET_EFAULT
;
4227 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
4228 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
4229 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
4230 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
4231 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
4232 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
4233 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
4234 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
4235 unlock_user_struct(target_md
, target_addr
, 1);
4239 struct target_msginfo
{
4247 unsigned short int msgseg
;
4250 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
4251 struct msginfo
*host_msginfo
)
4253 struct target_msginfo
*target_msginfo
;
4254 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
4255 return -TARGET_EFAULT
;
4256 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
4257 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
4258 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
4259 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
4260 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
4261 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
4262 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
4263 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
4264 unlock_user_struct(target_msginfo
, target_addr
, 1);
4268 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
4270 struct msqid_ds dsarg
;
4271 struct msginfo msginfo
;
4272 abi_long ret
= -TARGET_EINVAL
;
4280 if (target_to_host_msqid_ds(&dsarg
,ptr
))
4281 return -TARGET_EFAULT
;
4282 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
4283 if (host_to_target_msqid_ds(ptr
,&dsarg
))
4284 return -TARGET_EFAULT
;
4287 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
4291 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
4292 if (host_to_target_msginfo(ptr
, &msginfo
))
4293 return -TARGET_EFAULT
;
4300 struct target_msgbuf
{
4305 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
4306 ssize_t msgsz
, int msgflg
)
4308 struct target_msgbuf
*target_mb
;
4309 struct msgbuf
*host_mb
;
4313 return -TARGET_EINVAL
;
4316 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
4317 return -TARGET_EFAULT
;
4318 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4320 unlock_user_struct(target_mb
, msgp
, 0);
4321 return -TARGET_ENOMEM
;
4323 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
4324 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
4325 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
4327 unlock_user_struct(target_mb
, msgp
, 0);
4332 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
4333 ssize_t msgsz
, abi_long msgtyp
,
4336 struct target_msgbuf
*target_mb
;
4338 struct msgbuf
*host_mb
;
4342 return -TARGET_EINVAL
;
4345 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
4346 return -TARGET_EFAULT
;
4348 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4350 ret
= -TARGET_ENOMEM
;
4353 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
4356 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
4357 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
4358 if (!target_mtext
) {
4359 ret
= -TARGET_EFAULT
;
4362 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
4363 unlock_user(target_mtext
, target_mtext_addr
, ret
);
4366 target_mb
->mtype
= tswapal(host_mb
->mtype
);
4370 unlock_user_struct(target_mb
, msgp
, 1);
4375 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
4376 abi_ulong target_addr
)
4378 struct target_shmid_ds
*target_sd
;
4380 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4381 return -TARGET_EFAULT
;
4382 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
4383 return -TARGET_EFAULT
;
4384 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4385 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4386 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4387 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4388 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4389 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4390 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4391 unlock_user_struct(target_sd
, target_addr
, 0);
4395 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
4396 struct shmid_ds
*host_sd
)
4398 struct target_shmid_ds
*target_sd
;
4400 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4401 return -TARGET_EFAULT
;
4402 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
4403 return -TARGET_EFAULT
;
4404 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4405 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4406 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4407 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4408 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4409 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4410 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4411 unlock_user_struct(target_sd
, target_addr
, 1);
4415 struct target_shminfo
{
4423 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
4424 struct shminfo
*host_shminfo
)
4426 struct target_shminfo
*target_shminfo
;
4427 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
4428 return -TARGET_EFAULT
;
4429 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
4430 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
4431 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
4432 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
4433 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
4434 unlock_user_struct(target_shminfo
, target_addr
, 1);
4438 struct target_shm_info
{
4443 abi_ulong swap_attempts
;
4444 abi_ulong swap_successes
;
4447 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
4448 struct shm_info
*host_shm_info
)
4450 struct target_shm_info
*target_shm_info
;
4451 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
4452 return -TARGET_EFAULT
;
4453 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
4454 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
4455 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
4456 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
4457 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
4458 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
4459 unlock_user_struct(target_shm_info
, target_addr
, 1);
4463 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
4465 struct shmid_ds dsarg
;
4466 struct shminfo shminfo
;
4467 struct shm_info shm_info
;
4468 abi_long ret
= -TARGET_EINVAL
;
4476 if (target_to_host_shmid_ds(&dsarg
, buf
))
4477 return -TARGET_EFAULT
;
4478 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
4479 if (host_to_target_shmid_ds(buf
, &dsarg
))
4480 return -TARGET_EFAULT
;
4483 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
4484 if (host_to_target_shminfo(buf
, &shminfo
))
4485 return -TARGET_EFAULT
;
4488 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
4489 if (host_to_target_shm_info(buf
, &shm_info
))
4490 return -TARGET_EFAULT
;
4495 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
4502 static inline abi_ulong
do_shmat(int shmid
, abi_ulong shmaddr
, int shmflg
)
4506 struct shmid_ds shm_info
;
4509 /* find out the length of the shared memory segment */
4510 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
4511 if (is_error(ret
)) {
4512 /* can't get length, bail out */
4519 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
4521 abi_ulong mmap_start
;
4523 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
4525 if (mmap_start
== -1) {
4527 host_raddr
= (void *)-1;
4529 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
4532 if (host_raddr
== (void *)-1) {
4534 return get_errno((long)host_raddr
);
4536 raddr
=h2g((unsigned long)host_raddr
);
4538 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
4539 PAGE_VALID
| PAGE_READ
|
4540 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
4542 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
4543 if (!shm_regions
[i
].in_use
) {
4544 shm_regions
[i
].in_use
= true;
4545 shm_regions
[i
].start
= raddr
;
4546 shm_regions
[i
].size
= shm_info
.shm_segsz
;
4556 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
4560 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
4561 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
4562 shm_regions
[i
].in_use
= false;
4563 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
4568 return get_errno(shmdt(g2h(shmaddr
)));
4571 #ifdef TARGET_NR_ipc
4572 /* ??? This only works with linear mappings. */
4573 /* do_ipc() must return target values and target errnos. */
4574 static abi_long
do_ipc(unsigned int call
, abi_long first
,
4575 abi_long second
, abi_long third
,
4576 abi_long ptr
, abi_long fifth
)
4581 version
= call
>> 16;
4586 ret
= do_semop(first
, ptr
, second
);
4590 ret
= get_errno(semget(first
, second
, third
));
4593 case IPCOP_semctl
: {
4594 /* The semun argument to semctl is passed by value, so dereference the
4597 get_user_ual(atptr
, ptr
);
4598 ret
= do_semctl(first
, second
, third
, atptr
);
4603 ret
= get_errno(msgget(first
, second
));
4607 ret
= do_msgsnd(first
, ptr
, second
, third
);
4611 ret
= do_msgctl(first
, second
, ptr
);
4618 struct target_ipc_kludge
{
4623 if (!lock_user_struct(VERIFY_READ
, tmp
, ptr
, 1)) {
4624 ret
= -TARGET_EFAULT
;
4628 ret
= do_msgrcv(first
, tswapal(tmp
->msgp
), second
, tswapal(tmp
->msgtyp
), third
);
4630 unlock_user_struct(tmp
, ptr
, 0);
4634 ret
= do_msgrcv(first
, ptr
, second
, fifth
, third
);
4643 raddr
= do_shmat(first
, ptr
, second
);
4644 if (is_error(raddr
))
4645 return get_errno(raddr
);
4646 if (put_user_ual(raddr
, third
))
4647 return -TARGET_EFAULT
;
4651 ret
= -TARGET_EINVAL
;
4656 ret
= do_shmdt(ptr
);
4660 /* IPC_* flag values are the same on all linux platforms */
4661 ret
= get_errno(shmget(first
, second
, third
));
4664 /* IPC_* and SHM_* command values are the same on all linux platforms */
4666 ret
= do_shmctl(first
, second
, ptr
);
4669 gemu_log("Unsupported ipc call: %d (version %d)\n", call
, version
);
4670 ret
= -TARGET_ENOSYS
;
4677 /* kernel structure types definitions */
4679 #define STRUCT(name, ...) STRUCT_ ## name,
4680 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4682 #include "syscall_types.h"
4686 #undef STRUCT_SPECIAL
4688 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4689 #define STRUCT_SPECIAL(name)
4690 #include "syscall_types.h"
4692 #undef STRUCT_SPECIAL
4694 typedef struct IOCTLEntry IOCTLEntry
;
4696 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4697 int fd
, int cmd
, abi_long arg
);
4701 unsigned int host_cmd
;
4704 do_ioctl_fn
*do_ioctl
;
4705 const argtype arg_type
[5];
4708 #define IOC_R 0x0001
4709 #define IOC_W 0x0002
4710 #define IOC_RW (IOC_R | IOC_W)
4712 #define MAX_STRUCT_SIZE 4096
4714 #ifdef CONFIG_FIEMAP
4715 /* So fiemap access checks don't overflow on 32 bit systems.
4716 * This is very slightly smaller than the limit imposed by
4717 * the underlying kernel.
4719 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4720 / sizeof(struct fiemap_extent))
4722 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4723 int fd
, int cmd
, abi_long arg
)
4725 /* The parameter for this ioctl is a struct fiemap followed
4726 * by an array of struct fiemap_extent whose size is set
4727 * in fiemap->fm_extent_count. The array is filled in by the
4730 int target_size_in
, target_size_out
;
4732 const argtype
*arg_type
= ie
->arg_type
;
4733 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
4736 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
4740 assert(arg_type
[0] == TYPE_PTR
);
4741 assert(ie
->access
== IOC_RW
);
4743 target_size_in
= thunk_type_size(arg_type
, 0);
4744 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
4746 return -TARGET_EFAULT
;
4748 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4749 unlock_user(argptr
, arg
, 0);
4750 fm
= (struct fiemap
*)buf_temp
;
4751 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
4752 return -TARGET_EINVAL
;
4755 outbufsz
= sizeof (*fm
) +
4756 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
4758 if (outbufsz
> MAX_STRUCT_SIZE
) {
4759 /* We can't fit all the extents into the fixed size buffer.
4760 * Allocate one that is large enough and use it instead.
4762 fm
= g_try_malloc(outbufsz
);
4764 return -TARGET_ENOMEM
;
4766 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
4769 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, fm
));
4770 if (!is_error(ret
)) {
4771 target_size_out
= target_size_in
;
4772 /* An extent_count of 0 means we were only counting the extents
4773 * so there are no structs to copy
4775 if (fm
->fm_extent_count
!= 0) {
4776 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
4778 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
4780 ret
= -TARGET_EFAULT
;
4782 /* Convert the struct fiemap */
4783 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
4784 if (fm
->fm_extent_count
!= 0) {
4785 p
= argptr
+ target_size_in
;
4786 /* ...and then all the struct fiemap_extents */
4787 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
4788 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
4793 unlock_user(argptr
, arg
, target_size_out
);
4803 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4804 int fd
, int cmd
, abi_long arg
)
4806 const argtype
*arg_type
= ie
->arg_type
;
4810 struct ifconf
*host_ifconf
;
4812 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
4813 int target_ifreq_size
;
4818 abi_long target_ifc_buf
;
4822 assert(arg_type
[0] == TYPE_PTR
);
4823 assert(ie
->access
== IOC_RW
);
4826 target_size
= thunk_type_size(arg_type
, 0);
4828 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4830 return -TARGET_EFAULT
;
4831 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4832 unlock_user(argptr
, arg
, 0);
4834 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
4835 target_ifc_len
= host_ifconf
->ifc_len
;
4836 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
4838 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
4839 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
4840 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
4842 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
4843 if (outbufsz
> MAX_STRUCT_SIZE
) {
4844 /* We can't fit all the extents into the fixed size buffer.
4845 * Allocate one that is large enough and use it instead.
4847 host_ifconf
= malloc(outbufsz
);
4849 return -TARGET_ENOMEM
;
4851 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
4854 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
4856 host_ifconf
->ifc_len
= host_ifc_len
;
4857 host_ifconf
->ifc_buf
= host_ifc_buf
;
4859 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
4860 if (!is_error(ret
)) {
4861 /* convert host ifc_len to target ifc_len */
4863 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
4864 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
4865 host_ifconf
->ifc_len
= target_ifc_len
;
4867 /* restore target ifc_buf */
4869 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
4871 /* copy struct ifconf to target user */
4873 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4875 return -TARGET_EFAULT
;
4876 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
4877 unlock_user(argptr
, arg
, target_size
);
4879 /* copy ifreq[] to target user */
4881 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
4882 for (i
= 0; i
< nb_ifreq
; i
++) {
4883 thunk_convert(argptr
+ i
* target_ifreq_size
,
4884 host_ifc_buf
+ i
* sizeof(struct ifreq
),
4885 ifreq_arg_type
, THUNK_TARGET
);
4887 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
4897 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
4898 int cmd
, abi_long arg
)
4901 struct dm_ioctl
*host_dm
;
4902 abi_long guest_data
;
4903 uint32_t guest_data_size
;
4905 const argtype
*arg_type
= ie
->arg_type
;
4907 void *big_buf
= NULL
;
4911 target_size
= thunk_type_size(arg_type
, 0);
4912 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4914 ret
= -TARGET_EFAULT
;
4917 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4918 unlock_user(argptr
, arg
, 0);
4920 /* buf_temp is too small, so fetch things into a bigger buffer */
4921 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
4922 memcpy(big_buf
, buf_temp
, target_size
);
4926 guest_data
= arg
+ host_dm
->data_start
;
4927 if ((guest_data
- arg
) < 0) {
4931 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
4932 host_data
= (char*)host_dm
+ host_dm
->data_start
;
4934 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
4935 switch (ie
->host_cmd
) {
4937 case DM_LIST_DEVICES
:
4940 case DM_DEV_SUSPEND
:
4943 case DM_TABLE_STATUS
:
4944 case DM_TABLE_CLEAR
:
4946 case DM_LIST_VERSIONS
:
4950 case DM_DEV_SET_GEOMETRY
:
4951 /* data contains only strings */
4952 memcpy(host_data
, argptr
, guest_data_size
);
4955 memcpy(host_data
, argptr
, guest_data_size
);
4956 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
4960 void *gspec
= argptr
;
4961 void *cur_data
= host_data
;
4962 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
4963 int spec_size
= thunk_type_size(arg_type
, 0);
4966 for (i
= 0; i
< host_dm
->target_count
; i
++) {
4967 struct dm_target_spec
*spec
= cur_data
;
4971 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
4972 slen
= strlen((char*)gspec
+ spec_size
) + 1;
4974 spec
->next
= sizeof(*spec
) + slen
;
4975 strcpy((char*)&spec
[1], gspec
+ spec_size
);
4977 cur_data
+= spec
->next
;
4982 ret
= -TARGET_EINVAL
;
4983 unlock_user(argptr
, guest_data
, 0);
4986 unlock_user(argptr
, guest_data
, 0);
4988 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4989 if (!is_error(ret
)) {
4990 guest_data
= arg
+ host_dm
->data_start
;
4991 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
4992 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
4993 switch (ie
->host_cmd
) {
4998 case DM_DEV_SUSPEND
:
5001 case DM_TABLE_CLEAR
:
5003 case DM_DEV_SET_GEOMETRY
:
5004 /* no return data */
5006 case DM_LIST_DEVICES
:
5008 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
5009 uint32_t remaining_data
= guest_data_size
;
5010 void *cur_data
= argptr
;
5011 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
5012 int nl_size
= 12; /* can't use thunk_size due to alignment */
5015 uint32_t next
= nl
->next
;
5017 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
5019 if (remaining_data
< nl
->next
) {
5020 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5023 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
5024 strcpy(cur_data
+ nl_size
, nl
->name
);
5025 cur_data
+= nl
->next
;
5026 remaining_data
-= nl
->next
;
5030 nl
= (void*)nl
+ next
;
5035 case DM_TABLE_STATUS
:
5037 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
5038 void *cur_data
= argptr
;
5039 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5040 int spec_size
= thunk_type_size(arg_type
, 0);
5043 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5044 uint32_t next
= spec
->next
;
5045 int slen
= strlen((char*)&spec
[1]) + 1;
5046 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
5047 if (guest_data_size
< spec
->next
) {
5048 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5051 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
5052 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
5053 cur_data
= argptr
+ spec
->next
;
5054 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
5060 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
5061 int count
= *(uint32_t*)hdata
;
5062 uint64_t *hdev
= hdata
+ 8;
5063 uint64_t *gdev
= argptr
+ 8;
5066 *(uint32_t*)argptr
= tswap32(count
);
5067 for (i
= 0; i
< count
; i
++) {
5068 *gdev
= tswap64(*hdev
);
5074 case DM_LIST_VERSIONS
:
5076 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
5077 uint32_t remaining_data
= guest_data_size
;
5078 void *cur_data
= argptr
;
5079 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
5080 int vers_size
= thunk_type_size(arg_type
, 0);
5083 uint32_t next
= vers
->next
;
5085 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
5087 if (remaining_data
< vers
->next
) {
5088 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5091 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
5092 strcpy(cur_data
+ vers_size
, vers
->name
);
5093 cur_data
+= vers
->next
;
5094 remaining_data
-= vers
->next
;
5098 vers
= (void*)vers
+ next
;
5103 unlock_user(argptr
, guest_data
, 0);
5104 ret
= -TARGET_EINVAL
;
5107 unlock_user(argptr
, guest_data
, guest_data_size
);
5109 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5111 ret
= -TARGET_EFAULT
;
5114 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5115 unlock_user(argptr
, arg
, target_size
);
5122 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5123 int cmd
, abi_long arg
)
5127 const argtype
*arg_type
= ie
->arg_type
;
5128 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
5131 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
5132 struct blkpg_partition host_part
;
5134 /* Read and convert blkpg */
5136 target_size
= thunk_type_size(arg_type
, 0);
5137 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5139 ret
= -TARGET_EFAULT
;
5142 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5143 unlock_user(argptr
, arg
, 0);
5145 switch (host_blkpg
->op
) {
5146 case BLKPG_ADD_PARTITION
:
5147 case BLKPG_DEL_PARTITION
:
5148 /* payload is struct blkpg_partition */
5151 /* Unknown opcode */
5152 ret
= -TARGET_EINVAL
;
5156 /* Read and convert blkpg->data */
5157 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
5158 target_size
= thunk_type_size(part_arg_type
, 0);
5159 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5161 ret
= -TARGET_EFAULT
;
5164 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
5165 unlock_user(argptr
, arg
, 0);
5167 /* Swizzle the data pointer to our local copy and call! */
5168 host_blkpg
->data
= &host_part
;
5169 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
5175 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5176 int fd
, int cmd
, abi_long arg
)
5178 const argtype
*arg_type
= ie
->arg_type
;
5179 const StructEntry
*se
;
5180 const argtype
*field_types
;
5181 const int *dst_offsets
, *src_offsets
;
5184 abi_ulong
*target_rt_dev_ptr
;
5185 unsigned long *host_rt_dev_ptr
;
5189 assert(ie
->access
== IOC_W
);
5190 assert(*arg_type
== TYPE_PTR
);
5192 assert(*arg_type
== TYPE_STRUCT
);
5193 target_size
= thunk_type_size(arg_type
, 0);
5194 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5196 return -TARGET_EFAULT
;
5199 assert(*arg_type
== (int)STRUCT_rtentry
);
5200 se
= struct_entries
+ *arg_type
++;
5201 assert(se
->convert
[0] == NULL
);
5202 /* convert struct here to be able to catch rt_dev string */
5203 field_types
= se
->field_types
;
5204 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
5205 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
5206 for (i
= 0; i
< se
->nb_fields
; i
++) {
5207 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
5208 assert(*field_types
== TYPE_PTRVOID
);
5209 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
5210 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
5211 if (*target_rt_dev_ptr
!= 0) {
5212 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
5213 tswapal(*target_rt_dev_ptr
));
5214 if (!*host_rt_dev_ptr
) {
5215 unlock_user(argptr
, arg
, 0);
5216 return -TARGET_EFAULT
;
5219 *host_rt_dev_ptr
= 0;
5224 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
5225 argptr
+ src_offsets
[i
],
5226 field_types
, THUNK_HOST
);
5228 unlock_user(argptr
, arg
, 0);
5230 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5231 if (*host_rt_dev_ptr
!= 0) {
5232 unlock_user((void *)*host_rt_dev_ptr
,
5233 *target_rt_dev_ptr
, 0);
5238 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5239 int fd
, int cmd
, abi_long arg
)
5241 int sig
= target_to_host_signal(arg
);
5242 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
5245 static IOCTLEntry ioctl_entries
[] = {
5246 #define IOCTL(cmd, access, ...) \
5247 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5248 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5249 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5254 /* ??? Implement proper locking for ioctls. */
5255 /* do_ioctl() Must return target values and target errnos. */
5256 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
5258 const IOCTLEntry
*ie
;
5259 const argtype
*arg_type
;
5261 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
5267 if (ie
->target_cmd
== 0) {
5268 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
5269 return -TARGET_ENOSYS
;
5271 if (ie
->target_cmd
== cmd
)
5275 arg_type
= ie
->arg_type
;
5277 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
5280 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
5283 switch(arg_type
[0]) {
5286 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
5290 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
5294 target_size
= thunk_type_size(arg_type
, 0);
5295 switch(ie
->access
) {
5297 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5298 if (!is_error(ret
)) {
5299 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5301 return -TARGET_EFAULT
;
5302 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5303 unlock_user(argptr
, arg
, target_size
);
5307 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5309 return -TARGET_EFAULT
;
5310 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5311 unlock_user(argptr
, arg
, 0);
5312 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5316 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5318 return -TARGET_EFAULT
;
5319 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5320 unlock_user(argptr
, arg
, 0);
5321 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5322 if (!is_error(ret
)) {
5323 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5325 return -TARGET_EFAULT
;
5326 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5327 unlock_user(argptr
, arg
, target_size
);
5333 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5334 (long)cmd
, arg_type
[0]);
5335 ret
= -TARGET_ENOSYS
;
5341 static const bitmask_transtbl iflag_tbl
[] = {
5342 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
5343 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
5344 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
5345 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
5346 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
5347 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
5348 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
5349 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
5350 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
5351 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
5352 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
5353 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
5354 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
5355 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
5359 static const bitmask_transtbl oflag_tbl
[] = {
5360 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
5361 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
5362 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
5363 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
5364 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
5365 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
5366 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
5367 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
5368 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
5369 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
5370 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
5371 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
5372 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
5373 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
5374 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
5375 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
5376 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
5377 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
5378 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
5379 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
5380 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
5381 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
5382 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
5383 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
5387 static const bitmask_transtbl cflag_tbl
[] = {
5388 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
5389 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
5390 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
5391 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
5392 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
5393 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
5394 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
5395 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
5396 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
5397 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
5398 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
5399 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
5400 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
5401 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
5402 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
5403 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
5404 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
5405 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
5406 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
5407 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
5408 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
5409 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
5410 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
5411 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
5412 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
5413 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
5414 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
5415 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
5416 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
5417 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
5418 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
5422 static const bitmask_transtbl lflag_tbl
[] = {
5423 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
5424 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
5425 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
5426 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
5427 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
5428 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
5429 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
5430 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
5431 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
5432 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
5433 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
5434 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
5435 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
5436 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
5437 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
5441 static void target_to_host_termios (void *dst
, const void *src
)
5443 struct host_termios
*host
= dst
;
5444 const struct target_termios
*target
= src
;
5447 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
5449 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
5451 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
5453 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
5454 host
->c_line
= target
->c_line
;
5456 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
5457 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
5458 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
5459 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
5460 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
5461 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
5462 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
5463 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
5464 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
5465 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
5466 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
5467 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
5468 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
5469 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
5470 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
5471 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
5472 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
5473 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
5476 static void host_to_target_termios (void *dst
, const void *src
)
5478 struct target_termios
*target
= dst
;
5479 const struct host_termios
*host
= src
;
5482 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
5484 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
5486 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
5488 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
5489 target
->c_line
= host
->c_line
;
5491 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
5492 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
5493 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
5494 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
5495 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
5496 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
5497 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
5498 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
5499 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
5500 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
5501 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
5502 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
5503 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
5504 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
5505 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
5506 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
5507 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
5508 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
5511 static const StructEntry struct_termios_def
= {
5512 .convert
= { host_to_target_termios
, target_to_host_termios
},
5513 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
5514 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
5517 static bitmask_transtbl mmap_flags_tbl
[] = {
5518 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
5519 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
5520 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
5521 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
, MAP_ANONYMOUS
, MAP_ANONYMOUS
},
5522 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
, MAP_GROWSDOWN
, MAP_GROWSDOWN
},
5523 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
, MAP_DENYWRITE
, MAP_DENYWRITE
},
5524 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
, MAP_EXECUTABLE
, MAP_EXECUTABLE
},
5525 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
5526 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
, MAP_NORESERVE
,
5531 #if defined(TARGET_I386)
5533 /* NOTE: there is really one LDT for all the threads */
5534 static uint8_t *ldt_table
;
5536 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
5543 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
5544 if (size
> bytecount
)
5546 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
5548 return -TARGET_EFAULT
;
5549 /* ??? Should this by byteswapped? */
5550 memcpy(p
, ldt_table
, size
);
5551 unlock_user(p
, ptr
, size
);
5555 /* XXX: add locking support */
5556 static abi_long
write_ldt(CPUX86State
*env
,
5557 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
5559 struct target_modify_ldt_ldt_s ldt_info
;
5560 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5561 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5562 int seg_not_present
, useable
, lm
;
5563 uint32_t *lp
, entry_1
, entry_2
;
5565 if (bytecount
!= sizeof(ldt_info
))
5566 return -TARGET_EINVAL
;
5567 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
5568 return -TARGET_EFAULT
;
5569 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5570 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5571 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5572 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5573 unlock_user_struct(target_ldt_info
, ptr
, 0);
5575 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
5576 return -TARGET_EINVAL
;
5577 seg_32bit
= ldt_info
.flags
& 1;
5578 contents
= (ldt_info
.flags
>> 1) & 3;
5579 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5580 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5581 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5582 useable
= (ldt_info
.flags
>> 6) & 1;
5586 lm
= (ldt_info
.flags
>> 7) & 1;
5588 if (contents
== 3) {
5590 return -TARGET_EINVAL
;
5591 if (seg_not_present
== 0)
5592 return -TARGET_EINVAL
;
5594 /* allocate the LDT */
5596 env
->ldt
.base
= target_mmap(0,
5597 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
5598 PROT_READ
|PROT_WRITE
,
5599 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
5600 if (env
->ldt
.base
== -1)
5601 return -TARGET_ENOMEM
;
5602 memset(g2h(env
->ldt
.base
), 0,
5603 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
5604 env
->ldt
.limit
= 0xffff;
5605 ldt_table
= g2h(env
->ldt
.base
);
5608 /* NOTE: same code as Linux kernel */
5609 /* Allow LDTs to be cleared by the user. */
5610 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5613 read_exec_only
== 1 &&
5615 limit_in_pages
== 0 &&
5616 seg_not_present
== 1 &&
5624 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5625 (ldt_info
.limit
& 0x0ffff);
5626 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5627 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5628 (ldt_info
.limit
& 0xf0000) |
5629 ((read_exec_only
^ 1) << 9) |
5631 ((seg_not_present
^ 1) << 15) |
5633 (limit_in_pages
<< 23) |
5637 entry_2
|= (useable
<< 20);
5639 /* Install the new entry ... */
5641 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
5642 lp
[0] = tswap32(entry_1
);
5643 lp
[1] = tswap32(entry_2
);
5647 /* specific and weird i386 syscalls */
5648 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
5649 unsigned long bytecount
)
5655 ret
= read_ldt(ptr
, bytecount
);
5658 ret
= write_ldt(env
, ptr
, bytecount
, 1);
5661 ret
= write_ldt(env
, ptr
, bytecount
, 0);
5664 ret
= -TARGET_ENOSYS
;
5670 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5671 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5673 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5674 struct target_modify_ldt_ldt_s ldt_info
;
5675 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5676 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5677 int seg_not_present
, useable
, lm
;
5678 uint32_t *lp
, entry_1
, entry_2
;
5681 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5682 if (!target_ldt_info
)
5683 return -TARGET_EFAULT
;
5684 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5685 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5686 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5687 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5688 if (ldt_info
.entry_number
== -1) {
5689 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
5690 if (gdt_table
[i
] == 0) {
5691 ldt_info
.entry_number
= i
;
5692 target_ldt_info
->entry_number
= tswap32(i
);
5697 unlock_user_struct(target_ldt_info
, ptr
, 1);
5699 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
5700 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
5701 return -TARGET_EINVAL
;
5702 seg_32bit
= ldt_info
.flags
& 1;
5703 contents
= (ldt_info
.flags
>> 1) & 3;
5704 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5705 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5706 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5707 useable
= (ldt_info
.flags
>> 6) & 1;
5711 lm
= (ldt_info
.flags
>> 7) & 1;
5714 if (contents
== 3) {
5715 if (seg_not_present
== 0)
5716 return -TARGET_EINVAL
;
5719 /* NOTE: same code as Linux kernel */
5720 /* Allow LDTs to be cleared by the user. */
5721 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5722 if ((contents
== 0 &&
5723 read_exec_only
== 1 &&
5725 limit_in_pages
== 0 &&
5726 seg_not_present
== 1 &&
5734 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5735 (ldt_info
.limit
& 0x0ffff);
5736 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5737 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5738 (ldt_info
.limit
& 0xf0000) |
5739 ((read_exec_only
^ 1) << 9) |
5741 ((seg_not_present
^ 1) << 15) |
5743 (limit_in_pages
<< 23) |
5748 /* Install the new entry ... */
5750 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
5751 lp
[0] = tswap32(entry_1
);
5752 lp
[1] = tswap32(entry_2
);
5756 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5758 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5759 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5760 uint32_t base_addr
, limit
, flags
;
5761 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
5762 int seg_not_present
, useable
, lm
;
5763 uint32_t *lp
, entry_1
, entry_2
;
5765 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5766 if (!target_ldt_info
)
5767 return -TARGET_EFAULT
;
5768 idx
= tswap32(target_ldt_info
->entry_number
);
5769 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
5770 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
5771 unlock_user_struct(target_ldt_info
, ptr
, 1);
5772 return -TARGET_EINVAL
;
5774 lp
= (uint32_t *)(gdt_table
+ idx
);
5775 entry_1
= tswap32(lp
[0]);
5776 entry_2
= tswap32(lp
[1]);
5778 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
5779 contents
= (entry_2
>> 10) & 3;
5780 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
5781 seg_32bit
= (entry_2
>> 22) & 1;
5782 limit_in_pages
= (entry_2
>> 23) & 1;
5783 useable
= (entry_2
>> 20) & 1;
5787 lm
= (entry_2
>> 21) & 1;
5789 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
5790 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
5791 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
5792 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
5793 base_addr
= (entry_1
>> 16) |
5794 (entry_2
& 0xff000000) |
5795 ((entry_2
& 0xff) << 16);
5796 target_ldt_info
->base_addr
= tswapal(base_addr
);
5797 target_ldt_info
->limit
= tswap32(limit
);
5798 target_ldt_info
->flags
= tswap32(flags
);
5799 unlock_user_struct(target_ldt_info
, ptr
, 1);
5802 #endif /* TARGET_I386 && TARGET_ABI32 */
5804 #ifndef TARGET_ABI32
5805 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
5812 case TARGET_ARCH_SET_GS
:
5813 case TARGET_ARCH_SET_FS
:
5814 if (code
== TARGET_ARCH_SET_GS
)
5818 cpu_x86_load_seg(env
, idx
, 0);
5819 env
->segs
[idx
].base
= addr
;
5821 case TARGET_ARCH_GET_GS
:
5822 case TARGET_ARCH_GET_FS
:
5823 if (code
== TARGET_ARCH_GET_GS
)
5827 val
= env
->segs
[idx
].base
;
5828 if (put_user(val
, addr
, abi_ulong
))
5829 ret
= -TARGET_EFAULT
;
5832 ret
= -TARGET_EINVAL
;
5839 #endif /* defined(TARGET_I386) */
5841 #define NEW_STACK_SIZE 0x40000
5844 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
5847 pthread_mutex_t mutex
;
5848 pthread_cond_t cond
;
5851 abi_ulong child_tidptr
;
5852 abi_ulong parent_tidptr
;
5856 static void *clone_func(void *arg
)
5858 new_thread_info
*info
= arg
;
5863 rcu_register_thread();
5865 cpu
= ENV_GET_CPU(env
);
5867 ts
= (TaskState
*)cpu
->opaque
;
5868 info
->tid
= gettid();
5869 cpu
->host_tid
= info
->tid
;
5871 if (info
->child_tidptr
)
5872 put_user_u32(info
->tid
, info
->child_tidptr
);
5873 if (info
->parent_tidptr
)
5874 put_user_u32(info
->tid
, info
->parent_tidptr
);
5875 /* Enable signals. */
5876 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
5877 /* Signal to the parent that we're ready. */
5878 pthread_mutex_lock(&info
->mutex
);
5879 pthread_cond_broadcast(&info
->cond
);
5880 pthread_mutex_unlock(&info
->mutex
);
5881 /* Wait until the parent has finshed initializing the tls state. */
5882 pthread_mutex_lock(&clone_lock
);
5883 pthread_mutex_unlock(&clone_lock
);
5889 /* do_fork() Must return host values and target errnos (unlike most
5890 do_*() functions). */
5891 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
5892 abi_ulong parent_tidptr
, target_ulong newtls
,
5893 abi_ulong child_tidptr
)
5895 CPUState
*cpu
= ENV_GET_CPU(env
);
5899 CPUArchState
*new_env
;
5900 unsigned int nptl_flags
;
5903 /* Emulate vfork() with fork() */
5904 if (flags
& CLONE_VFORK
)
5905 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
5907 if (flags
& CLONE_VM
) {
5908 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
5909 new_thread_info info
;
5910 pthread_attr_t attr
;
5912 ts
= g_new0(TaskState
, 1);
5913 init_task_state(ts
);
5914 /* we create a new CPU instance. */
5915 new_env
= cpu_copy(env
);
5916 /* Init regs that differ from the parent. */
5917 cpu_clone_regs(new_env
, newsp
);
5918 new_cpu
= ENV_GET_CPU(new_env
);
5919 new_cpu
->opaque
= ts
;
5920 ts
->bprm
= parent_ts
->bprm
;
5921 ts
->info
= parent_ts
->info
;
5922 ts
->signal_mask
= parent_ts
->signal_mask
;
5924 flags
&= ~CLONE_NPTL_FLAGS2
;
5926 if (nptl_flags
& CLONE_CHILD_CLEARTID
) {
5927 ts
->child_tidptr
= child_tidptr
;
5930 if (nptl_flags
& CLONE_SETTLS
)
5931 cpu_set_tls (new_env
, newtls
);
5933 /* Grab a mutex so that thread setup appears atomic. */
5934 pthread_mutex_lock(&clone_lock
);
5936 memset(&info
, 0, sizeof(info
));
5937 pthread_mutex_init(&info
.mutex
, NULL
);
5938 pthread_mutex_lock(&info
.mutex
);
5939 pthread_cond_init(&info
.cond
, NULL
);
5941 if (nptl_flags
& CLONE_CHILD_SETTID
)
5942 info
.child_tidptr
= child_tidptr
;
5943 if (nptl_flags
& CLONE_PARENT_SETTID
)
5944 info
.parent_tidptr
= parent_tidptr
;
5946 ret
= pthread_attr_init(&attr
);
5947 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
5948 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
5949 /* It is not safe to deliver signals until the child has finished
5950 initializing, so temporarily block all signals. */
5951 sigfillset(&sigmask
);
5952 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
5954 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
5955 /* TODO: Free new CPU state if thread creation failed. */
5957 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
5958 pthread_attr_destroy(&attr
);
5960 /* Wait for the child to initialize. */
5961 pthread_cond_wait(&info
.cond
, &info
.mutex
);
5963 if (flags
& CLONE_PARENT_SETTID
)
5964 put_user_u32(ret
, parent_tidptr
);
5968 pthread_mutex_unlock(&info
.mutex
);
5969 pthread_cond_destroy(&info
.cond
);
5970 pthread_mutex_destroy(&info
.mutex
);
5971 pthread_mutex_unlock(&clone_lock
);
5973 /* if no CLONE_VM, we consider it is a fork */
5974 if ((flags
& ~(CSIGNAL
| CLONE_NPTL_FLAGS2
)) != 0) {
5975 return -TARGET_EINVAL
;
5978 if (block_signals()) {
5979 return -TARGET_ERESTARTSYS
;
5985 /* Child Process. */
5987 cpu_clone_regs(env
, newsp
);
5989 /* There is a race condition here. The parent process could
5990 theoretically read the TID in the child process before the child
5991 tid is set. This would require using either ptrace
5992 (not implemented) or having *_tidptr to point at a shared memory
5993 mapping. We can't repeat the spinlock hack used above because
5994 the child process gets its own copy of the lock. */
5995 if (flags
& CLONE_CHILD_SETTID
)
5996 put_user_u32(gettid(), child_tidptr
);
5997 if (flags
& CLONE_PARENT_SETTID
)
5998 put_user_u32(gettid(), parent_tidptr
);
5999 ts
= (TaskState
*)cpu
->opaque
;
6000 if (flags
& CLONE_SETTLS
)
6001 cpu_set_tls (env
, newtls
);
6002 if (flags
& CLONE_CHILD_CLEARTID
)
6003 ts
->child_tidptr
= child_tidptr
;
6011 /* warning : doesn't handle linux specific flags... */
6012 static int target_to_host_fcntl_cmd(int cmd
)
6015 case TARGET_F_DUPFD
:
6016 case TARGET_F_GETFD
:
6017 case TARGET_F_SETFD
:
6018 case TARGET_F_GETFL
:
6019 case TARGET_F_SETFL
:
6021 case TARGET_F_GETLK
:
6023 case TARGET_F_SETLK
:
6025 case TARGET_F_SETLKW
:
6027 case TARGET_F_GETOWN
:
6029 case TARGET_F_SETOWN
:
6031 case TARGET_F_GETSIG
:
6033 case TARGET_F_SETSIG
:
6035 #if TARGET_ABI_BITS == 32
6036 case TARGET_F_GETLK64
:
6038 case TARGET_F_SETLK64
:
6040 case TARGET_F_SETLKW64
:
6043 case TARGET_F_SETLEASE
:
6045 case TARGET_F_GETLEASE
:
6047 #ifdef F_DUPFD_CLOEXEC
6048 case TARGET_F_DUPFD_CLOEXEC
:
6049 return F_DUPFD_CLOEXEC
;
6051 case TARGET_F_NOTIFY
:
6054 case TARGET_F_GETOWN_EX
:
6058 case TARGET_F_SETOWN_EX
:
6062 case TARGET_F_SETPIPE_SZ
:
6063 return F_SETPIPE_SZ
;
6064 case TARGET_F_GETPIPE_SZ
:
6065 return F_GETPIPE_SZ
;
6068 return -TARGET_EINVAL
;
6070 return -TARGET_EINVAL
;
6073 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
6074 static const bitmask_transtbl flock_tbl
[] = {
6075 TRANSTBL_CONVERT(F_RDLCK
),
6076 TRANSTBL_CONVERT(F_WRLCK
),
6077 TRANSTBL_CONVERT(F_UNLCK
),
6078 TRANSTBL_CONVERT(F_EXLCK
),
6079 TRANSTBL_CONVERT(F_SHLCK
),
6083 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
6084 abi_ulong target_flock_addr
)
6086 struct target_flock
*target_fl
;
6089 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6090 return -TARGET_EFAULT
;
6093 __get_user(l_type
, &target_fl
->l_type
);
6094 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6095 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6096 __get_user(fl
->l_start
, &target_fl
->l_start
);
6097 __get_user(fl
->l_len
, &target_fl
->l_len
);
6098 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6099 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6103 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
6104 const struct flock64
*fl
)
6106 struct target_flock
*target_fl
;
6109 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6110 return -TARGET_EFAULT
;
6113 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6114 __put_user(l_type
, &target_fl
->l_type
);
6115 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6116 __put_user(fl
->l_start
, &target_fl
->l_start
);
6117 __put_user(fl
->l_len
, &target_fl
->l_len
);
6118 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6119 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6123 typedef abi_long
from_flock64_fn(struct flock64
*fl
, abi_ulong target_addr
);
6124 typedef abi_long
to_flock64_fn(abi_ulong target_addr
, const struct flock64
*fl
);
6126 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6127 static inline abi_long
copy_from_user_eabi_flock64(struct flock64
*fl
,
6128 abi_ulong target_flock_addr
)
6130 struct target_eabi_flock64
*target_fl
;
6133 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6134 return -TARGET_EFAULT
;
6137 __get_user(l_type
, &target_fl
->l_type
);
6138 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6139 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6140 __get_user(fl
->l_start
, &target_fl
->l_start
);
6141 __get_user(fl
->l_len
, &target_fl
->l_len
);
6142 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6143 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6147 static inline abi_long
copy_to_user_eabi_flock64(abi_ulong target_flock_addr
,
6148 const struct flock64
*fl
)
6150 struct target_eabi_flock64
*target_fl
;
6153 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6154 return -TARGET_EFAULT
;
6157 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6158 __put_user(l_type
, &target_fl
->l_type
);
6159 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6160 __put_user(fl
->l_start
, &target_fl
->l_start
);
6161 __put_user(fl
->l_len
, &target_fl
->l_len
);
6162 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6163 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6168 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
6169 abi_ulong target_flock_addr
)
6171 struct target_flock64
*target_fl
;
6174 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6175 return -TARGET_EFAULT
;
6178 __get_user(l_type
, &target_fl
->l_type
);
6179 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6180 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6181 __get_user(fl
->l_start
, &target_fl
->l_start
);
6182 __get_user(fl
->l_len
, &target_fl
->l_len
);
6183 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6184 unlock_user_struct(target_fl
, target_flock_addr
, 0);
/* Copy a host struct flock64 out to a guest target_flock64.  Mirror image of
 * copy_from_user_flock64.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
                                            const struct flock64 *fl)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
/* Emulate the guest fcntl(2) family.  Translates the target command to the
 * host command, converts flock / f_owner_ex / flag arguments between guest
 * and host layouts, and issues the host syscall via safe_fcntl so pending
 * guest signals are handled correctly.
 *
 * NOTE(review): reconstructed from a lossy extraction; the switch scaffolding
 * (break statements, host_cmd dispatch) follows upstream QEMU — confirm
 * against the original file.
 */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock64 fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock(arg, &fl64);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETLK64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl64);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            /* O_* flag values differ between host and target. */
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
        /* Plain integer argument; no conversion required. */
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        /* Unknown command: pass through untranslated. */
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
#ifdef USE_UID16
/* Targets with 16-bit uid_t: clamp 32-bit host IDs into the 16-bit range
 * (65534 is the overflow sentinel the kernel uses), widen 16-bit guest IDs
 * back (preserving the -1 "no change" sentinel), and swap as 16-bit values.
 */
static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}
static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}
static inline int low2highuid(int uid)
{
    /* (uint16_t)-1 means "leave unchanged" in the 16-bit ABI. */
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}
static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}
static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */
/* 32-bit uid_t targets: all conversions are identity apart from byte order. */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
/* We must do direct syscalls for setting UID/GID, because we want to
 * implement the Linux system call semantics of "change only for this thread",
 * not the libc/POSIX semantics of "change for all threads in process".
 * (See http://ewontfix.com/17/ for more details.)
 * We use the 32-bit version of the syscalls if present; if it is not
 * then either the host architecture supports 32-bit UIDs natively with
 * the standard syscall, or the 16-bit UID is the best we can do.
 */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif

/* Raw syscall stubs bypassing libc (see comment above). */
_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
/* One-time initialisation for the syscall layer: registers thunk struct
 * descriptors, builds the reverse errno table, and patches ioctl size fields
 * that were declared with the all-ones sentinel so they carry the real
 * (target-computed) argument size.
 *
 * NOTE(review): reconstructed from a lossy extraction — verify the ioctl
 * table walk against the original file.
 */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

    thunk_init(STRUCT_MAX);
#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
#if TARGET_ABI_BITS == 32
/* Combine two 32-bit register halves into a 64-bit offset, honouring the
 * guest's register-pair ordering (high word first on big-endian targets).
 */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABIs pass the whole offset in one register; the second is unused. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* Handle the guest truncate64 syscall: some ABIs require 64-bit register
 * pairs to start on an even register, in which case the pair is shifted up
 * by one argument slot before being recombined into the 64-bit length.
 */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* Handle the guest ftruncate64 syscall; same register-pair alignment rules
 * as target_truncate64 above.
 */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
/* Read a guest target_timespec into a host struct timespec (byte-swapping
 * via __get_user).  Returns 0 or -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
        return -TARGET_EFAULT;
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
/* Write a host struct timespec out to a guest target_timespec (byte-swapping
 * via __put_user).  Returns 0 or -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
6538 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
6539 abi_ulong target_addr
)
6541 struct target_itimerspec
*target_itspec
;
6543 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
6544 return -TARGET_EFAULT
;
6547 host_itspec
->it_interval
.tv_sec
=
6548 tswapal(target_itspec
->it_interval
.tv_sec
);
6549 host_itspec
->it_interval
.tv_nsec
=
6550 tswapal(target_itspec
->it_interval
.tv_nsec
);
6551 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
6552 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
6554 unlock_user_struct(target_itspec
, target_addr
, 1);
6558 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
6559 struct itimerspec
*host_its
)
6561 struct target_itimerspec
*target_itspec
;
6563 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
6564 return -TARGET_EFAULT
;
6567 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
6568 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
6570 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
6571 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
6573 unlock_user_struct(target_itspec
, target_addr
, 0);
/* Read a guest target_sigevent into a host struct sigevent, translating the
 * signal number and byte-swapping fields.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
                                               abi_ulong target_addr)
{
    struct target_sigevent *target_sevp;

    if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    /* This union is awkward on 64 bit systems because it has a 32 bit
     * integer and a pointer in it; we follow the conversion approach
     * used for handling sigval types in signal.c so the guest should get
     * the correct value back even if we did a 64 bit byteswap and it's
     * using the 32 bit integer.
     */
    host_sevp->sigev_value.sival_ptr =
        (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
    host_sevp->sigev_signo =
        target_to_host_signal(tswap32(target_sevp->sigev_signo));
    host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
    host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);

    unlock_user_struct(target_sevp, target_addr, 1);
    return 0;
}
#if defined(TARGET_NR_mlockall)
/* Translate the guest mlockall() flags bitmask into host MCL_* flags. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }

    return result;
}
#endif
/* Copy a host struct stat out to the guest's 64-bit stat structure.  On
 * ARM/ABI32 an EABI process gets the target_eabi_stat64 layout; everything
 * else gets target_stat64 (or target_stat when the target has no separate
 * stat64).  Returns 0 or -TARGET_EFAULT.
 *
 * NOTE(review): reconstructed from a lossy extraction — field list follows
 * upstream QEMU; confirm against the original file.
 */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        /* Some targets carry the inode in a second field as well. */
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  However they're probably useless because guest atomic
   operations won't work either. */
/* Emulate the guest futex(2) syscall on the host futex for the guest
 * address g2h(uaddr).  Only the val/timeout/val3 arguments that each
 * operation actually uses are converted.
 */
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            target_to_host_timespec(pts, timeout);
        } else {
            pts = NULL;
        }
        /* val is compared against guest memory, so it must be swapped. */
        return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
                         pts, NULL, val3));
    case FUTEX_WAKE:
        return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_FD:
        return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return get_errno(safe_futex(g2h(uaddr), op, val, pts,
                                    g2h(uaddr2),
                                    (base_op == FUTEX_CMP_REQUEUE
                                     ? tswap32(val3)
                                     : val3)));
    default:
        return -TARGET_ENOSYS;
    }
}
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate name_to_handle_at(2): the guest supplies a file_handle buffer whose
 * handle_bytes field gives the available payload size.  A host-side handle of
 * the same total size is filled in, copied back with the two header fields
 * byte-swapped (the payload is opaque), and the mount id stored to *mount_id.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* handle_bytes is the first field of struct file_handle. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate open_by_handle_at(2): copy the guest file_handle (payload is
 * opaque, header fields byte-swapped) into a host-side duplicate, translate
 * the open flags, and issue the host call.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    /* handle_bytes is the first field of struct file_handle. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
#if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)

/* signalfd siginfo conversion */

/* Convert a struct signalfd_siginfo from host to target byte order in place
 * (the caller passes the same buffer as both tinfo and info), translating
 * the signal number as well.
 */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */

#ifdef BUS_MCEERR_AO
    /* Machine-check SIGBUS records carry an extra LSB field just past
     * ssi_addr; swap it too.
     */
    if (tinfo->ssi_signo == SIGBUS &&
        (tinfo->ssi_code == BUS_MCEERR_AR ||
         tinfo->ssi_code == BUS_MCEERR_AO)) {
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    /* NOTE(review): reads ssi_errno via tinfo rather than info — works only
     * because tinfo and info alias in the caller; confirm intent. */
    tinfo->ssi_errno = tswap32(tinfo->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
/* fd_trans read hook for signalfd descriptors: convert each
 * signalfd_siginfo record in the buffer in place (source and destination
 * alias deliberately) and return the unchanged length.
 */
static abi_long host_to_target_data_signalfd(void *buf, size_t len)
{
    int i;

    for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
        host_to_target_signalfd_siginfo(buf + i, buf + i);
    }

    return len;
}

/* Translation table registered for fds created by do_signalfd4(). */
static TargetFdTrans target_signalfd_trans = {
    .host_to_target_data = host_to_target_data_signalfd,
};
/* Emulate signalfd4(2): validate/translate the flags, convert the guest
 * sigset, create the host signalfd, and register the per-fd data-conversion
 * hook so reads return target-order siginfo records.
 */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    /* Only NONBLOCK and CLOEXEC are valid signalfd4 flags. */
    if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
#endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Termination signal lives in the low 7 bits. */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* Stop signal lives in bits 8-15. */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
/* Fill fd with the contents of /proc/self/cmdline, skipping the first
 * NUL-terminated string (which is the qemu binary path rather than the
 * guest's argv[0]).  Used to fake /proc/self/cmdline for the guest.
 *
 * NOTE(review): reconstructed from a lossy extraction; error-path errno
 * handling follows upstream QEMU — confirm against the original file.
 */
static int open_self_cmdline(void *cpu_env, int fd)
{
    int fd_orig = -1;
    bool word_skipped = false;

    fd_orig = open("/proc/self/cmdline", O_RDONLY);
    if (fd_orig < 0) {
        return fd_orig;
    }

    while (true) {
        ssize_t nb_read;
        char buf[128];
        char *cp_buf = buf;

        nb_read = read(fd_orig, buf, sizeof(buf));
        if (nb_read < 0) {
            int e = errno;
            fd_orig = close(fd_orig);
            errno = e;
            return -1;
        } else if (nb_read == 0) {
            break;
        }

        if (!word_skipped) {
            /* Skip the first string, which is the path to qemu-*-static
               instead of the actual command. */
            cp_buf = memchr(buf, 0, nb_read);
            if (cp_buf) {
                /* Null byte found, skip one string */
                cp_buf++;
                nb_read -= cp_buf - buf;
                word_skipped = true;
            }
        }

        if (word_skipped) {
            if (write(fd, cp_buf, nb_read) != nb_read) {
                int e = errno;
                close(fd_orig);
                errno = e;
                return -1;
            }
        }
    }

    return close(fd_orig);
}
/* Fake /proc/self/maps for the guest: parse the host's maps, keep only
 * ranges that translate to valid guest addresses, rewrite them through h2g,
 * and label the guest stack region.
 *
 * NOTE(review): reconstructed from a lossy extraction; the "[stack]" label
 * and padding strings had their whitespace collapsed — confirm exact
 * spacing against the original file.
 */
static int open_self_maps(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/self/maps", "r");
    if (fp == NULL) {
        return -1;
    }

    while ((read = getline(&line, &len, fp)) != -1) {
        int fields, dev_maj, dev_min, inode;
        uint64_t min, max, offset;
        char flag_r, flag_w, flag_x, flag_p;
        char path[512] = "";
        fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
                        " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
                        &flag_p, &offset, &dev_maj, &dev_min, &inode, path);

        /* 10 fields when the path is absent, 11 when present. */
        if ((fields < 10) || (fields > 11)) {
            continue;
        }
        if (h2g_valid(min)) {
            int flags = page_get_flags(h2g(min));
            /* Clip ranges that extend past the guest address space. */
            max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
            if (page_check_range(h2g(min), max - min, flags) == -1) {
                continue;
            }
            if (h2g(min) == ts->info->stack_limit) {
                pstrcpy(path, sizeof(path), "      [stack]");
            }
            dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
                    " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
                    h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
                    flag_x, flag_p, offset, dev_maj, dev_min, inode,
                    path[0] ? "         " : "", path);
        }
    }

    free(line);
    fclose(fp);

    return 0;
}
/* Fake /proc/self/stat for the guest.  Only the fields a guest commonly
 * inspects are filled with real values: pid (field 0), comm (field 1) and
 * the stack start address (field 27); every other field is written as 0.
 *
 * NOTE(review): reconstructed from a lossy extraction — confirm field
 * indices and buffer handling against the original file.
 */
static int open_self_stat(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong start_stack = ts->info->start_stack;
    int i;

    for (i = 0; i < 44; i++) {
        char buf[128];
        int len;
        uint64_t val = 0;

        if (i == 0) {
            /* pid */
            val = getpid();
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else if (i == 1) {
            /* app name */
            snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
        } else if (i == 27) {
            /* stack bottom */
            val = start_stack;
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else {
            /* for the rest, there is MasterCard */
            snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
        }

        len = strlen(buf);
        if (write(fd, buf, len) != len) {
            return -1;
        }
    }

    return 0;
}
/* Fake /proc/self/auxv for the guest by copying the auxiliary vector saved
 * at exec time from the guest stack into fd, then rewinding the fd.
 *
 * NOTE(review): reconstructed from a lossy extraction — confirm the partial
 * write loop against the original file.
 */
static int open_self_auxv(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
/* Return 1 if filename names the given entry of this process's /proc
 * directory — i.e. "/proc/self/<entry>" or "/proc/<our pid>/<entry>" —
 * otherwise 0.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-match comparator used for fake /proc entries that are only needed
 * when host and target byte order differ.
 */
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}

/* Fake /proc/net/route: copy the host file but byte-swap the address
 * columns (dest/gateway/mask) so the guest sees them in its own byte order.
 *
 * NOTE(review): reconstructed from a lossy extraction — confirm the iface
 * buffer size and header handling against the original file.
 */
static int open_net_route(void *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -1;
    }

    /* read header */

    read = getline(&line, &len, fp);
    dprintf(fd, "%s", line);

    /* read routes */

    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
               iface, &dest, &gw, &flags, &refcnt, &use, &metric,
               &mask, &mtu, &window, &irtt);
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
#endif
/* Common open/openat emulation.  Intercepts /proc/self/exe (returns the
 * AT_EXECFD descriptor when the loader provided one) and a table of faked
 * /proc entries, which are materialised into an unlinked temp file; all
 * other paths go straight to the host via safe_openat.
 *
 * NOTE(review): reconstructed from a lossy extraction — confirm temp-file
 * error paths against the original file.
 */
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;
        int (*fill)(void *cpu_env, int fd);
        int (*cmp)(const char *s1, const char *s2);
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    if (is_proc_myself(pathname, "exe")) {
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir) {
            tmpdir = "/tmp";
        }
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        /* Unlink immediately; the fd keeps the file alive. */
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
/* Guest-visible timer IDs are the internal table index OR'd with a magic
 * tag in the top 16 bits, so stale/forged IDs can be rejected.
 */
#define TIMER_MAGIC 0x0caf0000
#define TIMER_MAGIC_MASK 0xffff0000

/* Convert QEMU provided timer ID back to internal 16bit index format */
static target_timer_t get_timer_id(abi_long arg)
{
    target_timer_t timerid = arg;

    if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
        return -TARGET_EINVAL;
    }

    timerid &= 0xffff;

    if (timerid >= ARRAY_SIZE(g_posix_timers)) {
        return -TARGET_EINVAL;
    }

    return timerid;
}
7235 /* do_syscall() should always have a single exit point at the end so
7236 that actions, such as logging of syscall results, can be performed.
7237 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7238 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
7239 abi_long arg2
, abi_long arg3
, abi_long arg4
,
7240 abi_long arg5
, abi_long arg6
, abi_long arg7
,
7243 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
7249 #if defined(DEBUG_ERESTARTSYS)
7250 /* Debug-only code for exercising the syscall-restart code paths
7251 * in the per-architecture cpu main loops: restart every syscall
7252 * the guest makes once before letting it through.
7259 return -TARGET_ERESTARTSYS
;
7265 gemu_log("syscall %d", num
);
7267 trace_guest_user_syscall(cpu
, num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, arg7
, arg8
);
7269 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7272 case TARGET_NR_exit
:
7273 /* In old applications this may be used to implement _exit(2).
7274 However in threaded applictions it is used for thread termination,
7275 and _exit_group is used for application termination.
7276 Do thread termination if we have more then one thread. */
7278 if (block_signals()) {
7279 ret
= -TARGET_ERESTARTSYS
;
7283 if (CPU_NEXT(first_cpu
)) {
7287 /* Remove the CPU from the list. */
7288 QTAILQ_REMOVE(&cpus
, cpu
, node
);
7291 if (ts
->child_tidptr
) {
7292 put_user_u32(0, ts
->child_tidptr
);
7293 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
7297 object_unref(OBJECT(cpu
));
7299 rcu_unregister_thread();
7305 gdb_exit(cpu_env
, arg1
);
7307 ret
= 0; /* avoid warning */
7309 case TARGET_NR_read
:
7313 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
7315 ret
= get_errno(safe_read(arg1
, p
, arg3
));
7317 fd_trans_host_to_target_data(arg1
)) {
7318 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
7320 unlock_user(p
, arg2
, ret
);
7323 case TARGET_NR_write
:
7324 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
7326 ret
= get_errno(safe_write(arg1
, p
, arg3
));
7327 unlock_user(p
, arg2
, 0);
7329 #ifdef TARGET_NR_open
7330 case TARGET_NR_open
:
7331 if (!(p
= lock_user_string(arg1
)))
7333 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
7334 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
7336 fd_trans_unregister(ret
);
7337 unlock_user(p
, arg1
, 0);
7340 case TARGET_NR_openat
:
7341 if (!(p
= lock_user_string(arg2
)))
7343 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
7344 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
7346 fd_trans_unregister(ret
);
7347 unlock_user(p
, arg2
, 0);
7349 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7350 case TARGET_NR_name_to_handle_at
:
7351 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
7354 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7355 case TARGET_NR_open_by_handle_at
:
7356 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
7357 fd_trans_unregister(ret
);
7360 case TARGET_NR_close
:
7361 fd_trans_unregister(arg1
);
7362 ret
= get_errno(close(arg1
));
7367 #ifdef TARGET_NR_fork
7368 case TARGET_NR_fork
:
7369 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
7372 #ifdef TARGET_NR_waitpid
7373 case TARGET_NR_waitpid
:
7376 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
7377 if (!is_error(ret
) && arg2
&& ret
7378 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
7383 #ifdef TARGET_NR_waitid
7384 case TARGET_NR_waitid
:
7388 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
7389 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
7390 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
7392 host_to_target_siginfo(p
, &info
);
7393 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
7398 #ifdef TARGET_NR_creat /* not on alpha */
7399 case TARGET_NR_creat
:
7400 if (!(p
= lock_user_string(arg1
)))
7402 ret
= get_errno(creat(p
, arg2
));
7403 fd_trans_unregister(ret
);
7404 unlock_user(p
, arg1
, 0);
7407 #ifdef TARGET_NR_link
7408 case TARGET_NR_link
:
7411 p
= lock_user_string(arg1
);
7412 p2
= lock_user_string(arg2
);
7414 ret
= -TARGET_EFAULT
;
7416 ret
= get_errno(link(p
, p2
));
7417 unlock_user(p2
, arg2
, 0);
7418 unlock_user(p
, arg1
, 0);
7422 #if defined(TARGET_NR_linkat)
7423 case TARGET_NR_linkat
:
7428 p
= lock_user_string(arg2
);
7429 p2
= lock_user_string(arg4
);
7431 ret
= -TARGET_EFAULT
;
7433 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
7434 unlock_user(p
, arg2
, 0);
7435 unlock_user(p2
, arg4
, 0);
7439 #ifdef TARGET_NR_unlink
7440 case TARGET_NR_unlink
:
7441 if (!(p
= lock_user_string(arg1
)))
7443 ret
= get_errno(unlink(p
));
7444 unlock_user(p
, arg1
, 0);
7447 #if defined(TARGET_NR_unlinkat)
7448 case TARGET_NR_unlinkat
:
7449 if (!(p
= lock_user_string(arg2
)))
7451 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
7452 unlock_user(p
, arg2
, 0);
7455 case TARGET_NR_execve
:
7457 char **argp
, **envp
;
7460 abi_ulong guest_argp
;
7461 abi_ulong guest_envp
;
7468 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
7469 if (get_user_ual(addr
, gp
))
7477 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
7478 if (get_user_ual(addr
, gp
))
7485 argp
= alloca((argc
+ 1) * sizeof(void *));
7486 envp
= alloca((envc
+ 1) * sizeof(void *));
7488 for (gp
= guest_argp
, q
= argp
; gp
;
7489 gp
+= sizeof(abi_ulong
), q
++) {
7490 if (get_user_ual(addr
, gp
))
7494 if (!(*q
= lock_user_string(addr
)))
7496 total_size
+= strlen(*q
) + 1;
7500 for (gp
= guest_envp
, q
= envp
; gp
;
7501 gp
+= sizeof(abi_ulong
), q
++) {
7502 if (get_user_ual(addr
, gp
))
7506 if (!(*q
= lock_user_string(addr
)))
7508 total_size
+= strlen(*q
) + 1;
7512 if (!(p
= lock_user_string(arg1
)))
7514 /* Although execve() is not an interruptible syscall it is
7515 * a special case where we must use the safe_syscall wrapper:
7516 * if we allow a signal to happen before we make the host
7517 * syscall then we will 'lose' it, because at the point of
7518 * execve the process leaves QEMU's control. So we use the
7519 * safe syscall wrapper to ensure that we either take the
7520 * signal as a guest signal, or else it does not happen
7521 * before the execve completes and makes it the other
7522 * program's problem.
7524 ret
= get_errno(safe_execve(p
, argp
, envp
));
7525 unlock_user(p
, arg1
, 0);
7530 ret
= -TARGET_EFAULT
;
7533 for (gp
= guest_argp
, q
= argp
; *q
;
7534 gp
+= sizeof(abi_ulong
), q
++) {
7535 if (get_user_ual(addr
, gp
)
7538 unlock_user(*q
, addr
, 0);
7540 for (gp
= guest_envp
, q
= envp
; *q
;
7541 gp
+= sizeof(abi_ulong
), q
++) {
7542 if (get_user_ual(addr
, gp
)
7545 unlock_user(*q
, addr
, 0);
7549 case TARGET_NR_chdir
:
7550 if (!(p
= lock_user_string(arg1
)))
7552 ret
= get_errno(chdir(p
));
7553 unlock_user(p
, arg1
, 0);
7555 #ifdef TARGET_NR_time
7556 case TARGET_NR_time
:
7559 ret
= get_errno(time(&host_time
));
7562 && put_user_sal(host_time
, arg1
))
7567 #ifdef TARGET_NR_mknod
7568 case TARGET_NR_mknod
:
7569 if (!(p
= lock_user_string(arg1
)))
7571 ret
= get_errno(mknod(p
, arg2
, arg3
));
7572 unlock_user(p
, arg1
, 0);
7575 #if defined(TARGET_NR_mknodat)
7576 case TARGET_NR_mknodat
:
7577 if (!(p
= lock_user_string(arg2
)))
7579 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
7580 unlock_user(p
, arg2
, 0);
7583 #ifdef TARGET_NR_chmod
7584 case TARGET_NR_chmod
:
7585 if (!(p
= lock_user_string(arg1
)))
7587 ret
= get_errno(chmod(p
, arg2
));
7588 unlock_user(p
, arg1
, 0);
7591 #ifdef TARGET_NR_break
7592 case TARGET_NR_break
:
7595 #ifdef TARGET_NR_oldstat
7596 case TARGET_NR_oldstat
:
7599 case TARGET_NR_lseek
:
7600 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
7602 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7603 /* Alpha specific */
7604 case TARGET_NR_getxpid
:
7605 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
7606 ret
= get_errno(getpid());
7609 #ifdef TARGET_NR_getpid
7610 case TARGET_NR_getpid
:
7611 ret
= get_errno(getpid());
7614 case TARGET_NR_mount
:
7616 /* need to look at the data field */
7620 p
= lock_user_string(arg1
);
7628 p2
= lock_user_string(arg2
);
7631 unlock_user(p
, arg1
, 0);
7637 p3
= lock_user_string(arg3
);
7640 unlock_user(p
, arg1
, 0);
7642 unlock_user(p2
, arg2
, 0);
7649 /* FIXME - arg5 should be locked, but it isn't clear how to
7650 * do that since it's not guaranteed to be a NULL-terminated
7654 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
7656 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
7658 ret
= get_errno(ret
);
7661 unlock_user(p
, arg1
, 0);
7663 unlock_user(p2
, arg2
, 0);
7665 unlock_user(p3
, arg3
, 0);
7669 #ifdef TARGET_NR_umount
7670 case TARGET_NR_umount
:
7671 if (!(p
= lock_user_string(arg1
)))
7673 ret
= get_errno(umount(p
));
7674 unlock_user(p
, arg1
, 0);
7677 #ifdef TARGET_NR_stime /* not on alpha */
7678 case TARGET_NR_stime
:
7681 if (get_user_sal(host_time
, arg1
))
7683 ret
= get_errno(stime(&host_time
));
7687 case TARGET_NR_ptrace
:
7689 #ifdef TARGET_NR_alarm /* not on alpha */
7690 case TARGET_NR_alarm
:
7694 #ifdef TARGET_NR_oldfstat
7695 case TARGET_NR_oldfstat
:
7698 #ifdef TARGET_NR_pause /* not on alpha */
7699 case TARGET_NR_pause
:
7700 if (!block_signals()) {
7701 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
7703 ret
= -TARGET_EINTR
;
7706 #ifdef TARGET_NR_utime
7707 case TARGET_NR_utime
:
7709 struct utimbuf tbuf
, *host_tbuf
;
7710 struct target_utimbuf
*target_tbuf
;
7712 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
7714 tbuf
.actime
= tswapal(target_tbuf
->actime
);
7715 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
7716 unlock_user_struct(target_tbuf
, arg2
, 0);
7721 if (!(p
= lock_user_string(arg1
)))
7723 ret
= get_errno(utime(p
, host_tbuf
));
7724 unlock_user(p
, arg1
, 0);
7728 #ifdef TARGET_NR_utimes
7729 case TARGET_NR_utimes
:
7731 struct timeval
*tvp
, tv
[2];
7733 if (copy_from_user_timeval(&tv
[0], arg2
)
7734 || copy_from_user_timeval(&tv
[1],
7735 arg2
+ sizeof(struct target_timeval
)))
7741 if (!(p
= lock_user_string(arg1
)))
7743 ret
= get_errno(utimes(p
, tvp
));
7744 unlock_user(p
, arg1
, 0);
7748 #if defined(TARGET_NR_futimesat)
7749 case TARGET_NR_futimesat
:
7751 struct timeval
*tvp
, tv
[2];
7753 if (copy_from_user_timeval(&tv
[0], arg3
)
7754 || copy_from_user_timeval(&tv
[1],
7755 arg3
+ sizeof(struct target_timeval
)))
7761 if (!(p
= lock_user_string(arg2
)))
7763 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
7764 unlock_user(p
, arg2
, 0);
7768 #ifdef TARGET_NR_stty
7769 case TARGET_NR_stty
:
7772 #ifdef TARGET_NR_gtty
7773 case TARGET_NR_gtty
:
7776 #ifdef TARGET_NR_access
7777 case TARGET_NR_access
:
7778 if (!(p
= lock_user_string(arg1
)))
7780 ret
= get_errno(access(path(p
), arg2
));
7781 unlock_user(p
, arg1
, 0);
7784 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7785 case TARGET_NR_faccessat
:
7786 if (!(p
= lock_user_string(arg2
)))
7788 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
7789 unlock_user(p
, arg2
, 0);
7792 #ifdef TARGET_NR_nice /* not on alpha */
7793 case TARGET_NR_nice
:
7794 ret
= get_errno(nice(arg1
));
7797 #ifdef TARGET_NR_ftime
7798 case TARGET_NR_ftime
:
7801 case TARGET_NR_sync
:
7805 case TARGET_NR_kill
:
7806 ret
= get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
7808 #ifdef TARGET_NR_rename
7809 case TARGET_NR_rename
:
7812 p
= lock_user_string(arg1
);
7813 p2
= lock_user_string(arg2
);
7815 ret
= -TARGET_EFAULT
;
7817 ret
= get_errno(rename(p
, p2
));
7818 unlock_user(p2
, arg2
, 0);
7819 unlock_user(p
, arg1
, 0);
7823 #if defined(TARGET_NR_renameat)
7824 case TARGET_NR_renameat
:
7827 p
= lock_user_string(arg2
);
7828 p2
= lock_user_string(arg4
);
7830 ret
= -TARGET_EFAULT
;
7832 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
7833 unlock_user(p2
, arg4
, 0);
7834 unlock_user(p
, arg2
, 0);
7838 #ifdef TARGET_NR_mkdir
7839 case TARGET_NR_mkdir
:
7840 if (!(p
= lock_user_string(arg1
)))
7842 ret
= get_errno(mkdir(p
, arg2
));
7843 unlock_user(p
, arg1
, 0);
7846 #if defined(TARGET_NR_mkdirat)
7847 case TARGET_NR_mkdirat
:
7848 if (!(p
= lock_user_string(arg2
)))
7850 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
7851 unlock_user(p
, arg2
, 0);
7854 #ifdef TARGET_NR_rmdir
7855 case TARGET_NR_rmdir
:
7856 if (!(p
= lock_user_string(arg1
)))
7858 ret
= get_errno(rmdir(p
));
7859 unlock_user(p
, arg1
, 0);
7863 ret
= get_errno(dup(arg1
));
7865 fd_trans_dup(arg1
, ret
);
7868 #ifdef TARGET_NR_pipe
7869 case TARGET_NR_pipe
:
7870 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
7873 #ifdef TARGET_NR_pipe2
7874 case TARGET_NR_pipe2
:
7875 ret
= do_pipe(cpu_env
, arg1
,
7876 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
7879 case TARGET_NR_times
:
7881 struct target_tms
*tmsp
;
7883 ret
= get_errno(times(&tms
));
7885 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
7888 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
7889 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
7890 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
7891 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
7894 ret
= host_to_target_clock_t(ret
);
7897 #ifdef TARGET_NR_prof
7898 case TARGET_NR_prof
:
7901 #ifdef TARGET_NR_signal
7902 case TARGET_NR_signal
:
7905 case TARGET_NR_acct
:
7907 ret
= get_errno(acct(NULL
));
7909 if (!(p
= lock_user_string(arg1
)))
7911 ret
= get_errno(acct(path(p
)));
7912 unlock_user(p
, arg1
, 0);
7915 #ifdef TARGET_NR_umount2
7916 case TARGET_NR_umount2
:
7917 if (!(p
= lock_user_string(arg1
)))
7919 ret
= get_errno(umount2(p
, arg2
));
7920 unlock_user(p
, arg1
, 0);
7923 #ifdef TARGET_NR_lock
7924 case TARGET_NR_lock
:
7927 case TARGET_NR_ioctl
:
7928 ret
= do_ioctl(arg1
, arg2
, arg3
);
7930 case TARGET_NR_fcntl
:
7931 ret
= do_fcntl(arg1
, arg2
, arg3
);
7933 #ifdef TARGET_NR_mpx
7937 case TARGET_NR_setpgid
:
7938 ret
= get_errno(setpgid(arg1
, arg2
));
7940 #ifdef TARGET_NR_ulimit
7941 case TARGET_NR_ulimit
:
7944 #ifdef TARGET_NR_oldolduname
7945 case TARGET_NR_oldolduname
:
7948 case TARGET_NR_umask
:
7949 ret
= get_errno(umask(arg1
));
7951 case TARGET_NR_chroot
:
7952 if (!(p
= lock_user_string(arg1
)))
7954 ret
= get_errno(chroot(p
));
7955 unlock_user(p
, arg1
, 0);
7957 #ifdef TARGET_NR_ustat
7958 case TARGET_NR_ustat
:
7961 #ifdef TARGET_NR_dup2
7962 case TARGET_NR_dup2
:
7963 ret
= get_errno(dup2(arg1
, arg2
));
7965 fd_trans_dup(arg1
, arg2
);
7969 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7970 case TARGET_NR_dup3
:
7971 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
7973 fd_trans_dup(arg1
, arg2
);
7977 #ifdef TARGET_NR_getppid /* not on alpha */
7978 case TARGET_NR_getppid
:
7979 ret
= get_errno(getppid());
7982 #ifdef TARGET_NR_getpgrp
7983 case TARGET_NR_getpgrp
:
7984 ret
= get_errno(getpgrp());
7987 case TARGET_NR_setsid
:
7988 ret
= get_errno(setsid());
7990 #ifdef TARGET_NR_sigaction
7991 case TARGET_NR_sigaction
:
7993 #if defined(TARGET_ALPHA)
7994 struct target_sigaction act
, oact
, *pact
= 0;
7995 struct target_old_sigaction
*old_act
;
7997 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
7999 act
._sa_handler
= old_act
->_sa_handler
;
8000 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8001 act
.sa_flags
= old_act
->sa_flags
;
8002 act
.sa_restorer
= 0;
8003 unlock_user_struct(old_act
, arg2
, 0);
8006 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8007 if (!is_error(ret
) && arg3
) {
8008 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8010 old_act
->_sa_handler
= oact
._sa_handler
;
8011 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8012 old_act
->sa_flags
= oact
.sa_flags
;
8013 unlock_user_struct(old_act
, arg3
, 1);
8015 #elif defined(TARGET_MIPS)
8016 struct target_sigaction act
, oact
, *pact
, *old_act
;
8019 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8021 act
._sa_handler
= old_act
->_sa_handler
;
8022 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
8023 act
.sa_flags
= old_act
->sa_flags
;
8024 unlock_user_struct(old_act
, arg2
, 0);
8030 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8032 if (!is_error(ret
) && arg3
) {
8033 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8035 old_act
->_sa_handler
= oact
._sa_handler
;
8036 old_act
->sa_flags
= oact
.sa_flags
;
8037 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
8038 old_act
->sa_mask
.sig
[1] = 0;
8039 old_act
->sa_mask
.sig
[2] = 0;
8040 old_act
->sa_mask
.sig
[3] = 0;
8041 unlock_user_struct(old_act
, arg3
, 1);
8044 struct target_old_sigaction
*old_act
;
8045 struct target_sigaction act
, oact
, *pact
;
8047 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8049 act
._sa_handler
= old_act
->_sa_handler
;
8050 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8051 act
.sa_flags
= old_act
->sa_flags
;
8052 act
.sa_restorer
= old_act
->sa_restorer
;
8053 unlock_user_struct(old_act
, arg2
, 0);
8058 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8059 if (!is_error(ret
) && arg3
) {
8060 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8062 old_act
->_sa_handler
= oact
._sa_handler
;
8063 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8064 old_act
->sa_flags
= oact
.sa_flags
;
8065 old_act
->sa_restorer
= oact
.sa_restorer
;
8066 unlock_user_struct(old_act
, arg3
, 1);
8072 case TARGET_NR_rt_sigaction
:
8074 #if defined(TARGET_ALPHA)
8075 struct target_sigaction act
, oact
, *pact
= 0;
8076 struct target_rt_sigaction
*rt_act
;
8078 if (arg4
!= sizeof(target_sigset_t
)) {
8079 ret
= -TARGET_EINVAL
;
8083 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
8085 act
._sa_handler
= rt_act
->_sa_handler
;
8086 act
.sa_mask
= rt_act
->sa_mask
;
8087 act
.sa_flags
= rt_act
->sa_flags
;
8088 act
.sa_restorer
= arg5
;
8089 unlock_user_struct(rt_act
, arg2
, 0);
8092 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8093 if (!is_error(ret
) && arg3
) {
8094 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
8096 rt_act
->_sa_handler
= oact
._sa_handler
;
8097 rt_act
->sa_mask
= oact
.sa_mask
;
8098 rt_act
->sa_flags
= oact
.sa_flags
;
8099 unlock_user_struct(rt_act
, arg3
, 1);
8102 struct target_sigaction
*act
;
8103 struct target_sigaction
*oact
;
8105 if (arg4
!= sizeof(target_sigset_t
)) {
8106 ret
= -TARGET_EINVAL
;
8110 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
8115 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
8116 ret
= -TARGET_EFAULT
;
8117 goto rt_sigaction_fail
;
8121 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
8124 unlock_user_struct(act
, arg2
, 0);
8126 unlock_user_struct(oact
, arg3
, 1);
8130 #ifdef TARGET_NR_sgetmask /* not on alpha */
8131 case TARGET_NR_sgetmask
:
8134 abi_ulong target_set
;
8135 ret
= do_sigprocmask(0, NULL
, &cur_set
);
8137 host_to_target_old_sigset(&target_set
, &cur_set
);
8143 #ifdef TARGET_NR_ssetmask /* not on alpha */
8144 case TARGET_NR_ssetmask
:
8146 sigset_t set
, oset
, cur_set
;
8147 abi_ulong target_set
= arg1
;
8148 /* We only have one word of the new mask so we must read
8149 * the rest of it with do_sigprocmask() and OR in this word.
8150 * We are guaranteed that a do_sigprocmask() that only queries
8151 * the signal mask will not fail.
8153 ret
= do_sigprocmask(0, NULL
, &cur_set
);
8155 target_to_host_old_sigset(&set
, &target_set
);
8156 sigorset(&set
, &set
, &cur_set
);
8157 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
8159 host_to_target_old_sigset(&target_set
, &oset
);
8165 #ifdef TARGET_NR_sigprocmask
8166 case TARGET_NR_sigprocmask
:
8168 #if defined(TARGET_ALPHA)
8169 sigset_t set
, oldset
;
8174 case TARGET_SIG_BLOCK
:
8177 case TARGET_SIG_UNBLOCK
:
8180 case TARGET_SIG_SETMASK
:
8184 ret
= -TARGET_EINVAL
;
8188 target_to_host_old_sigset(&set
, &mask
);
8190 ret
= do_sigprocmask(how
, &set
, &oldset
);
8191 if (!is_error(ret
)) {
8192 host_to_target_old_sigset(&mask
, &oldset
);
8194 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
8197 sigset_t set
, oldset
, *set_ptr
;
8202 case TARGET_SIG_BLOCK
:
8205 case TARGET_SIG_UNBLOCK
:
8208 case TARGET_SIG_SETMASK
:
8212 ret
= -TARGET_EINVAL
;
8215 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8217 target_to_host_old_sigset(&set
, p
);
8218 unlock_user(p
, arg2
, 0);
8224 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8225 if (!is_error(ret
) && arg3
) {
8226 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8228 host_to_target_old_sigset(p
, &oldset
);
8229 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8235 case TARGET_NR_rt_sigprocmask
:
8238 sigset_t set
, oldset
, *set_ptr
;
8240 if (arg4
!= sizeof(target_sigset_t
)) {
8241 ret
= -TARGET_EINVAL
;
8247 case TARGET_SIG_BLOCK
:
8250 case TARGET_SIG_UNBLOCK
:
8253 case TARGET_SIG_SETMASK
:
8257 ret
= -TARGET_EINVAL
;
8260 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8262 target_to_host_sigset(&set
, p
);
8263 unlock_user(p
, arg2
, 0);
8269 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8270 if (!is_error(ret
) && arg3
) {
8271 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8273 host_to_target_sigset(p
, &oldset
);
8274 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8278 #ifdef TARGET_NR_sigpending
8279 case TARGET_NR_sigpending
:
8282 ret
= get_errno(sigpending(&set
));
8283 if (!is_error(ret
)) {
8284 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8286 host_to_target_old_sigset(p
, &set
);
8287 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8292 case TARGET_NR_rt_sigpending
:
8296 /* Yes, this check is >, not != like most. We follow the kernel's
8297 * logic and it does it like this because it implements
8298 * NR_sigpending through the same code path, and in that case
8299 * the old_sigset_t is smaller in size.
8301 if (arg2
> sizeof(target_sigset_t
)) {
8302 ret
= -TARGET_EINVAL
;
8306 ret
= get_errno(sigpending(&set
));
8307 if (!is_error(ret
)) {
8308 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8310 host_to_target_sigset(p
, &set
);
8311 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8315 #ifdef TARGET_NR_sigsuspend
8316 case TARGET_NR_sigsuspend
:
8318 TaskState
*ts
= cpu
->opaque
;
8319 #if defined(TARGET_ALPHA)
8320 abi_ulong mask
= arg1
;
8321 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
8323 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8325 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
8326 unlock_user(p
, arg1
, 0);
8328 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8330 if (ret
!= -TARGET_ERESTARTSYS
) {
8331 ts
->in_sigsuspend
= 1;
8336 case TARGET_NR_rt_sigsuspend
:
8338 TaskState
*ts
= cpu
->opaque
;
8340 if (arg2
!= sizeof(target_sigset_t
)) {
8341 ret
= -TARGET_EINVAL
;
8344 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8346 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
8347 unlock_user(p
, arg1
, 0);
8348 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8350 if (ret
!= -TARGET_ERESTARTSYS
) {
8351 ts
->in_sigsuspend
= 1;
8355 case TARGET_NR_rt_sigtimedwait
:
8358 struct timespec uts
, *puts
;
8361 if (arg4
!= sizeof(target_sigset_t
)) {
8362 ret
= -TARGET_EINVAL
;
8366 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8368 target_to_host_sigset(&set
, p
);
8369 unlock_user(p
, arg1
, 0);
8372 target_to_host_timespec(puts
, arg3
);
8376 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
8378 if (!is_error(ret
)) {
8380 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
8385 host_to_target_siginfo(p
, &uinfo
);
8386 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
8388 ret
= host_to_target_signal(ret
);
8392 case TARGET_NR_rt_sigqueueinfo
:
8396 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
8400 target_to_host_siginfo(&uinfo
, p
);
8401 unlock_user(p
, arg1
, 0);
8402 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
8405 #ifdef TARGET_NR_sigreturn
8406 case TARGET_NR_sigreturn
:
8407 if (block_signals()) {
8408 ret
= -TARGET_ERESTARTSYS
;
8410 ret
= do_sigreturn(cpu_env
);
8414 case TARGET_NR_rt_sigreturn
:
8415 if (block_signals()) {
8416 ret
= -TARGET_ERESTARTSYS
;
8418 ret
= do_rt_sigreturn(cpu_env
);
8421 case TARGET_NR_sethostname
:
8422 if (!(p
= lock_user_string(arg1
)))
8424 ret
= get_errno(sethostname(p
, arg2
));
8425 unlock_user(p
, arg1
, 0);
8427 case TARGET_NR_setrlimit
:
8429 int resource
= target_to_host_resource(arg1
);
8430 struct target_rlimit
*target_rlim
;
8432 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
8434 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
8435 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
8436 unlock_user_struct(target_rlim
, arg2
, 0);
8437 ret
= get_errno(setrlimit(resource
, &rlim
));
8440 case TARGET_NR_getrlimit
:
8442 int resource
= target_to_host_resource(arg1
);
8443 struct target_rlimit
*target_rlim
;
8446 ret
= get_errno(getrlimit(resource
, &rlim
));
8447 if (!is_error(ret
)) {
8448 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
8450 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
8451 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
8452 unlock_user_struct(target_rlim
, arg2
, 1);
8456 case TARGET_NR_getrusage
:
8458 struct rusage rusage
;
8459 ret
= get_errno(getrusage(arg1
, &rusage
));
8460 if (!is_error(ret
)) {
8461 ret
= host_to_target_rusage(arg2
, &rusage
);
8465 case TARGET_NR_gettimeofday
:
8468 ret
= get_errno(gettimeofday(&tv
, NULL
));
8469 if (!is_error(ret
)) {
8470 if (copy_to_user_timeval(arg1
, &tv
))
8475 case TARGET_NR_settimeofday
:
8477 struct timeval tv
, *ptv
= NULL
;
8478 struct timezone tz
, *ptz
= NULL
;
8481 if (copy_from_user_timeval(&tv
, arg1
)) {
8488 if (copy_from_user_timezone(&tz
, arg2
)) {
8494 ret
= get_errno(settimeofday(ptv
, ptz
));
8497 #if defined(TARGET_NR_select)
8498 case TARGET_NR_select
:
8499 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
8500 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
8503 struct target_sel_arg_struct
*sel
;
8504 abi_ulong inp
, outp
, exp
, tvp
;
8507 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1))
8509 nsel
= tswapal(sel
->n
);
8510 inp
= tswapal(sel
->inp
);
8511 outp
= tswapal(sel
->outp
);
8512 exp
= tswapal(sel
->exp
);
8513 tvp
= tswapal(sel
->tvp
);
8514 unlock_user_struct(sel
, arg1
, 0);
8515 ret
= do_select(nsel
, inp
, outp
, exp
, tvp
);
8520 #ifdef TARGET_NR_pselect6
8521 case TARGET_NR_pselect6
:
8523 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
8524 fd_set rfds
, wfds
, efds
;
8525 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
8526 struct timespec ts
, *ts_ptr
;
8529 * The 6th arg is actually two args smashed together,
8530 * so we cannot use the C library.
8538 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
8539 target_sigset_t
*target_sigset
;
8547 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
8551 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
8555 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
8561 * This takes a timespec, and not a timeval, so we cannot
8562 * use the do_select() helper ...
8565 if (target_to_host_timespec(&ts
, ts_addr
)) {
8573 /* Extract the two packed args for the sigset */
8576 sig
.size
= SIGSET_T_SIZE
;
8578 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
8582 arg_sigset
= tswapal(arg7
[0]);
8583 arg_sigsize
= tswapal(arg7
[1]);
8584 unlock_user(arg7
, arg6
, 0);
8588 if (arg_sigsize
!= sizeof(*target_sigset
)) {
8589 /* Like the kernel, we enforce correct size sigsets */
8590 ret
= -TARGET_EINVAL
;
8593 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
8594 sizeof(*target_sigset
), 1);
8595 if (!target_sigset
) {
8598 target_to_host_sigset(&set
, target_sigset
);
8599 unlock_user(target_sigset
, arg_sigset
, 0);
8607 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
8610 if (!is_error(ret
)) {
8611 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
8613 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
8615 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
8618 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
8624 #ifdef TARGET_NR_symlink
8625 case TARGET_NR_symlink
:
8628 p
= lock_user_string(arg1
);
8629 p2
= lock_user_string(arg2
);
8631 ret
= -TARGET_EFAULT
;
8633 ret
= get_errno(symlink(p
, p2
));
8634 unlock_user(p2
, arg2
, 0);
8635 unlock_user(p
, arg1
, 0);
8639 #if defined(TARGET_NR_symlinkat)
8640 case TARGET_NR_symlinkat
:
8643 p
= lock_user_string(arg1
);
8644 p2
= lock_user_string(arg3
);
8646 ret
= -TARGET_EFAULT
;
8648 ret
= get_errno(symlinkat(p
, arg2
, p2
));
8649 unlock_user(p2
, arg3
, 0);
8650 unlock_user(p
, arg1
, 0);
8654 #ifdef TARGET_NR_oldlstat
8655 case TARGET_NR_oldlstat
:
8658 #ifdef TARGET_NR_readlink
8659 case TARGET_NR_readlink
:
8662 p
= lock_user_string(arg1
);
8663 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8665 ret
= -TARGET_EFAULT
;
8667 /* Short circuit this for the magic exe check. */
8668 ret
= -TARGET_EINVAL
;
8669 } else if (is_proc_myself((const char *)p
, "exe")) {
8670 char real
[PATH_MAX
], *temp
;
8671 temp
= realpath(exec_path
, real
);
8672 /* Return value is # of bytes that we wrote to the buffer. */
8674 ret
= get_errno(-1);
8676 /* Don't worry about sign mismatch as earlier mapping
8677 * logic would have thrown a bad address error. */
8678 ret
= MIN(strlen(real
), arg3
);
8679 /* We cannot NUL terminate the string. */
8680 memcpy(p2
, real
, ret
);
8683 ret
= get_errno(readlink(path(p
), p2
, arg3
));
8685 unlock_user(p2
, arg2
, ret
);
8686 unlock_user(p
, arg1
, 0);
8690 #if defined(TARGET_NR_readlinkat)
8691 case TARGET_NR_readlinkat
:
8694 p
= lock_user_string(arg2
);
8695 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
8697 ret
= -TARGET_EFAULT
;
8698 } else if (is_proc_myself((const char *)p
, "exe")) {
8699 char real
[PATH_MAX
], *temp
;
8700 temp
= realpath(exec_path
, real
);
8701 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
8702 snprintf((char *)p2
, arg4
, "%s", real
);
8704 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
8706 unlock_user(p2
, arg3
, ret
);
8707 unlock_user(p
, arg2
, 0);
8711 #ifdef TARGET_NR_uselib
8712 case TARGET_NR_uselib
:
8715 #ifdef TARGET_NR_swapon
8716 case TARGET_NR_swapon
:
8717 if (!(p
= lock_user_string(arg1
)))
8719 ret
= get_errno(swapon(p
, arg2
));
8720 unlock_user(p
, arg1
, 0);
8723 case TARGET_NR_reboot
:
8724 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
8725 /* arg4 must be ignored in all other cases */
8726 p
= lock_user_string(arg4
);
8730 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
8731 unlock_user(p
, arg4
, 0);
8733 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
8736 #ifdef TARGET_NR_readdir
8737 case TARGET_NR_readdir
:
8740 #ifdef TARGET_NR_mmap
8741 case TARGET_NR_mmap
:
8742 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8743 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8744 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8745 || defined(TARGET_S390X)
8748 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
8749 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
8757 unlock_user(v
, arg1
, 0);
8758 ret
= get_errno(target_mmap(v1
, v2
, v3
,
8759 target_to_host_bitmask(v4
, mmap_flags_tbl
),
8763 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
8764 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
8770 #ifdef TARGET_NR_mmap2
8771 case TARGET_NR_mmap2
:
8773 #define MMAP_SHIFT 12
8775 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
8776 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
8778 arg6
<< MMAP_SHIFT
));
8781 case TARGET_NR_munmap
:
8782 ret
= get_errno(target_munmap(arg1
, arg2
));
8784 case TARGET_NR_mprotect
:
8786 TaskState
*ts
= cpu
->opaque
;
8787 /* Special hack to detect libc making the stack executable. */
8788 if ((arg3
& PROT_GROWSDOWN
)
8789 && arg1
>= ts
->info
->stack_limit
8790 && arg1
<= ts
->info
->start_stack
) {
8791 arg3
&= ~PROT_GROWSDOWN
;
8792 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
8793 arg1
= ts
->info
->stack_limit
;
8796 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
8798 #ifdef TARGET_NR_mremap
8799 case TARGET_NR_mremap
:
8800 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
8803 /* ??? msync/mlock/munlock are broken for softmmu. */
8804 #ifdef TARGET_NR_msync
8805 case TARGET_NR_msync
:
8806 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
8809 #ifdef TARGET_NR_mlock
8810 case TARGET_NR_mlock
:
8811 ret
= get_errno(mlock(g2h(arg1
), arg2
));
8814 #ifdef TARGET_NR_munlock
8815 case TARGET_NR_munlock
:
8816 ret
= get_errno(munlock(g2h(arg1
), arg2
));
8819 #ifdef TARGET_NR_mlockall
8820 case TARGET_NR_mlockall
:
8821 ret
= get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
8824 #ifdef TARGET_NR_munlockall
8825 case TARGET_NR_munlockall
:
8826 ret
= get_errno(munlockall());
8829 case TARGET_NR_truncate
:
8830 if (!(p
= lock_user_string(arg1
)))
8832 ret
= get_errno(truncate(p
, arg2
));
8833 unlock_user(p
, arg1
, 0);
8835 case TARGET_NR_ftruncate
:
8836 ret
= get_errno(ftruncate(arg1
, arg2
));
8838 case TARGET_NR_fchmod
:
8839 ret
= get_errno(fchmod(arg1
, arg2
));
8841 #if defined(TARGET_NR_fchmodat)
8842 case TARGET_NR_fchmodat
:
8843 if (!(p
= lock_user_string(arg2
)))
8845 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
8846 unlock_user(p
, arg2
, 0);
8849 case TARGET_NR_getpriority
:
8850 /* Note that negative values are valid for getpriority, so we must
8851 differentiate based on errno settings. */
8853 ret
= getpriority(arg1
, arg2
);
8854 if (ret
== -1 && errno
!= 0) {
8855 ret
= -host_to_target_errno(errno
);
8859 /* Return value is the unbiased priority. Signal no error. */
8860 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
8862 /* Return value is a biased priority to avoid negative numbers. */
8866 case TARGET_NR_setpriority
:
8867 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
8869 #ifdef TARGET_NR_profil
8870 case TARGET_NR_profil
:
8873 case TARGET_NR_statfs
:
8874 if (!(p
= lock_user_string(arg1
)))
8876 ret
= get_errno(statfs(path(p
), &stfs
));
8877 unlock_user(p
, arg1
, 0);
8879 if (!is_error(ret
)) {
8880 struct target_statfs
*target_stfs
;
8882 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
8884 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
8885 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
8886 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
8887 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
8888 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
8889 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
8890 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
8891 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
8892 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
8893 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
8894 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
8895 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
8896 unlock_user_struct(target_stfs
, arg2
, 1);
8899 case TARGET_NR_fstatfs
:
8900 ret
= get_errno(fstatfs(arg1
, &stfs
));
8901 goto convert_statfs
;
8902 #ifdef TARGET_NR_statfs64
8903 case TARGET_NR_statfs64
:
8904 if (!(p
= lock_user_string(arg1
)))
8906 ret
= get_errno(statfs(path(p
), &stfs
));
8907 unlock_user(p
, arg1
, 0);
8909 if (!is_error(ret
)) {
8910 struct target_statfs64
*target_stfs
;
8912 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
8914 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
8915 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
8916 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
8917 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
8918 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
8919 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
8920 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
8921 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
8922 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
8923 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
8924 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
8925 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
8926 unlock_user_struct(target_stfs
, arg3
, 1);
8929 case TARGET_NR_fstatfs64
:
8930 ret
= get_errno(fstatfs(arg1
, &stfs
));
8931 goto convert_statfs64
;
8933 #ifdef TARGET_NR_ioperm
8934 case TARGET_NR_ioperm
:
8937 #ifdef TARGET_NR_socketcall
8938 case TARGET_NR_socketcall
:
8939 ret
= do_socketcall(arg1
, arg2
);
8942 #ifdef TARGET_NR_accept
8943 case TARGET_NR_accept
:
8944 ret
= do_accept4(arg1
, arg2
, arg3
, 0);
8947 #ifdef TARGET_NR_accept4
8948 case TARGET_NR_accept4
:
8949 ret
= do_accept4(arg1
, arg2
, arg3
, arg4
);
8952 #ifdef TARGET_NR_bind
8953 case TARGET_NR_bind
:
8954 ret
= do_bind(arg1
, arg2
, arg3
);
8957 #ifdef TARGET_NR_connect
8958 case TARGET_NR_connect
:
8959 ret
= do_connect(arg1
, arg2
, arg3
);
8962 #ifdef TARGET_NR_getpeername
8963 case TARGET_NR_getpeername
:
8964 ret
= do_getpeername(arg1
, arg2
, arg3
);
8967 #ifdef TARGET_NR_getsockname
8968 case TARGET_NR_getsockname
:
8969 ret
= do_getsockname(arg1
, arg2
, arg3
);
8972 #ifdef TARGET_NR_getsockopt
8973 case TARGET_NR_getsockopt
:
8974 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
8977 #ifdef TARGET_NR_listen
8978 case TARGET_NR_listen
:
8979 ret
= get_errno(listen(arg1
, arg2
));
8982 #ifdef TARGET_NR_recv
8983 case TARGET_NR_recv
:
8984 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
8987 #ifdef TARGET_NR_recvfrom
8988 case TARGET_NR_recvfrom
:
8989 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8992 #ifdef TARGET_NR_recvmsg
8993 case TARGET_NR_recvmsg
:
8994 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
8997 #ifdef TARGET_NR_send
8998 case TARGET_NR_send
:
8999 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
9002 #ifdef TARGET_NR_sendmsg
9003 case TARGET_NR_sendmsg
:
9004 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
9007 #ifdef TARGET_NR_sendmmsg
9008 case TARGET_NR_sendmmsg
:
9009 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
9011 case TARGET_NR_recvmmsg
:
9012 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
9015 #ifdef TARGET_NR_sendto
9016 case TARGET_NR_sendto
:
9017 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9020 #ifdef TARGET_NR_shutdown
9021 case TARGET_NR_shutdown
:
9022 ret
= get_errno(shutdown(arg1
, arg2
));
9025 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9026 case TARGET_NR_getrandom
:
9027 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
9031 ret
= get_errno(getrandom(p
, arg2
, arg3
));
9032 unlock_user(p
, arg1
, ret
);
9035 #ifdef TARGET_NR_socket
9036 case TARGET_NR_socket
:
9037 ret
= do_socket(arg1
, arg2
, arg3
);
9038 fd_trans_unregister(ret
);
9041 #ifdef TARGET_NR_socketpair
9042 case TARGET_NR_socketpair
:
9043 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
9046 #ifdef TARGET_NR_setsockopt
9047 case TARGET_NR_setsockopt
:
9048 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
9052 case TARGET_NR_syslog
:
9053 if (!(p
= lock_user_string(arg2
)))
9055 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
9056 unlock_user(p
, arg2
, 0);
9059 case TARGET_NR_setitimer
:
9061 struct itimerval value
, ovalue
, *pvalue
;
9065 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
9066 || copy_from_user_timeval(&pvalue
->it_value
,
9067 arg2
+ sizeof(struct target_timeval
)))
9072 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
9073 if (!is_error(ret
) && arg3
) {
9074 if (copy_to_user_timeval(arg3
,
9075 &ovalue
.it_interval
)
9076 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
9082 case TARGET_NR_getitimer
:
9084 struct itimerval value
;
9086 ret
= get_errno(getitimer(arg1
, &value
));
9087 if (!is_error(ret
) && arg2
) {
9088 if (copy_to_user_timeval(arg2
,
9090 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
9096 #ifdef TARGET_NR_stat
9097 case TARGET_NR_stat
:
9098 if (!(p
= lock_user_string(arg1
)))
9100 ret
= get_errno(stat(path(p
), &st
));
9101 unlock_user(p
, arg1
, 0);
9104 #ifdef TARGET_NR_lstat
9105 case TARGET_NR_lstat
:
9106 if (!(p
= lock_user_string(arg1
)))
9108 ret
= get_errno(lstat(path(p
), &st
));
9109 unlock_user(p
, arg1
, 0);
9112 case TARGET_NR_fstat
:
9114 ret
= get_errno(fstat(arg1
, &st
));
9115 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9118 if (!is_error(ret
)) {
9119 struct target_stat
*target_st
;
9121 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
9123 memset(target_st
, 0, sizeof(*target_st
));
9124 __put_user(st
.st_dev
, &target_st
->st_dev
);
9125 __put_user(st
.st_ino
, &target_st
->st_ino
);
9126 __put_user(st
.st_mode
, &target_st
->st_mode
);
9127 __put_user(st
.st_uid
, &target_st
->st_uid
);
9128 __put_user(st
.st_gid
, &target_st
->st_gid
);
9129 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
9130 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
9131 __put_user(st
.st_size
, &target_st
->st_size
);
9132 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
9133 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
9134 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
9135 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
9136 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
9137 unlock_user_struct(target_st
, arg2
, 1);
9141 #ifdef TARGET_NR_olduname
9142 case TARGET_NR_olduname
:
9145 #ifdef TARGET_NR_iopl
9146 case TARGET_NR_iopl
:
9149 case TARGET_NR_vhangup
:
9150 ret
= get_errno(vhangup());
9152 #ifdef TARGET_NR_idle
9153 case TARGET_NR_idle
:
9156 #ifdef TARGET_NR_syscall
9157 case TARGET_NR_syscall
:
9158 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
9159 arg6
, arg7
, arg8
, 0);
9162 case TARGET_NR_wait4
:
9165 abi_long status_ptr
= arg2
;
9166 struct rusage rusage
, *rusage_ptr
;
9167 abi_ulong target_rusage
= arg4
;
9168 abi_long rusage_err
;
9170 rusage_ptr
= &rusage
;
9173 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
9174 if (!is_error(ret
)) {
9175 if (status_ptr
&& ret
) {
9176 status
= host_to_target_waitstatus(status
);
9177 if (put_user_s32(status
, status_ptr
))
9180 if (target_rusage
) {
9181 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
9189 #ifdef TARGET_NR_swapoff
9190 case TARGET_NR_swapoff
:
9191 if (!(p
= lock_user_string(arg1
)))
9193 ret
= get_errno(swapoff(p
));
9194 unlock_user(p
, arg1
, 0);
9197 case TARGET_NR_sysinfo
:
9199 struct target_sysinfo
*target_value
;
9200 struct sysinfo value
;
9201 ret
= get_errno(sysinfo(&value
));
9202 if (!is_error(ret
) && arg1
)
9204 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
9206 __put_user(value
.uptime
, &target_value
->uptime
);
9207 __put_user(value
.loads
[0], &target_value
->loads
[0]);
9208 __put_user(value
.loads
[1], &target_value
->loads
[1]);
9209 __put_user(value
.loads
[2], &target_value
->loads
[2]);
9210 __put_user(value
.totalram
, &target_value
->totalram
);
9211 __put_user(value
.freeram
, &target_value
->freeram
);
9212 __put_user(value
.sharedram
, &target_value
->sharedram
);
9213 __put_user(value
.bufferram
, &target_value
->bufferram
);
9214 __put_user(value
.totalswap
, &target_value
->totalswap
);
9215 __put_user(value
.freeswap
, &target_value
->freeswap
);
9216 __put_user(value
.procs
, &target_value
->procs
);
9217 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
9218 __put_user(value
.freehigh
, &target_value
->freehigh
);
9219 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
9220 unlock_user_struct(target_value
, arg1
, 1);
9224 #ifdef TARGET_NR_ipc
9226 ret
= do_ipc(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9229 #ifdef TARGET_NR_semget
9230 case TARGET_NR_semget
:
9231 ret
= get_errno(semget(arg1
, arg2
, arg3
));
9234 #ifdef TARGET_NR_semop
9235 case TARGET_NR_semop
:
9236 ret
= do_semop(arg1
, arg2
, arg3
);
9239 #ifdef TARGET_NR_semctl
9240 case TARGET_NR_semctl
:
9241 ret
= do_semctl(arg1
, arg2
, arg3
, arg4
);
9244 #ifdef TARGET_NR_msgctl
9245 case TARGET_NR_msgctl
:
9246 ret
= do_msgctl(arg1
, arg2
, arg3
);
9249 #ifdef TARGET_NR_msgget
9250 case TARGET_NR_msgget
:
9251 ret
= get_errno(msgget(arg1
, arg2
));
9254 #ifdef TARGET_NR_msgrcv
9255 case TARGET_NR_msgrcv
:
9256 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
9259 #ifdef TARGET_NR_msgsnd
9260 case TARGET_NR_msgsnd
:
9261 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
9264 #ifdef TARGET_NR_shmget
9265 case TARGET_NR_shmget
:
9266 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
9269 #ifdef TARGET_NR_shmctl
9270 case TARGET_NR_shmctl
:
9271 ret
= do_shmctl(arg1
, arg2
, arg3
);
9274 #ifdef TARGET_NR_shmat
9275 case TARGET_NR_shmat
:
9276 ret
= do_shmat(arg1
, arg2
, arg3
);
9279 #ifdef TARGET_NR_shmdt
9280 case TARGET_NR_shmdt
:
9281 ret
= do_shmdt(arg1
);
9284 case TARGET_NR_fsync
:
9285 ret
= get_errno(fsync(arg1
));
9287 case TARGET_NR_clone
:
9288 /* Linux manages to have three different orderings for its
9289 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9290 * match the kernel's CONFIG_CLONE_* settings.
9291 * Microblaze is further special in that it uses a sixth
9292 * implicit argument to clone for the TLS pointer.
9294 #if defined(TARGET_MICROBLAZE)
9295 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
9296 #elif defined(TARGET_CLONE_BACKWARDS)
9297 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
9298 #elif defined(TARGET_CLONE_BACKWARDS2)
9299 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
9301 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
9304 #ifdef __NR_exit_group
9305 /* new thread calls */
9306 case TARGET_NR_exit_group
:
9310 gdb_exit(cpu_env
, arg1
);
9311 ret
= get_errno(exit_group(arg1
));
9314 case TARGET_NR_setdomainname
:
9315 if (!(p
= lock_user_string(arg1
)))
9317 ret
= get_errno(setdomainname(p
, arg2
));
9318 unlock_user(p
, arg1
, 0);
9320 case TARGET_NR_uname
:
9321 /* no need to transcode because we use the linux syscall */
9323 struct new_utsname
* buf
;
9325 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
9327 ret
= get_errno(sys_uname(buf
));
9328 if (!is_error(ret
)) {
9329 /* Overwrite the native machine name with whatever is being
9331 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
9332 /* Allow the user to override the reported release. */
9333 if (qemu_uname_release
&& *qemu_uname_release
) {
9334 g_strlcpy(buf
->release
, qemu_uname_release
,
9335 sizeof(buf
->release
));
9338 unlock_user_struct(buf
, arg1
, 1);
9342 case TARGET_NR_modify_ldt
:
9343 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
9345 #if !defined(TARGET_X86_64)
9346 case TARGET_NR_vm86old
:
9348 case TARGET_NR_vm86
:
9349 ret
= do_vm86(cpu_env
, arg1
, arg2
);
9353 case TARGET_NR_adjtimex
:
9355 #ifdef TARGET_NR_create_module
9356 case TARGET_NR_create_module
:
9358 case TARGET_NR_init_module
:
9359 case TARGET_NR_delete_module
:
9360 #ifdef TARGET_NR_get_kernel_syms
9361 case TARGET_NR_get_kernel_syms
:
9364 case TARGET_NR_quotactl
:
9366 case TARGET_NR_getpgid
:
9367 ret
= get_errno(getpgid(arg1
));
9369 case TARGET_NR_fchdir
:
9370 ret
= get_errno(fchdir(arg1
));
9372 #ifdef TARGET_NR_bdflush /* not on x86_64 */
9373 case TARGET_NR_bdflush
:
9376 #ifdef TARGET_NR_sysfs
9377 case TARGET_NR_sysfs
:
9380 case TARGET_NR_personality
:
9381 ret
= get_errno(personality(arg1
));
9383 #ifdef TARGET_NR_afs_syscall
9384 case TARGET_NR_afs_syscall
:
9387 #ifdef TARGET_NR__llseek /* Not on alpha */
9388 case TARGET_NR__llseek
:
9391 #if !defined(__NR_llseek)
9392 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
9394 ret
= get_errno(res
);
9399 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
9401 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
9407 #ifdef TARGET_NR_getdents
9408 case TARGET_NR_getdents
:
9409 #ifdef __NR_getdents
9410 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9412 struct target_dirent
*target_dirp
;
9413 struct linux_dirent
*dirp
;
9414 abi_long count
= arg3
;
9416 dirp
= g_try_malloc(count
);
9418 ret
= -TARGET_ENOMEM
;
9422 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9423 if (!is_error(ret
)) {
9424 struct linux_dirent
*de
;
9425 struct target_dirent
*tde
;
9427 int reclen
, treclen
;
9428 int count1
, tnamelen
;
9432 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9436 reclen
= de
->d_reclen
;
9437 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
9438 assert(tnamelen
>= 0);
9439 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
9440 assert(count1
+ treclen
<= count
);
9441 tde
->d_reclen
= tswap16(treclen
);
9442 tde
->d_ino
= tswapal(de
->d_ino
);
9443 tde
->d_off
= tswapal(de
->d_off
);
9444 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
9445 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9447 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9451 unlock_user(target_dirp
, arg2
, ret
);
9457 struct linux_dirent
*dirp
;
9458 abi_long count
= arg3
;
9460 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9462 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9463 if (!is_error(ret
)) {
9464 struct linux_dirent
*de
;
9469 reclen
= de
->d_reclen
;
9472 de
->d_reclen
= tswap16(reclen
);
9473 tswapls(&de
->d_ino
);
9474 tswapls(&de
->d_off
);
9475 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9479 unlock_user(dirp
, arg2
, ret
);
9483 /* Implement getdents in terms of getdents64 */
9485 struct linux_dirent64
*dirp
;
9486 abi_long count
= arg3
;
9488 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
9492 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9493 if (!is_error(ret
)) {
9494 /* Convert the dirent64 structs to target dirent. We do this
9495 * in-place, since we can guarantee that a target_dirent is no
9496 * larger than a dirent64; however this means we have to be
9497 * careful to read everything before writing in the new format.
9499 struct linux_dirent64
*de
;
9500 struct target_dirent
*tde
;
9505 tde
= (struct target_dirent
*)dirp
;
9507 int namelen
, treclen
;
9508 int reclen
= de
->d_reclen
;
9509 uint64_t ino
= de
->d_ino
;
9510 int64_t off
= de
->d_off
;
9511 uint8_t type
= de
->d_type
;
9513 namelen
= strlen(de
->d_name
);
9514 treclen
= offsetof(struct target_dirent
, d_name
)
9516 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
9518 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
9519 tde
->d_ino
= tswapal(ino
);
9520 tde
->d_off
= tswapal(off
);
9521 tde
->d_reclen
= tswap16(treclen
);
9522 /* The target_dirent type is in what was formerly a padding
9523 * byte at the end of the structure:
9525 *(((char *)tde
) + treclen
- 1) = type
;
9527 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9528 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9534 unlock_user(dirp
, arg2
, ret
);
9538 #endif /* TARGET_NR_getdents */
9539 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9540 case TARGET_NR_getdents64
:
9542 struct linux_dirent64
*dirp
;
9543 abi_long count
= arg3
;
9544 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9546 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9547 if (!is_error(ret
)) {
9548 struct linux_dirent64
*de
;
9553 reclen
= de
->d_reclen
;
9556 de
->d_reclen
= tswap16(reclen
);
9557 tswap64s((uint64_t *)&de
->d_ino
);
9558 tswap64s((uint64_t *)&de
->d_off
);
9559 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9563 unlock_user(dirp
, arg2
, ret
);
9566 #endif /* TARGET_NR_getdents64 */
9567 #if defined(TARGET_NR__newselect)
9568 case TARGET_NR__newselect
:
9569 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9572 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9573 # ifdef TARGET_NR_poll
9574 case TARGET_NR_poll
:
9576 # ifdef TARGET_NR_ppoll
9577 case TARGET_NR_ppoll
:
9580 struct target_pollfd
*target_pfd
;
9581 unsigned int nfds
= arg2
;
9588 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
9589 sizeof(struct target_pollfd
) * nfds
, 1);
9594 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
9595 for (i
= 0; i
< nfds
; i
++) {
9596 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
9597 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
9602 # ifdef TARGET_NR_ppoll
9603 case TARGET_NR_ppoll
:
9605 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
9606 target_sigset_t
*target_set
;
9607 sigset_t _set
, *set
= &_set
;
9610 if (target_to_host_timespec(timeout_ts
, arg3
)) {
9611 unlock_user(target_pfd
, arg1
, 0);
9619 if (arg5
!= sizeof(target_sigset_t
)) {
9620 unlock_user(target_pfd
, arg1
, 0);
9621 ret
= -TARGET_EINVAL
;
9625 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
9627 unlock_user(target_pfd
, arg1
, 0);
9630 target_to_host_sigset(set
, target_set
);
9635 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
9636 set
, SIGSET_T_SIZE
));
9638 if (!is_error(ret
) && arg3
) {
9639 host_to_target_timespec(arg3
, timeout_ts
);
9642 unlock_user(target_set
, arg4
, 0);
9647 # ifdef TARGET_NR_poll
9648 case TARGET_NR_poll
:
9650 struct timespec ts
, *pts
;
9653 /* Convert ms to secs, ns */
9654 ts
.tv_sec
= arg3
/ 1000;
9655 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
9658 /* -ve poll() timeout means "infinite" */
9661 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
9666 g_assert_not_reached();
9669 if (!is_error(ret
)) {
9670 for(i
= 0; i
< nfds
; i
++) {
9671 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
9674 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
9678 case TARGET_NR_flock
:
9679 /* NOTE: the flock constant seems to be the same for every
9681 ret
= get_errno(safe_flock(arg1
, arg2
));
9683 case TARGET_NR_readv
:
9685 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
9687 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
9688 unlock_iovec(vec
, arg2
, arg3
, 1);
9690 ret
= -host_to_target_errno(errno
);
9694 case TARGET_NR_writev
:
9696 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
9698 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
9699 unlock_iovec(vec
, arg2
, arg3
, 0);
9701 ret
= -host_to_target_errno(errno
);
9705 case TARGET_NR_getsid
:
9706 ret
= get_errno(getsid(arg1
));
9708 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9709 case TARGET_NR_fdatasync
:
9710 ret
= get_errno(fdatasync(arg1
));
9713 #ifdef TARGET_NR__sysctl
9714 case TARGET_NR__sysctl
:
9715 /* We don't implement this, but ENOTDIR is always a safe
9717 ret
= -TARGET_ENOTDIR
;
9720 case TARGET_NR_sched_getaffinity
:
9722 unsigned int mask_size
;
9723 unsigned long *mask
;
9726 * sched_getaffinity needs multiples of ulong, so need to take
9727 * care of mismatches between target ulong and host ulong sizes.
9729 if (arg2
& (sizeof(abi_ulong
) - 1)) {
9730 ret
= -TARGET_EINVAL
;
9733 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
9735 mask
= alloca(mask_size
);
9736 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
9738 if (!is_error(ret
)) {
9740 /* More data returned than the caller's buffer will fit.
9741 * This only happens if sizeof(abi_long) < sizeof(long)
9742 * and the caller passed us a buffer holding an odd number
9743 * of abi_longs. If the host kernel is actually using the
9744 * extra 4 bytes then fail EINVAL; otherwise we can just
9745 * ignore them and only copy the interesting part.
9747 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
9748 if (numcpus
> arg2
* 8) {
9749 ret
= -TARGET_EINVAL
;
9755 if (copy_to_user(arg3
, mask
, ret
)) {
9761 case TARGET_NR_sched_setaffinity
:
9763 unsigned int mask_size
;
9764 unsigned long *mask
;
9767 * sched_setaffinity needs multiples of ulong, so need to take
9768 * care of mismatches between target ulong and host ulong sizes.
9770 if (arg2
& (sizeof(abi_ulong
) - 1)) {
9771 ret
= -TARGET_EINVAL
;
9774 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
9776 mask
= alloca(mask_size
);
9777 if (!lock_user_struct(VERIFY_READ
, p
, arg3
, 1)) {
9780 memcpy(mask
, p
, arg2
);
9781 unlock_user_struct(p
, arg2
, 0);
9783 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
9786 case TARGET_NR_sched_setparam
:
9788 struct sched_param
*target_schp
;
9789 struct sched_param schp
;
9792 return -TARGET_EINVAL
;
9794 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
9796 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
9797 unlock_user_struct(target_schp
, arg2
, 0);
9798 ret
= get_errno(sched_setparam(arg1
, &schp
));
9801 case TARGET_NR_sched_getparam
:
9803 struct sched_param
*target_schp
;
9804 struct sched_param schp
;
9807 return -TARGET_EINVAL
;
9809 ret
= get_errno(sched_getparam(arg1
, &schp
));
9810 if (!is_error(ret
)) {
9811 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
9813 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
9814 unlock_user_struct(target_schp
, arg2
, 1);
9818 case TARGET_NR_sched_setscheduler
:
9820 struct sched_param
*target_schp
;
9821 struct sched_param schp
;
9823 return -TARGET_EINVAL
;
9825 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
9827 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
9828 unlock_user_struct(target_schp
, arg3
, 0);
9829 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
9832 case TARGET_NR_sched_getscheduler
:
9833 ret
= get_errno(sched_getscheduler(arg1
));
9835 case TARGET_NR_sched_yield
:
9836 ret
= get_errno(sched_yield());
9838 case TARGET_NR_sched_get_priority_max
:
9839 ret
= get_errno(sched_get_priority_max(arg1
));
9841 case TARGET_NR_sched_get_priority_min
:
9842 ret
= get_errno(sched_get_priority_min(arg1
));
9844 case TARGET_NR_sched_rr_get_interval
:
9847 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
9848 if (!is_error(ret
)) {
9849 ret
= host_to_target_timespec(arg2
, &ts
);
9853 case TARGET_NR_nanosleep
:
9855 struct timespec req
, rem
;
9856 target_to_host_timespec(&req
, arg1
);
9857 ret
= get_errno(safe_nanosleep(&req
, &rem
));
9858 if (is_error(ret
) && arg2
) {
9859 host_to_target_timespec(arg2
, &rem
);
9863 #ifdef TARGET_NR_query_module
9864 case TARGET_NR_query_module
:
9867 #ifdef TARGET_NR_nfsservctl
9868 case TARGET_NR_nfsservctl
:
9871 case TARGET_NR_prctl
:
9873 case PR_GET_PDEATHSIG
:
9876 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
9877 if (!is_error(ret
) && arg2
9878 && put_user_ual(deathsig
, arg2
)) {
9886 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
9890 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
9892 unlock_user(name
, arg2
, 16);
9897 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
9901 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
9903 unlock_user(name
, arg2
, 0);
9908 /* Most prctl options have no pointer arguments */
9909 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
9913 #ifdef TARGET_NR_arch_prctl
9914 case TARGET_NR_arch_prctl
:
9915 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9916 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
9922 #ifdef TARGET_NR_pread64
9923 case TARGET_NR_pread64
:
9924 if (regpairs_aligned(cpu_env
)) {
9928 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
9930 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
9931 unlock_user(p
, arg2
, ret
);
9933 case TARGET_NR_pwrite64
:
9934 if (regpairs_aligned(cpu_env
)) {
9938 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
9940 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
9941 unlock_user(p
, arg2
, 0);
9944 case TARGET_NR_getcwd
:
9945 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
9947 ret
= get_errno(sys_getcwd1(p
, arg2
));
9948 unlock_user(p
, arg1
, ret
);
9950 case TARGET_NR_capget
:
9951 case TARGET_NR_capset
:
9953 struct target_user_cap_header
*target_header
;
9954 struct target_user_cap_data
*target_data
= NULL
;
9955 struct __user_cap_header_struct header
;
9956 struct __user_cap_data_struct data
[2];
9957 struct __user_cap_data_struct
*dataptr
= NULL
;
9958 int i
, target_datalen
;
9961 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
9964 header
.version
= tswap32(target_header
->version
);
9965 header
.pid
= tswap32(target_header
->pid
);
9967 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
9968 /* Version 2 and up takes pointer to two user_data structs */
9972 target_datalen
= sizeof(*target_data
) * data_items
;
9975 if (num
== TARGET_NR_capget
) {
9976 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
9978 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
9981 unlock_user_struct(target_header
, arg1
, 0);
9985 if (num
== TARGET_NR_capset
) {
9986 for (i
= 0; i
< data_items
; i
++) {
9987 data
[i
].effective
= tswap32(target_data
[i
].effective
);
9988 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
9989 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
9996 if (num
== TARGET_NR_capget
) {
9997 ret
= get_errno(capget(&header
, dataptr
));
9999 ret
= get_errno(capset(&header
, dataptr
));
10002 /* The kernel always updates version for both capget and capset */
10003 target_header
->version
= tswap32(header
.version
);
10004 unlock_user_struct(target_header
, arg1
, 1);
10007 if (num
== TARGET_NR_capget
) {
10008 for (i
= 0; i
< data_items
; i
++) {
10009 target_data
[i
].effective
= tswap32(data
[i
].effective
);
10010 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
10011 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
10013 unlock_user(target_data
, arg2
, target_datalen
);
10015 unlock_user(target_data
, arg2
, 0);
10020 case TARGET_NR_sigaltstack
:
10021 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUArchState
*)cpu_env
));
10024 #ifdef CONFIG_SENDFILE
10025 case TARGET_NR_sendfile
:
10027 off_t
*offp
= NULL
;
10030 ret
= get_user_sal(off
, arg3
);
10031 if (is_error(ret
)) {
10036 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10037 if (!is_error(ret
) && arg3
) {
10038 abi_long ret2
= put_user_sal(off
, arg3
);
10039 if (is_error(ret2
)) {
10045 #ifdef TARGET_NR_sendfile64
10046 case TARGET_NR_sendfile64
:
10048 off_t
*offp
= NULL
;
10051 ret
= get_user_s64(off
, arg3
);
10052 if (is_error(ret
)) {
10057 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10058 if (!is_error(ret
) && arg3
) {
10059 abi_long ret2
= put_user_s64(off
, arg3
);
10060 if (is_error(ret2
)) {
10068 case TARGET_NR_sendfile
:
10069 #ifdef TARGET_NR_sendfile64
10070 case TARGET_NR_sendfile64
:
10072 goto unimplemented
;
10075 #ifdef TARGET_NR_getpmsg
10076 case TARGET_NR_getpmsg
:
10077 goto unimplemented
;
10079 #ifdef TARGET_NR_putpmsg
10080 case TARGET_NR_putpmsg
:
10081 goto unimplemented
;
10083 #ifdef TARGET_NR_vfork
10084 case TARGET_NR_vfork
:
10085 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
10089 #ifdef TARGET_NR_ugetrlimit
10090 case TARGET_NR_ugetrlimit
:
10092 struct rlimit rlim
;
10093 int resource
= target_to_host_resource(arg1
);
10094 ret
= get_errno(getrlimit(resource
, &rlim
));
10095 if (!is_error(ret
)) {
10096 struct target_rlimit
*target_rlim
;
10097 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
10099 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
10100 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
10101 unlock_user_struct(target_rlim
, arg2
, 1);
10106 #ifdef TARGET_NR_truncate64
10107 case TARGET_NR_truncate64
:
10108 if (!(p
= lock_user_string(arg1
)))
10110 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
10111 unlock_user(p
, arg1
, 0);
10114 #ifdef TARGET_NR_ftruncate64
10115 case TARGET_NR_ftruncate64
:
10116 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
10119 #ifdef TARGET_NR_stat64
10120 case TARGET_NR_stat64
:
10121 if (!(p
= lock_user_string(arg1
)))
10123 ret
= get_errno(stat(path(p
), &st
));
10124 unlock_user(p
, arg1
, 0);
10125 if (!is_error(ret
))
10126 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10129 #ifdef TARGET_NR_lstat64
10130 case TARGET_NR_lstat64
:
10131 if (!(p
= lock_user_string(arg1
)))
10133 ret
= get_errno(lstat(path(p
), &st
));
10134 unlock_user(p
, arg1
, 0);
10135 if (!is_error(ret
))
10136 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10139 #ifdef TARGET_NR_fstat64
10140 case TARGET_NR_fstat64
:
10141 ret
= get_errno(fstat(arg1
, &st
));
10142 if (!is_error(ret
))
10143 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10146 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10147 #ifdef TARGET_NR_fstatat64
10148 case TARGET_NR_fstatat64
:
10150 #ifdef TARGET_NR_newfstatat
10151 case TARGET_NR_newfstatat
:
10153 if (!(p
= lock_user_string(arg2
)))
10155 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
10156 if (!is_error(ret
))
10157 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
10160 #ifdef TARGET_NR_lchown
10161 case TARGET_NR_lchown
:
10162 if (!(p
= lock_user_string(arg1
)))
10164 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10165 unlock_user(p
, arg1
, 0);
10168 #ifdef TARGET_NR_getuid
10169 case TARGET_NR_getuid
:
10170 ret
= get_errno(high2lowuid(getuid()));
10173 #ifdef TARGET_NR_getgid
10174 case TARGET_NR_getgid
:
10175 ret
= get_errno(high2lowgid(getgid()));
10178 #ifdef TARGET_NR_geteuid
10179 case TARGET_NR_geteuid
:
10180 ret
= get_errno(high2lowuid(geteuid()));
10183 #ifdef TARGET_NR_getegid
10184 case TARGET_NR_getegid
:
10185 ret
= get_errno(high2lowgid(getegid()));
10188 case TARGET_NR_setreuid
:
10189 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
10191 case TARGET_NR_setregid
:
10192 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
10194 case TARGET_NR_getgroups
:
10196 int gidsetsize
= arg1
;
10197 target_id
*target_grouplist
;
10201 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10202 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10203 if (gidsetsize
== 0)
10205 if (!is_error(ret
)) {
10206 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
10207 if (!target_grouplist
)
10209 for(i
= 0;i
< ret
; i
++)
10210 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
10211 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
10215 case TARGET_NR_setgroups
:
10217 int gidsetsize
= arg1
;
10218 target_id
*target_grouplist
;
10219 gid_t
*grouplist
= NULL
;
10222 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10223 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
10224 if (!target_grouplist
) {
10225 ret
= -TARGET_EFAULT
;
10228 for (i
= 0; i
< gidsetsize
; i
++) {
10229 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
10231 unlock_user(target_grouplist
, arg2
, 0);
10233 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
10236 case TARGET_NR_fchown
:
10237 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
10239 #if defined(TARGET_NR_fchownat)
10240 case TARGET_NR_fchownat
:
10241 if (!(p
= lock_user_string(arg2
)))
10243 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
10244 low2highgid(arg4
), arg5
));
10245 unlock_user(p
, arg2
, 0);
10248 #ifdef TARGET_NR_setresuid
10249 case TARGET_NR_setresuid
:
10250 ret
= get_errno(sys_setresuid(low2highuid(arg1
),
10252 low2highuid(arg3
)));
10255 #ifdef TARGET_NR_getresuid
10256 case TARGET_NR_getresuid
:
10258 uid_t ruid
, euid
, suid
;
10259 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
10260 if (!is_error(ret
)) {
10261 if (put_user_id(high2lowuid(ruid
), arg1
)
10262 || put_user_id(high2lowuid(euid
), arg2
)
10263 || put_user_id(high2lowuid(suid
), arg3
))
10269 #ifdef TARGET_NR_getresgid
10270 case TARGET_NR_setresgid
:
10271 ret
= get_errno(sys_setresgid(low2highgid(arg1
),
10273 low2highgid(arg3
)));
10276 #ifdef TARGET_NR_getresgid
10277 case TARGET_NR_getresgid
:
10279 gid_t rgid
, egid
, sgid
;
10280 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
10281 if (!is_error(ret
)) {
10282 if (put_user_id(high2lowgid(rgid
), arg1
)
10283 || put_user_id(high2lowgid(egid
), arg2
)
10284 || put_user_id(high2lowgid(sgid
), arg3
))
10290 #ifdef TARGET_NR_chown
10291 case TARGET_NR_chown
:
10292 if (!(p
= lock_user_string(arg1
)))
10294 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10295 unlock_user(p
, arg1
, 0);
10298 case TARGET_NR_setuid
:
10299 ret
= get_errno(sys_setuid(low2highuid(arg1
)));
10301 case TARGET_NR_setgid
:
10302 ret
= get_errno(sys_setgid(low2highgid(arg1
)));
10304 case TARGET_NR_setfsuid
:
10305 ret
= get_errno(setfsuid(arg1
));
10307 case TARGET_NR_setfsgid
:
10308 ret
= get_errno(setfsgid(arg1
));
10311 #ifdef TARGET_NR_lchown32
10312 case TARGET_NR_lchown32
:
10313 if (!(p
= lock_user_string(arg1
)))
10315 ret
= get_errno(lchown(p
, arg2
, arg3
));
10316 unlock_user(p
, arg1
, 0);
10319 #ifdef TARGET_NR_getuid32
10320 case TARGET_NR_getuid32
:
10321 ret
= get_errno(getuid());
10325 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10326 /* Alpha specific */
10327 case TARGET_NR_getxuid
:
10331 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
10333 ret
= get_errno(getuid());
10336 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10337 /* Alpha specific */
10338 case TARGET_NR_getxgid
:
10342 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
10344 ret
= get_errno(getgid());
10347 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10348 /* Alpha specific */
10349 case TARGET_NR_osf_getsysinfo
:
10350 ret
= -TARGET_EOPNOTSUPP
;
10352 case TARGET_GSI_IEEE_FP_CONTROL
:
10354 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
10356 /* Copied from linux ieee_fpcr_to_swcr. */
10357 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
10358 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
10359 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
10360 | SWCR_TRAP_ENABLE_DZE
10361 | SWCR_TRAP_ENABLE_OVF
);
10362 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
10363 | SWCR_TRAP_ENABLE_INE
);
10364 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
10365 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
10367 if (put_user_u64 (swcr
, arg2
))
10373 /* case GSI_IEEE_STATE_AT_SIGNAL:
10374 -- Not implemented in linux kernel.
10376 -- Retrieves current unaligned access state; not much used.
10377 case GSI_PROC_TYPE:
10378 -- Retrieves implver information; surely not used.
10379 case GSI_GET_HWRPB:
10380 -- Grabs a copy of the HWRPB; surely not used.
10385 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10386 /* Alpha specific */
10387 case TARGET_NR_osf_setsysinfo
:
10388 ret
= -TARGET_EOPNOTSUPP
;
10390 case TARGET_SSI_IEEE_FP_CONTROL
:
10392 uint64_t swcr
, fpcr
, orig_fpcr
;
10394 if (get_user_u64 (swcr
, arg2
)) {
10397 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10398 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
10400 /* Copied from linux ieee_swcr_to_fpcr. */
10401 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
10402 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
10403 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
10404 | SWCR_TRAP_ENABLE_DZE
10405 | SWCR_TRAP_ENABLE_OVF
)) << 48;
10406 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
10407 | SWCR_TRAP_ENABLE_INE
)) << 57;
10408 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
10409 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
10411 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
10416 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
10418 uint64_t exc
, fpcr
, orig_fpcr
;
10421 if (get_user_u64(exc
, arg2
)) {
10425 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10427 /* We only add to the exception status here. */
10428 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
10430 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
10433 /* Old exceptions are not signaled. */
10434 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
10436 /* If any exceptions set by this call,
10437 and are unmasked, send a signal. */
10439 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
10440 si_code
= TARGET_FPE_FLTRES
;
10442 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
10443 si_code
= TARGET_FPE_FLTUND
;
10445 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
10446 si_code
= TARGET_FPE_FLTOVF
;
10448 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
10449 si_code
= TARGET_FPE_FLTDIV
;
10451 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
10452 si_code
= TARGET_FPE_FLTINV
;
10454 if (si_code
!= 0) {
10455 target_siginfo_t info
;
10456 info
.si_signo
= SIGFPE
;
10458 info
.si_code
= si_code
;
10459 info
._sifields
._sigfault
._addr
10460 = ((CPUArchState
*)cpu_env
)->pc
;
10461 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
10466 /* case SSI_NVPAIRS:
10467 -- Used with SSIN_UACPROC to enable unaligned accesses.
10468 case SSI_IEEE_STATE_AT_SIGNAL:
10469 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10470 -- Not implemented in linux kernel
10475 #ifdef TARGET_NR_osf_sigprocmask
10476 /* Alpha specific. */
10477 case TARGET_NR_osf_sigprocmask
:
10481 sigset_t set
, oldset
;
10484 case TARGET_SIG_BLOCK
:
10487 case TARGET_SIG_UNBLOCK
:
10490 case TARGET_SIG_SETMASK
:
10494 ret
= -TARGET_EINVAL
;
10498 target_to_host_old_sigset(&set
, &mask
);
10499 ret
= do_sigprocmask(how
, &set
, &oldset
);
10501 host_to_target_old_sigset(&mask
, &oldset
);
10508 #ifdef TARGET_NR_getgid32
10509 case TARGET_NR_getgid32
:
10510 ret
= get_errno(getgid());
10513 #ifdef TARGET_NR_geteuid32
10514 case TARGET_NR_geteuid32
:
10515 ret
= get_errno(geteuid());
10518 #ifdef TARGET_NR_getegid32
10519 case TARGET_NR_getegid32
:
10520 ret
= get_errno(getegid());
10523 #ifdef TARGET_NR_setreuid32
10524 case TARGET_NR_setreuid32
:
10525 ret
= get_errno(setreuid(arg1
, arg2
));
10528 #ifdef TARGET_NR_setregid32
10529 case TARGET_NR_setregid32
:
10530 ret
= get_errno(setregid(arg1
, arg2
));
10533 #ifdef TARGET_NR_getgroups32
10534 case TARGET_NR_getgroups32
:
10536 int gidsetsize
= arg1
;
10537 uint32_t *target_grouplist
;
10541 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10542 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10543 if (gidsetsize
== 0)
10545 if (!is_error(ret
)) {
10546 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
10547 if (!target_grouplist
) {
10548 ret
= -TARGET_EFAULT
;
10551 for(i
= 0;i
< ret
; i
++)
10552 target_grouplist
[i
] = tswap32(grouplist
[i
]);
10553 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
10558 #ifdef TARGET_NR_setgroups32
10559 case TARGET_NR_setgroups32
:
10561 int gidsetsize
= arg1
;
10562 uint32_t *target_grouplist
;
10566 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10567 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
10568 if (!target_grouplist
) {
10569 ret
= -TARGET_EFAULT
;
10572 for(i
= 0;i
< gidsetsize
; i
++)
10573 grouplist
[i
] = tswap32(target_grouplist
[i
]);
10574 unlock_user(target_grouplist
, arg2
, 0);
10575 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
10579 #ifdef TARGET_NR_fchown32
10580 case TARGET_NR_fchown32
:
10581 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
10584 #ifdef TARGET_NR_setresuid32
10585 case TARGET_NR_setresuid32
:
10586 ret
= get_errno(sys_setresuid(arg1
, arg2
, arg3
));
10589 #ifdef TARGET_NR_getresuid32
10590 case TARGET_NR_getresuid32
:
10592 uid_t ruid
, euid
, suid
;
10593 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
10594 if (!is_error(ret
)) {
10595 if (put_user_u32(ruid
, arg1
)
10596 || put_user_u32(euid
, arg2
)
10597 || put_user_u32(suid
, arg3
))
10603 #ifdef TARGET_NR_setresgid32
10604 case TARGET_NR_setresgid32
:
10605 ret
= get_errno(sys_setresgid(arg1
, arg2
, arg3
));
10608 #ifdef TARGET_NR_getresgid32
10609 case TARGET_NR_getresgid32
:
10611 gid_t rgid
, egid
, sgid
;
10612 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
10613 if (!is_error(ret
)) {
10614 if (put_user_u32(rgid
, arg1
)
10615 || put_user_u32(egid
, arg2
)
10616 || put_user_u32(sgid
, arg3
))
10622 #ifdef TARGET_NR_chown32
10623 case TARGET_NR_chown32
:
10624 if (!(p
= lock_user_string(arg1
)))
10626 ret
= get_errno(chown(p
, arg2
, arg3
));
10627 unlock_user(p
, arg1
, 0);
10630 #ifdef TARGET_NR_setuid32
10631 case TARGET_NR_setuid32
:
10632 ret
= get_errno(sys_setuid(arg1
));
10635 #ifdef TARGET_NR_setgid32
10636 case TARGET_NR_setgid32
:
10637 ret
= get_errno(sys_setgid(arg1
));
10640 #ifdef TARGET_NR_setfsuid32
10641 case TARGET_NR_setfsuid32
:
10642 ret
= get_errno(setfsuid(arg1
));
10645 #ifdef TARGET_NR_setfsgid32
10646 case TARGET_NR_setfsgid32
:
10647 ret
= get_errno(setfsgid(arg1
));
10651 case TARGET_NR_pivot_root
:
10652 goto unimplemented
;
10653 #ifdef TARGET_NR_mincore
10654 case TARGET_NR_mincore
:
10657 ret
= -TARGET_EFAULT
;
10658 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
10660 if (!(p
= lock_user_string(arg3
)))
10662 ret
= get_errno(mincore(a
, arg2
, p
));
10663 unlock_user(p
, arg3
, ret
);
10665 unlock_user(a
, arg1
, 0);
10669 #ifdef TARGET_NR_arm_fadvise64_64
10670 case TARGET_NR_arm_fadvise64_64
:
10671 /* arm_fadvise64_64 looks like fadvise64_64 but
10672 * with different argument order: fd, advice, offset, len
10673 * rather than the usual fd, offset, len, advice.
10674 * Note that offset and len are both 64-bit so appear as
10675 * pairs of 32-bit registers.
10677 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
10678 target_offset64(arg5
, arg6
), arg2
);
10679 ret
= -host_to_target_errno(ret
);
10683 #if TARGET_ABI_BITS == 32
10685 #ifdef TARGET_NR_fadvise64_64
10686 case TARGET_NR_fadvise64_64
:
10687 /* 6 args: fd, offset (high, low), len (high, low), advice */
10688 if (regpairs_aligned(cpu_env
)) {
10689 /* offset is in (3,4), len in (5,6) and advice in 7 */
10696 ret
= -host_to_target_errno(posix_fadvise(arg1
,
10697 target_offset64(arg2
, arg3
),
10698 target_offset64(arg4
, arg5
),
10703 #ifdef TARGET_NR_fadvise64
10704 case TARGET_NR_fadvise64
:
10705 /* 5 args: fd, offset (high, low), len, advice */
10706 if (regpairs_aligned(cpu_env
)) {
10707 /* offset is in (3,4), len in 5 and advice in 6 */
10713 ret
= -host_to_target_errno(posix_fadvise(arg1
,
10714 target_offset64(arg2
, arg3
),
10719 #else /* not a 32-bit ABI */
10720 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10721 #ifdef TARGET_NR_fadvise64_64
10722 case TARGET_NR_fadvise64_64
:
10724 #ifdef TARGET_NR_fadvise64
10725 case TARGET_NR_fadvise64
:
10727 #ifdef TARGET_S390X
10729 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
10730 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
10731 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
10732 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
10736 ret
= -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
10739 #endif /* end of 64-bit ABI fadvise handling */
10741 #ifdef TARGET_NR_madvise
10742 case TARGET_NR_madvise
:
10743 /* A straight passthrough may not be safe because qemu sometimes
10744 turns private file-backed mappings into anonymous mappings.
10745 This will break MADV_DONTNEED.
10746 This is a hint, so ignoring and returning success is ok. */
10747 ret
= get_errno(0);
10750 #if TARGET_ABI_BITS == 32
10751 case TARGET_NR_fcntl64
:
10755 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
10756 to_flock64_fn
*copyto
= copy_to_user_flock64
;
10759 if (((CPUARMState
*)cpu_env
)->eabi
) {
10760 copyfrom
= copy_from_user_eabi_flock64
;
10761 copyto
= copy_to_user_eabi_flock64
;
10765 cmd
= target_to_host_fcntl_cmd(arg2
);
10766 if (cmd
== -TARGET_EINVAL
) {
10772 case TARGET_F_GETLK64
:
10773 ret
= copyfrom(&fl
, arg3
);
10777 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
10779 ret
= copyto(arg3
, &fl
);
10783 case TARGET_F_SETLK64
:
10784 case TARGET_F_SETLKW64
:
10785 ret
= copyfrom(&fl
, arg3
);
10789 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
10792 ret
= do_fcntl(arg1
, arg2
, arg3
);
10798 #ifdef TARGET_NR_cacheflush
10799 case TARGET_NR_cacheflush
:
10800 /* self-modifying code is handled automatically, so nothing needed */
10804 #ifdef TARGET_NR_security
10805 case TARGET_NR_security
:
10806 goto unimplemented
;
10808 #ifdef TARGET_NR_getpagesize
10809 case TARGET_NR_getpagesize
:
10810 ret
= TARGET_PAGE_SIZE
;
10813 case TARGET_NR_gettid
:
10814 ret
= get_errno(gettid());
10816 #ifdef TARGET_NR_readahead
10817 case TARGET_NR_readahead
:
10818 #if TARGET_ABI_BITS == 32
10819 if (regpairs_aligned(cpu_env
)) {
10824 ret
= get_errno(readahead(arg1
, ((off64_t
)arg3
<< 32) | arg2
, arg4
));
10826 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
10831 #ifdef TARGET_NR_setxattr
10832 case TARGET_NR_listxattr
:
10833 case TARGET_NR_llistxattr
:
10837 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10839 ret
= -TARGET_EFAULT
;
10843 p
= lock_user_string(arg1
);
10845 if (num
== TARGET_NR_listxattr
) {
10846 ret
= get_errno(listxattr(p
, b
, arg3
));
10848 ret
= get_errno(llistxattr(p
, b
, arg3
));
10851 ret
= -TARGET_EFAULT
;
10853 unlock_user(p
, arg1
, 0);
10854 unlock_user(b
, arg2
, arg3
);
10857 case TARGET_NR_flistxattr
:
10861 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10863 ret
= -TARGET_EFAULT
;
10867 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
10868 unlock_user(b
, arg2
, arg3
);
10871 case TARGET_NR_setxattr
:
10872 case TARGET_NR_lsetxattr
:
10874 void *p
, *n
, *v
= 0;
10876 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
10878 ret
= -TARGET_EFAULT
;
10882 p
= lock_user_string(arg1
);
10883 n
= lock_user_string(arg2
);
10885 if (num
== TARGET_NR_setxattr
) {
10886 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
10888 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
10891 ret
= -TARGET_EFAULT
;
10893 unlock_user(p
, arg1
, 0);
10894 unlock_user(n
, arg2
, 0);
10895 unlock_user(v
, arg3
, 0);
10898 case TARGET_NR_fsetxattr
:
10902 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
10904 ret
= -TARGET_EFAULT
;
10908 n
= lock_user_string(arg2
);
10910 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
10912 ret
= -TARGET_EFAULT
;
10914 unlock_user(n
, arg2
, 0);
10915 unlock_user(v
, arg3
, 0);
10918 case TARGET_NR_getxattr
:
10919 case TARGET_NR_lgetxattr
:
10921 void *p
, *n
, *v
= 0;
10923 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
10925 ret
= -TARGET_EFAULT
;
10929 p
= lock_user_string(arg1
);
10930 n
= lock_user_string(arg2
);
10932 if (num
== TARGET_NR_getxattr
) {
10933 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
10935 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
10938 ret
= -TARGET_EFAULT
;
10940 unlock_user(p
, arg1
, 0);
10941 unlock_user(n
, arg2
, 0);
10942 unlock_user(v
, arg3
, arg4
);
10945 case TARGET_NR_fgetxattr
:
10949 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
10951 ret
= -TARGET_EFAULT
;
10955 n
= lock_user_string(arg2
);
10957 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
10959 ret
= -TARGET_EFAULT
;
10961 unlock_user(n
, arg2
, 0);
10962 unlock_user(v
, arg3
, arg4
);
10965 case TARGET_NR_removexattr
:
10966 case TARGET_NR_lremovexattr
:
10969 p
= lock_user_string(arg1
);
10970 n
= lock_user_string(arg2
);
10972 if (num
== TARGET_NR_removexattr
) {
10973 ret
= get_errno(removexattr(p
, n
));
10975 ret
= get_errno(lremovexattr(p
, n
));
10978 ret
= -TARGET_EFAULT
;
10980 unlock_user(p
, arg1
, 0);
10981 unlock_user(n
, arg2
, 0);
10984 case TARGET_NR_fremovexattr
:
10987 n
= lock_user_string(arg2
);
10989 ret
= get_errno(fremovexattr(arg1
, n
));
10991 ret
= -TARGET_EFAULT
;
10993 unlock_user(n
, arg2
, 0);
10997 #endif /* CONFIG_ATTR */
10998 #ifdef TARGET_NR_set_thread_area
10999 case TARGET_NR_set_thread_area
:
11000 #if defined(TARGET_MIPS)
11001 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
11004 #elif defined(TARGET_CRIS)
11006 ret
= -TARGET_EINVAL
;
11008 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
11012 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11013 ret
= do_set_thread_area(cpu_env
, arg1
);
11015 #elif defined(TARGET_M68K)
11017 TaskState
*ts
= cpu
->opaque
;
11018 ts
->tp_value
= arg1
;
11023 goto unimplemented_nowarn
;
11026 #ifdef TARGET_NR_get_thread_area
11027 case TARGET_NR_get_thread_area
:
11028 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11029 ret
= do_get_thread_area(cpu_env
, arg1
);
11031 #elif defined(TARGET_M68K)
11033 TaskState
*ts
= cpu
->opaque
;
11034 ret
= ts
->tp_value
;
11038 goto unimplemented_nowarn
;
11041 #ifdef TARGET_NR_getdomainname
11042 case TARGET_NR_getdomainname
:
11043 goto unimplemented_nowarn
;
11046 #ifdef TARGET_NR_clock_gettime
11047 case TARGET_NR_clock_gettime
:
11049 struct timespec ts
;
11050 ret
= get_errno(clock_gettime(arg1
, &ts
));
11051 if (!is_error(ret
)) {
11052 host_to_target_timespec(arg2
, &ts
);
11057 #ifdef TARGET_NR_clock_getres
11058 case TARGET_NR_clock_getres
:
11060 struct timespec ts
;
11061 ret
= get_errno(clock_getres(arg1
, &ts
));
11062 if (!is_error(ret
)) {
11063 host_to_target_timespec(arg2
, &ts
);
11068 #ifdef TARGET_NR_clock_nanosleep
11069 case TARGET_NR_clock_nanosleep
:
11071 struct timespec ts
;
11072 target_to_host_timespec(&ts
, arg3
);
11073 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
11074 &ts
, arg4
? &ts
: NULL
));
11076 host_to_target_timespec(arg4
, &ts
);
11078 #if defined(TARGET_PPC)
11079 /* clock_nanosleep is odd in that it returns positive errno values.
11080 * On PPC, CR0 bit 3 should be set in such a situation. */
11081 if (ret
&& ret
!= -TARGET_ERESTARTSYS
) {
11082 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
11089 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11090 case TARGET_NR_set_tid_address
:
11091 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
11095 case TARGET_NR_tkill
:
11096 ret
= get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
11099 case TARGET_NR_tgkill
:
11100 ret
= get_errno(safe_tgkill((int)arg1
, (int)arg2
,
11101 target_to_host_signal(arg3
)));
11104 #ifdef TARGET_NR_set_robust_list
11105 case TARGET_NR_set_robust_list
:
11106 case TARGET_NR_get_robust_list
:
11107 /* The ABI for supporting robust futexes has userspace pass
11108 * the kernel a pointer to a linked list which is updated by
11109 * userspace after the syscall; the list is walked by the kernel
11110 * when the thread exits. Since the linked list in QEMU guest
11111 * memory isn't a valid linked list for the host and we have
11112 * no way to reliably intercept the thread-death event, we can't
11113 * support these. Silently return ENOSYS so that guest userspace
11114 * falls back to a non-robust futex implementation (which should
11115 * be OK except in the corner case of the guest crashing while
11116 * holding a mutex that is shared with another process via
11119 goto unimplemented_nowarn
;
11122 #if defined(TARGET_NR_utimensat)
11123 case TARGET_NR_utimensat
:
11125 struct timespec
*tsp
, ts
[2];
11129 target_to_host_timespec(ts
, arg3
);
11130 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
11134 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
11136 if (!(p
= lock_user_string(arg2
))) {
11137 ret
= -TARGET_EFAULT
;
11140 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
11141 unlock_user(p
, arg2
, 0);
11146 case TARGET_NR_futex
:
11147 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
11149 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11150 case TARGET_NR_inotify_init
:
11151 ret
= get_errno(sys_inotify_init());
11154 #ifdef CONFIG_INOTIFY1
11155 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11156 case TARGET_NR_inotify_init1
:
11157 ret
= get_errno(sys_inotify_init1(arg1
));
11161 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11162 case TARGET_NR_inotify_add_watch
:
11163 p
= lock_user_string(arg2
);
11164 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
11165 unlock_user(p
, arg2
, 0);
11168 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11169 case TARGET_NR_inotify_rm_watch
:
11170 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
11174 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11175 case TARGET_NR_mq_open
:
11177 struct mq_attr posix_mq_attr
, *attrp
;
11179 p
= lock_user_string(arg1
- 1);
11181 copy_from_user_mq_attr (&posix_mq_attr
, arg4
);
11182 attrp
= &posix_mq_attr
;
11186 ret
= get_errno(mq_open(p
, arg2
, arg3
, attrp
));
11187 unlock_user (p
, arg1
, 0);
11191 case TARGET_NR_mq_unlink
:
11192 p
= lock_user_string(arg1
- 1);
11193 ret
= get_errno(mq_unlink(p
));
11194 unlock_user (p
, arg1
, 0);
11197 case TARGET_NR_mq_timedsend
:
11199 struct timespec ts
;
11201 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11203 target_to_host_timespec(&ts
, arg5
);
11204 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
11205 host_to_target_timespec(arg5
, &ts
);
11207 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
11209 unlock_user (p
, arg2
, arg3
);
11213 case TARGET_NR_mq_timedreceive
:
11215 struct timespec ts
;
11218 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11220 target_to_host_timespec(&ts
, arg5
);
11221 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11223 host_to_target_timespec(arg5
, &ts
);
11225 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11228 unlock_user (p
, arg2
, arg3
);
11230 put_user_u32(prio
, arg4
);
11234 /* Not implemented for now... */
11235 /* case TARGET_NR_mq_notify: */
11238 case TARGET_NR_mq_getsetattr
:
11240 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
11243 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
11244 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
11247 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
11248 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
11255 #ifdef CONFIG_SPLICE
11256 #ifdef TARGET_NR_tee
11257 case TARGET_NR_tee
:
11259 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
11263 #ifdef TARGET_NR_splice
11264 case TARGET_NR_splice
:
11266 loff_t loff_in
, loff_out
;
11267 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
11269 if (get_user_u64(loff_in
, arg2
)) {
11272 ploff_in
= &loff_in
;
11275 if (get_user_u64(loff_out
, arg4
)) {
11278 ploff_out
= &loff_out
;
11280 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
11282 if (put_user_u64(loff_in
, arg2
)) {
11287 if (put_user_u64(loff_out
, arg4
)) {
11294 #ifdef TARGET_NR_vmsplice
11295 case TARGET_NR_vmsplice
:
11297 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
11299 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
11300 unlock_iovec(vec
, arg2
, arg3
, 0);
11302 ret
= -host_to_target_errno(errno
);
11307 #endif /* CONFIG_SPLICE */
11308 #ifdef CONFIG_EVENTFD
11309 #if defined(TARGET_NR_eventfd)
11310 case TARGET_NR_eventfd
:
11311 ret
= get_errno(eventfd(arg1
, 0));
11312 fd_trans_unregister(ret
);
11315 #if defined(TARGET_NR_eventfd2)
11316 case TARGET_NR_eventfd2
:
11318 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
11319 if (arg2
& TARGET_O_NONBLOCK
) {
11320 host_flags
|= O_NONBLOCK
;
11322 if (arg2
& TARGET_O_CLOEXEC
) {
11323 host_flags
|= O_CLOEXEC
;
11325 ret
= get_errno(eventfd(arg1
, host_flags
));
11326 fd_trans_unregister(ret
);
11330 #endif /* CONFIG_EVENTFD */
11331 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11332 case TARGET_NR_fallocate
:
11333 #if TARGET_ABI_BITS == 32
11334 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
11335 target_offset64(arg5
, arg6
)));
11337 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
11341 #if defined(CONFIG_SYNC_FILE_RANGE)
11342 #if defined(TARGET_NR_sync_file_range)
11343 case TARGET_NR_sync_file_range
:
11344 #if TARGET_ABI_BITS == 32
11345 #if defined(TARGET_MIPS)
11346 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11347 target_offset64(arg5
, arg6
), arg7
));
11349 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
11350 target_offset64(arg4
, arg5
), arg6
));
11351 #endif /* !TARGET_MIPS */
11353 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
11357 #if defined(TARGET_NR_sync_file_range2)
11358 case TARGET_NR_sync_file_range2
:
11359 /* This is like sync_file_range but the arguments are reordered */
11360 #if TARGET_ABI_BITS == 32
11361 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11362 target_offset64(arg5
, arg6
), arg2
));
11364 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
11369 #if defined(TARGET_NR_signalfd4)
11370 case TARGET_NR_signalfd4
:
11371 ret
= do_signalfd4(arg1
, arg2
, arg4
);
11374 #if defined(TARGET_NR_signalfd)
11375 case TARGET_NR_signalfd
:
11376 ret
= do_signalfd4(arg1
, arg2
, 0);
11379 #if defined(CONFIG_EPOLL)
11380 #if defined(TARGET_NR_epoll_create)
11381 case TARGET_NR_epoll_create
:
11382 ret
= get_errno(epoll_create(arg1
));
11385 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11386 case TARGET_NR_epoll_create1
:
11387 ret
= get_errno(epoll_create1(arg1
));
11390 #if defined(TARGET_NR_epoll_ctl)
11391 case TARGET_NR_epoll_ctl
:
11393 struct epoll_event ep
;
11394 struct epoll_event
*epp
= 0;
11396 struct target_epoll_event
*target_ep
;
11397 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
11400 ep
.events
= tswap32(target_ep
->events
);
11401 /* The epoll_data_t union is just opaque data to the kernel,
11402 * so we transfer all 64 bits across and need not worry what
11403 * actual data type it is.
11405 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
11406 unlock_user_struct(target_ep
, arg4
, 0);
11409 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
11414 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11415 #if defined(TARGET_NR_epoll_wait)
11416 case TARGET_NR_epoll_wait
:
11418 #if defined(TARGET_NR_epoll_pwait)
11419 case TARGET_NR_epoll_pwait
:
11422 struct target_epoll_event
*target_ep
;
11423 struct epoll_event
*ep
;
11425 int maxevents
= arg3
;
11426 int timeout
= arg4
;
11428 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
11429 maxevents
* sizeof(struct target_epoll_event
), 1);
11434 ep
= alloca(maxevents
* sizeof(struct epoll_event
));
11437 #if defined(TARGET_NR_epoll_pwait)
11438 case TARGET_NR_epoll_pwait
:
11440 target_sigset_t
*target_set
;
11441 sigset_t _set
, *set
= &_set
;
11444 if (arg6
!= sizeof(target_sigset_t
)) {
11445 ret
= -TARGET_EINVAL
;
11449 target_set
= lock_user(VERIFY_READ
, arg5
,
11450 sizeof(target_sigset_t
), 1);
11452 unlock_user(target_ep
, arg2
, 0);
11455 target_to_host_sigset(set
, target_set
);
11456 unlock_user(target_set
, arg5
, 0);
11461 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
11462 set
, SIGSET_T_SIZE
));
11466 #if defined(TARGET_NR_epoll_wait)
11467 case TARGET_NR_epoll_wait
:
11468 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
11473 ret
= -TARGET_ENOSYS
;
11475 if (!is_error(ret
)) {
11477 for (i
= 0; i
< ret
; i
++) {
11478 target_ep
[i
].events
= tswap32(ep
[i
].events
);
11479 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
11482 unlock_user(target_ep
, arg2
, ret
* sizeof(struct target_epoll_event
));
11487 #ifdef TARGET_NR_prlimit64
11488 case TARGET_NR_prlimit64
:
11490 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11491 struct target_rlimit64
*target_rnew
, *target_rold
;
11492 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
11493 int resource
= target_to_host_resource(arg2
);
11495 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
11498 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
11499 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
11500 unlock_user_struct(target_rnew
, arg3
, 0);
11504 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
11505 if (!is_error(ret
) && arg4
) {
11506 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
11509 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
11510 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
11511 unlock_user_struct(target_rold
, arg4
, 1);
11516 #ifdef TARGET_NR_gethostname
11517 case TARGET_NR_gethostname
:
11519 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
11521 ret
= get_errno(gethostname(name
, arg2
));
11522 unlock_user(name
, arg1
, arg2
);
11524 ret
= -TARGET_EFAULT
;
11529 #ifdef TARGET_NR_atomic_cmpxchg_32
11530 case TARGET_NR_atomic_cmpxchg_32
:
11532 /* should use start_exclusive from main.c */
11533 abi_ulong mem_value
;
11534 if (get_user_u32(mem_value
, arg6
)) {
11535 target_siginfo_t info
;
11536 info
.si_signo
= SIGSEGV
;
11538 info
.si_code
= TARGET_SEGV_MAPERR
;
11539 info
._sifields
._sigfault
._addr
= arg6
;
11540 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
11544 if (mem_value
== arg2
)
11545 put_user_u32(arg1
, arg6
);
11550 #ifdef TARGET_NR_atomic_barrier
11551 case TARGET_NR_atomic_barrier
:
11553 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
11559 #ifdef TARGET_NR_timer_create
11560 case TARGET_NR_timer_create
:
11562 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11564 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
11567 int timer_index
= next_free_host_timer();
11569 if (timer_index
< 0) {
11570 ret
= -TARGET_EAGAIN
;
11572 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
11575 phost_sevp
= &host_sevp
;
11576 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
11582 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
11586 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
11595 #ifdef TARGET_NR_timer_settime
11596 case TARGET_NR_timer_settime
:
11598 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11599 * struct itimerspec * old_value */
11600 target_timer_t timerid
= get_timer_id(arg1
);
11604 } else if (arg3
== 0) {
11605 ret
= -TARGET_EINVAL
;
11607 timer_t htimer
= g_posix_timers
[timerid
];
11608 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
11610 target_to_host_itimerspec(&hspec_new
, arg3
);
11612 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
11613 host_to_target_itimerspec(arg2
, &hspec_old
);
11619 #ifdef TARGET_NR_timer_gettime
11620 case TARGET_NR_timer_gettime
:
11622 /* args: timer_t timerid, struct itimerspec *curr_value */
11623 target_timer_t timerid
= get_timer_id(arg1
);
11627 } else if (!arg2
) {
11628 ret
= -TARGET_EFAULT
;
11630 timer_t htimer
= g_posix_timers
[timerid
];
11631 struct itimerspec hspec
;
11632 ret
= get_errno(timer_gettime(htimer
, &hspec
));
11634 if (host_to_target_itimerspec(arg2
, &hspec
)) {
11635 ret
= -TARGET_EFAULT
;
11642 #ifdef TARGET_NR_timer_getoverrun
11643 case TARGET_NR_timer_getoverrun
:
11645 /* args: timer_t timerid */
11646 target_timer_t timerid
= get_timer_id(arg1
);
11651 timer_t htimer
= g_posix_timers
[timerid
];
11652 ret
= get_errno(timer_getoverrun(htimer
));
11654 fd_trans_unregister(ret
);
11659 #ifdef TARGET_NR_timer_delete
11660 case TARGET_NR_timer_delete
:
11662 /* args: timer_t timerid */
11663 target_timer_t timerid
= get_timer_id(arg1
);
11668 timer_t htimer
= g_posix_timers
[timerid
];
11669 ret
= get_errno(timer_delete(htimer
));
11670 g_posix_timers
[timerid
] = 0;
11676 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11677 case TARGET_NR_timerfd_create
:
11678 ret
= get_errno(timerfd_create(arg1
,
11679 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
11683 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11684 case TARGET_NR_timerfd_gettime
:
11686 struct itimerspec its_curr
;
11688 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
11690 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
11697 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11698 case TARGET_NR_timerfd_settime
:
11700 struct itimerspec its_new
, its_old
, *p_new
;
11703 if (target_to_host_itimerspec(&its_new
, arg3
)) {
11711 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
11713 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
11720 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11721 case TARGET_NR_ioprio_get
:
11722 ret
= get_errno(ioprio_get(arg1
, arg2
));
11726 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11727 case TARGET_NR_ioprio_set
:
11728 ret
= get_errno(ioprio_set(arg1
, arg2
, arg3
));
11732 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11733 case TARGET_NR_setns
:
11734 ret
= get_errno(setns(arg1
, arg2
));
11737 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11738 case TARGET_NR_unshare
:
11739 ret
= get_errno(unshare(arg1
));
11745 gemu_log("qemu: Unsupported syscall: %d\n", num
);
11746 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
11747 unimplemented_nowarn
:
11749 ret
= -TARGET_ENOSYS
;
11754 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
11757 print_syscall_ret(num
, ret
);
11758 trace_guest_user_syscall_ret(cpu
, num
, ret
);
11761 ret
= -TARGET_EFAULT
;