4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
29 #include <sys/mount.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
36 #include <linux/capability.h>
39 int __clone2(int (*fn
)(void *), void *child_stack_base
,
40 size_t stack_size
, int flags
, void *arg
, ...);
42 #include <sys/socket.h>
46 #include <sys/times.h>
49 #include <sys/statfs.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include "qemu-common.h"
60 #include <sys/timerfd.h>
66 #include <sys/eventfd.h>
69 #include <sys/epoll.h>
72 #include "qemu/xattr.h"
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
91 #include <linux/mtio.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <netpacket/packet.h>
104 #include <linux/netlink.h>
105 #ifdef CONFIG_RTNETLINK
106 #include <linux/rtnetlink.h>
107 #include <linux/if_bridge.h>
109 #include <linux/audit.h>
110 #include "linux_loop.h"
115 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
116 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
119 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
120 * once. This exercises the codepaths for restart.
122 //#define DEBUG_ERESTARTSYS
124 //#include <linux/msdos_fs.h>
125 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
126 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
136 #define _syscall0(type,name) \
137 static type name (void) \
139 return syscall(__NR_##name); \
142 #define _syscall1(type,name,type1,arg1) \
143 static type name (type1 arg1) \
145 return syscall(__NR_##name, arg1); \
148 #define _syscall2(type,name,type1,arg1,type2,arg2) \
149 static type name (type1 arg1,type2 arg2) \
151 return syscall(__NR_##name, arg1, arg2); \
154 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
155 static type name (type1 arg1,type2 arg2,type3 arg3) \
157 return syscall(__NR_##name, arg1, arg2, arg3); \
160 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
161 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
163 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
166 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
168 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
170 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
174 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
175 type5,arg5,type6,arg6) \
176 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
179 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
183 #define __NR_sys_uname __NR_uname
184 #define __NR_sys_getcwd1 __NR_getcwd
185 #define __NR_sys_getdents __NR_getdents
186 #define __NR_sys_getdents64 __NR_getdents64
187 #define __NR_sys_getpriority __NR_getpriority
188 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
189 #define __NR_sys_syslog __NR_syslog
190 #define __NR_sys_futex __NR_futex
191 #define __NR_sys_inotify_init __NR_inotify_init
192 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
193 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
195 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
197 #define __NR__llseek __NR_lseek
200 /* Newer kernel ports have llseek() instead of _llseek() */
201 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
202 #define TARGET_NR__llseek TARGET_NR_llseek
#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
   errno. */
static int gettid(void) {
    return -ENOSYS;
}
#endif
214 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
215 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
217 #if !defined(__NR_getdents) || \
218 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
219 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
221 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
222 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
223 loff_t
*, res
, uint
, wh
);
225 _syscall3(int,sys_rt_sigqueueinfo
,int,pid
,int,sig
,siginfo_t
*,uinfo
)
226 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
227 #ifdef __NR_exit_group
228 _syscall1(int,exit_group
,int,error_code
)
230 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
231 _syscall1(int,set_tid_address
,int *,tidptr
)
233 #if defined(TARGET_NR_futex) && defined(__NR_futex)
234 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
235 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
237 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
238 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
239 unsigned long *, user_mask_ptr
);
240 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
241 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
242 unsigned long *, user_mask_ptr
);
243 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
245 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
246 struct __user_cap_data_struct
*, data
);
247 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
248 struct __user_cap_data_struct
*, data
);
249 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
250 _syscall2(int, ioprio_get
, int, which
, int, who
)
252 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
253 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
255 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
256 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
259 static bitmask_transtbl fcntl_flags_tbl
[] = {
260 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
261 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
262 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
263 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
264 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
265 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
266 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
267 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
268 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
269 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
270 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
271 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
272 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
273 #if defined(O_DIRECT)
274 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
276 #if defined(O_NOATIME)
277 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
279 #if defined(O_CLOEXEC)
280 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
283 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
285 /* Don't terminate the list prematurely on 64-bit host+guest. */
286 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
287 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
292 typedef abi_long (*TargetFdDataFunc
)(void *, size_t);
293 typedef abi_long (*TargetFdAddrFunc
)(void *, abi_ulong
, socklen_t
);
294 typedef struct TargetFdTrans
{
295 TargetFdDataFunc host_to_target_data
;
296 TargetFdDataFunc target_to_host_data
;
297 TargetFdAddrFunc target_to_host_addr
;
300 static TargetFdTrans
**target_fd_trans
;
302 static unsigned int target_fd_max
;
304 static TargetFdDataFunc
fd_trans_target_to_host_data(int fd
)
306 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
307 return target_fd_trans
[fd
]->target_to_host_data
;
312 static TargetFdDataFunc
fd_trans_host_to_target_data(int fd
)
314 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
315 return target_fd_trans
[fd
]->host_to_target_data
;
320 static TargetFdAddrFunc
fd_trans_target_to_host_addr(int fd
)
322 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
323 return target_fd_trans
[fd
]->target_to_host_addr
;
328 static void fd_trans_register(int fd
, TargetFdTrans
*trans
)
332 if (fd
>= target_fd_max
) {
333 oldmax
= target_fd_max
;
334 target_fd_max
= ((fd
>> 6) + 1) << 6; /* by slice of 64 entries */
335 target_fd_trans
= g_renew(TargetFdTrans
*,
336 target_fd_trans
, target_fd_max
);
337 memset((void *)(target_fd_trans
+ oldmax
), 0,
338 (target_fd_max
- oldmax
) * sizeof(TargetFdTrans
*));
340 target_fd_trans
[fd
] = trans
;
343 static void fd_trans_unregister(int fd
)
345 if (fd
>= 0 && fd
< target_fd_max
) {
346 target_fd_trans
[fd
] = NULL
;
350 static void fd_trans_dup(int oldfd
, int newfd
)
352 fd_trans_unregister(newfd
);
353 if (oldfd
< target_fd_max
&& target_fd_trans
[oldfd
]) {
354 fd_trans_register(newfd
, target_fd_trans
[oldfd
]);
/* getcwd() wrapper with kernel-style return semantics: on success
 * return the length of the path including the trailing NUL; on failure
 * return -1 with errno set by getcwd(). */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return (-1);
    }
    return strlen(buf)+1;
}
#ifdef TARGET_NR_utimensat
#ifdef CONFIG_UTIMENSAT
/* Host libc has utimensat(); a NULL pathname means "operate on dirfd
 * itself", which maps to futimens(). */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    if (pathname == NULL)
        return futimens(dirfd, times);
    else
        return utimensat(dirfd, pathname, times, flags);
}
#elif defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
/* Neither libc nor kernel support: report ENOSYS. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin wrapper so the syscall dispatch table has a uniform sys_* name. */
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
/* NOTE(review): field layout reconstructed as the kernel's 64-bit rlimit
 * (two u64 members) — confirm against linux/resource.h. */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};

_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maxiumum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

/* Claim and return the index of the first free slot in g_posix_timers[],
 * or -1 if all 32 are in use.  The slot is marked busy with a dummy
 * (timer_t)1 value until the real timer id is stored. */
static inline int next_free_host_timer(void)
{
    int k ;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
/* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
478 #define ERRNO_TABLE_SIZE 1200
480 /* target_to_host_errno_table[] is initialized from
481 * host_to_target_errno_table[] in syscall_init(). */
482 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
486 * This list is the union of errno values overridden in asm-<arch>/errno.h
487 * minus the errnos that are not actually generic to all archs.
489 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
490 [EAGAIN
] = TARGET_EAGAIN
,
491 [EIDRM
] = TARGET_EIDRM
,
492 [ECHRNG
] = TARGET_ECHRNG
,
493 [EL2NSYNC
] = TARGET_EL2NSYNC
,
494 [EL3HLT
] = TARGET_EL3HLT
,
495 [EL3RST
] = TARGET_EL3RST
,
496 [ELNRNG
] = TARGET_ELNRNG
,
497 [EUNATCH
] = TARGET_EUNATCH
,
498 [ENOCSI
] = TARGET_ENOCSI
,
499 [EL2HLT
] = TARGET_EL2HLT
,
500 [EDEADLK
] = TARGET_EDEADLK
,
501 [ENOLCK
] = TARGET_ENOLCK
,
502 [EBADE
] = TARGET_EBADE
,
503 [EBADR
] = TARGET_EBADR
,
504 [EXFULL
] = TARGET_EXFULL
,
505 [ENOANO
] = TARGET_ENOANO
,
506 [EBADRQC
] = TARGET_EBADRQC
,
507 [EBADSLT
] = TARGET_EBADSLT
,
508 [EBFONT
] = TARGET_EBFONT
,
509 [ENOSTR
] = TARGET_ENOSTR
,
510 [ENODATA
] = TARGET_ENODATA
,
511 [ETIME
] = TARGET_ETIME
,
512 [ENOSR
] = TARGET_ENOSR
,
513 [ENONET
] = TARGET_ENONET
,
514 [ENOPKG
] = TARGET_ENOPKG
,
515 [EREMOTE
] = TARGET_EREMOTE
,
516 [ENOLINK
] = TARGET_ENOLINK
,
517 [EADV
] = TARGET_EADV
,
518 [ESRMNT
] = TARGET_ESRMNT
,
519 [ECOMM
] = TARGET_ECOMM
,
520 [EPROTO
] = TARGET_EPROTO
,
521 [EDOTDOT
] = TARGET_EDOTDOT
,
522 [EMULTIHOP
] = TARGET_EMULTIHOP
,
523 [EBADMSG
] = TARGET_EBADMSG
,
524 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
525 [EOVERFLOW
] = TARGET_EOVERFLOW
,
526 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
527 [EBADFD
] = TARGET_EBADFD
,
528 [EREMCHG
] = TARGET_EREMCHG
,
529 [ELIBACC
] = TARGET_ELIBACC
,
530 [ELIBBAD
] = TARGET_ELIBBAD
,
531 [ELIBSCN
] = TARGET_ELIBSCN
,
532 [ELIBMAX
] = TARGET_ELIBMAX
,
533 [ELIBEXEC
] = TARGET_ELIBEXEC
,
534 [EILSEQ
] = TARGET_EILSEQ
,
535 [ENOSYS
] = TARGET_ENOSYS
,
536 [ELOOP
] = TARGET_ELOOP
,
537 [ERESTART
] = TARGET_ERESTART
,
538 [ESTRPIPE
] = TARGET_ESTRPIPE
,
539 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
540 [EUSERS
] = TARGET_EUSERS
,
541 [ENOTSOCK
] = TARGET_ENOTSOCK
,
542 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
543 [EMSGSIZE
] = TARGET_EMSGSIZE
,
544 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
545 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
546 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
547 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
548 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
549 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
550 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
551 [EADDRINUSE
] = TARGET_EADDRINUSE
,
552 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
553 [ENETDOWN
] = TARGET_ENETDOWN
,
554 [ENETUNREACH
] = TARGET_ENETUNREACH
,
555 [ENETRESET
] = TARGET_ENETRESET
,
556 [ECONNABORTED
] = TARGET_ECONNABORTED
,
557 [ECONNRESET
] = TARGET_ECONNRESET
,
558 [ENOBUFS
] = TARGET_ENOBUFS
,
559 [EISCONN
] = TARGET_EISCONN
,
560 [ENOTCONN
] = TARGET_ENOTCONN
,
561 [EUCLEAN
] = TARGET_EUCLEAN
,
562 [ENOTNAM
] = TARGET_ENOTNAM
,
563 [ENAVAIL
] = TARGET_ENAVAIL
,
564 [EISNAM
] = TARGET_EISNAM
,
565 [EREMOTEIO
] = TARGET_EREMOTEIO
,
566 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
567 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
568 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
569 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
570 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
571 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
572 [EALREADY
] = TARGET_EALREADY
,
573 [EINPROGRESS
] = TARGET_EINPROGRESS
,
574 [ESTALE
] = TARGET_ESTALE
,
575 [ECANCELED
] = TARGET_ECANCELED
,
576 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
577 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
579 [ENOKEY
] = TARGET_ENOKEY
,
582 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
585 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
588 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
591 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
593 #ifdef ENOTRECOVERABLE
594 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
598 static inline int host_to_target_errno(int err
)
600 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
601 host_to_target_errno_table
[err
]) {
602 return host_to_target_errno_table
[err
];
607 static inline int target_to_host_errno(int err
)
609 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
610 target_to_host_errno_table
[err
]) {
611 return target_to_host_errno_table
[err
];
616 static inline abi_long
get_errno(abi_long ret
)
619 return -host_to_target_errno(errno
);
624 static inline int is_error(abi_long ret
)
626 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
629 const char *target_strerror(int err
)
631 if (err
== TARGET_ERESTARTSYS
) {
632 return "To be restarted";
634 if (err
== TARGET_QEMU_ESIGRETURN
) {
635 return "Successful exit from sigreturn";
638 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
641 return strerror(target_to_host_errno(err
));
644 #define safe_syscall0(type, name) \
645 static type safe_##name(void) \
647 return safe_syscall(__NR_##name); \
650 #define safe_syscall1(type, name, type1, arg1) \
651 static type safe_##name(type1 arg1) \
653 return safe_syscall(__NR_##name, arg1); \
656 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
657 static type safe_##name(type1 arg1, type2 arg2) \
659 return safe_syscall(__NR_##name, arg1, arg2); \
662 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
663 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
665 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
668 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
670 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
672 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
675 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
676 type4, arg4, type5, arg5) \
677 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
680 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
683 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
684 type4, arg4, type5, arg5, type6, arg6) \
685 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
686 type5 arg5, type6 arg6) \
688 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
691 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
692 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
693 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
694 int, flags
, mode_t
, mode
)
695 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
696 struct rusage
*, rusage
)
697 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
698 int, options
, struct rusage
*, rusage
)
699 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
700 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
701 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
702 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
703 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
705 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
706 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
708 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
709 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
710 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
711 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
712 safe_syscall2(int, tkill
, int, tid
, int, sig
)
713 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
714 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
715 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
716 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
718 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
719 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
720 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
721 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
722 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
723 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
724 safe_syscall2(int, flock
, int, fd
, int, operation
)
725 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
726 const struct timespec
*, uts
, size_t, sigsetsize
)
727 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
729 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
730 struct timespec
*, rem
)
731 #ifdef TARGET_NR_clock_nanosleep
732 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
733 const struct timespec
*, req
, struct timespec
*, rem
)
736 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
738 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
739 long, msgtype
, int, flags
)
740 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
741 unsigned, nsops
, const struct timespec
*, timeout
)
743 /* This host kernel architecture uses a single ipc syscall; fake up
744 * wrappers for the sub-operations to hide this implementation detail.
745 * Annoyingly we can't include linux/ipc.h to get the constant definitions
746 * for the call parameter because some structs in there conflict with the
747 * sys/ipc.h ones. So we just define them here, and rely on them being
748 * the same for all host architectures.
750 #define Q_SEMTIMEDOP 4
753 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
755 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
756 void *, ptr
, long, fifth
)
757 static int safe_msgsnd(int msgid
, const void *msgp
, size_t sz
, int flags
)
759 return safe_ipc(Q_IPCCALL(0, Q_MSGSND
), msgid
, sz
, flags
, (void *)msgp
, 0);
761 static int safe_msgrcv(int msgid
, void *msgp
, size_t sz
, long type
, int flags
)
763 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV
), msgid
, sz
, flags
, msgp
, type
);
765 static int safe_semtimedop(int semid
, struct sembuf
*tsops
, unsigned nsops
,
766 const struct timespec
*timeout
)
768 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP
), semid
, nsops
, 0, tsops
,
772 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
773 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
774 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
775 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
776 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
778 /* We do ioctl like this rather than via safe_syscall3 to preserve the
779 * "third argument might be integer or pointer or not present" behaviour of
782 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
783 /* Similarly for fcntl. Note that callers must always:
784 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
785 * use the flock64 struct rather than unsuffixed flock
786 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
789 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
791 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
794 static inline int host_to_target_sock_type(int host_type
)
798 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
800 target_type
= TARGET_SOCK_DGRAM
;
803 target_type
= TARGET_SOCK_STREAM
;
806 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
810 #if defined(SOCK_CLOEXEC)
811 if (host_type
& SOCK_CLOEXEC
) {
812 target_type
|= TARGET_SOCK_CLOEXEC
;
816 #if defined(SOCK_NONBLOCK)
817 if (host_type
& SOCK_NONBLOCK
) {
818 target_type
|= TARGET_SOCK_NONBLOCK
;
825 static abi_ulong target_brk
;
826 static abi_ulong target_original_brk
;
827 static abi_ulong brk_page
;
829 void target_set_brk(abi_ulong new_brk
)
831 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
832 brk_page
= HOST_PAGE_ALIGN(target_brk
);
835 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
836 #define DEBUGF_BRK(message, args...)
838 /* do_brk() must return target values and target errnos. */
839 abi_long
do_brk(abi_ulong new_brk
)
841 abi_long mapped_addr
;
842 abi_ulong new_alloc_size
;
844 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
847 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
850 if (new_brk
< target_original_brk
) {
851 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
856 /* If the new brk is less than the highest page reserved to the
857 * target heap allocation, set it and we're almost done... */
858 if (new_brk
<= brk_page
) {
859 /* Heap contents are initialized to zero, as for anonymous
861 if (new_brk
> target_brk
) {
862 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
864 target_brk
= new_brk
;
865 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
869 /* We need to allocate more memory after the brk... Note that
870 * we don't use MAP_FIXED because that will map over the top of
871 * any existing mapping (like the one with the host libc or qemu
872 * itself); instead we treat "mapped but at wrong address" as
873 * a failure and unmap again.
875 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
876 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
877 PROT_READ
|PROT_WRITE
,
878 MAP_ANON
|MAP_PRIVATE
, 0, 0));
880 if (mapped_addr
== brk_page
) {
881 /* Heap contents are initialized to zero, as for anonymous
882 * mapped pages. Technically the new pages are already
883 * initialized to zero since they *are* anonymous mapped
884 * pages, however we have to take care with the contents that
885 * come from the remaining part of the previous page: it may
886 * contains garbage data due to a previous heap usage (grown
888 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
890 target_brk
= new_brk
;
891 brk_page
= HOST_PAGE_ALIGN(target_brk
);
892 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
895 } else if (mapped_addr
!= -1) {
896 /* Mapped but at wrong address, meaning there wasn't actually
897 * enough space for this brk.
899 target_munmap(mapped_addr
, new_alloc_size
);
901 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
904 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
907 #if defined(TARGET_ALPHA)
908 /* We (partially) emulate OSF/1 on Alpha, which requires we
909 return a proper errno, not an unchanged brk value. */
910 return -TARGET_ENOMEM
;
912 /* For everything else, return the previous break. */
916 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
917 abi_ulong target_fds_addr
,
921 abi_ulong b
, *target_fds
;
923 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
924 if (!(target_fds
= lock_user(VERIFY_READ
,
926 sizeof(abi_ulong
) * nw
,
928 return -TARGET_EFAULT
;
932 for (i
= 0; i
< nw
; i
++) {
933 /* grab the abi_ulong */
934 __get_user(b
, &target_fds
[i
]);
935 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
936 /* check the bit inside the abi_ulong */
943 unlock_user(target_fds
, target_fds_addr
, 0);
948 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
949 abi_ulong target_fds_addr
,
952 if (target_fds_addr
) {
953 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
954 return -TARGET_EFAULT
;
962 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
968 abi_ulong
*target_fds
;
970 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
971 if (!(target_fds
= lock_user(VERIFY_WRITE
,
973 sizeof(abi_ulong
) * nw
,
975 return -TARGET_EFAULT
;
978 for (i
= 0; i
< nw
; i
++) {
980 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
981 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
984 __put_user(v
, &target_fds
[i
]);
987 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
992 #if defined(__alpha__)
998 static inline abi_long
host_to_target_clock_t(long ticks
)
1000 #if HOST_HZ == TARGET_HZ
1003 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
1007 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
1008 const struct rusage
*rusage
)
1010 struct target_rusage
*target_rusage
;
1012 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
1013 return -TARGET_EFAULT
;
1014 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
1015 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
1016 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
1017 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
1018 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
1019 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
1020 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
1021 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
1022 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
1023 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
1024 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1025 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1026 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1027 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1028 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1029 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1030 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1031 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1032 unlock_user_struct(target_rusage
, target_addr
, 1);
1037 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1039 abi_ulong target_rlim_swap
;
1042 target_rlim_swap
= tswapal(target_rlim
);
1043 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1044 return RLIM_INFINITY
;
1046 result
= target_rlim_swap
;
1047 if (target_rlim_swap
!= (rlim_t
)result
)
1048 return RLIM_INFINITY
;
1053 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1055 abi_ulong target_rlim_swap
;
1058 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1059 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1061 target_rlim_swap
= rlim
;
1062 result
= tswapal(target_rlim_swap
);
1067 static inline int target_to_host_resource(int code
)
1070 case TARGET_RLIMIT_AS
:
1072 case TARGET_RLIMIT_CORE
:
1074 case TARGET_RLIMIT_CPU
:
1076 case TARGET_RLIMIT_DATA
:
1078 case TARGET_RLIMIT_FSIZE
:
1079 return RLIMIT_FSIZE
;
1080 case TARGET_RLIMIT_LOCKS
:
1081 return RLIMIT_LOCKS
;
1082 case TARGET_RLIMIT_MEMLOCK
:
1083 return RLIMIT_MEMLOCK
;
1084 case TARGET_RLIMIT_MSGQUEUE
:
1085 return RLIMIT_MSGQUEUE
;
1086 case TARGET_RLIMIT_NICE
:
1088 case TARGET_RLIMIT_NOFILE
:
1089 return RLIMIT_NOFILE
;
1090 case TARGET_RLIMIT_NPROC
:
1091 return RLIMIT_NPROC
;
1092 case TARGET_RLIMIT_RSS
:
1094 case TARGET_RLIMIT_RTPRIO
:
1095 return RLIMIT_RTPRIO
;
1096 case TARGET_RLIMIT_SIGPENDING
:
1097 return RLIMIT_SIGPENDING
;
1098 case TARGET_RLIMIT_STACK
:
1099 return RLIMIT_STACK
;
1105 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1106 abi_ulong target_tv_addr
)
1108 struct target_timeval
*target_tv
;
1110 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
1111 return -TARGET_EFAULT
;
1113 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1114 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1116 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1121 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1122 const struct timeval
*tv
)
1124 struct target_timeval
*target_tv
;
1126 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
1127 return -TARGET_EFAULT
;
1129 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1130 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1132 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1137 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1138 abi_ulong target_tz_addr
)
1140 struct target_timezone
*target_tz
;
1142 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1143 return -TARGET_EFAULT
;
1146 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1147 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1149 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1154 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1157 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1158 abi_ulong target_mq_attr_addr
)
1160 struct target_mq_attr
*target_mq_attr
;
1162 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1163 target_mq_attr_addr
, 1))
1164 return -TARGET_EFAULT
;
1166 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1167 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1168 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1169 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1171 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1176 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1177 const struct mq_attr
*attr
)
1179 struct target_mq_attr
*target_mq_attr
;
1181 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1182 target_mq_attr_addr
, 0))
1183 return -TARGET_EFAULT
;
1185 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1186 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1187 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1188 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1190 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1196 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1197 /* do_select() must return target values and target errnos. */
1198 static abi_long
do_select(int n
,
1199 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1200 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1202 fd_set rfds
, wfds
, efds
;
1203 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1205 struct timespec ts
, *ts_ptr
;
1208 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1212 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1216 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1221 if (target_tv_addr
) {
1222 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1223 return -TARGET_EFAULT
;
1224 ts
.tv_sec
= tv
.tv_sec
;
1225 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1231 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1234 if (!is_error(ret
)) {
1235 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1236 return -TARGET_EFAULT
;
1237 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1238 return -TARGET_EFAULT
;
1239 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1240 return -TARGET_EFAULT
;
1242 if (target_tv_addr
) {
1243 tv
.tv_sec
= ts
.tv_sec
;
1244 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1245 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1246 return -TARGET_EFAULT
;
1255 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1258 return pipe2(host_pipe
, flags
);
1264 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1265 int flags
, int is_pipe2
)
1269 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1272 return get_errno(ret
);
1274 /* Several targets have special calling conventions for the original
1275 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1277 #if defined(TARGET_ALPHA)
1278 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1279 return host_pipe
[0];
1280 #elif defined(TARGET_MIPS)
1281 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1282 return host_pipe
[0];
1283 #elif defined(TARGET_SH4)
1284 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1285 return host_pipe
[0];
1286 #elif defined(TARGET_SPARC)
1287 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1288 return host_pipe
[0];
1292 if (put_user_s32(host_pipe
[0], pipedes
)
1293 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1294 return -TARGET_EFAULT
;
1295 return get_errno(ret
);
1298 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1299 abi_ulong target_addr
,
1302 struct target_ip_mreqn
*target_smreqn
;
1304 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1306 return -TARGET_EFAULT
;
1307 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1308 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1309 if (len
== sizeof(struct target_ip_mreqn
))
1310 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1311 unlock_user(target_smreqn
, target_addr
, 0);
1316 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1317 abi_ulong target_addr
,
1320 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1321 sa_family_t sa_family
;
1322 struct target_sockaddr
*target_saddr
;
1324 if (fd_trans_target_to_host_addr(fd
)) {
1325 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1328 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1330 return -TARGET_EFAULT
;
1332 sa_family
= tswap16(target_saddr
->sa_family
);
1334 /* Oops. The caller might send a incomplete sun_path; sun_path
1335 * must be terminated by \0 (see the manual page), but
1336 * unfortunately it is quite common to specify sockaddr_un
1337 * length as "strlen(x->sun_path)" while it should be
1338 * "strlen(...) + 1". We'll fix that here if needed.
1339 * Linux kernel has a similar feature.
1342 if (sa_family
== AF_UNIX
) {
1343 if (len
< unix_maxlen
&& len
> 0) {
1344 char *cp
= (char*)target_saddr
;
1346 if ( cp
[len
-1] && !cp
[len
] )
1349 if (len
> unix_maxlen
)
1353 memcpy(addr
, target_saddr
, len
);
1354 addr
->sa_family
= sa_family
;
1355 if (sa_family
== AF_NETLINK
) {
1356 struct sockaddr_nl
*nladdr
;
1358 nladdr
= (struct sockaddr_nl
*)addr
;
1359 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1360 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1361 } else if (sa_family
== AF_PACKET
) {
1362 struct target_sockaddr_ll
*lladdr
;
1364 lladdr
= (struct target_sockaddr_ll
*)addr
;
1365 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1366 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1368 unlock_user(target_saddr
, target_addr
, 0);
1373 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1374 struct sockaddr
*addr
,
1377 struct target_sockaddr
*target_saddr
;
1383 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1385 return -TARGET_EFAULT
;
1386 memcpy(target_saddr
, addr
, len
);
1387 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1388 sizeof(target_saddr
->sa_family
)) {
1389 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1391 if (addr
->sa_family
== AF_NETLINK
&& len
>= sizeof(struct sockaddr_nl
)) {
1392 struct sockaddr_nl
*target_nl
= (struct sockaddr_nl
*)target_saddr
;
1393 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1394 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1395 } else if (addr
->sa_family
== AF_PACKET
) {
1396 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1397 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1398 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1400 unlock_user(target_saddr
, target_addr
, len
);
1405 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1406 struct target_msghdr
*target_msgh
)
1408 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1409 abi_long msg_controllen
;
1410 abi_ulong target_cmsg_addr
;
1411 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1412 socklen_t space
= 0;
1414 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1415 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1417 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1418 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1419 target_cmsg_start
= target_cmsg
;
1421 return -TARGET_EFAULT
;
1423 while (cmsg
&& target_cmsg
) {
1424 void *data
= CMSG_DATA(cmsg
);
1425 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1427 int len
= tswapal(target_cmsg
->cmsg_len
)
1428 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
1430 space
+= CMSG_SPACE(len
);
1431 if (space
> msgh
->msg_controllen
) {
1432 space
-= CMSG_SPACE(len
);
1433 /* This is a QEMU bug, since we allocated the payload
1434 * area ourselves (unlike overflow in host-to-target
1435 * conversion, which is just the guest giving us a buffer
1436 * that's too small). It can't happen for the payload types
1437 * we currently support; if it becomes an issue in future
1438 * we would need to improve our allocation strategy to
1439 * something more intelligent than "twice the size of the
1440 * target buffer we're reading from".
1442 gemu_log("Host cmsg overflow\n");
1446 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1447 cmsg
->cmsg_level
= SOL_SOCKET
;
1449 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1451 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1452 cmsg
->cmsg_len
= CMSG_LEN(len
);
1454 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1455 int *fd
= (int *)data
;
1456 int *target_fd
= (int *)target_data
;
1457 int i
, numfds
= len
/ sizeof(int);
1459 for (i
= 0; i
< numfds
; i
++) {
1460 __get_user(fd
[i
], target_fd
+ i
);
1462 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1463 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1464 struct ucred
*cred
= (struct ucred
*)data
;
1465 struct target_ucred
*target_cred
=
1466 (struct target_ucred
*)target_data
;
1468 __get_user(cred
->pid
, &target_cred
->pid
);
1469 __get_user(cred
->uid
, &target_cred
->uid
);
1470 __get_user(cred
->gid
, &target_cred
->gid
);
1472 gemu_log("Unsupported ancillary data: %d/%d\n",
1473 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1474 memcpy(data
, target_data
, len
);
1477 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1478 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1481 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1483 msgh
->msg_controllen
= space
;
1487 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1488 struct msghdr
*msgh
)
1490 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1491 abi_long msg_controllen
;
1492 abi_ulong target_cmsg_addr
;
1493 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1494 socklen_t space
= 0;
1496 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1497 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1499 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1500 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1501 target_cmsg_start
= target_cmsg
;
1503 return -TARGET_EFAULT
;
1505 while (cmsg
&& target_cmsg
) {
1506 void *data
= CMSG_DATA(cmsg
);
1507 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1509 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
1510 int tgt_len
, tgt_space
;
1512 /* We never copy a half-header but may copy half-data;
1513 * this is Linux's behaviour in put_cmsg(). Note that
1514 * truncation here is a guest problem (which we report
1515 * to the guest via the CTRUNC bit), unlike truncation
1516 * in target_to_host_cmsg, which is a QEMU bug.
1518 if (msg_controllen
< sizeof(struct cmsghdr
)) {
1519 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1523 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1524 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1526 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1528 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1530 tgt_len
= TARGET_CMSG_LEN(len
);
1532 /* Payload types which need a different size of payload on
1533 * the target must adjust tgt_len here.
1535 switch (cmsg
->cmsg_level
) {
1537 switch (cmsg
->cmsg_type
) {
1539 tgt_len
= sizeof(struct target_timeval
);
1548 if (msg_controllen
< tgt_len
) {
1549 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1550 tgt_len
= msg_controllen
;
1553 /* We must now copy-and-convert len bytes of payload
1554 * into tgt_len bytes of destination space. Bear in mind
1555 * that in both source and destination we may be dealing
1556 * with a truncated value!
1558 switch (cmsg
->cmsg_level
) {
1560 switch (cmsg
->cmsg_type
) {
1563 int *fd
= (int *)data
;
1564 int *target_fd
= (int *)target_data
;
1565 int i
, numfds
= tgt_len
/ sizeof(int);
1567 for (i
= 0; i
< numfds
; i
++) {
1568 __put_user(fd
[i
], target_fd
+ i
);
1574 struct timeval
*tv
= (struct timeval
*)data
;
1575 struct target_timeval
*target_tv
=
1576 (struct target_timeval
*)target_data
;
1578 if (len
!= sizeof(struct timeval
) ||
1579 tgt_len
!= sizeof(struct target_timeval
)) {
1583 /* copy struct timeval to target */
1584 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1585 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1588 case SCM_CREDENTIALS
:
1590 struct ucred
*cred
= (struct ucred
*)data
;
1591 struct target_ucred
*target_cred
=
1592 (struct target_ucred
*)target_data
;
1594 __put_user(cred
->pid
, &target_cred
->pid
);
1595 __put_user(cred
->uid
, &target_cred
->uid
);
1596 __put_user(cred
->gid
, &target_cred
->gid
);
1606 gemu_log("Unsupported ancillary data: %d/%d\n",
1607 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1608 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1609 if (tgt_len
> len
) {
1610 memset(target_data
+ len
, 0, tgt_len
- len
);
1614 target_cmsg
->cmsg_len
= tswapal(tgt_len
);
1615 tgt_space
= TARGET_CMSG_SPACE(len
);
1616 if (msg_controllen
< tgt_space
) {
1617 tgt_space
= msg_controllen
;
1619 msg_controllen
-= tgt_space
;
1621 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1622 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1625 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1627 target_msgh
->msg_controllen
= tswapal(space
);
/* Byte-swap every field of a netlink message header in place. */
static void tswap_nlmsghdr(struct nlmsghdr *nlh)
{
    nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
    nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
    nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
    nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
    nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
}
1640 static abi_long
host_to_target_for_each_nlmsg(struct nlmsghdr
*nlh
,
1642 abi_long (*host_to_target_nlmsg
)
1643 (struct nlmsghdr
*))
1648 while (len
> sizeof(struct nlmsghdr
)) {
1650 nlmsg_len
= nlh
->nlmsg_len
;
1651 if (nlmsg_len
< sizeof(struct nlmsghdr
) ||
1656 switch (nlh
->nlmsg_type
) {
1658 tswap_nlmsghdr(nlh
);
1664 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
1665 e
->error
= tswap32(e
->error
);
1666 tswap_nlmsghdr(&e
->msg
);
1667 tswap_nlmsghdr(nlh
);
1671 ret
= host_to_target_nlmsg(nlh
);
1673 tswap_nlmsghdr(nlh
);
1678 tswap_nlmsghdr(nlh
);
1679 len
-= NLMSG_ALIGN(nlmsg_len
);
1680 nlh
= (struct nlmsghdr
*)(((char*)nlh
) + NLMSG_ALIGN(nlmsg_len
));
1685 static abi_long
target_to_host_for_each_nlmsg(struct nlmsghdr
*nlh
,
1687 abi_long (*target_to_host_nlmsg
)
1688 (struct nlmsghdr
*))
1692 while (len
> sizeof(struct nlmsghdr
)) {
1693 if (tswap32(nlh
->nlmsg_len
) < sizeof(struct nlmsghdr
) ||
1694 tswap32(nlh
->nlmsg_len
) > len
) {
1697 tswap_nlmsghdr(nlh
);
1698 switch (nlh
->nlmsg_type
) {
1705 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
1706 e
->error
= tswap32(e
->error
);
1707 tswap_nlmsghdr(&e
->msg
);
1711 ret
= target_to_host_nlmsg(nlh
);
1716 len
-= NLMSG_ALIGN(nlh
->nlmsg_len
);
1717 nlh
= (struct nlmsghdr
*)(((char *)nlh
) + NLMSG_ALIGN(nlh
->nlmsg_len
));
1722 #ifdef CONFIG_RTNETLINK
1723 static abi_long
host_to_target_for_each_nlattr(struct nlattr
*nlattr
,
1724 size_t len
, void *context
,
1725 abi_long (*host_to_target_nlattr
)
1729 unsigned short nla_len
;
1732 while (len
> sizeof(struct nlattr
)) {
1733 nla_len
= nlattr
->nla_len
;
1734 if (nla_len
< sizeof(struct nlattr
) ||
1738 ret
= host_to_target_nlattr(nlattr
, context
);
1739 nlattr
->nla_len
= tswap16(nlattr
->nla_len
);
1740 nlattr
->nla_type
= tswap16(nlattr
->nla_type
);
1744 len
-= NLA_ALIGN(nla_len
);
1745 nlattr
= (struct nlattr
*)(((char *)nlattr
) + NLA_ALIGN(nla_len
));
1750 static abi_long
host_to_target_for_each_rtattr(struct rtattr
*rtattr
,
1752 abi_long (*host_to_target_rtattr
)
1755 unsigned short rta_len
;
1758 while (len
> sizeof(struct rtattr
)) {
1759 rta_len
= rtattr
->rta_len
;
1760 if (rta_len
< sizeof(struct rtattr
) ||
1764 ret
= host_to_target_rtattr(rtattr
);
1765 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
1766 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
1770 len
-= RTA_ALIGN(rta_len
);
1771 rtattr
= (struct rtattr
*)(((char *)rtattr
) + RTA_ALIGN(rta_len
));
1776 #define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)
1778 static abi_long
host_to_target_data_bridge_nlattr(struct nlattr
*nlattr
,
1785 switch (nlattr
->nla_type
) {
1787 case IFLA_BR_FDB_FLUSH
:
1790 case IFLA_BR_GROUP_ADDR
:
1793 case IFLA_BR_VLAN_FILTERING
:
1794 case IFLA_BR_TOPOLOGY_CHANGE
:
1795 case IFLA_BR_TOPOLOGY_CHANGE_DETECTED
:
1796 case IFLA_BR_MCAST_ROUTER
:
1797 case IFLA_BR_MCAST_SNOOPING
:
1798 case IFLA_BR_MCAST_QUERY_USE_IFADDR
:
1799 case IFLA_BR_MCAST_QUERIER
:
1800 case IFLA_BR_NF_CALL_IPTABLES
:
1801 case IFLA_BR_NF_CALL_IP6TABLES
:
1802 case IFLA_BR_NF_CALL_ARPTABLES
:
1805 case IFLA_BR_PRIORITY
:
1806 case IFLA_BR_VLAN_PROTOCOL
:
1807 case IFLA_BR_GROUP_FWD_MASK
:
1808 case IFLA_BR_ROOT_PORT
:
1809 case IFLA_BR_VLAN_DEFAULT_PVID
:
1810 u16
= NLA_DATA(nlattr
);
1811 *u16
= tswap16(*u16
);
1814 case IFLA_BR_FORWARD_DELAY
:
1815 case IFLA_BR_HELLO_TIME
:
1816 case IFLA_BR_MAX_AGE
:
1817 case IFLA_BR_AGEING_TIME
:
1818 case IFLA_BR_STP_STATE
:
1819 case IFLA_BR_ROOT_PATH_COST
:
1820 case IFLA_BR_MCAST_HASH_ELASTICITY
:
1821 case IFLA_BR_MCAST_HASH_MAX
:
1822 case IFLA_BR_MCAST_LAST_MEMBER_CNT
:
1823 case IFLA_BR_MCAST_STARTUP_QUERY_CNT
:
1824 u32
= NLA_DATA(nlattr
);
1825 *u32
= tswap32(*u32
);
1828 case IFLA_BR_HELLO_TIMER
:
1829 case IFLA_BR_TCN_TIMER
:
1830 case IFLA_BR_GC_TIMER
:
1831 case IFLA_BR_TOPOLOGY_CHANGE_TIMER
:
1832 case IFLA_BR_MCAST_LAST_MEMBER_INTVL
:
1833 case IFLA_BR_MCAST_MEMBERSHIP_INTVL
:
1834 case IFLA_BR_MCAST_QUERIER_INTVL
:
1835 case IFLA_BR_MCAST_QUERY_INTVL
:
1836 case IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
:
1837 case IFLA_BR_MCAST_STARTUP_QUERY_INTVL
:
1838 u64
= NLA_DATA(nlattr
);
1839 *u64
= tswap64(*u64
);
1841 /* ifla_bridge_id: uin8_t[] */
1842 case IFLA_BR_ROOT_ID
:
1843 case IFLA_BR_BRIDGE_ID
:
1846 gemu_log("Unknown IFLA_BR type %d\n", nlattr
->nla_type
);
1852 static abi_long
host_to_target_slave_data_bridge_nlattr(struct nlattr
*nlattr
,
1859 switch (nlattr
->nla_type
) {
1861 case IFLA_BRPORT_STATE
:
1862 case IFLA_BRPORT_MODE
:
1863 case IFLA_BRPORT_GUARD
:
1864 case IFLA_BRPORT_PROTECT
:
1865 case IFLA_BRPORT_FAST_LEAVE
:
1866 case IFLA_BRPORT_LEARNING
:
1867 case IFLA_BRPORT_UNICAST_FLOOD
:
1868 case IFLA_BRPORT_PROXYARP
:
1869 case IFLA_BRPORT_LEARNING_SYNC
:
1870 case IFLA_BRPORT_PROXYARP_WIFI
:
1871 case IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
:
1872 case IFLA_BRPORT_CONFIG_PENDING
:
1873 case IFLA_BRPORT_MULTICAST_ROUTER
:
1876 case IFLA_BRPORT_PRIORITY
:
1877 case IFLA_BRPORT_DESIGNATED_PORT
:
1878 case IFLA_BRPORT_DESIGNATED_COST
:
1879 case IFLA_BRPORT_ID
:
1880 case IFLA_BRPORT_NO
:
1881 u16
= NLA_DATA(nlattr
);
1882 *u16
= tswap16(*u16
);
1885 case IFLA_BRPORT_COST
:
1886 u32
= NLA_DATA(nlattr
);
1887 *u32
= tswap32(*u32
);
1890 case IFLA_BRPORT_MESSAGE_AGE_TIMER
:
1891 case IFLA_BRPORT_FORWARD_DELAY_TIMER
:
1892 case IFLA_BRPORT_HOLD_TIMER
:
1893 u64
= NLA_DATA(nlattr
);
1894 *u64
= tswap64(*u64
);
1896 /* ifla_bridge_id: uint8_t[] */
1897 case IFLA_BRPORT_ROOT_ID
:
1898 case IFLA_BRPORT_BRIDGE_ID
:
1901 gemu_log("Unknown IFLA_BRPORT type %d\n", nlattr
->nla_type
);
/* Carries the IFLA_INFO_KIND / IFLA_INFO_SLAVE_KIND strings seen
 * earlier in an IFLA_LINKINFO nest so the matching *_DATA attribute
 * can be dispatched to the right converter.
 * NOTE(review): members reconstructed from their uses in
 * host_to_target_data_linkinfo_nlattr — confirm against upstream. */
struct linkinfo_context {
    int len;
    char *name;
    int slave_len;
    char *slave_name;
};
1914 static abi_long
host_to_target_data_linkinfo_nlattr(struct nlattr
*nlattr
,
1917 struct linkinfo_context
*li_context
= context
;
1919 switch (nlattr
->nla_type
) {
1921 case IFLA_INFO_KIND
:
1922 li_context
->name
= NLA_DATA(nlattr
);
1923 li_context
->len
= nlattr
->nla_len
- NLA_HDRLEN
;
1925 case IFLA_INFO_SLAVE_KIND
:
1926 li_context
->slave_name
= NLA_DATA(nlattr
);
1927 li_context
->slave_len
= nlattr
->nla_len
- NLA_HDRLEN
;
1930 case IFLA_INFO_XSTATS
:
1931 /* FIXME: only used by CAN */
1934 case IFLA_INFO_DATA
:
1935 if (strncmp(li_context
->name
, "bridge",
1936 li_context
->len
) == 0) {
1937 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
),
1940 host_to_target_data_bridge_nlattr
);
1942 gemu_log("Unknown IFLA_INFO_KIND %s\n", li_context
->name
);
1945 case IFLA_INFO_SLAVE_DATA
:
1946 if (strncmp(li_context
->slave_name
, "bridge",
1947 li_context
->slave_len
) == 0) {
1948 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
),
1951 host_to_target_slave_data_bridge_nlattr
);
1953 gemu_log("Unknown IFLA_INFO_SLAVE_KIND %s\n",
1954 li_context
->slave_name
);
1958 gemu_log("Unknown host IFLA_INFO type: %d\n", nlattr
->nla_type
);
1965 static abi_long
host_to_target_data_inet_nlattr(struct nlattr
*nlattr
,
1971 switch (nlattr
->nla_type
) {
1972 case IFLA_INET_CONF
:
1973 u32
= NLA_DATA(nlattr
);
1974 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u32
);
1976 u32
[i
] = tswap32(u32
[i
]);
1980 gemu_log("Unknown host AF_INET type: %d\n", nlattr
->nla_type
);
1985 static abi_long
host_to_target_data_inet6_nlattr(struct nlattr
*nlattr
,
1990 struct ifla_cacheinfo
*ci
;
1993 switch (nlattr
->nla_type
) {
1995 case IFLA_INET6_TOKEN
:
1998 case IFLA_INET6_ADDR_GEN_MODE
:
2001 case IFLA_INET6_FLAGS
:
2002 u32
= NLA_DATA(nlattr
);
2003 *u32
= tswap32(*u32
);
2006 case IFLA_INET6_CONF
:
2007 u32
= NLA_DATA(nlattr
);
2008 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u32
);
2010 u32
[i
] = tswap32(u32
[i
]);
2013 /* ifla_cacheinfo */
2014 case IFLA_INET6_CACHEINFO
:
2015 ci
= NLA_DATA(nlattr
);
2016 ci
->max_reasm_len
= tswap32(ci
->max_reasm_len
);
2017 ci
->tstamp
= tswap32(ci
->tstamp
);
2018 ci
->reachable_time
= tswap32(ci
->reachable_time
);
2019 ci
->retrans_time
= tswap32(ci
->retrans_time
);
2022 case IFLA_INET6_STATS
:
2023 case IFLA_INET6_ICMP6STATS
:
2024 u64
= NLA_DATA(nlattr
);
2025 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u64
);
2027 u64
[i
] = tswap64(u64
[i
]);
2031 gemu_log("Unknown host AF_INET6 type: %d\n", nlattr
->nla_type
);
2036 static abi_long
host_to_target_data_spec_nlattr(struct nlattr
*nlattr
,
2039 switch (nlattr
->nla_type
) {
2041 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
), nlattr
->nla_len
,
2043 host_to_target_data_inet_nlattr
);
2045 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
), nlattr
->nla_len
,
2047 host_to_target_data_inet6_nlattr
);
2049 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr
->nla_type
);
2055 static abi_long
host_to_target_data_link_rtattr(struct rtattr
*rtattr
)
2058 struct rtnl_link_stats
*st
;
2059 struct rtnl_link_stats64
*st64
;
2060 struct rtnl_link_ifmap
*map
;
2061 struct linkinfo_context li_context
;
2063 switch (rtattr
->rta_type
) {
2066 case IFLA_BROADCAST
:
2072 case IFLA_OPERSTATE
:
2075 case IFLA_PROTO_DOWN
:
2082 case IFLA_CARRIER_CHANGES
:
2083 case IFLA_NUM_RX_QUEUES
:
2084 case IFLA_NUM_TX_QUEUES
:
2085 case IFLA_PROMISCUITY
:
2087 case IFLA_LINK_NETNSID
:
2091 u32
= RTA_DATA(rtattr
);
2092 *u32
= tswap32(*u32
);
2094 /* struct rtnl_link_stats */
2096 st
= RTA_DATA(rtattr
);
2097 st
->rx_packets
= tswap32(st
->rx_packets
);
2098 st
->tx_packets
= tswap32(st
->tx_packets
);
2099 st
->rx_bytes
= tswap32(st
->rx_bytes
);
2100 st
->tx_bytes
= tswap32(st
->tx_bytes
);
2101 st
->rx_errors
= tswap32(st
->rx_errors
);
2102 st
->tx_errors
= tswap32(st
->tx_errors
);
2103 st
->rx_dropped
= tswap32(st
->rx_dropped
);
2104 st
->tx_dropped
= tswap32(st
->tx_dropped
);
2105 st
->multicast
= tswap32(st
->multicast
);
2106 st
->collisions
= tswap32(st
->collisions
);
2108 /* detailed rx_errors: */
2109 st
->rx_length_errors
= tswap32(st
->rx_length_errors
);
2110 st
->rx_over_errors
= tswap32(st
->rx_over_errors
);
2111 st
->rx_crc_errors
= tswap32(st
->rx_crc_errors
);
2112 st
->rx_frame_errors
= tswap32(st
->rx_frame_errors
);
2113 st
->rx_fifo_errors
= tswap32(st
->rx_fifo_errors
);
2114 st
->rx_missed_errors
= tswap32(st
->rx_missed_errors
);
2116 /* detailed tx_errors */
2117 st
->tx_aborted_errors
= tswap32(st
->tx_aborted_errors
);
2118 st
->tx_carrier_errors
= tswap32(st
->tx_carrier_errors
);
2119 st
->tx_fifo_errors
= tswap32(st
->tx_fifo_errors
);
2120 st
->tx_heartbeat_errors
= tswap32(st
->tx_heartbeat_errors
);
2121 st
->tx_window_errors
= tswap32(st
->tx_window_errors
);
2124 st
->rx_compressed
= tswap32(st
->rx_compressed
);
2125 st
->tx_compressed
= tswap32(st
->tx_compressed
);
2127 /* struct rtnl_link_stats64 */
2129 st64
= RTA_DATA(rtattr
);
2130 st64
->rx_packets
= tswap64(st64
->rx_packets
);
2131 st64
->tx_packets
= tswap64(st64
->tx_packets
);
2132 st64
->rx_bytes
= tswap64(st64
->rx_bytes
);
2133 st64
->tx_bytes
= tswap64(st64
->tx_bytes
);
2134 st64
->rx_errors
= tswap64(st64
->rx_errors
);
2135 st64
->tx_errors
= tswap64(st64
->tx_errors
);
2136 st64
->rx_dropped
= tswap64(st64
->rx_dropped
);
2137 st64
->tx_dropped
= tswap64(st64
->tx_dropped
);
2138 st64
->multicast
= tswap64(st64
->multicast
);
2139 st64
->collisions
= tswap64(st64
->collisions
);
2141 /* detailed rx_errors: */
2142 st64
->rx_length_errors
= tswap64(st64
->rx_length_errors
);
2143 st64
->rx_over_errors
= tswap64(st64
->rx_over_errors
);
2144 st64
->rx_crc_errors
= tswap64(st64
->rx_crc_errors
);
2145 st64
->rx_frame_errors
= tswap64(st64
->rx_frame_errors
);
2146 st64
->rx_fifo_errors
= tswap64(st64
->rx_fifo_errors
);
2147 st64
->rx_missed_errors
= tswap64(st64
->rx_missed_errors
);
2149 /* detailed tx_errors */
2150 st64
->tx_aborted_errors
= tswap64(st64
->tx_aborted_errors
);
2151 st64
->tx_carrier_errors
= tswap64(st64
->tx_carrier_errors
);
2152 st64
->tx_fifo_errors
= tswap64(st64
->tx_fifo_errors
);
2153 st64
->tx_heartbeat_errors
= tswap64(st64
->tx_heartbeat_errors
);
2154 st64
->tx_window_errors
= tswap64(st64
->tx_window_errors
);
2157 st64
->rx_compressed
= tswap64(st64
->rx_compressed
);
2158 st64
->tx_compressed
= tswap64(st64
->tx_compressed
);
2160 /* struct rtnl_link_ifmap */
2162 map
= RTA_DATA(rtattr
);
2163 map
->mem_start
= tswap64(map
->mem_start
);
2164 map
->mem_end
= tswap64(map
->mem_end
);
2165 map
->base_addr
= tswap64(map
->base_addr
);
2166 map
->irq
= tswap16(map
->irq
);
2170 memset(&li_context
, 0, sizeof(li_context
));
2171 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2173 host_to_target_data_linkinfo_nlattr
);
2175 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2177 host_to_target_data_spec_nlattr
);
2179 gemu_log("Unknown host IFLA type: %d\n", rtattr
->rta_type
);
2185 static abi_long
host_to_target_data_addr_rtattr(struct rtattr
*rtattr
)
2188 struct ifa_cacheinfo
*ci
;
2190 switch (rtattr
->rta_type
) {
2191 /* binary: depends on family type */
2201 u32
= RTA_DATA(rtattr
);
2202 *u32
= tswap32(*u32
);
2204 /* struct ifa_cacheinfo */
2206 ci
= RTA_DATA(rtattr
);
2207 ci
->ifa_prefered
= tswap32(ci
->ifa_prefered
);
2208 ci
->ifa_valid
= tswap32(ci
->ifa_valid
);
2209 ci
->cstamp
= tswap32(ci
->cstamp
);
2210 ci
->tstamp
= tswap32(ci
->tstamp
);
2213 gemu_log("Unknown host IFA type: %d\n", rtattr
->rta_type
);
2219 static abi_long
host_to_target_data_route_rtattr(struct rtattr
*rtattr
)
2222 switch (rtattr
->rta_type
) {
2223 /* binary: depends on family type */
2232 u32
= RTA_DATA(rtattr
);
2233 *u32
= tswap32(*u32
);
2236 gemu_log("Unknown host RTA type: %d\n", rtattr
->rta_type
);
2242 static abi_long
host_to_target_link_rtattr(struct rtattr
*rtattr
,
2243 uint32_t rtattr_len
)
2245 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2246 host_to_target_data_link_rtattr
);
2249 static abi_long
host_to_target_addr_rtattr(struct rtattr
*rtattr
,
2250 uint32_t rtattr_len
)
2252 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2253 host_to_target_data_addr_rtattr
);
2256 static abi_long
host_to_target_route_rtattr(struct rtattr
*rtattr
,
2257 uint32_t rtattr_len
)
2259 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2260 host_to_target_data_route_rtattr
);
2263 static abi_long
host_to_target_data_route(struct nlmsghdr
*nlh
)
2266 struct ifinfomsg
*ifi
;
2267 struct ifaddrmsg
*ifa
;
2270 nlmsg_len
= nlh
->nlmsg_len
;
2271 switch (nlh
->nlmsg_type
) {
2275 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
2276 ifi
= NLMSG_DATA(nlh
);
2277 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2278 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2279 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2280 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2281 host_to_target_link_rtattr(IFLA_RTA(ifi
),
2282 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifi
)));
2288 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
2289 ifa
= NLMSG_DATA(nlh
);
2290 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2291 host_to_target_addr_rtattr(IFA_RTA(ifa
),
2292 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifa
)));
2298 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
2299 rtm
= NLMSG_DATA(nlh
);
2300 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2301 host_to_target_route_rtattr(RTM_RTA(rtm
),
2302 nlmsg_len
- NLMSG_LENGTH(sizeof(*rtm
)));
2306 return -TARGET_EINVAL
;
2311 static inline abi_long
host_to_target_nlmsg_route(struct nlmsghdr
*nlh
,
2314 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_route
);
2317 static abi_long
target_to_host_for_each_rtattr(struct rtattr
*rtattr
,
2319 abi_long (*target_to_host_rtattr
)
2324 while (len
>= sizeof(struct rtattr
)) {
2325 if (tswap16(rtattr
->rta_len
) < sizeof(struct rtattr
) ||
2326 tswap16(rtattr
->rta_len
) > len
) {
2329 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
2330 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
2331 ret
= target_to_host_rtattr(rtattr
);
2335 len
-= RTA_ALIGN(rtattr
->rta_len
);
2336 rtattr
= (struct rtattr
*)(((char *)rtattr
) +
2337 RTA_ALIGN(rtattr
->rta_len
));
2342 static abi_long
target_to_host_data_link_rtattr(struct rtattr
*rtattr
)
2344 switch (rtattr
->rta_type
) {
2346 gemu_log("Unknown target IFLA type: %d\n", rtattr
->rta_type
);
2352 static abi_long
target_to_host_data_addr_rtattr(struct rtattr
*rtattr
)
2354 switch (rtattr
->rta_type
) {
2355 /* binary: depends on family type */
2360 gemu_log("Unknown target IFA type: %d\n", rtattr
->rta_type
);
2366 static abi_long
target_to_host_data_route_rtattr(struct rtattr
*rtattr
)
2369 switch (rtattr
->rta_type
) {
2370 /* binary: depends on family type */
2377 u32
= RTA_DATA(rtattr
);
2378 *u32
= tswap32(*u32
);
2381 gemu_log("Unknown target RTA type: %d\n", rtattr
->rta_type
);
2387 static void target_to_host_link_rtattr(struct rtattr
*rtattr
,
2388 uint32_t rtattr_len
)
2390 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2391 target_to_host_data_link_rtattr
);
2394 static void target_to_host_addr_rtattr(struct rtattr
*rtattr
,
2395 uint32_t rtattr_len
)
2397 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2398 target_to_host_data_addr_rtattr
);
2401 static void target_to_host_route_rtattr(struct rtattr
*rtattr
,
2402 uint32_t rtattr_len
)
2404 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2405 target_to_host_data_route_rtattr
);
2408 static abi_long
target_to_host_data_route(struct nlmsghdr
*nlh
)
2410 struct ifinfomsg
*ifi
;
2411 struct ifaddrmsg
*ifa
;
2414 switch (nlh
->nlmsg_type
) {
2419 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
2420 ifi
= NLMSG_DATA(nlh
);
2421 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2422 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2423 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2424 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2425 target_to_host_link_rtattr(IFLA_RTA(ifi
), nlh
->nlmsg_len
-
2426 NLMSG_LENGTH(sizeof(*ifi
)));
2432 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
2433 ifa
= NLMSG_DATA(nlh
);
2434 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2435 target_to_host_addr_rtattr(IFA_RTA(ifa
), nlh
->nlmsg_len
-
2436 NLMSG_LENGTH(sizeof(*ifa
)));
2443 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
2444 rtm
= NLMSG_DATA(nlh
);
2445 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2446 target_to_host_route_rtattr(RTM_RTA(rtm
), nlh
->nlmsg_len
-
2447 NLMSG_LENGTH(sizeof(*rtm
)));
2451 return -TARGET_EOPNOTSUPP
;
2456 static abi_long
target_to_host_nlmsg_route(struct nlmsghdr
*nlh
, size_t len
)
2458 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_route
);
2460 #endif /* CONFIG_RTNETLINK */
2462 static abi_long
host_to_target_data_audit(struct nlmsghdr
*nlh
)
2464 switch (nlh
->nlmsg_type
) {
2466 gemu_log("Unknown host audit message type %d\n",
2468 return -TARGET_EINVAL
;
2473 static inline abi_long
host_to_target_nlmsg_audit(struct nlmsghdr
*nlh
,
2476 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_audit
);
2479 static abi_long
target_to_host_data_audit(struct nlmsghdr
*nlh
)
2481 switch (nlh
->nlmsg_type
) {
2483 case AUDIT_FIRST_USER_MSG
... AUDIT_LAST_USER_MSG
:
2484 case AUDIT_FIRST_USER_MSG2
... AUDIT_LAST_USER_MSG2
:
2487 gemu_log("Unknown target audit message type %d\n",
2489 return -TARGET_EINVAL
;
2495 static abi_long
target_to_host_nlmsg_audit(struct nlmsghdr
*nlh
, size_t len
)
2497 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_audit
);
2500 /* do_setsockopt() Must return target values and target errnos. */
2501 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
2502 abi_ulong optval_addr
, socklen_t optlen
)
2506 struct ip_mreqn
*ip_mreq
;
2507 struct ip_mreq_source
*ip_mreq_source
;
2511 /* TCP options all take an 'int' value. */
2512 if (optlen
< sizeof(uint32_t))
2513 return -TARGET_EINVAL
;
2515 if (get_user_u32(val
, optval_addr
))
2516 return -TARGET_EFAULT
;
2517 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2524 case IP_ROUTER_ALERT
:
2528 case IP_MTU_DISCOVER
:
2534 case IP_MULTICAST_TTL
:
2535 case IP_MULTICAST_LOOP
:
2537 if (optlen
>= sizeof(uint32_t)) {
2538 if (get_user_u32(val
, optval_addr
))
2539 return -TARGET_EFAULT
;
2540 } else if (optlen
>= 1) {
2541 if (get_user_u8(val
, optval_addr
))
2542 return -TARGET_EFAULT
;
2544 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2546 case IP_ADD_MEMBERSHIP
:
2547 case IP_DROP_MEMBERSHIP
:
2548 if (optlen
< sizeof (struct target_ip_mreq
) ||
2549 optlen
> sizeof (struct target_ip_mreqn
))
2550 return -TARGET_EINVAL
;
2552 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
2553 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
2554 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
2557 case IP_BLOCK_SOURCE
:
2558 case IP_UNBLOCK_SOURCE
:
2559 case IP_ADD_SOURCE_MEMBERSHIP
:
2560 case IP_DROP_SOURCE_MEMBERSHIP
:
2561 if (optlen
!= sizeof (struct target_ip_mreq_source
))
2562 return -TARGET_EINVAL
;
2564 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2565 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
2566 unlock_user (ip_mreq_source
, optval_addr
, 0);
2575 case IPV6_MTU_DISCOVER
:
2578 case IPV6_RECVPKTINFO
:
2580 if (optlen
< sizeof(uint32_t)) {
2581 return -TARGET_EINVAL
;
2583 if (get_user_u32(val
, optval_addr
)) {
2584 return -TARGET_EFAULT
;
2586 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2587 &val
, sizeof(val
)));
2596 /* struct icmp_filter takes an u32 value */
2597 if (optlen
< sizeof(uint32_t)) {
2598 return -TARGET_EINVAL
;
2601 if (get_user_u32(val
, optval_addr
)) {
2602 return -TARGET_EFAULT
;
2604 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2605 &val
, sizeof(val
)));
2612 case TARGET_SOL_SOCKET
:
2614 case TARGET_SO_RCVTIMEO
:
2618 optname
= SO_RCVTIMEO
;
2621 if (optlen
!= sizeof(struct target_timeval
)) {
2622 return -TARGET_EINVAL
;
2625 if (copy_from_user_timeval(&tv
, optval_addr
)) {
2626 return -TARGET_EFAULT
;
2629 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2633 case TARGET_SO_SNDTIMEO
:
2634 optname
= SO_SNDTIMEO
;
2636 case TARGET_SO_ATTACH_FILTER
:
2638 struct target_sock_fprog
*tfprog
;
2639 struct target_sock_filter
*tfilter
;
2640 struct sock_fprog fprog
;
2641 struct sock_filter
*filter
;
2644 if (optlen
!= sizeof(*tfprog
)) {
2645 return -TARGET_EINVAL
;
2647 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
2648 return -TARGET_EFAULT
;
2650 if (!lock_user_struct(VERIFY_READ
, tfilter
,
2651 tswapal(tfprog
->filter
), 0)) {
2652 unlock_user_struct(tfprog
, optval_addr
, 1);
2653 return -TARGET_EFAULT
;
2656 fprog
.len
= tswap16(tfprog
->len
);
2657 filter
= g_try_new(struct sock_filter
, fprog
.len
);
2658 if (filter
== NULL
) {
2659 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2660 unlock_user_struct(tfprog
, optval_addr
, 1);
2661 return -TARGET_ENOMEM
;
2663 for (i
= 0; i
< fprog
.len
; i
++) {
2664 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2665 filter
[i
].jt
= tfilter
[i
].jt
;
2666 filter
[i
].jf
= tfilter
[i
].jf
;
2667 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2669 fprog
.filter
= filter
;
2671 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2672 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2675 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2676 unlock_user_struct(tfprog
, optval_addr
, 1);
2679 case TARGET_SO_BINDTODEVICE
:
2681 char *dev_ifname
, *addr_ifname
;
2683 if (optlen
> IFNAMSIZ
- 1) {
2684 optlen
= IFNAMSIZ
- 1;
2686 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2688 return -TARGET_EFAULT
;
2690 optname
= SO_BINDTODEVICE
;
2691 addr_ifname
= alloca(IFNAMSIZ
);
2692 memcpy(addr_ifname
, dev_ifname
, optlen
);
2693 addr_ifname
[optlen
] = 0;
2694 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2695 addr_ifname
, optlen
));
2696 unlock_user (dev_ifname
, optval_addr
, 0);
2699 /* Options with 'int' argument. */
2700 case TARGET_SO_DEBUG
:
2703 case TARGET_SO_REUSEADDR
:
2704 optname
= SO_REUSEADDR
;
2706 case TARGET_SO_TYPE
:
2709 case TARGET_SO_ERROR
:
2712 case TARGET_SO_DONTROUTE
:
2713 optname
= SO_DONTROUTE
;
2715 case TARGET_SO_BROADCAST
:
2716 optname
= SO_BROADCAST
;
2718 case TARGET_SO_SNDBUF
:
2719 optname
= SO_SNDBUF
;
2721 case TARGET_SO_SNDBUFFORCE
:
2722 optname
= SO_SNDBUFFORCE
;
2724 case TARGET_SO_RCVBUF
:
2725 optname
= SO_RCVBUF
;
2727 case TARGET_SO_RCVBUFFORCE
:
2728 optname
= SO_RCVBUFFORCE
;
2730 case TARGET_SO_KEEPALIVE
:
2731 optname
= SO_KEEPALIVE
;
2733 case TARGET_SO_OOBINLINE
:
2734 optname
= SO_OOBINLINE
;
2736 case TARGET_SO_NO_CHECK
:
2737 optname
= SO_NO_CHECK
;
2739 case TARGET_SO_PRIORITY
:
2740 optname
= SO_PRIORITY
;
2743 case TARGET_SO_BSDCOMPAT
:
2744 optname
= SO_BSDCOMPAT
;
2747 case TARGET_SO_PASSCRED
:
2748 optname
= SO_PASSCRED
;
2750 case TARGET_SO_PASSSEC
:
2751 optname
= SO_PASSSEC
;
2753 case TARGET_SO_TIMESTAMP
:
2754 optname
= SO_TIMESTAMP
;
2756 case TARGET_SO_RCVLOWAT
:
2757 optname
= SO_RCVLOWAT
;
2763 if (optlen
< sizeof(uint32_t))
2764 return -TARGET_EINVAL
;
2766 if (get_user_u32(val
, optval_addr
))
2767 return -TARGET_EFAULT
;
2768 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
2772 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
2773 ret
= -TARGET_ENOPROTOOPT
;
2778 /* do_getsockopt() Must return target values and target errnos. */
2779 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
2780 abi_ulong optval_addr
, abi_ulong optlen
)
2787 case TARGET_SOL_SOCKET
:
2790 /* These don't just return a single integer */
2791 case TARGET_SO_LINGER
:
2792 case TARGET_SO_RCVTIMEO
:
2793 case TARGET_SO_SNDTIMEO
:
2794 case TARGET_SO_PEERNAME
:
2796 case TARGET_SO_PEERCRED
: {
2799 struct target_ucred
*tcr
;
2801 if (get_user_u32(len
, optlen
)) {
2802 return -TARGET_EFAULT
;
2805 return -TARGET_EINVAL
;
2809 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
2817 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
2818 return -TARGET_EFAULT
;
2820 __put_user(cr
.pid
, &tcr
->pid
);
2821 __put_user(cr
.uid
, &tcr
->uid
);
2822 __put_user(cr
.gid
, &tcr
->gid
);
2823 unlock_user_struct(tcr
, optval_addr
, 1);
2824 if (put_user_u32(len
, optlen
)) {
2825 return -TARGET_EFAULT
;
2829 /* Options with 'int' argument. */
2830 case TARGET_SO_DEBUG
:
2833 case TARGET_SO_REUSEADDR
:
2834 optname
= SO_REUSEADDR
;
2836 case TARGET_SO_TYPE
:
2839 case TARGET_SO_ERROR
:
2842 case TARGET_SO_DONTROUTE
:
2843 optname
= SO_DONTROUTE
;
2845 case TARGET_SO_BROADCAST
:
2846 optname
= SO_BROADCAST
;
2848 case TARGET_SO_SNDBUF
:
2849 optname
= SO_SNDBUF
;
2851 case TARGET_SO_RCVBUF
:
2852 optname
= SO_RCVBUF
;
2854 case TARGET_SO_KEEPALIVE
:
2855 optname
= SO_KEEPALIVE
;
2857 case TARGET_SO_OOBINLINE
:
2858 optname
= SO_OOBINLINE
;
2860 case TARGET_SO_NO_CHECK
:
2861 optname
= SO_NO_CHECK
;
2863 case TARGET_SO_PRIORITY
:
2864 optname
= SO_PRIORITY
;
2867 case TARGET_SO_BSDCOMPAT
:
2868 optname
= SO_BSDCOMPAT
;
2871 case TARGET_SO_PASSCRED
:
2872 optname
= SO_PASSCRED
;
2874 case TARGET_SO_TIMESTAMP
:
2875 optname
= SO_TIMESTAMP
;
2877 case TARGET_SO_RCVLOWAT
:
2878 optname
= SO_RCVLOWAT
;
2880 case TARGET_SO_ACCEPTCONN
:
2881 optname
= SO_ACCEPTCONN
;
2888 /* TCP options all take an 'int' value. */
2890 if (get_user_u32(len
, optlen
))
2891 return -TARGET_EFAULT
;
2893 return -TARGET_EINVAL
;
2895 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2898 if (optname
== SO_TYPE
) {
2899 val
= host_to_target_sock_type(val
);
2904 if (put_user_u32(val
, optval_addr
))
2905 return -TARGET_EFAULT
;
2907 if (put_user_u8(val
, optval_addr
))
2908 return -TARGET_EFAULT
;
2910 if (put_user_u32(len
, optlen
))
2911 return -TARGET_EFAULT
;
2918 case IP_ROUTER_ALERT
:
2922 case IP_MTU_DISCOVER
:
2928 case IP_MULTICAST_TTL
:
2929 case IP_MULTICAST_LOOP
:
2930 if (get_user_u32(len
, optlen
))
2931 return -TARGET_EFAULT
;
2933 return -TARGET_EINVAL
;
2935 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2938 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2940 if (put_user_u32(len
, optlen
)
2941 || put_user_u8(val
, optval_addr
))
2942 return -TARGET_EFAULT
;
2944 if (len
> sizeof(int))
2946 if (put_user_u32(len
, optlen
)
2947 || put_user_u32(val
, optval_addr
))
2948 return -TARGET_EFAULT
;
2952 ret
= -TARGET_ENOPROTOOPT
;
2958 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2960 ret
= -TARGET_EOPNOTSUPP
;
2966 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
2967 int count
, int copy
)
2969 struct target_iovec
*target_vec
;
2971 abi_ulong total_len
, max_len
;
2974 bool bad_address
= false;
2980 if (count
< 0 || count
> IOV_MAX
) {
2985 vec
= g_try_new0(struct iovec
, count
);
2991 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2992 count
* sizeof(struct target_iovec
), 1);
2993 if (target_vec
== NULL
) {
2998 /* ??? If host page size > target page size, this will result in a
2999 value larger than what we can actually support. */
3000 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
3003 for (i
= 0; i
< count
; i
++) {
3004 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3005 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3010 } else if (len
== 0) {
3011 /* Zero length pointer is ignored. */
3012 vec
[i
].iov_base
= 0;
3014 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
3015 /* If the first buffer pointer is bad, this is a fault. But
3016 * subsequent bad buffers will result in a partial write; this
3017 * is realized by filling the vector with null pointers and
3019 if (!vec
[i
].iov_base
) {
3030 if (len
> max_len
- total_len
) {
3031 len
= max_len
- total_len
;
3034 vec
[i
].iov_len
= len
;
3038 unlock_user(target_vec
, target_addr
, 0);
3043 if (tswapal(target_vec
[i
].iov_len
) > 0) {
3044 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
3047 unlock_user(target_vec
, target_addr
, 0);
3054 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
3055 int count
, int copy
)
3057 struct target_iovec
*target_vec
;
3060 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3061 count
* sizeof(struct target_iovec
), 1);
3063 for (i
= 0; i
< count
; i
++) {
3064 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3065 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3069 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
3071 unlock_user(target_vec
, target_addr
, 0);
3077 static inline int target_to_host_sock_type(int *type
)
3080 int target_type
= *type
;
3082 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
3083 case TARGET_SOCK_DGRAM
:
3084 host_type
= SOCK_DGRAM
;
3086 case TARGET_SOCK_STREAM
:
3087 host_type
= SOCK_STREAM
;
3090 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
3093 if (target_type
& TARGET_SOCK_CLOEXEC
) {
3094 #if defined(SOCK_CLOEXEC)
3095 host_type
|= SOCK_CLOEXEC
;
3097 return -TARGET_EINVAL
;
3100 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3101 #if defined(SOCK_NONBLOCK)
3102 host_type
|= SOCK_NONBLOCK
;
3103 #elif !defined(O_NONBLOCK)
3104 return -TARGET_EINVAL
;
3111 /* Try to emulate socket type flags after socket creation. */
3112 static int sock_flags_fixup(int fd
, int target_type
)
3114 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3115 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3116 int flags
= fcntl(fd
, F_GETFL
);
3117 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
3119 return -TARGET_EINVAL
;
3126 static abi_long
packet_target_to_host_sockaddr(void *host_addr
,
3127 abi_ulong target_addr
,
3130 struct sockaddr
*addr
= host_addr
;
3131 struct target_sockaddr
*target_saddr
;
3133 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
3134 if (!target_saddr
) {
3135 return -TARGET_EFAULT
;
3138 memcpy(addr
, target_saddr
, len
);
3139 addr
->sa_family
= tswap16(target_saddr
->sa_family
);
3140 /* spkt_protocol is big-endian */
3142 unlock_user(target_saddr
, target_addr
, 0);
3146 static TargetFdTrans target_packet_trans
= {
3147 .target_to_host_addr
= packet_target_to_host_sockaddr
,
3150 #ifdef CONFIG_RTNETLINK
3151 static abi_long
netlink_route_target_to_host(void *buf
, size_t len
)
3155 ret
= target_to_host_nlmsg_route(buf
, len
);
3163 static abi_long
netlink_route_host_to_target(void *buf
, size_t len
)
3167 ret
= host_to_target_nlmsg_route(buf
, len
);
3175 static TargetFdTrans target_netlink_route_trans
= {
3176 .target_to_host_data
= netlink_route_target_to_host
,
3177 .host_to_target_data
= netlink_route_host_to_target
,
3179 #endif /* CONFIG_RTNETLINK */
3181 static abi_long
netlink_audit_target_to_host(void *buf
, size_t len
)
3185 ret
= target_to_host_nlmsg_audit(buf
, len
);
3193 static abi_long
netlink_audit_host_to_target(void *buf
, size_t len
)
3197 ret
= host_to_target_nlmsg_audit(buf
, len
);
3205 static TargetFdTrans target_netlink_audit_trans
= {
3206 .target_to_host_data
= netlink_audit_target_to_host
,
3207 .host_to_target_data
= netlink_audit_host_to_target
,
3210 /* do_socket() Must return target values and target errnos. */
3211 static abi_long
do_socket(int domain
, int type
, int protocol
)
3213 int target_type
= type
;
3216 ret
= target_to_host_sock_type(&type
);
3221 if (domain
== PF_NETLINK
&& !(
3222 #ifdef CONFIG_RTNETLINK
3223 protocol
== NETLINK_ROUTE
||
3225 protocol
== NETLINK_KOBJECT_UEVENT
||
3226 protocol
== NETLINK_AUDIT
)) {
3227 return -EPFNOSUPPORT
;
3230 if (domain
== AF_PACKET
||
3231 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
3232 protocol
= tswap16(protocol
);
3235 ret
= get_errno(socket(domain
, type
, protocol
));
3237 ret
= sock_flags_fixup(ret
, target_type
);
3238 if (type
== SOCK_PACKET
) {
3239 /* Manage an obsolete case :
3240 * if socket type is SOCK_PACKET, bind by name
3242 fd_trans_register(ret
, &target_packet_trans
);
3243 } else if (domain
== PF_NETLINK
) {
3245 #ifdef CONFIG_RTNETLINK
3247 fd_trans_register(ret
, &target_netlink_route_trans
);
3250 case NETLINK_KOBJECT_UEVENT
:
3251 /* nothing to do: messages are strings */
3254 fd_trans_register(ret
, &target_netlink_audit_trans
);
3257 g_assert_not_reached();
3264 /* do_bind() Must return target values and target errnos. */
3265 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
3271 if ((int)addrlen
< 0) {
3272 return -TARGET_EINVAL
;
3275 addr
= alloca(addrlen
+1);
3277 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3281 return get_errno(bind(sockfd
, addr
, addrlen
));
3284 /* do_connect() Must return target values and target errnos. */
3285 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
3291 if ((int)addrlen
< 0) {
3292 return -TARGET_EINVAL
;
3295 addr
= alloca(addrlen
+1);
3297 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3301 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
3304 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3305 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
3306 int flags
, int send
)
3312 abi_ulong target_vec
;
3314 if (msgp
->msg_name
) {
3315 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
3316 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
3317 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
3318 tswapal(msgp
->msg_name
),
3324 msg
.msg_name
= NULL
;
3325 msg
.msg_namelen
= 0;
3327 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
3328 msg
.msg_control
= alloca(msg
.msg_controllen
);
3329 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
3331 count
= tswapal(msgp
->msg_iovlen
);
3332 target_vec
= tswapal(msgp
->msg_iov
);
3333 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
3334 target_vec
, count
, send
);
3336 ret
= -host_to_target_errno(errno
);
3339 msg
.msg_iovlen
= count
;
3343 if (fd_trans_target_to_host_data(fd
)) {
3346 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
3347 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
3348 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
3349 msg
.msg_iov
->iov_len
);
3351 msg
.msg_iov
->iov_base
= host_msg
;
3352 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3356 ret
= target_to_host_cmsg(&msg
, msgp
);
3358 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3362 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
3363 if (!is_error(ret
)) {
3365 if (fd_trans_host_to_target_data(fd
)) {
3366 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
3369 ret
= host_to_target_cmsg(msgp
, &msg
);
3371 if (!is_error(ret
)) {
3372 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
3373 if (msg
.msg_name
!= NULL
) {
3374 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
3375 msg
.msg_name
, msg
.msg_namelen
);
3387 unlock_iovec(vec
, target_vec
, count
, !send
);
3392 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
3393 int flags
, int send
)
3396 struct target_msghdr
*msgp
;
3398 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
3402 return -TARGET_EFAULT
;
3404 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
3405 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
3409 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3410 * so it might not have this *mmsg-specific flag either.
3412 #ifndef MSG_WAITFORONE
3413 #define MSG_WAITFORONE 0x10000
3416 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
3417 unsigned int vlen
, unsigned int flags
,
3420 struct target_mmsghdr
*mmsgp
;
3424 if (vlen
> UIO_MAXIOV
) {
3428 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
3430 return -TARGET_EFAULT
;
3433 for (i
= 0; i
< vlen
; i
++) {
3434 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
3435 if (is_error(ret
)) {
3438 mmsgp
[i
].msg_len
= tswap32(ret
);
3439 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3440 if (flags
& MSG_WAITFORONE
) {
3441 flags
|= MSG_DONTWAIT
;
3445 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3447 /* Return number of datagrams sent if we sent any at all;
3448 * otherwise return the error.
3456 /* do_accept4() Must return target values and target errnos. */
3457 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3458 abi_ulong target_addrlen_addr
, int flags
)
3465 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3467 if (target_addr
== 0) {
3468 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
3471 /* linux returns EINVAL if addrlen pointer is invalid */
3472 if (get_user_u32(addrlen
, target_addrlen_addr
))
3473 return -TARGET_EINVAL
;
3475 if ((int)addrlen
< 0) {
3476 return -TARGET_EINVAL
;
3479 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3480 return -TARGET_EINVAL
;
3482 addr
= alloca(addrlen
);
3484 ret
= get_errno(safe_accept4(fd
, addr
, &addrlen
, host_flags
));
3485 if (!is_error(ret
)) {
3486 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3487 if (put_user_u32(addrlen
, target_addrlen_addr
))
3488 ret
= -TARGET_EFAULT
;
3493 /* do_getpeername() Must return target values and target errnos. */
3494 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3495 abi_ulong target_addrlen_addr
)
3501 if (get_user_u32(addrlen
, target_addrlen_addr
))
3502 return -TARGET_EFAULT
;
3504 if ((int)addrlen
< 0) {
3505 return -TARGET_EINVAL
;
3508 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3509 return -TARGET_EFAULT
;
3511 addr
= alloca(addrlen
);
3513 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
3514 if (!is_error(ret
)) {
3515 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3516 if (put_user_u32(addrlen
, target_addrlen_addr
))
3517 ret
= -TARGET_EFAULT
;
3522 /* do_getsockname() Must return target values and target errnos. */
3523 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3524 abi_ulong target_addrlen_addr
)
3530 if (get_user_u32(addrlen
, target_addrlen_addr
))
3531 return -TARGET_EFAULT
;
3533 if ((int)addrlen
< 0) {
3534 return -TARGET_EINVAL
;
3537 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3538 return -TARGET_EFAULT
;
3540 addr
= alloca(addrlen
);
3542 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
3543 if (!is_error(ret
)) {
3544 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3545 if (put_user_u32(addrlen
, target_addrlen_addr
))
3546 ret
= -TARGET_EFAULT
;
3551 /* do_socketpair() Must return target values and target errnos. */
3552 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3553 abi_ulong target_tab_addr
)
3558 target_to_host_sock_type(&type
);
3560 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3561 if (!is_error(ret
)) {
3562 if (put_user_s32(tab
[0], target_tab_addr
)
3563 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3564 ret
= -TARGET_EFAULT
;
3569 /* do_sendto() Must return target values and target errnos. */
3570 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3571 abi_ulong target_addr
, socklen_t addrlen
)
3575 void *copy_msg
= NULL
;
3578 if ((int)addrlen
< 0) {
3579 return -TARGET_EINVAL
;
3582 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3584 return -TARGET_EFAULT
;
3585 if (fd_trans_target_to_host_data(fd
)) {
3586 copy_msg
= host_msg
;
3587 host_msg
= g_malloc(len
);
3588 memcpy(host_msg
, copy_msg
, len
);
3589 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
3595 addr
= alloca(addrlen
+1);
3596 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
3600 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
3602 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
3607 host_msg
= copy_msg
;
3609 unlock_user(host_msg
, msg
, 0);
3613 /* do_recvfrom() Must return target values and target errnos. */
3614 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
3615 abi_ulong target_addr
,
3616 abi_ulong target_addrlen
)
3623 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
3625 return -TARGET_EFAULT
;
3627 if (get_user_u32(addrlen
, target_addrlen
)) {
3628 ret
= -TARGET_EFAULT
;
3631 if ((int)addrlen
< 0) {
3632 ret
= -TARGET_EINVAL
;
3635 addr
= alloca(addrlen
);
3636 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
3639 addr
= NULL
; /* To keep compiler quiet. */
3640 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
3642 if (!is_error(ret
)) {
3643 if (fd_trans_host_to_target_data(fd
)) {
3644 ret
= fd_trans_host_to_target_data(fd
)(host_msg
, ret
);
3647 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3648 if (put_user_u32(addrlen
, target_addrlen
)) {
3649 ret
= -TARGET_EFAULT
;
3653 unlock_user(host_msg
, msg
, len
);
3656 unlock_user(host_msg
, msg
, 0);
3661 #ifdef TARGET_NR_socketcall
3662 /* do_socketcall() Must return target values and target errnos. */
3663 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
3665 static const unsigned ac
[] = { /* number of arguments per call */
3666 [SOCKOP_socket
] = 3, /* domain, type, protocol */
3667 [SOCKOP_bind
] = 3, /* sockfd, addr, addrlen */
3668 [SOCKOP_connect
] = 3, /* sockfd, addr, addrlen */
3669 [SOCKOP_listen
] = 2, /* sockfd, backlog */
3670 [SOCKOP_accept
] = 3, /* sockfd, addr, addrlen */
3671 [SOCKOP_accept4
] = 4, /* sockfd, addr, addrlen, flags */
3672 [SOCKOP_getsockname
] = 3, /* sockfd, addr, addrlen */
3673 [SOCKOP_getpeername
] = 3, /* sockfd, addr, addrlen */
3674 [SOCKOP_socketpair
] = 4, /* domain, type, protocol, tab */
3675 [SOCKOP_send
] = 4, /* sockfd, msg, len, flags */
3676 [SOCKOP_recv
] = 4, /* sockfd, msg, len, flags */
3677 [SOCKOP_sendto
] = 6, /* sockfd, msg, len, flags, addr, addrlen */
3678 [SOCKOP_recvfrom
] = 6, /* sockfd, msg, len, flags, addr, addrlen */
3679 [SOCKOP_shutdown
] = 2, /* sockfd, how */
3680 [SOCKOP_sendmsg
] = 3, /* sockfd, msg, flags */
3681 [SOCKOP_recvmsg
] = 3, /* sockfd, msg, flags */
3682 [SOCKOP_sendmmsg
] = 4, /* sockfd, msgvec, vlen, flags */
3683 [SOCKOP_recvmmsg
] = 4, /* sockfd, msgvec, vlen, flags */
3684 [SOCKOP_setsockopt
] = 5, /* sockfd, level, optname, optval, optlen */
3685 [SOCKOP_getsockopt
] = 5, /* sockfd, level, optname, optval, optlen */
3687 abi_long a
[6]; /* max 6 args */
3689 /* first, collect the arguments in a[] according to ac[] */
3690 if (num
>= 0 && num
< ARRAY_SIZE(ac
)) {
3692 assert(ARRAY_SIZE(a
) >= ac
[num
]); /* ensure we have space for args */
3693 for (i
= 0; i
< ac
[num
]; ++i
) {
3694 if (get_user_ual(a
[i
], vptr
+ i
* sizeof(abi_long
)) != 0) {
3695 return -TARGET_EFAULT
;
3700 /* now when we have the args, actually handle the call */
3702 case SOCKOP_socket
: /* domain, type, protocol */
3703 return do_socket(a
[0], a
[1], a
[2]);
3704 case SOCKOP_bind
: /* sockfd, addr, addrlen */
3705 return do_bind(a
[0], a
[1], a
[2]);
3706 case SOCKOP_connect
: /* sockfd, addr, addrlen */
3707 return do_connect(a
[0], a
[1], a
[2]);
3708 case SOCKOP_listen
: /* sockfd, backlog */
3709 return get_errno(listen(a
[0], a
[1]));
3710 case SOCKOP_accept
: /* sockfd, addr, addrlen */
3711 return do_accept4(a
[0], a
[1], a
[2], 0);
3712 case SOCKOP_accept4
: /* sockfd, addr, addrlen, flags */
3713 return do_accept4(a
[0], a
[1], a
[2], a
[3]);
3714 case SOCKOP_getsockname
: /* sockfd, addr, addrlen */
3715 return do_getsockname(a
[0], a
[1], a
[2]);
3716 case SOCKOP_getpeername
: /* sockfd, addr, addrlen */
3717 return do_getpeername(a
[0], a
[1], a
[2]);
3718 case SOCKOP_socketpair
: /* domain, type, protocol, tab */
3719 return do_socketpair(a
[0], a
[1], a
[2], a
[3]);
3720 case SOCKOP_send
: /* sockfd, msg, len, flags */
3721 return do_sendto(a
[0], a
[1], a
[2], a
[3], 0, 0);
3722 case SOCKOP_recv
: /* sockfd, msg, len, flags */
3723 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], 0, 0);
3724 case SOCKOP_sendto
: /* sockfd, msg, len, flags, addr, addrlen */
3725 return do_sendto(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3726 case SOCKOP_recvfrom
: /* sockfd, msg, len, flags, addr, addrlen */
3727 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3728 case SOCKOP_shutdown
: /* sockfd, how */
3729 return get_errno(shutdown(a
[0], a
[1]));
3730 case SOCKOP_sendmsg
: /* sockfd, msg, flags */
3731 return do_sendrecvmsg(a
[0], a
[1], a
[2], 1);
3732 case SOCKOP_recvmsg
: /* sockfd, msg, flags */
3733 return do_sendrecvmsg(a
[0], a
[1], a
[2], 0);
3734 case SOCKOP_sendmmsg
: /* sockfd, msgvec, vlen, flags */
3735 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 1);
3736 case SOCKOP_recvmmsg
: /* sockfd, msgvec, vlen, flags */
3737 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 0);
3738 case SOCKOP_setsockopt
: /* sockfd, level, optname, optval, optlen */
3739 return do_setsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3740 case SOCKOP_getsockopt
: /* sockfd, level, optname, optval, optlen */
3741 return do_getsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3743 gemu_log("Unsupported socketcall: %d\n", num
);
3744 return -TARGET_ENOSYS
;
3749 #define N_SHM_REGIONS 32
3751 static struct shm_region
{
3755 } shm_regions
[N_SHM_REGIONS
];
3757 #ifndef TARGET_SEMID64_DS
3758 /* asm-generic version of this struct */
3759 struct target_semid64_ds
3761 struct target_ipc_perm sem_perm
;
3762 abi_ulong sem_otime
;
3763 #if TARGET_ABI_BITS == 32
3764 abi_ulong __unused1
;
3766 abi_ulong sem_ctime
;
3767 #if TARGET_ABI_BITS == 32
3768 abi_ulong __unused2
;
3770 abi_ulong sem_nsems
;
3771 abi_ulong __unused3
;
3772 abi_ulong __unused4
;
3776 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
3777 abi_ulong target_addr
)
3779 struct target_ipc_perm
*target_ip
;
3780 struct target_semid64_ds
*target_sd
;
3782 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3783 return -TARGET_EFAULT
;
3784 target_ip
= &(target_sd
->sem_perm
);
3785 host_ip
->__key
= tswap32(target_ip
->__key
);
3786 host_ip
->uid
= tswap32(target_ip
->uid
);
3787 host_ip
->gid
= tswap32(target_ip
->gid
);
3788 host_ip
->cuid
= tswap32(target_ip
->cuid
);
3789 host_ip
->cgid
= tswap32(target_ip
->cgid
);
3790 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3791 host_ip
->mode
= tswap32(target_ip
->mode
);
3793 host_ip
->mode
= tswap16(target_ip
->mode
);
3795 #if defined(TARGET_PPC)
3796 host_ip
->__seq
= tswap32(target_ip
->__seq
);
3798 host_ip
->__seq
= tswap16(target_ip
->__seq
);
3800 unlock_user_struct(target_sd
, target_addr
, 0);
3804 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
3805 struct ipc_perm
*host_ip
)
3807 struct target_ipc_perm
*target_ip
;
3808 struct target_semid64_ds
*target_sd
;
3810 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3811 return -TARGET_EFAULT
;
3812 target_ip
= &(target_sd
->sem_perm
);
3813 target_ip
->__key
= tswap32(host_ip
->__key
);
3814 target_ip
->uid
= tswap32(host_ip
->uid
);
3815 target_ip
->gid
= tswap32(host_ip
->gid
);
3816 target_ip
->cuid
= tswap32(host_ip
->cuid
);
3817 target_ip
->cgid
= tswap32(host_ip
->cgid
);
3818 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3819 target_ip
->mode
= tswap32(host_ip
->mode
);
3821 target_ip
->mode
= tswap16(host_ip
->mode
);
3823 #if defined(TARGET_PPC)
3824 target_ip
->__seq
= tswap32(host_ip
->__seq
);
3826 target_ip
->__seq
= tswap16(host_ip
->__seq
);
3828 unlock_user_struct(target_sd
, target_addr
, 1);
3832 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
3833 abi_ulong target_addr
)
3835 struct target_semid64_ds
*target_sd
;
3837 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3838 return -TARGET_EFAULT
;
3839 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
3840 return -TARGET_EFAULT
;
3841 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
3842 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
3843 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
3844 unlock_user_struct(target_sd
, target_addr
, 0);
3848 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
3849 struct semid_ds
*host_sd
)
3851 struct target_semid64_ds
*target_sd
;
3853 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3854 return -TARGET_EFAULT
;
3855 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
3856 return -TARGET_EFAULT
;
3857 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
3858 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
3859 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
3860 unlock_user_struct(target_sd
, target_addr
, 1);
/* Guest-layout seminfo, filled in by host_to_target_seminfo() below.
 * Field set mirrors the kernel's struct seminfo. */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
3877 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
3878 struct seminfo
*host_seminfo
)
3880 struct target_seminfo
*target_seminfo
;
3881 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
3882 return -TARGET_EFAULT
;
3883 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
3884 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
3885 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
3886 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
3887 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
3888 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
3889 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
3890 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
3891 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
3892 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
3893 unlock_user_struct(target_seminfo
, target_addr
, 1);
3899 struct semid_ds
*buf
;
3900 unsigned short *array
;
3901 struct seminfo
*__buf
;
3904 union target_semun
{
3911 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
3912 abi_ulong target_addr
)
3915 unsigned short *array
;
3917 struct semid_ds semid_ds
;
3920 semun
.buf
= &semid_ds
;
3922 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3924 return get_errno(ret
);
3926 nsems
= semid_ds
.sem_nsems
;
3928 *host_array
= g_try_new(unsigned short, nsems
);
3930 return -TARGET_ENOMEM
;
3932 array
= lock_user(VERIFY_READ
, target_addr
,
3933 nsems
*sizeof(unsigned short), 1);
3935 g_free(*host_array
);
3936 return -TARGET_EFAULT
;
3939 for(i
=0; i
<nsems
; i
++) {
3940 __get_user((*host_array
)[i
], &array
[i
]);
3942 unlock_user(array
, target_addr
, 0);
3947 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
3948 unsigned short **host_array
)
3951 unsigned short *array
;
3953 struct semid_ds semid_ds
;
3956 semun
.buf
= &semid_ds
;
3958 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3960 return get_errno(ret
);
3962 nsems
= semid_ds
.sem_nsems
;
3964 array
= lock_user(VERIFY_WRITE
, target_addr
,
3965 nsems
*sizeof(unsigned short), 0);
3967 return -TARGET_EFAULT
;
3969 for(i
=0; i
<nsems
; i
++) {
3970 __put_user((*host_array
)[i
], &array
[i
]);
3972 g_free(*host_array
);
3973 unlock_user(array
, target_addr
, 1);
3978 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
3979 abi_ulong target_arg
)
3981 union target_semun target_su
= { .buf
= target_arg
};
3983 struct semid_ds dsarg
;
3984 unsigned short *array
= NULL
;
3985 struct seminfo seminfo
;
3986 abi_long ret
= -TARGET_EINVAL
;
3993 /* In 64 bit cross-endian situations, we will erroneously pick up
3994 * the wrong half of the union for the "val" element. To rectify
3995 * this, the entire 8-byte structure is byteswapped, followed by
3996 * a swap of the 4 byte val field. In other cases, the data is
3997 * already in proper host byte order. */
3998 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
3999 target_su
.buf
= tswapal(target_su
.buf
);
4000 arg
.val
= tswap32(target_su
.val
);
4002 arg
.val
= target_su
.val
;
4004 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4008 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
4012 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4013 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
4020 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
4024 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4025 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
4031 arg
.__buf
= &seminfo
;
4032 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4033 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
4041 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
/* Guest layout of struct sembuf, converted by target_to_host_sembuf(). */
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};
4054 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
4055 abi_ulong target_addr
,
4058 struct target_sembuf
*target_sembuf
;
4061 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
4062 nsops
*sizeof(struct target_sembuf
), 1);
4064 return -TARGET_EFAULT
;
4066 for(i
=0; i
<nsops
; i
++) {
4067 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
4068 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
4069 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
4072 unlock_user(target_sembuf
, target_addr
, 0);
4077 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
4079 struct sembuf sops
[nsops
];
4081 if (target_to_host_sembuf(sops
, ptr
, nsops
))
4082 return -TARGET_EFAULT
;
4084 return get_errno(safe_semtimedop(semid
, sops
, nsops
, NULL
));
4087 struct target_msqid_ds
4089 struct target_ipc_perm msg_perm
;
4090 abi_ulong msg_stime
;
4091 #if TARGET_ABI_BITS == 32
4092 abi_ulong __unused1
;
4094 abi_ulong msg_rtime
;
4095 #if TARGET_ABI_BITS == 32
4096 abi_ulong __unused2
;
4098 abi_ulong msg_ctime
;
4099 #if TARGET_ABI_BITS == 32
4100 abi_ulong __unused3
;
4102 abi_ulong __msg_cbytes
;
4104 abi_ulong msg_qbytes
;
4105 abi_ulong msg_lspid
;
4106 abi_ulong msg_lrpid
;
4107 abi_ulong __unused4
;
4108 abi_ulong __unused5
;
4111 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
4112 abi_ulong target_addr
)
4114 struct target_msqid_ds
*target_md
;
4116 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
4117 return -TARGET_EFAULT
;
4118 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
4119 return -TARGET_EFAULT
;
4120 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
4121 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
4122 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
4123 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
4124 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
4125 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
4126 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
4127 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
4128 unlock_user_struct(target_md
, target_addr
, 0);
4132 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
4133 struct msqid_ds
*host_md
)
4135 struct target_msqid_ds
*target_md
;
4137 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
4138 return -TARGET_EFAULT
;
4139 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
4140 return -TARGET_EFAULT
;
4141 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
4142 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
4143 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
4144 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
4145 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
4146 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
4147 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
4148 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
4149 unlock_user_struct(target_md
, target_addr
, 1);
/* Guest-layout msginfo, filled in by host_to_target_msginfo() below. */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
4164 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
4165 struct msginfo
*host_msginfo
)
4167 struct target_msginfo
*target_msginfo
;
4168 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
4169 return -TARGET_EFAULT
;
4170 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
4171 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
4172 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
4173 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
4174 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
4175 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
4176 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
4177 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
4178 unlock_user_struct(target_msginfo
, target_addr
, 1);
4182 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
4184 struct msqid_ds dsarg
;
4185 struct msginfo msginfo
;
4186 abi_long ret
= -TARGET_EINVAL
;
4194 if (target_to_host_msqid_ds(&dsarg
,ptr
))
4195 return -TARGET_EFAULT
;
4196 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
4197 if (host_to_target_msqid_ds(ptr
,&dsarg
))
4198 return -TARGET_EFAULT
;
4201 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
4205 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
4206 if (host_to_target_msginfo(ptr
, &msginfo
))
4207 return -TARGET_EFAULT
;
4214 struct target_msgbuf
{
4219 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
4220 ssize_t msgsz
, int msgflg
)
4222 struct target_msgbuf
*target_mb
;
4223 struct msgbuf
*host_mb
;
4227 return -TARGET_EINVAL
;
4230 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
4231 return -TARGET_EFAULT
;
4232 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4234 unlock_user_struct(target_mb
, msgp
, 0);
4235 return -TARGET_ENOMEM
;
4237 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
4238 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
4239 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
4241 unlock_user_struct(target_mb
, msgp
, 0);
4246 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
4247 ssize_t msgsz
, abi_long msgtyp
,
4250 struct target_msgbuf
*target_mb
;
4252 struct msgbuf
*host_mb
;
4256 return -TARGET_EINVAL
;
4259 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
4260 return -TARGET_EFAULT
;
4262 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4264 ret
= -TARGET_ENOMEM
;
4267 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
4270 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
4271 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
4272 if (!target_mtext
) {
4273 ret
= -TARGET_EFAULT
;
4276 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
4277 unlock_user(target_mtext
, target_mtext_addr
, ret
);
4280 target_mb
->mtype
= tswapal(host_mb
->mtype
);
4284 unlock_user_struct(target_mb
, msgp
, 1);
4289 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
4290 abi_ulong target_addr
)
4292 struct target_shmid_ds
*target_sd
;
4294 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4295 return -TARGET_EFAULT
;
4296 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
4297 return -TARGET_EFAULT
;
4298 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4299 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4300 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4301 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4302 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4303 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4304 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4305 unlock_user_struct(target_sd
, target_addr
, 0);
4309 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
4310 struct shmid_ds
*host_sd
)
4312 struct target_shmid_ds
*target_sd
;
4314 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4315 return -TARGET_EFAULT
;
4316 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
4317 return -TARGET_EFAULT
;
4318 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4319 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4320 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4321 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4322 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4323 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4324 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4325 unlock_user_struct(target_sd
, target_addr
, 1);
4329 struct target_shminfo
{
4337 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
4338 struct shminfo
*host_shminfo
)
4340 struct target_shminfo
*target_shminfo
;
4341 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
4342 return -TARGET_EFAULT
;
4343 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
4344 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
4345 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
4346 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
4347 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
4348 unlock_user_struct(target_shminfo
, target_addr
, 1);
4352 struct target_shm_info
{
4357 abi_ulong swap_attempts
;
4358 abi_ulong swap_successes
;
4361 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
4362 struct shm_info
*host_shm_info
)
4364 struct target_shm_info
*target_shm_info
;
4365 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
4366 return -TARGET_EFAULT
;
4367 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
4368 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
4369 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
4370 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
4371 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
4372 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
4373 unlock_user_struct(target_shm_info
, target_addr
, 1);
4377 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
4379 struct shmid_ds dsarg
;
4380 struct shminfo shminfo
;
4381 struct shm_info shm_info
;
4382 abi_long ret
= -TARGET_EINVAL
;
4390 if (target_to_host_shmid_ds(&dsarg
, buf
))
4391 return -TARGET_EFAULT
;
4392 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
4393 if (host_to_target_shmid_ds(buf
, &dsarg
))
4394 return -TARGET_EFAULT
;
4397 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
4398 if (host_to_target_shminfo(buf
, &shminfo
))
4399 return -TARGET_EFAULT
;
4402 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
4403 if (host_to_target_shm_info(buf
, &shm_info
))
4404 return -TARGET_EFAULT
;
4409 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
4416 static inline abi_ulong
do_shmat(int shmid
, abi_ulong shmaddr
, int shmflg
)
4420 struct shmid_ds shm_info
;
4423 /* find out the length of the shared memory segment */
4424 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
4425 if (is_error(ret
)) {
4426 /* can't get length, bail out */
4433 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
4435 abi_ulong mmap_start
;
4437 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
4439 if (mmap_start
== -1) {
4441 host_raddr
= (void *)-1;
4443 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
4446 if (host_raddr
== (void *)-1) {
4448 return get_errno((long)host_raddr
);
4450 raddr
=h2g((unsigned long)host_raddr
);
4452 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
4453 PAGE_VALID
| PAGE_READ
|
4454 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
4456 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
4457 if (!shm_regions
[i
].in_use
) {
4458 shm_regions
[i
].in_use
= true;
4459 shm_regions
[i
].start
= raddr
;
4460 shm_regions
[i
].size
= shm_info
.shm_segsz
;
4470 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
4474 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
4475 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
4476 shm_regions
[i
].in_use
= false;
4477 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
4482 return get_errno(shmdt(g2h(shmaddr
)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
static abi_long do_ipc(unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    /* The multiplexed ipc syscall encodes a version in the top 16 bits. */
    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old-style msgrcv packs msgp and msgtyp in a kludge struct. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* the attach address is returned through *third */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
	break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
	break;

    case IPCOP_shmget:
	/* IPC_* flag values are the same on all linux platforms */
	ret = get_errno(shmget(first, second, third));
	break;

	/* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
	gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
	ret = -TARGET_ENOSYS;
	break;
    }
    return ret;
}
#endif
4591 /* kernel structure types definitions */
4593 #define STRUCT(name, ...) STRUCT_ ## name,
4594 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4596 #include "syscall_types.h"
4600 #undef STRUCT_SPECIAL
4602 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4603 #define STRUCT_SPECIAL(name)
4604 #include "syscall_types.h"
4606 #undef STRUCT_SPECIAL
4608 typedef struct IOCTLEntry IOCTLEntry
;
4610 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4611 int fd
, int cmd
, abi_long arg
);
4615 unsigned int host_cmd
;
4618 do_ioctl_fn
*do_ioctl
;
4619 const argtype arg_type
[5];
#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

/* Scratch buffer size used by do_ioctl() for argument conversion. */
#define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
4717 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4718 int fd
, int cmd
, abi_long arg
)
4720 const argtype
*arg_type
= ie
->arg_type
;
4724 struct ifconf
*host_ifconf
;
4726 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
4727 int target_ifreq_size
;
4732 abi_long target_ifc_buf
;
4736 assert(arg_type
[0] == TYPE_PTR
);
4737 assert(ie
->access
== IOC_RW
);
4740 target_size
= thunk_type_size(arg_type
, 0);
4742 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4744 return -TARGET_EFAULT
;
4745 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4746 unlock_user(argptr
, arg
, 0);
4748 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
4749 target_ifc_len
= host_ifconf
->ifc_len
;
4750 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
4752 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
4753 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
4754 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
4756 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
4757 if (outbufsz
> MAX_STRUCT_SIZE
) {
4758 /* We can't fit all the extents into the fixed size buffer.
4759 * Allocate one that is large enough and use it instead.
4761 host_ifconf
= malloc(outbufsz
);
4763 return -TARGET_ENOMEM
;
4765 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
4768 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
4770 host_ifconf
->ifc_len
= host_ifc_len
;
4771 host_ifconf
->ifc_buf
= host_ifc_buf
;
4773 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
4774 if (!is_error(ret
)) {
4775 /* convert host ifc_len to target ifc_len */
4777 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
4778 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
4779 host_ifconf
->ifc_len
= target_ifc_len
;
4781 /* restore target ifc_buf */
4783 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
4785 /* copy struct ifconf to target user */
4787 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4789 return -TARGET_EFAULT
;
4790 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
4791 unlock_user(argptr
, arg
, target_size
);
4793 /* copy ifreq[] to target user */
4795 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
4796 for (i
= 0; i
< nb_ifreq
; i
++) {
4797 thunk_convert(argptr
+ i
* target_ifreq_size
,
4798 host_ifc_buf
+ i
* sizeof(struct ifreq
),
4799 ifreq_arg_type
, THUNK_TARGET
);
4801 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
4811 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
4812 int cmd
, abi_long arg
)
4815 struct dm_ioctl
*host_dm
;
4816 abi_long guest_data
;
4817 uint32_t guest_data_size
;
4819 const argtype
*arg_type
= ie
->arg_type
;
4821 void *big_buf
= NULL
;
4825 target_size
= thunk_type_size(arg_type
, 0);
4826 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4828 ret
= -TARGET_EFAULT
;
4831 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4832 unlock_user(argptr
, arg
, 0);
4834 /* buf_temp is too small, so fetch things into a bigger buffer */
4835 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
4836 memcpy(big_buf
, buf_temp
, target_size
);
4840 guest_data
= arg
+ host_dm
->data_start
;
4841 if ((guest_data
- arg
) < 0) {
4845 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
4846 host_data
= (char*)host_dm
+ host_dm
->data_start
;
4848 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
4849 switch (ie
->host_cmd
) {
4851 case DM_LIST_DEVICES
:
4854 case DM_DEV_SUSPEND
:
4857 case DM_TABLE_STATUS
:
4858 case DM_TABLE_CLEAR
:
4860 case DM_LIST_VERSIONS
:
4864 case DM_DEV_SET_GEOMETRY
:
4865 /* data contains only strings */
4866 memcpy(host_data
, argptr
, guest_data_size
);
4869 memcpy(host_data
, argptr
, guest_data_size
);
4870 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
4874 void *gspec
= argptr
;
4875 void *cur_data
= host_data
;
4876 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
4877 int spec_size
= thunk_type_size(arg_type
, 0);
4880 for (i
= 0; i
< host_dm
->target_count
; i
++) {
4881 struct dm_target_spec
*spec
= cur_data
;
4885 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
4886 slen
= strlen((char*)gspec
+ spec_size
) + 1;
4888 spec
->next
= sizeof(*spec
) + slen
;
4889 strcpy((char*)&spec
[1], gspec
+ spec_size
);
4891 cur_data
+= spec
->next
;
4896 ret
= -TARGET_EINVAL
;
4897 unlock_user(argptr
, guest_data
, 0);
4900 unlock_user(argptr
, guest_data
, 0);
4902 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4903 if (!is_error(ret
)) {
4904 guest_data
= arg
+ host_dm
->data_start
;
4905 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
4906 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
4907 switch (ie
->host_cmd
) {
4912 case DM_DEV_SUSPEND
:
4915 case DM_TABLE_CLEAR
:
4917 case DM_DEV_SET_GEOMETRY
:
4918 /* no return data */
4920 case DM_LIST_DEVICES
:
4922 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
4923 uint32_t remaining_data
= guest_data_size
;
4924 void *cur_data
= argptr
;
4925 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
4926 int nl_size
= 12; /* can't use thunk_size due to alignment */
4929 uint32_t next
= nl
->next
;
4931 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
4933 if (remaining_data
< nl
->next
) {
4934 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
4937 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
4938 strcpy(cur_data
+ nl_size
, nl
->name
);
4939 cur_data
+= nl
->next
;
4940 remaining_data
-= nl
->next
;
4944 nl
= (void*)nl
+ next
;
4949 case DM_TABLE_STATUS
:
4951 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
4952 void *cur_data
= argptr
;
4953 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
4954 int spec_size
= thunk_type_size(arg_type
, 0);
4957 for (i
= 0; i
< host_dm
->target_count
; i
++) {
4958 uint32_t next
= spec
->next
;
4959 int slen
= strlen((char*)&spec
[1]) + 1;
4960 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
4961 if (guest_data_size
< spec
->next
) {
4962 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
4965 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
4966 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
4967 cur_data
= argptr
+ spec
->next
;
4968 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
4974 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
4975 int count
= *(uint32_t*)hdata
;
4976 uint64_t *hdev
= hdata
+ 8;
4977 uint64_t *gdev
= argptr
+ 8;
4980 *(uint32_t*)argptr
= tswap32(count
);
4981 for (i
= 0; i
< count
; i
++) {
4982 *gdev
= tswap64(*hdev
);
4988 case DM_LIST_VERSIONS
:
4990 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
4991 uint32_t remaining_data
= guest_data_size
;
4992 void *cur_data
= argptr
;
4993 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
4994 int vers_size
= thunk_type_size(arg_type
, 0);
4997 uint32_t next
= vers
->next
;
4999 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
5001 if (remaining_data
< vers
->next
) {
5002 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5005 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
5006 strcpy(cur_data
+ vers_size
, vers
->name
);
5007 cur_data
+= vers
->next
;
5008 remaining_data
-= vers
->next
;
5012 vers
= (void*)vers
+ next
;
5017 unlock_user(argptr
, guest_data
, 0);
5018 ret
= -TARGET_EINVAL
;
5021 unlock_user(argptr
, guest_data
, guest_data_size
);
5023 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5025 ret
= -TARGET_EFAULT
;
5028 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5029 unlock_user(argptr
, arg
, target_size
);
5036 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5037 int cmd
, abi_long arg
)
5041 const argtype
*arg_type
= ie
->arg_type
;
5042 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
5045 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
5046 struct blkpg_partition host_part
;
5048 /* Read and convert blkpg */
5050 target_size
= thunk_type_size(arg_type
, 0);
5051 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5053 ret
= -TARGET_EFAULT
;
5056 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5057 unlock_user(argptr
, arg
, 0);
5059 switch (host_blkpg
->op
) {
5060 case BLKPG_ADD_PARTITION
:
5061 case BLKPG_DEL_PARTITION
:
5062 /* payload is struct blkpg_partition */
5065 /* Unknown opcode */
5066 ret
= -TARGET_EINVAL
;
5070 /* Read and convert blkpg->data */
5071 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
5072 target_size
= thunk_type_size(part_arg_type
, 0);
5073 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5075 ret
= -TARGET_EFAULT
;
5078 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
5079 unlock_user(argptr
, arg
, 0);
5081 /* Swizzle the data pointer to our local copy and call! */
5082 host_blkpg
->data
= &host_part
;
5083 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
5089 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5090 int fd
, int cmd
, abi_long arg
)
5092 const argtype
*arg_type
= ie
->arg_type
;
5093 const StructEntry
*se
;
5094 const argtype
*field_types
;
5095 const int *dst_offsets
, *src_offsets
;
5098 abi_ulong
*target_rt_dev_ptr
;
5099 unsigned long *host_rt_dev_ptr
;
5103 assert(ie
->access
== IOC_W
);
5104 assert(*arg_type
== TYPE_PTR
);
5106 assert(*arg_type
== TYPE_STRUCT
);
5107 target_size
= thunk_type_size(arg_type
, 0);
5108 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5110 return -TARGET_EFAULT
;
5113 assert(*arg_type
== (int)STRUCT_rtentry
);
5114 se
= struct_entries
+ *arg_type
++;
5115 assert(se
->convert
[0] == NULL
);
5116 /* convert struct here to be able to catch rt_dev string */
5117 field_types
= se
->field_types
;
5118 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
5119 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
5120 for (i
= 0; i
< se
->nb_fields
; i
++) {
5121 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
5122 assert(*field_types
== TYPE_PTRVOID
);
5123 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
5124 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
5125 if (*target_rt_dev_ptr
!= 0) {
5126 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
5127 tswapal(*target_rt_dev_ptr
));
5128 if (!*host_rt_dev_ptr
) {
5129 unlock_user(argptr
, arg
, 0);
5130 return -TARGET_EFAULT
;
5133 *host_rt_dev_ptr
= 0;
5138 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
5139 argptr
+ src_offsets
[i
],
5140 field_types
, THUNK_HOST
);
5142 unlock_user(argptr
, arg
, 0);
5144 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5145 if (*host_rt_dev_ptr
!= 0) {
5146 unlock_user((void *)*host_rt_dev_ptr
,
5147 *target_rt_dev_ptr
, 0);
5152 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5153 int fd
, int cmd
, abi_long arg
)
5155 int sig
= target_to_host_signal(arg
);
5156 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
5159 static IOCTLEntry ioctl_entries
[] = {
5160 #define IOCTL(cmd, access, ...) \
5161 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5162 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5163 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5168 /* ??? Implement proper locking for ioctls. */
5169 /* do_ioctl() Must return target values and target errnos. */
5170 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
5172 const IOCTLEntry
*ie
;
5173 const argtype
*arg_type
;
5175 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
5181 if (ie
->target_cmd
== 0) {
5182 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
5183 return -TARGET_ENOSYS
;
5185 if (ie
->target_cmd
== cmd
)
5189 arg_type
= ie
->arg_type
;
5191 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
5194 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
5197 switch(arg_type
[0]) {
5200 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
5204 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
5208 target_size
= thunk_type_size(arg_type
, 0);
5209 switch(ie
->access
) {
5211 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5212 if (!is_error(ret
)) {
5213 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5215 return -TARGET_EFAULT
;
5216 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5217 unlock_user(argptr
, arg
, target_size
);
5221 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5223 return -TARGET_EFAULT
;
5224 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5225 unlock_user(argptr
, arg
, 0);
5226 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5230 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5232 return -TARGET_EFAULT
;
5233 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5234 unlock_user(argptr
, arg
, 0);
5235 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5236 if (!is_error(ret
)) {
5237 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5239 return -TARGET_EFAULT
;
5240 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5241 unlock_user(argptr
, arg
, target_size
);
5247 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5248 (long)cmd
, arg_type
[0]);
5249 ret
= -TARGET_ENOSYS
;
5255 static const bitmask_transtbl iflag_tbl
[] = {
5256 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
5257 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
5258 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
5259 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
5260 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
5261 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
5262 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
5263 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
5264 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
5265 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
5266 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
5267 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
5268 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
5269 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
5273 static const bitmask_transtbl oflag_tbl
[] = {
5274 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
5275 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
5276 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
5277 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
5278 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
5279 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
5280 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
5281 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
5282 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
5283 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
5284 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
5285 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
5286 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
5287 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
5288 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
5289 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
5290 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
5291 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
5292 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
5293 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
5294 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
5295 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
5296 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
5297 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
5301 static const bitmask_transtbl cflag_tbl
[] = {
5302 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
5303 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
5304 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
5305 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
5306 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
5307 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
5308 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
5309 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
5310 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
5311 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
5312 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
5313 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
5314 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
5315 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
5316 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
5317 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
5318 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
5319 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
5320 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
5321 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
5322 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
5323 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
5324 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
5325 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
5326 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
5327 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
5328 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
5329 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
5330 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
5331 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
5332 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
5336 static const bitmask_transtbl lflag_tbl
[] = {
5337 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
5338 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
5339 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
5340 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
5341 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
5342 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
5343 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
5344 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
5345 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
5346 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
5347 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
5348 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
5349 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
5350 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
5351 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
5355 static void target_to_host_termios (void *dst
, const void *src
)
5357 struct host_termios
*host
= dst
;
5358 const struct target_termios
*target
= src
;
5361 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
5363 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
5365 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
5367 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
5368 host
->c_line
= target
->c_line
;
5370 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
5371 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
5372 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
5373 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
5374 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
5375 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
5376 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
5377 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
5378 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
5379 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
5380 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
5381 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
5382 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
5383 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
5384 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
5385 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
5386 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
5387 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
5390 static void host_to_target_termios (void *dst
, const void *src
)
5392 struct target_termios
*target
= dst
;
5393 const struct host_termios
*host
= src
;
5396 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
5398 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
5400 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
5402 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
5403 target
->c_line
= host
->c_line
;
5405 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
5406 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
5407 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
5408 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
5409 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
5410 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
5411 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
5412 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
5413 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
5414 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
5415 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
5416 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
5417 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
5418 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
5419 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
5420 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
5421 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
5422 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
5425 static const StructEntry struct_termios_def
= {
5426 .convert
= { host_to_target_termios
, target_to_host_termios
},
5427 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
5428 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
5431 static bitmask_transtbl mmap_flags_tbl
[] = {
5432 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
5433 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
5434 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
5435 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
, MAP_ANONYMOUS
, MAP_ANONYMOUS
},
5436 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
, MAP_GROWSDOWN
, MAP_GROWSDOWN
},
5437 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
, MAP_DENYWRITE
, MAP_DENYWRITE
},
5438 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
, MAP_EXECUTABLE
, MAP_EXECUTABLE
},
5439 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
5440 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
, MAP_NORESERVE
,
5445 #if defined(TARGET_I386)
5447 /* NOTE: there is really one LDT for all the threads */
5448 static uint8_t *ldt_table
;
5450 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
5457 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
5458 if (size
> bytecount
)
5460 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
5462 return -TARGET_EFAULT
;
5463 /* ??? Should this by byteswapped? */
5464 memcpy(p
, ldt_table
, size
);
5465 unlock_user(p
, ptr
, size
);
5469 /* XXX: add locking support */
5470 static abi_long
write_ldt(CPUX86State
*env
,
5471 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
5473 struct target_modify_ldt_ldt_s ldt_info
;
5474 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5475 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5476 int seg_not_present
, useable
, lm
;
5477 uint32_t *lp
, entry_1
, entry_2
;
5479 if (bytecount
!= sizeof(ldt_info
))
5480 return -TARGET_EINVAL
;
5481 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
5482 return -TARGET_EFAULT
;
5483 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5484 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5485 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5486 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5487 unlock_user_struct(target_ldt_info
, ptr
, 0);
5489 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
5490 return -TARGET_EINVAL
;
5491 seg_32bit
= ldt_info
.flags
& 1;
5492 contents
= (ldt_info
.flags
>> 1) & 3;
5493 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5494 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5495 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5496 useable
= (ldt_info
.flags
>> 6) & 1;
5500 lm
= (ldt_info
.flags
>> 7) & 1;
5502 if (contents
== 3) {
5504 return -TARGET_EINVAL
;
5505 if (seg_not_present
== 0)
5506 return -TARGET_EINVAL
;
5508 /* allocate the LDT */
5510 env
->ldt
.base
= target_mmap(0,
5511 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
5512 PROT_READ
|PROT_WRITE
,
5513 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
5514 if (env
->ldt
.base
== -1)
5515 return -TARGET_ENOMEM
;
5516 memset(g2h(env
->ldt
.base
), 0,
5517 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
5518 env
->ldt
.limit
= 0xffff;
5519 ldt_table
= g2h(env
->ldt
.base
);
5522 /* NOTE: same code as Linux kernel */
5523 /* Allow LDTs to be cleared by the user. */
5524 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5527 read_exec_only
== 1 &&
5529 limit_in_pages
== 0 &&
5530 seg_not_present
== 1 &&
5538 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5539 (ldt_info
.limit
& 0x0ffff);
5540 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5541 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5542 (ldt_info
.limit
& 0xf0000) |
5543 ((read_exec_only
^ 1) << 9) |
5545 ((seg_not_present
^ 1) << 15) |
5547 (limit_in_pages
<< 23) |
5551 entry_2
|= (useable
<< 20);
5553 /* Install the new entry ... */
5555 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
5556 lp
[0] = tswap32(entry_1
);
5557 lp
[1] = tswap32(entry_2
);
5561 /* specific and weird i386 syscalls */
5562 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
5563 unsigned long bytecount
)
5569 ret
= read_ldt(ptr
, bytecount
);
5572 ret
= write_ldt(env
, ptr
, bytecount
, 1);
5575 ret
= write_ldt(env
, ptr
, bytecount
, 0);
5578 ret
= -TARGET_ENOSYS
;
5584 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5585 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5587 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5588 struct target_modify_ldt_ldt_s ldt_info
;
5589 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5590 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5591 int seg_not_present
, useable
, lm
;
5592 uint32_t *lp
, entry_1
, entry_2
;
5595 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5596 if (!target_ldt_info
)
5597 return -TARGET_EFAULT
;
5598 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5599 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5600 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5601 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5602 if (ldt_info
.entry_number
== -1) {
5603 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
5604 if (gdt_table
[i
] == 0) {
5605 ldt_info
.entry_number
= i
;
5606 target_ldt_info
->entry_number
= tswap32(i
);
5611 unlock_user_struct(target_ldt_info
, ptr
, 1);
5613 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
5614 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
5615 return -TARGET_EINVAL
;
5616 seg_32bit
= ldt_info
.flags
& 1;
5617 contents
= (ldt_info
.flags
>> 1) & 3;
5618 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5619 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5620 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5621 useable
= (ldt_info
.flags
>> 6) & 1;
5625 lm
= (ldt_info
.flags
>> 7) & 1;
5628 if (contents
== 3) {
5629 if (seg_not_present
== 0)
5630 return -TARGET_EINVAL
;
5633 /* NOTE: same code as Linux kernel */
5634 /* Allow LDTs to be cleared by the user. */
5635 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5636 if ((contents
== 0 &&
5637 read_exec_only
== 1 &&
5639 limit_in_pages
== 0 &&
5640 seg_not_present
== 1 &&
5648 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5649 (ldt_info
.limit
& 0x0ffff);
5650 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5651 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5652 (ldt_info
.limit
& 0xf0000) |
5653 ((read_exec_only
^ 1) << 9) |
5655 ((seg_not_present
^ 1) << 15) |
5657 (limit_in_pages
<< 23) |
5662 /* Install the new entry ... */
5664 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
5665 lp
[0] = tswap32(entry_1
);
5666 lp
[1] = tswap32(entry_2
);
5670 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5672 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5673 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5674 uint32_t base_addr
, limit
, flags
;
5675 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
5676 int seg_not_present
, useable
, lm
;
5677 uint32_t *lp
, entry_1
, entry_2
;
5679 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5680 if (!target_ldt_info
)
5681 return -TARGET_EFAULT
;
5682 idx
= tswap32(target_ldt_info
->entry_number
);
5683 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
5684 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
5685 unlock_user_struct(target_ldt_info
, ptr
, 1);
5686 return -TARGET_EINVAL
;
5688 lp
= (uint32_t *)(gdt_table
+ idx
);
5689 entry_1
= tswap32(lp
[0]);
5690 entry_2
= tswap32(lp
[1]);
5692 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
5693 contents
= (entry_2
>> 10) & 3;
5694 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
5695 seg_32bit
= (entry_2
>> 22) & 1;
5696 limit_in_pages
= (entry_2
>> 23) & 1;
5697 useable
= (entry_2
>> 20) & 1;
5701 lm
= (entry_2
>> 21) & 1;
5703 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
5704 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
5705 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
5706 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
5707 base_addr
= (entry_1
>> 16) |
5708 (entry_2
& 0xff000000) |
5709 ((entry_2
& 0xff) << 16);
5710 target_ldt_info
->base_addr
= tswapal(base_addr
);
5711 target_ldt_info
->limit
= tswap32(limit
);
5712 target_ldt_info
->flags
= tswap32(flags
);
5713 unlock_user_struct(target_ldt_info
, ptr
, 1);
5716 #endif /* TARGET_I386 && TARGET_ABI32 */
5718 #ifndef TARGET_ABI32
5719 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
5726 case TARGET_ARCH_SET_GS
:
5727 case TARGET_ARCH_SET_FS
:
5728 if (code
== TARGET_ARCH_SET_GS
)
5732 cpu_x86_load_seg(env
, idx
, 0);
5733 env
->segs
[idx
].base
= addr
;
5735 case TARGET_ARCH_GET_GS
:
5736 case TARGET_ARCH_GET_FS
:
5737 if (code
== TARGET_ARCH_GET_GS
)
5741 val
= env
->segs
[idx
].base
;
5742 if (put_user(val
, addr
, abi_ulong
))
5743 ret
= -TARGET_EFAULT
;
5746 ret
= -TARGET_EINVAL
;
5753 #endif /* defined(TARGET_I386) */
5755 #define NEW_STACK_SIZE 0x40000
5758 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
5761 pthread_mutex_t mutex
;
5762 pthread_cond_t cond
;
5765 abi_ulong child_tidptr
;
5766 abi_ulong parent_tidptr
;
5770 static void *clone_func(void *arg
)
5772 new_thread_info
*info
= arg
;
5777 rcu_register_thread();
5779 cpu
= ENV_GET_CPU(env
);
5781 ts
= (TaskState
*)cpu
->opaque
;
5782 info
->tid
= gettid();
5783 cpu
->host_tid
= info
->tid
;
5785 if (info
->child_tidptr
)
5786 put_user_u32(info
->tid
, info
->child_tidptr
);
5787 if (info
->parent_tidptr
)
5788 put_user_u32(info
->tid
, info
->parent_tidptr
);
5789 /* Enable signals. */
5790 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
5791 /* Signal to the parent that we're ready. */
5792 pthread_mutex_lock(&info
->mutex
);
5793 pthread_cond_broadcast(&info
->cond
);
5794 pthread_mutex_unlock(&info
->mutex
);
5795 /* Wait until the parent has finshed initializing the tls state. */
5796 pthread_mutex_lock(&clone_lock
);
5797 pthread_mutex_unlock(&clone_lock
);
5803 /* do_fork() Must return host values and target errnos (unlike most
5804 do_*() functions). */
5805 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
5806 abi_ulong parent_tidptr
, target_ulong newtls
,
5807 abi_ulong child_tidptr
)
5809 CPUState
*cpu
= ENV_GET_CPU(env
);
5813 CPUArchState
*new_env
;
5814 unsigned int nptl_flags
;
5817 /* Emulate vfork() with fork() */
5818 if (flags
& CLONE_VFORK
)
5819 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
5821 if (flags
& CLONE_VM
) {
5822 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
5823 new_thread_info info
;
5824 pthread_attr_t attr
;
5826 ts
= g_new0(TaskState
, 1);
5827 init_task_state(ts
);
5828 /* we create a new CPU instance. */
5829 new_env
= cpu_copy(env
);
5830 /* Init regs that differ from the parent. */
5831 cpu_clone_regs(new_env
, newsp
);
5832 new_cpu
= ENV_GET_CPU(new_env
);
5833 new_cpu
->opaque
= ts
;
5834 ts
->bprm
= parent_ts
->bprm
;
5835 ts
->info
= parent_ts
->info
;
5836 ts
->signal_mask
= parent_ts
->signal_mask
;
5838 flags
&= ~CLONE_NPTL_FLAGS2
;
5840 if (nptl_flags
& CLONE_CHILD_CLEARTID
) {
5841 ts
->child_tidptr
= child_tidptr
;
5844 if (nptl_flags
& CLONE_SETTLS
)
5845 cpu_set_tls (new_env
, newtls
);
5847 /* Grab a mutex so that thread setup appears atomic. */
5848 pthread_mutex_lock(&clone_lock
);
5850 memset(&info
, 0, sizeof(info
));
5851 pthread_mutex_init(&info
.mutex
, NULL
);
5852 pthread_mutex_lock(&info
.mutex
);
5853 pthread_cond_init(&info
.cond
, NULL
);
5855 if (nptl_flags
& CLONE_CHILD_SETTID
)
5856 info
.child_tidptr
= child_tidptr
;
5857 if (nptl_flags
& CLONE_PARENT_SETTID
)
5858 info
.parent_tidptr
= parent_tidptr
;
5860 ret
= pthread_attr_init(&attr
);
5861 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
5862 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
5863 /* It is not safe to deliver signals until the child has finished
5864 initializing, so temporarily block all signals. */
5865 sigfillset(&sigmask
);
5866 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
5868 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
5869 /* TODO: Free new CPU state if thread creation failed. */
5871 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
5872 pthread_attr_destroy(&attr
);
5874 /* Wait for the child to initialize. */
5875 pthread_cond_wait(&info
.cond
, &info
.mutex
);
5877 if (flags
& CLONE_PARENT_SETTID
)
5878 put_user_u32(ret
, parent_tidptr
);
5882 pthread_mutex_unlock(&info
.mutex
);
5883 pthread_cond_destroy(&info
.cond
);
5884 pthread_mutex_destroy(&info
.mutex
);
5885 pthread_mutex_unlock(&clone_lock
);
5887 /* if no CLONE_VM, we consider it is a fork */
5888 if ((flags
& ~(CSIGNAL
| CLONE_NPTL_FLAGS2
)) != 0) {
5889 return -TARGET_EINVAL
;
5892 if (block_signals()) {
5893 return -TARGET_ERESTARTSYS
;
5899 /* Child Process. */
5901 cpu_clone_regs(env
, newsp
);
5903 /* There is a race condition here. The parent process could
5904 theoretically read the TID in the child process before the child
5905 tid is set. This would require using either ptrace
5906 (not implemented) or having *_tidptr to point at a shared memory
5907 mapping. We can't repeat the spinlock hack used above because
5908 the child process gets its own copy of the lock. */
5909 if (flags
& CLONE_CHILD_SETTID
)
5910 put_user_u32(gettid(), child_tidptr
);
5911 if (flags
& CLONE_PARENT_SETTID
)
5912 put_user_u32(gettid(), parent_tidptr
);
5913 ts
= (TaskState
*)cpu
->opaque
;
5914 if (flags
& CLONE_SETTLS
)
5915 cpu_set_tls (env
, newtls
);
5916 if (flags
& CLONE_CHILD_CLEARTID
)
5917 ts
->child_tidptr
= child_tidptr
;
5925 /* warning : doesn't handle linux specific flags... */
5926 static int target_to_host_fcntl_cmd(int cmd
)
5929 case TARGET_F_DUPFD
:
5930 case TARGET_F_GETFD
:
5931 case TARGET_F_SETFD
:
5932 case TARGET_F_GETFL
:
5933 case TARGET_F_SETFL
:
5935 case TARGET_F_GETLK
:
5937 case TARGET_F_SETLK
:
5939 case TARGET_F_SETLKW
:
5941 case TARGET_F_GETOWN
:
5943 case TARGET_F_SETOWN
:
5945 case TARGET_F_GETSIG
:
5947 case TARGET_F_SETSIG
:
5949 #if TARGET_ABI_BITS == 32
5950 case TARGET_F_GETLK64
:
5952 case TARGET_F_SETLK64
:
5954 case TARGET_F_SETLKW64
:
5957 case TARGET_F_SETLEASE
:
5959 case TARGET_F_GETLEASE
:
5961 #ifdef F_DUPFD_CLOEXEC
5962 case TARGET_F_DUPFD_CLOEXEC
:
5963 return F_DUPFD_CLOEXEC
;
5965 case TARGET_F_NOTIFY
:
5968 case TARGET_F_GETOWN_EX
:
5972 case TARGET_F_SETOWN_EX
:
5976 case TARGET_F_SETPIPE_SZ
:
5977 return F_SETPIPE_SZ
;
5978 case TARGET_F_GETPIPE_SZ
:
5979 return F_GETPIPE_SZ
;
5982 return -TARGET_EINVAL
;
5984 return -TARGET_EINVAL
;
5987 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
5988 static const bitmask_transtbl flock_tbl
[] = {
5989 TRANSTBL_CONVERT(F_RDLCK
),
5990 TRANSTBL_CONVERT(F_WRLCK
),
5991 TRANSTBL_CONVERT(F_UNLCK
),
5992 TRANSTBL_CONVERT(F_EXLCK
),
5993 TRANSTBL_CONVERT(F_SHLCK
),
5997 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
5998 abi_ulong target_flock_addr
)
6000 struct target_flock
*target_fl
;
6003 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6004 return -TARGET_EFAULT
;
6007 __get_user(l_type
, &target_fl
->l_type
);
6008 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6009 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6010 __get_user(fl
->l_start
, &target_fl
->l_start
);
6011 __get_user(fl
->l_len
, &target_fl
->l_len
);
6012 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6013 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6017 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
6018 const struct flock64
*fl
)
6020 struct target_flock
*target_fl
;
6023 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6024 return -TARGET_EFAULT
;
6027 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6028 __put_user(l_type
, &target_fl
->l_type
);
6029 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6030 __put_user(fl
->l_start
, &target_fl
->l_start
);
6031 __put_user(fl
->l_len
, &target_fl
->l_len
);
6032 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6033 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6037 typedef abi_long
from_flock64_fn(struct flock64
*fl
, abi_ulong target_addr
);
6038 typedef abi_long
to_flock64_fn(abi_ulong target_addr
, const struct flock64
*fl
);
6040 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6041 static inline abi_long
copy_from_user_eabi_flock64(struct flock64
*fl
,
6042 abi_ulong target_flock_addr
)
6044 struct target_eabi_flock64
*target_fl
;
6047 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6048 return -TARGET_EFAULT
;
6051 __get_user(l_type
, &target_fl
->l_type
);
6052 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6053 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6054 __get_user(fl
->l_start
, &target_fl
->l_start
);
6055 __get_user(fl
->l_len
, &target_fl
->l_len
);
6056 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6057 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6061 static inline abi_long
copy_to_user_eabi_flock64(abi_ulong target_flock_addr
,
6062 const struct flock64
*fl
)
6064 struct target_eabi_flock64
*target_fl
;
6067 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6068 return -TARGET_EFAULT
;
6071 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6072 __put_user(l_type
, &target_fl
->l_type
);
6073 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6074 __put_user(fl
->l_start
, &target_fl
->l_start
);
6075 __put_user(fl
->l_len
, &target_fl
->l_len
);
6076 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6077 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6082 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
6083 abi_ulong target_flock_addr
)
6085 struct target_flock64
*target_fl
;
6088 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6089 return -TARGET_EFAULT
;
6092 __get_user(l_type
, &target_fl
->l_type
);
6093 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6094 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6095 __get_user(fl
->l_start
, &target_fl
->l_start
);
6096 __get_user(fl
->l_len
, &target_fl
->l_len
);
6097 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6098 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6102 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
6103 const struct flock64
*fl
)
6105 struct target_flock64
*target_fl
;
6108 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6109 return -TARGET_EFAULT
;
6112 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6113 __put_user(l_type
, &target_fl
->l_type
);
6114 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6115 __put_user(fl
->l_start
, &target_fl
->l_start
);
6116 __put_user(fl
->l_len
, &target_fl
->l_len
);
6117 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6118 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6122 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
6124 struct flock64 fl64
;
6126 struct f_owner_ex fox
;
6127 struct target_f_owner_ex
*target_fox
;
6130 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
6132 if (host_cmd
== -TARGET_EINVAL
)
6136 case TARGET_F_GETLK
:
6137 ret
= copy_from_user_flock(&fl64
, arg
);
6141 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6143 ret
= copy_to_user_flock(arg
, &fl64
);
6147 case TARGET_F_SETLK
:
6148 case TARGET_F_SETLKW
:
6149 ret
= copy_from_user_flock(&fl64
, arg
);
6153 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6156 case TARGET_F_GETLK64
:
6157 ret
= copy_from_user_flock64(&fl64
, arg
);
6161 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6163 ret
= copy_to_user_flock64(arg
, &fl64
);
6166 case TARGET_F_SETLK64
:
6167 case TARGET_F_SETLKW64
:
6168 ret
= copy_from_user_flock64(&fl64
, arg
);
6172 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6175 case TARGET_F_GETFL
:
6176 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6178 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
6182 case TARGET_F_SETFL
:
6183 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
6184 target_to_host_bitmask(arg
,
6189 case TARGET_F_GETOWN_EX
:
6190 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6192 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
6193 return -TARGET_EFAULT
;
6194 target_fox
->type
= tswap32(fox
.type
);
6195 target_fox
->pid
= tswap32(fox
.pid
);
6196 unlock_user_struct(target_fox
, arg
, 1);
6202 case TARGET_F_SETOWN_EX
:
6203 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
6204 return -TARGET_EFAULT
;
6205 fox
.type
= tswap32(target_fox
->type
);
6206 fox
.pid
= tswap32(target_fox
->pid
);
6207 unlock_user_struct(target_fox
, arg
, 0);
6208 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6212 case TARGET_F_SETOWN
:
6213 case TARGET_F_GETOWN
:
6214 case TARGET_F_SETSIG
:
6215 case TARGET_F_GETSIG
:
6216 case TARGET_F_SETLEASE
:
6217 case TARGET_F_GETLEASE
:
6218 case TARGET_F_SETPIPE_SZ
:
6219 case TARGET_F_GETPIPE_SZ
:
6220 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6224 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
#ifdef USE_UID16

/* Targets with 16-bit uid_t: clamp 32-bit host IDs into the 16-bit
 * range (65534 is the conventional overflow value) and widen guest
 * IDs back, preserving the -1 "no change" sentinel. */
static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}

static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}

static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}

static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}

static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */

/* 32-bit uid_t targets: IDs pass through unchanged. */
static inline int high2lowuid(int uid)
{
    return uid;
}

static inline int high2lowgid(int gid)
{
    return gid;
}

static inline int low2highuid(int uid)
{
    return uid;
}

static inline int low2highgid(int gid)
{
    return gid;
}

static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
6296 /* We must do direct syscalls for setting UID/GID, because we want to
6297 * implement the Linux system call semantics of "change only for this thread",
6298 * not the libc/POSIX semantics of "change for all threads in process".
6299 * (See http://ewontfix.com/17/ for more details.)
6300 * We use the 32-bit version of the syscalls if present; if it is not
6301 * then either the host architecture supports 32-bit UIDs natively with
6302 * the standard syscall, or the 16-bit UID is the best we can do.
6304 #ifdef __NR_setuid32
6305 #define __NR_sys_setuid __NR_setuid32
6307 #define __NR_sys_setuid __NR_setuid
6309 #ifdef __NR_setgid32
6310 #define __NR_sys_setgid __NR_setgid32
6312 #define __NR_sys_setgid __NR_setgid
6314 #ifdef __NR_setresuid32
6315 #define __NR_sys_setresuid __NR_setresuid32
6317 #define __NR_sys_setresuid __NR_setresuid
6319 #ifdef __NR_setresgid32
6320 #define __NR_sys_setresgid __NR_setresgid32
6322 #define __NR_sys_setresgid __NR_setresgid
6325 _syscall1(int, sys_setuid
, uid_t
, uid
)
6326 _syscall1(int, sys_setgid
, gid_t
, gid
)
6327 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
6328 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
6330 void syscall_init(void)
6333 const argtype
*arg_type
;
6337 thunk_init(STRUCT_MAX
);
6339 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6340 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6341 #include "syscall_types.h"
6343 #undef STRUCT_SPECIAL
6345 /* Build target_to_host_errno_table[] table from
6346 * host_to_target_errno_table[]. */
6347 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
6348 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
6351 /* we patch the ioctl size if necessary. We rely on the fact that
6352 no ioctl has all the bits at '1' in the size field */
6354 while (ie
->target_cmd
!= 0) {
6355 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
6356 TARGET_IOC_SIZEMASK
) {
6357 arg_type
= ie
->arg_type
;
6358 if (arg_type
[0] != TYPE_PTR
) {
6359 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
6364 size
= thunk_type_size(arg_type
, 0);
6365 ie
->target_cmd
= (ie
->target_cmd
&
6366 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
6367 (size
<< TARGET_IOC_SIZESHIFT
);
6370 /* automatic consistency check if same arch */
6371 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6372 (defined(__x86_64__) && defined(TARGET_X86_64))
6373 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
6374 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6375 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#if TARGET_ABI_BITS == 32
/* Assemble a 64-bit file offset from the two guest register words;
 * which word holds the high half depends on guest endianness. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABIs pass the offset in a single register. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* truncate64: some ABIs align 64-bit register pairs, shifting the
 * offset words up by one argument slot. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* ftruncate64: same register-pair alignment handling as truncate64. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
6426 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
6427 abi_ulong target_addr
)
6429 struct target_timespec
*target_ts
;
6431 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
6432 return -TARGET_EFAULT
;
6433 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6434 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6435 unlock_user_struct(target_ts
, target_addr
, 0);
6439 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
6440 struct timespec
*host_ts
)
6442 struct target_timespec
*target_ts
;
6444 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
6445 return -TARGET_EFAULT
;
6446 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6447 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6448 unlock_user_struct(target_ts
, target_addr
, 1);
6452 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
6453 abi_ulong target_addr
)
6455 struct target_itimerspec
*target_itspec
;
6457 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
6458 return -TARGET_EFAULT
;
6461 host_itspec
->it_interval
.tv_sec
=
6462 tswapal(target_itspec
->it_interval
.tv_sec
);
6463 host_itspec
->it_interval
.tv_nsec
=
6464 tswapal(target_itspec
->it_interval
.tv_nsec
);
6465 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
6466 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
6468 unlock_user_struct(target_itspec
, target_addr
, 1);
6472 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
6473 struct itimerspec
*host_its
)
6475 struct target_itimerspec
*target_itspec
;
6477 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
6478 return -TARGET_EFAULT
;
6481 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
6482 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
6484 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
6485 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
6487 unlock_user_struct(target_itspec
, target_addr
, 0);
6491 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
6492 abi_ulong target_addr
)
6494 struct target_sigevent
*target_sevp
;
6496 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
6497 return -TARGET_EFAULT
;
6500 /* This union is awkward on 64 bit systems because it has a 32 bit
6501 * integer and a pointer in it; we follow the conversion approach
6502 * used for handling sigval types in signal.c so the guest should get
6503 * the correct value back even if we did a 64 bit byteswap and it's
6504 * using the 32 bit integer.
6506 host_sevp
->sigev_value
.sival_ptr
=
6507 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
6508 host_sevp
->sigev_signo
=
6509 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
6510 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
6511 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
6513 unlock_user_struct(target_sevp
, target_addr
, 1);
#if defined(TARGET_NR_mlockall)
/* Translate guest MCL_* flag bits for mlockall(2) into host bits. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }

    return result;
}
#endif
6532 static inline abi_long
host_to_target_stat64(void *cpu_env
,
6533 abi_ulong target_addr
,
6534 struct stat
*host_st
)
6536 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6537 if (((CPUARMState
*)cpu_env
)->eabi
) {
6538 struct target_eabi_stat64
*target_st
;
6540 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
6541 return -TARGET_EFAULT
;
6542 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
6543 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
6544 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
6545 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6546 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
6548 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
6549 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
6550 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
6551 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
6552 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
6553 __put_user(host_st
->st_size
, &target_st
->st_size
);
6554 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
6555 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
6556 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
6557 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
6558 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
6559 unlock_user_struct(target_st
, target_addr
, 1);
6563 #if defined(TARGET_HAS_STRUCT_STAT64)
6564 struct target_stat64
*target_st
;
6566 struct target_stat
*target_st
;
6569 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
6570 return -TARGET_EFAULT
;
6571 memset(target_st
, 0, sizeof(*target_st
));
6572 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
6573 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
6574 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6575 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
6577 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
6578 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
6579 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
6580 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
6581 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
6582 /* XXX: better use of kernel struct */
6583 __put_user(host_st
->st_size
, &target_st
->st_size
);
6584 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
6585 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
6586 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
6587 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
6588 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
6589 unlock_user_struct(target_st
, target_addr
, 1);
6595 /* ??? Using host futex calls even when target atomic operations
6596 are not really atomic probably breaks things. However implementing
6597 futexes locally would make futexes shared between multiple processes
6598 tricky. However they're probably useless because guest atomic
6599 operations won't work either. */
6600 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
6601 target_ulong uaddr2
, int val3
)
6603 struct timespec ts
, *pts
;
6606 /* ??? We assume FUTEX_* constants are the same on both host
6608 #ifdef FUTEX_CMD_MASK
6609 base_op
= op
& FUTEX_CMD_MASK
;
6615 case FUTEX_WAIT_BITSET
:
6618 target_to_host_timespec(pts
, timeout
);
6622 return get_errno(safe_futex(g2h(uaddr
), op
, tswap32(val
),
6625 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
6627 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
6629 case FUTEX_CMP_REQUEUE
:
6631 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6632 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6633 But the prototype takes a `struct timespec *'; insert casts
6634 to satisfy the compiler. We do not need to tswap TIMEOUT
6635 since it's not compared to guest memory. */
6636 pts
= (struct timespec
*)(uintptr_t) timeout
;
6637 return get_errno(safe_futex(g2h(uaddr
), op
, val
, pts
,
6639 (base_op
== FUTEX_CMP_REQUEUE
6643 return -TARGET_ENOSYS
;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate name_to_handle_at(2): build the handle in a host-side buffer,
 * then copy it (with byteswapped header fields) into guest memory. */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    int mid = 0;
    abi_long ret;
    struct file_handle *target_fh;
    struct file_handle *fh;
    char *name;
    unsigned int size, total_size;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate open_by_handle_at(2): copy the guest handle into a host
 * buffer (byteswapping the header fields) and pass it to the host. */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
6734 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6736 /* signalfd siginfo conversion */
6739 host_to_target_signalfd_siginfo(struct signalfd_siginfo
*tinfo
,
6740 const struct signalfd_siginfo
*info
)
6742 int sig
= host_to_target_signal(info
->ssi_signo
);
6744 /* linux/signalfd.h defines a ssi_addr_lsb
6745 * not defined in sys/signalfd.h but used by some kernels
6748 #ifdef BUS_MCEERR_AO
6749 if (tinfo
->ssi_signo
== SIGBUS
&&
6750 (tinfo
->ssi_code
== BUS_MCEERR_AR
||
6751 tinfo
->ssi_code
== BUS_MCEERR_AO
)) {
6752 uint16_t *ssi_addr_lsb
= (uint16_t *)(&info
->ssi_addr
+ 1);
6753 uint16_t *tssi_addr_lsb
= (uint16_t *)(&tinfo
->ssi_addr
+ 1);
6754 *tssi_addr_lsb
= tswap16(*ssi_addr_lsb
);
6758 tinfo
->ssi_signo
= tswap32(sig
);
6759 tinfo
->ssi_errno
= tswap32(tinfo
->ssi_errno
);
6760 tinfo
->ssi_code
= tswap32(info
->ssi_code
);
6761 tinfo
->ssi_pid
= tswap32(info
->ssi_pid
);
6762 tinfo
->ssi_uid
= tswap32(info
->ssi_uid
);
6763 tinfo
->ssi_fd
= tswap32(info
->ssi_fd
);
6764 tinfo
->ssi_tid
= tswap32(info
->ssi_tid
);
6765 tinfo
->ssi_band
= tswap32(info
->ssi_band
);
6766 tinfo
->ssi_overrun
= tswap32(info
->ssi_overrun
);
6767 tinfo
->ssi_trapno
= tswap32(info
->ssi_trapno
);
6768 tinfo
->ssi_status
= tswap32(info
->ssi_status
);
6769 tinfo
->ssi_int
= tswap32(info
->ssi_int
);
6770 tinfo
->ssi_ptr
= tswap64(info
->ssi_ptr
);
6771 tinfo
->ssi_utime
= tswap64(info
->ssi_utime
);
6772 tinfo
->ssi_stime
= tswap64(info
->ssi_stime
);
6773 tinfo
->ssi_addr
= tswap64(info
->ssi_addr
);
6776 static abi_long
host_to_target_data_signalfd(void *buf
, size_t len
)
6780 for (i
= 0; i
< len
; i
+= sizeof(struct signalfd_siginfo
)) {
6781 host_to_target_signalfd_siginfo(buf
+ i
, buf
+ i
);
6787 static TargetFdTrans target_signalfd_trans
= {
6788 .host_to_target_data
= host_to_target_data_signalfd
,
6791 static abi_long
do_signalfd4(int fd
, abi_long mask
, int flags
)
6794 target_sigset_t
*target_mask
;
6798 if (flags
& ~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
)) {
6799 return -TARGET_EINVAL
;
6801 if (!lock_user_struct(VERIFY_READ
, target_mask
, mask
, 1)) {
6802 return -TARGET_EFAULT
;
6805 target_to_host_sigset(&host_mask
, target_mask
);
6807 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
6809 ret
= get_errno(signalfd(fd
, &host_mask
, host_flags
));
6811 fd_trans_register(ret
, &target_signalfd_trans
);
6814 unlock_user_struct(target_mask
, mask
, 0);
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same. */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
/* Fill fd with the guest's /proc/self/cmdline: copy the host file but
 * drop the first NUL-terminated word (the qemu binary path). */
static int open_self_cmdline(void *cpu_env, int fd)
{
    int fd_orig = -1;
    bool word_skipped = false;

    fd_orig = open("/proc/self/cmdline", O_RDONLY);
    if (fd_orig < 0) {
        return fd_orig;
    }

    while (true) {
        ssize_t nb_read;
        char buf[128];
        char *cp_buf = buf;

        nb_read = read(fd_orig, buf, sizeof(buf));
        if (nb_read < 0) {
            int e = errno;
            fd_orig = close(fd_orig);
            errno = e;
            return -1;
        } else if (nb_read == 0) {
            break;
        }

        if (!word_skipped) {
            /* Skip the first string, which is the path to qemu-*-static
               instead of the actual command. */
            cp_buf = memchr(buf, 0, nb_read);
            if (cp_buf) {
                /* Null byte found, skip one string */
                cp_buf++;
                nb_read -= cp_buf - buf;
                word_skipped = true;
            }
        }

        if (word_skipped) {
            if (write(fd, cp_buf, nb_read) != nb_read) {
                int e = errno;
                close(fd_orig);
                errno = e;
                return -1;
            }
        }
    }

    return close(fd_orig);
}
6884 static int open_self_maps(void *cpu_env
, int fd
)
6886 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
6887 TaskState
*ts
= cpu
->opaque
;
6893 fp
= fopen("/proc/self/maps", "r");
6898 while ((read
= getline(&line
, &len
, fp
)) != -1) {
6899 int fields
, dev_maj
, dev_min
, inode
;
6900 uint64_t min
, max
, offset
;
6901 char flag_r
, flag_w
, flag_x
, flag_p
;
6902 char path
[512] = "";
6903 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
6904 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
6905 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
6907 if ((fields
< 10) || (fields
> 11)) {
6910 if (h2g_valid(min
)) {
6911 int flags
= page_get_flags(h2g(min
));
6912 max
= h2g_valid(max
- 1) ? max
: (uintptr_t)g2h(GUEST_ADDR_MAX
);
6913 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
6916 if (h2g(min
) == ts
->info
->stack_limit
) {
6917 pstrcpy(path
, sizeof(path
), " [stack]");
6919 dprintf(fd
, TARGET_ABI_FMT_lx
"-" TARGET_ABI_FMT_lx
6920 " %c%c%c%c %08" PRIx64
" %02x:%02x %d %s%s\n",
6921 h2g(min
), h2g(max
- 1) + 1, flag_r
, flag_w
,
6922 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
6923 path
[0] ? " " : "", path
);
6933 static int open_self_stat(void *cpu_env
, int fd
)
6935 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
6936 TaskState
*ts
= cpu
->opaque
;
6937 abi_ulong start_stack
= ts
->info
->start_stack
;
6940 for (i
= 0; i
< 44; i
++) {
6948 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
6949 } else if (i
== 1) {
6951 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
6952 } else if (i
== 27) {
6955 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
6957 /* for the rest, there is MasterCard */
6958 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
6962 if (write(fd
, buf
, len
) != len
) {
6970 static int open_self_auxv(void *cpu_env
, int fd
)
6972 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
6973 TaskState
*ts
= cpu
->opaque
;
6974 abi_ulong auxv
= ts
->info
->saved_auxv
;
6975 abi_ulong len
= ts
->info
->auxv_len
;
6979 * Auxiliary vector is stored in target process stack.
6980 * read in whole auxv vector and copy it to file
6982 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
6986 r
= write(fd
, ptr
, len
);
6993 lseek(fd
, 0, SEEK_SET
);
6994 unlock_user(ptr
, auxv
, len
);
/* Return 1 if filename names this process's own /proc entry for the
 * given relative name (via /proc/self/ or /proc/<our-pid>/). */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}
7024 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7025 static int is_proc(const char *filename
, const char *entry
)
7027 return strcmp(filename
, entry
) == 0;
7030 static int open_net_route(void *cpu_env
, int fd
)
7037 fp
= fopen("/proc/net/route", "r");
7044 read
= getline(&line
, &len
, fp
);
7045 dprintf(fd
, "%s", line
);
7049 while ((read
= getline(&line
, &len
, fp
)) != -1) {
7051 uint32_t dest
, gw
, mask
;
7052 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
7053 sscanf(line
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7054 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
7055 &mask
, &mtu
, &window
, &irtt
);
7056 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7057 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
7058 metric
, tswap32(mask
), mtu
, window
, irtt
);
7068 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
7071 const char *filename
;
7072 int (*fill
)(void *cpu_env
, int fd
);
7073 int (*cmp
)(const char *s1
, const char *s2
);
7075 const struct fake_open
*fake_open
;
7076 static const struct fake_open fakes
[] = {
7077 { "maps", open_self_maps
, is_proc_myself
},
7078 { "stat", open_self_stat
, is_proc_myself
},
7079 { "auxv", open_self_auxv
, is_proc_myself
},
7080 { "cmdline", open_self_cmdline
, is_proc_myself
},
7081 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7082 { "/proc/net/route", open_net_route
, is_proc
},
7084 { NULL
, NULL
, NULL
}
7087 if (is_proc_myself(pathname
, "exe")) {
7088 int execfd
= qemu_getauxval(AT_EXECFD
);
7089 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
7092 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
7093 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
7098 if (fake_open
->filename
) {
7100 char filename
[PATH_MAX
];
7103 /* create temporary file to map stat to */
7104 tmpdir
= getenv("TMPDIR");
7107 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
7108 fd
= mkstemp(filename
);
7114 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
7120 lseek(fd
, 0, SEEK_SET
);
7125 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
7128 #define TIMER_MAGIC 0x0caf0000
7129 #define TIMER_MAGIC_MASK 0xffff0000
7131 /* Convert QEMU provided timer ID back to internal 16bit index format */
7132 static target_timer_t
get_timer_id(abi_long arg
)
7134 target_timer_t timerid
= arg
;
7136 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
7137 return -TARGET_EINVAL
;
7142 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
7143 return -TARGET_EINVAL
;
7149 /* do_syscall() should always have a single exit point at the end so
7150 that actions, such as logging of syscall results, can be performed.
7151 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7152 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
7153 abi_long arg2
, abi_long arg3
, abi_long arg4
,
7154 abi_long arg5
, abi_long arg6
, abi_long arg7
,
7157 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
7163 #if defined(DEBUG_ERESTARTSYS)
7164 /* Debug-only code for exercising the syscall-restart code paths
7165 * in the per-architecture cpu main loops: restart every syscall
7166 * the guest makes once before letting it through.
7173 return -TARGET_ERESTARTSYS
;
7179 gemu_log("syscall %d", num
);
7181 trace_guest_user_syscall(cpu
, num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, arg7
, arg8
);
7183 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7186 case TARGET_NR_exit
:
7187 /* In old applications this may be used to implement _exit(2).
7188 However in threaded applictions it is used for thread termination,
7189 and _exit_group is used for application termination.
7190 Do thread termination if we have more then one thread. */
7192 if (block_signals()) {
7193 ret
= -TARGET_ERESTARTSYS
;
7197 if (CPU_NEXT(first_cpu
)) {
7201 /* Remove the CPU from the list. */
7202 QTAILQ_REMOVE(&cpus
, cpu
, node
);
7205 if (ts
->child_tidptr
) {
7206 put_user_u32(0, ts
->child_tidptr
);
7207 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
7211 object_unref(OBJECT(cpu
));
7213 rcu_unregister_thread();
7219 gdb_exit(cpu_env
, arg1
);
7221 ret
= 0; /* avoid warning */
7223 case TARGET_NR_read
:
7227 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
7229 ret
= get_errno(safe_read(arg1
, p
, arg3
));
7231 fd_trans_host_to_target_data(arg1
)) {
7232 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
7234 unlock_user(p
, arg2
, ret
);
7237 case TARGET_NR_write
:
7238 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
7240 ret
= get_errno(safe_write(arg1
, p
, arg3
));
7241 unlock_user(p
, arg2
, 0);
7243 #ifdef TARGET_NR_open
7244 case TARGET_NR_open
:
7245 if (!(p
= lock_user_string(arg1
)))
7247 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
7248 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
7250 fd_trans_unregister(ret
);
7251 unlock_user(p
, arg1
, 0);
7254 case TARGET_NR_openat
:
7255 if (!(p
= lock_user_string(arg2
)))
7257 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
7258 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
7260 fd_trans_unregister(ret
);
7261 unlock_user(p
, arg2
, 0);
7263 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7264 case TARGET_NR_name_to_handle_at
:
7265 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
7268 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7269 case TARGET_NR_open_by_handle_at
:
7270 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
7271 fd_trans_unregister(ret
);
7274 case TARGET_NR_close
:
7275 fd_trans_unregister(arg1
);
7276 ret
= get_errno(close(arg1
));
7281 #ifdef TARGET_NR_fork
7282 case TARGET_NR_fork
:
7283 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
7286 #ifdef TARGET_NR_waitpid
7287 case TARGET_NR_waitpid
:
7290 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
7291 if (!is_error(ret
) && arg2
&& ret
7292 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
7297 #ifdef TARGET_NR_waitid
7298 case TARGET_NR_waitid
:
7302 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
7303 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
7304 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
7306 host_to_target_siginfo(p
, &info
);
7307 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
7312 #ifdef TARGET_NR_creat /* not on alpha */
7313 case TARGET_NR_creat
:
7314 if (!(p
= lock_user_string(arg1
)))
7316 ret
= get_errno(creat(p
, arg2
));
7317 fd_trans_unregister(ret
);
7318 unlock_user(p
, arg1
, 0);
7321 #ifdef TARGET_NR_link
7322 case TARGET_NR_link
:
7325 p
= lock_user_string(arg1
);
7326 p2
= lock_user_string(arg2
);
7328 ret
= -TARGET_EFAULT
;
7330 ret
= get_errno(link(p
, p2
));
7331 unlock_user(p2
, arg2
, 0);
7332 unlock_user(p
, arg1
, 0);
7336 #if defined(TARGET_NR_linkat)
7337 case TARGET_NR_linkat
:
7342 p
= lock_user_string(arg2
);
7343 p2
= lock_user_string(arg4
);
7345 ret
= -TARGET_EFAULT
;
7347 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
7348 unlock_user(p
, arg2
, 0);
7349 unlock_user(p2
, arg4
, 0);
7353 #ifdef TARGET_NR_unlink
7354 case TARGET_NR_unlink
:
7355 if (!(p
= lock_user_string(arg1
)))
7357 ret
= get_errno(unlink(p
));
7358 unlock_user(p
, arg1
, 0);
7361 #if defined(TARGET_NR_unlinkat)
7362 case TARGET_NR_unlinkat
:
7363 if (!(p
= lock_user_string(arg2
)))
7365 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
7366 unlock_user(p
, arg2
, 0);
7369 case TARGET_NR_execve
:
7371 char **argp
, **envp
;
7374 abi_ulong guest_argp
;
7375 abi_ulong guest_envp
;
7382 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
7383 if (get_user_ual(addr
, gp
))
7391 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
7392 if (get_user_ual(addr
, gp
))
7399 argp
= alloca((argc
+ 1) * sizeof(void *));
7400 envp
= alloca((envc
+ 1) * sizeof(void *));
7402 for (gp
= guest_argp
, q
= argp
; gp
;
7403 gp
+= sizeof(abi_ulong
), q
++) {
7404 if (get_user_ual(addr
, gp
))
7408 if (!(*q
= lock_user_string(addr
)))
7410 total_size
+= strlen(*q
) + 1;
7414 for (gp
= guest_envp
, q
= envp
; gp
;
7415 gp
+= sizeof(abi_ulong
), q
++) {
7416 if (get_user_ual(addr
, gp
))
7420 if (!(*q
= lock_user_string(addr
)))
7422 total_size
+= strlen(*q
) + 1;
7426 if (!(p
= lock_user_string(arg1
)))
7428 /* Although execve() is not an interruptible syscall it is
7429 * a special case where we must use the safe_syscall wrapper:
7430 * if we allow a signal to happen before we make the host
7431 * syscall then we will 'lose' it, because at the point of
7432 * execve the process leaves QEMU's control. So we use the
7433 * safe syscall wrapper to ensure that we either take the
7434 * signal as a guest signal, or else it does not happen
7435 * before the execve completes and makes it the other
7436 * program's problem.
7438 ret
= get_errno(safe_execve(p
, argp
, envp
));
7439 unlock_user(p
, arg1
, 0);
7444 ret
= -TARGET_EFAULT
;
7447 for (gp
= guest_argp
, q
= argp
; *q
;
7448 gp
+= sizeof(abi_ulong
), q
++) {
7449 if (get_user_ual(addr
, gp
)
7452 unlock_user(*q
, addr
, 0);
7454 for (gp
= guest_envp
, q
= envp
; *q
;
7455 gp
+= sizeof(abi_ulong
), q
++) {
7456 if (get_user_ual(addr
, gp
)
7459 unlock_user(*q
, addr
, 0);
7463 case TARGET_NR_chdir
:
7464 if (!(p
= lock_user_string(arg1
)))
7466 ret
= get_errno(chdir(p
));
7467 unlock_user(p
, arg1
, 0);
7469 #ifdef TARGET_NR_time
7470 case TARGET_NR_time
:
7473 ret
= get_errno(time(&host_time
));
7476 && put_user_sal(host_time
, arg1
))
7481 #ifdef TARGET_NR_mknod
7482 case TARGET_NR_mknod
:
7483 if (!(p
= lock_user_string(arg1
)))
7485 ret
= get_errno(mknod(p
, arg2
, arg3
));
7486 unlock_user(p
, arg1
, 0);
7489 #if defined(TARGET_NR_mknodat)
7490 case TARGET_NR_mknodat
:
7491 if (!(p
= lock_user_string(arg2
)))
7493 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
7494 unlock_user(p
, arg2
, 0);
7497 #ifdef TARGET_NR_chmod
7498 case TARGET_NR_chmod
:
7499 if (!(p
= lock_user_string(arg1
)))
7501 ret
= get_errno(chmod(p
, arg2
));
7502 unlock_user(p
, arg1
, 0);
7505 #ifdef TARGET_NR_break
7506 case TARGET_NR_break
:
7509 #ifdef TARGET_NR_oldstat
7510 case TARGET_NR_oldstat
:
7513 case TARGET_NR_lseek
:
7514 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
7516 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7517 /* Alpha specific */
7518 case TARGET_NR_getxpid
:
7519 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
7520 ret
= get_errno(getpid());
7523 #ifdef TARGET_NR_getpid
7524 case TARGET_NR_getpid
:
7525 ret
= get_errno(getpid());
7528 case TARGET_NR_mount
:
7530 /* need to look at the data field */
7534 p
= lock_user_string(arg1
);
7542 p2
= lock_user_string(arg2
);
7545 unlock_user(p
, arg1
, 0);
7551 p3
= lock_user_string(arg3
);
7554 unlock_user(p
, arg1
, 0);
7556 unlock_user(p2
, arg2
, 0);
7563 /* FIXME - arg5 should be locked, but it isn't clear how to
7564 * do that since it's not guaranteed to be a NULL-terminated
7568 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
7570 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
7572 ret
= get_errno(ret
);
7575 unlock_user(p
, arg1
, 0);
7577 unlock_user(p2
, arg2
, 0);
7579 unlock_user(p3
, arg3
, 0);
7583 #ifdef TARGET_NR_umount
7584 case TARGET_NR_umount
:
7585 if (!(p
= lock_user_string(arg1
)))
7587 ret
= get_errno(umount(p
));
7588 unlock_user(p
, arg1
, 0);
7591 #ifdef TARGET_NR_stime /* not on alpha */
7592 case TARGET_NR_stime
:
7595 if (get_user_sal(host_time
, arg1
))
7597 ret
= get_errno(stime(&host_time
));
7601 case TARGET_NR_ptrace
:
7603 #ifdef TARGET_NR_alarm /* not on alpha */
7604 case TARGET_NR_alarm
:
7608 #ifdef TARGET_NR_oldfstat
7609 case TARGET_NR_oldfstat
:
7612 #ifdef TARGET_NR_pause /* not on alpha */
7613 case TARGET_NR_pause
:
7614 if (!block_signals()) {
7615 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
7617 ret
= -TARGET_EINTR
;
7620 #ifdef TARGET_NR_utime
7621 case TARGET_NR_utime
:
7623 struct utimbuf tbuf
, *host_tbuf
;
7624 struct target_utimbuf
*target_tbuf
;
7626 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
7628 tbuf
.actime
= tswapal(target_tbuf
->actime
);
7629 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
7630 unlock_user_struct(target_tbuf
, arg2
, 0);
7635 if (!(p
= lock_user_string(arg1
)))
7637 ret
= get_errno(utime(p
, host_tbuf
));
7638 unlock_user(p
, arg1
, 0);
7642 #ifdef TARGET_NR_utimes
7643 case TARGET_NR_utimes
:
7645 struct timeval
*tvp
, tv
[2];
7647 if (copy_from_user_timeval(&tv
[0], arg2
)
7648 || copy_from_user_timeval(&tv
[1],
7649 arg2
+ sizeof(struct target_timeval
)))
7655 if (!(p
= lock_user_string(arg1
)))
7657 ret
= get_errno(utimes(p
, tvp
));
7658 unlock_user(p
, arg1
, 0);
7662 #if defined(TARGET_NR_futimesat)
7663 case TARGET_NR_futimesat
:
7665 struct timeval
*tvp
, tv
[2];
7667 if (copy_from_user_timeval(&tv
[0], arg3
)
7668 || copy_from_user_timeval(&tv
[1],
7669 arg3
+ sizeof(struct target_timeval
)))
7675 if (!(p
= lock_user_string(arg2
)))
7677 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
7678 unlock_user(p
, arg2
, 0);
7682 #ifdef TARGET_NR_stty
7683 case TARGET_NR_stty
:
7686 #ifdef TARGET_NR_gtty
7687 case TARGET_NR_gtty
:
7690 #ifdef TARGET_NR_access
7691 case TARGET_NR_access
:
7692 if (!(p
= lock_user_string(arg1
)))
7694 ret
= get_errno(access(path(p
), arg2
));
7695 unlock_user(p
, arg1
, 0);
7698 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7699 case TARGET_NR_faccessat
:
7700 if (!(p
= lock_user_string(arg2
)))
7702 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
7703 unlock_user(p
, arg2
, 0);
7706 #ifdef TARGET_NR_nice /* not on alpha */
7707 case TARGET_NR_nice
:
7708 ret
= get_errno(nice(arg1
));
7711 #ifdef TARGET_NR_ftime
7712 case TARGET_NR_ftime
:
7715 case TARGET_NR_sync
:
7719 case TARGET_NR_kill
:
7720 ret
= get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
7722 #ifdef TARGET_NR_rename
7723 case TARGET_NR_rename
:
7726 p
= lock_user_string(arg1
);
7727 p2
= lock_user_string(arg2
);
7729 ret
= -TARGET_EFAULT
;
7731 ret
= get_errno(rename(p
, p2
));
7732 unlock_user(p2
, arg2
, 0);
7733 unlock_user(p
, arg1
, 0);
7737 #if defined(TARGET_NR_renameat)
7738 case TARGET_NR_renameat
:
7741 p
= lock_user_string(arg2
);
7742 p2
= lock_user_string(arg4
);
7744 ret
= -TARGET_EFAULT
;
7746 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
7747 unlock_user(p2
, arg4
, 0);
7748 unlock_user(p
, arg2
, 0);
7752 #ifdef TARGET_NR_mkdir
7753 case TARGET_NR_mkdir
:
7754 if (!(p
= lock_user_string(arg1
)))
7756 ret
= get_errno(mkdir(p
, arg2
));
7757 unlock_user(p
, arg1
, 0);
7760 #if defined(TARGET_NR_mkdirat)
7761 case TARGET_NR_mkdirat
:
7762 if (!(p
= lock_user_string(arg2
)))
7764 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
7765 unlock_user(p
, arg2
, 0);
7768 #ifdef TARGET_NR_rmdir
7769 case TARGET_NR_rmdir
:
7770 if (!(p
= lock_user_string(arg1
)))
7772 ret
= get_errno(rmdir(p
));
7773 unlock_user(p
, arg1
, 0);
7777 ret
= get_errno(dup(arg1
));
7779 fd_trans_dup(arg1
, ret
);
7782 #ifdef TARGET_NR_pipe
7783 case TARGET_NR_pipe
:
7784 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
7787 #ifdef TARGET_NR_pipe2
7788 case TARGET_NR_pipe2
:
7789 ret
= do_pipe(cpu_env
, arg1
,
7790 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
7793 case TARGET_NR_times
:
7795 struct target_tms
*tmsp
;
7797 ret
= get_errno(times(&tms
));
7799 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
7802 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
7803 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
7804 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
7805 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
7808 ret
= host_to_target_clock_t(ret
);
7811 #ifdef TARGET_NR_prof
7812 case TARGET_NR_prof
:
7815 #ifdef TARGET_NR_signal
7816 case TARGET_NR_signal
:
7819 case TARGET_NR_acct
:
7821 ret
= get_errno(acct(NULL
));
7823 if (!(p
= lock_user_string(arg1
)))
7825 ret
= get_errno(acct(path(p
)));
7826 unlock_user(p
, arg1
, 0);
7829 #ifdef TARGET_NR_umount2
7830 case TARGET_NR_umount2
:
7831 if (!(p
= lock_user_string(arg1
)))
7833 ret
= get_errno(umount2(p
, arg2
));
7834 unlock_user(p
, arg1
, 0);
7837 #ifdef TARGET_NR_lock
7838 case TARGET_NR_lock
:
7841 case TARGET_NR_ioctl
:
7842 ret
= do_ioctl(arg1
, arg2
, arg3
);
7844 case TARGET_NR_fcntl
:
7845 ret
= do_fcntl(arg1
, arg2
, arg3
);
7847 #ifdef TARGET_NR_mpx
7851 case TARGET_NR_setpgid
:
7852 ret
= get_errno(setpgid(arg1
, arg2
));
7854 #ifdef TARGET_NR_ulimit
7855 case TARGET_NR_ulimit
:
7858 #ifdef TARGET_NR_oldolduname
7859 case TARGET_NR_oldolduname
:
7862 case TARGET_NR_umask
:
7863 ret
= get_errno(umask(arg1
));
7865 case TARGET_NR_chroot
:
7866 if (!(p
= lock_user_string(arg1
)))
7868 ret
= get_errno(chroot(p
));
7869 unlock_user(p
, arg1
, 0);
7871 #ifdef TARGET_NR_ustat
7872 case TARGET_NR_ustat
:
7875 #ifdef TARGET_NR_dup2
7876 case TARGET_NR_dup2
:
7877 ret
= get_errno(dup2(arg1
, arg2
));
7879 fd_trans_dup(arg1
, arg2
);
7883 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7884 case TARGET_NR_dup3
:
7885 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
7887 fd_trans_dup(arg1
, arg2
);
7891 #ifdef TARGET_NR_getppid /* not on alpha */
7892 case TARGET_NR_getppid
:
7893 ret
= get_errno(getppid());
7896 #ifdef TARGET_NR_getpgrp
7897 case TARGET_NR_getpgrp
:
7898 ret
= get_errno(getpgrp());
7901 case TARGET_NR_setsid
:
7902 ret
= get_errno(setsid());
7904 #ifdef TARGET_NR_sigaction
7905 case TARGET_NR_sigaction
:
7907 #if defined(TARGET_ALPHA)
7908 struct target_sigaction act
, oact
, *pact
= 0;
7909 struct target_old_sigaction
*old_act
;
7911 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
7913 act
._sa_handler
= old_act
->_sa_handler
;
7914 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
7915 act
.sa_flags
= old_act
->sa_flags
;
7916 act
.sa_restorer
= 0;
7917 unlock_user_struct(old_act
, arg2
, 0);
7920 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7921 if (!is_error(ret
) && arg3
) {
7922 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
7924 old_act
->_sa_handler
= oact
._sa_handler
;
7925 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
7926 old_act
->sa_flags
= oact
.sa_flags
;
7927 unlock_user_struct(old_act
, arg3
, 1);
7929 #elif defined(TARGET_MIPS)
7930 struct target_sigaction act
, oact
, *pact
, *old_act
;
7933 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
7935 act
._sa_handler
= old_act
->_sa_handler
;
7936 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
7937 act
.sa_flags
= old_act
->sa_flags
;
7938 unlock_user_struct(old_act
, arg2
, 0);
7944 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7946 if (!is_error(ret
) && arg3
) {
7947 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
7949 old_act
->_sa_handler
= oact
._sa_handler
;
7950 old_act
->sa_flags
= oact
.sa_flags
;
7951 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
7952 old_act
->sa_mask
.sig
[1] = 0;
7953 old_act
->sa_mask
.sig
[2] = 0;
7954 old_act
->sa_mask
.sig
[3] = 0;
7955 unlock_user_struct(old_act
, arg3
, 1);
7958 struct target_old_sigaction
*old_act
;
7959 struct target_sigaction act
, oact
, *pact
;
7961 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
7963 act
._sa_handler
= old_act
->_sa_handler
;
7964 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
7965 act
.sa_flags
= old_act
->sa_flags
;
7966 act
.sa_restorer
= old_act
->sa_restorer
;
7967 unlock_user_struct(old_act
, arg2
, 0);
7972 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7973 if (!is_error(ret
) && arg3
) {
7974 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
7976 old_act
->_sa_handler
= oact
._sa_handler
;
7977 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
7978 old_act
->sa_flags
= oact
.sa_flags
;
7979 old_act
->sa_restorer
= oact
.sa_restorer
;
7980 unlock_user_struct(old_act
, arg3
, 1);
7986 case TARGET_NR_rt_sigaction
:
7988 #if defined(TARGET_ALPHA)
7989 struct target_sigaction act
, oact
, *pact
= 0;
7990 struct target_rt_sigaction
*rt_act
;
7992 if (arg4
!= sizeof(target_sigset_t
)) {
7993 ret
= -TARGET_EINVAL
;
7997 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
7999 act
._sa_handler
= rt_act
->_sa_handler
;
8000 act
.sa_mask
= rt_act
->sa_mask
;
8001 act
.sa_flags
= rt_act
->sa_flags
;
8002 act
.sa_restorer
= arg5
;
8003 unlock_user_struct(rt_act
, arg2
, 0);
8006 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8007 if (!is_error(ret
) && arg3
) {
8008 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
8010 rt_act
->_sa_handler
= oact
._sa_handler
;
8011 rt_act
->sa_mask
= oact
.sa_mask
;
8012 rt_act
->sa_flags
= oact
.sa_flags
;
8013 unlock_user_struct(rt_act
, arg3
, 1);
8016 struct target_sigaction
*act
;
8017 struct target_sigaction
*oact
;
8019 if (arg4
!= sizeof(target_sigset_t
)) {
8020 ret
= -TARGET_EINVAL
;
8024 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
8029 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
8030 ret
= -TARGET_EFAULT
;
8031 goto rt_sigaction_fail
;
8035 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
8038 unlock_user_struct(act
, arg2
, 0);
8040 unlock_user_struct(oact
, arg3
, 1);
8044 #ifdef TARGET_NR_sgetmask /* not on alpha */
8045 case TARGET_NR_sgetmask
:
8048 abi_ulong target_set
;
8049 ret
= do_sigprocmask(0, NULL
, &cur_set
);
8051 host_to_target_old_sigset(&target_set
, &cur_set
);
8057 #ifdef TARGET_NR_ssetmask /* not on alpha */
8058 case TARGET_NR_ssetmask
:
8060 sigset_t set
, oset
, cur_set
;
8061 abi_ulong target_set
= arg1
;
8062 /* We only have one word of the new mask so we must read
8063 * the rest of it with do_sigprocmask() and OR in this word.
8064 * We are guaranteed that a do_sigprocmask() that only queries
8065 * the signal mask will not fail.
8067 ret
= do_sigprocmask(0, NULL
, &cur_set
);
8069 target_to_host_old_sigset(&set
, &target_set
);
8070 sigorset(&set
, &set
, &cur_set
);
8071 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
8073 host_to_target_old_sigset(&target_set
, &oset
);
8079 #ifdef TARGET_NR_sigprocmask
8080 case TARGET_NR_sigprocmask
:
8082 #if defined(TARGET_ALPHA)
8083 sigset_t set
, oldset
;
8088 case TARGET_SIG_BLOCK
:
8091 case TARGET_SIG_UNBLOCK
:
8094 case TARGET_SIG_SETMASK
:
8098 ret
= -TARGET_EINVAL
;
8102 target_to_host_old_sigset(&set
, &mask
);
8104 ret
= do_sigprocmask(how
, &set
, &oldset
);
8105 if (!is_error(ret
)) {
8106 host_to_target_old_sigset(&mask
, &oldset
);
8108 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
8111 sigset_t set
, oldset
, *set_ptr
;
8116 case TARGET_SIG_BLOCK
:
8119 case TARGET_SIG_UNBLOCK
:
8122 case TARGET_SIG_SETMASK
:
8126 ret
= -TARGET_EINVAL
;
8129 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8131 target_to_host_old_sigset(&set
, p
);
8132 unlock_user(p
, arg2
, 0);
8138 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8139 if (!is_error(ret
) && arg3
) {
8140 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8142 host_to_target_old_sigset(p
, &oldset
);
8143 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8149 case TARGET_NR_rt_sigprocmask
:
8152 sigset_t set
, oldset
, *set_ptr
;
8154 if (arg4
!= sizeof(target_sigset_t
)) {
8155 ret
= -TARGET_EINVAL
;
8161 case TARGET_SIG_BLOCK
:
8164 case TARGET_SIG_UNBLOCK
:
8167 case TARGET_SIG_SETMASK
:
8171 ret
= -TARGET_EINVAL
;
8174 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8176 target_to_host_sigset(&set
, p
);
8177 unlock_user(p
, arg2
, 0);
8183 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8184 if (!is_error(ret
) && arg3
) {
8185 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8187 host_to_target_sigset(p
, &oldset
);
8188 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8192 #ifdef TARGET_NR_sigpending
8193 case TARGET_NR_sigpending
:
8196 ret
= get_errno(sigpending(&set
));
8197 if (!is_error(ret
)) {
8198 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8200 host_to_target_old_sigset(p
, &set
);
8201 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8206 case TARGET_NR_rt_sigpending
:
8210 /* Yes, this check is >, not != like most. We follow the kernel's
8211 * logic and it does it like this because it implements
8212 * NR_sigpending through the same code path, and in that case
8213 * the old_sigset_t is smaller in size.
8215 if (arg2
> sizeof(target_sigset_t
)) {
8216 ret
= -TARGET_EINVAL
;
8220 ret
= get_errno(sigpending(&set
));
8221 if (!is_error(ret
)) {
8222 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8224 host_to_target_sigset(p
, &set
);
8225 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8229 #ifdef TARGET_NR_sigsuspend
8230 case TARGET_NR_sigsuspend
:
8232 TaskState
*ts
= cpu
->opaque
;
8233 #if defined(TARGET_ALPHA)
8234 abi_ulong mask
= arg1
;
8235 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
8237 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8239 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
8240 unlock_user(p
, arg1
, 0);
8242 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8244 if (ret
!= -TARGET_ERESTARTSYS
) {
8245 ts
->in_sigsuspend
= 1;
8250 case TARGET_NR_rt_sigsuspend
:
8252 TaskState
*ts
= cpu
->opaque
;
8254 if (arg2
!= sizeof(target_sigset_t
)) {
8255 ret
= -TARGET_EINVAL
;
8258 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8260 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
8261 unlock_user(p
, arg1
, 0);
8262 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8264 if (ret
!= -TARGET_ERESTARTSYS
) {
8265 ts
->in_sigsuspend
= 1;
8269 case TARGET_NR_rt_sigtimedwait
:
8272 struct timespec uts
, *puts
;
8275 if (arg4
!= sizeof(target_sigset_t
)) {
8276 ret
= -TARGET_EINVAL
;
8280 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8282 target_to_host_sigset(&set
, p
);
8283 unlock_user(p
, arg1
, 0);
8286 target_to_host_timespec(puts
, arg3
);
8290 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
8292 if (!is_error(ret
)) {
8294 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
8299 host_to_target_siginfo(p
, &uinfo
);
8300 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
8302 ret
= host_to_target_signal(ret
);
8306 case TARGET_NR_rt_sigqueueinfo
:
8310 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
8314 target_to_host_siginfo(&uinfo
, p
);
8315 unlock_user(p
, arg1
, 0);
8316 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
8319 #ifdef TARGET_NR_sigreturn
8320 case TARGET_NR_sigreturn
:
8321 if (block_signals()) {
8322 ret
= -TARGET_ERESTARTSYS
;
8324 ret
= do_sigreturn(cpu_env
);
8328 case TARGET_NR_rt_sigreturn
:
8329 if (block_signals()) {
8330 ret
= -TARGET_ERESTARTSYS
;
8332 ret
= do_rt_sigreturn(cpu_env
);
8335 case TARGET_NR_sethostname
:
8336 if (!(p
= lock_user_string(arg1
)))
8338 ret
= get_errno(sethostname(p
, arg2
));
8339 unlock_user(p
, arg1
, 0);
8341 case TARGET_NR_setrlimit
:
8343 int resource
= target_to_host_resource(arg1
);
8344 struct target_rlimit
*target_rlim
;
8346 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
8348 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
8349 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
8350 unlock_user_struct(target_rlim
, arg2
, 0);
8351 ret
= get_errno(setrlimit(resource
, &rlim
));
8354 case TARGET_NR_getrlimit
:
8356 int resource
= target_to_host_resource(arg1
);
8357 struct target_rlimit
*target_rlim
;
8360 ret
= get_errno(getrlimit(resource
, &rlim
));
8361 if (!is_error(ret
)) {
8362 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
8364 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
8365 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
8366 unlock_user_struct(target_rlim
, arg2
, 1);
8370 case TARGET_NR_getrusage
:
8372 struct rusage rusage
;
8373 ret
= get_errno(getrusage(arg1
, &rusage
));
8374 if (!is_error(ret
)) {
8375 ret
= host_to_target_rusage(arg2
, &rusage
);
8379 case TARGET_NR_gettimeofday
:
8382 ret
= get_errno(gettimeofday(&tv
, NULL
));
8383 if (!is_error(ret
)) {
8384 if (copy_to_user_timeval(arg1
, &tv
))
8389 case TARGET_NR_settimeofday
:
8391 struct timeval tv
, *ptv
= NULL
;
8392 struct timezone tz
, *ptz
= NULL
;
8395 if (copy_from_user_timeval(&tv
, arg1
)) {
8402 if (copy_from_user_timezone(&tz
, arg2
)) {
8408 ret
= get_errno(settimeofday(ptv
, ptz
));
8411 #if defined(TARGET_NR_select)
8412 case TARGET_NR_select
:
8413 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
8414 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
8417 struct target_sel_arg_struct
*sel
;
8418 abi_ulong inp
, outp
, exp
, tvp
;
8421 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1))
8423 nsel
= tswapal(sel
->n
);
8424 inp
= tswapal(sel
->inp
);
8425 outp
= tswapal(sel
->outp
);
8426 exp
= tswapal(sel
->exp
);
8427 tvp
= tswapal(sel
->tvp
);
8428 unlock_user_struct(sel
, arg1
, 0);
8429 ret
= do_select(nsel
, inp
, outp
, exp
, tvp
);
8434 #ifdef TARGET_NR_pselect6
8435 case TARGET_NR_pselect6
:
8437 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
8438 fd_set rfds
, wfds
, efds
;
8439 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
8440 struct timespec ts
, *ts_ptr
;
8443 * The 6th arg is actually two args smashed together,
8444 * so we cannot use the C library.
8452 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
8453 target_sigset_t
*target_sigset
;
8461 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
8465 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
8469 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
8475 * This takes a timespec, and not a timeval, so we cannot
8476 * use the do_select() helper ...
8479 if (target_to_host_timespec(&ts
, ts_addr
)) {
8487 /* Extract the two packed args for the sigset */
8490 sig
.size
= SIGSET_T_SIZE
;
8492 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
8496 arg_sigset
= tswapal(arg7
[0]);
8497 arg_sigsize
= tswapal(arg7
[1]);
8498 unlock_user(arg7
, arg6
, 0);
8502 if (arg_sigsize
!= sizeof(*target_sigset
)) {
8503 /* Like the kernel, we enforce correct size sigsets */
8504 ret
= -TARGET_EINVAL
;
8507 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
8508 sizeof(*target_sigset
), 1);
8509 if (!target_sigset
) {
8512 target_to_host_sigset(&set
, target_sigset
);
8513 unlock_user(target_sigset
, arg_sigset
, 0);
8521 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
8524 if (!is_error(ret
)) {
8525 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
8527 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
8529 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
8532 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
8538 #ifdef TARGET_NR_symlink
8539 case TARGET_NR_symlink
:
8542 p
= lock_user_string(arg1
);
8543 p2
= lock_user_string(arg2
);
8545 ret
= -TARGET_EFAULT
;
8547 ret
= get_errno(symlink(p
, p2
));
8548 unlock_user(p2
, arg2
, 0);
8549 unlock_user(p
, arg1
, 0);
8553 #if defined(TARGET_NR_symlinkat)
8554 case TARGET_NR_symlinkat
:
8557 p
= lock_user_string(arg1
);
8558 p2
= lock_user_string(arg3
);
8560 ret
= -TARGET_EFAULT
;
8562 ret
= get_errno(symlinkat(p
, arg2
, p2
));
8563 unlock_user(p2
, arg3
, 0);
8564 unlock_user(p
, arg1
, 0);
8568 #ifdef TARGET_NR_oldlstat
8569 case TARGET_NR_oldlstat
:
8572 #ifdef TARGET_NR_readlink
8573 case TARGET_NR_readlink
:
8576 p
= lock_user_string(arg1
);
8577 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8579 ret
= -TARGET_EFAULT
;
8581 /* Short circuit this for the magic exe check. */
8582 ret
= -TARGET_EINVAL
;
8583 } else if (is_proc_myself((const char *)p
, "exe")) {
8584 char real
[PATH_MAX
], *temp
;
8585 temp
= realpath(exec_path
, real
);
8586 /* Return value is # of bytes that we wrote to the buffer. */
8588 ret
= get_errno(-1);
8590 /* Don't worry about sign mismatch as earlier mapping
8591 * logic would have thrown a bad address error. */
8592 ret
= MIN(strlen(real
), arg3
);
8593 /* We cannot NUL terminate the string. */
8594 memcpy(p2
, real
, ret
);
8597 ret
= get_errno(readlink(path(p
), p2
, arg3
));
8599 unlock_user(p2
, arg2
, ret
);
8600 unlock_user(p
, arg1
, 0);
8604 #if defined(TARGET_NR_readlinkat)
8605 case TARGET_NR_readlinkat
:
8608 p
= lock_user_string(arg2
);
8609 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
8611 ret
= -TARGET_EFAULT
;
8612 } else if (is_proc_myself((const char *)p
, "exe")) {
8613 char real
[PATH_MAX
], *temp
;
8614 temp
= realpath(exec_path
, real
);
8615 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
8616 snprintf((char *)p2
, arg4
, "%s", real
);
8618 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
8620 unlock_user(p2
, arg3
, ret
);
8621 unlock_user(p
, arg2
, 0);
8625 #ifdef TARGET_NR_uselib
8626 case TARGET_NR_uselib
:
8629 #ifdef TARGET_NR_swapon
8630 case TARGET_NR_swapon
:
8631 if (!(p
= lock_user_string(arg1
)))
8633 ret
= get_errno(swapon(p
, arg2
));
8634 unlock_user(p
, arg1
, 0);
8637 case TARGET_NR_reboot
:
8638 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
8639 /* arg4 must be ignored in all other cases */
8640 p
= lock_user_string(arg4
);
8644 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
8645 unlock_user(p
, arg4
, 0);
8647 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
8650 #ifdef TARGET_NR_readdir
8651 case TARGET_NR_readdir
:
8654 #ifdef TARGET_NR_mmap
8655 case TARGET_NR_mmap
:
8656 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8657 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8658 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8659 || defined(TARGET_S390X)
8662 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
8663 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
8671 unlock_user(v
, arg1
, 0);
8672 ret
= get_errno(target_mmap(v1
, v2
, v3
,
8673 target_to_host_bitmask(v4
, mmap_flags_tbl
),
8677 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
8678 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
8684 #ifdef TARGET_NR_mmap2
8685 case TARGET_NR_mmap2
:
8687 #define MMAP_SHIFT 12
8689 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
8690 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
8692 arg6
<< MMAP_SHIFT
));
8695 case TARGET_NR_munmap
:
8696 ret
= get_errno(target_munmap(arg1
, arg2
));
8698 case TARGET_NR_mprotect
:
8700 TaskState
*ts
= cpu
->opaque
;
8701 /* Special hack to detect libc making the stack executable. */
8702 if ((arg3
& PROT_GROWSDOWN
)
8703 && arg1
>= ts
->info
->stack_limit
8704 && arg1
<= ts
->info
->start_stack
) {
8705 arg3
&= ~PROT_GROWSDOWN
;
8706 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
8707 arg1
= ts
->info
->stack_limit
;
8710 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
8712 #ifdef TARGET_NR_mremap
8713 case TARGET_NR_mremap
:
8714 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
8717 /* ??? msync/mlock/munlock are broken for softmmu. */
8718 #ifdef TARGET_NR_msync
8719 case TARGET_NR_msync
:
8720 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
8723 #ifdef TARGET_NR_mlock
8724 case TARGET_NR_mlock
:
8725 ret
= get_errno(mlock(g2h(arg1
), arg2
));
8728 #ifdef TARGET_NR_munlock
8729 case TARGET_NR_munlock
:
8730 ret
= get_errno(munlock(g2h(arg1
), arg2
));
8733 #ifdef TARGET_NR_mlockall
8734 case TARGET_NR_mlockall
:
8735 ret
= get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
8738 #ifdef TARGET_NR_munlockall
8739 case TARGET_NR_munlockall
:
8740 ret
= get_errno(munlockall());
8743 case TARGET_NR_truncate
:
8744 if (!(p
= lock_user_string(arg1
)))
8746 ret
= get_errno(truncate(p
, arg2
));
8747 unlock_user(p
, arg1
, 0);
8749 case TARGET_NR_ftruncate
:
8750 ret
= get_errno(ftruncate(arg1
, arg2
));
8752 case TARGET_NR_fchmod
:
8753 ret
= get_errno(fchmod(arg1
, arg2
));
8755 #if defined(TARGET_NR_fchmodat)
8756 case TARGET_NR_fchmodat
:
8757 if (!(p
= lock_user_string(arg2
)))
8759 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
8760 unlock_user(p
, arg2
, 0);
8763 case TARGET_NR_getpriority
:
8764 /* Note that negative values are valid for getpriority, so we must
8765 differentiate based on errno settings. */
8767 ret
= getpriority(arg1
, arg2
);
8768 if (ret
== -1 && errno
!= 0) {
8769 ret
= -host_to_target_errno(errno
);
8773 /* Return value is the unbiased priority. Signal no error. */
8774 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
8776 /* Return value is a biased priority to avoid negative numbers. */
8780 case TARGET_NR_setpriority
:
8781 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
8783 #ifdef TARGET_NR_profil
8784 case TARGET_NR_profil
:
8787 case TARGET_NR_statfs
:
8788 if (!(p
= lock_user_string(arg1
)))
8790 ret
= get_errno(statfs(path(p
), &stfs
));
8791 unlock_user(p
, arg1
, 0);
8793 if (!is_error(ret
)) {
8794 struct target_statfs
*target_stfs
;
8796 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
8798 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
8799 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
8800 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
8801 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
8802 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
8803 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
8804 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
8805 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
8806 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
8807 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
8808 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
8809 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
8810 unlock_user_struct(target_stfs
, arg2
, 1);
8813 case TARGET_NR_fstatfs
:
8814 ret
= get_errno(fstatfs(arg1
, &stfs
));
8815 goto convert_statfs
;
8816 #ifdef TARGET_NR_statfs64
8817 case TARGET_NR_statfs64
:
8818 if (!(p
= lock_user_string(arg1
)))
8820 ret
= get_errno(statfs(path(p
), &stfs
));
8821 unlock_user(p
, arg1
, 0);
8823 if (!is_error(ret
)) {
8824 struct target_statfs64
*target_stfs
;
8826 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
8828 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
8829 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
8830 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
8831 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
8832 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
8833 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
8834 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
8835 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
8836 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
8837 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
8838 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
8839 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
8840 unlock_user_struct(target_stfs
, arg3
, 1);
8843 case TARGET_NR_fstatfs64
:
8844 ret
= get_errno(fstatfs(arg1
, &stfs
));
8845 goto convert_statfs64
;
8847 #ifdef TARGET_NR_ioperm
8848 case TARGET_NR_ioperm
:
8851 #ifdef TARGET_NR_socketcall
8852 case TARGET_NR_socketcall
:
8853 ret
= do_socketcall(arg1
, arg2
);
8856 #ifdef TARGET_NR_accept
8857 case TARGET_NR_accept
:
8858 ret
= do_accept4(arg1
, arg2
, arg3
, 0);
8861 #ifdef TARGET_NR_accept4
8862 case TARGET_NR_accept4
:
8863 ret
= do_accept4(arg1
, arg2
, arg3
, arg4
);
8866 #ifdef TARGET_NR_bind
8867 case TARGET_NR_bind
:
8868 ret
= do_bind(arg1
, arg2
, arg3
);
8871 #ifdef TARGET_NR_connect
8872 case TARGET_NR_connect
:
8873 ret
= do_connect(arg1
, arg2
, arg3
);
8876 #ifdef TARGET_NR_getpeername
8877 case TARGET_NR_getpeername
:
8878 ret
= do_getpeername(arg1
, arg2
, arg3
);
8881 #ifdef TARGET_NR_getsockname
8882 case TARGET_NR_getsockname
:
8883 ret
= do_getsockname(arg1
, arg2
, arg3
);
8886 #ifdef TARGET_NR_getsockopt
8887 case TARGET_NR_getsockopt
:
8888 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
8891 #ifdef TARGET_NR_listen
8892 case TARGET_NR_listen
:
8893 ret
= get_errno(listen(arg1
, arg2
));
8896 #ifdef TARGET_NR_recv
8897 case TARGET_NR_recv
:
8898 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
8901 #ifdef TARGET_NR_recvfrom
8902 case TARGET_NR_recvfrom
:
8903 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8906 #ifdef TARGET_NR_recvmsg
8907 case TARGET_NR_recvmsg
:
8908 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
8911 #ifdef TARGET_NR_send
8912 case TARGET_NR_send
:
8913 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
8916 #ifdef TARGET_NR_sendmsg
8917 case TARGET_NR_sendmsg
:
8918 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
8921 #ifdef TARGET_NR_sendmmsg
8922 case TARGET_NR_sendmmsg
:
8923 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
8925 case TARGET_NR_recvmmsg
:
8926 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
8929 #ifdef TARGET_NR_sendto
8930 case TARGET_NR_sendto
:
8931 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8934 #ifdef TARGET_NR_shutdown
8935 case TARGET_NR_shutdown
:
8936 ret
= get_errno(shutdown(arg1
, arg2
));
8939 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8940 case TARGET_NR_getrandom
:
8941 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
8945 ret
= get_errno(getrandom(p
, arg2
, arg3
));
8946 unlock_user(p
, arg1
, ret
);
8949 #ifdef TARGET_NR_socket
8950 case TARGET_NR_socket
:
8951 ret
= do_socket(arg1
, arg2
, arg3
);
8952 fd_trans_unregister(ret
);
8955 #ifdef TARGET_NR_socketpair
8956 case TARGET_NR_socketpair
:
8957 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
8960 #ifdef TARGET_NR_setsockopt
8961 case TARGET_NR_setsockopt
:
8962 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
8966 case TARGET_NR_syslog
:
8967 if (!(p
= lock_user_string(arg2
)))
8969 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
8970 unlock_user(p
, arg2
, 0);
8973 case TARGET_NR_setitimer
:
8975 struct itimerval value
, ovalue
, *pvalue
;
8979 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
8980 || copy_from_user_timeval(&pvalue
->it_value
,
8981 arg2
+ sizeof(struct target_timeval
)))
8986 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
8987 if (!is_error(ret
) && arg3
) {
8988 if (copy_to_user_timeval(arg3
,
8989 &ovalue
.it_interval
)
8990 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
8996 case TARGET_NR_getitimer
:
8998 struct itimerval value
;
9000 ret
= get_errno(getitimer(arg1
, &value
));
9001 if (!is_error(ret
) && arg2
) {
9002 if (copy_to_user_timeval(arg2
,
9004 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
9010 #ifdef TARGET_NR_stat
9011 case TARGET_NR_stat
:
9012 if (!(p
= lock_user_string(arg1
)))
9014 ret
= get_errno(stat(path(p
), &st
));
9015 unlock_user(p
, arg1
, 0);
9018 #ifdef TARGET_NR_lstat
9019 case TARGET_NR_lstat
:
9020 if (!(p
= lock_user_string(arg1
)))
9022 ret
= get_errno(lstat(path(p
), &st
));
9023 unlock_user(p
, arg1
, 0);
9026 case TARGET_NR_fstat
:
9028 ret
= get_errno(fstat(arg1
, &st
));
9029 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9032 if (!is_error(ret
)) {
9033 struct target_stat
*target_st
;
9035 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
9037 memset(target_st
, 0, sizeof(*target_st
));
9038 __put_user(st
.st_dev
, &target_st
->st_dev
);
9039 __put_user(st
.st_ino
, &target_st
->st_ino
);
9040 __put_user(st
.st_mode
, &target_st
->st_mode
);
9041 __put_user(st
.st_uid
, &target_st
->st_uid
);
9042 __put_user(st
.st_gid
, &target_st
->st_gid
);
9043 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
9044 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
9045 __put_user(st
.st_size
, &target_st
->st_size
);
9046 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
9047 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
9048 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
9049 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
9050 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
9051 unlock_user_struct(target_st
, arg2
, 1);
9055 #ifdef TARGET_NR_olduname
9056 case TARGET_NR_olduname
:
9059 #ifdef TARGET_NR_iopl
9060 case TARGET_NR_iopl
:
9063 case TARGET_NR_vhangup
:
9064 ret
= get_errno(vhangup());
9066 #ifdef TARGET_NR_idle
9067 case TARGET_NR_idle
:
9070 #ifdef TARGET_NR_syscall
9071 case TARGET_NR_syscall
:
9072 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
9073 arg6
, arg7
, arg8
, 0);
9076 case TARGET_NR_wait4
:
9079 abi_long status_ptr
= arg2
;
9080 struct rusage rusage
, *rusage_ptr
;
9081 abi_ulong target_rusage
= arg4
;
9082 abi_long rusage_err
;
9084 rusage_ptr
= &rusage
;
9087 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
9088 if (!is_error(ret
)) {
9089 if (status_ptr
&& ret
) {
9090 status
= host_to_target_waitstatus(status
);
9091 if (put_user_s32(status
, status_ptr
))
9094 if (target_rusage
) {
9095 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
9103 #ifdef TARGET_NR_swapoff
9104 case TARGET_NR_swapoff
:
9105 if (!(p
= lock_user_string(arg1
)))
9107 ret
= get_errno(swapoff(p
));
9108 unlock_user(p
, arg1
, 0);
9111 case TARGET_NR_sysinfo
:
9113 struct target_sysinfo
*target_value
;
9114 struct sysinfo value
;
9115 ret
= get_errno(sysinfo(&value
));
9116 if (!is_error(ret
) && arg1
)
9118 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
9120 __put_user(value
.uptime
, &target_value
->uptime
);
9121 __put_user(value
.loads
[0], &target_value
->loads
[0]);
9122 __put_user(value
.loads
[1], &target_value
->loads
[1]);
9123 __put_user(value
.loads
[2], &target_value
->loads
[2]);
9124 __put_user(value
.totalram
, &target_value
->totalram
);
9125 __put_user(value
.freeram
, &target_value
->freeram
);
9126 __put_user(value
.sharedram
, &target_value
->sharedram
);
9127 __put_user(value
.bufferram
, &target_value
->bufferram
);
9128 __put_user(value
.totalswap
, &target_value
->totalswap
);
9129 __put_user(value
.freeswap
, &target_value
->freeswap
);
9130 __put_user(value
.procs
, &target_value
->procs
);
9131 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
9132 __put_user(value
.freehigh
, &target_value
->freehigh
);
9133 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
9134 unlock_user_struct(target_value
, arg1
, 1);
9138 #ifdef TARGET_NR_ipc
9140 ret
= do_ipc(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9143 #ifdef TARGET_NR_semget
9144 case TARGET_NR_semget
:
9145 ret
= get_errno(semget(arg1
, arg2
, arg3
));
9148 #ifdef TARGET_NR_semop
9149 case TARGET_NR_semop
:
9150 ret
= do_semop(arg1
, arg2
, arg3
);
9153 #ifdef TARGET_NR_semctl
9154 case TARGET_NR_semctl
:
9155 ret
= do_semctl(arg1
, arg2
, arg3
, arg4
);
9158 #ifdef TARGET_NR_msgctl
9159 case TARGET_NR_msgctl
:
9160 ret
= do_msgctl(arg1
, arg2
, arg3
);
9163 #ifdef TARGET_NR_msgget
9164 case TARGET_NR_msgget
:
9165 ret
= get_errno(msgget(arg1
, arg2
));
9168 #ifdef TARGET_NR_msgrcv
9169 case TARGET_NR_msgrcv
:
9170 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
9173 #ifdef TARGET_NR_msgsnd
9174 case TARGET_NR_msgsnd
:
9175 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
9178 #ifdef TARGET_NR_shmget
9179 case TARGET_NR_shmget
:
9180 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
9183 #ifdef TARGET_NR_shmctl
9184 case TARGET_NR_shmctl
:
9185 ret
= do_shmctl(arg1
, arg2
, arg3
);
9188 #ifdef TARGET_NR_shmat
9189 case TARGET_NR_shmat
:
9190 ret
= do_shmat(arg1
, arg2
, arg3
);
9193 #ifdef TARGET_NR_shmdt
9194 case TARGET_NR_shmdt
:
9195 ret
= do_shmdt(arg1
);
9198 case TARGET_NR_fsync
:
9199 ret
= get_errno(fsync(arg1
));
9201 case TARGET_NR_clone
:
9202 /* Linux manages to have three different orderings for its
9203 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9204 * match the kernel's CONFIG_CLONE_* settings.
9205 * Microblaze is further special in that it uses a sixth
9206 * implicit argument to clone for the TLS pointer.
9208 #if defined(TARGET_MICROBLAZE)
9209 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
9210 #elif defined(TARGET_CLONE_BACKWARDS)
9211 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
9212 #elif defined(TARGET_CLONE_BACKWARDS2)
9213 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
9215 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
9218 #ifdef __NR_exit_group
9219 /* new thread calls */
9220 case TARGET_NR_exit_group
:
9224 gdb_exit(cpu_env
, arg1
);
9225 ret
= get_errno(exit_group(arg1
));
9228 case TARGET_NR_setdomainname
:
9229 if (!(p
= lock_user_string(arg1
)))
9231 ret
= get_errno(setdomainname(p
, arg2
));
9232 unlock_user(p
, arg1
, 0);
9234 case TARGET_NR_uname
:
9235 /* no need to transcode because we use the linux syscall */
9237 struct new_utsname
* buf
;
9239 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
9241 ret
= get_errno(sys_uname(buf
));
9242 if (!is_error(ret
)) {
9243 /* Overwrite the native machine name with whatever is being
9245 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
9246 /* Allow the user to override the reported release. */
9247 if (qemu_uname_release
&& *qemu_uname_release
) {
9248 g_strlcpy(buf
->release
, qemu_uname_release
,
9249 sizeof(buf
->release
));
9252 unlock_user_struct(buf
, arg1
, 1);
9256 case TARGET_NR_modify_ldt
:
9257 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
9259 #if !defined(TARGET_X86_64)
9260 case TARGET_NR_vm86old
:
9262 case TARGET_NR_vm86
:
9263 ret
= do_vm86(cpu_env
, arg1
, arg2
);
9267 case TARGET_NR_adjtimex
:
9269 #ifdef TARGET_NR_create_module
9270 case TARGET_NR_create_module
:
9272 case TARGET_NR_init_module
:
9273 case TARGET_NR_delete_module
:
9274 #ifdef TARGET_NR_get_kernel_syms
9275 case TARGET_NR_get_kernel_syms
:
9278 case TARGET_NR_quotactl
:
9280 case TARGET_NR_getpgid
:
9281 ret
= get_errno(getpgid(arg1
));
9283 case TARGET_NR_fchdir
:
9284 ret
= get_errno(fchdir(arg1
));
9286 #ifdef TARGET_NR_bdflush /* not on x86_64 */
9287 case TARGET_NR_bdflush
:
9290 #ifdef TARGET_NR_sysfs
9291 case TARGET_NR_sysfs
:
9294 case TARGET_NR_personality
:
9295 ret
= get_errno(personality(arg1
));
9297 #ifdef TARGET_NR_afs_syscall
9298 case TARGET_NR_afs_syscall
:
9301 #ifdef TARGET_NR__llseek /* Not on alpha */
9302 case TARGET_NR__llseek
:
9305 #if !defined(__NR_llseek)
9306 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | arg3
, arg5
);
9308 ret
= get_errno(res
);
9313 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
9315 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
9321 #ifdef TARGET_NR_getdents
9322 case TARGET_NR_getdents
:
9323 #ifdef __NR_getdents
9324 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9326 struct target_dirent
*target_dirp
;
9327 struct linux_dirent
*dirp
;
9328 abi_long count
= arg3
;
9330 dirp
= g_try_malloc(count
);
9332 ret
= -TARGET_ENOMEM
;
9336 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9337 if (!is_error(ret
)) {
9338 struct linux_dirent
*de
;
9339 struct target_dirent
*tde
;
9341 int reclen
, treclen
;
9342 int count1
, tnamelen
;
9346 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9350 reclen
= de
->d_reclen
;
9351 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
9352 assert(tnamelen
>= 0);
9353 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
9354 assert(count1
+ treclen
<= count
);
9355 tde
->d_reclen
= tswap16(treclen
);
9356 tde
->d_ino
= tswapal(de
->d_ino
);
9357 tde
->d_off
= tswapal(de
->d_off
);
9358 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
9359 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9361 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9365 unlock_user(target_dirp
, arg2
, ret
);
9371 struct linux_dirent
*dirp
;
9372 abi_long count
= arg3
;
9374 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9376 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9377 if (!is_error(ret
)) {
9378 struct linux_dirent
*de
;
9383 reclen
= de
->d_reclen
;
9386 de
->d_reclen
= tswap16(reclen
);
9387 tswapls(&de
->d_ino
);
9388 tswapls(&de
->d_off
);
9389 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9393 unlock_user(dirp
, arg2
, ret
);
9397 /* Implement getdents in terms of getdents64 */
9399 struct linux_dirent64
*dirp
;
9400 abi_long count
= arg3
;
9402 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
9406 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9407 if (!is_error(ret
)) {
9408 /* Convert the dirent64 structs to target dirent. We do this
9409 * in-place, since we can guarantee that a target_dirent is no
9410 * larger than a dirent64; however this means we have to be
9411 * careful to read everything before writing in the new format.
9413 struct linux_dirent64
*de
;
9414 struct target_dirent
*tde
;
9419 tde
= (struct target_dirent
*)dirp
;
9421 int namelen
, treclen
;
9422 int reclen
= de
->d_reclen
;
9423 uint64_t ino
= de
->d_ino
;
9424 int64_t off
= de
->d_off
;
9425 uint8_t type
= de
->d_type
;
9427 namelen
= strlen(de
->d_name
);
9428 treclen
= offsetof(struct target_dirent
, d_name
)
9430 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
9432 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
9433 tde
->d_ino
= tswapal(ino
);
9434 tde
->d_off
= tswapal(off
);
9435 tde
->d_reclen
= tswap16(treclen
);
9436 /* The target_dirent type is in what was formerly a padding
9437 * byte at the end of the structure:
9439 *(((char *)tde
) + treclen
- 1) = type
;
9441 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9442 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9448 unlock_user(dirp
, arg2
, ret
);
9452 #endif /* TARGET_NR_getdents */
9453 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9454 case TARGET_NR_getdents64
:
9456 struct linux_dirent64
*dirp
;
9457 abi_long count
= arg3
;
9458 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9460 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9461 if (!is_error(ret
)) {
9462 struct linux_dirent64
*de
;
9467 reclen
= de
->d_reclen
;
9470 de
->d_reclen
= tswap16(reclen
);
9471 tswap64s((uint64_t *)&de
->d_ino
);
9472 tswap64s((uint64_t *)&de
->d_off
);
9473 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9477 unlock_user(dirp
, arg2
, ret
);
9480 #endif /* TARGET_NR_getdents64 */
9481 #if defined(TARGET_NR__newselect)
9482 case TARGET_NR__newselect
:
9483 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9486 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9487 # ifdef TARGET_NR_poll
9488 case TARGET_NR_poll
:
9490 # ifdef TARGET_NR_ppoll
9491 case TARGET_NR_ppoll
:
9494 struct target_pollfd
*target_pfd
;
9495 unsigned int nfds
= arg2
;
9502 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
9503 sizeof(struct target_pollfd
) * nfds
, 1);
9508 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
9509 for (i
= 0; i
< nfds
; i
++) {
9510 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
9511 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
9516 # ifdef TARGET_NR_ppoll
9517 case TARGET_NR_ppoll
:
9519 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
9520 target_sigset_t
*target_set
;
9521 sigset_t _set
, *set
= &_set
;
9524 if (target_to_host_timespec(timeout_ts
, arg3
)) {
9525 unlock_user(target_pfd
, arg1
, 0);
9533 if (arg5
!= sizeof(target_sigset_t
)) {
9534 unlock_user(target_pfd
, arg1
, 0);
9535 ret
= -TARGET_EINVAL
;
9539 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
9541 unlock_user(target_pfd
, arg1
, 0);
9544 target_to_host_sigset(set
, target_set
);
9549 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
9550 set
, SIGSET_T_SIZE
));
9552 if (!is_error(ret
) && arg3
) {
9553 host_to_target_timespec(arg3
, timeout_ts
);
9556 unlock_user(target_set
, arg4
, 0);
9561 # ifdef TARGET_NR_poll
9562 case TARGET_NR_poll
:
9564 struct timespec ts
, *pts
;
9567 /* Convert ms to secs, ns */
9568 ts
.tv_sec
= arg3
/ 1000;
9569 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
9572 /* -ve poll() timeout means "infinite" */
9575 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
9580 g_assert_not_reached();
9583 if (!is_error(ret
)) {
9584 for(i
= 0; i
< nfds
; i
++) {
9585 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
9588 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
9592 case TARGET_NR_flock
:
9593 /* NOTE: the flock constant seems to be the same for every
9595 ret
= get_errno(safe_flock(arg1
, arg2
));
9597 case TARGET_NR_readv
:
9599 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
9601 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
9602 unlock_iovec(vec
, arg2
, arg3
, 1);
9604 ret
= -host_to_target_errno(errno
);
9608 case TARGET_NR_writev
:
9610 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
9612 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
9613 unlock_iovec(vec
, arg2
, arg3
, 0);
9615 ret
= -host_to_target_errno(errno
);
9619 case TARGET_NR_getsid
:
9620 ret
= get_errno(getsid(arg1
));
9622 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9623 case TARGET_NR_fdatasync
:
9624 ret
= get_errno(fdatasync(arg1
));
9627 #ifdef TARGET_NR__sysctl
9628 case TARGET_NR__sysctl
:
9629 /* We don't implement this, but ENOTDIR is always a safe
9631 ret
= -TARGET_ENOTDIR
;
9634 case TARGET_NR_sched_getaffinity
:
9636 unsigned int mask_size
;
9637 unsigned long *mask
;
9640 * sched_getaffinity needs multiples of ulong, so need to take
9641 * care of mismatches between target ulong and host ulong sizes.
9643 if (arg2
& (sizeof(abi_ulong
) - 1)) {
9644 ret
= -TARGET_EINVAL
;
9647 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
9649 mask
= alloca(mask_size
);
9650 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
9652 if (!is_error(ret
)) {
9654 /* More data returned than the caller's buffer will fit.
9655 * This only happens if sizeof(abi_long) < sizeof(long)
9656 * and the caller passed us a buffer holding an odd number
9657 * of abi_longs. If the host kernel is actually using the
9658 * extra 4 bytes then fail EINVAL; otherwise we can just
9659 * ignore them and only copy the interesting part.
9661 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
9662 if (numcpus
> arg2
* 8) {
9663 ret
= -TARGET_EINVAL
;
9669 if (copy_to_user(arg3
, mask
, ret
)) {
9675 case TARGET_NR_sched_setaffinity
:
9677 unsigned int mask_size
;
9678 unsigned long *mask
;
9681 * sched_setaffinity needs multiples of ulong, so need to take
9682 * care of mismatches between target ulong and host ulong sizes.
9684 if (arg2
& (sizeof(abi_ulong
) - 1)) {
9685 ret
= -TARGET_EINVAL
;
9688 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
9690 mask
= alloca(mask_size
);
9691 if (!lock_user_struct(VERIFY_READ
, p
, arg3
, 1)) {
9694 memcpy(mask
, p
, arg2
);
9695 unlock_user_struct(p
, arg2
, 0);
9697 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
9700 case TARGET_NR_sched_setparam
:
9702 struct sched_param
*target_schp
;
9703 struct sched_param schp
;
9706 return -TARGET_EINVAL
;
9708 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
9710 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
9711 unlock_user_struct(target_schp
, arg2
, 0);
9712 ret
= get_errno(sched_setparam(arg1
, &schp
));
9715 case TARGET_NR_sched_getparam
:
9717 struct sched_param
*target_schp
;
9718 struct sched_param schp
;
9721 return -TARGET_EINVAL
;
9723 ret
= get_errno(sched_getparam(arg1
, &schp
));
9724 if (!is_error(ret
)) {
9725 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
9727 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
9728 unlock_user_struct(target_schp
, arg2
, 1);
9732 case TARGET_NR_sched_setscheduler
:
9734 struct sched_param
*target_schp
;
9735 struct sched_param schp
;
9737 return -TARGET_EINVAL
;
9739 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
9741 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
9742 unlock_user_struct(target_schp
, arg3
, 0);
9743 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
9746 case TARGET_NR_sched_getscheduler
:
9747 ret
= get_errno(sched_getscheduler(arg1
));
9749 case TARGET_NR_sched_yield
:
9750 ret
= get_errno(sched_yield());
9752 case TARGET_NR_sched_get_priority_max
:
9753 ret
= get_errno(sched_get_priority_max(arg1
));
9755 case TARGET_NR_sched_get_priority_min
:
9756 ret
= get_errno(sched_get_priority_min(arg1
));
9758 case TARGET_NR_sched_rr_get_interval
:
9761 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
9762 if (!is_error(ret
)) {
9763 ret
= host_to_target_timespec(arg2
, &ts
);
9767 case TARGET_NR_nanosleep
:
9769 struct timespec req
, rem
;
9770 target_to_host_timespec(&req
, arg1
);
9771 ret
= get_errno(safe_nanosleep(&req
, &rem
));
9772 if (is_error(ret
) && arg2
) {
9773 host_to_target_timespec(arg2
, &rem
);
9777 #ifdef TARGET_NR_query_module
9778 case TARGET_NR_query_module
:
9781 #ifdef TARGET_NR_nfsservctl
9782 case TARGET_NR_nfsservctl
:
9785 case TARGET_NR_prctl
:
9787 case PR_GET_PDEATHSIG
:
9790 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
9791 if (!is_error(ret
) && arg2
9792 && put_user_ual(deathsig
, arg2
)) {
9800 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
9804 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
9806 unlock_user(name
, arg2
, 16);
9811 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
9815 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
9817 unlock_user(name
, arg2
, 0);
9822 /* Most prctl options have no pointer arguments */
9823 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
9827 #ifdef TARGET_NR_arch_prctl
9828 case TARGET_NR_arch_prctl
:
9829 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9830 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
9836 #ifdef TARGET_NR_pread64
9837 case TARGET_NR_pread64
:
9838 if (regpairs_aligned(cpu_env
)) {
9842 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
9844 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
9845 unlock_user(p
, arg2
, ret
);
9847 case TARGET_NR_pwrite64
:
9848 if (regpairs_aligned(cpu_env
)) {
9852 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
9854 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
9855 unlock_user(p
, arg2
, 0);
9858 case TARGET_NR_getcwd
:
9859 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
9861 ret
= get_errno(sys_getcwd1(p
, arg2
));
9862 unlock_user(p
, arg1
, ret
);
9864 case TARGET_NR_capget
:
9865 case TARGET_NR_capset
:
9867 struct target_user_cap_header
*target_header
;
9868 struct target_user_cap_data
*target_data
= NULL
;
9869 struct __user_cap_header_struct header
;
9870 struct __user_cap_data_struct data
[2];
9871 struct __user_cap_data_struct
*dataptr
= NULL
;
9872 int i
, target_datalen
;
9875 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
9878 header
.version
= tswap32(target_header
->version
);
9879 header
.pid
= tswap32(target_header
->pid
);
9881 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
9882 /* Version 2 and up takes pointer to two user_data structs */
9886 target_datalen
= sizeof(*target_data
) * data_items
;
9889 if (num
== TARGET_NR_capget
) {
9890 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
9892 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
9895 unlock_user_struct(target_header
, arg1
, 0);
9899 if (num
== TARGET_NR_capset
) {
9900 for (i
= 0; i
< data_items
; i
++) {
9901 data
[i
].effective
= tswap32(target_data
[i
].effective
);
9902 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
9903 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
9910 if (num
== TARGET_NR_capget
) {
9911 ret
= get_errno(capget(&header
, dataptr
));
9913 ret
= get_errno(capset(&header
, dataptr
));
9916 /* The kernel always updates version for both capget and capset */
9917 target_header
->version
= tswap32(header
.version
);
9918 unlock_user_struct(target_header
, arg1
, 1);
9921 if (num
== TARGET_NR_capget
) {
9922 for (i
= 0; i
< data_items
; i
++) {
9923 target_data
[i
].effective
= tswap32(data
[i
].effective
);
9924 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
9925 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
9927 unlock_user(target_data
, arg2
, target_datalen
);
9929 unlock_user(target_data
, arg2
, 0);
9934 case TARGET_NR_sigaltstack
:
9935 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUArchState
*)cpu_env
));
9938 #ifdef CONFIG_SENDFILE
9939 case TARGET_NR_sendfile
:
9944 ret
= get_user_sal(off
, arg3
);
9945 if (is_error(ret
)) {
9950 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
9951 if (!is_error(ret
) && arg3
) {
9952 abi_long ret2
= put_user_sal(off
, arg3
);
9953 if (is_error(ret2
)) {
9959 #ifdef TARGET_NR_sendfile64
9960 case TARGET_NR_sendfile64
:
9965 ret
= get_user_s64(off
, arg3
);
9966 if (is_error(ret
)) {
9971 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
9972 if (!is_error(ret
) && arg3
) {
9973 abi_long ret2
= put_user_s64(off
, arg3
);
9974 if (is_error(ret2
)) {
9982 case TARGET_NR_sendfile
:
9983 #ifdef TARGET_NR_sendfile64
9984 case TARGET_NR_sendfile64
:
9989 #ifdef TARGET_NR_getpmsg
9990 case TARGET_NR_getpmsg
:
9993 #ifdef TARGET_NR_putpmsg
9994 case TARGET_NR_putpmsg
:
9997 #ifdef TARGET_NR_vfork
9998 case TARGET_NR_vfork
:
9999 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
10003 #ifdef TARGET_NR_ugetrlimit
10004 case TARGET_NR_ugetrlimit
:
10006 struct rlimit rlim
;
10007 int resource
= target_to_host_resource(arg1
);
10008 ret
= get_errno(getrlimit(resource
, &rlim
));
10009 if (!is_error(ret
)) {
10010 struct target_rlimit
*target_rlim
;
10011 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
10013 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
10014 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
10015 unlock_user_struct(target_rlim
, arg2
, 1);
10020 #ifdef TARGET_NR_truncate64
10021 case TARGET_NR_truncate64
:
10022 if (!(p
= lock_user_string(arg1
)))
10024 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
10025 unlock_user(p
, arg1
, 0);
10028 #ifdef TARGET_NR_ftruncate64
10029 case TARGET_NR_ftruncate64
:
10030 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
10033 #ifdef TARGET_NR_stat64
10034 case TARGET_NR_stat64
:
10035 if (!(p
= lock_user_string(arg1
)))
10037 ret
= get_errno(stat(path(p
), &st
));
10038 unlock_user(p
, arg1
, 0);
10039 if (!is_error(ret
))
10040 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10043 #ifdef TARGET_NR_lstat64
10044 case TARGET_NR_lstat64
:
10045 if (!(p
= lock_user_string(arg1
)))
10047 ret
= get_errno(lstat(path(p
), &st
));
10048 unlock_user(p
, arg1
, 0);
10049 if (!is_error(ret
))
10050 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10053 #ifdef TARGET_NR_fstat64
10054 case TARGET_NR_fstat64
:
10055 ret
= get_errno(fstat(arg1
, &st
));
10056 if (!is_error(ret
))
10057 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10060 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10061 #ifdef TARGET_NR_fstatat64
10062 case TARGET_NR_fstatat64
:
10064 #ifdef TARGET_NR_newfstatat
10065 case TARGET_NR_newfstatat
:
10067 if (!(p
= lock_user_string(arg2
)))
10069 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
10070 if (!is_error(ret
))
10071 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
10074 #ifdef TARGET_NR_lchown
10075 case TARGET_NR_lchown
:
10076 if (!(p
= lock_user_string(arg1
)))
10078 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10079 unlock_user(p
, arg1
, 0);
10082 #ifdef TARGET_NR_getuid
10083 case TARGET_NR_getuid
:
10084 ret
= get_errno(high2lowuid(getuid()));
10087 #ifdef TARGET_NR_getgid
10088 case TARGET_NR_getgid
:
10089 ret
= get_errno(high2lowgid(getgid()));
10092 #ifdef TARGET_NR_geteuid
10093 case TARGET_NR_geteuid
:
10094 ret
= get_errno(high2lowuid(geteuid()));
10097 #ifdef TARGET_NR_getegid
10098 case TARGET_NR_getegid
:
10099 ret
= get_errno(high2lowgid(getegid()));
10102 case TARGET_NR_setreuid
:
10103 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
10105 case TARGET_NR_setregid
:
10106 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
10108 case TARGET_NR_getgroups
:
10110 int gidsetsize
= arg1
;
10111 target_id
*target_grouplist
;
10115 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10116 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10117 if (gidsetsize
== 0)
10119 if (!is_error(ret
)) {
10120 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
10121 if (!target_grouplist
)
10123 for(i
= 0;i
< ret
; i
++)
10124 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
10125 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
10129 case TARGET_NR_setgroups
:
10131 int gidsetsize
= arg1
;
10132 target_id
*target_grouplist
;
10133 gid_t
*grouplist
= NULL
;
10136 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10137 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
10138 if (!target_grouplist
) {
10139 ret
= -TARGET_EFAULT
;
10142 for (i
= 0; i
< gidsetsize
; i
++) {
10143 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
10145 unlock_user(target_grouplist
, arg2
, 0);
10147 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
10150 case TARGET_NR_fchown
:
10151 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
10153 #if defined(TARGET_NR_fchownat)
10154 case TARGET_NR_fchownat
:
10155 if (!(p
= lock_user_string(arg2
)))
10157 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
10158 low2highgid(arg4
), arg5
));
10159 unlock_user(p
, arg2
, 0);
10162 #ifdef TARGET_NR_setresuid
10163 case TARGET_NR_setresuid
:
10164 ret
= get_errno(sys_setresuid(low2highuid(arg1
),
10166 low2highuid(arg3
)));
10169 #ifdef TARGET_NR_getresuid
10170 case TARGET_NR_getresuid
:
10172 uid_t ruid
, euid
, suid
;
10173 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
10174 if (!is_error(ret
)) {
10175 if (put_user_id(high2lowuid(ruid
), arg1
)
10176 || put_user_id(high2lowuid(euid
), arg2
)
10177 || put_user_id(high2lowuid(suid
), arg3
))
10183 #ifdef TARGET_NR_getresgid
10184 case TARGET_NR_setresgid
:
10185 ret
= get_errno(sys_setresgid(low2highgid(arg1
),
10187 low2highgid(arg3
)));
10190 #ifdef TARGET_NR_getresgid
10191 case TARGET_NR_getresgid
:
10193 gid_t rgid
, egid
, sgid
;
10194 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
10195 if (!is_error(ret
)) {
10196 if (put_user_id(high2lowgid(rgid
), arg1
)
10197 || put_user_id(high2lowgid(egid
), arg2
)
10198 || put_user_id(high2lowgid(sgid
), arg3
))
10204 #ifdef TARGET_NR_chown
10205 case TARGET_NR_chown
:
10206 if (!(p
= lock_user_string(arg1
)))
10208 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10209 unlock_user(p
, arg1
, 0);
10212 case TARGET_NR_setuid
:
10213 ret
= get_errno(sys_setuid(low2highuid(arg1
)));
10215 case TARGET_NR_setgid
:
10216 ret
= get_errno(sys_setgid(low2highgid(arg1
)));
10218 case TARGET_NR_setfsuid
:
10219 ret
= get_errno(setfsuid(arg1
));
10221 case TARGET_NR_setfsgid
:
10222 ret
= get_errno(setfsgid(arg1
));
10225 #ifdef TARGET_NR_lchown32
10226 case TARGET_NR_lchown32
:
10227 if (!(p
= lock_user_string(arg1
)))
10229 ret
= get_errno(lchown(p
, arg2
, arg3
));
10230 unlock_user(p
, arg1
, 0);
10233 #ifdef TARGET_NR_getuid32
10234 case TARGET_NR_getuid32
:
10235 ret
= get_errno(getuid());
10239 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10240 /* Alpha specific */
10241 case TARGET_NR_getxuid
:
10245 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
10247 ret
= get_errno(getuid());
10250 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10251 /* Alpha specific */
10252 case TARGET_NR_getxgid
:
10256 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
10258 ret
= get_errno(getgid());
10261 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10262 /* Alpha specific */
10263 case TARGET_NR_osf_getsysinfo
:
10264 ret
= -TARGET_EOPNOTSUPP
;
10266 case TARGET_GSI_IEEE_FP_CONTROL
:
10268 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
10270 /* Copied from linux ieee_fpcr_to_swcr. */
10271 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
10272 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
10273 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
10274 | SWCR_TRAP_ENABLE_DZE
10275 | SWCR_TRAP_ENABLE_OVF
);
10276 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
10277 | SWCR_TRAP_ENABLE_INE
);
10278 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
10279 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
10281 if (put_user_u64 (swcr
, arg2
))
10287 /* case GSI_IEEE_STATE_AT_SIGNAL:
10288 -- Not implemented in linux kernel.
10290 -- Retrieves current unaligned access state; not much used.
10291 case GSI_PROC_TYPE:
10292 -- Retrieves implver information; surely not used.
10293 case GSI_GET_HWRPB:
10294 -- Grabs a copy of the HWRPB; surely not used.
10299 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10300 /* Alpha specific */
10301 case TARGET_NR_osf_setsysinfo
:
10302 ret
= -TARGET_EOPNOTSUPP
;
10304 case TARGET_SSI_IEEE_FP_CONTROL
:
10306 uint64_t swcr
, fpcr
, orig_fpcr
;
10308 if (get_user_u64 (swcr
, arg2
)) {
10311 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10312 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
10314 /* Copied from linux ieee_swcr_to_fpcr. */
10315 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
10316 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
10317 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
10318 | SWCR_TRAP_ENABLE_DZE
10319 | SWCR_TRAP_ENABLE_OVF
)) << 48;
10320 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
10321 | SWCR_TRAP_ENABLE_INE
)) << 57;
10322 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
10323 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
10325 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
10330 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
10332 uint64_t exc
, fpcr
, orig_fpcr
;
10335 if (get_user_u64(exc
, arg2
)) {
10339 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10341 /* We only add to the exception status here. */
10342 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
10344 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
10347 /* Old exceptions are not signaled. */
10348 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
10350 /* If any exceptions set by this call,
10351 and are unmasked, send a signal. */
10353 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
10354 si_code
= TARGET_FPE_FLTRES
;
10356 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
10357 si_code
= TARGET_FPE_FLTUND
;
10359 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
10360 si_code
= TARGET_FPE_FLTOVF
;
10362 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
10363 si_code
= TARGET_FPE_FLTDIV
;
10365 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
10366 si_code
= TARGET_FPE_FLTINV
;
10368 if (si_code
!= 0) {
10369 target_siginfo_t info
;
10370 info
.si_signo
= SIGFPE
;
10372 info
.si_code
= si_code
;
10373 info
._sifields
._sigfault
._addr
10374 = ((CPUArchState
*)cpu_env
)->pc
;
10375 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
10380 /* case SSI_NVPAIRS:
10381 -- Used with SSIN_UACPROC to enable unaligned accesses.
10382 case SSI_IEEE_STATE_AT_SIGNAL:
10383 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10384 -- Not implemented in linux kernel
10389 #ifdef TARGET_NR_osf_sigprocmask
10390 /* Alpha specific. */
10391 case TARGET_NR_osf_sigprocmask
:
10395 sigset_t set
, oldset
;
10398 case TARGET_SIG_BLOCK
:
10401 case TARGET_SIG_UNBLOCK
:
10404 case TARGET_SIG_SETMASK
:
10408 ret
= -TARGET_EINVAL
;
10412 target_to_host_old_sigset(&set
, &mask
);
10413 ret
= do_sigprocmask(how
, &set
, &oldset
);
10415 host_to_target_old_sigset(&mask
, &oldset
);
10422 #ifdef TARGET_NR_getgid32
10423 case TARGET_NR_getgid32
:
10424 ret
= get_errno(getgid());
10427 #ifdef TARGET_NR_geteuid32
10428 case TARGET_NR_geteuid32
:
10429 ret
= get_errno(geteuid());
10432 #ifdef TARGET_NR_getegid32
10433 case TARGET_NR_getegid32
:
10434 ret
= get_errno(getegid());
10437 #ifdef TARGET_NR_setreuid32
10438 case TARGET_NR_setreuid32
:
10439 ret
= get_errno(setreuid(arg1
, arg2
));
10442 #ifdef TARGET_NR_setregid32
10443 case TARGET_NR_setregid32
:
10444 ret
= get_errno(setregid(arg1
, arg2
));
10447 #ifdef TARGET_NR_getgroups32
10448 case TARGET_NR_getgroups32
:
10450 int gidsetsize
= arg1
;
10451 uint32_t *target_grouplist
;
10455 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10456 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10457 if (gidsetsize
== 0)
10459 if (!is_error(ret
)) {
10460 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
10461 if (!target_grouplist
) {
10462 ret
= -TARGET_EFAULT
;
10465 for(i
= 0;i
< ret
; i
++)
10466 target_grouplist
[i
] = tswap32(grouplist
[i
]);
10467 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
10472 #ifdef TARGET_NR_setgroups32
10473 case TARGET_NR_setgroups32
:
10475 int gidsetsize
= arg1
;
10476 uint32_t *target_grouplist
;
10480 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10481 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
10482 if (!target_grouplist
) {
10483 ret
= -TARGET_EFAULT
;
10486 for(i
= 0;i
< gidsetsize
; i
++)
10487 grouplist
[i
] = tswap32(target_grouplist
[i
]);
10488 unlock_user(target_grouplist
, arg2
, 0);
10489 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
10493 #ifdef TARGET_NR_fchown32
10494 case TARGET_NR_fchown32
:
10495 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
10498 #ifdef TARGET_NR_setresuid32
10499 case TARGET_NR_setresuid32
:
10500 ret
= get_errno(sys_setresuid(arg1
, arg2
, arg3
));
10503 #ifdef TARGET_NR_getresuid32
10504 case TARGET_NR_getresuid32
:
10506 uid_t ruid
, euid
, suid
;
10507 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
10508 if (!is_error(ret
)) {
10509 if (put_user_u32(ruid
, arg1
)
10510 || put_user_u32(euid
, arg2
)
10511 || put_user_u32(suid
, arg3
))
10517 #ifdef TARGET_NR_setresgid32
10518 case TARGET_NR_setresgid32
:
10519 ret
= get_errno(sys_setresgid(arg1
, arg2
, arg3
));
10522 #ifdef TARGET_NR_getresgid32
10523 case TARGET_NR_getresgid32
:
10525 gid_t rgid
, egid
, sgid
;
10526 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
10527 if (!is_error(ret
)) {
10528 if (put_user_u32(rgid
, arg1
)
10529 || put_user_u32(egid
, arg2
)
10530 || put_user_u32(sgid
, arg3
))
10536 #ifdef TARGET_NR_chown32
10537 case TARGET_NR_chown32
:
10538 if (!(p
= lock_user_string(arg1
)))
10540 ret
= get_errno(chown(p
, arg2
, arg3
));
10541 unlock_user(p
, arg1
, 0);
10544 #ifdef TARGET_NR_setuid32
10545 case TARGET_NR_setuid32
:
10546 ret
= get_errno(sys_setuid(arg1
));
10549 #ifdef TARGET_NR_setgid32
10550 case TARGET_NR_setgid32
:
10551 ret
= get_errno(sys_setgid(arg1
));
10554 #ifdef TARGET_NR_setfsuid32
10555 case TARGET_NR_setfsuid32
:
10556 ret
= get_errno(setfsuid(arg1
));
10559 #ifdef TARGET_NR_setfsgid32
10560 case TARGET_NR_setfsgid32
:
10561 ret
= get_errno(setfsgid(arg1
));
10565 case TARGET_NR_pivot_root
:
10566 goto unimplemented
;
10567 #ifdef TARGET_NR_mincore
10568 case TARGET_NR_mincore
:
10571 ret
= -TARGET_EFAULT
;
10572 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
10574 if (!(p
= lock_user_string(arg3
)))
10576 ret
= get_errno(mincore(a
, arg2
, p
));
10577 unlock_user(p
, arg3
, ret
);
10579 unlock_user(a
, arg1
, 0);
10583 #ifdef TARGET_NR_arm_fadvise64_64
10584 case TARGET_NR_arm_fadvise64_64
:
10585 /* arm_fadvise64_64 looks like fadvise64_64 but
10586 * with different argument order: fd, advice, offset, len
10587 * rather than the usual fd, offset, len, advice.
10588 * Note that offset and len are both 64-bit so appear as
10589 * pairs of 32-bit registers.
10591 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
10592 target_offset64(arg5
, arg6
), arg2
);
10593 ret
= -host_to_target_errno(ret
);
10597 #if TARGET_ABI_BITS == 32
10599 #ifdef TARGET_NR_fadvise64_64
10600 case TARGET_NR_fadvise64_64
:
10601 /* 6 args: fd, offset (high, low), len (high, low), advice */
10602 if (regpairs_aligned(cpu_env
)) {
10603 /* offset is in (3,4), len in (5,6) and advice in 7 */
10610 ret
= -host_to_target_errno(posix_fadvise(arg1
,
10611 target_offset64(arg2
, arg3
),
10612 target_offset64(arg4
, arg5
),
10617 #ifdef TARGET_NR_fadvise64
10618 case TARGET_NR_fadvise64
:
10619 /* 5 args: fd, offset (high, low), len, advice */
10620 if (regpairs_aligned(cpu_env
)) {
10621 /* offset is in (3,4), len in 5 and advice in 6 */
10627 ret
= -host_to_target_errno(posix_fadvise(arg1
,
10628 target_offset64(arg2
, arg3
),
10633 #else /* not a 32-bit ABI */
10634 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10635 #ifdef TARGET_NR_fadvise64_64
10636 case TARGET_NR_fadvise64_64
:
10638 #ifdef TARGET_NR_fadvise64
10639 case TARGET_NR_fadvise64
:
10641 #ifdef TARGET_S390X
10643 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
10644 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
10645 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
10646 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
10650 ret
= -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
10653 #endif /* end of 64-bit ABI fadvise handling */
10655 #ifdef TARGET_NR_madvise
10656 case TARGET_NR_madvise
:
10657 /* A straight passthrough may not be safe because qemu sometimes
10658 turns private file-backed mappings into anonymous mappings.
10659 This will break MADV_DONTNEED.
10660 This is a hint, so ignoring and returning success is ok. */
10661 ret
= get_errno(0);
10664 #if TARGET_ABI_BITS == 32
10665 case TARGET_NR_fcntl64
:
10669 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
10670 to_flock64_fn
*copyto
= copy_to_user_flock64
;
10673 if (((CPUARMState
*)cpu_env
)->eabi
) {
10674 copyfrom
= copy_from_user_eabi_flock64
;
10675 copyto
= copy_to_user_eabi_flock64
;
10679 cmd
= target_to_host_fcntl_cmd(arg2
);
10680 if (cmd
== -TARGET_EINVAL
) {
10686 case TARGET_F_GETLK64
:
10687 ret
= copyfrom(&fl
, arg3
);
10691 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
10693 ret
= copyto(arg3
, &fl
);
10697 case TARGET_F_SETLK64
:
10698 case TARGET_F_SETLKW64
:
10699 ret
= copyfrom(&fl
, arg3
);
10703 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
10706 ret
= do_fcntl(arg1
, arg2
, arg3
);
10712 #ifdef TARGET_NR_cacheflush
10713 case TARGET_NR_cacheflush
:
10714 /* self-modifying code is handled automatically, so nothing needed */
10718 #ifdef TARGET_NR_security
10719 case TARGET_NR_security
:
10720 goto unimplemented
;
10722 #ifdef TARGET_NR_getpagesize
10723 case TARGET_NR_getpagesize
:
10724 ret
= TARGET_PAGE_SIZE
;
10727 case TARGET_NR_gettid
:
10728 ret
= get_errno(gettid());
10730 #ifdef TARGET_NR_readahead
10731 case TARGET_NR_readahead
:
10732 #if TARGET_ABI_BITS == 32
10733 if (regpairs_aligned(cpu_env
)) {
10738 ret
= get_errno(readahead(arg1
, ((off64_t
)arg3
<< 32) | arg2
, arg4
));
10740 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
10745 #ifdef TARGET_NR_setxattr
10746 case TARGET_NR_listxattr
:
10747 case TARGET_NR_llistxattr
:
10751 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10753 ret
= -TARGET_EFAULT
;
10757 p
= lock_user_string(arg1
);
10759 if (num
== TARGET_NR_listxattr
) {
10760 ret
= get_errno(listxattr(p
, b
, arg3
));
10762 ret
= get_errno(llistxattr(p
, b
, arg3
));
10765 ret
= -TARGET_EFAULT
;
10767 unlock_user(p
, arg1
, 0);
10768 unlock_user(b
, arg2
, arg3
);
10771 case TARGET_NR_flistxattr
:
10775 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10777 ret
= -TARGET_EFAULT
;
10781 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
10782 unlock_user(b
, arg2
, arg3
);
10785 case TARGET_NR_setxattr
:
10786 case TARGET_NR_lsetxattr
:
10788 void *p
, *n
, *v
= 0;
10790 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
10792 ret
= -TARGET_EFAULT
;
10796 p
= lock_user_string(arg1
);
10797 n
= lock_user_string(arg2
);
10799 if (num
== TARGET_NR_setxattr
) {
10800 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
10802 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
10805 ret
= -TARGET_EFAULT
;
10807 unlock_user(p
, arg1
, 0);
10808 unlock_user(n
, arg2
, 0);
10809 unlock_user(v
, arg3
, 0);
10812 case TARGET_NR_fsetxattr
:
10816 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
10818 ret
= -TARGET_EFAULT
;
10822 n
= lock_user_string(arg2
);
10824 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
10826 ret
= -TARGET_EFAULT
;
10828 unlock_user(n
, arg2
, 0);
10829 unlock_user(v
, arg3
, 0);
10832 case TARGET_NR_getxattr
:
10833 case TARGET_NR_lgetxattr
:
10835 void *p
, *n
, *v
= 0;
10837 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
10839 ret
= -TARGET_EFAULT
;
10843 p
= lock_user_string(arg1
);
10844 n
= lock_user_string(arg2
);
10846 if (num
== TARGET_NR_getxattr
) {
10847 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
10849 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
10852 ret
= -TARGET_EFAULT
;
10854 unlock_user(p
, arg1
, 0);
10855 unlock_user(n
, arg2
, 0);
10856 unlock_user(v
, arg3
, arg4
);
10859 case TARGET_NR_fgetxattr
:
10863 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
10865 ret
= -TARGET_EFAULT
;
10869 n
= lock_user_string(arg2
);
10871 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
10873 ret
= -TARGET_EFAULT
;
10875 unlock_user(n
, arg2
, 0);
10876 unlock_user(v
, arg3
, arg4
);
10879 case TARGET_NR_removexattr
:
10880 case TARGET_NR_lremovexattr
:
10883 p
= lock_user_string(arg1
);
10884 n
= lock_user_string(arg2
);
10886 if (num
== TARGET_NR_removexattr
) {
10887 ret
= get_errno(removexattr(p
, n
));
10889 ret
= get_errno(lremovexattr(p
, n
));
10892 ret
= -TARGET_EFAULT
;
10894 unlock_user(p
, arg1
, 0);
10895 unlock_user(n
, arg2
, 0);
10898 case TARGET_NR_fremovexattr
:
10901 n
= lock_user_string(arg2
);
10903 ret
= get_errno(fremovexattr(arg1
, n
));
10905 ret
= -TARGET_EFAULT
;
10907 unlock_user(n
, arg2
, 0);
10911 #endif /* CONFIG_ATTR */
10912 #ifdef TARGET_NR_set_thread_area
10913 case TARGET_NR_set_thread_area
:
10914 #if defined(TARGET_MIPS)
10915 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
10918 #elif defined(TARGET_CRIS)
10920 ret
= -TARGET_EINVAL
;
10922 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
10926 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
10927 ret
= do_set_thread_area(cpu_env
, arg1
);
10929 #elif defined(TARGET_M68K)
10931 TaskState
*ts
= cpu
->opaque
;
10932 ts
->tp_value
= arg1
;
10937 goto unimplemented_nowarn
;
10940 #ifdef TARGET_NR_get_thread_area
10941 case TARGET_NR_get_thread_area
:
10942 #if defined(TARGET_I386) && defined(TARGET_ABI32)
10943 ret
= do_get_thread_area(cpu_env
, arg1
);
10945 #elif defined(TARGET_M68K)
10947 TaskState
*ts
= cpu
->opaque
;
10948 ret
= ts
->tp_value
;
10952 goto unimplemented_nowarn
;
10955 #ifdef TARGET_NR_getdomainname
10956 case TARGET_NR_getdomainname
:
10957 goto unimplemented_nowarn
;
10960 #ifdef TARGET_NR_clock_gettime
10961 case TARGET_NR_clock_gettime
:
10963 struct timespec ts
;
10964 ret
= get_errno(clock_gettime(arg1
, &ts
));
10965 if (!is_error(ret
)) {
10966 host_to_target_timespec(arg2
, &ts
);
10971 #ifdef TARGET_NR_clock_getres
10972 case TARGET_NR_clock_getres
:
10974 struct timespec ts
;
10975 ret
= get_errno(clock_getres(arg1
, &ts
));
10976 if (!is_error(ret
)) {
10977 host_to_target_timespec(arg2
, &ts
);
10982 #ifdef TARGET_NR_clock_nanosleep
10983 case TARGET_NR_clock_nanosleep
:
10985 struct timespec ts
;
10986 target_to_host_timespec(&ts
, arg3
);
10987 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
10988 &ts
, arg4
? &ts
: NULL
));
10990 host_to_target_timespec(arg4
, &ts
);
10992 #if defined(TARGET_PPC)
10993 /* clock_nanosleep is odd in that it returns positive errno values.
10994 * On PPC, CR0 bit 3 should be set in such a situation. */
10995 if (ret
&& ret
!= -TARGET_ERESTARTSYS
) {
10996 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
11003 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11004 case TARGET_NR_set_tid_address
:
11005 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
11009 case TARGET_NR_tkill
:
11010 ret
= get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
11013 case TARGET_NR_tgkill
:
11014 ret
= get_errno(safe_tgkill((int)arg1
, (int)arg2
,
11015 target_to_host_signal(arg3
)));
11018 #ifdef TARGET_NR_set_robust_list
11019 case TARGET_NR_set_robust_list
:
11020 case TARGET_NR_get_robust_list
:
11021 /* The ABI for supporting robust futexes has userspace pass
11022 * the kernel a pointer to a linked list which is updated by
11023 * userspace after the syscall; the list is walked by the kernel
11024 * when the thread exits. Since the linked list in QEMU guest
11025 * memory isn't a valid linked list for the host and we have
11026 * no way to reliably intercept the thread-death event, we can't
11027 * support these. Silently return ENOSYS so that guest userspace
11028 * falls back to a non-robust futex implementation (which should
11029 * be OK except in the corner case of the guest crashing while
11030 * holding a mutex that is shared with another process via
11033 goto unimplemented_nowarn
;
11036 #if defined(TARGET_NR_utimensat)
11037 case TARGET_NR_utimensat
:
11039 struct timespec
*tsp
, ts
[2];
11043 target_to_host_timespec(ts
, arg3
);
11044 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
11048 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
11050 if (!(p
= lock_user_string(arg2
))) {
11051 ret
= -TARGET_EFAULT
;
11054 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
11055 unlock_user(p
, arg2
, 0);
11060 case TARGET_NR_futex
:
11061 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
11063 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11064 case TARGET_NR_inotify_init
:
11065 ret
= get_errno(sys_inotify_init());
11068 #ifdef CONFIG_INOTIFY1
11069 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11070 case TARGET_NR_inotify_init1
:
11071 ret
= get_errno(sys_inotify_init1(arg1
));
11075 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11076 case TARGET_NR_inotify_add_watch
:
11077 p
= lock_user_string(arg2
);
11078 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
11079 unlock_user(p
, arg2
, 0);
11082 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11083 case TARGET_NR_inotify_rm_watch
:
11084 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
11088 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11089 case TARGET_NR_mq_open
:
11091 struct mq_attr posix_mq_attr
, *attrp
;
11093 p
= lock_user_string(arg1
- 1);
11095 copy_from_user_mq_attr (&posix_mq_attr
, arg4
);
11096 attrp
= &posix_mq_attr
;
11100 ret
= get_errno(mq_open(p
, arg2
, arg3
, attrp
));
11101 unlock_user (p
, arg1
, 0);
11105 case TARGET_NR_mq_unlink
:
11106 p
= lock_user_string(arg1
- 1);
11107 ret
= get_errno(mq_unlink(p
));
11108 unlock_user (p
, arg1
, 0);
11111 case TARGET_NR_mq_timedsend
:
11113 struct timespec ts
;
11115 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11117 target_to_host_timespec(&ts
, arg5
);
11118 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
11119 host_to_target_timespec(arg5
, &ts
);
11121 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
11123 unlock_user (p
, arg2
, arg3
);
11127 case TARGET_NR_mq_timedreceive
:
11129 struct timespec ts
;
11132 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11134 target_to_host_timespec(&ts
, arg5
);
11135 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11137 host_to_target_timespec(arg5
, &ts
);
11139 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11142 unlock_user (p
, arg2
, arg3
);
11144 put_user_u32(prio
, arg4
);
11148 /* Not implemented for now... */
11149 /* case TARGET_NR_mq_notify: */
11152 case TARGET_NR_mq_getsetattr
:
11154 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
11157 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
11158 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
11161 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
11162 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
11169 #ifdef CONFIG_SPLICE
11170 #ifdef TARGET_NR_tee
11171 case TARGET_NR_tee
:
11173 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
11177 #ifdef TARGET_NR_splice
11178 case TARGET_NR_splice
:
11180 loff_t loff_in
, loff_out
;
11181 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
11183 if (get_user_u64(loff_in
, arg2
)) {
11186 ploff_in
= &loff_in
;
11189 if (get_user_u64(loff_out
, arg4
)) {
11192 ploff_out
= &loff_out
;
11194 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
11196 if (put_user_u64(loff_in
, arg2
)) {
11201 if (put_user_u64(loff_out
, arg4
)) {
11208 #ifdef TARGET_NR_vmsplice
11209 case TARGET_NR_vmsplice
:
11211 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
11213 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
11214 unlock_iovec(vec
, arg2
, arg3
, 0);
11216 ret
= -host_to_target_errno(errno
);
11221 #endif /* CONFIG_SPLICE */
11222 #ifdef CONFIG_EVENTFD
11223 #if defined(TARGET_NR_eventfd)
11224 case TARGET_NR_eventfd
:
11225 ret
= get_errno(eventfd(arg1
, 0));
11226 fd_trans_unregister(ret
);
11229 #if defined(TARGET_NR_eventfd2)
11230 case TARGET_NR_eventfd2
:
11232 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
11233 if (arg2
& TARGET_O_NONBLOCK
) {
11234 host_flags
|= O_NONBLOCK
;
11236 if (arg2
& TARGET_O_CLOEXEC
) {
11237 host_flags
|= O_CLOEXEC
;
11239 ret
= get_errno(eventfd(arg1
, host_flags
));
11240 fd_trans_unregister(ret
);
11244 #endif /* CONFIG_EVENTFD */
11245 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11246 case TARGET_NR_fallocate
:
11247 #if TARGET_ABI_BITS == 32
11248 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
11249 target_offset64(arg5
, arg6
)));
11251 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
11255 #if defined(CONFIG_SYNC_FILE_RANGE)
11256 #if defined(TARGET_NR_sync_file_range)
11257 case TARGET_NR_sync_file_range
:
11258 #if TARGET_ABI_BITS == 32
11259 #if defined(TARGET_MIPS)
11260 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11261 target_offset64(arg5
, arg6
), arg7
));
11263 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
11264 target_offset64(arg4
, arg5
), arg6
));
11265 #endif /* !TARGET_MIPS */
11267 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
11271 #if defined(TARGET_NR_sync_file_range2)
11272 case TARGET_NR_sync_file_range2
:
11273 /* This is like sync_file_range but the arguments are reordered */
11274 #if TARGET_ABI_BITS == 32
11275 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11276 target_offset64(arg5
, arg6
), arg2
));
11278 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
11283 #if defined(TARGET_NR_signalfd4)
11284 case TARGET_NR_signalfd4
:
11285 ret
= do_signalfd4(arg1
, arg2
, arg4
);
11288 #if defined(TARGET_NR_signalfd)
11289 case TARGET_NR_signalfd
:
11290 ret
= do_signalfd4(arg1
, arg2
, 0);
11293 #if defined(CONFIG_EPOLL)
11294 #if defined(TARGET_NR_epoll_create)
11295 case TARGET_NR_epoll_create
:
11296 ret
= get_errno(epoll_create(arg1
));
11299 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11300 case TARGET_NR_epoll_create1
:
11301 ret
= get_errno(epoll_create1(arg1
));
11304 #if defined(TARGET_NR_epoll_ctl)
11305 case TARGET_NR_epoll_ctl
:
11307 struct epoll_event ep
;
11308 struct epoll_event
*epp
= 0;
11310 struct target_epoll_event
*target_ep
;
11311 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
11314 ep
.events
= tswap32(target_ep
->events
);
11315 /* The epoll_data_t union is just opaque data to the kernel,
11316 * so we transfer all 64 bits across and need not worry what
11317 * actual data type it is.
11319 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
11320 unlock_user_struct(target_ep
, arg4
, 0);
11323 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
11328 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11329 #if defined(TARGET_NR_epoll_wait)
11330 case TARGET_NR_epoll_wait
:
11332 #if defined(TARGET_NR_epoll_pwait)
11333 case TARGET_NR_epoll_pwait
:
11336 struct target_epoll_event
*target_ep
;
11337 struct epoll_event
*ep
;
11339 int maxevents
= arg3
;
11340 int timeout
= arg4
;
11342 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
11343 maxevents
* sizeof(struct target_epoll_event
), 1);
11348 ep
= alloca(maxevents
* sizeof(struct epoll_event
));
11351 #if defined(TARGET_NR_epoll_pwait)
11352 case TARGET_NR_epoll_pwait
:
11354 target_sigset_t
*target_set
;
11355 sigset_t _set
, *set
= &_set
;
11358 if (arg6
!= sizeof(target_sigset_t
)) {
11359 ret
= -TARGET_EINVAL
;
11363 target_set
= lock_user(VERIFY_READ
, arg5
,
11364 sizeof(target_sigset_t
), 1);
11366 unlock_user(target_ep
, arg2
, 0);
11369 target_to_host_sigset(set
, target_set
);
11370 unlock_user(target_set
, arg5
, 0);
11375 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
11376 set
, SIGSET_T_SIZE
));
11380 #if defined(TARGET_NR_epoll_wait)
11381 case TARGET_NR_epoll_wait
:
11382 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
11387 ret
= -TARGET_ENOSYS
;
11389 if (!is_error(ret
)) {
11391 for (i
= 0; i
< ret
; i
++) {
11392 target_ep
[i
].events
= tswap32(ep
[i
].events
);
11393 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
11396 unlock_user(target_ep
, arg2
, ret
* sizeof(struct target_epoll_event
));
11401 #ifdef TARGET_NR_prlimit64
11402 case TARGET_NR_prlimit64
:
11404 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11405 struct target_rlimit64
*target_rnew
, *target_rold
;
11406 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
11407 int resource
= target_to_host_resource(arg2
);
11409 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
11412 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
11413 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
11414 unlock_user_struct(target_rnew
, arg3
, 0);
11418 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
11419 if (!is_error(ret
) && arg4
) {
11420 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
11423 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
11424 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
11425 unlock_user_struct(target_rold
, arg4
, 1);
11430 #ifdef TARGET_NR_gethostname
11431 case TARGET_NR_gethostname
:
11433 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
11435 ret
= get_errno(gethostname(name
, arg2
));
11436 unlock_user(name
, arg1
, arg2
);
11438 ret
= -TARGET_EFAULT
;
11443 #ifdef TARGET_NR_atomic_cmpxchg_32
11444 case TARGET_NR_atomic_cmpxchg_32
:
11446 /* should use start_exclusive from main.c */
11447 abi_ulong mem_value
;
11448 if (get_user_u32(mem_value
, arg6
)) {
11449 target_siginfo_t info
;
11450 info
.si_signo
= SIGSEGV
;
11452 info
.si_code
= TARGET_SEGV_MAPERR
;
11453 info
._sifields
._sigfault
._addr
= arg6
;
11454 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
11458 if (mem_value
== arg2
)
11459 put_user_u32(arg1
, arg6
);
11464 #ifdef TARGET_NR_atomic_barrier
11465 case TARGET_NR_atomic_barrier
:
11467 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
11473 #ifdef TARGET_NR_timer_create
11474 case TARGET_NR_timer_create
:
11476 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11478 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
11481 int timer_index
= next_free_host_timer();
11483 if (timer_index
< 0) {
11484 ret
= -TARGET_EAGAIN
;
11486 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
11489 phost_sevp
= &host_sevp
;
11490 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
11496 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
11500 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
11509 #ifdef TARGET_NR_timer_settime
11510 case TARGET_NR_timer_settime
:
11512 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11513 * struct itimerspec * old_value */
11514 target_timer_t timerid
= get_timer_id(arg1
);
11518 } else if (arg3
== 0) {
11519 ret
= -TARGET_EINVAL
;
11521 timer_t htimer
= g_posix_timers
[timerid
];
11522 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
11524 target_to_host_itimerspec(&hspec_new
, arg3
);
11526 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
11527 host_to_target_itimerspec(arg2
, &hspec_old
);
11533 #ifdef TARGET_NR_timer_gettime
11534 case TARGET_NR_timer_gettime
:
11536 /* args: timer_t timerid, struct itimerspec *curr_value */
11537 target_timer_t timerid
= get_timer_id(arg1
);
11541 } else if (!arg2
) {
11542 ret
= -TARGET_EFAULT
;
11544 timer_t htimer
= g_posix_timers
[timerid
];
11545 struct itimerspec hspec
;
11546 ret
= get_errno(timer_gettime(htimer
, &hspec
));
11548 if (host_to_target_itimerspec(arg2
, &hspec
)) {
11549 ret
= -TARGET_EFAULT
;
11556 #ifdef TARGET_NR_timer_getoverrun
11557 case TARGET_NR_timer_getoverrun
:
11559 /* args: timer_t timerid */
11560 target_timer_t timerid
= get_timer_id(arg1
);
11565 timer_t htimer
= g_posix_timers
[timerid
];
11566 ret
= get_errno(timer_getoverrun(htimer
));
11568 fd_trans_unregister(ret
);
11573 #ifdef TARGET_NR_timer_delete
11574 case TARGET_NR_timer_delete
:
11576 /* args: timer_t timerid */
11577 target_timer_t timerid
= get_timer_id(arg1
);
11582 timer_t htimer
= g_posix_timers
[timerid
];
11583 ret
= get_errno(timer_delete(htimer
));
11584 g_posix_timers
[timerid
] = 0;
11590 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11591 case TARGET_NR_timerfd_create
:
11592 ret
= get_errno(timerfd_create(arg1
,
11593 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
11597 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11598 case TARGET_NR_timerfd_gettime
:
11600 struct itimerspec its_curr
;
11602 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
11604 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
11611 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11612 case TARGET_NR_timerfd_settime
:
11614 struct itimerspec its_new
, its_old
, *p_new
;
11617 if (target_to_host_itimerspec(&its_new
, arg3
)) {
11625 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
11627 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
11634 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11635 case TARGET_NR_ioprio_get
:
11636 ret
= get_errno(ioprio_get(arg1
, arg2
));
11640 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11641 case TARGET_NR_ioprio_set
:
11642 ret
= get_errno(ioprio_set(arg1
, arg2
, arg3
));
11646 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11647 case TARGET_NR_setns
:
11648 ret
= get_errno(setns(arg1
, arg2
));
11651 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11652 case TARGET_NR_unshare
:
11653 ret
= get_errno(unshare(arg1
));
11659 gemu_log("qemu: Unsupported syscall: %d\n", num
);
11660 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
11661 unimplemented_nowarn
:
11663 ret
= -TARGET_ENOSYS
;
11668 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
11671 print_syscall_ret(num
, ret
);
11672 trace_guest_user_syscall_ret(cpu
, num
, ret
);
11675 ret
= -TARGET_EFAULT
;