4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
29 #include <sys/mount.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
36 #include <linux/capability.h>
39 int __clone2(int (*fn
)(void *), void *child_stack_base
,
40 size_t stack_size
, int flags
, void *arg
, ...);
42 #include <sys/socket.h>
46 #include <sys/times.h>
49 #include <sys/statfs.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include "qemu-common.h"
60 #include <sys/timerfd.h>
66 #include <sys/eventfd.h>
69 #include <sys/epoll.h>
72 #include "qemu/xattr.h"
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
91 #include <linux/mtio.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <linux/netlink.h>
104 #ifdef CONFIG_RTNETLINK
105 #include <linux/rtnetlink.h>
107 #include <linux/audit.h>
108 #include "linux_loop.h"
113 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
114 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
117 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
118 * once. This exercises the codepaths for restart.
120 //#define DEBUG_ERESTARTSYS
122 //#include <linux/msdos_fs.h>
123 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
124 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
126 /* This is the size of the host kernel's sigset_t, needed where we make
127 * direct system calls that take a sigset_t pointer and a size.
129 #define SIGSET_T_SIZE (_NSIG / 8)
139 #define _syscall0(type,name) \
140 static type name (void) \
142 return syscall(__NR_##name); \
145 #define _syscall1(type,name,type1,arg1) \
146 static type name (type1 arg1) \
148 return syscall(__NR_##name, arg1); \
151 #define _syscall2(type,name,type1,arg1,type2,arg2) \
152 static type name (type1 arg1,type2 arg2) \
154 return syscall(__NR_##name, arg1, arg2); \
157 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
158 static type name (type1 arg1,type2 arg2,type3 arg3) \
160 return syscall(__NR_##name, arg1, arg2, arg3); \
163 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
164 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
166 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
169 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
171 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
173 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
177 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
178 type5,arg5,type6,arg6) \
179 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
182 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
186 #define __NR_sys_uname __NR_uname
187 #define __NR_sys_getcwd1 __NR_getcwd
188 #define __NR_sys_getdents __NR_getdents
189 #define __NR_sys_getdents64 __NR_getdents64
190 #define __NR_sys_getpriority __NR_getpriority
191 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
192 #define __NR_sys_syslog __NR_syslog
193 #define __NR_sys_futex __NR_futex
194 #define __NR_sys_inotify_init __NR_inotify_init
195 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
196 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
198 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
200 #define __NR__llseek __NR_lseek
203 /* Newer kernel ports have llseek() instead of _llseek() */
204 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
205 #define TARGET_NR__llseek TARGET_NR_llseek
209 _syscall0(int, gettid
)
211 /* This is a replacement for the host gettid() and must return a host
213 static int gettid(void) {
217 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
218 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
220 #if !defined(__NR_getdents) || \
221 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
222 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
224 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
225 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
226 loff_t
*, res
, uint
, wh
);
228 _syscall3(int,sys_rt_sigqueueinfo
,int,pid
,int,sig
,siginfo_t
*,uinfo
)
229 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
230 #ifdef __NR_exit_group
231 _syscall1(int,exit_group
,int,error_code
)
233 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
234 _syscall1(int,set_tid_address
,int *,tidptr
)
236 #if defined(TARGET_NR_futex) && defined(__NR_futex)
237 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
238 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
240 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
241 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
242 unsigned long *, user_mask_ptr
);
243 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
244 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
245 unsigned long *, user_mask_ptr
);
246 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
248 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
249 struct __user_cap_data_struct
*, data
);
250 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
251 struct __user_cap_data_struct
*, data
);
252 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
253 _syscall2(int, ioprio_get
, int, which
, int, who
)
255 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
256 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
258 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
259 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
262 static bitmask_transtbl fcntl_flags_tbl
[] = {
263 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
264 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
265 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
266 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
267 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
268 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
269 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
270 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
271 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
272 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
273 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
274 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
275 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
276 #if defined(O_DIRECT)
277 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
279 #if defined(O_NOATIME)
280 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
282 #if defined(O_CLOEXEC)
283 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
286 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
288 /* Don't terminate the list prematurely on 64-bit host+guest. */
289 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
290 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
295 typedef abi_long (*TargetFdDataFunc
)(void *, size_t);
296 typedef abi_long (*TargetFdAddrFunc
)(void *, abi_ulong
, socklen_t
);
297 typedef struct TargetFdTrans
{
298 TargetFdDataFunc host_to_target_data
;
299 TargetFdDataFunc target_to_host_data
;
300 TargetFdAddrFunc target_to_host_addr
;
303 static TargetFdTrans
**target_fd_trans
;
305 static unsigned int target_fd_max
;
307 static TargetFdDataFunc
fd_trans_target_to_host_data(int fd
)
309 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
310 return target_fd_trans
[fd
]->target_to_host_data
;
315 static TargetFdDataFunc
fd_trans_host_to_target_data(int fd
)
317 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
318 return target_fd_trans
[fd
]->host_to_target_data
;
323 static TargetFdAddrFunc
fd_trans_target_to_host_addr(int fd
)
325 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
326 return target_fd_trans
[fd
]->target_to_host_addr
;
331 static void fd_trans_register(int fd
, TargetFdTrans
*trans
)
335 if (fd
>= target_fd_max
) {
336 oldmax
= target_fd_max
;
337 target_fd_max
= ((fd
>> 6) + 1) << 6; /* by slice of 64 entries */
338 target_fd_trans
= g_renew(TargetFdTrans
*,
339 target_fd_trans
, target_fd_max
);
340 memset((void *)(target_fd_trans
+ oldmax
), 0,
341 (target_fd_max
- oldmax
) * sizeof(TargetFdTrans
*));
343 target_fd_trans
[fd
] = trans
;
346 static void fd_trans_unregister(int fd
)
348 if (fd
>= 0 && fd
< target_fd_max
) {
349 target_fd_trans
[fd
] = NULL
;
353 static void fd_trans_dup(int oldfd
, int newfd
)
355 fd_trans_unregister(newfd
);
356 if (oldfd
< target_fd_max
&& target_fd_trans
[oldfd
]) {
357 fd_trans_register(newfd
, target_fd_trans
[oldfd
]);
/* getcwd(2)-style helper: fill buf with the current directory.
 * Returns the string length including the trailing NUL on success
 * (matching the kernel getcwd syscall), or -1 with errno set on
 * failure (e.g. ERANGE when size is too small).
 */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return -1;
    }
    return strlen(buf)+1;
}
370 #ifdef TARGET_NR_utimensat
371 #ifdef CONFIG_UTIMENSAT
/* utimensat(2) wrapper: a NULL pathname means "operate on the fd
 * itself", which maps to futimens(); otherwise forward to utimensat().
 * Returns 0 on success, -1 with errno set on failure.
 */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    if (pathname == NULL)
        return futimens(dirfd, times);
    else
        return utimensat(dirfd, pathname, times, flags);
}
380 #elif defined(__NR_utimensat)
381 #define __NR_sys_utimensat __NR_utimensat
382 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
383 const struct timespec
*,tsp
,int,flags
)
385 static int sys_utimensat(int dirfd
, const char *pathname
,
386 const struct timespec times
[2], int flags
)
392 #endif /* TARGET_NR_utimensat */
394 #ifdef CONFIG_INOTIFY
395 #include <sys/inotify.h>
397 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin wrapper over inotify_init(2); returns a new inotify fd or -1. */
static int sys_inotify_init(void)
{
  return (inotify_init());
}
403 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Thin wrapper over inotify_add_watch(2); returns a watch descriptor
 * or -1 with errno set.
 */
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
409 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Thin wrapper over inotify_rm_watch(2); returns 0 or -1. */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
415 #ifdef CONFIG_INOTIFY1
416 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Thin wrapper over inotify_init1(2) (inotify_init with flags such as
 * IN_NONBLOCK/IN_CLOEXEC); returns a new inotify fd or -1.
 */
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
424 /* Userspace can usually survive runtime without inotify */
425 #undef TARGET_NR_inotify_init
426 #undef TARGET_NR_inotify_init1
427 #undef TARGET_NR_inotify_add_watch
428 #undef TARGET_NR_inotify_rm_watch
429 #endif /* CONFIG_INOTIFY */
431 #if defined(TARGET_NR_prlimit64)
432 #ifndef __NR_prlimit64
433 # define __NR_prlimit64 -1
435 #define __NR_sys_prlimit64 __NR_prlimit64
436 /* The glibc rlimit structure may not be that used by the underlying syscall */
437 struct host_rlimit64
{
441 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
442 const struct host_rlimit64
*, new_limit
,
443 struct host_rlimit64
*, old_limit
)
#if defined(TARGET_NR_timer_create)
/* Maxiumum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

/* Claim the first free slot in g_posix_timers by writing a non-zero
 * placeholder, returning its index, or -1 if all slots are in use.
 * FIXME: Does finding the next free slot require a lock?
 */
static inline int next_free_host_timer(void)
{
    int k;

    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
465 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers. */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
481 #define ERRNO_TABLE_SIZE 1200
483 /* target_to_host_errno_table[] is initialized from
484 * host_to_target_errno_table[] in syscall_init(). */
485 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
489 * This list is the union of errno values overridden in asm-<arch>/errno.h
490 * minus the errnos that are not actually generic to all archs.
492 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
493 [EAGAIN
] = TARGET_EAGAIN
,
494 [EIDRM
] = TARGET_EIDRM
,
495 [ECHRNG
] = TARGET_ECHRNG
,
496 [EL2NSYNC
] = TARGET_EL2NSYNC
,
497 [EL3HLT
] = TARGET_EL3HLT
,
498 [EL3RST
] = TARGET_EL3RST
,
499 [ELNRNG
] = TARGET_ELNRNG
,
500 [EUNATCH
] = TARGET_EUNATCH
,
501 [ENOCSI
] = TARGET_ENOCSI
,
502 [EL2HLT
] = TARGET_EL2HLT
,
503 [EDEADLK
] = TARGET_EDEADLK
,
504 [ENOLCK
] = TARGET_ENOLCK
,
505 [EBADE
] = TARGET_EBADE
,
506 [EBADR
] = TARGET_EBADR
,
507 [EXFULL
] = TARGET_EXFULL
,
508 [ENOANO
] = TARGET_ENOANO
,
509 [EBADRQC
] = TARGET_EBADRQC
,
510 [EBADSLT
] = TARGET_EBADSLT
,
511 [EBFONT
] = TARGET_EBFONT
,
512 [ENOSTR
] = TARGET_ENOSTR
,
513 [ENODATA
] = TARGET_ENODATA
,
514 [ETIME
] = TARGET_ETIME
,
515 [ENOSR
] = TARGET_ENOSR
,
516 [ENONET
] = TARGET_ENONET
,
517 [ENOPKG
] = TARGET_ENOPKG
,
518 [EREMOTE
] = TARGET_EREMOTE
,
519 [ENOLINK
] = TARGET_ENOLINK
,
520 [EADV
] = TARGET_EADV
,
521 [ESRMNT
] = TARGET_ESRMNT
,
522 [ECOMM
] = TARGET_ECOMM
,
523 [EPROTO
] = TARGET_EPROTO
,
524 [EDOTDOT
] = TARGET_EDOTDOT
,
525 [EMULTIHOP
] = TARGET_EMULTIHOP
,
526 [EBADMSG
] = TARGET_EBADMSG
,
527 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
528 [EOVERFLOW
] = TARGET_EOVERFLOW
,
529 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
530 [EBADFD
] = TARGET_EBADFD
,
531 [EREMCHG
] = TARGET_EREMCHG
,
532 [ELIBACC
] = TARGET_ELIBACC
,
533 [ELIBBAD
] = TARGET_ELIBBAD
,
534 [ELIBSCN
] = TARGET_ELIBSCN
,
535 [ELIBMAX
] = TARGET_ELIBMAX
,
536 [ELIBEXEC
] = TARGET_ELIBEXEC
,
537 [EILSEQ
] = TARGET_EILSEQ
,
538 [ENOSYS
] = TARGET_ENOSYS
,
539 [ELOOP
] = TARGET_ELOOP
,
540 [ERESTART
] = TARGET_ERESTART
,
541 [ESTRPIPE
] = TARGET_ESTRPIPE
,
542 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
543 [EUSERS
] = TARGET_EUSERS
,
544 [ENOTSOCK
] = TARGET_ENOTSOCK
,
545 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
546 [EMSGSIZE
] = TARGET_EMSGSIZE
,
547 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
548 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
549 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
550 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
551 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
552 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
553 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
554 [EADDRINUSE
] = TARGET_EADDRINUSE
,
555 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
556 [ENETDOWN
] = TARGET_ENETDOWN
,
557 [ENETUNREACH
] = TARGET_ENETUNREACH
,
558 [ENETRESET
] = TARGET_ENETRESET
,
559 [ECONNABORTED
] = TARGET_ECONNABORTED
,
560 [ECONNRESET
] = TARGET_ECONNRESET
,
561 [ENOBUFS
] = TARGET_ENOBUFS
,
562 [EISCONN
] = TARGET_EISCONN
,
563 [ENOTCONN
] = TARGET_ENOTCONN
,
564 [EUCLEAN
] = TARGET_EUCLEAN
,
565 [ENOTNAM
] = TARGET_ENOTNAM
,
566 [ENAVAIL
] = TARGET_ENAVAIL
,
567 [EISNAM
] = TARGET_EISNAM
,
568 [EREMOTEIO
] = TARGET_EREMOTEIO
,
569 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
570 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
571 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
572 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
573 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
574 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
575 [EALREADY
] = TARGET_EALREADY
,
576 [EINPROGRESS
] = TARGET_EINPROGRESS
,
577 [ESTALE
] = TARGET_ESTALE
,
578 [ECANCELED
] = TARGET_ECANCELED
,
579 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
580 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
582 [ENOKEY
] = TARGET_ENOKEY
,
585 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
588 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
591 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
594 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
596 #ifdef ENOTRECOVERABLE
597 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
601 static inline int host_to_target_errno(int err
)
603 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
604 host_to_target_errno_table
[err
]) {
605 return host_to_target_errno_table
[err
];
610 static inline int target_to_host_errno(int err
)
612 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
613 target_to_host_errno_table
[err
]) {
614 return target_to_host_errno_table
[err
];
619 static inline abi_long
get_errno(abi_long ret
)
622 return -host_to_target_errno(errno
);
627 static inline int is_error(abi_long ret
)
629 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
632 const char *target_strerror(int err
)
634 if (err
== TARGET_ERESTARTSYS
) {
635 return "To be restarted";
637 if (err
== TARGET_QEMU_ESIGRETURN
) {
638 return "Successful exit from sigreturn";
641 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
644 return strerror(target_to_host_errno(err
));
647 #define safe_syscall0(type, name) \
648 static type safe_##name(void) \
650 return safe_syscall(__NR_##name); \
653 #define safe_syscall1(type, name, type1, arg1) \
654 static type safe_##name(type1 arg1) \
656 return safe_syscall(__NR_##name, arg1); \
659 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
660 static type safe_##name(type1 arg1, type2 arg2) \
662 return safe_syscall(__NR_##name, arg1, arg2); \
665 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
666 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
668 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
671 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
673 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
675 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
678 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
679 type4, arg4, type5, arg5) \
680 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
683 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
686 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
687 type4, arg4, type5, arg5, type6, arg6) \
688 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
689 type5 arg5, type6 arg6) \
691 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
694 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
695 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
696 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
697 int, flags
, mode_t
, mode
)
698 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
699 struct rusage
*, rusage
)
700 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
701 int, options
, struct rusage
*, rusage
)
702 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
703 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
704 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
705 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
706 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
708 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
709 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
711 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
712 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
713 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
714 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
715 safe_syscall2(int, tkill
, int, tid
, int, sig
)
716 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
717 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
718 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
719 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
721 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
722 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
723 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
724 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
725 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
726 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
727 safe_syscall2(int, flock
, int, fd
, int, operation
)
728 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
729 const struct timespec
*, uts
, size_t, sigsetsize
)
730 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
732 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
733 struct timespec
*, rem
)
734 #ifdef TARGET_NR_clock_nanosleep
735 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
736 const struct timespec
*, req
, struct timespec
*, rem
)
739 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
741 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
742 long, msgtype
, int, flags
)
743 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
744 unsigned, nsops
, const struct timespec
*, timeout
)
746 /* This host kernel architecture uses a single ipc syscall; fake up
747 * wrappers for the sub-operations to hide this implementation detail.
748 * Annoyingly we can't include linux/ipc.h to get the constant definitions
749 * for the call parameter because some structs in there conflict with the
750 * sys/ipc.h ones. So we just define them here, and rely on them being
751 * the same for all host architectures.
753 #define Q_SEMTIMEDOP 4
756 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
758 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
759 void *, ptr
, long, fifth
)
760 static int safe_msgsnd(int msgid
, const void *msgp
, size_t sz
, int flags
)
762 return safe_ipc(Q_IPCCALL(0, Q_MSGSND
), msgid
, sz
, flags
, (void *)msgp
, 0);
764 static int safe_msgrcv(int msgid
, void *msgp
, size_t sz
, long type
, int flags
)
766 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV
), msgid
, sz
, flags
, msgp
, type
);
768 static int safe_semtimedop(int semid
, struct sembuf
*tsops
, unsigned nsops
,
769 const struct timespec
*timeout
)
771 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP
), semid
, nsops
, 0, tsops
,
775 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
776 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
777 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
778 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
779 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
781 /* We do ioctl like this rather than via safe_syscall3 to preserve the
782 * "third argument might be integer or pointer or not present" behaviour of
785 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
787 static inline int host_to_target_sock_type(int host_type
)
791 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
793 target_type
= TARGET_SOCK_DGRAM
;
796 target_type
= TARGET_SOCK_STREAM
;
799 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
803 #if defined(SOCK_CLOEXEC)
804 if (host_type
& SOCK_CLOEXEC
) {
805 target_type
|= TARGET_SOCK_CLOEXEC
;
809 #if defined(SOCK_NONBLOCK)
810 if (host_type
& SOCK_NONBLOCK
) {
811 target_type
|= TARGET_SOCK_NONBLOCK
;
818 static abi_ulong target_brk
;
819 static abi_ulong target_original_brk
;
820 static abi_ulong brk_page
;
822 void target_set_brk(abi_ulong new_brk
)
824 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
825 brk_page
= HOST_PAGE_ALIGN(target_brk
);
828 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
829 #define DEBUGF_BRK(message, args...)
831 /* do_brk() must return target values and target errnos. */
832 abi_long
do_brk(abi_ulong new_brk
)
834 abi_long mapped_addr
;
837 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
840 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
843 if (new_brk
< target_original_brk
) {
844 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
849 /* If the new brk is less than the highest page reserved to the
850 * target heap allocation, set it and we're almost done... */
851 if (new_brk
<= brk_page
) {
852 /* Heap contents are initialized to zero, as for anonymous
854 if (new_brk
> target_brk
) {
855 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
857 target_brk
= new_brk
;
858 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
862 /* We need to allocate more memory after the brk... Note that
863 * we don't use MAP_FIXED because that will map over the top of
864 * any existing mapping (like the one with the host libc or qemu
865 * itself); instead we treat "mapped but at wrong address" as
866 * a failure and unmap again.
868 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
869 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
870 PROT_READ
|PROT_WRITE
,
871 MAP_ANON
|MAP_PRIVATE
, 0, 0));
873 if (mapped_addr
== brk_page
) {
874 /* Heap contents are initialized to zero, as for anonymous
875 * mapped pages. Technically the new pages are already
876 * initialized to zero since they *are* anonymous mapped
877 * pages, however we have to take care with the contents that
878 * come from the remaining part of the previous page: it may
879 * contains garbage data due to a previous heap usage (grown
881 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
883 target_brk
= new_brk
;
884 brk_page
= HOST_PAGE_ALIGN(target_brk
);
885 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
888 } else if (mapped_addr
!= -1) {
889 /* Mapped but at wrong address, meaning there wasn't actually
890 * enough space for this brk.
892 target_munmap(mapped_addr
, new_alloc_size
);
894 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
897 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
900 #if defined(TARGET_ALPHA)
901 /* We (partially) emulate OSF/1 on Alpha, which requires we
902 return a proper errno, not an unchanged brk value. */
903 return -TARGET_ENOMEM
;
905 /* For everything else, return the previous break. */
909 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
910 abi_ulong target_fds_addr
,
914 abi_ulong b
, *target_fds
;
916 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
917 if (!(target_fds
= lock_user(VERIFY_READ
,
919 sizeof(abi_ulong
) * nw
,
921 return -TARGET_EFAULT
;
925 for (i
= 0; i
< nw
; i
++) {
926 /* grab the abi_ulong */
927 __get_user(b
, &target_fds
[i
]);
928 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
929 /* check the bit inside the abi_ulong */
936 unlock_user(target_fds
, target_fds_addr
, 0);
941 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
942 abi_ulong target_fds_addr
,
945 if (target_fds_addr
) {
946 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
947 return -TARGET_EFAULT
;
955 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
961 abi_ulong
*target_fds
;
963 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
964 if (!(target_fds
= lock_user(VERIFY_WRITE
,
966 sizeof(abi_ulong
) * nw
,
968 return -TARGET_EFAULT
;
971 for (i
= 0; i
< nw
; i
++) {
973 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
974 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
977 __put_user(v
, &target_fds
[i
]);
980 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
985 #if defined(__alpha__)
991 static inline abi_long
host_to_target_clock_t(long ticks
)
993 #if HOST_HZ == TARGET_HZ
996 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
1000 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
1001 const struct rusage
*rusage
)
1003 struct target_rusage
*target_rusage
;
1005 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
1006 return -TARGET_EFAULT
;
1007 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
1008 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
1009 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
1010 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
1011 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
1012 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
1013 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
1014 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
1015 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
1016 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
1017 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1018 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1019 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1020 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1021 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1022 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1023 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1024 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1025 unlock_user_struct(target_rusage
, target_addr
, 1);
1030 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1032 abi_ulong target_rlim_swap
;
1035 target_rlim_swap
= tswapal(target_rlim
);
1036 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1037 return RLIM_INFINITY
;
1039 result
= target_rlim_swap
;
1040 if (target_rlim_swap
!= (rlim_t
)result
)
1041 return RLIM_INFINITY
;
1046 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1048 abi_ulong target_rlim_swap
;
1051 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1052 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1054 target_rlim_swap
= rlim
;
1055 result
= tswapal(target_rlim_swap
);
1060 static inline int target_to_host_resource(int code
)
1063 case TARGET_RLIMIT_AS
:
1065 case TARGET_RLIMIT_CORE
:
1067 case TARGET_RLIMIT_CPU
:
1069 case TARGET_RLIMIT_DATA
:
1071 case TARGET_RLIMIT_FSIZE
:
1072 return RLIMIT_FSIZE
;
1073 case TARGET_RLIMIT_LOCKS
:
1074 return RLIMIT_LOCKS
;
1075 case TARGET_RLIMIT_MEMLOCK
:
1076 return RLIMIT_MEMLOCK
;
1077 case TARGET_RLIMIT_MSGQUEUE
:
1078 return RLIMIT_MSGQUEUE
;
1079 case TARGET_RLIMIT_NICE
:
1081 case TARGET_RLIMIT_NOFILE
:
1082 return RLIMIT_NOFILE
;
1083 case TARGET_RLIMIT_NPROC
:
1084 return RLIMIT_NPROC
;
1085 case TARGET_RLIMIT_RSS
:
1087 case TARGET_RLIMIT_RTPRIO
:
1088 return RLIMIT_RTPRIO
;
1089 case TARGET_RLIMIT_SIGPENDING
:
1090 return RLIMIT_SIGPENDING
;
1091 case TARGET_RLIMIT_STACK
:
1092 return RLIMIT_STACK
;
1098 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1099 abi_ulong target_tv_addr
)
1101 struct target_timeval
*target_tv
;
1103 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
1104 return -TARGET_EFAULT
;
1106 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1107 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1109 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1114 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1115 const struct timeval
*tv
)
1117 struct target_timeval
*target_tv
;
1119 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
1120 return -TARGET_EFAULT
;
1122 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1123 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1125 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1130 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1131 abi_ulong target_tz_addr
)
1133 struct target_timezone
*target_tz
;
1135 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1136 return -TARGET_EFAULT
;
1139 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1140 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1142 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1147 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1150 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1151 abi_ulong target_mq_attr_addr
)
1153 struct target_mq_attr
*target_mq_attr
;
1155 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1156 target_mq_attr_addr
, 1))
1157 return -TARGET_EFAULT
;
1159 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1160 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1161 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1162 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1164 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1169 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1170 const struct mq_attr
*attr
)
1172 struct target_mq_attr
*target_mq_attr
;
1174 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1175 target_mq_attr_addr
, 0))
1176 return -TARGET_EFAULT
;
1178 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1179 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1180 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1181 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1183 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1189 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1190 /* do_select() must return target values and target errnos. */
1191 static abi_long
do_select(int n
,
1192 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1193 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1195 fd_set rfds
, wfds
, efds
;
1196 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1198 struct timespec ts
, *ts_ptr
;
1201 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1205 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1209 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1214 if (target_tv_addr
) {
1215 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1216 return -TARGET_EFAULT
;
1217 ts
.tv_sec
= tv
.tv_sec
;
1218 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1224 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1227 if (!is_error(ret
)) {
1228 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1229 return -TARGET_EFAULT
;
1230 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1231 return -TARGET_EFAULT
;
1232 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1233 return -TARGET_EFAULT
;
1235 if (target_tv_addr
) {
1236 tv
.tv_sec
= ts
.tv_sec
;
1237 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1238 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1239 return -TARGET_EFAULT
;
1248 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1251 return pipe2(host_pipe
, flags
);
1257 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1258 int flags
, int is_pipe2
)
1262 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1265 return get_errno(ret
);
1267 /* Several targets have special calling conventions for the original
1268 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1270 #if defined(TARGET_ALPHA)
1271 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1272 return host_pipe
[0];
1273 #elif defined(TARGET_MIPS)
1274 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1275 return host_pipe
[0];
1276 #elif defined(TARGET_SH4)
1277 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1278 return host_pipe
[0];
1279 #elif defined(TARGET_SPARC)
1280 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1281 return host_pipe
[0];
1285 if (put_user_s32(host_pipe
[0], pipedes
)
1286 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1287 return -TARGET_EFAULT
;
1288 return get_errno(ret
);
1291 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1292 abi_ulong target_addr
,
1295 struct target_ip_mreqn
*target_smreqn
;
1297 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1299 return -TARGET_EFAULT
;
1300 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1301 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1302 if (len
== sizeof(struct target_ip_mreqn
))
1303 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1304 unlock_user(target_smreqn
, target_addr
, 0);
1309 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1310 abi_ulong target_addr
,
1313 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1314 sa_family_t sa_family
;
1315 struct target_sockaddr
*target_saddr
;
1317 if (fd_trans_target_to_host_addr(fd
)) {
1318 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1321 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1323 return -TARGET_EFAULT
;
1325 sa_family
= tswap16(target_saddr
->sa_family
);
1327 /* Oops. The caller might send an incomplete sun_path; sun_path
1328 * must be terminated by \0 (see the manual page), but
1329 * unfortunately it is quite common to specify sockaddr_un
1330 * length as "strlen(x->sun_path)" while it should be
1331 * "strlen(...) + 1". We'll fix that here if needed.
1332 * Linux kernel has a similar feature.
1335 if (sa_family
== AF_UNIX
) {
1336 if (len
< unix_maxlen
&& len
> 0) {
1337 char *cp
= (char*)target_saddr
;
1339 if ( cp
[len
-1] && !cp
[len
] )
1342 if (len
> unix_maxlen
)
1346 memcpy(addr
, target_saddr
, len
);
1347 addr
->sa_family
= sa_family
;
1348 if (sa_family
== AF_NETLINK
) {
1349 struct sockaddr_nl
*nladdr
;
1351 nladdr
= (struct sockaddr_nl
*)addr
;
1352 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1353 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1354 } else if (sa_family
== AF_PACKET
) {
1355 struct target_sockaddr_ll
*lladdr
;
1357 lladdr
= (struct target_sockaddr_ll
*)addr
;
1358 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1359 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1361 unlock_user(target_saddr
, target_addr
, 0);
1366 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1367 struct sockaddr
*addr
,
1370 struct target_sockaddr
*target_saddr
;
1372 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1374 return -TARGET_EFAULT
;
1375 memcpy(target_saddr
, addr
, len
);
1376 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1377 if (addr
->sa_family
== AF_NETLINK
) {
1378 struct sockaddr_nl
*target_nl
= (struct sockaddr_nl
*)target_saddr
;
1379 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1380 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1382 unlock_user(target_saddr
, target_addr
, len
);
1387 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1388 struct target_msghdr
*target_msgh
)
1390 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1391 abi_long msg_controllen
;
1392 abi_ulong target_cmsg_addr
;
1393 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1394 socklen_t space
= 0;
1396 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1397 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1399 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1400 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1401 target_cmsg_start
= target_cmsg
;
1403 return -TARGET_EFAULT
;
1405 while (cmsg
&& target_cmsg
) {
1406 void *data
= CMSG_DATA(cmsg
);
1407 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1409 int len
= tswapal(target_cmsg
->cmsg_len
)
1410 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
1412 space
+= CMSG_SPACE(len
);
1413 if (space
> msgh
->msg_controllen
) {
1414 space
-= CMSG_SPACE(len
);
1415 /* This is a QEMU bug, since we allocated the payload
1416 * area ourselves (unlike overflow in host-to-target
1417 * conversion, which is just the guest giving us a buffer
1418 * that's too small). It can't happen for the payload types
1419 * we currently support; if it becomes an issue in future
1420 * we would need to improve our allocation strategy to
1421 * something more intelligent than "twice the size of the
1422 * target buffer we're reading from".
1424 gemu_log("Host cmsg overflow\n");
1428 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1429 cmsg
->cmsg_level
= SOL_SOCKET
;
1431 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1433 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1434 cmsg
->cmsg_len
= CMSG_LEN(len
);
1436 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1437 int *fd
= (int *)data
;
1438 int *target_fd
= (int *)target_data
;
1439 int i
, numfds
= len
/ sizeof(int);
1441 for (i
= 0; i
< numfds
; i
++) {
1442 __get_user(fd
[i
], target_fd
+ i
);
1444 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1445 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1446 struct ucred
*cred
= (struct ucred
*)data
;
1447 struct target_ucred
*target_cred
=
1448 (struct target_ucred
*)target_data
;
1450 __get_user(cred
->pid
, &target_cred
->pid
);
1451 __get_user(cred
->uid
, &target_cred
->uid
);
1452 __get_user(cred
->gid
, &target_cred
->gid
);
1454 gemu_log("Unsupported ancillary data: %d/%d\n",
1455 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1456 memcpy(data
, target_data
, len
);
1459 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1460 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1463 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1465 msgh
->msg_controllen
= space
;
1469 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1470 struct msghdr
*msgh
)
1472 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1473 abi_long msg_controllen
;
1474 abi_ulong target_cmsg_addr
;
1475 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1476 socklen_t space
= 0;
1478 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1479 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1481 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1482 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1483 target_cmsg_start
= target_cmsg
;
1485 return -TARGET_EFAULT
;
1487 while (cmsg
&& target_cmsg
) {
1488 void *data
= CMSG_DATA(cmsg
);
1489 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1491 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
1492 int tgt_len
, tgt_space
;
1494 /* We never copy a half-header but may copy half-data;
1495 * this is Linux's behaviour in put_cmsg(). Note that
1496 * truncation here is a guest problem (which we report
1497 * to the guest via the CTRUNC bit), unlike truncation
1498 * in target_to_host_cmsg, which is a QEMU bug.
1500 if (msg_controllen
< sizeof(struct cmsghdr
)) {
1501 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1505 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1506 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1508 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1510 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1512 tgt_len
= TARGET_CMSG_LEN(len
);
1514 /* Payload types which need a different size of payload on
1515 * the target must adjust tgt_len here.
1517 switch (cmsg
->cmsg_level
) {
1519 switch (cmsg
->cmsg_type
) {
1521 tgt_len
= sizeof(struct target_timeval
);
1530 if (msg_controllen
< tgt_len
) {
1531 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1532 tgt_len
= msg_controllen
;
1535 /* We must now copy-and-convert len bytes of payload
1536 * into tgt_len bytes of destination space. Bear in mind
1537 * that in both source and destination we may be dealing
1538 * with a truncated value!
1540 switch (cmsg
->cmsg_level
) {
1542 switch (cmsg
->cmsg_type
) {
1545 int *fd
= (int *)data
;
1546 int *target_fd
= (int *)target_data
;
1547 int i
, numfds
= tgt_len
/ sizeof(int);
1549 for (i
= 0; i
< numfds
; i
++) {
1550 __put_user(fd
[i
], target_fd
+ i
);
1556 struct timeval
*tv
= (struct timeval
*)data
;
1557 struct target_timeval
*target_tv
=
1558 (struct target_timeval
*)target_data
;
1560 if (len
!= sizeof(struct timeval
) ||
1561 tgt_len
!= sizeof(struct target_timeval
)) {
1565 /* copy struct timeval to target */
1566 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1567 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1570 case SCM_CREDENTIALS
:
1572 struct ucred
*cred
= (struct ucred
*)data
;
1573 struct target_ucred
*target_cred
=
1574 (struct target_ucred
*)target_data
;
1576 __put_user(cred
->pid
, &target_cred
->pid
);
1577 __put_user(cred
->uid
, &target_cred
->uid
);
1578 __put_user(cred
->gid
, &target_cred
->gid
);
1588 gemu_log("Unsupported ancillary data: %d/%d\n",
1589 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1590 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1591 if (tgt_len
> len
) {
1592 memset(target_data
+ len
, 0, tgt_len
- len
);
1596 target_cmsg
->cmsg_len
= tswapal(tgt_len
);
1597 tgt_space
= TARGET_CMSG_SPACE(len
);
1598 if (msg_controllen
< tgt_space
) {
1599 tgt_space
= msg_controllen
;
1601 msg_controllen
-= tgt_space
;
1603 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1604 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1607 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1609 target_msgh
->msg_controllen
= tswapal(space
);
/*
 * Byte-swap every field of a netlink message header in place.  The swap
 * is its own inverse, so this converts host<->target in either direction.
 */
static void tswap_nlmsghdr(struct nlmsghdr *nlh)
{
    /* 32-bit fields */
    nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
    nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
    nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
    /* 16-bit fields */
    nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
    nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
}
1622 static abi_long
host_to_target_for_each_nlmsg(struct nlmsghdr
*nlh
,
1624 abi_long (*host_to_target_nlmsg
)
1625 (struct nlmsghdr
*))
1630 while (len
> sizeof(struct nlmsghdr
)) {
1632 nlmsg_len
= nlh
->nlmsg_len
;
1633 if (nlmsg_len
< sizeof(struct nlmsghdr
) ||
1638 switch (nlh
->nlmsg_type
) {
1640 tswap_nlmsghdr(nlh
);
1646 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
1647 e
->error
= tswap32(e
->error
);
1648 tswap_nlmsghdr(&e
->msg
);
1649 tswap_nlmsghdr(nlh
);
1653 ret
= host_to_target_nlmsg(nlh
);
1655 tswap_nlmsghdr(nlh
);
1660 tswap_nlmsghdr(nlh
);
1661 len
-= NLMSG_ALIGN(nlmsg_len
);
1662 nlh
= (struct nlmsghdr
*)(((char*)nlh
) + NLMSG_ALIGN(nlmsg_len
));
1667 static abi_long
target_to_host_for_each_nlmsg(struct nlmsghdr
*nlh
,
1669 abi_long (*target_to_host_nlmsg
)
1670 (struct nlmsghdr
*))
1674 while (len
> sizeof(struct nlmsghdr
)) {
1675 if (tswap32(nlh
->nlmsg_len
) < sizeof(struct nlmsghdr
) ||
1676 tswap32(nlh
->nlmsg_len
) > len
) {
1679 tswap_nlmsghdr(nlh
);
1680 switch (nlh
->nlmsg_type
) {
1687 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
1688 e
->error
= tswap32(e
->error
);
1689 tswap_nlmsghdr(&e
->msg
);
1692 ret
= target_to_host_nlmsg(nlh
);
1697 len
-= NLMSG_ALIGN(nlh
->nlmsg_len
);
1698 nlh
= (struct nlmsghdr
*)(((char *)nlh
) + NLMSG_ALIGN(nlh
->nlmsg_len
));
1703 #ifdef CONFIG_RTNETLINK
1704 static abi_long
host_to_target_for_each_rtattr(struct rtattr
*rtattr
,
1706 abi_long (*host_to_target_rtattr
)
1709 unsigned short rta_len
;
1712 while (len
> sizeof(struct rtattr
)) {
1713 rta_len
= rtattr
->rta_len
;
1714 if (rta_len
< sizeof(struct rtattr
) ||
1718 ret
= host_to_target_rtattr(rtattr
);
1719 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
1720 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
1724 len
-= RTA_ALIGN(rta_len
);
1725 rtattr
= (struct rtattr
*)(((char *)rtattr
) + RTA_ALIGN(rta_len
));
1730 static abi_long
host_to_target_data_link_rtattr(struct rtattr
*rtattr
)
1733 struct rtnl_link_stats
*st
;
1734 struct rtnl_link_stats64
*st64
;
1735 struct rtnl_link_ifmap
*map
;
1737 switch (rtattr
->rta_type
) {
1740 case IFLA_BROADCAST
:
1746 case IFLA_OPERSTATE
:
1749 case IFLA_PROTO_DOWN
:
1756 case IFLA_CARRIER_CHANGES
:
1757 case IFLA_NUM_RX_QUEUES
:
1758 case IFLA_NUM_TX_QUEUES
:
1759 case IFLA_PROMISCUITY
:
1761 case IFLA_LINK_NETNSID
:
1765 u32
= RTA_DATA(rtattr
);
1766 *u32
= tswap32(*u32
);
1768 /* struct rtnl_link_stats */
1770 st
= RTA_DATA(rtattr
);
1771 st
->rx_packets
= tswap32(st
->rx_packets
);
1772 st
->tx_packets
= tswap32(st
->tx_packets
);
1773 st
->rx_bytes
= tswap32(st
->rx_bytes
);
1774 st
->tx_bytes
= tswap32(st
->tx_bytes
);
1775 st
->rx_errors
= tswap32(st
->rx_errors
);
1776 st
->tx_errors
= tswap32(st
->tx_errors
);
1777 st
->rx_dropped
= tswap32(st
->rx_dropped
);
1778 st
->tx_dropped
= tswap32(st
->tx_dropped
);
1779 st
->multicast
= tswap32(st
->multicast
);
1780 st
->collisions
= tswap32(st
->collisions
);
1782 /* detailed rx_errors: */
1783 st
->rx_length_errors
= tswap32(st
->rx_length_errors
);
1784 st
->rx_over_errors
= tswap32(st
->rx_over_errors
);
1785 st
->rx_crc_errors
= tswap32(st
->rx_crc_errors
);
1786 st
->rx_frame_errors
= tswap32(st
->rx_frame_errors
);
1787 st
->rx_fifo_errors
= tswap32(st
->rx_fifo_errors
);
1788 st
->rx_missed_errors
= tswap32(st
->rx_missed_errors
);
1790 /* detailed tx_errors */
1791 st
->tx_aborted_errors
= tswap32(st
->tx_aborted_errors
);
1792 st
->tx_carrier_errors
= tswap32(st
->tx_carrier_errors
);
1793 st
->tx_fifo_errors
= tswap32(st
->tx_fifo_errors
);
1794 st
->tx_heartbeat_errors
= tswap32(st
->tx_heartbeat_errors
);
1795 st
->tx_window_errors
= tswap32(st
->tx_window_errors
);
1798 st
->rx_compressed
= tswap32(st
->rx_compressed
);
1799 st
->tx_compressed
= tswap32(st
->tx_compressed
);
1801 /* struct rtnl_link_stats64 */
1803 st64
= RTA_DATA(rtattr
);
1804 st64
->rx_packets
= tswap64(st64
->rx_packets
);
1805 st64
->tx_packets
= tswap64(st64
->tx_packets
);
1806 st64
->rx_bytes
= tswap64(st64
->rx_bytes
);
1807 st64
->tx_bytes
= tswap64(st64
->tx_bytes
);
1808 st64
->rx_errors
= tswap64(st64
->rx_errors
);
1809 st64
->tx_errors
= tswap64(st64
->tx_errors
);
1810 st64
->rx_dropped
= tswap64(st64
->rx_dropped
);
1811 st64
->tx_dropped
= tswap64(st64
->tx_dropped
);
1812 st64
->multicast
= tswap64(st64
->multicast
);
1813 st64
->collisions
= tswap64(st64
->collisions
);
1815 /* detailed rx_errors: */
1816 st64
->rx_length_errors
= tswap64(st64
->rx_length_errors
);
1817 st64
->rx_over_errors
= tswap64(st64
->rx_over_errors
);
1818 st64
->rx_crc_errors
= tswap64(st64
->rx_crc_errors
);
1819 st64
->rx_frame_errors
= tswap64(st64
->rx_frame_errors
);
1820 st64
->rx_fifo_errors
= tswap64(st64
->rx_fifo_errors
);
1821 st64
->rx_missed_errors
= tswap64(st64
->rx_missed_errors
);
1823 /* detailed tx_errors */
1824 st64
->tx_aborted_errors
= tswap64(st64
->tx_aborted_errors
);
1825 st64
->tx_carrier_errors
= tswap64(st64
->tx_carrier_errors
);
1826 st64
->tx_fifo_errors
= tswap64(st64
->tx_fifo_errors
);
1827 st64
->tx_heartbeat_errors
= tswap64(st64
->tx_heartbeat_errors
);
1828 st64
->tx_window_errors
= tswap64(st64
->tx_window_errors
);
1831 st64
->rx_compressed
= tswap64(st64
->rx_compressed
);
1832 st64
->tx_compressed
= tswap64(st64
->tx_compressed
);
1834 /* struct rtnl_link_ifmap */
1836 map
= RTA_DATA(rtattr
);
1837 map
->mem_start
= tswap64(map
->mem_start
);
1838 map
->mem_end
= tswap64(map
->mem_end
);
1839 map
->base_addr
= tswap64(map
->base_addr
);
1840 map
->irq
= tswap16(map
->irq
);
1845 /* FIXME: implement nested type */
1846 gemu_log("Unimplemented nested type %d\n", rtattr
->rta_type
);
1849 gemu_log("Unknown host IFLA type: %d\n", rtattr
->rta_type
);
1855 static abi_long
host_to_target_data_addr_rtattr(struct rtattr
*rtattr
)
1858 struct ifa_cacheinfo
*ci
;
1860 switch (rtattr
->rta_type
) {
1861 /* binary: depends on family type */
1871 u32
= RTA_DATA(rtattr
);
1872 *u32
= tswap32(*u32
);
1874 /* struct ifa_cacheinfo */
1876 ci
= RTA_DATA(rtattr
);
1877 ci
->ifa_prefered
= tswap32(ci
->ifa_prefered
);
1878 ci
->ifa_valid
= tswap32(ci
->ifa_valid
);
1879 ci
->cstamp
= tswap32(ci
->cstamp
);
1880 ci
->tstamp
= tswap32(ci
->tstamp
);
1883 gemu_log("Unknown host IFA type: %d\n", rtattr
->rta_type
);
1889 static abi_long
host_to_target_data_route_rtattr(struct rtattr
*rtattr
)
1892 switch (rtattr
->rta_type
) {
1893 /* binary: depends on family type */
1902 u32
= RTA_DATA(rtattr
);
1903 *u32
= tswap32(*u32
);
1906 gemu_log("Unknown host RTA type: %d\n", rtattr
->rta_type
);
1912 static abi_long
host_to_target_link_rtattr(struct rtattr
*rtattr
,
1913 uint32_t rtattr_len
)
1915 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
1916 host_to_target_data_link_rtattr
);
1919 static abi_long
host_to_target_addr_rtattr(struct rtattr
*rtattr
,
1920 uint32_t rtattr_len
)
1922 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
1923 host_to_target_data_addr_rtattr
);
1926 static abi_long
host_to_target_route_rtattr(struct rtattr
*rtattr
,
1927 uint32_t rtattr_len
)
1929 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
1930 host_to_target_data_route_rtattr
);
1933 static abi_long
host_to_target_data_route(struct nlmsghdr
*nlh
)
1936 struct ifinfomsg
*ifi
;
1937 struct ifaddrmsg
*ifa
;
1940 nlmsg_len
= nlh
->nlmsg_len
;
1941 switch (nlh
->nlmsg_type
) {
1945 ifi
= NLMSG_DATA(nlh
);
1946 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
1947 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
1948 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
1949 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
1950 host_to_target_link_rtattr(IFLA_RTA(ifi
),
1951 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifi
)));
1956 ifa
= NLMSG_DATA(nlh
);
1957 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
1958 host_to_target_addr_rtattr(IFA_RTA(ifa
),
1959 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifa
)));
1964 rtm
= NLMSG_DATA(nlh
);
1965 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
1966 host_to_target_route_rtattr(RTM_RTA(rtm
),
1967 nlmsg_len
- NLMSG_LENGTH(sizeof(*rtm
)));
1970 return -TARGET_EINVAL
;
1975 static inline abi_long
host_to_target_nlmsg_route(struct nlmsghdr
*nlh
,
1978 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_route
);
1981 static abi_long
target_to_host_for_each_rtattr(struct rtattr
*rtattr
,
1983 abi_long (*target_to_host_rtattr
)
1988 while (len
>= sizeof(struct rtattr
)) {
1989 if (tswap16(rtattr
->rta_len
) < sizeof(struct rtattr
) ||
1990 tswap16(rtattr
->rta_len
) > len
) {
1993 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
1994 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
1995 ret
= target_to_host_rtattr(rtattr
);
1999 len
-= RTA_ALIGN(rtattr
->rta_len
);
2000 rtattr
= (struct rtattr
*)(((char *)rtattr
) +
2001 RTA_ALIGN(rtattr
->rta_len
));
2006 static abi_long
target_to_host_data_link_rtattr(struct rtattr
*rtattr
)
2008 switch (rtattr
->rta_type
) {
2010 gemu_log("Unknown target IFLA type: %d\n", rtattr
->rta_type
);
2016 static abi_long
target_to_host_data_addr_rtattr(struct rtattr
*rtattr
)
2018 switch (rtattr
->rta_type
) {
2019 /* binary: depends on family type */
2024 gemu_log("Unknown target IFA type: %d\n", rtattr
->rta_type
);
2030 static abi_long
target_to_host_data_route_rtattr(struct rtattr
*rtattr
)
2033 switch (rtattr
->rta_type
) {
2034 /* binary: depends on family type */
2041 u32
= RTA_DATA(rtattr
);
2042 *u32
= tswap32(*u32
);
2045 gemu_log("Unknown target RTA type: %d\n", rtattr
->rta_type
);
2051 static void target_to_host_link_rtattr(struct rtattr
*rtattr
,
2052 uint32_t rtattr_len
)
2054 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2055 target_to_host_data_link_rtattr
);
2058 static void target_to_host_addr_rtattr(struct rtattr
*rtattr
,
2059 uint32_t rtattr_len
)
2061 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2062 target_to_host_data_addr_rtattr
);
2065 static void target_to_host_route_rtattr(struct rtattr
*rtattr
,
2066 uint32_t rtattr_len
)
2068 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2069 target_to_host_data_route_rtattr
);
2072 static abi_long
target_to_host_data_route(struct nlmsghdr
*nlh
)
2074 struct ifinfomsg
*ifi
;
2075 struct ifaddrmsg
*ifa
;
2078 switch (nlh
->nlmsg_type
) {
2083 ifi
= NLMSG_DATA(nlh
);
2084 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2085 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2086 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2087 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2088 target_to_host_link_rtattr(IFLA_RTA(ifi
), nlh
->nlmsg_len
-
2089 NLMSG_LENGTH(sizeof(*ifi
)));
2094 ifa
= NLMSG_DATA(nlh
);
2095 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2096 target_to_host_addr_rtattr(IFA_RTA(ifa
), nlh
->nlmsg_len
-
2097 NLMSG_LENGTH(sizeof(*ifa
)));
2103 rtm
= NLMSG_DATA(nlh
);
2104 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2105 target_to_host_route_rtattr(RTM_RTA(rtm
), nlh
->nlmsg_len
-
2106 NLMSG_LENGTH(sizeof(*rtm
)));
2109 return -TARGET_EOPNOTSUPP
;
2114 static abi_long
target_to_host_nlmsg_route(struct nlmsghdr
*nlh
, size_t len
)
2116 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_route
);
2118 #endif /* CONFIG_RTNETLINK */
2120 static abi_long
host_to_target_data_audit(struct nlmsghdr
*nlh
)
2122 switch (nlh
->nlmsg_type
) {
2124 gemu_log("Unknown host audit message type %d\n",
2126 return -TARGET_EINVAL
;
2131 static inline abi_long
host_to_target_nlmsg_audit(struct nlmsghdr
*nlh
,
2134 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_audit
);
2137 static abi_long
target_to_host_data_audit(struct nlmsghdr
*nlh
)
2139 switch (nlh
->nlmsg_type
) {
2141 case AUDIT_FIRST_USER_MSG
... AUDIT_LAST_USER_MSG
:
2142 case AUDIT_FIRST_USER_MSG2
... AUDIT_LAST_USER_MSG2
:
2145 gemu_log("Unknown target audit message type %d\n",
2147 return -TARGET_EINVAL
;
2153 static abi_long
target_to_host_nlmsg_audit(struct nlmsghdr
*nlh
, size_t len
)
2155 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_audit
);
2158 /* do_setsockopt() Must return target values and target errnos. */
2159 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
2160 abi_ulong optval_addr
, socklen_t optlen
)
2164 struct ip_mreqn
*ip_mreq
;
2165 struct ip_mreq_source
*ip_mreq_source
;
2169 /* TCP options all take an 'int' value. */
2170 if (optlen
< sizeof(uint32_t))
2171 return -TARGET_EINVAL
;
2173 if (get_user_u32(val
, optval_addr
))
2174 return -TARGET_EFAULT
;
2175 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2182 case IP_ROUTER_ALERT
:
2186 case IP_MTU_DISCOVER
:
2192 case IP_MULTICAST_TTL
:
2193 case IP_MULTICAST_LOOP
:
2195 if (optlen
>= sizeof(uint32_t)) {
2196 if (get_user_u32(val
, optval_addr
))
2197 return -TARGET_EFAULT
;
2198 } else if (optlen
>= 1) {
2199 if (get_user_u8(val
, optval_addr
))
2200 return -TARGET_EFAULT
;
2202 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2204 case IP_ADD_MEMBERSHIP
:
2205 case IP_DROP_MEMBERSHIP
:
2206 if (optlen
< sizeof (struct target_ip_mreq
) ||
2207 optlen
> sizeof (struct target_ip_mreqn
))
2208 return -TARGET_EINVAL
;
2210 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
2211 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
2212 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
2215 case IP_BLOCK_SOURCE
:
2216 case IP_UNBLOCK_SOURCE
:
2217 case IP_ADD_SOURCE_MEMBERSHIP
:
2218 case IP_DROP_SOURCE_MEMBERSHIP
:
2219 if (optlen
!= sizeof (struct target_ip_mreq_source
))
2220 return -TARGET_EINVAL
;
2222 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2223 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
2224 unlock_user (ip_mreq_source
, optval_addr
, 0);
2233 case IPV6_MTU_DISCOVER
:
2236 case IPV6_RECVPKTINFO
:
2238 if (optlen
< sizeof(uint32_t)) {
2239 return -TARGET_EINVAL
;
2241 if (get_user_u32(val
, optval_addr
)) {
2242 return -TARGET_EFAULT
;
2244 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2245 &val
, sizeof(val
)));
2254 /* struct icmp_filter takes an u32 value */
2255 if (optlen
< sizeof(uint32_t)) {
2256 return -TARGET_EINVAL
;
2259 if (get_user_u32(val
, optval_addr
)) {
2260 return -TARGET_EFAULT
;
2262 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2263 &val
, sizeof(val
)));
2270 case TARGET_SOL_SOCKET
:
2272 case TARGET_SO_RCVTIMEO
:
2276 optname
= SO_RCVTIMEO
;
2279 if (optlen
!= sizeof(struct target_timeval
)) {
2280 return -TARGET_EINVAL
;
2283 if (copy_from_user_timeval(&tv
, optval_addr
)) {
2284 return -TARGET_EFAULT
;
2287 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2291 case TARGET_SO_SNDTIMEO
:
2292 optname
= SO_SNDTIMEO
;
2294 case TARGET_SO_ATTACH_FILTER
:
2296 struct target_sock_fprog
*tfprog
;
2297 struct target_sock_filter
*tfilter
;
2298 struct sock_fprog fprog
;
2299 struct sock_filter
*filter
;
2302 if (optlen
!= sizeof(*tfprog
)) {
2303 return -TARGET_EINVAL
;
2305 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
2306 return -TARGET_EFAULT
;
2308 if (!lock_user_struct(VERIFY_READ
, tfilter
,
2309 tswapal(tfprog
->filter
), 0)) {
2310 unlock_user_struct(tfprog
, optval_addr
, 1);
2311 return -TARGET_EFAULT
;
2314 fprog
.len
= tswap16(tfprog
->len
);
2315 filter
= g_try_new(struct sock_filter
, fprog
.len
);
2316 if (filter
== NULL
) {
2317 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2318 unlock_user_struct(tfprog
, optval_addr
, 1);
2319 return -TARGET_ENOMEM
;
2321 for (i
= 0; i
< fprog
.len
; i
++) {
2322 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2323 filter
[i
].jt
= tfilter
[i
].jt
;
2324 filter
[i
].jf
= tfilter
[i
].jf
;
2325 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2327 fprog
.filter
= filter
;
2329 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2330 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2333 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2334 unlock_user_struct(tfprog
, optval_addr
, 1);
2337 case TARGET_SO_BINDTODEVICE
:
2339 char *dev_ifname
, *addr_ifname
;
2341 if (optlen
> IFNAMSIZ
- 1) {
2342 optlen
= IFNAMSIZ
- 1;
2344 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2346 return -TARGET_EFAULT
;
2348 optname
= SO_BINDTODEVICE
;
2349 addr_ifname
= alloca(IFNAMSIZ
);
2350 memcpy(addr_ifname
, dev_ifname
, optlen
);
2351 addr_ifname
[optlen
] = 0;
2352 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2353 addr_ifname
, optlen
));
2354 unlock_user (dev_ifname
, optval_addr
, 0);
2357 /* Options with 'int' argument. */
2358 case TARGET_SO_DEBUG
:
2361 case TARGET_SO_REUSEADDR
:
2362 optname
= SO_REUSEADDR
;
2364 case TARGET_SO_TYPE
:
2367 case TARGET_SO_ERROR
:
2370 case TARGET_SO_DONTROUTE
:
2371 optname
= SO_DONTROUTE
;
2373 case TARGET_SO_BROADCAST
:
2374 optname
= SO_BROADCAST
;
2376 case TARGET_SO_SNDBUF
:
2377 optname
= SO_SNDBUF
;
2379 case TARGET_SO_SNDBUFFORCE
:
2380 optname
= SO_SNDBUFFORCE
;
2382 case TARGET_SO_RCVBUF
:
2383 optname
= SO_RCVBUF
;
2385 case TARGET_SO_RCVBUFFORCE
:
2386 optname
= SO_RCVBUFFORCE
;
2388 case TARGET_SO_KEEPALIVE
:
2389 optname
= SO_KEEPALIVE
;
2391 case TARGET_SO_OOBINLINE
:
2392 optname
= SO_OOBINLINE
;
2394 case TARGET_SO_NO_CHECK
:
2395 optname
= SO_NO_CHECK
;
2397 case TARGET_SO_PRIORITY
:
2398 optname
= SO_PRIORITY
;
2401 case TARGET_SO_BSDCOMPAT
:
2402 optname
= SO_BSDCOMPAT
;
2405 case TARGET_SO_PASSCRED
:
2406 optname
= SO_PASSCRED
;
2408 case TARGET_SO_PASSSEC
:
2409 optname
= SO_PASSSEC
;
2411 case TARGET_SO_TIMESTAMP
:
2412 optname
= SO_TIMESTAMP
;
2414 case TARGET_SO_RCVLOWAT
:
2415 optname
= SO_RCVLOWAT
;
2421 if (optlen
< sizeof(uint32_t))
2422 return -TARGET_EINVAL
;
2424 if (get_user_u32(val
, optval_addr
))
2425 return -TARGET_EFAULT
;
2426 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
2430 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
2431 ret
= -TARGET_ENOPROTOOPT
;
2436 /* do_getsockopt() Must return target values and target errnos. */
2437 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
2438 abi_ulong optval_addr
, abi_ulong optlen
)
2445 case TARGET_SOL_SOCKET
:
2448 /* These don't just return a single integer */
2449 case TARGET_SO_LINGER
:
2450 case TARGET_SO_RCVTIMEO
:
2451 case TARGET_SO_SNDTIMEO
:
2452 case TARGET_SO_PEERNAME
:
2454 case TARGET_SO_PEERCRED
: {
2457 struct target_ucred
*tcr
;
2459 if (get_user_u32(len
, optlen
)) {
2460 return -TARGET_EFAULT
;
2463 return -TARGET_EINVAL
;
2467 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
2475 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
2476 return -TARGET_EFAULT
;
2478 __put_user(cr
.pid
, &tcr
->pid
);
2479 __put_user(cr
.uid
, &tcr
->uid
);
2480 __put_user(cr
.gid
, &tcr
->gid
);
2481 unlock_user_struct(tcr
, optval_addr
, 1);
2482 if (put_user_u32(len
, optlen
)) {
2483 return -TARGET_EFAULT
;
2487 /* Options with 'int' argument. */
2488 case TARGET_SO_DEBUG
:
2491 case TARGET_SO_REUSEADDR
:
2492 optname
= SO_REUSEADDR
;
2494 case TARGET_SO_TYPE
:
2497 case TARGET_SO_ERROR
:
2500 case TARGET_SO_DONTROUTE
:
2501 optname
= SO_DONTROUTE
;
2503 case TARGET_SO_BROADCAST
:
2504 optname
= SO_BROADCAST
;
2506 case TARGET_SO_SNDBUF
:
2507 optname
= SO_SNDBUF
;
2509 case TARGET_SO_RCVBUF
:
2510 optname
= SO_RCVBUF
;
2512 case TARGET_SO_KEEPALIVE
:
2513 optname
= SO_KEEPALIVE
;
2515 case TARGET_SO_OOBINLINE
:
2516 optname
= SO_OOBINLINE
;
2518 case TARGET_SO_NO_CHECK
:
2519 optname
= SO_NO_CHECK
;
2521 case TARGET_SO_PRIORITY
:
2522 optname
= SO_PRIORITY
;
2525 case TARGET_SO_BSDCOMPAT
:
2526 optname
= SO_BSDCOMPAT
;
2529 case TARGET_SO_PASSCRED
:
2530 optname
= SO_PASSCRED
;
2532 case TARGET_SO_TIMESTAMP
:
2533 optname
= SO_TIMESTAMP
;
2535 case TARGET_SO_RCVLOWAT
:
2536 optname
= SO_RCVLOWAT
;
2538 case TARGET_SO_ACCEPTCONN
:
2539 optname
= SO_ACCEPTCONN
;
2546 /* TCP options all take an 'int' value. */
2548 if (get_user_u32(len
, optlen
))
2549 return -TARGET_EFAULT
;
2551 return -TARGET_EINVAL
;
2553 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2556 if (optname
== SO_TYPE
) {
2557 val
= host_to_target_sock_type(val
);
2562 if (put_user_u32(val
, optval_addr
))
2563 return -TARGET_EFAULT
;
2565 if (put_user_u8(val
, optval_addr
))
2566 return -TARGET_EFAULT
;
2568 if (put_user_u32(len
, optlen
))
2569 return -TARGET_EFAULT
;
2576 case IP_ROUTER_ALERT
:
2580 case IP_MTU_DISCOVER
:
2586 case IP_MULTICAST_TTL
:
2587 case IP_MULTICAST_LOOP
:
2588 if (get_user_u32(len
, optlen
))
2589 return -TARGET_EFAULT
;
2591 return -TARGET_EINVAL
;
2593 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2596 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2598 if (put_user_u32(len
, optlen
)
2599 || put_user_u8(val
, optval_addr
))
2600 return -TARGET_EFAULT
;
2602 if (len
> sizeof(int))
2604 if (put_user_u32(len
, optlen
)
2605 || put_user_u32(val
, optval_addr
))
2606 return -TARGET_EFAULT
;
2610 ret
= -TARGET_ENOPROTOOPT
;
2616 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2618 ret
= -TARGET_EOPNOTSUPP
;
2624 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
2625 int count
, int copy
)
2627 struct target_iovec
*target_vec
;
2629 abi_ulong total_len
, max_len
;
2632 bool bad_address
= false;
2638 if (count
< 0 || count
> IOV_MAX
) {
2643 vec
= g_try_new0(struct iovec
, count
);
2649 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2650 count
* sizeof(struct target_iovec
), 1);
2651 if (target_vec
== NULL
) {
2656 /* ??? If host page size > target page size, this will result in a
2657 value larger than what we can actually support. */
2658 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
2661 for (i
= 0; i
< count
; i
++) {
2662 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2663 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2668 } else if (len
== 0) {
2669 /* Zero length pointer is ignored. */
2670 vec
[i
].iov_base
= 0;
2672 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
2673 /* If the first buffer pointer is bad, this is a fault. But
2674 * subsequent bad buffers will result in a partial write; this
2675 * is realized by filling the vector with null pointers and
2677 if (!vec
[i
].iov_base
) {
2688 if (len
> max_len
- total_len
) {
2689 len
= max_len
- total_len
;
2692 vec
[i
].iov_len
= len
;
2696 unlock_user(target_vec
, target_addr
, 0);
2701 if (tswapal(target_vec
[i
].iov_len
) > 0) {
2702 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
2705 unlock_user(target_vec
, target_addr
, 0);
2712 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
2713 int count
, int copy
)
2715 struct target_iovec
*target_vec
;
2718 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2719 count
* sizeof(struct target_iovec
), 1);
2721 for (i
= 0; i
< count
; i
++) {
2722 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2723 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2727 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
2729 unlock_user(target_vec
, target_addr
, 0);
2735 static inline int target_to_host_sock_type(int *type
)
2738 int target_type
= *type
;
2740 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
2741 case TARGET_SOCK_DGRAM
:
2742 host_type
= SOCK_DGRAM
;
2744 case TARGET_SOCK_STREAM
:
2745 host_type
= SOCK_STREAM
;
2748 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
2751 if (target_type
& TARGET_SOCK_CLOEXEC
) {
2752 #if defined(SOCK_CLOEXEC)
2753 host_type
|= SOCK_CLOEXEC
;
2755 return -TARGET_EINVAL
;
2758 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2759 #if defined(SOCK_NONBLOCK)
2760 host_type
|= SOCK_NONBLOCK
;
2761 #elif !defined(O_NONBLOCK)
2762 return -TARGET_EINVAL
;
/* Try to emulate socket type flags after socket creation.
 * Returns fd on success, or -TARGET_EINVAL (closing fd) on failure.
 */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
2784 static abi_long
packet_target_to_host_sockaddr(void *host_addr
,
2785 abi_ulong target_addr
,
2788 struct sockaddr
*addr
= host_addr
;
2789 struct target_sockaddr
*target_saddr
;
2791 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
2792 if (!target_saddr
) {
2793 return -TARGET_EFAULT
;
2796 memcpy(addr
, target_saddr
, len
);
2797 addr
->sa_family
= tswap16(target_saddr
->sa_family
);
2798 /* spkt_protocol is big-endian */
2800 unlock_user(target_saddr
, target_addr
, 0);
2804 static TargetFdTrans target_packet_trans
= {
2805 .target_to_host_addr
= packet_target_to_host_sockaddr
,
#ifdef CONFIG_RTNETLINK
/* Byte-swap a NETLINK_ROUTE message travelling guest -> host. */
static abi_long netlink_route_target_to_host(void *buf, size_t len)
{
    return target_to_host_nlmsg_route(buf, len);
}

/* Byte-swap a NETLINK_ROUTE message travelling host -> guest. */
static abi_long netlink_route_host_to_target(void *buf, size_t len)
{
    return host_to_target_nlmsg_route(buf, len);
}

/* fd translator hooks for NETLINK_ROUTE sockets */
static TargetFdTrans target_netlink_route_trans = {
    .target_to_host_data = netlink_route_target_to_host,
    .host_to_target_data = netlink_route_host_to_target,
};
#endif /* CONFIG_RTNETLINK */
2825 static abi_long
netlink_audit_target_to_host(void *buf
, size_t len
)
2827 return target_to_host_nlmsg_audit(buf
, len
);
2830 static abi_long
netlink_audit_host_to_target(void *buf
, size_t len
)
2832 return host_to_target_nlmsg_audit(buf
, len
);
2835 static TargetFdTrans target_netlink_audit_trans
= {
2836 .target_to_host_data
= netlink_audit_target_to_host
,
2837 .host_to_target_data
= netlink_audit_host_to_target
,
2840 /* do_socket() Must return target values and target errnos. */
2841 static abi_long
do_socket(int domain
, int type
, int protocol
)
2843 int target_type
= type
;
2846 ret
= target_to_host_sock_type(&type
);
2851 if (domain
== PF_NETLINK
&& !(
2852 #ifdef CONFIG_RTNETLINK
2853 protocol
== NETLINK_ROUTE
||
2855 protocol
== NETLINK_KOBJECT_UEVENT
||
2856 protocol
== NETLINK_AUDIT
)) {
2857 return -EPFNOSUPPORT
;
2860 if (domain
== AF_PACKET
||
2861 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
2862 protocol
= tswap16(protocol
);
2865 ret
= get_errno(socket(domain
, type
, protocol
));
2867 ret
= sock_flags_fixup(ret
, target_type
);
2868 if (type
== SOCK_PACKET
) {
2869 /* Manage an obsolete case :
2870 * if socket type is SOCK_PACKET, bind by name
2872 fd_trans_register(ret
, &target_packet_trans
);
2873 } else if (domain
== PF_NETLINK
) {
2875 #ifdef CONFIG_RTNETLINK
2877 fd_trans_register(ret
, &target_netlink_route_trans
);
2880 case NETLINK_KOBJECT_UEVENT
:
2881 /* nothing to do: messages are strings */
2884 fd_trans_register(ret
, &target_netlink_audit_trans
);
2887 g_assert_not_reached();
2894 /* do_bind() Must return target values and target errnos. */
2895 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
2901 if ((int)addrlen
< 0) {
2902 return -TARGET_EINVAL
;
2905 addr
= alloca(addrlen
+1);
2907 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
2911 return get_errno(bind(sockfd
, addr
, addrlen
));
2914 /* do_connect() Must return target values and target errnos. */
2915 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
2921 if ((int)addrlen
< 0) {
2922 return -TARGET_EINVAL
;
2925 addr
= alloca(addrlen
+1);
2927 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
2931 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
2934 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2935 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
2936 int flags
, int send
)
2942 abi_ulong target_vec
;
2944 if (msgp
->msg_name
) {
2945 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
2946 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
2947 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
2948 tswapal(msgp
->msg_name
),
2954 msg
.msg_name
= NULL
;
2955 msg
.msg_namelen
= 0;
2957 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
2958 msg
.msg_control
= alloca(msg
.msg_controllen
);
2959 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
2961 count
= tswapal(msgp
->msg_iovlen
);
2962 target_vec
= tswapal(msgp
->msg_iov
);
2963 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
2964 target_vec
, count
, send
);
2966 ret
= -host_to_target_errno(errno
);
2969 msg
.msg_iovlen
= count
;
2973 if (fd_trans_target_to_host_data(fd
)) {
2974 ret
= fd_trans_target_to_host_data(fd
)(msg
.msg_iov
->iov_base
,
2975 msg
.msg_iov
->iov_len
);
2977 ret
= target_to_host_cmsg(&msg
, msgp
);
2980 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
2983 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
2984 if (!is_error(ret
)) {
2986 if (fd_trans_host_to_target_data(fd
)) {
2987 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
2988 msg
.msg_iov
->iov_len
);
2990 ret
= host_to_target_cmsg(msgp
, &msg
);
2992 if (!is_error(ret
)) {
2993 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
2994 if (msg
.msg_name
!= NULL
) {
2995 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
2996 msg
.msg_name
, msg
.msg_namelen
);
3008 unlock_iovec(vec
, target_vec
, count
, !send
);
3013 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
3014 int flags
, int send
)
3017 struct target_msghdr
*msgp
;
3019 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
3023 return -TARGET_EFAULT
;
3025 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
3026 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
3030 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3031 * so it might not have this *mmsg-specific flag either.
3033 #ifndef MSG_WAITFORONE
3034 #define MSG_WAITFORONE 0x10000
3037 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
3038 unsigned int vlen
, unsigned int flags
,
3041 struct target_mmsghdr
*mmsgp
;
3045 if (vlen
> UIO_MAXIOV
) {
3049 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
3051 return -TARGET_EFAULT
;
3054 for (i
= 0; i
< vlen
; i
++) {
3055 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
3056 if (is_error(ret
)) {
3059 mmsgp
[i
].msg_len
= tswap32(ret
);
3060 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3061 if (flags
& MSG_WAITFORONE
) {
3062 flags
|= MSG_DONTWAIT
;
3066 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3068 /* Return number of datagrams sent if we sent any at all;
3069 * otherwise return the error.
3077 /* do_accept4() Must return target values and target errnos. */
3078 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3079 abi_ulong target_addrlen_addr
, int flags
)
3086 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3088 if (target_addr
== 0) {
3089 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
3092 /* linux returns EINVAL if addrlen pointer is invalid */
3093 if (get_user_u32(addrlen
, target_addrlen_addr
))
3094 return -TARGET_EINVAL
;
3096 if ((int)addrlen
< 0) {
3097 return -TARGET_EINVAL
;
3100 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3101 return -TARGET_EINVAL
;
3103 addr
= alloca(addrlen
);
3105 ret
= get_errno(safe_accept4(fd
, addr
, &addrlen
, host_flags
));
3106 if (!is_error(ret
)) {
3107 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3108 if (put_user_u32(addrlen
, target_addrlen_addr
))
3109 ret
= -TARGET_EFAULT
;
3114 /* do_getpeername() Must return target values and target errnos. */
3115 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3116 abi_ulong target_addrlen_addr
)
3122 if (get_user_u32(addrlen
, target_addrlen_addr
))
3123 return -TARGET_EFAULT
;
3125 if ((int)addrlen
< 0) {
3126 return -TARGET_EINVAL
;
3129 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3130 return -TARGET_EFAULT
;
3132 addr
= alloca(addrlen
);
3134 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
3135 if (!is_error(ret
)) {
3136 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3137 if (put_user_u32(addrlen
, target_addrlen_addr
))
3138 ret
= -TARGET_EFAULT
;
3143 /* do_getsockname() Must return target values and target errnos. */
3144 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3145 abi_ulong target_addrlen_addr
)
3151 if (get_user_u32(addrlen
, target_addrlen_addr
))
3152 return -TARGET_EFAULT
;
3154 if ((int)addrlen
< 0) {
3155 return -TARGET_EINVAL
;
3158 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3159 return -TARGET_EFAULT
;
3161 addr
= alloca(addrlen
);
3163 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
3164 if (!is_error(ret
)) {
3165 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3166 if (put_user_u32(addrlen
, target_addrlen_addr
))
3167 ret
= -TARGET_EFAULT
;
3172 /* do_socketpair() Must return target values and target errnos. */
3173 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3174 abi_ulong target_tab_addr
)
3179 target_to_host_sock_type(&type
);
3181 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3182 if (!is_error(ret
)) {
3183 if (put_user_s32(tab
[0], target_tab_addr
)
3184 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3185 ret
= -TARGET_EFAULT
;
3190 /* do_sendto() Must return target values and target errnos. */
3191 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3192 abi_ulong target_addr
, socklen_t addrlen
)
3198 if ((int)addrlen
< 0) {
3199 return -TARGET_EINVAL
;
3202 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3204 return -TARGET_EFAULT
;
3205 if (fd_trans_target_to_host_data(fd
)) {
3206 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
3208 unlock_user(host_msg
, msg
, 0);
3213 addr
= alloca(addrlen
+1);
3214 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
3216 unlock_user(host_msg
, msg
, 0);
3219 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
3221 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
3223 unlock_user(host_msg
, msg
, 0);
3227 /* do_recvfrom() Must return target values and target errnos. */
3228 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
3229 abi_ulong target_addr
,
3230 abi_ulong target_addrlen
)
3237 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
3239 return -TARGET_EFAULT
;
3241 if (get_user_u32(addrlen
, target_addrlen
)) {
3242 ret
= -TARGET_EFAULT
;
3245 if ((int)addrlen
< 0) {
3246 ret
= -TARGET_EINVAL
;
3249 addr
= alloca(addrlen
);
3250 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
3253 addr
= NULL
; /* To keep compiler quiet. */
3254 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
3256 if (!is_error(ret
)) {
3258 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3259 if (put_user_u32(addrlen
, target_addrlen
)) {
3260 ret
= -TARGET_EFAULT
;
3264 unlock_user(host_msg
, msg
, len
);
3267 unlock_user(host_msg
, msg
, 0);
#ifdef TARGET_NR_socketcall
/* do_socketcall() Must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned ac[] = { /* number of arguments per call */
        [SOCKOP_socket] = 3,      /* domain, type, protocol */
        [SOCKOP_bind] = 3,        /* sockfd, addr, addrlen */
        [SOCKOP_connect] = 3,     /* sockfd, addr, addrlen */
        [SOCKOP_listen] = 2,      /* sockfd, backlog */
        [SOCKOP_accept] = 3,      /* sockfd, addr, addrlen */
        [SOCKOP_accept4] = 4,     /* sockfd, addr, addrlen, flags */
        [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_socketpair] = 4,  /* domain, type, protocol, tab */
        [SOCKOP_send] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_recv] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_sendto] = 6,      /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_recvfrom] = 6,    /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_shutdown] = 2,    /* sockfd, how */
        [SOCKOP_sendmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_recvmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_sendmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_recvmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_setsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
        [SOCKOP_getsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* first, collect the arguments in a[] according to ac[] */
    if (num >= 0 && num < ARRAY_SIZE(ac)) {
        assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
        for (i = 0; i < ac[num]; ++i) {
            if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
                return -TARGET_EFAULT;
            }
        }
    }

    /* now when we have the args, actually handle the call */
    switch (num) {
    case SOCKOP_socket: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case SOCKOP_bind: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case SOCKOP_connect: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case SOCKOP_listen: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case SOCKOP_accept: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case SOCKOP_getsockname: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case SOCKOP_getpeername: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case SOCKOP_socketpair: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case SOCKOP_send: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_recv: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_shutdown: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case SOCKOP_sendmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case SOCKOP_recvmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case SOCKOP_sendmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    case SOCKOP_recvmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_ENOSYS;
    }
}
#endif
3360 #define N_SHM_REGIONS 32
3362 static struct shm_region
{
3366 } shm_regions
[N_SHM_REGIONS
];
3368 struct target_semid_ds
3370 struct target_ipc_perm sem_perm
;
3371 abi_ulong sem_otime
;
3372 #if !defined(TARGET_PPC64)
3373 abi_ulong __unused1
;
3375 abi_ulong sem_ctime
;
3376 #if !defined(TARGET_PPC64)
3377 abi_ulong __unused2
;
3379 abi_ulong sem_nsems
;
3380 abi_ulong __unused3
;
3381 abi_ulong __unused4
;
3384 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
3385 abi_ulong target_addr
)
3387 struct target_ipc_perm
*target_ip
;
3388 struct target_semid_ds
*target_sd
;
3390 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3391 return -TARGET_EFAULT
;
3392 target_ip
= &(target_sd
->sem_perm
);
3393 host_ip
->__key
= tswap32(target_ip
->__key
);
3394 host_ip
->uid
= tswap32(target_ip
->uid
);
3395 host_ip
->gid
= tswap32(target_ip
->gid
);
3396 host_ip
->cuid
= tswap32(target_ip
->cuid
);
3397 host_ip
->cgid
= tswap32(target_ip
->cgid
);
3398 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3399 host_ip
->mode
= tswap32(target_ip
->mode
);
3401 host_ip
->mode
= tswap16(target_ip
->mode
);
3403 #if defined(TARGET_PPC)
3404 host_ip
->__seq
= tswap32(target_ip
->__seq
);
3406 host_ip
->__seq
= tswap16(target_ip
->__seq
);
3408 unlock_user_struct(target_sd
, target_addr
, 0);
3412 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
3413 struct ipc_perm
*host_ip
)
3415 struct target_ipc_perm
*target_ip
;
3416 struct target_semid_ds
*target_sd
;
3418 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3419 return -TARGET_EFAULT
;
3420 target_ip
= &(target_sd
->sem_perm
);
3421 target_ip
->__key
= tswap32(host_ip
->__key
);
3422 target_ip
->uid
= tswap32(host_ip
->uid
);
3423 target_ip
->gid
= tswap32(host_ip
->gid
);
3424 target_ip
->cuid
= tswap32(host_ip
->cuid
);
3425 target_ip
->cgid
= tswap32(host_ip
->cgid
);
3426 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3427 target_ip
->mode
= tswap32(host_ip
->mode
);
3429 target_ip
->mode
= tswap16(host_ip
->mode
);
3431 #if defined(TARGET_PPC)
3432 target_ip
->__seq
= tswap32(host_ip
->__seq
);
3434 target_ip
->__seq
= tswap16(host_ip
->__seq
);
3436 unlock_user_struct(target_sd
, target_addr
, 1);
3440 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
3441 abi_ulong target_addr
)
3443 struct target_semid_ds
*target_sd
;
3445 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3446 return -TARGET_EFAULT
;
3447 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
3448 return -TARGET_EFAULT
;
3449 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
3450 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
3451 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
3452 unlock_user_struct(target_sd
, target_addr
, 0);
3456 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
3457 struct semid_ds
*host_sd
)
3459 struct target_semid_ds
*target_sd
;
3461 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3462 return -TARGET_EFAULT
;
3463 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
3464 return -TARGET_EFAULT
;
3465 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
3466 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
3467 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
3468 unlock_user_struct(target_sd
, target_addr
, 1);
3472 struct target_seminfo
{
3485 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
3486 struct seminfo
*host_seminfo
)
3488 struct target_seminfo
*target_seminfo
;
3489 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
3490 return -TARGET_EFAULT
;
3491 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
3492 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
3493 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
3494 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
3495 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
3496 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
3497 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
3498 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
3499 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
3500 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
3501 unlock_user_struct(target_seminfo
, target_addr
, 1);
3507 struct semid_ds
*buf
;
3508 unsigned short *array
;
3509 struct seminfo
*__buf
;
3512 union target_semun
{
3519 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
3520 abi_ulong target_addr
)
3523 unsigned short *array
;
3525 struct semid_ds semid_ds
;
3528 semun
.buf
= &semid_ds
;
3530 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3532 return get_errno(ret
);
3534 nsems
= semid_ds
.sem_nsems
;
3536 *host_array
= g_try_new(unsigned short, nsems
);
3538 return -TARGET_ENOMEM
;
3540 array
= lock_user(VERIFY_READ
, target_addr
,
3541 nsems
*sizeof(unsigned short), 1);
3543 g_free(*host_array
);
3544 return -TARGET_EFAULT
;
3547 for(i
=0; i
<nsems
; i
++) {
3548 __get_user((*host_array
)[i
], &array
[i
]);
3550 unlock_user(array
, target_addr
, 0);
3555 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
3556 unsigned short **host_array
)
3559 unsigned short *array
;
3561 struct semid_ds semid_ds
;
3564 semun
.buf
= &semid_ds
;
3566 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3568 return get_errno(ret
);
3570 nsems
= semid_ds
.sem_nsems
;
3572 array
= lock_user(VERIFY_WRITE
, target_addr
,
3573 nsems
*sizeof(unsigned short), 0);
3575 return -TARGET_EFAULT
;
3577 for(i
=0; i
<nsems
; i
++) {
3578 __put_user((*host_array
)[i
], &array
[i
]);
3580 g_free(*host_array
);
3581 unlock_user(array
, target_addr
, 1);
3586 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
3587 abi_ulong target_arg
)
3589 union target_semun target_su
= { .buf
= target_arg
};
3591 struct semid_ds dsarg
;
3592 unsigned short *array
= NULL
;
3593 struct seminfo seminfo
;
3594 abi_long ret
= -TARGET_EINVAL
;
3601 /* In 64 bit cross-endian situations, we will erroneously pick up
3602 * the wrong half of the union for the "val" element. To rectify
3603 * this, the entire 8-byte structure is byteswapped, followed by
3604 * a swap of the 4 byte val field. In other cases, the data is
3605 * already in proper host byte order. */
3606 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
3607 target_su
.buf
= tswapal(target_su
.buf
);
3608 arg
.val
= tswap32(target_su
.val
);
3610 arg
.val
= target_su
.val
;
3612 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3616 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
3620 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3621 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
3628 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
3632 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3633 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
3639 arg
.__buf
= &seminfo
;
3640 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3641 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
3649 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
3656 struct target_sembuf
{
3657 unsigned short sem_num
;
3662 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
3663 abi_ulong target_addr
,
3666 struct target_sembuf
*target_sembuf
;
3669 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
3670 nsops
*sizeof(struct target_sembuf
), 1);
3672 return -TARGET_EFAULT
;
3674 for(i
=0; i
<nsops
; i
++) {
3675 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
3676 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
3677 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
3680 unlock_user(target_sembuf
, target_addr
, 0);
3685 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
3687 struct sembuf sops
[nsops
];
3689 if (target_to_host_sembuf(sops
, ptr
, nsops
))
3690 return -TARGET_EFAULT
;
3692 return get_errno(safe_semtimedop(semid
, sops
, nsops
, NULL
));
3695 struct target_msqid_ds
3697 struct target_ipc_perm msg_perm
;
3698 abi_ulong msg_stime
;
3699 #if TARGET_ABI_BITS == 32
3700 abi_ulong __unused1
;
3702 abi_ulong msg_rtime
;
3703 #if TARGET_ABI_BITS == 32
3704 abi_ulong __unused2
;
3706 abi_ulong msg_ctime
;
3707 #if TARGET_ABI_BITS == 32
3708 abi_ulong __unused3
;
3710 abi_ulong __msg_cbytes
;
3712 abi_ulong msg_qbytes
;
3713 abi_ulong msg_lspid
;
3714 abi_ulong msg_lrpid
;
3715 abi_ulong __unused4
;
3716 abi_ulong __unused5
;
3719 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
3720 abi_ulong target_addr
)
3722 struct target_msqid_ds
*target_md
;
3724 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
3725 return -TARGET_EFAULT
;
3726 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
3727 return -TARGET_EFAULT
;
3728 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
3729 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
3730 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
3731 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
3732 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
3733 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
3734 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
3735 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
3736 unlock_user_struct(target_md
, target_addr
, 0);
3740 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
3741 struct msqid_ds
*host_md
)
3743 struct target_msqid_ds
*target_md
;
3745 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
3746 return -TARGET_EFAULT
;
3747 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
3748 return -TARGET_EFAULT
;
3749 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
3750 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
3751 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
3752 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
3753 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
3754 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
3755 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
3756 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
3757 unlock_user_struct(target_md
, target_addr
, 1);
/* Guest view of struct msginfo (IPC_INFO/MSG_INFO).  Fields other than
 * msgseg were lost in extraction and are reconstructed from the
 * __put_user calls in host_to_target_msginfo() — TODO confirm. */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
3772 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
3773 struct msginfo
*host_msginfo
)
3775 struct target_msginfo
*target_msginfo
;
3776 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
3777 return -TARGET_EFAULT
;
3778 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
3779 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
3780 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
3781 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
3782 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
3783 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
3784 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
3785 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
3786 unlock_user_struct(target_msginfo
, target_addr
, 1);
3790 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
3792 struct msqid_ds dsarg
;
3793 struct msginfo msginfo
;
3794 abi_long ret
= -TARGET_EINVAL
;
3802 if (target_to_host_msqid_ds(&dsarg
,ptr
))
3803 return -TARGET_EFAULT
;
3804 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
3805 if (host_to_target_msqid_ds(ptr
,&dsarg
))
3806 return -TARGET_EFAULT
;
3809 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
3813 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
3814 if (host_to_target_msginfo(ptr
, &msginfo
))
3815 return -TARGET_EFAULT
;
3822 struct target_msgbuf
{
3827 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
3828 ssize_t msgsz
, int msgflg
)
3830 struct target_msgbuf
*target_mb
;
3831 struct msgbuf
*host_mb
;
3835 return -TARGET_EINVAL
;
3838 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
3839 return -TARGET_EFAULT
;
3840 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
3842 unlock_user_struct(target_mb
, msgp
, 0);
3843 return -TARGET_ENOMEM
;
3845 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
3846 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
3847 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
3849 unlock_user_struct(target_mb
, msgp
, 0);
3854 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
3855 ssize_t msgsz
, abi_long msgtyp
,
3858 struct target_msgbuf
*target_mb
;
3860 struct msgbuf
*host_mb
;
3864 return -TARGET_EINVAL
;
3867 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
3868 return -TARGET_EFAULT
;
3870 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
3872 ret
= -TARGET_ENOMEM
;
3875 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
3878 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
3879 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
3880 if (!target_mtext
) {
3881 ret
= -TARGET_EFAULT
;
3884 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
3885 unlock_user(target_mtext
, target_mtext_addr
, ret
);
3888 target_mb
->mtype
= tswapal(host_mb
->mtype
);
3892 unlock_user_struct(target_mb
, msgp
, 1);
3897 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
3898 abi_ulong target_addr
)
3900 struct target_shmid_ds
*target_sd
;
3902 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3903 return -TARGET_EFAULT
;
3904 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
3905 return -TARGET_EFAULT
;
3906 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
3907 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
3908 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
3909 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
3910 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
3911 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
3912 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
3913 unlock_user_struct(target_sd
, target_addr
, 0);
3917 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
3918 struct shmid_ds
*host_sd
)
3920 struct target_shmid_ds
*target_sd
;
3922 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3923 return -TARGET_EFAULT
;
3924 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
3925 return -TARGET_EFAULT
;
3926 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
3927 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
3928 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
3929 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
3930 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
3931 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
3932 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
3933 unlock_user_struct(target_sd
, target_addr
, 1);
3937 struct target_shminfo
{
3945 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
3946 struct shminfo
*host_shminfo
)
3948 struct target_shminfo
*target_shminfo
;
3949 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
3950 return -TARGET_EFAULT
;
3951 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
3952 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
3953 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
3954 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
3955 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
3956 unlock_user_struct(target_shminfo
, target_addr
, 1);
3960 struct target_shm_info
{
3965 abi_ulong swap_attempts
;
3966 abi_ulong swap_successes
;
3969 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
3970 struct shm_info
*host_shm_info
)
3972 struct target_shm_info
*target_shm_info
;
3973 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
3974 return -TARGET_EFAULT
;
3975 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
3976 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
3977 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
3978 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
3979 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
3980 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
3981 unlock_user_struct(target_shm_info
, target_addr
, 1);
3985 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
3987 struct shmid_ds dsarg
;
3988 struct shminfo shminfo
;
3989 struct shm_info shm_info
;
3990 abi_long ret
= -TARGET_EINVAL
;
3998 if (target_to_host_shmid_ds(&dsarg
, buf
))
3999 return -TARGET_EFAULT
;
4000 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
4001 if (host_to_target_shmid_ds(buf
, &dsarg
))
4002 return -TARGET_EFAULT
;
4005 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
4006 if (host_to_target_shminfo(buf
, &shminfo
))
4007 return -TARGET_EFAULT
;
4010 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
4011 if (host_to_target_shm_info(buf
, &shm_info
))
4012 return -TARGET_EFAULT
;
4017 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
4024 static inline abi_ulong
do_shmat(int shmid
, abi_ulong shmaddr
, int shmflg
)
4028 struct shmid_ds shm_info
;
4031 /* find out the length of the shared memory segment */
4032 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
4033 if (is_error(ret
)) {
4034 /* can't get length, bail out */
4041 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
4043 abi_ulong mmap_start
;
4045 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
4047 if (mmap_start
== -1) {
4049 host_raddr
= (void *)-1;
4051 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
4054 if (host_raddr
== (void *)-1) {
4056 return get_errno((long)host_raddr
);
4058 raddr
=h2g((unsigned long)host_raddr
);
4060 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
4061 PAGE_VALID
| PAGE_READ
|
4062 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
4064 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
4065 if (!shm_regions
[i
].in_use
) {
4066 shm_regions
[i
].in_use
= true;
4067 shm_regions
[i
].start
= raddr
;
4068 shm_regions
[i
].size
= shm_info
.shm_segsz
;
4078 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
4082 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
4083 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
4084 shm_regions
[i
].in_use
= false;
4085 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
4090 return get_errno(shmdt(g2h(shmaddr
)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos.
 * Dispatches the multiplexed sys_ipc call to the individual IPC helpers.
 * Case labels lost in extraction, reconstructed — TODO confirm. */
static abi_long do_ipc(unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old-style callers pass a kludge struct holding both the
                 * message pointer and the type. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                                tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
4199 /* kernel structure types definitions */
4201 #define STRUCT(name, ...) STRUCT_ ## name,
4202 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4204 #include "syscall_types.h"
4208 #undef STRUCT_SPECIAL
4210 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4211 #define STRUCT_SPECIAL(name)
4212 #include "syscall_types.h"
4214 #undef STRUCT_SPECIAL
4216 typedef struct IOCTLEntry IOCTLEntry
;
4218 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4219 int fd
, int cmd
, abi_long arg
);
4223 unsigned int host_cmd
;
4226 do_ioctl_fn
*do_ioctl
;
4227 const argtype arg_type
[5];
4230 #define IOC_R 0x0001
4231 #define IOC_W 0x0002
4232 #define IOC_RW (IOC_R | IOC_W)
4234 #define MAX_STRUCT_SIZE 4096
4236 #ifdef CONFIG_FIEMAP
4237 /* So fiemap access checks don't overflow on 32 bit systems.
4238 * This is very slightly smaller than the limit imposed by
4239 * the underlying kernel.
4241 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4242 / sizeof(struct fiemap_extent))
4244 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4245 int fd
, int cmd
, abi_long arg
)
4247 /* The parameter for this ioctl is a struct fiemap followed
4248 * by an array of struct fiemap_extent whose size is set
4249 * in fiemap->fm_extent_count. The array is filled in by the
4252 int target_size_in
, target_size_out
;
4254 const argtype
*arg_type
= ie
->arg_type
;
4255 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
4258 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
4262 assert(arg_type
[0] == TYPE_PTR
);
4263 assert(ie
->access
== IOC_RW
);
4265 target_size_in
= thunk_type_size(arg_type
, 0);
4266 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
4268 return -TARGET_EFAULT
;
4270 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4271 unlock_user(argptr
, arg
, 0);
4272 fm
= (struct fiemap
*)buf_temp
;
4273 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
4274 return -TARGET_EINVAL
;
4277 outbufsz
= sizeof (*fm
) +
4278 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
4280 if (outbufsz
> MAX_STRUCT_SIZE
) {
4281 /* We can't fit all the extents into the fixed size buffer.
4282 * Allocate one that is large enough and use it instead.
4284 fm
= g_try_malloc(outbufsz
);
4286 return -TARGET_ENOMEM
;
4288 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
4291 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, fm
));
4292 if (!is_error(ret
)) {
4293 target_size_out
= target_size_in
;
4294 /* An extent_count of 0 means we were only counting the extents
4295 * so there are no structs to copy
4297 if (fm
->fm_extent_count
!= 0) {
4298 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
4300 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
4302 ret
= -TARGET_EFAULT
;
4304 /* Convert the struct fiemap */
4305 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
4306 if (fm
->fm_extent_count
!= 0) {
4307 p
= argptr
+ target_size_in
;
4308 /* ...and then all the struct fiemap_extents */
4309 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
4310 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
4315 unlock_user(argptr
, arg
, target_size_out
);
4325 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4326 int fd
, int cmd
, abi_long arg
)
4328 const argtype
*arg_type
= ie
->arg_type
;
4332 struct ifconf
*host_ifconf
;
4334 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
4335 int target_ifreq_size
;
4340 abi_long target_ifc_buf
;
4344 assert(arg_type
[0] == TYPE_PTR
);
4345 assert(ie
->access
== IOC_RW
);
4348 target_size
= thunk_type_size(arg_type
, 0);
4350 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4352 return -TARGET_EFAULT
;
4353 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4354 unlock_user(argptr
, arg
, 0);
4356 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
4357 target_ifc_len
= host_ifconf
->ifc_len
;
4358 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
4360 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
4361 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
4362 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
4364 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
4365 if (outbufsz
> MAX_STRUCT_SIZE
) {
4366 /* We can't fit all the extents into the fixed size buffer.
4367 * Allocate one that is large enough and use it instead.
4369 host_ifconf
= malloc(outbufsz
);
4371 return -TARGET_ENOMEM
;
4373 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
4376 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
4378 host_ifconf
->ifc_len
= host_ifc_len
;
4379 host_ifconf
->ifc_buf
= host_ifc_buf
;
4381 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
4382 if (!is_error(ret
)) {
4383 /* convert host ifc_len to target ifc_len */
4385 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
4386 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
4387 host_ifconf
->ifc_len
= target_ifc_len
;
4389 /* restore target ifc_buf */
4391 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
4393 /* copy struct ifconf to target user */
4395 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4397 return -TARGET_EFAULT
;
4398 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
4399 unlock_user(argptr
, arg
, target_size
);
4401 /* copy ifreq[] to target user */
4403 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
4404 for (i
= 0; i
< nb_ifreq
; i
++) {
4405 thunk_convert(argptr
+ i
* target_ifreq_size
,
4406 host_ifc_buf
+ i
* sizeof(struct ifreq
),
4407 ifreq_arg_type
, THUNK_TARGET
);
4409 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
4419 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
4420 int cmd
, abi_long arg
)
4423 struct dm_ioctl
*host_dm
;
4424 abi_long guest_data
;
4425 uint32_t guest_data_size
;
4427 const argtype
*arg_type
= ie
->arg_type
;
4429 void *big_buf
= NULL
;
4433 target_size
= thunk_type_size(arg_type
, 0);
4434 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4436 ret
= -TARGET_EFAULT
;
4439 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4440 unlock_user(argptr
, arg
, 0);
4442 /* buf_temp is too small, so fetch things into a bigger buffer */
4443 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
4444 memcpy(big_buf
, buf_temp
, target_size
);
4448 guest_data
= arg
+ host_dm
->data_start
;
4449 if ((guest_data
- arg
) < 0) {
4453 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
4454 host_data
= (char*)host_dm
+ host_dm
->data_start
;
4456 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
4457 switch (ie
->host_cmd
) {
4459 case DM_LIST_DEVICES
:
4462 case DM_DEV_SUSPEND
:
4465 case DM_TABLE_STATUS
:
4466 case DM_TABLE_CLEAR
:
4468 case DM_LIST_VERSIONS
:
4472 case DM_DEV_SET_GEOMETRY
:
4473 /* data contains only strings */
4474 memcpy(host_data
, argptr
, guest_data_size
);
4477 memcpy(host_data
, argptr
, guest_data_size
);
4478 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
4482 void *gspec
= argptr
;
4483 void *cur_data
= host_data
;
4484 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
4485 int spec_size
= thunk_type_size(arg_type
, 0);
4488 for (i
= 0; i
< host_dm
->target_count
; i
++) {
4489 struct dm_target_spec
*spec
= cur_data
;
4493 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
4494 slen
= strlen((char*)gspec
+ spec_size
) + 1;
4496 spec
->next
= sizeof(*spec
) + slen
;
4497 strcpy((char*)&spec
[1], gspec
+ spec_size
);
4499 cur_data
+= spec
->next
;
4504 ret
= -TARGET_EINVAL
;
4505 unlock_user(argptr
, guest_data
, 0);
4508 unlock_user(argptr
, guest_data
, 0);
4510 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4511 if (!is_error(ret
)) {
4512 guest_data
= arg
+ host_dm
->data_start
;
4513 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
4514 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
4515 switch (ie
->host_cmd
) {
4520 case DM_DEV_SUSPEND
:
4523 case DM_TABLE_CLEAR
:
4525 case DM_DEV_SET_GEOMETRY
:
4526 /* no return data */
4528 case DM_LIST_DEVICES
:
4530 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
4531 uint32_t remaining_data
= guest_data_size
;
4532 void *cur_data
= argptr
;
4533 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
4534 int nl_size
= 12; /* can't use thunk_size due to alignment */
4537 uint32_t next
= nl
->next
;
4539 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
4541 if (remaining_data
< nl
->next
) {
4542 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
4545 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
4546 strcpy(cur_data
+ nl_size
, nl
->name
);
4547 cur_data
+= nl
->next
;
4548 remaining_data
-= nl
->next
;
4552 nl
= (void*)nl
+ next
;
4557 case DM_TABLE_STATUS
:
4559 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
4560 void *cur_data
= argptr
;
4561 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
4562 int spec_size
= thunk_type_size(arg_type
, 0);
4565 for (i
= 0; i
< host_dm
->target_count
; i
++) {
4566 uint32_t next
= spec
->next
;
4567 int slen
= strlen((char*)&spec
[1]) + 1;
4568 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
4569 if (guest_data_size
< spec
->next
) {
4570 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
4573 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
4574 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
4575 cur_data
= argptr
+ spec
->next
;
4576 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
4582 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
4583 int count
= *(uint32_t*)hdata
;
4584 uint64_t *hdev
= hdata
+ 8;
4585 uint64_t *gdev
= argptr
+ 8;
4588 *(uint32_t*)argptr
= tswap32(count
);
4589 for (i
= 0; i
< count
; i
++) {
4590 *gdev
= tswap64(*hdev
);
4596 case DM_LIST_VERSIONS
:
4598 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
4599 uint32_t remaining_data
= guest_data_size
;
4600 void *cur_data
= argptr
;
4601 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
4602 int vers_size
= thunk_type_size(arg_type
, 0);
4605 uint32_t next
= vers
->next
;
4607 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
4609 if (remaining_data
< vers
->next
) {
4610 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
4613 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
4614 strcpy(cur_data
+ vers_size
, vers
->name
);
4615 cur_data
+= vers
->next
;
4616 remaining_data
-= vers
->next
;
4620 vers
= (void*)vers
+ next
;
4625 unlock_user(argptr
, guest_data
, 0);
4626 ret
= -TARGET_EINVAL
;
4629 unlock_user(argptr
, guest_data
, guest_data_size
);
4631 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4633 ret
= -TARGET_EFAULT
;
4636 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
4637 unlock_user(argptr
, arg
, target_size
);
4644 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
4645 int cmd
, abi_long arg
)
4649 const argtype
*arg_type
= ie
->arg_type
;
4650 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
4653 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
4654 struct blkpg_partition host_part
;
4656 /* Read and convert blkpg */
4658 target_size
= thunk_type_size(arg_type
, 0);
4659 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4661 ret
= -TARGET_EFAULT
;
4664 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4665 unlock_user(argptr
, arg
, 0);
4667 switch (host_blkpg
->op
) {
4668 case BLKPG_ADD_PARTITION
:
4669 case BLKPG_DEL_PARTITION
:
4670 /* payload is struct blkpg_partition */
4673 /* Unknown opcode */
4674 ret
= -TARGET_EINVAL
;
4678 /* Read and convert blkpg->data */
4679 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
4680 target_size
= thunk_type_size(part_arg_type
, 0);
4681 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4683 ret
= -TARGET_EFAULT
;
4686 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
4687 unlock_user(argptr
, arg
, 0);
4689 /* Swizzle the data pointer to our local copy and call! */
4690 host_blkpg
->data
= &host_part
;
4691 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
4697 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4698 int fd
, int cmd
, abi_long arg
)
4700 const argtype
*arg_type
= ie
->arg_type
;
4701 const StructEntry
*se
;
4702 const argtype
*field_types
;
4703 const int *dst_offsets
, *src_offsets
;
4706 abi_ulong
*target_rt_dev_ptr
;
4707 unsigned long *host_rt_dev_ptr
;
4711 assert(ie
->access
== IOC_W
);
4712 assert(*arg_type
== TYPE_PTR
);
4714 assert(*arg_type
== TYPE_STRUCT
);
4715 target_size
= thunk_type_size(arg_type
, 0);
4716 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4718 return -TARGET_EFAULT
;
4721 assert(*arg_type
== (int)STRUCT_rtentry
);
4722 se
= struct_entries
+ *arg_type
++;
4723 assert(se
->convert
[0] == NULL
);
4724 /* convert struct here to be able to catch rt_dev string */
4725 field_types
= se
->field_types
;
4726 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
4727 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
4728 for (i
= 0; i
< se
->nb_fields
; i
++) {
4729 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
4730 assert(*field_types
== TYPE_PTRVOID
);
4731 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
4732 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
4733 if (*target_rt_dev_ptr
!= 0) {
4734 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
4735 tswapal(*target_rt_dev_ptr
));
4736 if (!*host_rt_dev_ptr
) {
4737 unlock_user(argptr
, arg
, 0);
4738 return -TARGET_EFAULT
;
4741 *host_rt_dev_ptr
= 0;
4746 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
4747 argptr
+ src_offsets
[i
],
4748 field_types
, THUNK_HOST
);
4750 unlock_user(argptr
, arg
, 0);
4752 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4753 if (*host_rt_dev_ptr
!= 0) {
4754 unlock_user((void *)*host_rt_dev_ptr
,
4755 *target_rt_dev_ptr
, 0);
4760 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4761 int fd
, int cmd
, abi_long arg
)
4763 int sig
= target_to_host_signal(arg
);
4764 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
4767 static IOCTLEntry ioctl_entries
[] = {
4768 #define IOCTL(cmd, access, ...) \
4769 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
4770 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
4771 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
4776 /* ??? Implement proper locking for ioctls. */
4777 /* do_ioctl() Must return target values and target errnos. */
4778 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
4780 const IOCTLEntry
*ie
;
4781 const argtype
*arg_type
;
4783 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
4789 if (ie
->target_cmd
== 0) {
4790 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
4791 return -TARGET_ENOSYS
;
4793 if (ie
->target_cmd
== cmd
)
4797 arg_type
= ie
->arg_type
;
4799 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
4802 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
4805 switch(arg_type
[0]) {
4808 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
4812 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
4816 target_size
= thunk_type_size(arg_type
, 0);
4817 switch(ie
->access
) {
4819 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4820 if (!is_error(ret
)) {
4821 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4823 return -TARGET_EFAULT
;
4824 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
4825 unlock_user(argptr
, arg
, target_size
);
4829 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4831 return -TARGET_EFAULT
;
4832 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4833 unlock_user(argptr
, arg
, 0);
4834 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4838 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4840 return -TARGET_EFAULT
;
4841 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4842 unlock_user(argptr
, arg
, 0);
4843 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4844 if (!is_error(ret
)) {
4845 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4847 return -TARGET_EFAULT
;
4848 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
4849 unlock_user(argptr
, arg
, target_size
);
4855 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
4856 (long)cmd
, arg_type
[0]);
4857 ret
= -TARGET_ENOSYS
;
4863 static const bitmask_transtbl iflag_tbl
[] = {
4864 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
4865 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
4866 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
4867 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
4868 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
4869 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
4870 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
4871 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
4872 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
4873 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
4874 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
4875 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
4876 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
4877 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
4881 static const bitmask_transtbl oflag_tbl
[] = {
4882 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
4883 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
4884 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
4885 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
4886 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
4887 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
4888 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
4889 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
4890 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
4891 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
4892 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
4893 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
4894 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
4895 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
4896 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
4897 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
4898 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
4899 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
4900 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
4901 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
4902 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
4903 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
4904 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
4905 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
4909 static const bitmask_transtbl cflag_tbl
[] = {
4910 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
4911 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
4912 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
4913 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
4914 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
4915 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
4916 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
4917 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
4918 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
4919 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
4920 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
4921 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
4922 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
4923 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
4924 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
4925 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
4926 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
4927 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
4928 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
4929 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
4930 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
4931 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
4932 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
4933 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
4934 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
4935 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
4936 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
4937 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
4938 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
4939 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
4940 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
4944 static const bitmask_transtbl lflag_tbl
[] = {
4945 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
4946 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
4947 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
4948 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
4949 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
4950 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
4951 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
4952 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
4953 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
4954 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
4955 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
4956 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
4957 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
4958 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
4959 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
4963 static void target_to_host_termios (void *dst
, const void *src
)
4965 struct host_termios
*host
= dst
;
4966 const struct target_termios
*target
= src
;
4969 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
4971 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
4973 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
4975 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
4976 host
->c_line
= target
->c_line
;
4978 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
4979 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
4980 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
4981 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
4982 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
4983 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
4984 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
4985 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
4986 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
4987 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
4988 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
4989 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
4990 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
4991 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
4992 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
4993 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
4994 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
4995 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
4998 static void host_to_target_termios (void *dst
, const void *src
)
5000 struct target_termios
*target
= dst
;
5001 const struct host_termios
*host
= src
;
5004 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
5006 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
5008 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
5010 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
5011 target
->c_line
= host
->c_line
;
5013 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
5014 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
5015 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
5016 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
5017 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
5018 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
5019 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
5020 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
5021 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
5022 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
5023 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
5024 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
5025 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
5026 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
5027 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
5028 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
5029 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
5030 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
5033 static const StructEntry struct_termios_def
= {
5034 .convert
= { host_to_target_termios
, target_to_host_termios
},
5035 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
5036 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
5039 static bitmask_transtbl mmap_flags_tbl
[] = {
5040 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
5041 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
5042 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
5043 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
, MAP_ANONYMOUS
, MAP_ANONYMOUS
},
5044 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
, MAP_GROWSDOWN
, MAP_GROWSDOWN
},
5045 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
, MAP_DENYWRITE
, MAP_DENYWRITE
},
5046 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
, MAP_EXECUTABLE
, MAP_EXECUTABLE
},
5047 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
5048 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
, MAP_NORESERVE
,
5053 #if defined(TARGET_I386)
5055 /* NOTE: there is really one LDT for all the threads */
5056 static uint8_t *ldt_table
;
5058 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
5065 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
5066 if (size
> bytecount
)
5068 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
5070 return -TARGET_EFAULT
;
5071 /* ??? Should this by byteswapped? */
5072 memcpy(p
, ldt_table
, size
);
5073 unlock_user(p
, ptr
, size
);
5077 /* XXX: add locking support */
5078 static abi_long
write_ldt(CPUX86State
*env
,
5079 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
5081 struct target_modify_ldt_ldt_s ldt_info
;
5082 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5083 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5084 int seg_not_present
, useable
, lm
;
5085 uint32_t *lp
, entry_1
, entry_2
;
5087 if (bytecount
!= sizeof(ldt_info
))
5088 return -TARGET_EINVAL
;
5089 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
5090 return -TARGET_EFAULT
;
5091 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5092 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5093 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5094 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5095 unlock_user_struct(target_ldt_info
, ptr
, 0);
5097 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
5098 return -TARGET_EINVAL
;
5099 seg_32bit
= ldt_info
.flags
& 1;
5100 contents
= (ldt_info
.flags
>> 1) & 3;
5101 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5102 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5103 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5104 useable
= (ldt_info
.flags
>> 6) & 1;
5108 lm
= (ldt_info
.flags
>> 7) & 1;
5110 if (contents
== 3) {
5112 return -TARGET_EINVAL
;
5113 if (seg_not_present
== 0)
5114 return -TARGET_EINVAL
;
5116 /* allocate the LDT */
5118 env
->ldt
.base
= target_mmap(0,
5119 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
5120 PROT_READ
|PROT_WRITE
,
5121 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
5122 if (env
->ldt
.base
== -1)
5123 return -TARGET_ENOMEM
;
5124 memset(g2h(env
->ldt
.base
), 0,
5125 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
5126 env
->ldt
.limit
= 0xffff;
5127 ldt_table
= g2h(env
->ldt
.base
);
5130 /* NOTE: same code as Linux kernel */
5131 /* Allow LDTs to be cleared by the user. */
5132 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5135 read_exec_only
== 1 &&
5137 limit_in_pages
== 0 &&
5138 seg_not_present
== 1 &&
5146 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5147 (ldt_info
.limit
& 0x0ffff);
5148 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5149 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5150 (ldt_info
.limit
& 0xf0000) |
5151 ((read_exec_only
^ 1) << 9) |
5153 ((seg_not_present
^ 1) << 15) |
5155 (limit_in_pages
<< 23) |
5159 entry_2
|= (useable
<< 20);
5161 /* Install the new entry ... */
5163 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
5164 lp
[0] = tswap32(entry_1
);
5165 lp
[1] = tswap32(entry_2
);
5169 /* specific and weird i386 syscalls */
5170 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
5171 unsigned long bytecount
)
5177 ret
= read_ldt(ptr
, bytecount
);
5180 ret
= write_ldt(env
, ptr
, bytecount
, 1);
5183 ret
= write_ldt(env
, ptr
, bytecount
, 0);
5186 ret
= -TARGET_ENOSYS
;
5192 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5193 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5195 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5196 struct target_modify_ldt_ldt_s ldt_info
;
5197 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5198 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5199 int seg_not_present
, useable
, lm
;
5200 uint32_t *lp
, entry_1
, entry_2
;
5203 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5204 if (!target_ldt_info
)
5205 return -TARGET_EFAULT
;
5206 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5207 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5208 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5209 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5210 if (ldt_info
.entry_number
== -1) {
5211 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
5212 if (gdt_table
[i
] == 0) {
5213 ldt_info
.entry_number
= i
;
5214 target_ldt_info
->entry_number
= tswap32(i
);
5219 unlock_user_struct(target_ldt_info
, ptr
, 1);
5221 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
5222 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
5223 return -TARGET_EINVAL
;
5224 seg_32bit
= ldt_info
.flags
& 1;
5225 contents
= (ldt_info
.flags
>> 1) & 3;
5226 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5227 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5228 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5229 useable
= (ldt_info
.flags
>> 6) & 1;
5233 lm
= (ldt_info
.flags
>> 7) & 1;
5236 if (contents
== 3) {
5237 if (seg_not_present
== 0)
5238 return -TARGET_EINVAL
;
5241 /* NOTE: same code as Linux kernel */
5242 /* Allow LDTs to be cleared by the user. */
5243 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5244 if ((contents
== 0 &&
5245 read_exec_only
== 1 &&
5247 limit_in_pages
== 0 &&
5248 seg_not_present
== 1 &&
5256 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5257 (ldt_info
.limit
& 0x0ffff);
5258 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5259 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5260 (ldt_info
.limit
& 0xf0000) |
5261 ((read_exec_only
^ 1) << 9) |
5263 ((seg_not_present
^ 1) << 15) |
5265 (limit_in_pages
<< 23) |
5270 /* Install the new entry ... */
5272 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
5273 lp
[0] = tswap32(entry_1
);
5274 lp
[1] = tswap32(entry_2
);
5278 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5280 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5281 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5282 uint32_t base_addr
, limit
, flags
;
5283 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
5284 int seg_not_present
, useable
, lm
;
5285 uint32_t *lp
, entry_1
, entry_2
;
5287 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5288 if (!target_ldt_info
)
5289 return -TARGET_EFAULT
;
5290 idx
= tswap32(target_ldt_info
->entry_number
);
5291 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
5292 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
5293 unlock_user_struct(target_ldt_info
, ptr
, 1);
5294 return -TARGET_EINVAL
;
5296 lp
= (uint32_t *)(gdt_table
+ idx
);
5297 entry_1
= tswap32(lp
[0]);
5298 entry_2
= tswap32(lp
[1]);
5300 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
5301 contents
= (entry_2
>> 10) & 3;
5302 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
5303 seg_32bit
= (entry_2
>> 22) & 1;
5304 limit_in_pages
= (entry_2
>> 23) & 1;
5305 useable
= (entry_2
>> 20) & 1;
5309 lm
= (entry_2
>> 21) & 1;
5311 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
5312 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
5313 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
5314 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
5315 base_addr
= (entry_1
>> 16) |
5316 (entry_2
& 0xff000000) |
5317 ((entry_2
& 0xff) << 16);
5318 target_ldt_info
->base_addr
= tswapal(base_addr
);
5319 target_ldt_info
->limit
= tswap32(limit
);
5320 target_ldt_info
->flags
= tswap32(flags
);
5321 unlock_user_struct(target_ldt_info
, ptr
, 1);
5324 #endif /* TARGET_I386 && TARGET_ABI32 */
5326 #ifndef TARGET_ABI32
5327 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
5334 case TARGET_ARCH_SET_GS
:
5335 case TARGET_ARCH_SET_FS
:
5336 if (code
== TARGET_ARCH_SET_GS
)
5340 cpu_x86_load_seg(env
, idx
, 0);
5341 env
->segs
[idx
].base
= addr
;
5343 case TARGET_ARCH_GET_GS
:
5344 case TARGET_ARCH_GET_FS
:
5345 if (code
== TARGET_ARCH_GET_GS
)
5349 val
= env
->segs
[idx
].base
;
5350 if (put_user(val
, addr
, abi_ulong
))
5351 ret
= -TARGET_EFAULT
;
5354 ret
= -TARGET_EINVAL
;
5361 #endif /* defined(TARGET_I386) */
5363 #define NEW_STACK_SIZE 0x40000
5366 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
5369 pthread_mutex_t mutex
;
5370 pthread_cond_t cond
;
5373 abi_ulong child_tidptr
;
5374 abi_ulong parent_tidptr
;
5378 static void *clone_func(void *arg
)
5380 new_thread_info
*info
= arg
;
5385 rcu_register_thread();
5387 cpu
= ENV_GET_CPU(env
);
5389 ts
= (TaskState
*)cpu
->opaque
;
5390 info
->tid
= gettid();
5391 cpu
->host_tid
= info
->tid
;
5393 if (info
->child_tidptr
)
5394 put_user_u32(info
->tid
, info
->child_tidptr
);
5395 if (info
->parent_tidptr
)
5396 put_user_u32(info
->tid
, info
->parent_tidptr
);
5397 /* Enable signals. */
5398 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
5399 /* Signal to the parent that we're ready. */
5400 pthread_mutex_lock(&info
->mutex
);
5401 pthread_cond_broadcast(&info
->cond
);
5402 pthread_mutex_unlock(&info
->mutex
);
5403 /* Wait until the parent has finshed initializing the tls state. */
5404 pthread_mutex_lock(&clone_lock
);
5405 pthread_mutex_unlock(&clone_lock
);
5411 /* do_fork() Must return host values and target errnos (unlike most
5412 do_*() functions). */
5413 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
5414 abi_ulong parent_tidptr
, target_ulong newtls
,
5415 abi_ulong child_tidptr
)
5417 CPUState
*cpu
= ENV_GET_CPU(env
);
5421 CPUArchState
*new_env
;
5422 unsigned int nptl_flags
;
5425 /* Emulate vfork() with fork() */
5426 if (flags
& CLONE_VFORK
)
5427 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
5429 if (flags
& CLONE_VM
) {
5430 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
5431 new_thread_info info
;
5432 pthread_attr_t attr
;
5434 ts
= g_new0(TaskState
, 1);
5435 init_task_state(ts
);
5436 /* we create a new CPU instance. */
5437 new_env
= cpu_copy(env
);
5438 /* Init regs that differ from the parent. */
5439 cpu_clone_regs(new_env
, newsp
);
5440 new_cpu
= ENV_GET_CPU(new_env
);
5441 new_cpu
->opaque
= ts
;
5442 ts
->bprm
= parent_ts
->bprm
;
5443 ts
->info
= parent_ts
->info
;
5444 ts
->signal_mask
= parent_ts
->signal_mask
;
5446 flags
&= ~CLONE_NPTL_FLAGS2
;
5448 if (nptl_flags
& CLONE_CHILD_CLEARTID
) {
5449 ts
->child_tidptr
= child_tidptr
;
5452 if (nptl_flags
& CLONE_SETTLS
)
5453 cpu_set_tls (new_env
, newtls
);
5455 /* Grab a mutex so that thread setup appears atomic. */
5456 pthread_mutex_lock(&clone_lock
);
5458 memset(&info
, 0, sizeof(info
));
5459 pthread_mutex_init(&info
.mutex
, NULL
);
5460 pthread_mutex_lock(&info
.mutex
);
5461 pthread_cond_init(&info
.cond
, NULL
);
5463 if (nptl_flags
& CLONE_CHILD_SETTID
)
5464 info
.child_tidptr
= child_tidptr
;
5465 if (nptl_flags
& CLONE_PARENT_SETTID
)
5466 info
.parent_tidptr
= parent_tidptr
;
5468 ret
= pthread_attr_init(&attr
);
5469 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
5470 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
5471 /* It is not safe to deliver signals until the child has finished
5472 initializing, so temporarily block all signals. */
5473 sigfillset(&sigmask
);
5474 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
5476 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
5477 /* TODO: Free new CPU state if thread creation failed. */
5479 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
5480 pthread_attr_destroy(&attr
);
5482 /* Wait for the child to initialize. */
5483 pthread_cond_wait(&info
.cond
, &info
.mutex
);
5485 if (flags
& CLONE_PARENT_SETTID
)
5486 put_user_u32(ret
, parent_tidptr
);
5490 pthread_mutex_unlock(&info
.mutex
);
5491 pthread_cond_destroy(&info
.cond
);
5492 pthread_mutex_destroy(&info
.mutex
);
5493 pthread_mutex_unlock(&clone_lock
);
5495 /* if no CLONE_VM, we consider it is a fork */
5496 if ((flags
& ~(CSIGNAL
| CLONE_NPTL_FLAGS2
)) != 0) {
5497 return -TARGET_EINVAL
;
5500 if (block_signals()) {
5501 return -TARGET_ERESTARTSYS
;
5507 /* Child Process. */
5509 cpu_clone_regs(env
, newsp
);
5511 /* There is a race condition here. The parent process could
5512 theoretically read the TID in the child process before the child
5513 tid is set. This would require using either ptrace
5514 (not implemented) or having *_tidptr to point at a shared memory
5515 mapping. We can't repeat the spinlock hack used above because
5516 the child process gets its own copy of the lock. */
5517 if (flags
& CLONE_CHILD_SETTID
)
5518 put_user_u32(gettid(), child_tidptr
);
5519 if (flags
& CLONE_PARENT_SETTID
)
5520 put_user_u32(gettid(), parent_tidptr
);
5521 ts
= (TaskState
*)cpu
->opaque
;
5522 if (flags
& CLONE_SETTLS
)
5523 cpu_set_tls (env
, newtls
);
5524 if (flags
& CLONE_CHILD_CLEARTID
)
5525 ts
->child_tidptr
= child_tidptr
;
5533 /* warning : doesn't handle linux specific flags... */
5534 static int target_to_host_fcntl_cmd(int cmd
)
5537 case TARGET_F_DUPFD
:
5538 case TARGET_F_GETFD
:
5539 case TARGET_F_SETFD
:
5540 case TARGET_F_GETFL
:
5541 case TARGET_F_SETFL
:
5543 case TARGET_F_GETLK
:
5545 case TARGET_F_SETLK
:
5547 case TARGET_F_SETLKW
:
5549 case TARGET_F_GETOWN
:
5551 case TARGET_F_SETOWN
:
5553 case TARGET_F_GETSIG
:
5555 case TARGET_F_SETSIG
:
5557 #if TARGET_ABI_BITS == 32
5558 case TARGET_F_GETLK64
:
5560 case TARGET_F_SETLK64
:
5562 case TARGET_F_SETLKW64
:
5565 case TARGET_F_SETLEASE
:
5567 case TARGET_F_GETLEASE
:
5569 #ifdef F_DUPFD_CLOEXEC
5570 case TARGET_F_DUPFD_CLOEXEC
:
5571 return F_DUPFD_CLOEXEC
;
5573 case TARGET_F_NOTIFY
:
5576 case TARGET_F_GETOWN_EX
:
5580 case TARGET_F_SETOWN_EX
:
5584 return -TARGET_EINVAL
;
5586 return -TARGET_EINVAL
;
5589 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
5590 static const bitmask_transtbl flock_tbl
[] = {
5591 TRANSTBL_CONVERT(F_RDLCK
),
5592 TRANSTBL_CONVERT(F_WRLCK
),
5593 TRANSTBL_CONVERT(F_UNLCK
),
5594 TRANSTBL_CONVERT(F_EXLCK
),
5595 TRANSTBL_CONVERT(F_SHLCK
),
5599 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
5602 struct target_flock
*target_fl
;
5603 struct flock64 fl64
;
5604 struct target_flock64
*target_fl64
;
5606 struct f_owner_ex fox
;
5607 struct target_f_owner_ex
*target_fox
;
5610 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
5612 if (host_cmd
== -TARGET_EINVAL
)
5616 case TARGET_F_GETLK
:
5617 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
5618 return -TARGET_EFAULT
;
5620 target_to_host_bitmask(tswap16(target_fl
->l_type
), flock_tbl
);
5621 fl
.l_whence
= tswap16(target_fl
->l_whence
);
5622 fl
.l_start
= tswapal(target_fl
->l_start
);
5623 fl
.l_len
= tswapal(target_fl
->l_len
);
5624 fl
.l_pid
= tswap32(target_fl
->l_pid
);
5625 unlock_user_struct(target_fl
, arg
, 0);
5626 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
5628 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg
, 0))
5629 return -TARGET_EFAULT
;
5631 host_to_target_bitmask(tswap16(fl
.l_type
), flock_tbl
);
5632 target_fl
->l_whence
= tswap16(fl
.l_whence
);
5633 target_fl
->l_start
= tswapal(fl
.l_start
);
5634 target_fl
->l_len
= tswapal(fl
.l_len
);
5635 target_fl
->l_pid
= tswap32(fl
.l_pid
);
5636 unlock_user_struct(target_fl
, arg
, 1);
5640 case TARGET_F_SETLK
:
5641 case TARGET_F_SETLKW
:
5642 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
5643 return -TARGET_EFAULT
;
5645 target_to_host_bitmask(tswap16(target_fl
->l_type
), flock_tbl
);
5646 fl
.l_whence
= tswap16(target_fl
->l_whence
);
5647 fl
.l_start
= tswapal(target_fl
->l_start
);
5648 fl
.l_len
= tswapal(target_fl
->l_len
);
5649 fl
.l_pid
= tswap32(target_fl
->l_pid
);
5650 unlock_user_struct(target_fl
, arg
, 0);
5651 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
5654 case TARGET_F_GETLK64
:
5655 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
5656 return -TARGET_EFAULT
;
5658 target_to_host_bitmask(tswap16(target_fl64
->l_type
), flock_tbl
) >> 1;
5659 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
5660 fl64
.l_start
= tswap64(target_fl64
->l_start
);
5661 fl64
.l_len
= tswap64(target_fl64
->l_len
);
5662 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
5663 unlock_user_struct(target_fl64
, arg
, 0);
5664 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
5666 if (!lock_user_struct(VERIFY_WRITE
, target_fl64
, arg
, 0))
5667 return -TARGET_EFAULT
;
5668 target_fl64
->l_type
=
5669 host_to_target_bitmask(tswap16(fl64
.l_type
), flock_tbl
) >> 1;
5670 target_fl64
->l_whence
= tswap16(fl64
.l_whence
);
5671 target_fl64
->l_start
= tswap64(fl64
.l_start
);
5672 target_fl64
->l_len
= tswap64(fl64
.l_len
);
5673 target_fl64
->l_pid
= tswap32(fl64
.l_pid
);
5674 unlock_user_struct(target_fl64
, arg
, 1);
5677 case TARGET_F_SETLK64
:
5678 case TARGET_F_SETLKW64
:
5679 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
5680 return -TARGET_EFAULT
;
5682 target_to_host_bitmask(tswap16(target_fl64
->l_type
), flock_tbl
) >> 1;
5683 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
5684 fl64
.l_start
= tswap64(target_fl64
->l_start
);
5685 fl64
.l_len
= tswap64(target_fl64
->l_len
);
5686 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
5687 unlock_user_struct(target_fl64
, arg
, 0);
5688 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
5691 case TARGET_F_GETFL
:
5692 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
5694 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
5698 case TARGET_F_SETFL
:
5699 ret
= get_errno(fcntl(fd
, host_cmd
, target_to_host_bitmask(arg
, fcntl_flags_tbl
)));
5703 case TARGET_F_GETOWN_EX
:
5704 ret
= get_errno(fcntl(fd
, host_cmd
, &fox
));
5706 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
5707 return -TARGET_EFAULT
;
5708 target_fox
->type
= tswap32(fox
.type
);
5709 target_fox
->pid
= tswap32(fox
.pid
);
5710 unlock_user_struct(target_fox
, arg
, 1);
5716 case TARGET_F_SETOWN_EX
:
5717 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
5718 return -TARGET_EFAULT
;
5719 fox
.type
= tswap32(target_fox
->type
);
5720 fox
.pid
= tswap32(target_fox
->pid
);
5721 unlock_user_struct(target_fox
, arg
, 0);
5722 ret
= get_errno(fcntl(fd
, host_cmd
, &fox
));
5726 case TARGET_F_SETOWN
:
5727 case TARGET_F_GETOWN
:
5728 case TARGET_F_SETSIG
:
5729 case TARGET_F_GETSIG
:
5730 case TARGET_F_SETLEASE
:
5731 case TARGET_F_GETLEASE
:
5732 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
5736 ret
= get_errno(fcntl(fd
, cmd
, arg
));
5744 static inline int high2lowuid(int uid
)
5752 static inline int high2lowgid(int gid
)
5760 static inline int low2highuid(int uid
)
5762 if ((int16_t)uid
== -1)
5768 static inline int low2highgid(int gid
)
5770 if ((int16_t)gid
== -1)
5775 static inline int tswapid(int id
)
5780 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
5782 #else /* !USE_UID16 */
5783 static inline int high2lowuid(int uid
)
5787 static inline int high2lowgid(int gid
)
5791 static inline int low2highuid(int uid
)
5795 static inline int low2highgid(int gid
)
5799 static inline int tswapid(int id
)
5804 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
5806 #endif /* USE_UID16 */
5808 /* We must do direct syscalls for setting UID/GID, because we want to
5809 * implement the Linux system call semantics of "change only for this thread",
5810 * not the libc/POSIX semantics of "change for all threads in process".
5811 * (See http://ewontfix.com/17/ for more details.)
5812 * We use the 32-bit version of the syscalls if present; if it is not
5813 * then either the host architecture supports 32-bit UIDs natively with
5814 * the standard syscall, or the 16-bit UID is the best we can do.
5816 #ifdef __NR_setuid32
5817 #define __NR_sys_setuid __NR_setuid32
5819 #define __NR_sys_setuid __NR_setuid
5821 #ifdef __NR_setgid32
5822 #define __NR_sys_setgid __NR_setgid32
5824 #define __NR_sys_setgid __NR_setgid
5826 #ifdef __NR_setresuid32
5827 #define __NR_sys_setresuid __NR_setresuid32
5829 #define __NR_sys_setresuid __NR_setresuid
5831 #ifdef __NR_setresgid32
5832 #define __NR_sys_setresgid __NR_setresgid32
5834 #define __NR_sys_setresgid __NR_setresgid
5837 _syscall1(int, sys_setuid
, uid_t
, uid
)
5838 _syscall1(int, sys_setgid
, gid_t
, gid
)
5839 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
5840 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
5842 void syscall_init(void)
5845 const argtype
*arg_type
;
5849 thunk_init(STRUCT_MAX
);
5851 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
5852 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
5853 #include "syscall_types.h"
5855 #undef STRUCT_SPECIAL
5857 /* Build target_to_host_errno_table[] table from
5858 * host_to_target_errno_table[]. */
5859 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
5860 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
5863 /* we patch the ioctl size if necessary. We rely on the fact that
5864 no ioctl has all the bits at '1' in the size field */
5866 while (ie
->target_cmd
!= 0) {
5867 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
5868 TARGET_IOC_SIZEMASK
) {
5869 arg_type
= ie
->arg_type
;
5870 if (arg_type
[0] != TYPE_PTR
) {
5871 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
5876 size
= thunk_type_size(arg_type
, 0);
5877 ie
->target_cmd
= (ie
->target_cmd
&
5878 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
5879 (size
<< TARGET_IOC_SIZESHIFT
);
5882 /* automatic consistency check if same arch */
5883 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
5884 (defined(__x86_64__) && defined(TARGET_X86_64))
5885 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
5886 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
5887 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
5894 #if TARGET_ABI_BITS == 32
5895 static inline uint64_t target_offset64(uint32_t word0
, uint32_t word1
)
5897 #ifdef TARGET_WORDS_BIGENDIAN
5898 return ((uint64_t)word0
<< 32) | word1
;
5900 return ((uint64_t)word1
<< 32) | word0
;
5903 #else /* TARGET_ABI_BITS == 32 */
5904 static inline uint64_t target_offset64(uint64_t word0
, uint64_t word1
)
5908 #endif /* TARGET_ABI_BITS != 32 */
5910 #ifdef TARGET_NR_truncate64
5911 static inline abi_long
target_truncate64(void *cpu_env
, const char *arg1
,
5916 if (regpairs_aligned(cpu_env
)) {
5920 return get_errno(truncate64(arg1
, target_offset64(arg2
, arg3
)));
5924 #ifdef TARGET_NR_ftruncate64
5925 static inline abi_long
target_ftruncate64(void *cpu_env
, abi_long arg1
,
5930 if (regpairs_aligned(cpu_env
)) {
5934 return get_errno(ftruncate64(arg1
, target_offset64(arg2
, arg3
)));
5938 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
5939 abi_ulong target_addr
)
5941 struct target_timespec
*target_ts
;
5943 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
5944 return -TARGET_EFAULT
;
5945 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
5946 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
5947 unlock_user_struct(target_ts
, target_addr
, 0);
5951 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
5952 struct timespec
*host_ts
)
5954 struct target_timespec
*target_ts
;
5956 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
5957 return -TARGET_EFAULT
;
5958 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
5959 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
5960 unlock_user_struct(target_ts
, target_addr
, 1);
5964 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
5965 abi_ulong target_addr
)
5967 struct target_itimerspec
*target_itspec
;
5969 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
5970 return -TARGET_EFAULT
;
5973 host_itspec
->it_interval
.tv_sec
=
5974 tswapal(target_itspec
->it_interval
.tv_sec
);
5975 host_itspec
->it_interval
.tv_nsec
=
5976 tswapal(target_itspec
->it_interval
.tv_nsec
);
5977 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
5978 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
5980 unlock_user_struct(target_itspec
, target_addr
, 1);
5984 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
5985 struct itimerspec
*host_its
)
5987 struct target_itimerspec
*target_itspec
;
5989 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
5990 return -TARGET_EFAULT
;
5993 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
5994 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
5996 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
5997 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
5999 unlock_user_struct(target_itspec
, target_addr
, 0);
6003 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
6004 abi_ulong target_addr
)
6006 struct target_sigevent
*target_sevp
;
6008 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
6009 return -TARGET_EFAULT
;
6012 /* This union is awkward on 64 bit systems because it has a 32 bit
6013 * integer and a pointer in it; we follow the conversion approach
6014 * used for handling sigval types in signal.c so the guest should get
6015 * the correct value back even if we did a 64 bit byteswap and it's
6016 * using the 32 bit integer.
6018 host_sevp
->sigev_value
.sival_ptr
=
6019 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
6020 host_sevp
->sigev_signo
=
6021 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
6022 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
6023 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
6025 unlock_user_struct(target_sevp
, target_addr
, 1);
6029 #if defined(TARGET_NR_mlockall)
6030 static inline int target_to_host_mlockall_arg(int arg
)
6034 if (arg
& TARGET_MLOCKALL_MCL_CURRENT
) {
6035 result
|= MCL_CURRENT
;
6037 if (arg
& TARGET_MLOCKALL_MCL_FUTURE
) {
6038 result
|= MCL_FUTURE
;
6044 static inline abi_long
host_to_target_stat64(void *cpu_env
,
6045 abi_ulong target_addr
,
6046 struct stat
*host_st
)
6048 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6049 if (((CPUARMState
*)cpu_env
)->eabi
) {
6050 struct target_eabi_stat64
*target_st
;
6052 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
6053 return -TARGET_EFAULT
;
6054 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
6055 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
6056 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
6057 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6058 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
6060 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
6061 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
6062 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
6063 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
6064 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
6065 __put_user(host_st
->st_size
, &target_st
->st_size
);
6066 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
6067 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
6068 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
6069 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
6070 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
6071 unlock_user_struct(target_st
, target_addr
, 1);
6075 #if defined(TARGET_HAS_STRUCT_STAT64)
6076 struct target_stat64
*target_st
;
6078 struct target_stat
*target_st
;
6081 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
6082 return -TARGET_EFAULT
;
6083 memset(target_st
, 0, sizeof(*target_st
));
6084 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
6085 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
6086 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6087 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
6089 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
6090 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
6091 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
6092 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
6093 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
6094 /* XXX: better use of kernel struct */
6095 __put_user(host_st
->st_size
, &target_st
->st_size
);
6096 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
6097 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
6098 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
6099 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
6100 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
6101 unlock_user_struct(target_st
, target_addr
, 1);
6107 /* ??? Using host futex calls even when target atomic operations
6108 are not really atomic probably breaks things. However implementing
6109 futexes locally would make futexes shared between multiple processes
6110 tricky. However they're probably useless because guest atomic
6111 operations won't work either. */
/*
 * Emulate the guest futex(2) syscall by forwarding to the host futex
 * via safe_futex().  uaddr/uaddr2 are guest addresses, translated with
 * g2h() at the call sites.  Returns 0 or -TARGET_errno.
 * NOTE(review): this extract omits several original lines (the switch
 * header, some case labels, #else/#endif branches); comments below
 * describe only the visible fragments.
 */
6112 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
6113 target_ulong uaddr2
, int val3
)
6115 struct timespec ts
, *pts
;
6118 /* ??? We assume FUTEX_* constants are the same on both host
6120 #ifdef FUTEX_CMD_MASK
/* Strip modifier flags (e.g. FUTEX_PRIVATE_FLAG) so the dispatch below
 * sees the base command only; the unmasked op is still passed to the host. */
6121 base_op
= op
& FUTEX_CMD_MASK
;
6127 case FUTEX_WAIT_BITSET
:
/* Wait-style ops: convert the guest timeout to a host timespec (presumably
 * only when a timeout was supplied — the guarding if is not visible here). */
6130 target_to_host_timespec(pts
, timeout
);
/* val is byte-swapped because the kernel compares it against the futex
 * word in guest memory, which is in guest byte order. */
6134 return get_errno(safe_futex(g2h(uaddr
), op
, tswap32(val
),
/* Wake-style ops: no timeout, val is a plain count — no swap needed. */
6137 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
6139 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
6141 case FUTEX_CMP_REQUEUE
:
6143 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6144 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6145 But the prototype takes a `struct timespec *'; insert casts
6146 to satisfy the compiler. We do not need to tswap TIMEOUT
6147 since it's not compared to guest memory. */
6148 pts
= (struct timespec
*)(uintptr_t) timeout
;
6149 return get_errno(safe_futex(g2h(uaddr
), op
, val
, pts
,
/* val3 is only compared against guest memory for FUTEX_CMP_REQUEUE,
 * hence the conditional tswap (continuation not visible in extract). */
6151 (base_op
== FUTEX_CMP_REQUEUE
/* Unknown/unsupported futex command. */
6155 return -TARGET_ENOSYS
;
6158 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate name_to_handle_at(2): read the guest-supplied handle_bytes,
 * call the host syscall with a host-side struct file_handle, then copy
 * the (opaque) handle back to guest memory with the two documented
 * header fields byte-swapped.  Returns the syscall result or
 * -TARGET_EFAULT on bad guest pointers.
 * NOTE(review): several original lines (flags parameter, declarations of
 * name/mid/ret, error checks, g_free, final return) are missing from
 * this extract; comments cover only the visible code.
 */
6159 static abi_long
do_name_to_handle_at(abi_long dirfd
, abi_long pathname
,
6160 abi_long handle
, abi_long mount_id
,
6163 struct file_handle
*target_fh
;
6164 struct file_handle
*fh
;
6168 unsigned int size
, total_size
;
/* First field of the guest struct file_handle is handle_bytes. */
6170 if (get_user_s32(size
, handle
)) {
6171 return -TARGET_EFAULT
;
6174 name
= lock_user_string(pathname
);
6176 return -TARGET_EFAULT
;
/* The handle buffer is the fixed header plus size payload bytes. */
6179 total_size
= sizeof(struct file_handle
) + size
;
6180 target_fh
= lock_user(VERIFY_WRITE
, handle
, total_size
, 0);
6182 unlock_user(name
, pathname
, 0);
6183 return -TARGET_EFAULT
;
/* Host-side copy: zeroed, with the caller-provided capacity filled in. */
6186 fh
= g_malloc0(total_size
);
6187 fh
->handle_bytes
= size
;
6189 ret
= get_errno(name_to_handle_at(dirfd
, path(name
), fh
, &mid
, flags
));
6190 unlock_user(name
, pathname
, 0);
6192 /* man name_to_handle_at(2):
6193 * Other than the use of the handle_bytes field, the caller should treat
6194 * the file_handle structure as an opaque data type
/* Opaque payload is copied verbatim; only the two header words are swapped. */
6197 memcpy(target_fh
, fh
, total_size
);
6198 target_fh
->handle_bytes
= tswap32(fh
->handle_bytes
);
6199 target_fh
->handle_type
= tswap32(fh
->handle_type
);
6201 unlock_user(target_fh
, handle
, total_size
);
/* Write the mount ID back to the guest's mount_id pointer. */
6203 if (put_user_s32(mid
, mount_id
)) {
6204 return -TARGET_EFAULT
;
6212 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate open_by_handle_at(2): duplicate the guest's file_handle into
 * host memory, fix up the byte order of the header fields, and invoke
 * the host syscall with translated open flags.
 * NOTE(review): this extract omits some original lines (flags parameter,
 * ret declaration, g_free of the duplicate, final return); comments
 * describe only the visible code.
 */
6213 static abi_long
do_open_by_handle_at(abi_long mount_fd
, abi_long handle
,
6216 struct file_handle
*target_fh
;
6217 struct file_handle
*fh
;
6218 unsigned int size
, total_size
;
/* handle_bytes is the first field of the guest struct file_handle. */
6221 if (get_user_s32(size
, handle
)) {
6222 return -TARGET_EFAULT
;
6225 total_size
= sizeof(struct file_handle
) + size
;
6226 target_fh
= lock_user(VERIFY_READ
, handle
, total_size
, 1);
6228 return -TARGET_EFAULT
;
/* Work on a host-owned copy so guest memory is never modified. */
6231 fh
= g_memdup(target_fh
, total_size
);
6232 fh
->handle_bytes
= size
;
6233 fh
->handle_type
= tswap32(target_fh
->handle_type
);
/* Open flags are guest-encoded; translate via the fcntl flag table. */
6235 ret
= get_errno(open_by_handle_at(mount_fd
, fh
,
6236 target_to_host_bitmask(flags
, fcntl_flags_tbl
)));
6240 unlock_user(target_fh
, handle
, total_size
);
6246 #if defined(TARGET_NR_signalfd) || defined(CONFIG_SIGNALFD_PLACEHOLDER_GUARD_NOT_PRESENT)
6248 /* signalfd siginfo conversion */
/*
 * Convert one struct signalfd_siginfo from host to guest byte order.
 * NOTE(review): the only visible caller passes the same buffer for both
 * tinfo and info (in-place conversion), which is presumably why some
 * fields below read through tinfo while others read through info —
 * confirm against the full source before relying on that.
 */
6251 host_to_target_signalfd_siginfo(struct signalfd_siginfo
*tinfo
,
6252 const struct signalfd_siginfo
*info
)
/* Translate the host signal number to the guest numbering first. */
6254 int sig
= host_to_target_signal(info
->ssi_signo
);
6256 /* linux/signalfd.h defines a ssi_addr_lsb
6257 * not defined in sys/signalfd.h but used by some kernels
6260 #ifdef BUS_MCEERR_AO
/* For SIGBUS machine-check errors the kernel appends a 16-bit
 * "least significant bits of address" field right after ssi_addr;
 * swap it too, addressing it via pointer arithmetic past ssi_addr. */
6261 if (tinfo
->ssi_signo
== SIGBUS
&&
6262 (tinfo
->ssi_code
== BUS_MCEERR_AR
||
6263 tinfo
->ssi_code
== BUS_MCEERR_AO
)) {
6264 uint16_t *ssi_addr_lsb
= (uint16_t *)(&info
->ssi_addr
+ 1);
6265 uint16_t *tssi_addr_lsb
= (uint16_t *)(&tinfo
->ssi_addr
+ 1);
6266 *tssi_addr_lsb
= tswap16(*ssi_addr_lsb
);
/* Byte-swap every remaining field into guest order. */
6270 tinfo
->ssi_signo
= tswap32(sig
);
6271 tinfo
->ssi_errno
= tswap32(tinfo
->ssi_errno
);
6272 tinfo
->ssi_code
= tswap32(info
->ssi_code
);
6273 tinfo
->ssi_pid
= tswap32(info
->ssi_pid
);
6274 tinfo
->ssi_uid
= tswap32(info
->ssi_uid
);
6275 tinfo
->ssi_fd
= tswap32(info
->ssi_fd
);
6276 tinfo
->ssi_tid
= tswap32(info
->ssi_tid
);
6277 tinfo
->ssi_band
= tswap32(info
->ssi_band
);
6278 tinfo
->ssi_overrun
= tswap32(info
->ssi_overrun
);
6279 tinfo
->ssi_trapno
= tswap32(info
->ssi_trapno
);
6280 tinfo
->ssi_status
= tswap32(info
->ssi_status
);
6281 tinfo
->ssi_int
= tswap32(info
->ssi_int
);
6282 tinfo
->ssi_ptr
= tswap64(info
->ssi_ptr
);
6283 tinfo
->ssi_utime
= tswap64(info
->ssi_utime
);
6284 tinfo
->ssi_stime
= tswap64(info
->ssi_stime
);
6285 tinfo
->ssi_addr
= tswap64(info
->ssi_addr
);
/*
 * fd read-data translator for signalfd file descriptors: the buffer read
 * from the host fd is an array of struct signalfd_siginfo; convert each
 * record in place (same pointer passed for source and destination).
 */
6288 static abi_long
host_to_target_data_signalfd(void *buf
, size_t len
)
6292 for (i
= 0; i
< len
; i
+= sizeof(struct signalfd_siginfo
)) {
6293 host_to_target_signalfd_siginfo(buf
+ i
, buf
+ i
);
/* Fd translation hooks registered for signalfd descriptors: only reads
 * need conversion (host_to_target_data_signalfd). */
6299 static TargetFdTrans target_signalfd_trans
= {
6300 .host_to_target_data
= host_to_target_data_signalfd
,
/*
 * Emulate signalfd4(2): validate flags, convert the guest signal mask
 * and open flags to host representation, create the host signalfd, and
 * register the fd-translation hooks so reads are byte-swapped.
 * Returns the new fd or -TARGET_errno.
 */
6303 static abi_long
do_signalfd4(int fd
, abi_long mask
, int flags
)
6306 target_sigset_t
*target_mask
;
/* Only SFD_NONBLOCK/SFD_CLOEXEC (guest O_* encodings) are accepted. */
6310 if (flags
& ~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
)) {
6311 return -TARGET_EINVAL
;
6313 if (!lock_user_struct(VERIFY_READ
, target_mask
, mask
, 1)) {
6314 return -TARGET_EFAULT
;
6317 target_to_host_sigset(&host_mask
, target_mask
);
6319 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
6321 ret
= get_errno(signalfd(fd
, &host_mask
, host_flags
));
/* On success, install the read-side siginfo byte-swapping hook. */
6323 fd_trans_register(ret
, &target_signalfd_trans
);
6326 unlock_user_struct(target_mask
, mask
, 0);
6332 /* Map host to target signal numbers for the wait family of syscalls.
6333 Assume all other status bits are the same. */
/* Termination-by-signal: signal number lives in the low 7 bits. */
6334 int host_to_target_waitstatus(int status
)
6336 if (WIFSIGNALED(status
)) {
6337 return host_to_target_signal(WTERMSIG(status
)) | (status
& ~0x7f);
/* Stopped: the stop signal lives in bits 8-15 of the status word. */
6339 if (WIFSTOPPED(status
)) {
6340 return (host_to_target_signal(WSTOPSIG(status
)) << 8)
/*
 * Fake /proc/self/cmdline for the guest: stream the host file into fd,
 * but drop the first NUL-terminated string, which is the qemu binary
 * path rather than the guest's own argv[0].
 * NOTE(review): several lines (buf/cp_buf/nb_read declarations, error
 * branches, loop structure) are missing from this extract.
 */
6346 static int open_self_cmdline(void *cpu_env
, int fd
)
6349 bool word_skipped
= false;
6351 fd_orig
= open("/proc/self/cmdline", O_RDONLY
);
6361 nb_read
= read(fd_orig
, buf
, sizeof(buf
));
/* Read error path: close the original fd (result reused as fd_orig). */
6364 fd_orig
= close(fd_orig
);
6367 } else if (nb_read
== 0) {
6371 if (!word_skipped
) {
6372 /* Skip the first string, which is the path to qemu-*-static
6373 instead of the actual command. */
6374 cp_buf
= memchr(buf
, 0, sizeof(buf
));
6376 /* Null byte found, skip one string */
/* Advance past the terminator; only the remainder is forwarded. */
6378 nb_read
-= cp_buf
- buf
;
6379 word_skipped
= true;
6384 if (write(fd
, cp_buf
, nb_read
) != nb_read
) {
6393 return close(fd_orig
);
/*
 * Fake /proc/self/maps for the guest: parse the host maps file line by
 * line, keep only mappings that are valid guest memory (h2g_valid /
 * page_check_range), rewrite the addresses into guest-view addresses,
 * and label the guest stack region " [stack]".  Output is written to fd.
 * NOTE(review): some original lines (fp NULL check, continue branches,
 * cleanup/return) are missing from this extract.
 */
6396 static int open_self_maps(void *cpu_env
, int fd
)
6398 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
6399 TaskState
*ts
= cpu
->opaque
;
6405 fp
= fopen("/proc/self/maps", "r");
6410 while ((read
= getline(&line
, &len
, fp
)) != -1) {
6411 int fields
, dev_maj
, dev_min
, inode
;
6412 uint64_t min
, max
, offset
;
6413 char flag_r
, flag_w
, flag_x
, flag_p
;
6414 char path
[512] = "";
/* Parse "start-end perms offset dev_maj:dev_min inode [path]". */
6415 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
6416 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
6417 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
/* path is optional: accept 10 or 11 matched fields, skip otherwise. */
6419 if ((fields
< 10) || (fields
> 11)) {
6422 if (h2g_valid(min
)) {
6423 int flags
= page_get_flags(h2g(min
));
/* Clamp ranges that extend past the guest address space. */
6424 max
= h2g_valid(max
- 1) ? max
: (uintptr_t)g2h(GUEST_ADDR_MAX
);
6425 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
/* Mark the region containing the guest stack limit as the stack. */
6428 if (h2g(min
) == ts
->info
->stack_limit
) {
6429 pstrcpy(path
, sizeof(path
), " [stack]");
/* Emit the line with addresses translated back to guest view. */
6431 dprintf(fd
, TARGET_ABI_FMT_lx
"-" TARGET_ABI_FMT_lx
6432 " %c%c%c%c %08" PRIx64
" %02x:%02x %d %s%s\n",
6433 h2g(min
), h2g(max
- 1) + 1, flag_r
, flag_w
,
6434 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
6435 path
[0] ? " " : "", path
);
/*
 * Fake /proc/self/stat for the guest: emit 44 space-separated fields.
 * Only pid (field 0), comm (field 1) and startstack (field 27) carry
 * real values; every other field is written as a literal 0.
 * NOTE(review): buf/len/val declarations and the i==0 branch header are
 * missing from this extract.
 */
6445 static int open_self_stat(void *cpu_env
, int fd
)
6447 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
6448 TaskState
*ts
= cpu
->opaque
;
6449 abi_ulong start_stack
= ts
->info
->start_stack
;
6452 for (i
= 0; i
< 44; i
++) {
/* Field 0 (presumably): pid, formatted from val. */
6460 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
6461 } else if (i
== 1) {
/* Field 1: command name, taken from the guest's argv[0]. */
6463 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
6464 } else if (i
== 27) {
/* Field 27: start of the stack (startstack). */
6467 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
6469 /* for the rest, there is MasterCard */
/* All other fields are faked as 0; the last one ends the line. */
6470 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
6474 if (write(fd
, buf
, len
) != len
) {
/*
 * Fake /proc/self/auxv for the guest: the auxiliary vector that was
 * placed on the guest stack at exec time is copied verbatim into fd,
 * then the fd is rewound to offset 0 for the reader.
 * NOTE(review): the write-loop framing and return are missing from
 * this extract.
 */
6482 static int open_self_auxv(void *cpu_env
, int fd
)
6484 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
6485 TaskState
*ts
= cpu
->opaque
;
/* Location and size of the saved auxv recorded at image load time. */
6486 abi_ulong auxv
= ts
->info
->saved_auxv
;
6487 abi_ulong len
= ts
->info
->auxv_len
;
6491 * Auxiliary vector is stored in target process stack.
6492 * read in whole auxv vector and copy it to file
6494 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
6498 r
= write(fd
, ptr
, len
);
6505 lseek(fd
, 0, SEEK_SET
);
6506 unlock_user(ptr
, auxv
, len
);
/*
 * Return non-zero when filename names this process's own proc entry:
 * "/proc/self/<entry>" or "/proc/<getpid()>/<entry>".  Used to decide
 * when a guest open() must be redirected to a faked proc file.
 * NOTE(review): the myself[] buffer declaration and some else/return
 * lines are missing from this extract.
 */
6512 static int is_proc_myself(const char *filename
, const char *entry
)
6514 if (!strncmp(filename
, "/proc/", strlen("/proc/"))) {
6515 filename
+= strlen("/proc/");
/* Accept the literal "self/" form... */
6516 if (!strncmp(filename
, "self/", strlen("self/"))) {
6517 filename
+= strlen("self/");
/* ...or a numeric pid directory, matched against our own pid. */
6518 } else if (*filename
>= '1' && *filename
<= '9') {
6520 snprintf(myself
, sizeof(myself
), "%d/", getpid());
6521 if (!strncmp(filename
, myself
, strlen(myself
))) {
6522 filename
+= strlen(myself
);
/* Whatever remains must match the requested entry exactly. */
6529 if (!strcmp(filename
, entry
)) {
6536 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-path comparator used for proc files that need byte-swapping
 * only when host and guest endianness differ. */
6537 static int is_proc(const char *filename
, const char *entry
)
6539 return strcmp(filename
, entry
) == 0;
/*
 * Fake /proc/net/route for a cross-endian guest: copy the header line
 * through unchanged, then re-emit each route entry with the 32-bit
 * destination, gateway and netmask fields byte-swapped to guest order.
 * NOTE(review): fp NULL check, line/len/read/iface declarations and
 * cleanup are missing from this extract.
 */
6542 static int open_net_route(void *cpu_env
, int fd
)
6549 fp
= fopen("/proc/net/route", "r");
/* First line is the column header: pass through verbatim. */
6556 read
= getline(&line
, &len
, fp
);
6557 dprintf(fd
, "%s", line
);
6561 while ((read
= getline(&line
, &len
, fp
)) != -1) {
6563 uint32_t dest
, gw
, mask
;
6564 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
6565 sscanf(line
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6566 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
6567 &mask
, &mtu
, &window
, &irtt
);
/* Addresses are stored in the file in host byte order; swap only the
 * three address-valued columns for the guest. */
6568 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6569 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
6570 metric
, tswap32(mask
), mtu
, window
, irtt
);
/*
 * Common openat(2) backend for the guest's open/openat syscalls.
 * Special-cases proc files the guest must not see the host's version of:
 *  - "/proc/self/exe" is redirected to the fd/path of the guest binary;
 *  - maps/stat/auxv/cmdline (and, cross-endian, /proc/net/route) are
 *    synthesized into an unlinked temporary file by a fill() callback.
 * Everything else falls through to safe_openat() on the real path.
 * NOTE(review): the struct fake_open header, unlink/error branches and
 * some returns are missing from this extract.
 */
6580 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
6583 const char *filename
;
6584 int (*fill
)(void *cpu_env
, int fd
);
6585 int (*cmp
)(const char *s1
, const char *s2
);
6587 const struct fake_open
*fake_open
;
/* Table of faked proc entries; cmp decides how filename is matched. */
6588 static const struct fake_open fakes
[] = {
6589 { "maps", open_self_maps
, is_proc_myself
},
6590 { "stat", open_self_stat
, is_proc_myself
},
6591 { "auxv", open_self_auxv
, is_proc_myself
},
6592 { "cmdline", open_self_cmdline
, is_proc_myself
},
6593 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6594 { "/proc/net/route", open_net_route
, is_proc
},
6596 { NULL
, NULL
, NULL
}
/* /proc/self/exe: prefer the exec fd passed in via the aux vector. */
6599 if (is_proc_myself(pathname
, "exe")) {
6600 int execfd
= qemu_getauxval(AT_EXECFD
);
6601 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
6604 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
6605 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
6610 if (fake_open
->filename
) {
6612 char filename
[PATH_MAX
];
6615 /* create temporary file to map stat to */
6616 tmpdir
= getenv("TMPDIR");
6619 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
6620 fd
= mkstemp(filename
);
/* Populate the temp file with the synthesized contents. */
6626 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
/* Rewind so the guest reads from the start. */
6632 lseek(fd
, 0, SEEK_SET
);
/* Not a faked entry: open the (possibly sysroot-remapped) real path. */
6637 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
6640 #define TIMER_MAGIC 0x0caf0000
6641 #define TIMER_MAGIC_MASK 0xffff0000
6643 /* Convert QEMU provided timer ID back to internal 16bit index format */
/*
 * Validate a guest-visible POSIX timer id: it must carry the
 * TIMER_MAGIC tag in its upper bits and index within g_posix_timers.
 * Returns the index, or -TARGET_EINVAL for malformed/out-of-range ids.
 * NOTE(review): the line stripping the magic from timerid is missing
 * from this extract.
 */
6644 static target_timer_t
get_timer_id(abi_long arg
)
6646 target_timer_t timerid
= arg
;
6648 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
6649 return -TARGET_EINVAL
;
6654 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
6655 return -TARGET_EINVAL
;
6661 /* do_syscall() should always have a single exit point at the end so
6662 that actions, such as logging of syscall results, can be performed.
6663 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
6664 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
6665 abi_long arg2
, abi_long arg3
, abi_long arg4
,
6666 abi_long arg5
, abi_long arg6
, abi_long arg7
,
6669 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
6675 #if defined(DEBUG_ERESTARTSYS)
6676 /* Debug-only code for exercising the syscall-restart code paths
6677 * in the per-architecture cpu main loops: restart every syscall
6678 * the guest makes once before letting it through.
6685 return -TARGET_ERESTARTSYS
;
6691 gemu_log("syscall %d", num
);
6694 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6697 case TARGET_NR_exit
:
6698 /* In old applications this may be used to implement _exit(2).
6699 However in threaded applictions it is used for thread termination,
6700 and _exit_group is used for application termination.
6701 Do thread termination if we have more then one thread. */
6703 if (block_signals()) {
6704 ret
= -TARGET_ERESTARTSYS
;
6708 if (CPU_NEXT(first_cpu
)) {
6712 /* Remove the CPU from the list. */
6713 QTAILQ_REMOVE(&cpus
, cpu
, node
);
6716 if (ts
->child_tidptr
) {
6717 put_user_u32(0, ts
->child_tidptr
);
6718 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
6722 object_unref(OBJECT(cpu
));
6724 rcu_unregister_thread();
6730 gdb_exit(cpu_env
, arg1
);
6732 ret
= 0; /* avoid warning */
6734 case TARGET_NR_read
:
6738 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
6740 ret
= get_errno(safe_read(arg1
, p
, arg3
));
6742 fd_trans_host_to_target_data(arg1
)) {
6743 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
6745 unlock_user(p
, arg2
, ret
);
6748 case TARGET_NR_write
:
6749 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
6751 ret
= get_errno(safe_write(arg1
, p
, arg3
));
6752 unlock_user(p
, arg2
, 0);
6754 #ifdef TARGET_NR_open
6755 case TARGET_NR_open
:
6756 if (!(p
= lock_user_string(arg1
)))
6758 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
6759 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
6761 fd_trans_unregister(ret
);
6762 unlock_user(p
, arg1
, 0);
6765 case TARGET_NR_openat
:
6766 if (!(p
= lock_user_string(arg2
)))
6768 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
6769 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
6771 fd_trans_unregister(ret
);
6772 unlock_user(p
, arg2
, 0);
6774 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6775 case TARGET_NR_name_to_handle_at
:
6776 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
6779 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6780 case TARGET_NR_open_by_handle_at
:
6781 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
6782 fd_trans_unregister(ret
);
6785 case TARGET_NR_close
:
6786 fd_trans_unregister(arg1
);
6787 ret
= get_errno(close(arg1
));
6792 #ifdef TARGET_NR_fork
6793 case TARGET_NR_fork
:
6794 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
6797 #ifdef TARGET_NR_waitpid
6798 case TARGET_NR_waitpid
:
6801 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
6802 if (!is_error(ret
) && arg2
&& ret
6803 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
6808 #ifdef TARGET_NR_waitid
6809 case TARGET_NR_waitid
:
6813 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
6814 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
6815 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
6817 host_to_target_siginfo(p
, &info
);
6818 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
6823 #ifdef TARGET_NR_creat /* not on alpha */
6824 case TARGET_NR_creat
:
6825 if (!(p
= lock_user_string(arg1
)))
6827 ret
= get_errno(creat(p
, arg2
));
6828 fd_trans_unregister(ret
);
6829 unlock_user(p
, arg1
, 0);
6832 #ifdef TARGET_NR_link
6833 case TARGET_NR_link
:
6836 p
= lock_user_string(arg1
);
6837 p2
= lock_user_string(arg2
);
6839 ret
= -TARGET_EFAULT
;
6841 ret
= get_errno(link(p
, p2
));
6842 unlock_user(p2
, arg2
, 0);
6843 unlock_user(p
, arg1
, 0);
6847 #if defined(TARGET_NR_linkat)
6848 case TARGET_NR_linkat
:
6853 p
= lock_user_string(arg2
);
6854 p2
= lock_user_string(arg4
);
6856 ret
= -TARGET_EFAULT
;
6858 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
6859 unlock_user(p
, arg2
, 0);
6860 unlock_user(p2
, arg4
, 0);
6864 #ifdef TARGET_NR_unlink
6865 case TARGET_NR_unlink
:
6866 if (!(p
= lock_user_string(arg1
)))
6868 ret
= get_errno(unlink(p
));
6869 unlock_user(p
, arg1
, 0);
6872 #if defined(TARGET_NR_unlinkat)
6873 case TARGET_NR_unlinkat
:
6874 if (!(p
= lock_user_string(arg2
)))
6876 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
6877 unlock_user(p
, arg2
, 0);
6880 case TARGET_NR_execve
:
6882 char **argp
, **envp
;
6885 abi_ulong guest_argp
;
6886 abi_ulong guest_envp
;
6893 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
6894 if (get_user_ual(addr
, gp
))
6902 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
6903 if (get_user_ual(addr
, gp
))
6910 argp
= alloca((argc
+ 1) * sizeof(void *));
6911 envp
= alloca((envc
+ 1) * sizeof(void *));
6913 for (gp
= guest_argp
, q
= argp
; gp
;
6914 gp
+= sizeof(abi_ulong
), q
++) {
6915 if (get_user_ual(addr
, gp
))
6919 if (!(*q
= lock_user_string(addr
)))
6921 total_size
+= strlen(*q
) + 1;
6925 for (gp
= guest_envp
, q
= envp
; gp
;
6926 gp
+= sizeof(abi_ulong
), q
++) {
6927 if (get_user_ual(addr
, gp
))
6931 if (!(*q
= lock_user_string(addr
)))
6933 total_size
+= strlen(*q
) + 1;
6937 if (!(p
= lock_user_string(arg1
)))
6939 /* Although execve() is not an interruptible syscall it is
6940 * a special case where we must use the safe_syscall wrapper:
6941 * if we allow a signal to happen before we make the host
6942 * syscall then we will 'lose' it, because at the point of
6943 * execve the process leaves QEMU's control. So we use the
6944 * safe syscall wrapper to ensure that we either take the
6945 * signal as a guest signal, or else it does not happen
6946 * before the execve completes and makes it the other
6947 * program's problem.
6949 ret
= get_errno(safe_execve(p
, argp
, envp
));
6950 unlock_user(p
, arg1
, 0);
6955 ret
= -TARGET_EFAULT
;
6958 for (gp
= guest_argp
, q
= argp
; *q
;
6959 gp
+= sizeof(abi_ulong
), q
++) {
6960 if (get_user_ual(addr
, gp
)
6963 unlock_user(*q
, addr
, 0);
6965 for (gp
= guest_envp
, q
= envp
; *q
;
6966 gp
+= sizeof(abi_ulong
), q
++) {
6967 if (get_user_ual(addr
, gp
)
6970 unlock_user(*q
, addr
, 0);
6974 case TARGET_NR_chdir
:
6975 if (!(p
= lock_user_string(arg1
)))
6977 ret
= get_errno(chdir(p
));
6978 unlock_user(p
, arg1
, 0);
6980 #ifdef TARGET_NR_time
6981 case TARGET_NR_time
:
6984 ret
= get_errno(time(&host_time
));
6987 && put_user_sal(host_time
, arg1
))
6992 #ifdef TARGET_NR_mknod
6993 case TARGET_NR_mknod
:
6994 if (!(p
= lock_user_string(arg1
)))
6996 ret
= get_errno(mknod(p
, arg2
, arg3
));
6997 unlock_user(p
, arg1
, 0);
7000 #if defined(TARGET_NR_mknodat)
7001 case TARGET_NR_mknodat
:
7002 if (!(p
= lock_user_string(arg2
)))
7004 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
7005 unlock_user(p
, arg2
, 0);
7008 #ifdef TARGET_NR_chmod
7009 case TARGET_NR_chmod
:
7010 if (!(p
= lock_user_string(arg1
)))
7012 ret
= get_errno(chmod(p
, arg2
));
7013 unlock_user(p
, arg1
, 0);
7016 #ifdef TARGET_NR_break
7017 case TARGET_NR_break
:
7020 #ifdef TARGET_NR_oldstat
7021 case TARGET_NR_oldstat
:
7024 case TARGET_NR_lseek
:
7025 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
7027 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7028 /* Alpha specific */
7029 case TARGET_NR_getxpid
:
7030 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
7031 ret
= get_errno(getpid());
7034 #ifdef TARGET_NR_getpid
7035 case TARGET_NR_getpid
:
7036 ret
= get_errno(getpid());
7039 case TARGET_NR_mount
:
7041 /* need to look at the data field */
7045 p
= lock_user_string(arg1
);
7053 p2
= lock_user_string(arg2
);
7056 unlock_user(p
, arg1
, 0);
7062 p3
= lock_user_string(arg3
);
7065 unlock_user(p
, arg1
, 0);
7067 unlock_user(p2
, arg2
, 0);
7074 /* FIXME - arg5 should be locked, but it isn't clear how to
7075 * do that since it's not guaranteed to be a NULL-terminated
7079 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
7081 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
7083 ret
= get_errno(ret
);
7086 unlock_user(p
, arg1
, 0);
7088 unlock_user(p2
, arg2
, 0);
7090 unlock_user(p3
, arg3
, 0);
7094 #ifdef TARGET_NR_umount
7095 case TARGET_NR_umount
:
7096 if (!(p
= lock_user_string(arg1
)))
7098 ret
= get_errno(umount(p
));
7099 unlock_user(p
, arg1
, 0);
7102 #ifdef TARGET_NR_stime /* not on alpha */
7103 case TARGET_NR_stime
:
7106 if (get_user_sal(host_time
, arg1
))
7108 ret
= get_errno(stime(&host_time
));
7112 case TARGET_NR_ptrace
:
7114 #ifdef TARGET_NR_alarm /* not on alpha */
7115 case TARGET_NR_alarm
:
7119 #ifdef TARGET_NR_oldfstat
7120 case TARGET_NR_oldfstat
:
7123 #ifdef TARGET_NR_pause /* not on alpha */
7124 case TARGET_NR_pause
:
7125 if (!block_signals()) {
7126 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
7128 ret
= -TARGET_EINTR
;
7131 #ifdef TARGET_NR_utime
7132 case TARGET_NR_utime
:
7134 struct utimbuf tbuf
, *host_tbuf
;
7135 struct target_utimbuf
*target_tbuf
;
7137 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
7139 tbuf
.actime
= tswapal(target_tbuf
->actime
);
7140 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
7141 unlock_user_struct(target_tbuf
, arg2
, 0);
7146 if (!(p
= lock_user_string(arg1
)))
7148 ret
= get_errno(utime(p
, host_tbuf
));
7149 unlock_user(p
, arg1
, 0);
7153 #ifdef TARGET_NR_utimes
7154 case TARGET_NR_utimes
:
7156 struct timeval
*tvp
, tv
[2];
7158 if (copy_from_user_timeval(&tv
[0], arg2
)
7159 || copy_from_user_timeval(&tv
[1],
7160 arg2
+ sizeof(struct target_timeval
)))
7166 if (!(p
= lock_user_string(arg1
)))
7168 ret
= get_errno(utimes(p
, tvp
));
7169 unlock_user(p
, arg1
, 0);
7173 #if defined(TARGET_NR_futimesat)
7174 case TARGET_NR_futimesat
:
7176 struct timeval
*tvp
, tv
[2];
7178 if (copy_from_user_timeval(&tv
[0], arg3
)
7179 || copy_from_user_timeval(&tv
[1],
7180 arg3
+ sizeof(struct target_timeval
)))
7186 if (!(p
= lock_user_string(arg2
)))
7188 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
7189 unlock_user(p
, arg2
, 0);
7193 #ifdef TARGET_NR_stty
7194 case TARGET_NR_stty
:
7197 #ifdef TARGET_NR_gtty
7198 case TARGET_NR_gtty
:
7201 #ifdef TARGET_NR_access
7202 case TARGET_NR_access
:
7203 if (!(p
= lock_user_string(arg1
)))
7205 ret
= get_errno(access(path(p
), arg2
));
7206 unlock_user(p
, arg1
, 0);
7209 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7210 case TARGET_NR_faccessat
:
7211 if (!(p
= lock_user_string(arg2
)))
7213 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
7214 unlock_user(p
, arg2
, 0);
7217 #ifdef TARGET_NR_nice /* not on alpha */
7218 case TARGET_NR_nice
:
7219 ret
= get_errno(nice(arg1
));
7222 #ifdef TARGET_NR_ftime
7223 case TARGET_NR_ftime
:
7226 case TARGET_NR_sync
:
7230 case TARGET_NR_kill
:
7231 ret
= get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
7233 #ifdef TARGET_NR_rename
7234 case TARGET_NR_rename
:
7237 p
= lock_user_string(arg1
);
7238 p2
= lock_user_string(arg2
);
7240 ret
= -TARGET_EFAULT
;
7242 ret
= get_errno(rename(p
, p2
));
7243 unlock_user(p2
, arg2
, 0);
7244 unlock_user(p
, arg1
, 0);
7248 #if defined(TARGET_NR_renameat)
7249 case TARGET_NR_renameat
:
7252 p
= lock_user_string(arg2
);
7253 p2
= lock_user_string(arg4
);
7255 ret
= -TARGET_EFAULT
;
7257 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
7258 unlock_user(p2
, arg4
, 0);
7259 unlock_user(p
, arg2
, 0);
7263 #ifdef TARGET_NR_mkdir
7264 case TARGET_NR_mkdir
:
7265 if (!(p
= lock_user_string(arg1
)))
7267 ret
= get_errno(mkdir(p
, arg2
));
7268 unlock_user(p
, arg1
, 0);
7271 #if defined(TARGET_NR_mkdirat)
7272 case TARGET_NR_mkdirat
:
7273 if (!(p
= lock_user_string(arg2
)))
7275 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
7276 unlock_user(p
, arg2
, 0);
7279 #ifdef TARGET_NR_rmdir
7280 case TARGET_NR_rmdir
:
7281 if (!(p
= lock_user_string(arg1
)))
7283 ret
= get_errno(rmdir(p
));
7284 unlock_user(p
, arg1
, 0);
7288 ret
= get_errno(dup(arg1
));
7290 fd_trans_dup(arg1
, ret
);
7293 #ifdef TARGET_NR_pipe
7294 case TARGET_NR_pipe
:
7295 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
7298 #ifdef TARGET_NR_pipe2
7299 case TARGET_NR_pipe2
:
7300 ret
= do_pipe(cpu_env
, arg1
,
7301 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
7304 case TARGET_NR_times
:
7306 struct target_tms
*tmsp
;
7308 ret
= get_errno(times(&tms
));
7310 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
7313 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
7314 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
7315 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
7316 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
7319 ret
= host_to_target_clock_t(ret
);
7322 #ifdef TARGET_NR_prof
7323 case TARGET_NR_prof
:
7326 #ifdef TARGET_NR_signal
7327 case TARGET_NR_signal
:
7330 case TARGET_NR_acct
:
7332 ret
= get_errno(acct(NULL
));
7334 if (!(p
= lock_user_string(arg1
)))
7336 ret
= get_errno(acct(path(p
)));
7337 unlock_user(p
, arg1
, 0);
7340 #ifdef TARGET_NR_umount2
7341 case TARGET_NR_umount2
:
7342 if (!(p
= lock_user_string(arg1
)))
7344 ret
= get_errno(umount2(p
, arg2
));
7345 unlock_user(p
, arg1
, 0);
7348 #ifdef TARGET_NR_lock
7349 case TARGET_NR_lock
:
7352 case TARGET_NR_ioctl
:
7353 ret
= do_ioctl(arg1
, arg2
, arg3
);
7355 case TARGET_NR_fcntl
:
7356 ret
= do_fcntl(arg1
, arg2
, arg3
);
7358 #ifdef TARGET_NR_mpx
7362 case TARGET_NR_setpgid
:
7363 ret
= get_errno(setpgid(arg1
, arg2
));
7365 #ifdef TARGET_NR_ulimit
7366 case TARGET_NR_ulimit
:
7369 #ifdef TARGET_NR_oldolduname
7370 case TARGET_NR_oldolduname
:
7373 case TARGET_NR_umask
:
7374 ret
= get_errno(umask(arg1
));
7376 case TARGET_NR_chroot
:
7377 if (!(p
= lock_user_string(arg1
)))
7379 ret
= get_errno(chroot(p
));
7380 unlock_user(p
, arg1
, 0);
7382 #ifdef TARGET_NR_ustat
7383 case TARGET_NR_ustat
:
7386 #ifdef TARGET_NR_dup2
7387 case TARGET_NR_dup2
:
7388 ret
= get_errno(dup2(arg1
, arg2
));
7390 fd_trans_dup(arg1
, arg2
);
7394 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7395 case TARGET_NR_dup3
:
7396 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
7398 fd_trans_dup(arg1
, arg2
);
7402 #ifdef TARGET_NR_getppid /* not on alpha */
7403 case TARGET_NR_getppid
:
7404 ret
= get_errno(getppid());
7407 #ifdef TARGET_NR_getpgrp
7408 case TARGET_NR_getpgrp
:
7409 ret
= get_errno(getpgrp());
7412 case TARGET_NR_setsid
:
7413 ret
= get_errno(setsid());
7415 #ifdef TARGET_NR_sigaction
7416 case TARGET_NR_sigaction
:
7418 #if defined(TARGET_ALPHA)
7419 struct target_sigaction act
, oact
, *pact
= 0;
7420 struct target_old_sigaction
*old_act
;
7422 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
7424 act
._sa_handler
= old_act
->_sa_handler
;
7425 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
7426 act
.sa_flags
= old_act
->sa_flags
;
7427 act
.sa_restorer
= 0;
7428 unlock_user_struct(old_act
, arg2
, 0);
7431 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7432 if (!is_error(ret
) && arg3
) {
7433 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
7435 old_act
->_sa_handler
= oact
._sa_handler
;
7436 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
7437 old_act
->sa_flags
= oact
.sa_flags
;
7438 unlock_user_struct(old_act
, arg3
, 1);
7440 #elif defined(TARGET_MIPS)
7441 struct target_sigaction act
, oact
, *pact
, *old_act
;
7444 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
7446 act
._sa_handler
= old_act
->_sa_handler
;
7447 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
7448 act
.sa_flags
= old_act
->sa_flags
;
7449 unlock_user_struct(old_act
, arg2
, 0);
7455 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7457 if (!is_error(ret
) && arg3
) {
7458 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
7460 old_act
->_sa_handler
= oact
._sa_handler
;
7461 old_act
->sa_flags
= oact
.sa_flags
;
7462 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
7463 old_act
->sa_mask
.sig
[1] = 0;
7464 old_act
->sa_mask
.sig
[2] = 0;
7465 old_act
->sa_mask
.sig
[3] = 0;
7466 unlock_user_struct(old_act
, arg3
, 1);
7469 struct target_old_sigaction
*old_act
;
7470 struct target_sigaction act
, oact
, *pact
;
7472 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
7474 act
._sa_handler
= old_act
->_sa_handler
;
7475 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
7476 act
.sa_flags
= old_act
->sa_flags
;
7477 act
.sa_restorer
= old_act
->sa_restorer
;
7478 unlock_user_struct(old_act
, arg2
, 0);
7483 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7484 if (!is_error(ret
) && arg3
) {
7485 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
7487 old_act
->_sa_handler
= oact
._sa_handler
;
7488 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
7489 old_act
->sa_flags
= oact
.sa_flags
;
7490 old_act
->sa_restorer
= oact
.sa_restorer
;
7491 unlock_user_struct(old_act
, arg3
, 1);
7497 case TARGET_NR_rt_sigaction
:
7499 #if defined(TARGET_ALPHA)
7500 struct target_sigaction act
, oact
, *pact
= 0;
7501 struct target_rt_sigaction
*rt_act
;
7502 /* ??? arg4 == sizeof(sigset_t). */
7504 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
7506 act
._sa_handler
= rt_act
->_sa_handler
;
7507 act
.sa_mask
= rt_act
->sa_mask
;
7508 act
.sa_flags
= rt_act
->sa_flags
;
7509 act
.sa_restorer
= arg5
;
7510 unlock_user_struct(rt_act
, arg2
, 0);
7513 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7514 if (!is_error(ret
) && arg3
) {
7515 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
7517 rt_act
->_sa_handler
= oact
._sa_handler
;
7518 rt_act
->sa_mask
= oact
.sa_mask
;
7519 rt_act
->sa_flags
= oact
.sa_flags
;
7520 unlock_user_struct(rt_act
, arg3
, 1);
7523 struct target_sigaction
*act
;
7524 struct target_sigaction
*oact
;
7527 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
7532 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
7533 ret
= -TARGET_EFAULT
;
7534 goto rt_sigaction_fail
;
7538 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
7541 unlock_user_struct(act
, arg2
, 0);
7543 unlock_user_struct(oact
, arg3
, 1);
7547 #ifdef TARGET_NR_sgetmask /* not on alpha */
7548 case TARGET_NR_sgetmask
:
7551 abi_ulong target_set
;
7552 ret
= do_sigprocmask(0, NULL
, &cur_set
);
7554 host_to_target_old_sigset(&target_set
, &cur_set
);
7560 #ifdef TARGET_NR_ssetmask /* not on alpha */
7561 case TARGET_NR_ssetmask
:
7563 sigset_t set
, oset
, cur_set
;
7564 abi_ulong target_set
= arg1
;
7565 /* We only have one word of the new mask so we must read
7566 * the rest of it with do_sigprocmask() and OR in this word.
7567 * We are guaranteed that a do_sigprocmask() that only queries
7568 * the signal mask will not fail.
7570 ret
= do_sigprocmask(0, NULL
, &cur_set
);
7572 target_to_host_old_sigset(&set
, &target_set
);
7573 sigorset(&set
, &set
, &cur_set
);
7574 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
7576 host_to_target_old_sigset(&target_set
, &oset
);
7582 #ifdef TARGET_NR_sigprocmask
7583 case TARGET_NR_sigprocmask
:
7585 #if defined(TARGET_ALPHA)
7586 sigset_t set
, oldset
;
7591 case TARGET_SIG_BLOCK
:
7594 case TARGET_SIG_UNBLOCK
:
7597 case TARGET_SIG_SETMASK
:
7601 ret
= -TARGET_EINVAL
;
7605 target_to_host_old_sigset(&set
, &mask
);
7607 ret
= do_sigprocmask(how
, &set
, &oldset
);
7608 if (!is_error(ret
)) {
7609 host_to_target_old_sigset(&mask
, &oldset
);
7611 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
7614 sigset_t set
, oldset
, *set_ptr
;
7619 case TARGET_SIG_BLOCK
:
7622 case TARGET_SIG_UNBLOCK
:
7625 case TARGET_SIG_SETMASK
:
7629 ret
= -TARGET_EINVAL
;
7632 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
7634 target_to_host_old_sigset(&set
, p
);
7635 unlock_user(p
, arg2
, 0);
7641 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
7642 if (!is_error(ret
) && arg3
) {
7643 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
7645 host_to_target_old_sigset(p
, &oldset
);
7646 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
7652 case TARGET_NR_rt_sigprocmask
:
7655 sigset_t set
, oldset
, *set_ptr
;
7659 case TARGET_SIG_BLOCK
:
7662 case TARGET_SIG_UNBLOCK
:
7665 case TARGET_SIG_SETMASK
:
7669 ret
= -TARGET_EINVAL
;
7672 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
7674 target_to_host_sigset(&set
, p
);
7675 unlock_user(p
, arg2
, 0);
7681 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
7682 if (!is_error(ret
) && arg3
) {
7683 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
7685 host_to_target_sigset(p
, &oldset
);
7686 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
7690 #ifdef TARGET_NR_sigpending
7691 case TARGET_NR_sigpending
:
7694 ret
= get_errno(sigpending(&set
));
7695 if (!is_error(ret
)) {
7696 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
7698 host_to_target_old_sigset(p
, &set
);
7699 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
7704 case TARGET_NR_rt_sigpending
:
7707 ret
= get_errno(sigpending(&set
));
7708 if (!is_error(ret
)) {
7709 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
7711 host_to_target_sigset(p
, &set
);
7712 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
7716 #ifdef TARGET_NR_sigsuspend
7717 case TARGET_NR_sigsuspend
:
7719 TaskState
*ts
= cpu
->opaque
;
7720 #if defined(TARGET_ALPHA)
7721 abi_ulong mask
= arg1
;
7722 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
7724 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
7726 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
7727 unlock_user(p
, arg1
, 0);
7729 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
7731 if (ret
!= -TARGET_ERESTARTSYS
) {
7732 ts
->in_sigsuspend
= 1;
7737 case TARGET_NR_rt_sigsuspend
:
7739 TaskState
*ts
= cpu
->opaque
;
7740 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
7742 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
7743 unlock_user(p
, arg1
, 0);
7744 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
7746 if (ret
!= -TARGET_ERESTARTSYS
) {
7747 ts
->in_sigsuspend
= 1;
7751 case TARGET_NR_rt_sigtimedwait
:
7754 struct timespec uts
, *puts
;
7757 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
7759 target_to_host_sigset(&set
, p
);
7760 unlock_user(p
, arg1
, 0);
7763 target_to_host_timespec(puts
, arg3
);
7767 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
7769 if (!is_error(ret
)) {
7771 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
7776 host_to_target_siginfo(p
, &uinfo
);
7777 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
7779 ret
= host_to_target_signal(ret
);
7783 case TARGET_NR_rt_sigqueueinfo
:
7786 if (!(p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_sigset_t
), 1)))
7788 target_to_host_siginfo(&uinfo
, p
);
7789 unlock_user(p
, arg1
, 0);
7790 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
7793 #ifdef TARGET_NR_sigreturn
7794 case TARGET_NR_sigreturn
:
7795 if (block_signals()) {
7796 ret
= -TARGET_ERESTARTSYS
;
7798 ret
= do_sigreturn(cpu_env
);
7802 case TARGET_NR_rt_sigreturn
:
7803 if (block_signals()) {
7804 ret
= -TARGET_ERESTARTSYS
;
7806 ret
= do_rt_sigreturn(cpu_env
);
7809 case TARGET_NR_sethostname
:
7810 if (!(p
= lock_user_string(arg1
)))
7812 ret
= get_errno(sethostname(p
, arg2
));
7813 unlock_user(p
, arg1
, 0);
7815 case TARGET_NR_setrlimit
:
7817 int resource
= target_to_host_resource(arg1
);
7818 struct target_rlimit
*target_rlim
;
7820 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
7822 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
7823 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
7824 unlock_user_struct(target_rlim
, arg2
, 0);
7825 ret
= get_errno(setrlimit(resource
, &rlim
));
7828 case TARGET_NR_getrlimit
:
7830 int resource
= target_to_host_resource(arg1
);
7831 struct target_rlimit
*target_rlim
;
7834 ret
= get_errno(getrlimit(resource
, &rlim
));
7835 if (!is_error(ret
)) {
7836 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
7838 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
7839 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
7840 unlock_user_struct(target_rlim
, arg2
, 1);
7844 case TARGET_NR_getrusage
:
7846 struct rusage rusage
;
7847 ret
= get_errno(getrusage(arg1
, &rusage
));
7848 if (!is_error(ret
)) {
7849 ret
= host_to_target_rusage(arg2
, &rusage
);
7853 case TARGET_NR_gettimeofday
:
7856 ret
= get_errno(gettimeofday(&tv
, NULL
));
7857 if (!is_error(ret
)) {
7858 if (copy_to_user_timeval(arg1
, &tv
))
7863 case TARGET_NR_settimeofday
:
7865 struct timeval tv
, *ptv
= NULL
;
7866 struct timezone tz
, *ptz
= NULL
;
7869 if (copy_from_user_timeval(&tv
, arg1
)) {
7876 if (copy_from_user_timezone(&tz
, arg2
)) {
7882 ret
= get_errno(settimeofday(ptv
, ptz
));
7885 #if defined(TARGET_NR_select)
7886 case TARGET_NR_select
:
7887 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
7888 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
7891 struct target_sel_arg_struct
*sel
;
7892 abi_ulong inp
, outp
, exp
, tvp
;
7895 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1))
7897 nsel
= tswapal(sel
->n
);
7898 inp
= tswapal(sel
->inp
);
7899 outp
= tswapal(sel
->outp
);
7900 exp
= tswapal(sel
->exp
);
7901 tvp
= tswapal(sel
->tvp
);
7902 unlock_user_struct(sel
, arg1
, 0);
7903 ret
= do_select(nsel
, inp
, outp
, exp
, tvp
);
7908 #ifdef TARGET_NR_pselect6
7909 case TARGET_NR_pselect6
:
7911 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
7912 fd_set rfds
, wfds
, efds
;
7913 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
7914 struct timespec ts
, *ts_ptr
;
7917 * The 6th arg is actually two args smashed together,
7918 * so we cannot use the C library.
7926 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
7927 target_sigset_t
*target_sigset
;
7935 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
7939 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
7943 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
7949 * This takes a timespec, and not a timeval, so we cannot
7950 * use the do_select() helper ...
7953 if (target_to_host_timespec(&ts
, ts_addr
)) {
7961 /* Extract the two packed args for the sigset */
7964 sig
.size
= SIGSET_T_SIZE
;
7966 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
7970 arg_sigset
= tswapal(arg7
[0]);
7971 arg_sigsize
= tswapal(arg7
[1]);
7972 unlock_user(arg7
, arg6
, 0);
7976 if (arg_sigsize
!= sizeof(*target_sigset
)) {
7977 /* Like the kernel, we enforce correct size sigsets */
7978 ret
= -TARGET_EINVAL
;
7981 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
7982 sizeof(*target_sigset
), 1);
7983 if (!target_sigset
) {
7986 target_to_host_sigset(&set
, target_sigset
);
7987 unlock_user(target_sigset
, arg_sigset
, 0);
7995 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
7998 if (!is_error(ret
)) {
7999 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
8001 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
8003 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
8006 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
8012 #ifdef TARGET_NR_symlink
8013 case TARGET_NR_symlink
:
8016 p
= lock_user_string(arg1
);
8017 p2
= lock_user_string(arg2
);
8019 ret
= -TARGET_EFAULT
;
8021 ret
= get_errno(symlink(p
, p2
));
8022 unlock_user(p2
, arg2
, 0);
8023 unlock_user(p
, arg1
, 0);
8027 #if defined(TARGET_NR_symlinkat)
8028 case TARGET_NR_symlinkat
:
8031 p
= lock_user_string(arg1
);
8032 p2
= lock_user_string(arg3
);
8034 ret
= -TARGET_EFAULT
;
8036 ret
= get_errno(symlinkat(p
, arg2
, p2
));
8037 unlock_user(p2
, arg3
, 0);
8038 unlock_user(p
, arg1
, 0);
8042 #ifdef TARGET_NR_oldlstat
8043 case TARGET_NR_oldlstat
:
8046 #ifdef TARGET_NR_readlink
8047 case TARGET_NR_readlink
:
8050 p
= lock_user_string(arg1
);
8051 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8053 ret
= -TARGET_EFAULT
;
8055 /* Short circuit this for the magic exe check. */
8056 ret
= -TARGET_EINVAL
;
8057 } else if (is_proc_myself((const char *)p
, "exe")) {
8058 char real
[PATH_MAX
], *temp
;
8059 temp
= realpath(exec_path
, real
);
8060 /* Return value is # of bytes that we wrote to the buffer. */
8062 ret
= get_errno(-1);
8064 /* Don't worry about sign mismatch as earlier mapping
8065 * logic would have thrown a bad address error. */
8066 ret
= MIN(strlen(real
), arg3
);
8067 /* We cannot NUL terminate the string. */
8068 memcpy(p2
, real
, ret
);
8071 ret
= get_errno(readlink(path(p
), p2
, arg3
));
8073 unlock_user(p2
, arg2
, ret
);
8074 unlock_user(p
, arg1
, 0);
8078 #if defined(TARGET_NR_readlinkat)
8079 case TARGET_NR_readlinkat
:
8082 p
= lock_user_string(arg2
);
8083 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
8085 ret
= -TARGET_EFAULT
;
8086 } else if (is_proc_myself((const char *)p
, "exe")) {
8087 char real
[PATH_MAX
], *temp
;
8088 temp
= realpath(exec_path
, real
);
8089 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
8090 snprintf((char *)p2
, arg4
, "%s", real
);
8092 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
8094 unlock_user(p2
, arg3
, ret
);
8095 unlock_user(p
, arg2
, 0);
8099 #ifdef TARGET_NR_uselib
8100 case TARGET_NR_uselib
:
8103 #ifdef TARGET_NR_swapon
8104 case TARGET_NR_swapon
:
8105 if (!(p
= lock_user_string(arg1
)))
8107 ret
= get_errno(swapon(p
, arg2
));
8108 unlock_user(p
, arg1
, 0);
8111 case TARGET_NR_reboot
:
8112 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
8113 /* arg4 must be ignored in all other cases */
8114 p
= lock_user_string(arg4
);
8118 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
8119 unlock_user(p
, arg4
, 0);
8121 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
8124 #ifdef TARGET_NR_readdir
8125 case TARGET_NR_readdir
:
8128 #ifdef TARGET_NR_mmap
8129 case TARGET_NR_mmap
:
8130 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8131 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8132 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8133 || defined(TARGET_S390X)
8136 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
8137 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
8145 unlock_user(v
, arg1
, 0);
8146 ret
= get_errno(target_mmap(v1
, v2
, v3
,
8147 target_to_host_bitmask(v4
, mmap_flags_tbl
),
8151 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
8152 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
8158 #ifdef TARGET_NR_mmap2
8159 case TARGET_NR_mmap2
:
8161 #define MMAP_SHIFT 12
8163 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
8164 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
8166 arg6
<< MMAP_SHIFT
));
8169 case TARGET_NR_munmap
:
8170 ret
= get_errno(target_munmap(arg1
, arg2
));
8172 case TARGET_NR_mprotect
:
8174 TaskState
*ts
= cpu
->opaque
;
8175 /* Special hack to detect libc making the stack executable. */
8176 if ((arg3
& PROT_GROWSDOWN
)
8177 && arg1
>= ts
->info
->stack_limit
8178 && arg1
<= ts
->info
->start_stack
) {
8179 arg3
&= ~PROT_GROWSDOWN
;
8180 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
8181 arg1
= ts
->info
->stack_limit
;
8184 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
8186 #ifdef TARGET_NR_mremap
8187 case TARGET_NR_mremap
:
8188 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
8191 /* ??? msync/mlock/munlock are broken for softmmu. */
8192 #ifdef TARGET_NR_msync
8193 case TARGET_NR_msync
:
8194 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
8197 #ifdef TARGET_NR_mlock
8198 case TARGET_NR_mlock
:
8199 ret
= get_errno(mlock(g2h(arg1
), arg2
));
8202 #ifdef TARGET_NR_munlock
8203 case TARGET_NR_munlock
:
8204 ret
= get_errno(munlock(g2h(arg1
), arg2
));
8207 #ifdef TARGET_NR_mlockall
8208 case TARGET_NR_mlockall
:
8209 ret
= get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
8212 #ifdef TARGET_NR_munlockall
8213 case TARGET_NR_munlockall
:
8214 ret
= get_errno(munlockall());
8217 case TARGET_NR_truncate
:
8218 if (!(p
= lock_user_string(arg1
)))
8220 ret
= get_errno(truncate(p
, arg2
));
8221 unlock_user(p
, arg1
, 0);
8223 case TARGET_NR_ftruncate
:
8224 ret
= get_errno(ftruncate(arg1
, arg2
));
8226 case TARGET_NR_fchmod
:
8227 ret
= get_errno(fchmod(arg1
, arg2
));
8229 #if defined(TARGET_NR_fchmodat)
8230 case TARGET_NR_fchmodat
:
8231 if (!(p
= lock_user_string(arg2
)))
8233 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
8234 unlock_user(p
, arg2
, 0);
8237 case TARGET_NR_getpriority
:
8238 /* Note that negative values are valid for getpriority, so we must
8239 differentiate based on errno settings. */
8241 ret
= getpriority(arg1
, arg2
);
8242 if (ret
== -1 && errno
!= 0) {
8243 ret
= -host_to_target_errno(errno
);
8247 /* Return value is the unbiased priority. Signal no error. */
8248 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
8250 /* Return value is a biased priority to avoid negative numbers. */
8254 case TARGET_NR_setpriority
:
8255 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
8257 #ifdef TARGET_NR_profil
8258 case TARGET_NR_profil
:
8261 case TARGET_NR_statfs
:
8262 if (!(p
= lock_user_string(arg1
)))
8264 ret
= get_errno(statfs(path(p
), &stfs
));
8265 unlock_user(p
, arg1
, 0);
8267 if (!is_error(ret
)) {
8268 struct target_statfs
*target_stfs
;
8270 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
8272 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
8273 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
8274 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
8275 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
8276 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
8277 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
8278 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
8279 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
8280 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
8281 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
8282 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
8283 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
8284 unlock_user_struct(target_stfs
, arg2
, 1);
8287 case TARGET_NR_fstatfs
:
8288 ret
= get_errno(fstatfs(arg1
, &stfs
));
8289 goto convert_statfs
;
8290 #ifdef TARGET_NR_statfs64
8291 case TARGET_NR_statfs64
:
8292 if (!(p
= lock_user_string(arg1
)))
8294 ret
= get_errno(statfs(path(p
), &stfs
));
8295 unlock_user(p
, arg1
, 0);
8297 if (!is_error(ret
)) {
8298 struct target_statfs64
*target_stfs
;
8300 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
8302 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
8303 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
8304 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
8305 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
8306 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
8307 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
8308 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
8309 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
8310 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
8311 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
8312 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
8313 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
8314 unlock_user_struct(target_stfs
, arg3
, 1);
8317 case TARGET_NR_fstatfs64
:
8318 ret
= get_errno(fstatfs(arg1
, &stfs
));
8319 goto convert_statfs64
;
8321 #ifdef TARGET_NR_ioperm
8322 case TARGET_NR_ioperm
:
8325 #ifdef TARGET_NR_socketcall
8326 case TARGET_NR_socketcall
:
8327 ret
= do_socketcall(arg1
, arg2
);
8330 #ifdef TARGET_NR_accept
8331 case TARGET_NR_accept
:
8332 ret
= do_accept4(arg1
, arg2
, arg3
, 0);
8335 #ifdef TARGET_NR_accept4
8336 case TARGET_NR_accept4
:
8337 ret
= do_accept4(arg1
, arg2
, arg3
, arg4
);
8340 #ifdef TARGET_NR_bind
8341 case TARGET_NR_bind
:
8342 ret
= do_bind(arg1
, arg2
, arg3
);
8345 #ifdef TARGET_NR_connect
8346 case TARGET_NR_connect
:
8347 ret
= do_connect(arg1
, arg2
, arg3
);
8350 #ifdef TARGET_NR_getpeername
8351 case TARGET_NR_getpeername
:
8352 ret
= do_getpeername(arg1
, arg2
, arg3
);
8355 #ifdef TARGET_NR_getsockname
8356 case TARGET_NR_getsockname
:
8357 ret
= do_getsockname(arg1
, arg2
, arg3
);
8360 #ifdef TARGET_NR_getsockopt
8361 case TARGET_NR_getsockopt
:
8362 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
8365 #ifdef TARGET_NR_listen
8366 case TARGET_NR_listen
:
8367 ret
= get_errno(listen(arg1
, arg2
));
8370 #ifdef TARGET_NR_recv
8371 case TARGET_NR_recv
:
8372 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
8375 #ifdef TARGET_NR_recvfrom
8376 case TARGET_NR_recvfrom
:
8377 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8380 #ifdef TARGET_NR_recvmsg
8381 case TARGET_NR_recvmsg
:
8382 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
8385 #ifdef TARGET_NR_send
8386 case TARGET_NR_send
:
8387 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
8390 #ifdef TARGET_NR_sendmsg
8391 case TARGET_NR_sendmsg
:
8392 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
8395 #ifdef TARGET_NR_sendmmsg
8396 case TARGET_NR_sendmmsg
:
8397 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
8399 case TARGET_NR_recvmmsg
:
8400 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
8403 #ifdef TARGET_NR_sendto
8404 case TARGET_NR_sendto
:
8405 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8408 #ifdef TARGET_NR_shutdown
8409 case TARGET_NR_shutdown
:
8410 ret
= get_errno(shutdown(arg1
, arg2
));
8413 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8414 case TARGET_NR_getrandom
:
8415 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
8419 ret
= get_errno(getrandom(p
, arg2
, arg3
));
8420 unlock_user(p
, arg1
, ret
);
8423 #ifdef TARGET_NR_socket
8424 case TARGET_NR_socket
:
8425 ret
= do_socket(arg1
, arg2
, arg3
);
8426 fd_trans_unregister(ret
);
8429 #ifdef TARGET_NR_socketpair
8430 case TARGET_NR_socketpair
:
8431 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
8434 #ifdef TARGET_NR_setsockopt
8435 case TARGET_NR_setsockopt
:
8436 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
8440 case TARGET_NR_syslog
:
8441 if (!(p
= lock_user_string(arg2
)))
8443 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
8444 unlock_user(p
, arg2
, 0);
8447 case TARGET_NR_setitimer
:
8449 struct itimerval value
, ovalue
, *pvalue
;
8453 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
8454 || copy_from_user_timeval(&pvalue
->it_value
,
8455 arg2
+ sizeof(struct target_timeval
)))
8460 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
8461 if (!is_error(ret
) && arg3
) {
8462 if (copy_to_user_timeval(arg3
,
8463 &ovalue
.it_interval
)
8464 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
8470 case TARGET_NR_getitimer
:
8472 struct itimerval value
;
8474 ret
= get_errno(getitimer(arg1
, &value
));
8475 if (!is_error(ret
) && arg2
) {
8476 if (copy_to_user_timeval(arg2
,
8478 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
8484 #ifdef TARGET_NR_stat
8485 case TARGET_NR_stat
:
8486 if (!(p
= lock_user_string(arg1
)))
8488 ret
= get_errno(stat(path(p
), &st
));
8489 unlock_user(p
, arg1
, 0);
8492 #ifdef TARGET_NR_lstat
8493 case TARGET_NR_lstat
:
8494 if (!(p
= lock_user_string(arg1
)))
8496 ret
= get_errno(lstat(path(p
), &st
));
8497 unlock_user(p
, arg1
, 0);
8500 case TARGET_NR_fstat
:
8502 ret
= get_errno(fstat(arg1
, &st
));
8503 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
8506 if (!is_error(ret
)) {
8507 struct target_stat
*target_st
;
8509 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
8511 memset(target_st
, 0, sizeof(*target_st
));
8512 __put_user(st
.st_dev
, &target_st
->st_dev
);
8513 __put_user(st
.st_ino
, &target_st
->st_ino
);
8514 __put_user(st
.st_mode
, &target_st
->st_mode
);
8515 __put_user(st
.st_uid
, &target_st
->st_uid
);
8516 __put_user(st
.st_gid
, &target_st
->st_gid
);
8517 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
8518 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
8519 __put_user(st
.st_size
, &target_st
->st_size
);
8520 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
8521 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
8522 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
8523 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
8524 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
8525 unlock_user_struct(target_st
, arg2
, 1);
8529 #ifdef TARGET_NR_olduname
8530 case TARGET_NR_olduname
:
8533 #ifdef TARGET_NR_iopl
8534 case TARGET_NR_iopl
:
8537 case TARGET_NR_vhangup
:
8538 ret
= get_errno(vhangup());
8540 #ifdef TARGET_NR_idle
8541 case TARGET_NR_idle
:
8544 #ifdef TARGET_NR_syscall
8545 case TARGET_NR_syscall
:
8546 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
8547 arg6
, arg7
, arg8
, 0);
8550 case TARGET_NR_wait4
:
8553 abi_long status_ptr
= arg2
;
8554 struct rusage rusage
, *rusage_ptr
;
8555 abi_ulong target_rusage
= arg4
;
8556 abi_long rusage_err
;
8558 rusage_ptr
= &rusage
;
8561 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
8562 if (!is_error(ret
)) {
8563 if (status_ptr
&& ret
) {
8564 status
= host_to_target_waitstatus(status
);
8565 if (put_user_s32(status
, status_ptr
))
8568 if (target_rusage
) {
8569 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
8577 #ifdef TARGET_NR_swapoff
8578 case TARGET_NR_swapoff
:
8579 if (!(p
= lock_user_string(arg1
)))
8581 ret
= get_errno(swapoff(p
));
8582 unlock_user(p
, arg1
, 0);
8585 case TARGET_NR_sysinfo
:
8587 struct target_sysinfo
*target_value
;
8588 struct sysinfo value
;
8589 ret
= get_errno(sysinfo(&value
));
8590 if (!is_error(ret
) && arg1
)
8592 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
8594 __put_user(value
.uptime
, &target_value
->uptime
);
8595 __put_user(value
.loads
[0], &target_value
->loads
[0]);
8596 __put_user(value
.loads
[1], &target_value
->loads
[1]);
8597 __put_user(value
.loads
[2], &target_value
->loads
[2]);
8598 __put_user(value
.totalram
, &target_value
->totalram
);
8599 __put_user(value
.freeram
, &target_value
->freeram
);
8600 __put_user(value
.sharedram
, &target_value
->sharedram
);
8601 __put_user(value
.bufferram
, &target_value
->bufferram
);
8602 __put_user(value
.totalswap
, &target_value
->totalswap
);
8603 __put_user(value
.freeswap
, &target_value
->freeswap
);
8604 __put_user(value
.procs
, &target_value
->procs
);
8605 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
8606 __put_user(value
.freehigh
, &target_value
->freehigh
);
8607 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
8608 unlock_user_struct(target_value
, arg1
, 1);
8612 #ifdef TARGET_NR_ipc
8614 ret
= do_ipc(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8617 #ifdef TARGET_NR_semget
8618 case TARGET_NR_semget
:
8619 ret
= get_errno(semget(arg1
, arg2
, arg3
));
8622 #ifdef TARGET_NR_semop
8623 case TARGET_NR_semop
:
8624 ret
= do_semop(arg1
, arg2
, arg3
);
8627 #ifdef TARGET_NR_semctl
8628 case TARGET_NR_semctl
:
8629 ret
= do_semctl(arg1
, arg2
, arg3
, arg4
);
8632 #ifdef TARGET_NR_msgctl
8633 case TARGET_NR_msgctl
:
8634 ret
= do_msgctl(arg1
, arg2
, arg3
);
8637 #ifdef TARGET_NR_msgget
8638 case TARGET_NR_msgget
:
8639 ret
= get_errno(msgget(arg1
, arg2
));
8642 #ifdef TARGET_NR_msgrcv
8643 case TARGET_NR_msgrcv
:
8644 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
8647 #ifdef TARGET_NR_msgsnd
8648 case TARGET_NR_msgsnd
:
8649 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
8652 #ifdef TARGET_NR_shmget
8653 case TARGET_NR_shmget
:
8654 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
8657 #ifdef TARGET_NR_shmctl
8658 case TARGET_NR_shmctl
:
8659 ret
= do_shmctl(arg1
, arg2
, arg3
);
8662 #ifdef TARGET_NR_shmat
8663 case TARGET_NR_shmat
:
8664 ret
= do_shmat(arg1
, arg2
, arg3
);
8667 #ifdef TARGET_NR_shmdt
8668 case TARGET_NR_shmdt
:
8669 ret
= do_shmdt(arg1
);
8672 case TARGET_NR_fsync
:
8673 ret
= get_errno(fsync(arg1
));
8675 case TARGET_NR_clone
:
8676 /* Linux manages to have three different orderings for its
8677 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
8678 * match the kernel's CONFIG_CLONE_* settings.
8679 * Microblaze is further special in that it uses a sixth
8680 * implicit argument to clone for the TLS pointer.
8682 #if defined(TARGET_MICROBLAZE)
8683 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
8684 #elif defined(TARGET_CLONE_BACKWARDS)
8685 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
8686 #elif defined(TARGET_CLONE_BACKWARDS2)
8687 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
8689 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
8692 #ifdef __NR_exit_group
8693 /* new thread calls */
8694 case TARGET_NR_exit_group
:
8698 gdb_exit(cpu_env
, arg1
);
8699 ret
= get_errno(exit_group(arg1
));
8702 case TARGET_NR_setdomainname
:
8703 if (!(p
= lock_user_string(arg1
)))
8705 ret
= get_errno(setdomainname(p
, arg2
));
8706 unlock_user(p
, arg1
, 0);
8708 case TARGET_NR_uname
:
8709 /* no need to transcode because we use the linux syscall */
8711 struct new_utsname
* buf
;
8713 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
8715 ret
= get_errno(sys_uname(buf
));
8716 if (!is_error(ret
)) {
8717 /* Overrite the native machine name with whatever is being
8719 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
8720 /* Allow the user to override the reported release. */
8721 if (qemu_uname_release
&& *qemu_uname_release
)
8722 strcpy (buf
->release
, qemu_uname_release
);
8724 unlock_user_struct(buf
, arg1
, 1);
8728 case TARGET_NR_modify_ldt
:
8729 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
8731 #if !defined(TARGET_X86_64)
8732 case TARGET_NR_vm86old
:
8734 case TARGET_NR_vm86
:
8735 ret
= do_vm86(cpu_env
, arg1
, arg2
);
8739 case TARGET_NR_adjtimex
:
8741 #ifdef TARGET_NR_create_module
8742 case TARGET_NR_create_module
:
8744 case TARGET_NR_init_module
:
8745 case TARGET_NR_delete_module
:
8746 #ifdef TARGET_NR_get_kernel_syms
8747 case TARGET_NR_get_kernel_syms
:
8750 case TARGET_NR_quotactl
:
8752 case TARGET_NR_getpgid
:
8753 ret
= get_errno(getpgid(arg1
));
8755 case TARGET_NR_fchdir
:
8756 ret
= get_errno(fchdir(arg1
));
8758 #ifdef TARGET_NR_bdflush /* not on x86_64 */
8759 case TARGET_NR_bdflush
:
8762 #ifdef TARGET_NR_sysfs
8763 case TARGET_NR_sysfs
:
8766 case TARGET_NR_personality
:
8767 ret
= get_errno(personality(arg1
));
8769 #ifdef TARGET_NR_afs_syscall
8770 case TARGET_NR_afs_syscall
:
8773 #ifdef TARGET_NR__llseek /* Not on alpha */
8774 case TARGET_NR__llseek
:
8777 #if !defined(__NR_llseek)
8778 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | arg3
, arg5
);
8780 ret
= get_errno(res
);
8785 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
8787 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
8793 #ifdef TARGET_NR_getdents
8794 case TARGET_NR_getdents
:
8795 #ifdef __NR_getdents
8796 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
8798 struct target_dirent
*target_dirp
;
8799 struct linux_dirent
*dirp
;
8800 abi_long count
= arg3
;
8802 dirp
= g_try_malloc(count
);
8804 ret
= -TARGET_ENOMEM
;
8808 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
8809 if (!is_error(ret
)) {
8810 struct linux_dirent
*de
;
8811 struct target_dirent
*tde
;
8813 int reclen
, treclen
;
8814 int count1
, tnamelen
;
8818 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
8822 reclen
= de
->d_reclen
;
8823 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
8824 assert(tnamelen
>= 0);
8825 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
8826 assert(count1
+ treclen
<= count
);
8827 tde
->d_reclen
= tswap16(treclen
);
8828 tde
->d_ino
= tswapal(de
->d_ino
);
8829 tde
->d_off
= tswapal(de
->d_off
);
8830 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
8831 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
8833 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
8837 unlock_user(target_dirp
, arg2
, ret
);
8843 struct linux_dirent
*dirp
;
8844 abi_long count
= arg3
;
8846 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
8848 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
8849 if (!is_error(ret
)) {
8850 struct linux_dirent
*de
;
8855 reclen
= de
->d_reclen
;
8858 de
->d_reclen
= tswap16(reclen
);
8859 tswapls(&de
->d_ino
);
8860 tswapls(&de
->d_off
);
8861 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
8865 unlock_user(dirp
, arg2
, ret
);
8869 /* Implement getdents in terms of getdents64 */
8871 struct linux_dirent64
*dirp
;
8872 abi_long count
= arg3
;
8874 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
8878 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
8879 if (!is_error(ret
)) {
8880 /* Convert the dirent64 structs to target dirent. We do this
8881 * in-place, since we can guarantee that a target_dirent is no
8882 * larger than a dirent64; however this means we have to be
8883 * careful to read everything before writing in the new format.
8885 struct linux_dirent64
*de
;
8886 struct target_dirent
*tde
;
8891 tde
= (struct target_dirent
*)dirp
;
8893 int namelen
, treclen
;
8894 int reclen
= de
->d_reclen
;
8895 uint64_t ino
= de
->d_ino
;
8896 int64_t off
= de
->d_off
;
8897 uint8_t type
= de
->d_type
;
8899 namelen
= strlen(de
->d_name
);
8900 treclen
= offsetof(struct target_dirent
, d_name
)
8902 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
8904 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
8905 tde
->d_ino
= tswapal(ino
);
8906 tde
->d_off
= tswapal(off
);
8907 tde
->d_reclen
= tswap16(treclen
);
8908 /* The target_dirent type is in what was formerly a padding
8909 * byte at the end of the structure:
8911 *(((char *)tde
) + treclen
- 1) = type
;
8913 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
8914 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
8920 unlock_user(dirp
, arg2
, ret
);
8924 #endif /* TARGET_NR_getdents */
8925 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8926 case TARGET_NR_getdents64
:
8928 struct linux_dirent64
*dirp
;
8929 abi_long count
= arg3
;
8930 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
8932 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
8933 if (!is_error(ret
)) {
8934 struct linux_dirent64
*de
;
8939 reclen
= de
->d_reclen
;
8942 de
->d_reclen
= tswap16(reclen
);
8943 tswap64s((uint64_t *)&de
->d_ino
);
8944 tswap64s((uint64_t *)&de
->d_off
);
8945 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
8949 unlock_user(dirp
, arg2
, ret
);
8952 #endif /* TARGET_NR_getdents64 */
8953 #if defined(TARGET_NR__newselect)
8954 case TARGET_NR__newselect
:
8955 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
8958 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
8959 # ifdef TARGET_NR_poll
8960 case TARGET_NR_poll
:
8962 # ifdef TARGET_NR_ppoll
8963 case TARGET_NR_ppoll
:
8966 struct target_pollfd
*target_pfd
;
8967 unsigned int nfds
= arg2
;
8974 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
8975 sizeof(struct target_pollfd
) * nfds
, 1);
8980 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
8981 for (i
= 0; i
< nfds
; i
++) {
8982 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
8983 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
8988 # ifdef TARGET_NR_ppoll
8989 case TARGET_NR_ppoll
:
8991 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
8992 target_sigset_t
*target_set
;
8993 sigset_t _set
, *set
= &_set
;
8996 if (target_to_host_timespec(timeout_ts
, arg3
)) {
8997 unlock_user(target_pfd
, arg1
, 0);
9005 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
9007 unlock_user(target_pfd
, arg1
, 0);
9010 target_to_host_sigset(set
, target_set
);
9015 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
9016 set
, SIGSET_T_SIZE
));
9018 if (!is_error(ret
) && arg3
) {
9019 host_to_target_timespec(arg3
, timeout_ts
);
9022 unlock_user(target_set
, arg4
, 0);
9027 # ifdef TARGET_NR_poll
9028 case TARGET_NR_poll
:
9030 struct timespec ts
, *pts
;
9033 /* Convert ms to secs, ns */
9034 ts
.tv_sec
= arg3
/ 1000;
9035 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
9038 /* -ve poll() timeout means "infinite" */
9041 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
9046 g_assert_not_reached();
9049 if (!is_error(ret
)) {
9050 for(i
= 0; i
< nfds
; i
++) {
9051 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
9054 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
9058 case TARGET_NR_flock
:
9059 /* NOTE: the flock constant seems to be the same for every
9061 ret
= get_errno(safe_flock(arg1
, arg2
));
9063 case TARGET_NR_readv
:
9065 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
9067 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
9068 unlock_iovec(vec
, arg2
, arg3
, 1);
9070 ret
= -host_to_target_errno(errno
);
9074 case TARGET_NR_writev
:
9076 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
9078 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
9079 unlock_iovec(vec
, arg2
, arg3
, 0);
9081 ret
= -host_to_target_errno(errno
);
9085 case TARGET_NR_getsid
:
9086 ret
= get_errno(getsid(arg1
));
9088 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9089 case TARGET_NR_fdatasync
:
9090 ret
= get_errno(fdatasync(arg1
));
9093 #ifdef TARGET_NR__sysctl
9094 case TARGET_NR__sysctl
:
9095 /* We don't implement this, but ENOTDIR is always a safe
9097 ret
= -TARGET_ENOTDIR
;
9100 case TARGET_NR_sched_getaffinity
:
9102 unsigned int mask_size
;
9103 unsigned long *mask
;
9106 * sched_getaffinity needs multiples of ulong, so need to take
9107 * care of mismatches between target ulong and host ulong sizes.
9109 if (arg2
& (sizeof(abi_ulong
) - 1)) {
9110 ret
= -TARGET_EINVAL
;
9113 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
9115 mask
= alloca(mask_size
);
9116 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
9118 if (!is_error(ret
)) {
9120 /* More data returned than the caller's buffer will fit.
9121 * This only happens if sizeof(abi_long) < sizeof(long)
9122 * and the caller passed us a buffer holding an odd number
9123 * of abi_longs. If the host kernel is actually using the
9124 * extra 4 bytes then fail EINVAL; otherwise we can just
9125 * ignore them and only copy the interesting part.
9127 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
9128 if (numcpus
> arg2
* 8) {
9129 ret
= -TARGET_EINVAL
;
9135 if (copy_to_user(arg3
, mask
, ret
)) {
9141 case TARGET_NR_sched_setaffinity
:
9143 unsigned int mask_size
;
9144 unsigned long *mask
;
9147 * sched_setaffinity needs multiples of ulong, so need to take
9148 * care of mismatches between target ulong and host ulong sizes.
9150 if (arg2
& (sizeof(abi_ulong
) - 1)) {
9151 ret
= -TARGET_EINVAL
;
9154 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
9156 mask
= alloca(mask_size
);
9157 if (!lock_user_struct(VERIFY_READ
, p
, arg3
, 1)) {
9160 memcpy(mask
, p
, arg2
);
9161 unlock_user_struct(p
, arg2
, 0);
9163 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
9166 case TARGET_NR_sched_setparam
:
9168 struct sched_param
*target_schp
;
9169 struct sched_param schp
;
9172 return -TARGET_EINVAL
;
9174 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
9176 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
9177 unlock_user_struct(target_schp
, arg2
, 0);
9178 ret
= get_errno(sched_setparam(arg1
, &schp
));
9181 case TARGET_NR_sched_getparam
:
9183 struct sched_param
*target_schp
;
9184 struct sched_param schp
;
9187 return -TARGET_EINVAL
;
9189 ret
= get_errno(sched_getparam(arg1
, &schp
));
9190 if (!is_error(ret
)) {
9191 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
9193 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
9194 unlock_user_struct(target_schp
, arg2
, 1);
9198 case TARGET_NR_sched_setscheduler
:
9200 struct sched_param
*target_schp
;
9201 struct sched_param schp
;
9203 return -TARGET_EINVAL
;
9205 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
9207 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
9208 unlock_user_struct(target_schp
, arg3
, 0);
9209 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
9212 case TARGET_NR_sched_getscheduler
:
9213 ret
= get_errno(sched_getscheduler(arg1
));
9215 case TARGET_NR_sched_yield
:
9216 ret
= get_errno(sched_yield());
9218 case TARGET_NR_sched_get_priority_max
:
9219 ret
= get_errno(sched_get_priority_max(arg1
));
9221 case TARGET_NR_sched_get_priority_min
:
9222 ret
= get_errno(sched_get_priority_min(arg1
));
9224 case TARGET_NR_sched_rr_get_interval
:
9227 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
9228 if (!is_error(ret
)) {
9229 ret
= host_to_target_timespec(arg2
, &ts
);
9233 case TARGET_NR_nanosleep
:
9235 struct timespec req
, rem
;
9236 target_to_host_timespec(&req
, arg1
);
9237 ret
= get_errno(safe_nanosleep(&req
, &rem
));
9238 if (is_error(ret
) && arg2
) {
9239 host_to_target_timespec(arg2
, &rem
);
9243 #ifdef TARGET_NR_query_module
9244 case TARGET_NR_query_module
:
9247 #ifdef TARGET_NR_nfsservctl
9248 case TARGET_NR_nfsservctl
:
9251 case TARGET_NR_prctl
:
9253 case PR_GET_PDEATHSIG
:
9256 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
9257 if (!is_error(ret
) && arg2
9258 && put_user_ual(deathsig
, arg2
)) {
9266 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
9270 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
9272 unlock_user(name
, arg2
, 16);
9277 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
9281 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
9283 unlock_user(name
, arg2
, 0);
9288 /* Most prctl options have no pointer arguments */
9289 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
9293 #ifdef TARGET_NR_arch_prctl
9294 case TARGET_NR_arch_prctl
:
9295 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9296 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
9302 #ifdef TARGET_NR_pread64
9303 case TARGET_NR_pread64
:
9304 if (regpairs_aligned(cpu_env
)) {
9308 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
9310 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
9311 unlock_user(p
, arg2
, ret
);
9313 case TARGET_NR_pwrite64
:
9314 if (regpairs_aligned(cpu_env
)) {
9318 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
9320 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
9321 unlock_user(p
, arg2
, 0);
9324 case TARGET_NR_getcwd
:
9325 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
9327 ret
= get_errno(sys_getcwd1(p
, arg2
));
9328 unlock_user(p
, arg1
, ret
);
9330 case TARGET_NR_capget
:
9331 case TARGET_NR_capset
:
9333 struct target_user_cap_header
*target_header
;
9334 struct target_user_cap_data
*target_data
= NULL
;
9335 struct __user_cap_header_struct header
;
9336 struct __user_cap_data_struct data
[2];
9337 struct __user_cap_data_struct
*dataptr
= NULL
;
9338 int i
, target_datalen
;
9341 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
9344 header
.version
= tswap32(target_header
->version
);
9345 header
.pid
= tswap32(target_header
->pid
);
9347 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
9348 /* Version 2 and up takes pointer to two user_data structs */
9352 target_datalen
= sizeof(*target_data
) * data_items
;
9355 if (num
== TARGET_NR_capget
) {
9356 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
9358 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
9361 unlock_user_struct(target_header
, arg1
, 0);
9365 if (num
== TARGET_NR_capset
) {
9366 for (i
= 0; i
< data_items
; i
++) {
9367 data
[i
].effective
= tswap32(target_data
[i
].effective
);
9368 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
9369 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
9376 if (num
== TARGET_NR_capget
) {
9377 ret
= get_errno(capget(&header
, dataptr
));
9379 ret
= get_errno(capset(&header
, dataptr
));
9382 /* The kernel always updates version for both capget and capset */
9383 target_header
->version
= tswap32(header
.version
);
9384 unlock_user_struct(target_header
, arg1
, 1);
9387 if (num
== TARGET_NR_capget
) {
9388 for (i
= 0; i
< data_items
; i
++) {
9389 target_data
[i
].effective
= tswap32(data
[i
].effective
);
9390 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
9391 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
9393 unlock_user(target_data
, arg2
, target_datalen
);
9395 unlock_user(target_data
, arg2
, 0);
9400 case TARGET_NR_sigaltstack
:
9401 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUArchState
*)cpu_env
));
9404 #ifdef CONFIG_SENDFILE
9405 case TARGET_NR_sendfile
:
9410 ret
= get_user_sal(off
, arg3
);
9411 if (is_error(ret
)) {
9416 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
9417 if (!is_error(ret
) && arg3
) {
9418 abi_long ret2
= put_user_sal(off
, arg3
);
9419 if (is_error(ret2
)) {
9425 #ifdef TARGET_NR_sendfile64
9426 case TARGET_NR_sendfile64
:
9431 ret
= get_user_s64(off
, arg3
);
9432 if (is_error(ret
)) {
9437 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
9438 if (!is_error(ret
) && arg3
) {
9439 abi_long ret2
= put_user_s64(off
, arg3
);
9440 if (is_error(ret2
)) {
9448 case TARGET_NR_sendfile
:
9449 #ifdef TARGET_NR_sendfile64
9450 case TARGET_NR_sendfile64
:
9455 #ifdef TARGET_NR_getpmsg
9456 case TARGET_NR_getpmsg
:
9459 #ifdef TARGET_NR_putpmsg
9460 case TARGET_NR_putpmsg
:
9463 #ifdef TARGET_NR_vfork
9464 case TARGET_NR_vfork
:
9465 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
9469 #ifdef TARGET_NR_ugetrlimit
9470 case TARGET_NR_ugetrlimit
:
9473 int resource
= target_to_host_resource(arg1
);
9474 ret
= get_errno(getrlimit(resource
, &rlim
));
9475 if (!is_error(ret
)) {
9476 struct target_rlimit
*target_rlim
;
9477 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
9479 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
9480 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
9481 unlock_user_struct(target_rlim
, arg2
, 1);
9486 #ifdef TARGET_NR_truncate64
9487 case TARGET_NR_truncate64
:
9488 if (!(p
= lock_user_string(arg1
)))
9490 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
9491 unlock_user(p
, arg1
, 0);
9494 #ifdef TARGET_NR_ftruncate64
9495 case TARGET_NR_ftruncate64
:
9496 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
9499 #ifdef TARGET_NR_stat64
9500 case TARGET_NR_stat64
:
9501 if (!(p
= lock_user_string(arg1
)))
9503 ret
= get_errno(stat(path(p
), &st
));
9504 unlock_user(p
, arg1
, 0);
9506 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
9509 #ifdef TARGET_NR_lstat64
9510 case TARGET_NR_lstat64
:
9511 if (!(p
= lock_user_string(arg1
)))
9513 ret
= get_errno(lstat(path(p
), &st
));
9514 unlock_user(p
, arg1
, 0);
9516 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
9519 #ifdef TARGET_NR_fstat64
9520 case TARGET_NR_fstat64
:
9521 ret
= get_errno(fstat(arg1
, &st
));
9523 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
9526 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
9527 #ifdef TARGET_NR_fstatat64
9528 case TARGET_NR_fstatat64
:
9530 #ifdef TARGET_NR_newfstatat
9531 case TARGET_NR_newfstatat
:
9533 if (!(p
= lock_user_string(arg2
)))
9535 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
9537 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
9540 #ifdef TARGET_NR_lchown
9541 case TARGET_NR_lchown
:
9542 if (!(p
= lock_user_string(arg1
)))
9544 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
9545 unlock_user(p
, arg1
, 0);
9548 #ifdef TARGET_NR_getuid
9549 case TARGET_NR_getuid
:
9550 ret
= get_errno(high2lowuid(getuid()));
9553 #ifdef TARGET_NR_getgid
9554 case TARGET_NR_getgid
:
9555 ret
= get_errno(high2lowgid(getgid()));
9558 #ifdef TARGET_NR_geteuid
9559 case TARGET_NR_geteuid
:
9560 ret
= get_errno(high2lowuid(geteuid()));
9563 #ifdef TARGET_NR_getegid
9564 case TARGET_NR_getegid
:
9565 ret
= get_errno(high2lowgid(getegid()));
9568 case TARGET_NR_setreuid
:
9569 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
9571 case TARGET_NR_setregid
:
9572 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
9574 case TARGET_NR_getgroups
:
9576 int gidsetsize
= arg1
;
9577 target_id
*target_grouplist
;
9581 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
9582 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
9583 if (gidsetsize
== 0)
9585 if (!is_error(ret
)) {
9586 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
9587 if (!target_grouplist
)
9589 for(i
= 0;i
< ret
; i
++)
9590 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
9591 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
9595 case TARGET_NR_setgroups
:
9597 int gidsetsize
= arg1
;
9598 target_id
*target_grouplist
;
9599 gid_t
*grouplist
= NULL
;
9602 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
9603 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
9604 if (!target_grouplist
) {
9605 ret
= -TARGET_EFAULT
;
9608 for (i
= 0; i
< gidsetsize
; i
++) {
9609 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
9611 unlock_user(target_grouplist
, arg2
, 0);
9613 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
9616 case TARGET_NR_fchown
:
9617 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
9619 #if defined(TARGET_NR_fchownat)
9620 case TARGET_NR_fchownat
:
9621 if (!(p
= lock_user_string(arg2
)))
9623 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
9624 low2highgid(arg4
), arg5
));
9625 unlock_user(p
, arg2
, 0);
9628 #ifdef TARGET_NR_setresuid
9629 case TARGET_NR_setresuid
:
9630 ret
= get_errno(sys_setresuid(low2highuid(arg1
),
9632 low2highuid(arg3
)));
9635 #ifdef TARGET_NR_getresuid
9636 case TARGET_NR_getresuid
:
9638 uid_t ruid
, euid
, suid
;
9639 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
9640 if (!is_error(ret
)) {
9641 if (put_user_id(high2lowuid(ruid
), arg1
)
9642 || put_user_id(high2lowuid(euid
), arg2
)
9643 || put_user_id(high2lowuid(suid
), arg3
))
9649 #ifdef TARGET_NR_getresgid
9650 case TARGET_NR_setresgid
:
9651 ret
= get_errno(sys_setresgid(low2highgid(arg1
),
9653 low2highgid(arg3
)));
9656 #ifdef TARGET_NR_getresgid
9657 case TARGET_NR_getresgid
:
9659 gid_t rgid
, egid
, sgid
;
9660 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
9661 if (!is_error(ret
)) {
9662 if (put_user_id(high2lowgid(rgid
), arg1
)
9663 || put_user_id(high2lowgid(egid
), arg2
)
9664 || put_user_id(high2lowgid(sgid
), arg3
))
9670 #ifdef TARGET_NR_chown
9671 case TARGET_NR_chown
:
9672 if (!(p
= lock_user_string(arg1
)))
9674 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
9675 unlock_user(p
, arg1
, 0);
9678 case TARGET_NR_setuid
:
9679 ret
= get_errno(sys_setuid(low2highuid(arg1
)));
9681 case TARGET_NR_setgid
:
9682 ret
= get_errno(sys_setgid(low2highgid(arg1
)));
9684 case TARGET_NR_setfsuid
:
9685 ret
= get_errno(setfsuid(arg1
));
9687 case TARGET_NR_setfsgid
:
9688 ret
= get_errno(setfsgid(arg1
));
9691 #ifdef TARGET_NR_lchown32
9692 case TARGET_NR_lchown32
:
9693 if (!(p
= lock_user_string(arg1
)))
9695 ret
= get_errno(lchown(p
, arg2
, arg3
));
9696 unlock_user(p
, arg1
, 0);
9699 #ifdef TARGET_NR_getuid32
9700 case TARGET_NR_getuid32
:
9701 ret
= get_errno(getuid());
9705 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
9706 /* Alpha specific */
9707 case TARGET_NR_getxuid
:
9711 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
9713 ret
= get_errno(getuid());
9716 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
9717 /* Alpha specific */
9718 case TARGET_NR_getxgid
:
9722 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
9724 ret
= get_errno(getgid());
9727 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
9728 /* Alpha specific */
9729 case TARGET_NR_osf_getsysinfo
:
9730 ret
= -TARGET_EOPNOTSUPP
;
9732 case TARGET_GSI_IEEE_FP_CONTROL
:
9734 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
9736 /* Copied from linux ieee_fpcr_to_swcr. */
9737 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
9738 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
9739 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
9740 | SWCR_TRAP_ENABLE_DZE
9741 | SWCR_TRAP_ENABLE_OVF
);
9742 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
9743 | SWCR_TRAP_ENABLE_INE
);
9744 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
9745 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
9747 if (put_user_u64 (swcr
, arg2
))
9753 /* case GSI_IEEE_STATE_AT_SIGNAL:
9754 -- Not implemented in linux kernel.
9756 -- Retrieves current unaligned access state; not much used.
9758 -- Retrieves implver information; surely not used.
9760 -- Grabs a copy of the HWRPB; surely not used.
9765 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
9766 /* Alpha specific */
9767 case TARGET_NR_osf_setsysinfo
:
9768 ret
= -TARGET_EOPNOTSUPP
;
9770 case TARGET_SSI_IEEE_FP_CONTROL
:
9772 uint64_t swcr
, fpcr
, orig_fpcr
;
9774 if (get_user_u64 (swcr
, arg2
)) {
9777 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
9778 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
9780 /* Copied from linux ieee_swcr_to_fpcr. */
9781 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
9782 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
9783 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
9784 | SWCR_TRAP_ENABLE_DZE
9785 | SWCR_TRAP_ENABLE_OVF
)) << 48;
9786 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
9787 | SWCR_TRAP_ENABLE_INE
)) << 57;
9788 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
9789 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
9791 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
9796 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
9798 uint64_t exc
, fpcr
, orig_fpcr
;
9801 if (get_user_u64(exc
, arg2
)) {
9805 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
9807 /* We only add to the exception status here. */
9808 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
9810 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
9813 /* Old exceptions are not signaled. */
9814 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
9816 /* If any exceptions set by this call,
9817 and are unmasked, send a signal. */
9819 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
9820 si_code
= TARGET_FPE_FLTRES
;
9822 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
9823 si_code
= TARGET_FPE_FLTUND
;
9825 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
9826 si_code
= TARGET_FPE_FLTOVF
;
9828 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
9829 si_code
= TARGET_FPE_FLTDIV
;
9831 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
9832 si_code
= TARGET_FPE_FLTINV
;
9835 target_siginfo_t info
;
9836 info
.si_signo
= SIGFPE
;
9838 info
.si_code
= si_code
;
9839 info
._sifields
._sigfault
._addr
9840 = ((CPUArchState
*)cpu_env
)->pc
;
9841 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
9846 /* case SSI_NVPAIRS:
9847 -- Used with SSIN_UACPROC to enable unaligned accesses.
9848 case SSI_IEEE_STATE_AT_SIGNAL:
9849 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
9850 -- Not implemented in linux kernel
9855 #ifdef TARGET_NR_osf_sigprocmask
9856 /* Alpha specific. */
9857 case TARGET_NR_osf_sigprocmask
:
9861 sigset_t set
, oldset
;
9864 case TARGET_SIG_BLOCK
:
9867 case TARGET_SIG_UNBLOCK
:
9870 case TARGET_SIG_SETMASK
:
9874 ret
= -TARGET_EINVAL
;
9878 target_to_host_old_sigset(&set
, &mask
);
9879 ret
= do_sigprocmask(how
, &set
, &oldset
);
9881 host_to_target_old_sigset(&mask
, &oldset
);
9888 #ifdef TARGET_NR_getgid32
9889 case TARGET_NR_getgid32
:
9890 ret
= get_errno(getgid());
9893 #ifdef TARGET_NR_geteuid32
9894 case TARGET_NR_geteuid32
:
9895 ret
= get_errno(geteuid());
9898 #ifdef TARGET_NR_getegid32
9899 case TARGET_NR_getegid32
:
9900 ret
= get_errno(getegid());
9903 #ifdef TARGET_NR_setreuid32
9904 case TARGET_NR_setreuid32
:
9905 ret
= get_errno(setreuid(arg1
, arg2
));
9908 #ifdef TARGET_NR_setregid32
9909 case TARGET_NR_setregid32
:
9910 ret
= get_errno(setregid(arg1
, arg2
));
9913 #ifdef TARGET_NR_getgroups32
9914 case TARGET_NR_getgroups32
:
9916 int gidsetsize
= arg1
;
9917 uint32_t *target_grouplist
;
9921 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
9922 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
9923 if (gidsetsize
== 0)
9925 if (!is_error(ret
)) {
9926 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
9927 if (!target_grouplist
) {
9928 ret
= -TARGET_EFAULT
;
9931 for(i
= 0;i
< ret
; i
++)
9932 target_grouplist
[i
] = tswap32(grouplist
[i
]);
9933 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
9938 #ifdef TARGET_NR_setgroups32
9939 case TARGET_NR_setgroups32
:
9941 int gidsetsize
= arg1
;
9942 uint32_t *target_grouplist
;
9946 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
9947 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
9948 if (!target_grouplist
) {
9949 ret
= -TARGET_EFAULT
;
9952 for(i
= 0;i
< gidsetsize
; i
++)
9953 grouplist
[i
] = tswap32(target_grouplist
[i
]);
9954 unlock_user(target_grouplist
, arg2
, 0);
9955 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
9959 #ifdef TARGET_NR_fchown32
9960 case TARGET_NR_fchown32
:
9961 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
9964 #ifdef TARGET_NR_setresuid32
9965 case TARGET_NR_setresuid32
:
9966 ret
= get_errno(sys_setresuid(arg1
, arg2
, arg3
));
9969 #ifdef TARGET_NR_getresuid32
9970 case TARGET_NR_getresuid32
:
9972 uid_t ruid
, euid
, suid
;
9973 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
9974 if (!is_error(ret
)) {
9975 if (put_user_u32(ruid
, arg1
)
9976 || put_user_u32(euid
, arg2
)
9977 || put_user_u32(suid
, arg3
))
9983 #ifdef TARGET_NR_setresgid32
9984 case TARGET_NR_setresgid32
:
9985 ret
= get_errno(sys_setresgid(arg1
, arg2
, arg3
));
9988 #ifdef TARGET_NR_getresgid32
9989 case TARGET_NR_getresgid32
:
9991 gid_t rgid
, egid
, sgid
;
9992 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
9993 if (!is_error(ret
)) {
9994 if (put_user_u32(rgid
, arg1
)
9995 || put_user_u32(egid
, arg2
)
9996 || put_user_u32(sgid
, arg3
))
10002 #ifdef TARGET_NR_chown32
10003 case TARGET_NR_chown32
:
10004 if (!(p
= lock_user_string(arg1
)))
10006 ret
= get_errno(chown(p
, arg2
, arg3
));
10007 unlock_user(p
, arg1
, 0);
10010 #ifdef TARGET_NR_setuid32
10011 case TARGET_NR_setuid32
:
10012 ret
= get_errno(sys_setuid(arg1
));
10015 #ifdef TARGET_NR_setgid32
10016 case TARGET_NR_setgid32
:
10017 ret
= get_errno(sys_setgid(arg1
));
10020 #ifdef TARGET_NR_setfsuid32
10021 case TARGET_NR_setfsuid32
:
10022 ret
= get_errno(setfsuid(arg1
));
10025 #ifdef TARGET_NR_setfsgid32
10026 case TARGET_NR_setfsgid32
:
10027 ret
= get_errno(setfsgid(arg1
));
10031 case TARGET_NR_pivot_root
:
10032 goto unimplemented
;
10033 #ifdef TARGET_NR_mincore
10034 case TARGET_NR_mincore
:
10037 ret
= -TARGET_EFAULT
;
10038 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
10040 if (!(p
= lock_user_string(arg3
)))
10042 ret
= get_errno(mincore(a
, arg2
, p
));
10043 unlock_user(p
, arg3
, ret
);
10045 unlock_user(a
, arg1
, 0);
10049 #ifdef TARGET_NR_arm_fadvise64_64
10050 case TARGET_NR_arm_fadvise64_64
:
10051 /* arm_fadvise64_64 looks like fadvise64_64 but
10052 * with different argument order: fd, advice, offset, len
10053 * rather than the usual fd, offset, len, advice.
10054 * Note that offset and len are both 64-bit so appear as
10055 * pairs of 32-bit registers.
10057 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
10058 target_offset64(arg5
, arg6
), arg2
);
10059 ret
= -host_to_target_errno(ret
);
10063 #if TARGET_ABI_BITS == 32
10065 #ifdef TARGET_NR_fadvise64_64
10066 case TARGET_NR_fadvise64_64
:
10067 /* 6 args: fd, offset (high, low), len (high, low), advice */
10068 if (regpairs_aligned(cpu_env
)) {
10069 /* offset is in (3,4), len in (5,6) and advice in 7 */
10076 ret
= -host_to_target_errno(posix_fadvise(arg1
,
10077 target_offset64(arg2
, arg3
),
10078 target_offset64(arg4
, arg5
),
10083 #ifdef TARGET_NR_fadvise64
10084 case TARGET_NR_fadvise64
:
10085 /* 5 args: fd, offset (high, low), len, advice */
10086 if (regpairs_aligned(cpu_env
)) {
10087 /* offset is in (3,4), len in 5 and advice in 6 */
10093 ret
= -host_to_target_errno(posix_fadvise(arg1
,
10094 target_offset64(arg2
, arg3
),
10099 #else /* not a 32-bit ABI */
10100 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10101 #ifdef TARGET_NR_fadvise64_64
10102 case TARGET_NR_fadvise64_64
:
10104 #ifdef TARGET_NR_fadvise64
10105 case TARGET_NR_fadvise64
:
10107 #ifdef TARGET_S390X
10109 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
10110 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
10111 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
10112 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
10116 ret
= -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
10119 #endif /* end of 64-bit ABI fadvise handling */
10121 #ifdef TARGET_NR_madvise
10122 case TARGET_NR_madvise
:
10123 /* A straight passthrough may not be safe because qemu sometimes
10124 turns private file-backed mappings into anonymous mappings.
10125 This will break MADV_DONTNEED.
10126 This is a hint, so ignoring and returning success is ok. */
10127 ret
= get_errno(0);
10130 #if TARGET_ABI_BITS == 32
10131 case TARGET_NR_fcntl64
:
10135 struct target_flock64
*target_fl
;
10137 struct target_eabi_flock64
*target_efl
;
10140 cmd
= target_to_host_fcntl_cmd(arg2
);
10141 if (cmd
== -TARGET_EINVAL
) {
10147 case TARGET_F_GETLK64
:
10149 if (((CPUARMState
*)cpu_env
)->eabi
) {
10150 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
10152 fl
.l_type
= tswap16(target_efl
->l_type
);
10153 fl
.l_whence
= tswap16(target_efl
->l_whence
);
10154 fl
.l_start
= tswap64(target_efl
->l_start
);
10155 fl
.l_len
= tswap64(target_efl
->l_len
);
10156 fl
.l_pid
= tswap32(target_efl
->l_pid
);
10157 unlock_user_struct(target_efl
, arg3
, 0);
10161 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
10163 fl
.l_type
= tswap16(target_fl
->l_type
);
10164 fl
.l_whence
= tswap16(target_fl
->l_whence
);
10165 fl
.l_start
= tswap64(target_fl
->l_start
);
10166 fl
.l_len
= tswap64(target_fl
->l_len
);
10167 fl
.l_pid
= tswap32(target_fl
->l_pid
);
10168 unlock_user_struct(target_fl
, arg3
, 0);
10170 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
10173 if (((CPUARMState
*)cpu_env
)->eabi
) {
10174 if (!lock_user_struct(VERIFY_WRITE
, target_efl
, arg3
, 0))
10176 target_efl
->l_type
= tswap16(fl
.l_type
);
10177 target_efl
->l_whence
= tswap16(fl
.l_whence
);
10178 target_efl
->l_start
= tswap64(fl
.l_start
);
10179 target_efl
->l_len
= tswap64(fl
.l_len
);
10180 target_efl
->l_pid
= tswap32(fl
.l_pid
);
10181 unlock_user_struct(target_efl
, arg3
, 1);
10185 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg3
, 0))
10187 target_fl
->l_type
= tswap16(fl
.l_type
);
10188 target_fl
->l_whence
= tswap16(fl
.l_whence
);
10189 target_fl
->l_start
= tswap64(fl
.l_start
);
10190 target_fl
->l_len
= tswap64(fl
.l_len
);
10191 target_fl
->l_pid
= tswap32(fl
.l_pid
);
10192 unlock_user_struct(target_fl
, arg3
, 1);
10197 case TARGET_F_SETLK64
:
10198 case TARGET_F_SETLKW64
:
10200 if (((CPUARMState
*)cpu_env
)->eabi
) {
10201 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
10203 fl
.l_type
= tswap16(target_efl
->l_type
);
10204 fl
.l_whence
= tswap16(target_efl
->l_whence
);
10205 fl
.l_start
= tswap64(target_efl
->l_start
);
10206 fl
.l_len
= tswap64(target_efl
->l_len
);
10207 fl
.l_pid
= tswap32(target_efl
->l_pid
);
10208 unlock_user_struct(target_efl
, arg3
, 0);
10212 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
10214 fl
.l_type
= tswap16(target_fl
->l_type
);
10215 fl
.l_whence
= tswap16(target_fl
->l_whence
);
10216 fl
.l_start
= tswap64(target_fl
->l_start
);
10217 fl
.l_len
= tswap64(target_fl
->l_len
);
10218 fl
.l_pid
= tswap32(target_fl
->l_pid
);
10219 unlock_user_struct(target_fl
, arg3
, 0);
10221 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
10224 ret
= do_fcntl(arg1
, arg2
, arg3
);
10230 #ifdef TARGET_NR_cacheflush
10231 case TARGET_NR_cacheflush
:
10232 /* self-modifying code is handled automatically, so nothing needed */
10236 #ifdef TARGET_NR_security
10237 case TARGET_NR_security
:
10238 goto unimplemented
;
10240 #ifdef TARGET_NR_getpagesize
10241 case TARGET_NR_getpagesize
:
10242 ret
= TARGET_PAGE_SIZE
;
10245 case TARGET_NR_gettid
:
10246 ret
= get_errno(gettid());
10248 #ifdef TARGET_NR_readahead
10249 case TARGET_NR_readahead
:
10250 #if TARGET_ABI_BITS == 32
10251 if (regpairs_aligned(cpu_env
)) {
10256 ret
= get_errno(readahead(arg1
, ((off64_t
)arg3
<< 32) | arg2
, arg4
));
10258 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
10263 #ifdef TARGET_NR_setxattr
10264 case TARGET_NR_listxattr
:
10265 case TARGET_NR_llistxattr
:
10269 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10271 ret
= -TARGET_EFAULT
;
10275 p
= lock_user_string(arg1
);
10277 if (num
== TARGET_NR_listxattr
) {
10278 ret
= get_errno(listxattr(p
, b
, arg3
));
10280 ret
= get_errno(llistxattr(p
, b
, arg3
));
10283 ret
= -TARGET_EFAULT
;
10285 unlock_user(p
, arg1
, 0);
10286 unlock_user(b
, arg2
, arg3
);
10289 case TARGET_NR_flistxattr
:
10293 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10295 ret
= -TARGET_EFAULT
;
10299 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
10300 unlock_user(b
, arg2
, arg3
);
10303 case TARGET_NR_setxattr
:
10304 case TARGET_NR_lsetxattr
:
10306 void *p
, *n
, *v
= 0;
10308 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
10310 ret
= -TARGET_EFAULT
;
10314 p
= lock_user_string(arg1
);
10315 n
= lock_user_string(arg2
);
10317 if (num
== TARGET_NR_setxattr
) {
10318 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
10320 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
10323 ret
= -TARGET_EFAULT
;
10325 unlock_user(p
, arg1
, 0);
10326 unlock_user(n
, arg2
, 0);
10327 unlock_user(v
, arg3
, 0);
10330 case TARGET_NR_fsetxattr
:
10334 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
10336 ret
= -TARGET_EFAULT
;
10340 n
= lock_user_string(arg2
);
10342 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
10344 ret
= -TARGET_EFAULT
;
10346 unlock_user(n
, arg2
, 0);
10347 unlock_user(v
, arg3
, 0);
10350 case TARGET_NR_getxattr
:
10351 case TARGET_NR_lgetxattr
:
10353 void *p
, *n
, *v
= 0;
10355 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
10357 ret
= -TARGET_EFAULT
;
10361 p
= lock_user_string(arg1
);
10362 n
= lock_user_string(arg2
);
10364 if (num
== TARGET_NR_getxattr
) {
10365 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
10367 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
10370 ret
= -TARGET_EFAULT
;
10372 unlock_user(p
, arg1
, 0);
10373 unlock_user(n
, arg2
, 0);
10374 unlock_user(v
, arg3
, arg4
);
10377 case TARGET_NR_fgetxattr
:
10381 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
10383 ret
= -TARGET_EFAULT
;
10387 n
= lock_user_string(arg2
);
10389 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
10391 ret
= -TARGET_EFAULT
;
10393 unlock_user(n
, arg2
, 0);
10394 unlock_user(v
, arg3
, arg4
);
10397 case TARGET_NR_removexattr
:
10398 case TARGET_NR_lremovexattr
:
10401 p
= lock_user_string(arg1
);
10402 n
= lock_user_string(arg2
);
10404 if (num
== TARGET_NR_removexattr
) {
10405 ret
= get_errno(removexattr(p
, n
));
10407 ret
= get_errno(lremovexattr(p
, n
));
10410 ret
= -TARGET_EFAULT
;
10412 unlock_user(p
, arg1
, 0);
10413 unlock_user(n
, arg2
, 0);
10416 case TARGET_NR_fremovexattr
:
10419 n
= lock_user_string(arg2
);
10421 ret
= get_errno(fremovexattr(arg1
, n
));
10423 ret
= -TARGET_EFAULT
;
10425 unlock_user(n
, arg2
, 0);
10429 #endif /* CONFIG_ATTR */
10430 #ifdef TARGET_NR_set_thread_area
10431 case TARGET_NR_set_thread_area
:
10432 #if defined(TARGET_MIPS)
10433 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
10436 #elif defined(TARGET_CRIS)
10438 ret
= -TARGET_EINVAL
;
10440 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
10444 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
10445 ret
= do_set_thread_area(cpu_env
, arg1
);
10447 #elif defined(TARGET_M68K)
10449 TaskState
*ts
= cpu
->opaque
;
10450 ts
->tp_value
= arg1
;
10455 goto unimplemented_nowarn
;
10458 #ifdef TARGET_NR_get_thread_area
10459 case TARGET_NR_get_thread_area
:
10460 #if defined(TARGET_I386) && defined(TARGET_ABI32)
10461 ret
= do_get_thread_area(cpu_env
, arg1
);
10463 #elif defined(TARGET_M68K)
10465 TaskState
*ts
= cpu
->opaque
;
10466 ret
= ts
->tp_value
;
10470 goto unimplemented_nowarn
;
10473 #ifdef TARGET_NR_getdomainname
10474 case TARGET_NR_getdomainname
:
10475 goto unimplemented_nowarn
;
10478 #ifdef TARGET_NR_clock_gettime
10479 case TARGET_NR_clock_gettime
:
10481 struct timespec ts
;
10482 ret
= get_errno(clock_gettime(arg1
, &ts
));
10483 if (!is_error(ret
)) {
10484 host_to_target_timespec(arg2
, &ts
);
10489 #ifdef TARGET_NR_clock_getres
10490 case TARGET_NR_clock_getres
:
10492 struct timespec ts
;
10493 ret
= get_errno(clock_getres(arg1
, &ts
));
10494 if (!is_error(ret
)) {
10495 host_to_target_timespec(arg2
, &ts
);
10500 #ifdef TARGET_NR_clock_nanosleep
10501 case TARGET_NR_clock_nanosleep
:
10503 struct timespec ts
;
10504 target_to_host_timespec(&ts
, arg3
);
10505 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
10506 &ts
, arg4
? &ts
: NULL
));
10508 host_to_target_timespec(arg4
, &ts
);
10510 #if defined(TARGET_PPC)
10511 /* clock_nanosleep is odd in that it returns positive errno values.
10512 * On PPC, CR0 bit 3 should be set in such a situation. */
10513 if (ret
&& ret
!= -TARGET_ERESTARTSYS
) {
10514 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
10521 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
10522 case TARGET_NR_set_tid_address
:
10523 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
10527 case TARGET_NR_tkill
:
10528 ret
= get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
10531 case TARGET_NR_tgkill
:
10532 ret
= get_errno(safe_tgkill((int)arg1
, (int)arg2
,
10533 target_to_host_signal(arg3
)));
10536 #ifdef TARGET_NR_set_robust_list
10537 case TARGET_NR_set_robust_list
:
10538 case TARGET_NR_get_robust_list
:
10539 /* The ABI for supporting robust futexes has userspace pass
10540 * the kernel a pointer to a linked list which is updated by
10541 * userspace after the syscall; the list is walked by the kernel
10542 * when the thread exits. Since the linked list in QEMU guest
10543 * memory isn't a valid linked list for the host and we have
10544 * no way to reliably intercept the thread-death event, we can't
10545 * support these. Silently return ENOSYS so that guest userspace
10546 * falls back to a non-robust futex implementation (which should
10547 * be OK except in the corner case of the guest crashing while
10548 * holding a mutex that is shared with another process via
10551 goto unimplemented_nowarn
;
10554 #if defined(TARGET_NR_utimensat)
10555 case TARGET_NR_utimensat
:
10557 struct timespec
*tsp
, ts
[2];
10561 target_to_host_timespec(ts
, arg3
);
10562 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
10566 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
10568 if (!(p
= lock_user_string(arg2
))) {
10569 ret
= -TARGET_EFAULT
;
10572 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
10573 unlock_user(p
, arg2
, 0);
10578 case TARGET_NR_futex
:
10579 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
10581 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
10582 case TARGET_NR_inotify_init
:
10583 ret
= get_errno(sys_inotify_init());
10586 #ifdef CONFIG_INOTIFY1
10587 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
10588 case TARGET_NR_inotify_init1
:
10589 ret
= get_errno(sys_inotify_init1(arg1
));
10593 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
10594 case TARGET_NR_inotify_add_watch
:
10595 p
= lock_user_string(arg2
);
10596 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
10597 unlock_user(p
, arg2
, 0);
10600 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
10601 case TARGET_NR_inotify_rm_watch
:
10602 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
10606 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
10607 case TARGET_NR_mq_open
:
10609 struct mq_attr posix_mq_attr
, *attrp
;
10611 p
= lock_user_string(arg1
- 1);
10613 copy_from_user_mq_attr (&posix_mq_attr
, arg4
);
10614 attrp
= &posix_mq_attr
;
10618 ret
= get_errno(mq_open(p
, arg2
, arg3
, attrp
));
10619 unlock_user (p
, arg1
, 0);
10623 case TARGET_NR_mq_unlink
:
10624 p
= lock_user_string(arg1
- 1);
10625 ret
= get_errno(mq_unlink(p
));
10626 unlock_user (p
, arg1
, 0);
10629 case TARGET_NR_mq_timedsend
:
10631 struct timespec ts
;
10633 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
10635 target_to_host_timespec(&ts
, arg5
);
10636 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
10637 host_to_target_timespec(arg5
, &ts
);
10639 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
10641 unlock_user (p
, arg2
, arg3
);
10645 case TARGET_NR_mq_timedreceive
:
10647 struct timespec ts
;
10650 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
10652 target_to_host_timespec(&ts
, arg5
);
10653 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
10655 host_to_target_timespec(arg5
, &ts
);
10657 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
10660 unlock_user (p
, arg2
, arg3
);
10662 put_user_u32(prio
, arg4
);
10666 /* Not implemented for now... */
10667 /* case TARGET_NR_mq_notify: */
10670 case TARGET_NR_mq_getsetattr
:
10672 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
10675 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
10676 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
10679 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
10680 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
10687 #ifdef CONFIG_SPLICE
10688 #ifdef TARGET_NR_tee
10689 case TARGET_NR_tee
:
10691 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
10695 #ifdef TARGET_NR_splice
10696 case TARGET_NR_splice
:
10698 loff_t loff_in
, loff_out
;
10699 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
10701 if (get_user_u64(loff_in
, arg2
)) {
10704 ploff_in
= &loff_in
;
10707 if (get_user_u64(loff_out
, arg4
)) {
10710 ploff_out
= &loff_out
;
10712 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
10714 if (put_user_u64(loff_in
, arg2
)) {
10719 if (put_user_u64(loff_out
, arg4
)) {
10726 #ifdef TARGET_NR_vmsplice
10727 case TARGET_NR_vmsplice
:
10729 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10731 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
10732 unlock_iovec(vec
, arg2
, arg3
, 0);
10734 ret
= -host_to_target_errno(errno
);
10739 #endif /* CONFIG_SPLICE */
10740 #ifdef CONFIG_EVENTFD
10741 #if defined(TARGET_NR_eventfd)
10742 case TARGET_NR_eventfd
:
10743 ret
= get_errno(eventfd(arg1
, 0));
10744 fd_trans_unregister(ret
);
10747 #if defined(TARGET_NR_eventfd2)
10748 case TARGET_NR_eventfd2
:
10750 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
10751 if (arg2
& TARGET_O_NONBLOCK
) {
10752 host_flags
|= O_NONBLOCK
;
10754 if (arg2
& TARGET_O_CLOEXEC
) {
10755 host_flags
|= O_CLOEXEC
;
10757 ret
= get_errno(eventfd(arg1
, host_flags
));
10758 fd_trans_unregister(ret
);
10762 #endif /* CONFIG_EVENTFD */
10763 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
10764 case TARGET_NR_fallocate
:
10765 #if TARGET_ABI_BITS == 32
10766 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
10767 target_offset64(arg5
, arg6
)));
10769 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
10773 #if defined(CONFIG_SYNC_FILE_RANGE)
10774 #if defined(TARGET_NR_sync_file_range)
10775 case TARGET_NR_sync_file_range
:
10776 #if TARGET_ABI_BITS == 32
10777 #if defined(TARGET_MIPS)
10778 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
10779 target_offset64(arg5
, arg6
), arg7
));
10781 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
10782 target_offset64(arg4
, arg5
), arg6
));
10783 #endif /* !TARGET_MIPS */
10785 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
10789 #if defined(TARGET_NR_sync_file_range2)
10790 case TARGET_NR_sync_file_range2
:
10791 /* This is like sync_file_range but the arguments are reordered */
10792 #if TARGET_ABI_BITS == 32
10793 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
10794 target_offset64(arg5
, arg6
), arg2
));
10796 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
10801 #if defined(TARGET_NR_signalfd4)
10802 case TARGET_NR_signalfd4
:
10803 ret
= do_signalfd4(arg1
, arg2
, arg4
);
10806 #if defined(TARGET_NR_signalfd)
10807 case TARGET_NR_signalfd
:
10808 ret
= do_signalfd4(arg1
, arg2
, 0);
10811 #if defined(CONFIG_EPOLL)
10812 #if defined(TARGET_NR_epoll_create)
10813 case TARGET_NR_epoll_create
:
10814 ret
= get_errno(epoll_create(arg1
));
10817 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
10818 case TARGET_NR_epoll_create1
:
10819 ret
= get_errno(epoll_create1(arg1
));
10822 #if defined(TARGET_NR_epoll_ctl)
10823 case TARGET_NR_epoll_ctl
:
10825 struct epoll_event ep
;
10826 struct epoll_event
*epp
= 0;
10828 struct target_epoll_event
*target_ep
;
10829 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
10832 ep
.events
= tswap32(target_ep
->events
);
10833 /* The epoll_data_t union is just opaque data to the kernel,
10834 * so we transfer all 64 bits across and need not worry what
10835 * actual data type it is.
10837 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
10838 unlock_user_struct(target_ep
, arg4
, 0);
10841 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
10846 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
10847 #if defined(TARGET_NR_epoll_wait)
10848 case TARGET_NR_epoll_wait
:
10850 #if defined(TARGET_NR_epoll_pwait)
10851 case TARGET_NR_epoll_pwait
:
10854 struct target_epoll_event
*target_ep
;
10855 struct epoll_event
*ep
;
10857 int maxevents
= arg3
;
10858 int timeout
= arg4
;
10860 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
10861 maxevents
* sizeof(struct target_epoll_event
), 1);
10866 ep
= alloca(maxevents
* sizeof(struct epoll_event
));
10869 #if defined(TARGET_NR_epoll_pwait)
10870 case TARGET_NR_epoll_pwait
:
10872 target_sigset_t
*target_set
;
10873 sigset_t _set
, *set
= &_set
;
10876 target_set
= lock_user(VERIFY_READ
, arg5
,
10877 sizeof(target_sigset_t
), 1);
10879 unlock_user(target_ep
, arg2
, 0);
10882 target_to_host_sigset(set
, target_set
);
10883 unlock_user(target_set
, arg5
, 0);
10888 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
10889 set
, SIGSET_T_SIZE
));
10893 #if defined(TARGET_NR_epoll_wait)
10894 case TARGET_NR_epoll_wait
:
10895 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
10900 ret
= -TARGET_ENOSYS
;
10902 if (!is_error(ret
)) {
10904 for (i
= 0; i
< ret
; i
++) {
10905 target_ep
[i
].events
= tswap32(ep
[i
].events
);
10906 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
10909 unlock_user(target_ep
, arg2
, ret
* sizeof(struct target_epoll_event
));
10914 #ifdef TARGET_NR_prlimit64
10915 case TARGET_NR_prlimit64
:
10917 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
10918 struct target_rlimit64
*target_rnew
, *target_rold
;
10919 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
10920 int resource
= target_to_host_resource(arg2
);
10922 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
10925 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
10926 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
10927 unlock_user_struct(target_rnew
, arg3
, 0);
10931 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
10932 if (!is_error(ret
) && arg4
) {
10933 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
10936 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
10937 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
10938 unlock_user_struct(target_rold
, arg4
, 1);
10943 #ifdef TARGET_NR_gethostname
10944 case TARGET_NR_gethostname
:
10946 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
10948 ret
= get_errno(gethostname(name
, arg2
));
10949 unlock_user(name
, arg1
, arg2
);
10951 ret
= -TARGET_EFAULT
;
10956 #ifdef TARGET_NR_atomic_cmpxchg_32
10957 case TARGET_NR_atomic_cmpxchg_32
:
10959 /* should use start_exclusive from main.c */
10960 abi_ulong mem_value
;
10961 if (get_user_u32(mem_value
, arg6
)) {
10962 target_siginfo_t info
;
10963 info
.si_signo
= SIGSEGV
;
10965 info
.si_code
= TARGET_SEGV_MAPERR
;
10966 info
._sifields
._sigfault
._addr
= arg6
;
10967 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
10971 if (mem_value
== arg2
)
10972 put_user_u32(arg1
, arg6
);
10977 #ifdef TARGET_NR_atomic_barrier
10978 case TARGET_NR_atomic_barrier
:
10980 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
10986 #ifdef TARGET_NR_timer_create
10987 case TARGET_NR_timer_create
:
10989 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
10991 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
10994 int timer_index
= next_free_host_timer();
10996 if (timer_index
< 0) {
10997 ret
= -TARGET_EAGAIN
;
10999 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
11002 phost_sevp
= &host_sevp
;
11003 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
11009 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
11013 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
11022 #ifdef TARGET_NR_timer_settime
11023 case TARGET_NR_timer_settime
:
11025 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11026 * struct itimerspec * old_value */
11027 target_timer_t timerid
= get_timer_id(arg1
);
11031 } else if (arg3
== 0) {
11032 ret
= -TARGET_EINVAL
;
11034 timer_t htimer
= g_posix_timers
[timerid
];
11035 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
11037 target_to_host_itimerspec(&hspec_new
, arg3
);
11039 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
11040 host_to_target_itimerspec(arg2
, &hspec_old
);
11046 #ifdef TARGET_NR_timer_gettime
11047 case TARGET_NR_timer_gettime
:
11049 /* args: timer_t timerid, struct itimerspec *curr_value */
11050 target_timer_t timerid
= get_timer_id(arg1
);
11054 } else if (!arg2
) {
11055 ret
= -TARGET_EFAULT
;
11057 timer_t htimer
= g_posix_timers
[timerid
];
11058 struct itimerspec hspec
;
11059 ret
= get_errno(timer_gettime(htimer
, &hspec
));
11061 if (host_to_target_itimerspec(arg2
, &hspec
)) {
11062 ret
= -TARGET_EFAULT
;
11069 #ifdef TARGET_NR_timer_getoverrun
11070 case TARGET_NR_timer_getoverrun
:
11072 /* args: timer_t timerid */
11073 target_timer_t timerid
= get_timer_id(arg1
);
11078 timer_t htimer
= g_posix_timers
[timerid
];
11079 ret
= get_errno(timer_getoverrun(htimer
));
11081 fd_trans_unregister(ret
);
11086 #ifdef TARGET_NR_timer_delete
11087 case TARGET_NR_timer_delete
:
11089 /* args: timer_t timerid */
11090 target_timer_t timerid
= get_timer_id(arg1
);
11095 timer_t htimer
= g_posix_timers
[timerid
];
11096 ret
= get_errno(timer_delete(htimer
));
11097 g_posix_timers
[timerid
] = 0;
11103 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11104 case TARGET_NR_timerfd_create
:
11105 ret
= get_errno(timerfd_create(arg1
,
11106 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
11110 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11111 case TARGET_NR_timerfd_gettime
:
11113 struct itimerspec its_curr
;
11115 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
11117 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
11124 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11125 case TARGET_NR_timerfd_settime
:
11127 struct itimerspec its_new
, its_old
, *p_new
;
11130 if (target_to_host_itimerspec(&its_new
, arg3
)) {
11138 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
11140 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
11147 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11148 case TARGET_NR_ioprio_get
:
11149 ret
= get_errno(ioprio_get(arg1
, arg2
));
11153 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11154 case TARGET_NR_ioprio_set
:
11155 ret
= get_errno(ioprio_set(arg1
, arg2
, arg3
));
11159 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11160 case TARGET_NR_setns
:
11161 ret
= get_errno(setns(arg1
, arg2
));
11164 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11165 case TARGET_NR_unshare
:
11166 ret
= get_errno(unshare(arg1
));
11172 gemu_log("qemu: Unsupported syscall: %d\n", num
);
11173 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
11174 unimplemented_nowarn
:
11176 ret
= -TARGET_ENOSYS
;
11181 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
11184 print_syscall_ret(num
, ret
);
11187 ret
= -TARGET_EFAULT
;