/*
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
29 #include <sys/mount.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
36 #include <linux/capability.h>
39 int __clone2(int (*fn
)(void *), void *child_stack_base
,
40 size_t stack_size
, int flags
, void *arg
, ...);
42 #include <sys/socket.h>
46 #include <sys/times.h>
49 #include <sys/statfs.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include "qemu-common.h"
60 #include <sys/timerfd.h>
66 #include <sys/eventfd.h>
69 #include <sys/epoll.h>
72 #include "qemu/xattr.h"
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
91 #include <linux/mtio.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <linux/netlink.h>
104 #ifdef CONFIG_RTNETLINK
105 #include <linux/rtnetlink.h>
107 #include <linux/audit.h>
108 #include "linux_loop.h"
113 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
114 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
117 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
118 * once. This exercises the codepaths for restart.
120 //#define DEBUG_ERESTARTSYS
122 //#include <linux/msdos_fs.h>
123 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
124 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
134 #define _syscall0(type,name) \
135 static type name (void) \
137 return syscall(__NR_##name); \
140 #define _syscall1(type,name,type1,arg1) \
141 static type name (type1 arg1) \
143 return syscall(__NR_##name, arg1); \
146 #define _syscall2(type,name,type1,arg1,type2,arg2) \
147 static type name (type1 arg1,type2 arg2) \
149 return syscall(__NR_##name, arg1, arg2); \
152 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
153 static type name (type1 arg1,type2 arg2,type3 arg3) \
155 return syscall(__NR_##name, arg1, arg2, arg3); \
158 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
159 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
161 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
164 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
166 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
168 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
172 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
173 type5,arg5,type6,arg6) \
174 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
177 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
181 #define __NR_sys_uname __NR_uname
182 #define __NR_sys_getcwd1 __NR_getcwd
183 #define __NR_sys_getdents __NR_getdents
184 #define __NR_sys_getdents64 __NR_getdents64
185 #define __NR_sys_getpriority __NR_getpriority
186 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
187 #define __NR_sys_syslog __NR_syslog
188 #define __NR_sys_futex __NR_futex
189 #define __NR_sys_inotify_init __NR_inotify_init
190 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
191 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
193 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
195 #define __NR__llseek __NR_lseek
198 /* Newer kernel ports have llseek() instead of _llseek() */
199 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
200 #define TARGET_NR__llseek TARGET_NR_llseek
204 _syscall0(int, gettid
)
206 /* This is a replacement for the host gettid() and must return a host
208 static int gettid(void) {
212 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
213 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
215 #if !defined(__NR_getdents) || \
216 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
217 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
219 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
220 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
221 loff_t
*, res
, uint
, wh
);
223 _syscall3(int,sys_rt_sigqueueinfo
,int,pid
,int,sig
,siginfo_t
*,uinfo
)
224 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
225 #ifdef __NR_exit_group
226 _syscall1(int,exit_group
,int,error_code
)
228 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
229 _syscall1(int,set_tid_address
,int *,tidptr
)
231 #if defined(TARGET_NR_futex) && defined(__NR_futex)
232 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
233 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
235 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
236 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
237 unsigned long *, user_mask_ptr
);
238 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
239 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
240 unsigned long *, user_mask_ptr
);
241 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
243 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
244 struct __user_cap_data_struct
*, data
);
245 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
246 struct __user_cap_data_struct
*, data
);
247 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
248 _syscall2(int, ioprio_get
, int, which
, int, who
)
250 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
251 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
253 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
254 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
257 static bitmask_transtbl fcntl_flags_tbl
[] = {
258 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
259 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
260 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
261 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
262 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
263 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
264 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
265 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
266 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
267 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
268 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
269 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
270 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
271 #if defined(O_DIRECT)
272 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
274 #if defined(O_NOATIME)
275 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
277 #if defined(O_CLOEXEC)
278 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
281 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
283 /* Don't terminate the list prematurely on 64-bit host+guest. */
284 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
285 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
290 typedef abi_long (*TargetFdDataFunc
)(void *, size_t);
291 typedef abi_long (*TargetFdAddrFunc
)(void *, abi_ulong
, socklen_t
);
292 typedef struct TargetFdTrans
{
293 TargetFdDataFunc host_to_target_data
;
294 TargetFdDataFunc target_to_host_data
;
295 TargetFdAddrFunc target_to_host_addr
;
298 static TargetFdTrans
**target_fd_trans
;
300 static unsigned int target_fd_max
;
302 static TargetFdDataFunc
fd_trans_target_to_host_data(int fd
)
304 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
305 return target_fd_trans
[fd
]->target_to_host_data
;
310 static TargetFdDataFunc
fd_trans_host_to_target_data(int fd
)
312 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
313 return target_fd_trans
[fd
]->host_to_target_data
;
318 static TargetFdAddrFunc
fd_trans_target_to_host_addr(int fd
)
320 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
321 return target_fd_trans
[fd
]->target_to_host_addr
;
326 static void fd_trans_register(int fd
, TargetFdTrans
*trans
)
330 if (fd
>= target_fd_max
) {
331 oldmax
= target_fd_max
;
332 target_fd_max
= ((fd
>> 6) + 1) << 6; /* by slice of 64 entries */
333 target_fd_trans
= g_renew(TargetFdTrans
*,
334 target_fd_trans
, target_fd_max
);
335 memset((void *)(target_fd_trans
+ oldmax
), 0,
336 (target_fd_max
- oldmax
) * sizeof(TargetFdTrans
*));
338 target_fd_trans
[fd
] = trans
;
341 static void fd_trans_unregister(int fd
)
343 if (fd
>= 0 && fd
< target_fd_max
) {
344 target_fd_trans
[fd
] = NULL
;
348 static void fd_trans_dup(int oldfd
, int newfd
)
350 fd_trans_unregister(newfd
);
351 if (oldfd
< target_fd_max
&& target_fd_trans
[oldfd
]) {
352 fd_trans_register(newfd
, target_fd_trans
[oldfd
]);
/* getcwd() variant matching the kernel syscall's return convention:
 * on success return the length of the path INCLUDING the trailing NUL;
 * on failure return -1 (getcwd() has already set errno).
 * NOTE(review): the failure return was dropped by the extraction and is
 * restored here (non-void function; the comment documents errno). */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return -1;
    }
    return strlen(buf) + 1;
}
365 #ifdef TARGET_NR_utimensat
366 #ifdef CONFIG_UTIMENSAT
367 static int sys_utimensat(int dirfd
, const char *pathname
,
368 const struct timespec times
[2], int flags
)
370 if (pathname
== NULL
)
371 return futimens(dirfd
, times
);
373 return utimensat(dirfd
, pathname
, times
, flags
);
375 #elif defined(__NR_utimensat)
376 #define __NR_sys_utimensat __NR_utimensat
377 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
378 const struct timespec
*,tsp
,int,flags
)
380 static int sys_utimensat(int dirfd
, const char *pathname
,
381 const struct timespec times
[2], int flags
)
387 #endif /* TARGET_NR_utimensat */
389 #ifdef CONFIG_INOTIFY
390 #include <sys/inotify.h>
392 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin wrapper over the host inotify_init(2); returns the new inotify
 * fd or -1 with errno set. */
static int sys_inotify_init(void)
{
  return (inotify_init());
}
398 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Thin wrapper over the host inotify_add_watch(2); returns the watch
 * descriptor or -1 with errno set. */
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
404 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Thin wrapper over the host inotify_rm_watch(2); returns 0 on success
 * or -1 with errno set. */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
410 #ifdef CONFIG_INOTIFY1
411 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Thin wrapper over the host inotify_init1(2); returns the new inotify
 * fd or -1 with errno set. */
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
419 /* Userspace can usually survive runtime without inotify */
420 #undef TARGET_NR_inotify_init
421 #undef TARGET_NR_inotify_init1
422 #undef TARGET_NR_inotify_add_watch
423 #undef TARGET_NR_inotify_rm_watch
424 #endif /* CONFIG_INOTIFY */
426 #if defined(TARGET_NR_prlimit64)
427 #ifndef __NR_prlimit64
428 # define __NR_prlimit64 -1
430 #define __NR_sys_prlimit64 __NR_prlimit64
431 /* The glibc rlimit structure may not be that used by the underlying syscall */
432 struct host_rlimit64
{
436 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
437 const struct host_rlimit64
*, new_limit
,
438 struct host_rlimit64
*, old_limit
)
442 #if defined(TARGET_NR_timer_create)
443 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
444 static timer_t g_posix_timers
[32] = { 0, } ;
446 static inline int next_free_host_timer(void)
449 /* FIXME: Does finding the next free slot require a lock? */
450 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
451 if (g_posix_timers
[k
] == 0) {
452 g_posix_timers
[k
] = (timer_t
) 1;
460 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
462 static inline int regpairs_aligned(void *cpu_env
) {
463 return ((((CPUARMState
*)cpu_env
)->eabi
) == 1) ;
465 #elif defined(TARGET_MIPS)
466 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
467 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
468 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
469 * of registers which translates to the same as ARM/MIPS, because we start with
471 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
473 static inline int regpairs_aligned(void *cpu_env
) { return 0; }
476 #define ERRNO_TABLE_SIZE 1200
478 /* target_to_host_errno_table[] is initialized from
479 * host_to_target_errno_table[] in syscall_init(). */
480 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
484 * This list is the union of errno values overridden in asm-<arch>/errno.h
485 * minus the errnos that are not actually generic to all archs.
487 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
488 [EAGAIN
] = TARGET_EAGAIN
,
489 [EIDRM
] = TARGET_EIDRM
,
490 [ECHRNG
] = TARGET_ECHRNG
,
491 [EL2NSYNC
] = TARGET_EL2NSYNC
,
492 [EL3HLT
] = TARGET_EL3HLT
,
493 [EL3RST
] = TARGET_EL3RST
,
494 [ELNRNG
] = TARGET_ELNRNG
,
495 [EUNATCH
] = TARGET_EUNATCH
,
496 [ENOCSI
] = TARGET_ENOCSI
,
497 [EL2HLT
] = TARGET_EL2HLT
,
498 [EDEADLK
] = TARGET_EDEADLK
,
499 [ENOLCK
] = TARGET_ENOLCK
,
500 [EBADE
] = TARGET_EBADE
,
501 [EBADR
] = TARGET_EBADR
,
502 [EXFULL
] = TARGET_EXFULL
,
503 [ENOANO
] = TARGET_ENOANO
,
504 [EBADRQC
] = TARGET_EBADRQC
,
505 [EBADSLT
] = TARGET_EBADSLT
,
506 [EBFONT
] = TARGET_EBFONT
,
507 [ENOSTR
] = TARGET_ENOSTR
,
508 [ENODATA
] = TARGET_ENODATA
,
509 [ETIME
] = TARGET_ETIME
,
510 [ENOSR
] = TARGET_ENOSR
,
511 [ENONET
] = TARGET_ENONET
,
512 [ENOPKG
] = TARGET_ENOPKG
,
513 [EREMOTE
] = TARGET_EREMOTE
,
514 [ENOLINK
] = TARGET_ENOLINK
,
515 [EADV
] = TARGET_EADV
,
516 [ESRMNT
] = TARGET_ESRMNT
,
517 [ECOMM
] = TARGET_ECOMM
,
518 [EPROTO
] = TARGET_EPROTO
,
519 [EDOTDOT
] = TARGET_EDOTDOT
,
520 [EMULTIHOP
] = TARGET_EMULTIHOP
,
521 [EBADMSG
] = TARGET_EBADMSG
,
522 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
523 [EOVERFLOW
] = TARGET_EOVERFLOW
,
524 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
525 [EBADFD
] = TARGET_EBADFD
,
526 [EREMCHG
] = TARGET_EREMCHG
,
527 [ELIBACC
] = TARGET_ELIBACC
,
528 [ELIBBAD
] = TARGET_ELIBBAD
,
529 [ELIBSCN
] = TARGET_ELIBSCN
,
530 [ELIBMAX
] = TARGET_ELIBMAX
,
531 [ELIBEXEC
] = TARGET_ELIBEXEC
,
532 [EILSEQ
] = TARGET_EILSEQ
,
533 [ENOSYS
] = TARGET_ENOSYS
,
534 [ELOOP
] = TARGET_ELOOP
,
535 [ERESTART
] = TARGET_ERESTART
,
536 [ESTRPIPE
] = TARGET_ESTRPIPE
,
537 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
538 [EUSERS
] = TARGET_EUSERS
,
539 [ENOTSOCK
] = TARGET_ENOTSOCK
,
540 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
541 [EMSGSIZE
] = TARGET_EMSGSIZE
,
542 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
543 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
544 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
545 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
546 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
547 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
548 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
549 [EADDRINUSE
] = TARGET_EADDRINUSE
,
550 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
551 [ENETDOWN
] = TARGET_ENETDOWN
,
552 [ENETUNREACH
] = TARGET_ENETUNREACH
,
553 [ENETRESET
] = TARGET_ENETRESET
,
554 [ECONNABORTED
] = TARGET_ECONNABORTED
,
555 [ECONNRESET
] = TARGET_ECONNRESET
,
556 [ENOBUFS
] = TARGET_ENOBUFS
,
557 [EISCONN
] = TARGET_EISCONN
,
558 [ENOTCONN
] = TARGET_ENOTCONN
,
559 [EUCLEAN
] = TARGET_EUCLEAN
,
560 [ENOTNAM
] = TARGET_ENOTNAM
,
561 [ENAVAIL
] = TARGET_ENAVAIL
,
562 [EISNAM
] = TARGET_EISNAM
,
563 [EREMOTEIO
] = TARGET_EREMOTEIO
,
564 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
565 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
566 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
567 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
568 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
569 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
570 [EALREADY
] = TARGET_EALREADY
,
571 [EINPROGRESS
] = TARGET_EINPROGRESS
,
572 [ESTALE
] = TARGET_ESTALE
,
573 [ECANCELED
] = TARGET_ECANCELED
,
574 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
575 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
577 [ENOKEY
] = TARGET_ENOKEY
,
580 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
583 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
586 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
589 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
591 #ifdef ENOTRECOVERABLE
592 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
596 static inline int host_to_target_errno(int err
)
598 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
599 host_to_target_errno_table
[err
]) {
600 return host_to_target_errno_table
[err
];
605 static inline int target_to_host_errno(int err
)
607 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
608 target_to_host_errno_table
[err
]) {
609 return target_to_host_errno_table
[err
];
614 static inline abi_long
get_errno(abi_long ret
)
617 return -host_to_target_errno(errno
);
622 static inline int is_error(abi_long ret
)
624 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
627 const char *target_strerror(int err
)
629 if (err
== TARGET_ERESTARTSYS
) {
630 return "To be restarted";
632 if (err
== TARGET_QEMU_ESIGRETURN
) {
633 return "Successful exit from sigreturn";
636 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
639 return strerror(target_to_host_errno(err
));
642 #define safe_syscall0(type, name) \
643 static type safe_##name(void) \
645 return safe_syscall(__NR_##name); \
648 #define safe_syscall1(type, name, type1, arg1) \
649 static type safe_##name(type1 arg1) \
651 return safe_syscall(__NR_##name, arg1); \
654 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
655 static type safe_##name(type1 arg1, type2 arg2) \
657 return safe_syscall(__NR_##name, arg1, arg2); \
660 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
661 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
663 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
666 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
668 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
670 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
673 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
674 type4, arg4, type5, arg5) \
675 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
678 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
681 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
682 type4, arg4, type5, arg5, type6, arg6) \
683 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
684 type5 arg5, type6 arg6) \
686 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
689 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
690 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
691 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
692 int, flags
, mode_t
, mode
)
693 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
694 struct rusage
*, rusage
)
695 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
696 int, options
, struct rusage
*, rusage
)
697 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
698 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
699 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
700 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
701 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
703 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
704 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
706 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
707 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
708 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
709 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
710 safe_syscall2(int, tkill
, int, tid
, int, sig
)
711 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
712 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
713 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
714 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
716 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
717 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
718 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
719 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
720 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
721 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
722 safe_syscall2(int, flock
, int, fd
, int, operation
)
723 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
724 const struct timespec
*, uts
, size_t, sigsetsize
)
725 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
727 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
728 struct timespec
*, rem
)
729 #ifdef TARGET_NR_clock_nanosleep
730 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
731 const struct timespec
*, req
, struct timespec
*, rem
)
734 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
736 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
737 long, msgtype
, int, flags
)
738 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
739 unsigned, nsops
, const struct timespec
*, timeout
)
741 /* This host kernel architecture uses a single ipc syscall; fake up
742 * wrappers for the sub-operations to hide this implementation detail.
743 * Annoyingly we can't include linux/ipc.h to get the constant definitions
744 * for the call parameter because some structs in there conflict with the
745 * sys/ipc.h ones. So we just define them here, and rely on them being
746 * the same for all host architectures.
748 #define Q_SEMTIMEDOP 4
751 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
753 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
754 void *, ptr
, long, fifth
)
755 static int safe_msgsnd(int msgid
, const void *msgp
, size_t sz
, int flags
)
757 return safe_ipc(Q_IPCCALL(0, Q_MSGSND
), msgid
, sz
, flags
, (void *)msgp
, 0);
759 static int safe_msgrcv(int msgid
, void *msgp
, size_t sz
, long type
, int flags
)
761 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV
), msgid
, sz
, flags
, msgp
, type
);
763 static int safe_semtimedop(int semid
, struct sembuf
*tsops
, unsigned nsops
,
764 const struct timespec
*timeout
)
766 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP
), semid
, nsops
, 0, tsops
,
770 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
771 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
772 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
773 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
774 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
776 /* We do ioctl like this rather than via safe_syscall3 to preserve the
777 * "third argument might be integer or pointer or not present" behaviour of
780 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
781 /* Similarly for fcntl. Note that callers must always:
782 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
783 * use the flock64 struct rather than unsuffixed flock
784 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
787 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
789 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
792 static inline int host_to_target_sock_type(int host_type
)
796 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
798 target_type
= TARGET_SOCK_DGRAM
;
801 target_type
= TARGET_SOCK_STREAM
;
804 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
808 #if defined(SOCK_CLOEXEC)
809 if (host_type
& SOCK_CLOEXEC
) {
810 target_type
|= TARGET_SOCK_CLOEXEC
;
814 #if defined(SOCK_NONBLOCK)
815 if (host_type
& SOCK_NONBLOCK
) {
816 target_type
|= TARGET_SOCK_NONBLOCK
;
823 static abi_ulong target_brk
;
824 static abi_ulong target_original_brk
;
825 static abi_ulong brk_page
;
827 void target_set_brk(abi_ulong new_brk
)
829 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
830 brk_page
= HOST_PAGE_ALIGN(target_brk
);
833 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
834 #define DEBUGF_BRK(message, args...)
836 /* do_brk() must return target values and target errnos. */
837 abi_long
do_brk(abi_ulong new_brk
)
839 abi_long mapped_addr
;
842 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
845 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
848 if (new_brk
< target_original_brk
) {
849 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
854 /* If the new brk is less than the highest page reserved to the
855 * target heap allocation, set it and we're almost done... */
856 if (new_brk
<= brk_page
) {
857 /* Heap contents are initialized to zero, as for anonymous
859 if (new_brk
> target_brk
) {
860 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
862 target_brk
= new_brk
;
863 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
867 /* We need to allocate more memory after the brk... Note that
868 * we don't use MAP_FIXED because that will map over the top of
869 * any existing mapping (like the one with the host libc or qemu
870 * itself); instead we treat "mapped but at wrong address" as
871 * a failure and unmap again.
873 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
874 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
875 PROT_READ
|PROT_WRITE
,
876 MAP_ANON
|MAP_PRIVATE
, 0, 0));
878 if (mapped_addr
== brk_page
) {
879 /* Heap contents are initialized to zero, as for anonymous
880 * mapped pages. Technically the new pages are already
881 * initialized to zero since they *are* anonymous mapped
882 * pages, however we have to take care with the contents that
883 * come from the remaining part of the previous page: it may
884 * contains garbage data due to a previous heap usage (grown
886 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
888 target_brk
= new_brk
;
889 brk_page
= HOST_PAGE_ALIGN(target_brk
);
890 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
893 } else if (mapped_addr
!= -1) {
894 /* Mapped but at wrong address, meaning there wasn't actually
895 * enough space for this brk.
897 target_munmap(mapped_addr
, new_alloc_size
);
899 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
902 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
905 #if defined(TARGET_ALPHA)
906 /* We (partially) emulate OSF/1 on Alpha, which requires we
907 return a proper errno, not an unchanged brk value. */
908 return -TARGET_ENOMEM
;
910 /* For everything else, return the previous break. */
914 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
915 abi_ulong target_fds_addr
,
919 abi_ulong b
, *target_fds
;
921 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
922 if (!(target_fds
= lock_user(VERIFY_READ
,
924 sizeof(abi_ulong
) * nw
,
926 return -TARGET_EFAULT
;
930 for (i
= 0; i
< nw
; i
++) {
931 /* grab the abi_ulong */
932 __get_user(b
, &target_fds
[i
]);
933 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
934 /* check the bit inside the abi_ulong */
941 unlock_user(target_fds
, target_fds_addr
, 0);
946 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
947 abi_ulong target_fds_addr
,
950 if (target_fds_addr
) {
951 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
952 return -TARGET_EFAULT
;
960 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
966 abi_ulong
*target_fds
;
968 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
969 if (!(target_fds
= lock_user(VERIFY_WRITE
,
971 sizeof(abi_ulong
) * nw
,
973 return -TARGET_EFAULT
;
976 for (i
= 0; i
< nw
; i
++) {
978 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
979 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
982 __put_user(v
, &target_fds
[i
]);
985 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
990 #if defined(__alpha__)
996 static inline abi_long
host_to_target_clock_t(long ticks
)
998 #if HOST_HZ == TARGET_HZ
1001 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
1005 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
1006 const struct rusage
*rusage
)
1008 struct target_rusage
*target_rusage
;
1010 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
1011 return -TARGET_EFAULT
;
1012 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
1013 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
1014 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
1015 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
1016 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
1017 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
1018 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
1019 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
1020 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
1021 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
1022 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1023 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1024 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1025 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1026 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1027 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1028 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1029 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1030 unlock_user_struct(target_rusage
, target_addr
, 1);
1035 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1037 abi_ulong target_rlim_swap
;
1040 target_rlim_swap
= tswapal(target_rlim
);
1041 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1042 return RLIM_INFINITY
;
1044 result
= target_rlim_swap
;
1045 if (target_rlim_swap
!= (rlim_t
)result
)
1046 return RLIM_INFINITY
;
1051 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1053 abi_ulong target_rlim_swap
;
1056 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1057 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1059 target_rlim_swap
= rlim
;
1060 result
= tswapal(target_rlim_swap
);
1065 static inline int target_to_host_resource(int code
)
1068 case TARGET_RLIMIT_AS
:
1070 case TARGET_RLIMIT_CORE
:
1072 case TARGET_RLIMIT_CPU
:
1074 case TARGET_RLIMIT_DATA
:
1076 case TARGET_RLIMIT_FSIZE
:
1077 return RLIMIT_FSIZE
;
1078 case TARGET_RLIMIT_LOCKS
:
1079 return RLIMIT_LOCKS
;
1080 case TARGET_RLIMIT_MEMLOCK
:
1081 return RLIMIT_MEMLOCK
;
1082 case TARGET_RLIMIT_MSGQUEUE
:
1083 return RLIMIT_MSGQUEUE
;
1084 case TARGET_RLIMIT_NICE
:
1086 case TARGET_RLIMIT_NOFILE
:
1087 return RLIMIT_NOFILE
;
1088 case TARGET_RLIMIT_NPROC
:
1089 return RLIMIT_NPROC
;
1090 case TARGET_RLIMIT_RSS
:
1092 case TARGET_RLIMIT_RTPRIO
:
1093 return RLIMIT_RTPRIO
;
1094 case TARGET_RLIMIT_SIGPENDING
:
1095 return RLIMIT_SIGPENDING
;
1096 case TARGET_RLIMIT_STACK
:
1097 return RLIMIT_STACK
;
1103 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1104 abi_ulong target_tv_addr
)
1106 struct target_timeval
*target_tv
;
1108 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
1109 return -TARGET_EFAULT
;
1111 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1112 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1114 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1119 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1120 const struct timeval
*tv
)
1122 struct target_timeval
*target_tv
;
1124 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
1125 return -TARGET_EFAULT
;
1127 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1128 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1130 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1135 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1136 abi_ulong target_tz_addr
)
1138 struct target_timezone
*target_tz
;
1140 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1141 return -TARGET_EFAULT
;
1144 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1145 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1147 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1152 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1155 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1156 abi_ulong target_mq_attr_addr
)
1158 struct target_mq_attr
*target_mq_attr
;
1160 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1161 target_mq_attr_addr
, 1))
1162 return -TARGET_EFAULT
;
1164 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1165 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1166 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1167 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1169 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1174 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1175 const struct mq_attr
*attr
)
1177 struct target_mq_attr
*target_mq_attr
;
1179 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1180 target_mq_attr_addr
, 0))
1181 return -TARGET_EFAULT
;
1183 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1184 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1185 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1186 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1188 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1194 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1195 /* do_select() must return target values and target errnos. */
1196 static abi_long
do_select(int n
,
1197 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1198 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1200 fd_set rfds
, wfds
, efds
;
1201 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1203 struct timespec ts
, *ts_ptr
;
1206 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1210 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1214 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1219 if (target_tv_addr
) {
1220 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1221 return -TARGET_EFAULT
;
1222 ts
.tv_sec
= tv
.tv_sec
;
1223 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1229 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1232 if (!is_error(ret
)) {
1233 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1234 return -TARGET_EFAULT
;
1235 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1236 return -TARGET_EFAULT
;
1237 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1238 return -TARGET_EFAULT
;
1240 if (target_tv_addr
) {
1241 tv
.tv_sec
= ts
.tv_sec
;
1242 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1243 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1244 return -TARGET_EFAULT
;
1253 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1256 return pipe2(host_pipe
, flags
);
1262 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1263 int flags
, int is_pipe2
)
1267 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1270 return get_errno(ret
);
1272 /* Several targets have special calling conventions for the original
1273 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1275 #if defined(TARGET_ALPHA)
1276 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1277 return host_pipe
[0];
1278 #elif defined(TARGET_MIPS)
1279 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1280 return host_pipe
[0];
1281 #elif defined(TARGET_SH4)
1282 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1283 return host_pipe
[0];
1284 #elif defined(TARGET_SPARC)
1285 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1286 return host_pipe
[0];
1290 if (put_user_s32(host_pipe
[0], pipedes
)
1291 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1292 return -TARGET_EFAULT
;
1293 return get_errno(ret
);
1296 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1297 abi_ulong target_addr
,
1300 struct target_ip_mreqn
*target_smreqn
;
1302 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1304 return -TARGET_EFAULT
;
1305 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1306 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1307 if (len
== sizeof(struct target_ip_mreqn
))
1308 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1309 unlock_user(target_smreqn
, target_addr
, 0);
1314 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1315 abi_ulong target_addr
,
1318 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1319 sa_family_t sa_family
;
1320 struct target_sockaddr
*target_saddr
;
1322 if (fd_trans_target_to_host_addr(fd
)) {
1323 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1326 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1328 return -TARGET_EFAULT
;
1330 sa_family
= tswap16(target_saddr
->sa_family
);
1332 /* Oops. The caller might send a incomplete sun_path; sun_path
1333 * must be terminated by \0 (see the manual page), but
1334 * unfortunately it is quite common to specify sockaddr_un
1335 * length as "strlen(x->sun_path)" while it should be
1336 * "strlen(...) + 1". We'll fix that here if needed.
1337 * Linux kernel has a similar feature.
1340 if (sa_family
== AF_UNIX
) {
1341 if (len
< unix_maxlen
&& len
> 0) {
1342 char *cp
= (char*)target_saddr
;
1344 if ( cp
[len
-1] && !cp
[len
] )
1347 if (len
> unix_maxlen
)
1351 memcpy(addr
, target_saddr
, len
);
1352 addr
->sa_family
= sa_family
;
1353 if (sa_family
== AF_NETLINK
) {
1354 struct sockaddr_nl
*nladdr
;
1356 nladdr
= (struct sockaddr_nl
*)addr
;
1357 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1358 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1359 } else if (sa_family
== AF_PACKET
) {
1360 struct target_sockaddr_ll
*lladdr
;
1362 lladdr
= (struct target_sockaddr_ll
*)addr
;
1363 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1364 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1366 unlock_user(target_saddr
, target_addr
, 0);
1371 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1372 struct sockaddr
*addr
,
1375 struct target_sockaddr
*target_saddr
;
1377 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1379 return -TARGET_EFAULT
;
1380 memcpy(target_saddr
, addr
, len
);
1381 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1382 if (addr
->sa_family
== AF_NETLINK
) {
1383 struct sockaddr_nl
*target_nl
= (struct sockaddr_nl
*)target_saddr
;
1384 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1385 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1387 unlock_user(target_saddr
, target_addr
, len
);
1392 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1393 struct target_msghdr
*target_msgh
)
1395 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1396 abi_long msg_controllen
;
1397 abi_ulong target_cmsg_addr
;
1398 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1399 socklen_t space
= 0;
1401 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1402 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1404 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1405 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1406 target_cmsg_start
= target_cmsg
;
1408 return -TARGET_EFAULT
;
1410 while (cmsg
&& target_cmsg
) {
1411 void *data
= CMSG_DATA(cmsg
);
1412 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1414 int len
= tswapal(target_cmsg
->cmsg_len
)
1415 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
1417 space
+= CMSG_SPACE(len
);
1418 if (space
> msgh
->msg_controllen
) {
1419 space
-= CMSG_SPACE(len
);
1420 /* This is a QEMU bug, since we allocated the payload
1421 * area ourselves (unlike overflow in host-to-target
1422 * conversion, which is just the guest giving us a buffer
1423 * that's too small). It can't happen for the payload types
1424 * we currently support; if it becomes an issue in future
1425 * we would need to improve our allocation strategy to
1426 * something more intelligent than "twice the size of the
1427 * target buffer we're reading from".
1429 gemu_log("Host cmsg overflow\n");
1433 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1434 cmsg
->cmsg_level
= SOL_SOCKET
;
1436 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1438 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1439 cmsg
->cmsg_len
= CMSG_LEN(len
);
1441 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1442 int *fd
= (int *)data
;
1443 int *target_fd
= (int *)target_data
;
1444 int i
, numfds
= len
/ sizeof(int);
1446 for (i
= 0; i
< numfds
; i
++) {
1447 __get_user(fd
[i
], target_fd
+ i
);
1449 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1450 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1451 struct ucred
*cred
= (struct ucred
*)data
;
1452 struct target_ucred
*target_cred
=
1453 (struct target_ucred
*)target_data
;
1455 __get_user(cred
->pid
, &target_cred
->pid
);
1456 __get_user(cred
->uid
, &target_cred
->uid
);
1457 __get_user(cred
->gid
, &target_cred
->gid
);
1459 gemu_log("Unsupported ancillary data: %d/%d\n",
1460 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1461 memcpy(data
, target_data
, len
);
1464 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1465 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1468 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1470 msgh
->msg_controllen
= space
;
1474 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1475 struct msghdr
*msgh
)
1477 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1478 abi_long msg_controllen
;
1479 abi_ulong target_cmsg_addr
;
1480 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1481 socklen_t space
= 0;
1483 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1484 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1486 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1487 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1488 target_cmsg_start
= target_cmsg
;
1490 return -TARGET_EFAULT
;
1492 while (cmsg
&& target_cmsg
) {
1493 void *data
= CMSG_DATA(cmsg
);
1494 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1496 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
1497 int tgt_len
, tgt_space
;
1499 /* We never copy a half-header but may copy half-data;
1500 * this is Linux's behaviour in put_cmsg(). Note that
1501 * truncation here is a guest problem (which we report
1502 * to the guest via the CTRUNC bit), unlike truncation
1503 * in target_to_host_cmsg, which is a QEMU bug.
1505 if (msg_controllen
< sizeof(struct cmsghdr
)) {
1506 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1510 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1511 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1513 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1515 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1517 tgt_len
= TARGET_CMSG_LEN(len
);
1519 /* Payload types which need a different size of payload on
1520 * the target must adjust tgt_len here.
1522 switch (cmsg
->cmsg_level
) {
1524 switch (cmsg
->cmsg_type
) {
1526 tgt_len
= sizeof(struct target_timeval
);
1535 if (msg_controllen
< tgt_len
) {
1536 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1537 tgt_len
= msg_controllen
;
1540 /* We must now copy-and-convert len bytes of payload
1541 * into tgt_len bytes of destination space. Bear in mind
1542 * that in both source and destination we may be dealing
1543 * with a truncated value!
1545 switch (cmsg
->cmsg_level
) {
1547 switch (cmsg
->cmsg_type
) {
1550 int *fd
= (int *)data
;
1551 int *target_fd
= (int *)target_data
;
1552 int i
, numfds
= tgt_len
/ sizeof(int);
1554 for (i
= 0; i
< numfds
; i
++) {
1555 __put_user(fd
[i
], target_fd
+ i
);
1561 struct timeval
*tv
= (struct timeval
*)data
;
1562 struct target_timeval
*target_tv
=
1563 (struct target_timeval
*)target_data
;
1565 if (len
!= sizeof(struct timeval
) ||
1566 tgt_len
!= sizeof(struct target_timeval
)) {
1570 /* copy struct timeval to target */
1571 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1572 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1575 case SCM_CREDENTIALS
:
1577 struct ucred
*cred
= (struct ucred
*)data
;
1578 struct target_ucred
*target_cred
=
1579 (struct target_ucred
*)target_data
;
1581 __put_user(cred
->pid
, &target_cred
->pid
);
1582 __put_user(cred
->uid
, &target_cred
->uid
);
1583 __put_user(cred
->gid
, &target_cred
->gid
);
1593 gemu_log("Unsupported ancillary data: %d/%d\n",
1594 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1595 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1596 if (tgt_len
> len
) {
1597 memset(target_data
+ len
, 0, tgt_len
- len
);
1601 target_cmsg
->cmsg_len
= tswapal(tgt_len
);
1602 tgt_space
= TARGET_CMSG_SPACE(len
);
1603 if (msg_controllen
< tgt_space
) {
1604 tgt_space
= msg_controllen
;
1606 msg_controllen
-= tgt_space
;
1608 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1609 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1612 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1614 target_msgh
->msg_controllen
= tswapal(space
);
1618 static void tswap_nlmsghdr(struct nlmsghdr
*nlh
)
1620 nlh
->nlmsg_len
= tswap32(nlh
->nlmsg_len
);
1621 nlh
->nlmsg_type
= tswap16(nlh
->nlmsg_type
);
1622 nlh
->nlmsg_flags
= tswap16(nlh
->nlmsg_flags
);
1623 nlh
->nlmsg_seq
= tswap32(nlh
->nlmsg_seq
);
1624 nlh
->nlmsg_pid
= tswap32(nlh
->nlmsg_pid
);
1627 static abi_long
host_to_target_for_each_nlmsg(struct nlmsghdr
*nlh
,
1629 abi_long (*host_to_target_nlmsg
)
1630 (struct nlmsghdr
*))
1635 while (len
> sizeof(struct nlmsghdr
)) {
1637 nlmsg_len
= nlh
->nlmsg_len
;
1638 if (nlmsg_len
< sizeof(struct nlmsghdr
) ||
1643 switch (nlh
->nlmsg_type
) {
1645 tswap_nlmsghdr(nlh
);
1651 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
1652 e
->error
= tswap32(e
->error
);
1653 tswap_nlmsghdr(&e
->msg
);
1654 tswap_nlmsghdr(nlh
);
1658 ret
= host_to_target_nlmsg(nlh
);
1660 tswap_nlmsghdr(nlh
);
1665 tswap_nlmsghdr(nlh
);
1666 len
-= NLMSG_ALIGN(nlmsg_len
);
1667 nlh
= (struct nlmsghdr
*)(((char*)nlh
) + NLMSG_ALIGN(nlmsg_len
));
1672 static abi_long
target_to_host_for_each_nlmsg(struct nlmsghdr
*nlh
,
1674 abi_long (*target_to_host_nlmsg
)
1675 (struct nlmsghdr
*))
1679 while (len
> sizeof(struct nlmsghdr
)) {
1680 if (tswap32(nlh
->nlmsg_len
) < sizeof(struct nlmsghdr
) ||
1681 tswap32(nlh
->nlmsg_len
) > len
) {
1684 tswap_nlmsghdr(nlh
);
1685 switch (nlh
->nlmsg_type
) {
1692 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
1693 e
->error
= tswap32(e
->error
);
1694 tswap_nlmsghdr(&e
->msg
);
1698 ret
= target_to_host_nlmsg(nlh
);
1703 len
-= NLMSG_ALIGN(nlh
->nlmsg_len
);
1704 nlh
= (struct nlmsghdr
*)(((char *)nlh
) + NLMSG_ALIGN(nlh
->nlmsg_len
));
1709 #ifdef CONFIG_RTNETLINK
1710 static abi_long
host_to_target_for_each_rtattr(struct rtattr
*rtattr
,
1712 abi_long (*host_to_target_rtattr
)
1715 unsigned short rta_len
;
1718 while (len
> sizeof(struct rtattr
)) {
1719 rta_len
= rtattr
->rta_len
;
1720 if (rta_len
< sizeof(struct rtattr
) ||
1724 ret
= host_to_target_rtattr(rtattr
);
1725 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
1726 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
1730 len
-= RTA_ALIGN(rta_len
);
1731 rtattr
= (struct rtattr
*)(((char *)rtattr
) + RTA_ALIGN(rta_len
));
1736 static abi_long
host_to_target_data_link_rtattr(struct rtattr
*rtattr
)
1739 struct rtnl_link_stats
*st
;
1740 struct rtnl_link_stats64
*st64
;
1741 struct rtnl_link_ifmap
*map
;
1743 switch (rtattr
->rta_type
) {
1746 case IFLA_BROADCAST
:
1752 case IFLA_OPERSTATE
:
1755 case IFLA_PROTO_DOWN
:
1762 case IFLA_CARRIER_CHANGES
:
1763 case IFLA_NUM_RX_QUEUES
:
1764 case IFLA_NUM_TX_QUEUES
:
1765 case IFLA_PROMISCUITY
:
1767 case IFLA_LINK_NETNSID
:
1771 u32
= RTA_DATA(rtattr
);
1772 *u32
= tswap32(*u32
);
1774 /* struct rtnl_link_stats */
1776 st
= RTA_DATA(rtattr
);
1777 st
->rx_packets
= tswap32(st
->rx_packets
);
1778 st
->tx_packets
= tswap32(st
->tx_packets
);
1779 st
->rx_bytes
= tswap32(st
->rx_bytes
);
1780 st
->tx_bytes
= tswap32(st
->tx_bytes
);
1781 st
->rx_errors
= tswap32(st
->rx_errors
);
1782 st
->tx_errors
= tswap32(st
->tx_errors
);
1783 st
->rx_dropped
= tswap32(st
->rx_dropped
);
1784 st
->tx_dropped
= tswap32(st
->tx_dropped
);
1785 st
->multicast
= tswap32(st
->multicast
);
1786 st
->collisions
= tswap32(st
->collisions
);
1788 /* detailed rx_errors: */
1789 st
->rx_length_errors
= tswap32(st
->rx_length_errors
);
1790 st
->rx_over_errors
= tswap32(st
->rx_over_errors
);
1791 st
->rx_crc_errors
= tswap32(st
->rx_crc_errors
);
1792 st
->rx_frame_errors
= tswap32(st
->rx_frame_errors
);
1793 st
->rx_fifo_errors
= tswap32(st
->rx_fifo_errors
);
1794 st
->rx_missed_errors
= tswap32(st
->rx_missed_errors
);
1796 /* detailed tx_errors */
1797 st
->tx_aborted_errors
= tswap32(st
->tx_aborted_errors
);
1798 st
->tx_carrier_errors
= tswap32(st
->tx_carrier_errors
);
1799 st
->tx_fifo_errors
= tswap32(st
->tx_fifo_errors
);
1800 st
->tx_heartbeat_errors
= tswap32(st
->tx_heartbeat_errors
);
1801 st
->tx_window_errors
= tswap32(st
->tx_window_errors
);
1804 st
->rx_compressed
= tswap32(st
->rx_compressed
);
1805 st
->tx_compressed
= tswap32(st
->tx_compressed
);
1807 /* struct rtnl_link_stats64 */
1809 st64
= RTA_DATA(rtattr
);
1810 st64
->rx_packets
= tswap64(st64
->rx_packets
);
1811 st64
->tx_packets
= tswap64(st64
->tx_packets
);
1812 st64
->rx_bytes
= tswap64(st64
->rx_bytes
);
1813 st64
->tx_bytes
= tswap64(st64
->tx_bytes
);
1814 st64
->rx_errors
= tswap64(st64
->rx_errors
);
1815 st64
->tx_errors
= tswap64(st64
->tx_errors
);
1816 st64
->rx_dropped
= tswap64(st64
->rx_dropped
);
1817 st64
->tx_dropped
= tswap64(st64
->tx_dropped
);
1818 st64
->multicast
= tswap64(st64
->multicast
);
1819 st64
->collisions
= tswap64(st64
->collisions
);
1821 /* detailed rx_errors: */
1822 st64
->rx_length_errors
= tswap64(st64
->rx_length_errors
);
1823 st64
->rx_over_errors
= tswap64(st64
->rx_over_errors
);
1824 st64
->rx_crc_errors
= tswap64(st64
->rx_crc_errors
);
1825 st64
->rx_frame_errors
= tswap64(st64
->rx_frame_errors
);
1826 st64
->rx_fifo_errors
= tswap64(st64
->rx_fifo_errors
);
1827 st64
->rx_missed_errors
= tswap64(st64
->rx_missed_errors
);
1829 /* detailed tx_errors */
1830 st64
->tx_aborted_errors
= tswap64(st64
->tx_aborted_errors
);
1831 st64
->tx_carrier_errors
= tswap64(st64
->tx_carrier_errors
);
1832 st64
->tx_fifo_errors
= tswap64(st64
->tx_fifo_errors
);
1833 st64
->tx_heartbeat_errors
= tswap64(st64
->tx_heartbeat_errors
);
1834 st64
->tx_window_errors
= tswap64(st64
->tx_window_errors
);
1837 st64
->rx_compressed
= tswap64(st64
->rx_compressed
);
1838 st64
->tx_compressed
= tswap64(st64
->tx_compressed
);
1840 /* struct rtnl_link_ifmap */
1842 map
= RTA_DATA(rtattr
);
1843 map
->mem_start
= tswap64(map
->mem_start
);
1844 map
->mem_end
= tswap64(map
->mem_end
);
1845 map
->base_addr
= tswap64(map
->base_addr
);
1846 map
->irq
= tswap16(map
->irq
);
1851 /* FIXME: implement nested type */
1852 gemu_log("Unimplemented nested type %d\n", rtattr
->rta_type
);
1855 gemu_log("Unknown host IFLA type: %d\n", rtattr
->rta_type
);
1861 static abi_long
host_to_target_data_addr_rtattr(struct rtattr
*rtattr
)
1864 struct ifa_cacheinfo
*ci
;
1866 switch (rtattr
->rta_type
) {
1867 /* binary: depends on family type */
1877 u32
= RTA_DATA(rtattr
);
1878 *u32
= tswap32(*u32
);
1880 /* struct ifa_cacheinfo */
1882 ci
= RTA_DATA(rtattr
);
1883 ci
->ifa_prefered
= tswap32(ci
->ifa_prefered
);
1884 ci
->ifa_valid
= tswap32(ci
->ifa_valid
);
1885 ci
->cstamp
= tswap32(ci
->cstamp
);
1886 ci
->tstamp
= tswap32(ci
->tstamp
);
1889 gemu_log("Unknown host IFA type: %d\n", rtattr
->rta_type
);
1895 static abi_long
host_to_target_data_route_rtattr(struct rtattr
*rtattr
)
1898 switch (rtattr
->rta_type
) {
1899 /* binary: depends on family type */
1908 u32
= RTA_DATA(rtattr
);
1909 *u32
= tswap32(*u32
);
1912 gemu_log("Unknown host RTA type: %d\n", rtattr
->rta_type
);
1918 static abi_long
host_to_target_link_rtattr(struct rtattr
*rtattr
,
1919 uint32_t rtattr_len
)
1921 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
1922 host_to_target_data_link_rtattr
);
1925 static abi_long
host_to_target_addr_rtattr(struct rtattr
*rtattr
,
1926 uint32_t rtattr_len
)
1928 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
1929 host_to_target_data_addr_rtattr
);
1932 static abi_long
host_to_target_route_rtattr(struct rtattr
*rtattr
,
1933 uint32_t rtattr_len
)
1935 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
1936 host_to_target_data_route_rtattr
);
1939 static abi_long
host_to_target_data_route(struct nlmsghdr
*nlh
)
1942 struct ifinfomsg
*ifi
;
1943 struct ifaddrmsg
*ifa
;
1946 nlmsg_len
= nlh
->nlmsg_len
;
1947 switch (nlh
->nlmsg_type
) {
1951 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
1952 ifi
= NLMSG_DATA(nlh
);
1953 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
1954 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
1955 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
1956 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
1957 host_to_target_link_rtattr(IFLA_RTA(ifi
),
1958 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifi
)));
1964 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
1965 ifa
= NLMSG_DATA(nlh
);
1966 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
1967 host_to_target_addr_rtattr(IFA_RTA(ifa
),
1968 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifa
)));
1974 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
1975 rtm
= NLMSG_DATA(nlh
);
1976 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
1977 host_to_target_route_rtattr(RTM_RTA(rtm
),
1978 nlmsg_len
- NLMSG_LENGTH(sizeof(*rtm
)));
1982 return -TARGET_EINVAL
;
1987 static inline abi_long
host_to_target_nlmsg_route(struct nlmsghdr
*nlh
,
1990 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_route
);
1993 static abi_long
target_to_host_for_each_rtattr(struct rtattr
*rtattr
,
1995 abi_long (*target_to_host_rtattr
)
2000 while (len
>= sizeof(struct rtattr
)) {
2001 if (tswap16(rtattr
->rta_len
) < sizeof(struct rtattr
) ||
2002 tswap16(rtattr
->rta_len
) > len
) {
2005 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
2006 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
2007 ret
= target_to_host_rtattr(rtattr
);
2011 len
-= RTA_ALIGN(rtattr
->rta_len
);
2012 rtattr
= (struct rtattr
*)(((char *)rtattr
) +
2013 RTA_ALIGN(rtattr
->rta_len
));
2018 static abi_long
target_to_host_data_link_rtattr(struct rtattr
*rtattr
)
2020 switch (rtattr
->rta_type
) {
2022 gemu_log("Unknown target IFLA type: %d\n", rtattr
->rta_type
);
2028 static abi_long
target_to_host_data_addr_rtattr(struct rtattr
*rtattr
)
2030 switch (rtattr
->rta_type
) {
2031 /* binary: depends on family type */
2036 gemu_log("Unknown target IFA type: %d\n", rtattr
->rta_type
);
2042 static abi_long
target_to_host_data_route_rtattr(struct rtattr
*rtattr
)
2045 switch (rtattr
->rta_type
) {
2046 /* binary: depends on family type */
2053 u32
= RTA_DATA(rtattr
);
2054 *u32
= tswap32(*u32
);
2057 gemu_log("Unknown target RTA type: %d\n", rtattr
->rta_type
);
2063 static void target_to_host_link_rtattr(struct rtattr
*rtattr
,
2064 uint32_t rtattr_len
)
2066 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2067 target_to_host_data_link_rtattr
);
2070 static void target_to_host_addr_rtattr(struct rtattr
*rtattr
,
2071 uint32_t rtattr_len
)
2073 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2074 target_to_host_data_addr_rtattr
);
2077 static void target_to_host_route_rtattr(struct rtattr
*rtattr
,
2078 uint32_t rtattr_len
)
2080 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2081 target_to_host_data_route_rtattr
);
2084 static abi_long
target_to_host_data_route(struct nlmsghdr
*nlh
)
2086 struct ifinfomsg
*ifi
;
2087 struct ifaddrmsg
*ifa
;
2090 switch (nlh
->nlmsg_type
) {
2095 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
2096 ifi
= NLMSG_DATA(nlh
);
2097 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2098 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2099 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2100 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2101 target_to_host_link_rtattr(IFLA_RTA(ifi
), nlh
->nlmsg_len
-
2102 NLMSG_LENGTH(sizeof(*ifi
)));
2108 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
2109 ifa
= NLMSG_DATA(nlh
);
2110 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2111 target_to_host_addr_rtattr(IFA_RTA(ifa
), nlh
->nlmsg_len
-
2112 NLMSG_LENGTH(sizeof(*ifa
)));
2119 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
2120 rtm
= NLMSG_DATA(nlh
);
2121 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2122 target_to_host_route_rtattr(RTM_RTA(rtm
), nlh
->nlmsg_len
-
2123 NLMSG_LENGTH(sizeof(*rtm
)));
2127 return -TARGET_EOPNOTSUPP
;
2132 static abi_long
target_to_host_nlmsg_route(struct nlmsghdr
*nlh
, size_t len
)
2134 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_route
);
2136 #endif /* CONFIG_RTNETLINK */
2138 static abi_long
host_to_target_data_audit(struct nlmsghdr
*nlh
)
2140 switch (nlh
->nlmsg_type
) {
2142 gemu_log("Unknown host audit message type %d\n",
2144 return -TARGET_EINVAL
;
2149 static inline abi_long
host_to_target_nlmsg_audit(struct nlmsghdr
*nlh
,
2152 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_audit
);
2155 static abi_long
target_to_host_data_audit(struct nlmsghdr
*nlh
)
2157 switch (nlh
->nlmsg_type
) {
2159 case AUDIT_FIRST_USER_MSG
... AUDIT_LAST_USER_MSG
:
2160 case AUDIT_FIRST_USER_MSG2
... AUDIT_LAST_USER_MSG2
:
2163 gemu_log("Unknown target audit message type %d\n",
2165 return -TARGET_EINVAL
;
2171 static abi_long
target_to_host_nlmsg_audit(struct nlmsghdr
*nlh
, size_t len
)
2173 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_audit
);
2176 /* do_setsockopt() Must return target values and target errnos. */
2177 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
2178 abi_ulong optval_addr
, socklen_t optlen
)
2182 struct ip_mreqn
*ip_mreq
;
2183 struct ip_mreq_source
*ip_mreq_source
;
2187 /* TCP options all take an 'int' value. */
2188 if (optlen
< sizeof(uint32_t))
2189 return -TARGET_EINVAL
;
2191 if (get_user_u32(val
, optval_addr
))
2192 return -TARGET_EFAULT
;
2193 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2200 case IP_ROUTER_ALERT
:
2204 case IP_MTU_DISCOVER
:
2210 case IP_MULTICAST_TTL
:
2211 case IP_MULTICAST_LOOP
:
2213 if (optlen
>= sizeof(uint32_t)) {
2214 if (get_user_u32(val
, optval_addr
))
2215 return -TARGET_EFAULT
;
2216 } else if (optlen
>= 1) {
2217 if (get_user_u8(val
, optval_addr
))
2218 return -TARGET_EFAULT
;
2220 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2222 case IP_ADD_MEMBERSHIP
:
2223 case IP_DROP_MEMBERSHIP
:
2224 if (optlen
< sizeof (struct target_ip_mreq
) ||
2225 optlen
> sizeof (struct target_ip_mreqn
))
2226 return -TARGET_EINVAL
;
2228 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
2229 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
2230 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
2233 case IP_BLOCK_SOURCE
:
2234 case IP_UNBLOCK_SOURCE
:
2235 case IP_ADD_SOURCE_MEMBERSHIP
:
2236 case IP_DROP_SOURCE_MEMBERSHIP
:
2237 if (optlen
!= sizeof (struct target_ip_mreq_source
))
2238 return -TARGET_EINVAL
;
2240 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2241 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
2242 unlock_user (ip_mreq_source
, optval_addr
, 0);
2251 case IPV6_MTU_DISCOVER
:
2254 case IPV6_RECVPKTINFO
:
2256 if (optlen
< sizeof(uint32_t)) {
2257 return -TARGET_EINVAL
;
2259 if (get_user_u32(val
, optval_addr
)) {
2260 return -TARGET_EFAULT
;
2262 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2263 &val
, sizeof(val
)));
2272 /* struct icmp_filter takes an u32 value */
2273 if (optlen
< sizeof(uint32_t)) {
2274 return -TARGET_EINVAL
;
2277 if (get_user_u32(val
, optval_addr
)) {
2278 return -TARGET_EFAULT
;
2280 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2281 &val
, sizeof(val
)));
2288 case TARGET_SOL_SOCKET
:
2290 case TARGET_SO_RCVTIMEO
:
2294 optname
= SO_RCVTIMEO
;
2297 if (optlen
!= sizeof(struct target_timeval
)) {
2298 return -TARGET_EINVAL
;
2301 if (copy_from_user_timeval(&tv
, optval_addr
)) {
2302 return -TARGET_EFAULT
;
2305 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2309 case TARGET_SO_SNDTIMEO
:
2310 optname
= SO_SNDTIMEO
;
2312 case TARGET_SO_ATTACH_FILTER
:
2314 struct target_sock_fprog
*tfprog
;
2315 struct target_sock_filter
*tfilter
;
2316 struct sock_fprog fprog
;
2317 struct sock_filter
*filter
;
2320 if (optlen
!= sizeof(*tfprog
)) {
2321 return -TARGET_EINVAL
;
2323 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
2324 return -TARGET_EFAULT
;
2326 if (!lock_user_struct(VERIFY_READ
, tfilter
,
2327 tswapal(tfprog
->filter
), 0)) {
2328 unlock_user_struct(tfprog
, optval_addr
, 1);
2329 return -TARGET_EFAULT
;
2332 fprog
.len
= tswap16(tfprog
->len
);
2333 filter
= g_try_new(struct sock_filter
, fprog
.len
);
2334 if (filter
== NULL
) {
2335 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2336 unlock_user_struct(tfprog
, optval_addr
, 1);
2337 return -TARGET_ENOMEM
;
2339 for (i
= 0; i
< fprog
.len
; i
++) {
2340 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2341 filter
[i
].jt
= tfilter
[i
].jt
;
2342 filter
[i
].jf
= tfilter
[i
].jf
;
2343 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2345 fprog
.filter
= filter
;
2347 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2348 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2351 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2352 unlock_user_struct(tfprog
, optval_addr
, 1);
2355 case TARGET_SO_BINDTODEVICE
:
2357 char *dev_ifname
, *addr_ifname
;
2359 if (optlen
> IFNAMSIZ
- 1) {
2360 optlen
= IFNAMSIZ
- 1;
2362 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2364 return -TARGET_EFAULT
;
2366 optname
= SO_BINDTODEVICE
;
2367 addr_ifname
= alloca(IFNAMSIZ
);
2368 memcpy(addr_ifname
, dev_ifname
, optlen
);
2369 addr_ifname
[optlen
] = 0;
2370 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2371 addr_ifname
, optlen
));
2372 unlock_user (dev_ifname
, optval_addr
, 0);
2375 /* Options with 'int' argument. */
2376 case TARGET_SO_DEBUG
:
2379 case TARGET_SO_REUSEADDR
:
2380 optname
= SO_REUSEADDR
;
2382 case TARGET_SO_TYPE
:
2385 case TARGET_SO_ERROR
:
2388 case TARGET_SO_DONTROUTE
:
2389 optname
= SO_DONTROUTE
;
2391 case TARGET_SO_BROADCAST
:
2392 optname
= SO_BROADCAST
;
2394 case TARGET_SO_SNDBUF
:
2395 optname
= SO_SNDBUF
;
2397 case TARGET_SO_SNDBUFFORCE
:
2398 optname
= SO_SNDBUFFORCE
;
2400 case TARGET_SO_RCVBUF
:
2401 optname
= SO_RCVBUF
;
2403 case TARGET_SO_RCVBUFFORCE
:
2404 optname
= SO_RCVBUFFORCE
;
2406 case TARGET_SO_KEEPALIVE
:
2407 optname
= SO_KEEPALIVE
;
2409 case TARGET_SO_OOBINLINE
:
2410 optname
= SO_OOBINLINE
;
2412 case TARGET_SO_NO_CHECK
:
2413 optname
= SO_NO_CHECK
;
2415 case TARGET_SO_PRIORITY
:
2416 optname
= SO_PRIORITY
;
2419 case TARGET_SO_BSDCOMPAT
:
2420 optname
= SO_BSDCOMPAT
;
2423 case TARGET_SO_PASSCRED
:
2424 optname
= SO_PASSCRED
;
2426 case TARGET_SO_PASSSEC
:
2427 optname
= SO_PASSSEC
;
2429 case TARGET_SO_TIMESTAMP
:
2430 optname
= SO_TIMESTAMP
;
2432 case TARGET_SO_RCVLOWAT
:
2433 optname
= SO_RCVLOWAT
;
2439 if (optlen
< sizeof(uint32_t))
2440 return -TARGET_EINVAL
;
2442 if (get_user_u32(val
, optval_addr
))
2443 return -TARGET_EFAULT
;
2444 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
2448 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
2449 ret
= -TARGET_ENOPROTOOPT
;
2454 /* do_getsockopt() Must return target values and target errnos. */
2455 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
2456 abi_ulong optval_addr
, abi_ulong optlen
)
2463 case TARGET_SOL_SOCKET
:
2466 /* These don't just return a single integer */
2467 case TARGET_SO_LINGER
:
2468 case TARGET_SO_RCVTIMEO
:
2469 case TARGET_SO_SNDTIMEO
:
2470 case TARGET_SO_PEERNAME
:
2472 case TARGET_SO_PEERCRED
: {
2475 struct target_ucred
*tcr
;
2477 if (get_user_u32(len
, optlen
)) {
2478 return -TARGET_EFAULT
;
2481 return -TARGET_EINVAL
;
2485 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
2493 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
2494 return -TARGET_EFAULT
;
2496 __put_user(cr
.pid
, &tcr
->pid
);
2497 __put_user(cr
.uid
, &tcr
->uid
);
2498 __put_user(cr
.gid
, &tcr
->gid
);
2499 unlock_user_struct(tcr
, optval_addr
, 1);
2500 if (put_user_u32(len
, optlen
)) {
2501 return -TARGET_EFAULT
;
2505 /* Options with 'int' argument. */
2506 case TARGET_SO_DEBUG
:
2509 case TARGET_SO_REUSEADDR
:
2510 optname
= SO_REUSEADDR
;
2512 case TARGET_SO_TYPE
:
2515 case TARGET_SO_ERROR
:
2518 case TARGET_SO_DONTROUTE
:
2519 optname
= SO_DONTROUTE
;
2521 case TARGET_SO_BROADCAST
:
2522 optname
= SO_BROADCAST
;
2524 case TARGET_SO_SNDBUF
:
2525 optname
= SO_SNDBUF
;
2527 case TARGET_SO_RCVBUF
:
2528 optname
= SO_RCVBUF
;
2530 case TARGET_SO_KEEPALIVE
:
2531 optname
= SO_KEEPALIVE
;
2533 case TARGET_SO_OOBINLINE
:
2534 optname
= SO_OOBINLINE
;
2536 case TARGET_SO_NO_CHECK
:
2537 optname
= SO_NO_CHECK
;
2539 case TARGET_SO_PRIORITY
:
2540 optname
= SO_PRIORITY
;
2543 case TARGET_SO_BSDCOMPAT
:
2544 optname
= SO_BSDCOMPAT
;
2547 case TARGET_SO_PASSCRED
:
2548 optname
= SO_PASSCRED
;
2550 case TARGET_SO_TIMESTAMP
:
2551 optname
= SO_TIMESTAMP
;
2553 case TARGET_SO_RCVLOWAT
:
2554 optname
= SO_RCVLOWAT
;
2556 case TARGET_SO_ACCEPTCONN
:
2557 optname
= SO_ACCEPTCONN
;
2564 /* TCP options all take an 'int' value. */
2566 if (get_user_u32(len
, optlen
))
2567 return -TARGET_EFAULT
;
2569 return -TARGET_EINVAL
;
2571 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2574 if (optname
== SO_TYPE
) {
2575 val
= host_to_target_sock_type(val
);
2580 if (put_user_u32(val
, optval_addr
))
2581 return -TARGET_EFAULT
;
2583 if (put_user_u8(val
, optval_addr
))
2584 return -TARGET_EFAULT
;
2586 if (put_user_u32(len
, optlen
))
2587 return -TARGET_EFAULT
;
2594 case IP_ROUTER_ALERT
:
2598 case IP_MTU_DISCOVER
:
2604 case IP_MULTICAST_TTL
:
2605 case IP_MULTICAST_LOOP
:
2606 if (get_user_u32(len
, optlen
))
2607 return -TARGET_EFAULT
;
2609 return -TARGET_EINVAL
;
2611 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2614 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2616 if (put_user_u32(len
, optlen
)
2617 || put_user_u8(val
, optval_addr
))
2618 return -TARGET_EFAULT
;
2620 if (len
> sizeof(int))
2622 if (put_user_u32(len
, optlen
)
2623 || put_user_u32(val
, optval_addr
))
2624 return -TARGET_EFAULT
;
2628 ret
= -TARGET_ENOPROTOOPT
;
2634 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2636 ret
= -TARGET_EOPNOTSUPP
;
2642 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
2643 int count
, int copy
)
2645 struct target_iovec
*target_vec
;
2647 abi_ulong total_len
, max_len
;
2650 bool bad_address
= false;
2656 if (count
< 0 || count
> IOV_MAX
) {
2661 vec
= g_try_new0(struct iovec
, count
);
2667 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2668 count
* sizeof(struct target_iovec
), 1);
2669 if (target_vec
== NULL
) {
2674 /* ??? If host page size > target page size, this will result in a
2675 value larger than what we can actually support. */
2676 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
2679 for (i
= 0; i
< count
; i
++) {
2680 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2681 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2686 } else if (len
== 0) {
2687 /* Zero length pointer is ignored. */
2688 vec
[i
].iov_base
= 0;
2690 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
2691 /* If the first buffer pointer is bad, this is a fault. But
2692 * subsequent bad buffers will result in a partial write; this
2693 * is realized by filling the vector with null pointers and
2695 if (!vec
[i
].iov_base
) {
2706 if (len
> max_len
- total_len
) {
2707 len
= max_len
- total_len
;
2710 vec
[i
].iov_len
= len
;
2714 unlock_user(target_vec
, target_addr
, 0);
2719 if (tswapal(target_vec
[i
].iov_len
) > 0) {
2720 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
2723 unlock_user(target_vec
, target_addr
, 0);
2730 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
2731 int count
, int copy
)
2733 struct target_iovec
*target_vec
;
2736 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2737 count
* sizeof(struct target_iovec
), 1);
2739 for (i
= 0; i
< count
; i
++) {
2740 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2741 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2745 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
2747 unlock_user(target_vec
, target_addr
, 0);
2753 static inline int target_to_host_sock_type(int *type
)
2756 int target_type
= *type
;
2758 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
2759 case TARGET_SOCK_DGRAM
:
2760 host_type
= SOCK_DGRAM
;
2762 case TARGET_SOCK_STREAM
:
2763 host_type
= SOCK_STREAM
;
2766 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
2769 if (target_type
& TARGET_SOCK_CLOEXEC
) {
2770 #if defined(SOCK_CLOEXEC)
2771 host_type
|= SOCK_CLOEXEC
;
2773 return -TARGET_EINVAL
;
2776 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2777 #if defined(SOCK_NONBLOCK)
2778 host_type
|= SOCK_NONBLOCK
;
2779 #elif !defined(O_NONBLOCK)
2780 return -TARGET_EINVAL
;
2787 /* Try to emulate socket type flags after socket creation. */
2788 static int sock_flags_fixup(int fd
, int target_type
)
2790 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2791 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2792 int flags
= fcntl(fd
, F_GETFL
);
2793 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
2795 return -TARGET_EINVAL
;
2802 static abi_long
packet_target_to_host_sockaddr(void *host_addr
,
2803 abi_ulong target_addr
,
2806 struct sockaddr
*addr
= host_addr
;
2807 struct target_sockaddr
*target_saddr
;
2809 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
2810 if (!target_saddr
) {
2811 return -TARGET_EFAULT
;
2814 memcpy(addr
, target_saddr
, len
);
2815 addr
->sa_family
= tswap16(target_saddr
->sa_family
);
2816 /* spkt_protocol is big-endian */
2818 unlock_user(target_saddr
, target_addr
, 0);
2822 static TargetFdTrans target_packet_trans
= {
2823 .target_to_host_addr
= packet_target_to_host_sockaddr
,
2826 #ifdef CONFIG_RTNETLINK
2827 static abi_long
netlink_route_target_to_host(void *buf
, size_t len
)
2831 ret
= target_to_host_nlmsg_route(buf
, len
);
2839 static abi_long
netlink_route_host_to_target(void *buf
, size_t len
)
2843 ret
= host_to_target_nlmsg_route(buf
, len
);
2851 static TargetFdTrans target_netlink_route_trans
= {
2852 .target_to_host_data
= netlink_route_target_to_host
,
2853 .host_to_target_data
= netlink_route_host_to_target
,
2855 #endif /* CONFIG_RTNETLINK */
2857 static abi_long
netlink_audit_target_to_host(void *buf
, size_t len
)
2861 ret
= target_to_host_nlmsg_audit(buf
, len
);
2869 static abi_long
netlink_audit_host_to_target(void *buf
, size_t len
)
2873 ret
= host_to_target_nlmsg_audit(buf
, len
);
2881 static TargetFdTrans target_netlink_audit_trans
= {
2882 .target_to_host_data
= netlink_audit_target_to_host
,
2883 .host_to_target_data
= netlink_audit_host_to_target
,
2886 /* do_socket() Must return target values and target errnos. */
2887 static abi_long
do_socket(int domain
, int type
, int protocol
)
2889 int target_type
= type
;
2892 ret
= target_to_host_sock_type(&type
);
2897 if (domain
== PF_NETLINK
&& !(
2898 #ifdef CONFIG_RTNETLINK
2899 protocol
== NETLINK_ROUTE
||
2901 protocol
== NETLINK_KOBJECT_UEVENT
||
2902 protocol
== NETLINK_AUDIT
)) {
2903 return -EPFNOSUPPORT
;
2906 if (domain
== AF_PACKET
||
2907 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
2908 protocol
= tswap16(protocol
);
2911 ret
= get_errno(socket(domain
, type
, protocol
));
2913 ret
= sock_flags_fixup(ret
, target_type
);
2914 if (type
== SOCK_PACKET
) {
2915 /* Manage an obsolete case :
2916 * if socket type is SOCK_PACKET, bind by name
2918 fd_trans_register(ret
, &target_packet_trans
);
2919 } else if (domain
== PF_NETLINK
) {
2921 #ifdef CONFIG_RTNETLINK
2923 fd_trans_register(ret
, &target_netlink_route_trans
);
2926 case NETLINK_KOBJECT_UEVENT
:
2927 /* nothing to do: messages are strings */
2930 fd_trans_register(ret
, &target_netlink_audit_trans
);
2933 g_assert_not_reached();
2940 /* do_bind() Must return target values and target errnos. */
2941 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
2947 if ((int)addrlen
< 0) {
2948 return -TARGET_EINVAL
;
2951 addr
= alloca(addrlen
+1);
2953 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
2957 return get_errno(bind(sockfd
, addr
, addrlen
));
2960 /* do_connect() Must return target values and target errnos. */
2961 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
2967 if ((int)addrlen
< 0) {
2968 return -TARGET_EINVAL
;
2971 addr
= alloca(addrlen
+1);
2973 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
2977 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
2980 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2981 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
2982 int flags
, int send
)
2988 abi_ulong target_vec
;
2990 if (msgp
->msg_name
) {
2991 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
2992 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
2993 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
2994 tswapal(msgp
->msg_name
),
3000 msg
.msg_name
= NULL
;
3001 msg
.msg_namelen
= 0;
3003 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
3004 msg
.msg_control
= alloca(msg
.msg_controllen
);
3005 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
3007 count
= tswapal(msgp
->msg_iovlen
);
3008 target_vec
= tswapal(msgp
->msg_iov
);
3009 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
3010 target_vec
, count
, send
);
3012 ret
= -host_to_target_errno(errno
);
3015 msg
.msg_iovlen
= count
;
3019 if (fd_trans_target_to_host_data(fd
)) {
3022 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
3023 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
3024 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
3025 msg
.msg_iov
->iov_len
);
3027 msg
.msg_iov
->iov_base
= host_msg
;
3028 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3032 ret
= target_to_host_cmsg(&msg
, msgp
);
3034 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3038 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
3039 if (!is_error(ret
)) {
3041 if (fd_trans_host_to_target_data(fd
)) {
3042 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
3045 ret
= host_to_target_cmsg(msgp
, &msg
);
3047 if (!is_error(ret
)) {
3048 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
3049 if (msg
.msg_name
!= NULL
) {
3050 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
3051 msg
.msg_name
, msg
.msg_namelen
);
3063 unlock_iovec(vec
, target_vec
, count
, !send
);
3068 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
3069 int flags
, int send
)
3072 struct target_msghdr
*msgp
;
3074 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
3078 return -TARGET_EFAULT
;
3080 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
3081 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
/* We don't rely on the C library to have sendmmsg/recvmmsg support,
 * so it might not have this *mmsg-specific flag either.
 */
#ifndef MSG_WAITFORONE
#define MSG_WAITFORONE 0x10000
#endif

/* Emulate sendmmsg/recvmmsg by looping over do_sendrecvmsg_locked(). */
static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
3132 /* do_accept4() Must return target values and target errnos. */
3133 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3134 abi_ulong target_addrlen_addr
, int flags
)
3141 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3143 if (target_addr
== 0) {
3144 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
3147 /* linux returns EINVAL if addrlen pointer is invalid */
3148 if (get_user_u32(addrlen
, target_addrlen_addr
))
3149 return -TARGET_EINVAL
;
3151 if ((int)addrlen
< 0) {
3152 return -TARGET_EINVAL
;
3155 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3156 return -TARGET_EINVAL
;
3158 addr
= alloca(addrlen
);
3160 ret
= get_errno(safe_accept4(fd
, addr
, &addrlen
, host_flags
));
3161 if (!is_error(ret
)) {
3162 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3163 if (put_user_u32(addrlen
, target_addrlen_addr
))
3164 ret
= -TARGET_EFAULT
;
3169 /* do_getpeername() Must return target values and target errnos. */
3170 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3171 abi_ulong target_addrlen_addr
)
3177 if (get_user_u32(addrlen
, target_addrlen_addr
))
3178 return -TARGET_EFAULT
;
3180 if ((int)addrlen
< 0) {
3181 return -TARGET_EINVAL
;
3184 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3185 return -TARGET_EFAULT
;
3187 addr
= alloca(addrlen
);
3189 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
3190 if (!is_error(ret
)) {
3191 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3192 if (put_user_u32(addrlen
, target_addrlen_addr
))
3193 ret
= -TARGET_EFAULT
;
3198 /* do_getsockname() Must return target values and target errnos. */
3199 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3200 abi_ulong target_addrlen_addr
)
3206 if (get_user_u32(addrlen
, target_addrlen_addr
))
3207 return -TARGET_EFAULT
;
3209 if ((int)addrlen
< 0) {
3210 return -TARGET_EINVAL
;
3213 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3214 return -TARGET_EFAULT
;
3216 addr
= alloca(addrlen
);
3218 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
3219 if (!is_error(ret
)) {
3220 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3221 if (put_user_u32(addrlen
, target_addrlen_addr
))
3222 ret
= -TARGET_EFAULT
;
3227 /* do_socketpair() Must return target values and target errnos. */
3228 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3229 abi_ulong target_tab_addr
)
3234 target_to_host_sock_type(&type
);
3236 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3237 if (!is_error(ret
)) {
3238 if (put_user_s32(tab
[0], target_tab_addr
)
3239 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3240 ret
= -TARGET_EFAULT
;
3245 /* do_sendto() Must return target values and target errnos. */
3246 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3247 abi_ulong target_addr
, socklen_t addrlen
)
3251 void *copy_msg
= NULL
;
3254 if ((int)addrlen
< 0) {
3255 return -TARGET_EINVAL
;
3258 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3260 return -TARGET_EFAULT
;
3261 if (fd_trans_target_to_host_data(fd
)) {
3262 copy_msg
= host_msg
;
3263 host_msg
= g_malloc(len
);
3264 memcpy(host_msg
, copy_msg
, len
);
3265 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
3271 addr
= alloca(addrlen
+1);
3272 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
3276 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
3278 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
3283 host_msg
= copy_msg
;
3285 unlock_user(host_msg
, msg
, 0);
3289 /* do_recvfrom() Must return target values and target errnos. */
3290 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
3291 abi_ulong target_addr
,
3292 abi_ulong target_addrlen
)
3299 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
3301 return -TARGET_EFAULT
;
3303 if (get_user_u32(addrlen
, target_addrlen
)) {
3304 ret
= -TARGET_EFAULT
;
3307 if ((int)addrlen
< 0) {
3308 ret
= -TARGET_EINVAL
;
3311 addr
= alloca(addrlen
);
3312 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
3315 addr
= NULL
; /* To keep compiler quiet. */
3316 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
3318 if (!is_error(ret
)) {
3319 if (fd_trans_host_to_target_data(fd
)) {
3320 ret
= fd_trans_host_to_target_data(fd
)(host_msg
, ret
);
3323 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3324 if (put_user_u32(addrlen
, target_addrlen
)) {
3325 ret
= -TARGET_EFAULT
;
3329 unlock_user(host_msg
, msg
, len
);
3332 unlock_user(host_msg
, msg
, 0);
#ifdef TARGET_NR_socketcall
/* do_socketcall() Must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned ac[] = { /* number of arguments per call */
        [SOCKOP_socket] = 3,      /* domain, type, protocol */
        [SOCKOP_bind] = 3,        /* sockfd, addr, addrlen */
        [SOCKOP_connect] = 3,     /* sockfd, addr, addrlen */
        [SOCKOP_listen] = 2,      /* sockfd, backlog */
        [SOCKOP_accept] = 3,      /* sockfd, addr, addrlen */
        [SOCKOP_accept4] = 4,     /* sockfd, addr, addrlen, flags */
        [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_socketpair] = 4,  /* domain, type, protocol, tab */
        [SOCKOP_send] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_recv] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_sendto] = 6,      /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_recvfrom] = 6,    /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_shutdown] = 2,    /* sockfd, how */
        [SOCKOP_sendmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_recvmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_sendmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_recvmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_setsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
        [SOCKOP_getsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* first, collect the arguments in a[] according to ac[] */
    if (num >= 0 && num < ARRAY_SIZE(ac)) {
        assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
        for (i = 0; i < ac[num]; ++i) {
            if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
                return -TARGET_EFAULT;
            }
        }
    }

    /* now when we have the args, actually handle the call */
    switch (num) {
    case SOCKOP_socket: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case SOCKOP_bind: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case SOCKOP_connect: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case SOCKOP_listen: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case SOCKOP_accept: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case SOCKOP_getsockname: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case SOCKOP_getpeername: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case SOCKOP_socketpair: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case SOCKOP_send: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_recv: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_shutdown: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case SOCKOP_sendmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case SOCKOP_recvmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case SOCKOP_sendmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    case SOCKOP_recvmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_ENOSYS;
    }
}
#endif
3425 #define N_SHM_REGIONS 32
3427 static struct shm_region
{
3431 } shm_regions
[N_SHM_REGIONS
];
3433 struct target_semid_ds
3435 struct target_ipc_perm sem_perm
;
3436 abi_ulong sem_otime
;
3437 #if !defined(TARGET_PPC64)
3438 abi_ulong __unused1
;
3440 abi_ulong sem_ctime
;
3441 #if !defined(TARGET_PPC64)
3442 abi_ulong __unused2
;
3444 abi_ulong sem_nsems
;
3445 abi_ulong __unused3
;
3446 abi_ulong __unused4
;
3449 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
3450 abi_ulong target_addr
)
3452 struct target_ipc_perm
*target_ip
;
3453 struct target_semid_ds
*target_sd
;
3455 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3456 return -TARGET_EFAULT
;
3457 target_ip
= &(target_sd
->sem_perm
);
3458 host_ip
->__key
= tswap32(target_ip
->__key
);
3459 host_ip
->uid
= tswap32(target_ip
->uid
);
3460 host_ip
->gid
= tswap32(target_ip
->gid
);
3461 host_ip
->cuid
= tswap32(target_ip
->cuid
);
3462 host_ip
->cgid
= tswap32(target_ip
->cgid
);
3463 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3464 host_ip
->mode
= tswap32(target_ip
->mode
);
3466 host_ip
->mode
= tswap16(target_ip
->mode
);
3468 #if defined(TARGET_PPC)
3469 host_ip
->__seq
= tswap32(target_ip
->__seq
);
3471 host_ip
->__seq
= tswap16(target_ip
->__seq
);
3473 unlock_user_struct(target_sd
, target_addr
, 0);
3477 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
3478 struct ipc_perm
*host_ip
)
3480 struct target_ipc_perm
*target_ip
;
3481 struct target_semid_ds
*target_sd
;
3483 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3484 return -TARGET_EFAULT
;
3485 target_ip
= &(target_sd
->sem_perm
);
3486 target_ip
->__key
= tswap32(host_ip
->__key
);
3487 target_ip
->uid
= tswap32(host_ip
->uid
);
3488 target_ip
->gid
= tswap32(host_ip
->gid
);
3489 target_ip
->cuid
= tswap32(host_ip
->cuid
);
3490 target_ip
->cgid
= tswap32(host_ip
->cgid
);
3491 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3492 target_ip
->mode
= tswap32(host_ip
->mode
);
3494 target_ip
->mode
= tswap16(host_ip
->mode
);
3496 #if defined(TARGET_PPC)
3497 target_ip
->__seq
= tswap32(host_ip
->__seq
);
3499 target_ip
->__seq
= tswap16(host_ip
->__seq
);
3501 unlock_user_struct(target_sd
, target_addr
, 1);
3505 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
3506 abi_ulong target_addr
)
3508 struct target_semid_ds
*target_sd
;
3510 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3511 return -TARGET_EFAULT
;
3512 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
3513 return -TARGET_EFAULT
;
3514 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
3515 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
3516 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
3517 unlock_user_struct(target_sd
, target_addr
, 0);
3521 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
3522 struct semid_ds
*host_sd
)
3524 struct target_semid_ds
*target_sd
;
3526 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3527 return -TARGET_EFAULT
;
3528 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
3529 return -TARGET_EFAULT
;
3530 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
3531 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
3532 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
3533 unlock_user_struct(target_sd
, target_addr
, 1);
3537 struct target_seminfo
{
3550 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
3551 struct seminfo
*host_seminfo
)
3553 struct target_seminfo
*target_seminfo
;
3554 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
3555 return -TARGET_EFAULT
;
3556 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
3557 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
3558 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
3559 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
3560 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
3561 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
3562 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
3563 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
3564 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
3565 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
3566 unlock_user_struct(target_seminfo
, target_addr
, 1);
3572 struct semid_ds
*buf
;
3573 unsigned short *array
;
3574 struct seminfo
*__buf
;
3577 union target_semun
{
3584 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
3585 abi_ulong target_addr
)
3588 unsigned short *array
;
3590 struct semid_ds semid_ds
;
3593 semun
.buf
= &semid_ds
;
3595 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3597 return get_errno(ret
);
3599 nsems
= semid_ds
.sem_nsems
;
3601 *host_array
= g_try_new(unsigned short, nsems
);
3603 return -TARGET_ENOMEM
;
3605 array
= lock_user(VERIFY_READ
, target_addr
,
3606 nsems
*sizeof(unsigned short), 1);
3608 g_free(*host_array
);
3609 return -TARGET_EFAULT
;
3612 for(i
=0; i
<nsems
; i
++) {
3613 __get_user((*host_array
)[i
], &array
[i
]);
3615 unlock_user(array
, target_addr
, 0);
3620 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
3621 unsigned short **host_array
)
3624 unsigned short *array
;
3626 struct semid_ds semid_ds
;
3629 semun
.buf
= &semid_ds
;
3631 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3633 return get_errno(ret
);
3635 nsems
= semid_ds
.sem_nsems
;
3637 array
= lock_user(VERIFY_WRITE
, target_addr
,
3638 nsems
*sizeof(unsigned short), 0);
3640 return -TARGET_EFAULT
;
3642 for(i
=0; i
<nsems
; i
++) {
3643 __put_user((*host_array
)[i
], &array
[i
]);
3645 g_free(*host_array
);
3646 unlock_user(array
, target_addr
, 1);
3651 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
3652 abi_ulong target_arg
)
3654 union target_semun target_su
= { .buf
= target_arg
};
3656 struct semid_ds dsarg
;
3657 unsigned short *array
= NULL
;
3658 struct seminfo seminfo
;
3659 abi_long ret
= -TARGET_EINVAL
;
3666 /* In 64 bit cross-endian situations, we will erroneously pick up
3667 * the wrong half of the union for the "val" element. To rectify
3668 * this, the entire 8-byte structure is byteswapped, followed by
3669 * a swap of the 4 byte val field. In other cases, the data is
3670 * already in proper host byte order. */
3671 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
3672 target_su
.buf
= tswapal(target_su
.buf
);
3673 arg
.val
= tswap32(target_su
.val
);
3675 arg
.val
= target_su
.val
;
3677 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3681 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
3685 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3686 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
3693 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
3697 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3698 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
3704 arg
.__buf
= &seminfo
;
3705 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3706 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
3714 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
3721 struct target_sembuf
{
3722 unsigned short sem_num
;
3727 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
3728 abi_ulong target_addr
,
3731 struct target_sembuf
*target_sembuf
;
3734 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
3735 nsops
*sizeof(struct target_sembuf
), 1);
3737 return -TARGET_EFAULT
;
3739 for(i
=0; i
<nsops
; i
++) {
3740 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
3741 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
3742 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
3745 unlock_user(target_sembuf
, target_addr
, 0);
3750 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
3752 struct sembuf sops
[nsops
];
3754 if (target_to_host_sembuf(sops
, ptr
, nsops
))
3755 return -TARGET_EFAULT
;
3757 return get_errno(safe_semtimedop(semid
, sops
, nsops
, NULL
));
3760 struct target_msqid_ds
3762 struct target_ipc_perm msg_perm
;
3763 abi_ulong msg_stime
;
3764 #if TARGET_ABI_BITS == 32
3765 abi_ulong __unused1
;
3767 abi_ulong msg_rtime
;
3768 #if TARGET_ABI_BITS == 32
3769 abi_ulong __unused2
;
3771 abi_ulong msg_ctime
;
3772 #if TARGET_ABI_BITS == 32
3773 abi_ulong __unused3
;
3775 abi_ulong __msg_cbytes
;
3777 abi_ulong msg_qbytes
;
3778 abi_ulong msg_lspid
;
3779 abi_ulong msg_lrpid
;
3780 abi_ulong __unused4
;
3781 abi_ulong __unused5
;
3784 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
3785 abi_ulong target_addr
)
3787 struct target_msqid_ds
*target_md
;
3789 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
3790 return -TARGET_EFAULT
;
3791 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
3792 return -TARGET_EFAULT
;
3793 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
3794 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
3795 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
3796 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
3797 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
3798 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
3799 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
3800 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
3801 unlock_user_struct(target_md
, target_addr
, 0);
3805 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
3806 struct msqid_ds
*host_md
)
3808 struct target_msqid_ds
*target_md
;
3810 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
3811 return -TARGET_EFAULT
;
3812 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
3813 return -TARGET_EFAULT
;
3814 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
3815 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
3816 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
3817 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
3818 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
3819 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
3820 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
3821 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
3822 unlock_user_struct(target_md
, target_addr
, 1);
3826 struct target_msginfo
{
3834 unsigned short int msgseg
;
3837 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
3838 struct msginfo
*host_msginfo
)
3840 struct target_msginfo
*target_msginfo
;
3841 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
3842 return -TARGET_EFAULT
;
3843 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
3844 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
3845 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
3846 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
3847 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
3848 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
3849 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
3850 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
3851 unlock_user_struct(target_msginfo
, target_addr
, 1);
3855 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
3857 struct msqid_ds dsarg
;
3858 struct msginfo msginfo
;
3859 abi_long ret
= -TARGET_EINVAL
;
3867 if (target_to_host_msqid_ds(&dsarg
,ptr
))
3868 return -TARGET_EFAULT
;
3869 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
3870 if (host_to_target_msqid_ds(ptr
,&dsarg
))
3871 return -TARGET_EFAULT
;
3874 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
3878 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
3879 if (host_to_target_msginfo(ptr
, &msginfo
))
3880 return -TARGET_EFAULT
;
3887 struct target_msgbuf
{
3892 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
3893 ssize_t msgsz
, int msgflg
)
3895 struct target_msgbuf
*target_mb
;
3896 struct msgbuf
*host_mb
;
3900 return -TARGET_EINVAL
;
3903 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
3904 return -TARGET_EFAULT
;
3905 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
3907 unlock_user_struct(target_mb
, msgp
, 0);
3908 return -TARGET_ENOMEM
;
3910 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
3911 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
3912 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
3914 unlock_user_struct(target_mb
, msgp
, 0);
3919 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
3920 ssize_t msgsz
, abi_long msgtyp
,
3923 struct target_msgbuf
*target_mb
;
3925 struct msgbuf
*host_mb
;
3929 return -TARGET_EINVAL
;
3932 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
3933 return -TARGET_EFAULT
;
3935 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
3937 ret
= -TARGET_ENOMEM
;
3940 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
3943 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
3944 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
3945 if (!target_mtext
) {
3946 ret
= -TARGET_EFAULT
;
3949 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
3950 unlock_user(target_mtext
, target_mtext_addr
, ret
);
3953 target_mb
->mtype
= tswapal(host_mb
->mtype
);
3957 unlock_user_struct(target_mb
, msgp
, 1);
3962 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
3963 abi_ulong target_addr
)
3965 struct target_shmid_ds
*target_sd
;
3967 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3968 return -TARGET_EFAULT
;
3969 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
3970 return -TARGET_EFAULT
;
3971 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
3972 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
3973 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
3974 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
3975 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
3976 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
3977 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
3978 unlock_user_struct(target_sd
, target_addr
, 0);
3982 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
3983 struct shmid_ds
*host_sd
)
3985 struct target_shmid_ds
*target_sd
;
3987 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3988 return -TARGET_EFAULT
;
3989 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
3990 return -TARGET_EFAULT
;
3991 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
3992 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
3993 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
3994 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
3995 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
3996 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
3997 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
3998 unlock_user_struct(target_sd
, target_addr
, 1);
4002 struct target_shminfo
{
4010 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
4011 struct shminfo
*host_shminfo
)
4013 struct target_shminfo
*target_shminfo
;
4014 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
4015 return -TARGET_EFAULT
;
4016 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
4017 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
4018 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
4019 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
4020 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
4021 unlock_user_struct(target_shminfo
, target_addr
, 1);
4025 struct target_shm_info
{
4030 abi_ulong swap_attempts
;
4031 abi_ulong swap_successes
;
4034 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
4035 struct shm_info
*host_shm_info
)
4037 struct target_shm_info
*target_shm_info
;
4038 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
4039 return -TARGET_EFAULT
;
4040 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
4041 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
4042 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
4043 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
4044 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
4045 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
4046 unlock_user_struct(target_shm_info
, target_addr
, 1);
4050 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
4052 struct shmid_ds dsarg
;
4053 struct shminfo shminfo
;
4054 struct shm_info shm_info
;
4055 abi_long ret
= -TARGET_EINVAL
;
4063 if (target_to_host_shmid_ds(&dsarg
, buf
))
4064 return -TARGET_EFAULT
;
4065 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
4066 if (host_to_target_shmid_ds(buf
, &dsarg
))
4067 return -TARGET_EFAULT
;
4070 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
4071 if (host_to_target_shminfo(buf
, &shminfo
))
4072 return -TARGET_EFAULT
;
4075 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
4076 if (host_to_target_shm_info(buf
, &shm_info
))
4077 return -TARGET_EFAULT
;
4082 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
4089 static inline abi_ulong
do_shmat(int shmid
, abi_ulong shmaddr
, int shmflg
)
4093 struct shmid_ds shm_info
;
4096 /* find out the length of the shared memory segment */
4097 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
4098 if (is_error(ret
)) {
4099 /* can't get length, bail out */
4106 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
4108 abi_ulong mmap_start
;
4110 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
4112 if (mmap_start
== -1) {
4114 host_raddr
= (void *)-1;
4116 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
4119 if (host_raddr
== (void *)-1) {
4121 return get_errno((long)host_raddr
);
4123 raddr
=h2g((unsigned long)host_raddr
);
4125 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
4126 PAGE_VALID
| PAGE_READ
|
4127 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
4129 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
4130 if (!shm_regions
[i
].in_use
) {
4131 shm_regions
[i
].in_use
= true;
4132 shm_regions
[i
].start
= raddr
;
4133 shm_regions
[i
].size
= shm_info
.shm_segsz
;
4143 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
4147 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
4148 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
4149 shm_regions
[i
].in_use
= false;
4150 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
4155 return get_errno(shmdt(g2h(shmaddr
)));
4158 #ifdef TARGET_NR_ipc
4159 /* ??? This only works with linear mappings. */
4160 /* do_ipc() must return target values and target errnos. */
4161 static abi_long
do_ipc(unsigned int call
, abi_long first
,
4162 abi_long second
, abi_long third
,
4163 abi_long ptr
, abi_long fifth
)
4168 version
= call
>> 16;
4173 ret
= do_semop(first
, ptr
, second
);
4177 ret
= get_errno(semget(first
, second
, third
));
4180 case IPCOP_semctl
: {
4181 /* The semun argument to semctl is passed by value, so dereference the
4184 get_user_ual(atptr
, ptr
);
4185 ret
= do_semctl(first
, second
, third
, atptr
);
4190 ret
= get_errno(msgget(first
, second
));
4194 ret
= do_msgsnd(first
, ptr
, second
, third
);
4198 ret
= do_msgctl(first
, second
, ptr
);
4205 struct target_ipc_kludge
{
4210 if (!lock_user_struct(VERIFY_READ
, tmp
, ptr
, 1)) {
4211 ret
= -TARGET_EFAULT
;
4215 ret
= do_msgrcv(first
, tswapal(tmp
->msgp
), second
, tswapal(tmp
->msgtyp
), third
);
4217 unlock_user_struct(tmp
, ptr
, 0);
4221 ret
= do_msgrcv(first
, ptr
, second
, fifth
, third
);
4230 raddr
= do_shmat(first
, ptr
, second
);
4231 if (is_error(raddr
))
4232 return get_errno(raddr
);
4233 if (put_user_ual(raddr
, third
))
4234 return -TARGET_EFAULT
;
4238 ret
= -TARGET_EINVAL
;
4243 ret
= do_shmdt(ptr
);
4247 /* IPC_* flag values are the same on all linux platforms */
4248 ret
= get_errno(shmget(first
, second
, third
));
4251 /* IPC_* and SHM_* command values are the same on all linux platforms */
4253 ret
= do_shmctl(first
, second
, ptr
);
4256 gemu_log("Unsupported ipc call: %d (version %d)\n", call
, version
);
4257 ret
= -TARGET_ENOSYS
;
4264 /* kernel structure types definitions */
4266 #define STRUCT(name, ...) STRUCT_ ## name,
4267 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4269 #include "syscall_types.h"
4273 #undef STRUCT_SPECIAL
4275 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4276 #define STRUCT_SPECIAL(name)
4277 #include "syscall_types.h"
4279 #undef STRUCT_SPECIAL
4281 typedef struct IOCTLEntry IOCTLEntry
;
4283 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4284 int fd
, int cmd
, abi_long arg
);
4288 unsigned int host_cmd
;
4291 do_ioctl_fn
*do_ioctl
;
4292 const argtype arg_type
[5];
4295 #define IOC_R 0x0001
4296 #define IOC_W 0x0002
4297 #define IOC_RW (IOC_R | IOC_W)
4299 #define MAX_STRUCT_SIZE 4096
4301 #ifdef CONFIG_FIEMAP
4302 /* So fiemap access checks don't overflow on 32 bit systems.
4303 * This is very slightly smaller than the limit imposed by
4304 * the underlying kernel.
4306 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4307 / sizeof(struct fiemap_extent))
4309 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4310 int fd
, int cmd
, abi_long arg
)
4312 /* The parameter for this ioctl is a struct fiemap followed
4313 * by an array of struct fiemap_extent whose size is set
4314 * in fiemap->fm_extent_count. The array is filled in by the
4317 int target_size_in
, target_size_out
;
4319 const argtype
*arg_type
= ie
->arg_type
;
4320 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
4323 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
4327 assert(arg_type
[0] == TYPE_PTR
);
4328 assert(ie
->access
== IOC_RW
);
4330 target_size_in
= thunk_type_size(arg_type
, 0);
4331 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
4333 return -TARGET_EFAULT
;
4335 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4336 unlock_user(argptr
, arg
, 0);
4337 fm
= (struct fiemap
*)buf_temp
;
4338 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
4339 return -TARGET_EINVAL
;
4342 outbufsz
= sizeof (*fm
) +
4343 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
4345 if (outbufsz
> MAX_STRUCT_SIZE
) {
4346 /* We can't fit all the extents into the fixed size buffer.
4347 * Allocate one that is large enough and use it instead.
4349 fm
= g_try_malloc(outbufsz
);
4351 return -TARGET_ENOMEM
;
4353 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
4356 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, fm
));
4357 if (!is_error(ret
)) {
4358 target_size_out
= target_size_in
;
4359 /* An extent_count of 0 means we were only counting the extents
4360 * so there are no structs to copy
4362 if (fm
->fm_extent_count
!= 0) {
4363 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
4365 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
4367 ret
= -TARGET_EFAULT
;
4369 /* Convert the struct fiemap */
4370 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
4371 if (fm
->fm_extent_count
!= 0) {
4372 p
= argptr
+ target_size_in
;
4373 /* ...and then all the struct fiemap_extents */
4374 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
4375 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
4380 unlock_user(argptr
, arg
, target_size_out
);
4390 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4391 int fd
, int cmd
, abi_long arg
)
4393 const argtype
*arg_type
= ie
->arg_type
;
4397 struct ifconf
*host_ifconf
;
4399 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
4400 int target_ifreq_size
;
4405 abi_long target_ifc_buf
;
4409 assert(arg_type
[0] == TYPE_PTR
);
4410 assert(ie
->access
== IOC_RW
);
4413 target_size
= thunk_type_size(arg_type
, 0);
4415 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4417 return -TARGET_EFAULT
;
4418 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4419 unlock_user(argptr
, arg
, 0);
4421 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
4422 target_ifc_len
= host_ifconf
->ifc_len
;
4423 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
4425 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
4426 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
4427 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
4429 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
4430 if (outbufsz
> MAX_STRUCT_SIZE
) {
4431 /* We can't fit all the extents into the fixed size buffer.
4432 * Allocate one that is large enough and use it instead.
4434 host_ifconf
= malloc(outbufsz
);
4436 return -TARGET_ENOMEM
;
4438 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
4441 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
4443 host_ifconf
->ifc_len
= host_ifc_len
;
4444 host_ifconf
->ifc_buf
= host_ifc_buf
;
4446 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
4447 if (!is_error(ret
)) {
4448 /* convert host ifc_len to target ifc_len */
4450 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
4451 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
4452 host_ifconf
->ifc_len
= target_ifc_len
;
4454 /* restore target ifc_buf */
4456 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
4458 /* copy struct ifconf to target user */
4460 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4462 return -TARGET_EFAULT
;
4463 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
4464 unlock_user(argptr
, arg
, target_size
);
4466 /* copy ifreq[] to target user */
4468 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
4469 for (i
= 0; i
< nb_ifreq
; i
++) {
4470 thunk_convert(argptr
+ i
* target_ifreq_size
,
4471 host_ifc_buf
+ i
* sizeof(struct ifreq
),
4472 ifreq_arg_type
, THUNK_TARGET
);
4474 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
4484 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
4485 int cmd
, abi_long arg
)
4488 struct dm_ioctl
*host_dm
;
4489 abi_long guest_data
;
4490 uint32_t guest_data_size
;
4492 const argtype
*arg_type
= ie
->arg_type
;
4494 void *big_buf
= NULL
;
4498 target_size
= thunk_type_size(arg_type
, 0);
4499 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4501 ret
= -TARGET_EFAULT
;
4504 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4505 unlock_user(argptr
, arg
, 0);
4507 /* buf_temp is too small, so fetch things into a bigger buffer */
4508 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
4509 memcpy(big_buf
, buf_temp
, target_size
);
4513 guest_data
= arg
+ host_dm
->data_start
;
4514 if ((guest_data
- arg
) < 0) {
4518 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
4519 host_data
= (char*)host_dm
+ host_dm
->data_start
;
4521 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
4522 switch (ie
->host_cmd
) {
4524 case DM_LIST_DEVICES
:
4527 case DM_DEV_SUSPEND
:
4530 case DM_TABLE_STATUS
:
4531 case DM_TABLE_CLEAR
:
4533 case DM_LIST_VERSIONS
:
4537 case DM_DEV_SET_GEOMETRY
:
4538 /* data contains only strings */
4539 memcpy(host_data
, argptr
, guest_data_size
);
4542 memcpy(host_data
, argptr
, guest_data_size
);
4543 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
4547 void *gspec
= argptr
;
4548 void *cur_data
= host_data
;
4549 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
4550 int spec_size
= thunk_type_size(arg_type
, 0);
4553 for (i
= 0; i
< host_dm
->target_count
; i
++) {
4554 struct dm_target_spec
*spec
= cur_data
;
4558 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
4559 slen
= strlen((char*)gspec
+ spec_size
) + 1;
4561 spec
->next
= sizeof(*spec
) + slen
;
4562 strcpy((char*)&spec
[1], gspec
+ spec_size
);
4564 cur_data
+= spec
->next
;
4569 ret
= -TARGET_EINVAL
;
4570 unlock_user(argptr
, guest_data
, 0);
4573 unlock_user(argptr
, guest_data
, 0);
4575 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4576 if (!is_error(ret
)) {
4577 guest_data
= arg
+ host_dm
->data_start
;
4578 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
4579 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
4580 switch (ie
->host_cmd
) {
4585 case DM_DEV_SUSPEND
:
4588 case DM_TABLE_CLEAR
:
4590 case DM_DEV_SET_GEOMETRY
:
4591 /* no return data */
4593 case DM_LIST_DEVICES
:
4595 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
4596 uint32_t remaining_data
= guest_data_size
;
4597 void *cur_data
= argptr
;
4598 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
4599 int nl_size
= 12; /* can't use thunk_size due to alignment */
4602 uint32_t next
= nl
->next
;
4604 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
4606 if (remaining_data
< nl
->next
) {
4607 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
4610 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
4611 strcpy(cur_data
+ nl_size
, nl
->name
);
4612 cur_data
+= nl
->next
;
4613 remaining_data
-= nl
->next
;
4617 nl
= (void*)nl
+ next
;
4622 case DM_TABLE_STATUS
:
4624 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
4625 void *cur_data
= argptr
;
4626 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
4627 int spec_size
= thunk_type_size(arg_type
, 0);
4630 for (i
= 0; i
< host_dm
->target_count
; i
++) {
4631 uint32_t next
= spec
->next
;
4632 int slen
= strlen((char*)&spec
[1]) + 1;
4633 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
4634 if (guest_data_size
< spec
->next
) {
4635 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
4638 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
4639 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
4640 cur_data
= argptr
+ spec
->next
;
4641 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
4647 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
4648 int count
= *(uint32_t*)hdata
;
4649 uint64_t *hdev
= hdata
+ 8;
4650 uint64_t *gdev
= argptr
+ 8;
4653 *(uint32_t*)argptr
= tswap32(count
);
4654 for (i
= 0; i
< count
; i
++) {
4655 *gdev
= tswap64(*hdev
);
4661 case DM_LIST_VERSIONS
:
4663 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
4664 uint32_t remaining_data
= guest_data_size
;
4665 void *cur_data
= argptr
;
4666 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
4667 int vers_size
= thunk_type_size(arg_type
, 0);
4670 uint32_t next
= vers
->next
;
4672 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
4674 if (remaining_data
< vers
->next
) {
4675 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
4678 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
4679 strcpy(cur_data
+ vers_size
, vers
->name
);
4680 cur_data
+= vers
->next
;
4681 remaining_data
-= vers
->next
;
4685 vers
= (void*)vers
+ next
;
4690 unlock_user(argptr
, guest_data
, 0);
4691 ret
= -TARGET_EINVAL
;
4694 unlock_user(argptr
, guest_data
, guest_data_size
);
4696 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4698 ret
= -TARGET_EFAULT
;
4701 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
4702 unlock_user(argptr
, arg
, target_size
);
4709 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
4710 int cmd
, abi_long arg
)
4714 const argtype
*arg_type
= ie
->arg_type
;
4715 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
4718 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
4719 struct blkpg_partition host_part
;
4721 /* Read and convert blkpg */
4723 target_size
= thunk_type_size(arg_type
, 0);
4724 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4726 ret
= -TARGET_EFAULT
;
4729 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4730 unlock_user(argptr
, arg
, 0);
4732 switch (host_blkpg
->op
) {
4733 case BLKPG_ADD_PARTITION
:
4734 case BLKPG_DEL_PARTITION
:
4735 /* payload is struct blkpg_partition */
4738 /* Unknown opcode */
4739 ret
= -TARGET_EINVAL
;
4743 /* Read and convert blkpg->data */
4744 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
4745 target_size
= thunk_type_size(part_arg_type
, 0);
4746 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4748 ret
= -TARGET_EFAULT
;
4751 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
4752 unlock_user(argptr
, arg
, 0);
4754 /* Swizzle the data pointer to our local copy and call! */
4755 host_blkpg
->data
= &host_part
;
4756 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
4762 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4763 int fd
, int cmd
, abi_long arg
)
4765 const argtype
*arg_type
= ie
->arg_type
;
4766 const StructEntry
*se
;
4767 const argtype
*field_types
;
4768 const int *dst_offsets
, *src_offsets
;
4771 abi_ulong
*target_rt_dev_ptr
;
4772 unsigned long *host_rt_dev_ptr
;
4776 assert(ie
->access
== IOC_W
);
4777 assert(*arg_type
== TYPE_PTR
);
4779 assert(*arg_type
== TYPE_STRUCT
);
4780 target_size
= thunk_type_size(arg_type
, 0);
4781 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4783 return -TARGET_EFAULT
;
4786 assert(*arg_type
== (int)STRUCT_rtentry
);
4787 se
= struct_entries
+ *arg_type
++;
4788 assert(se
->convert
[0] == NULL
);
4789 /* convert struct here to be able to catch rt_dev string */
4790 field_types
= se
->field_types
;
4791 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
4792 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
4793 for (i
= 0; i
< se
->nb_fields
; i
++) {
4794 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
4795 assert(*field_types
== TYPE_PTRVOID
);
4796 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
4797 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
4798 if (*target_rt_dev_ptr
!= 0) {
4799 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
4800 tswapal(*target_rt_dev_ptr
));
4801 if (!*host_rt_dev_ptr
) {
4802 unlock_user(argptr
, arg
, 0);
4803 return -TARGET_EFAULT
;
4806 *host_rt_dev_ptr
= 0;
4811 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
4812 argptr
+ src_offsets
[i
],
4813 field_types
, THUNK_HOST
);
4815 unlock_user(argptr
, arg
, 0);
4817 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4818 if (*host_rt_dev_ptr
!= 0) {
4819 unlock_user((void *)*host_rt_dev_ptr
,
4820 *target_rt_dev_ptr
, 0);
4825 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4826 int fd
, int cmd
, abi_long arg
)
4828 int sig
= target_to_host_signal(arg
);
4829 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
4832 static IOCTLEntry ioctl_entries
[] = {
4833 #define IOCTL(cmd, access, ...) \
4834 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
4835 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
4836 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
4841 /* ??? Implement proper locking for ioctls. */
4842 /* do_ioctl() Must return target values and target errnos. */
4843 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
4845 const IOCTLEntry
*ie
;
4846 const argtype
*arg_type
;
4848 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
4854 if (ie
->target_cmd
== 0) {
4855 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
4856 return -TARGET_ENOSYS
;
4858 if (ie
->target_cmd
== cmd
)
4862 arg_type
= ie
->arg_type
;
4864 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
4867 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
4870 switch(arg_type
[0]) {
4873 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
4877 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
4881 target_size
= thunk_type_size(arg_type
, 0);
4882 switch(ie
->access
) {
4884 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4885 if (!is_error(ret
)) {
4886 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4888 return -TARGET_EFAULT
;
4889 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
4890 unlock_user(argptr
, arg
, target_size
);
4894 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4896 return -TARGET_EFAULT
;
4897 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4898 unlock_user(argptr
, arg
, 0);
4899 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4903 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4905 return -TARGET_EFAULT
;
4906 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4907 unlock_user(argptr
, arg
, 0);
4908 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4909 if (!is_error(ret
)) {
4910 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4912 return -TARGET_EFAULT
;
4913 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
4914 unlock_user(argptr
, arg
, target_size
);
4920 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
4921 (long)cmd
, arg_type
[0]);
4922 ret
= -TARGET_ENOSYS
;
4928 static const bitmask_transtbl iflag_tbl
[] = {
4929 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
4930 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
4931 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
4932 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
4933 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
4934 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
4935 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
4936 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
4937 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
4938 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
4939 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
4940 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
4941 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
4942 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
4946 static const bitmask_transtbl oflag_tbl
[] = {
4947 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
4948 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
4949 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
4950 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
4951 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
4952 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
4953 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
4954 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
4955 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
4956 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
4957 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
4958 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
4959 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
4960 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
4961 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
4962 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
4963 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
4964 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
4965 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
4966 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
4967 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
4968 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
4969 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
4970 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
4974 static const bitmask_transtbl cflag_tbl
[] = {
4975 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
4976 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
4977 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
4978 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
4979 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
4980 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
4981 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
4982 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
4983 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
4984 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
4985 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
4986 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
4987 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
4988 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
4989 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
4990 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
4991 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
4992 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
4993 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
4994 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
4995 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
4996 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
4997 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
4998 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
4999 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
5000 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
5001 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
5002 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
5003 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
5004 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
5005 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
5009 static const bitmask_transtbl lflag_tbl
[] = {
5010 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
5011 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
5012 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
5013 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
5014 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
5015 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
5016 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
5017 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
5018 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
5019 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
5020 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
5021 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
5022 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
5023 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
5024 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
5028 static void target_to_host_termios (void *dst
, const void *src
)
5030 struct host_termios
*host
= dst
;
5031 const struct target_termios
*target
= src
;
5034 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
5036 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
5038 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
5040 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
5041 host
->c_line
= target
->c_line
;
5043 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
5044 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
5045 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
5046 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
5047 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
5048 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
5049 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
5050 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
5051 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
5052 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
5053 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
5054 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
5055 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
5056 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
5057 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
5058 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
5059 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
5060 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
5063 static void host_to_target_termios (void *dst
, const void *src
)
5065 struct target_termios
*target
= dst
;
5066 const struct host_termios
*host
= src
;
5069 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
5071 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
5073 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
5075 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
5076 target
->c_line
= host
->c_line
;
5078 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
5079 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
5080 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
5081 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
5082 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
5083 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
5084 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
5085 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
5086 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
5087 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
5088 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
5089 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
5090 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
5091 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
5092 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
5093 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
5094 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
5095 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
5098 static const StructEntry struct_termios_def
= {
5099 .convert
= { host_to_target_termios
, target_to_host_termios
},
5100 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
5101 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
5104 static bitmask_transtbl mmap_flags_tbl
[] = {
5105 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
5106 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
5107 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
5108 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
, MAP_ANONYMOUS
, MAP_ANONYMOUS
},
5109 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
, MAP_GROWSDOWN
, MAP_GROWSDOWN
},
5110 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
, MAP_DENYWRITE
, MAP_DENYWRITE
},
5111 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
, MAP_EXECUTABLE
, MAP_EXECUTABLE
},
5112 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
5113 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
, MAP_NORESERVE
,
5118 #if defined(TARGET_I386)
5120 /* NOTE: there is really one LDT for all the threads */
5121 static uint8_t *ldt_table
;
5123 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
5130 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
5131 if (size
> bytecount
)
5133 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
5135 return -TARGET_EFAULT
;
5136 /* ??? Should this by byteswapped? */
5137 memcpy(p
, ldt_table
, size
);
5138 unlock_user(p
, ptr
, size
);
5142 /* XXX: add locking support */
5143 static abi_long
write_ldt(CPUX86State
*env
,
5144 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
5146 struct target_modify_ldt_ldt_s ldt_info
;
5147 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5148 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5149 int seg_not_present
, useable
, lm
;
5150 uint32_t *lp
, entry_1
, entry_2
;
5152 if (bytecount
!= sizeof(ldt_info
))
5153 return -TARGET_EINVAL
;
5154 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
5155 return -TARGET_EFAULT
;
5156 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5157 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5158 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5159 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5160 unlock_user_struct(target_ldt_info
, ptr
, 0);
5162 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
5163 return -TARGET_EINVAL
;
5164 seg_32bit
= ldt_info
.flags
& 1;
5165 contents
= (ldt_info
.flags
>> 1) & 3;
5166 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5167 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5168 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5169 useable
= (ldt_info
.flags
>> 6) & 1;
5173 lm
= (ldt_info
.flags
>> 7) & 1;
5175 if (contents
== 3) {
5177 return -TARGET_EINVAL
;
5178 if (seg_not_present
== 0)
5179 return -TARGET_EINVAL
;
5181 /* allocate the LDT */
5183 env
->ldt
.base
= target_mmap(0,
5184 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
5185 PROT_READ
|PROT_WRITE
,
5186 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
5187 if (env
->ldt
.base
== -1)
5188 return -TARGET_ENOMEM
;
5189 memset(g2h(env
->ldt
.base
), 0,
5190 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
5191 env
->ldt
.limit
= 0xffff;
5192 ldt_table
= g2h(env
->ldt
.base
);
5195 /* NOTE: same code as Linux kernel */
5196 /* Allow LDTs to be cleared by the user. */
5197 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5200 read_exec_only
== 1 &&
5202 limit_in_pages
== 0 &&
5203 seg_not_present
== 1 &&
5211 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5212 (ldt_info
.limit
& 0x0ffff);
5213 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5214 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5215 (ldt_info
.limit
& 0xf0000) |
5216 ((read_exec_only
^ 1) << 9) |
5218 ((seg_not_present
^ 1) << 15) |
5220 (limit_in_pages
<< 23) |
5224 entry_2
|= (useable
<< 20);
5226 /* Install the new entry ... */
5228 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
5229 lp
[0] = tswap32(entry_1
);
5230 lp
[1] = tswap32(entry_2
);
5234 /* specific and weird i386 syscalls */
5235 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
5236 unsigned long bytecount
)
5242 ret
= read_ldt(ptr
, bytecount
);
5245 ret
= write_ldt(env
, ptr
, bytecount
, 1);
5248 ret
= write_ldt(env
, ptr
, bytecount
, 0);
5251 ret
= -TARGET_ENOSYS
;
5257 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5258 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5260 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5261 struct target_modify_ldt_ldt_s ldt_info
;
5262 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5263 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5264 int seg_not_present
, useable
, lm
;
5265 uint32_t *lp
, entry_1
, entry_2
;
5268 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5269 if (!target_ldt_info
)
5270 return -TARGET_EFAULT
;
5271 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5272 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5273 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5274 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5275 if (ldt_info
.entry_number
== -1) {
5276 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
5277 if (gdt_table
[i
] == 0) {
5278 ldt_info
.entry_number
= i
;
5279 target_ldt_info
->entry_number
= tswap32(i
);
5284 unlock_user_struct(target_ldt_info
, ptr
, 1);
5286 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
5287 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
5288 return -TARGET_EINVAL
;
5289 seg_32bit
= ldt_info
.flags
& 1;
5290 contents
= (ldt_info
.flags
>> 1) & 3;
5291 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5292 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5293 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5294 useable
= (ldt_info
.flags
>> 6) & 1;
5298 lm
= (ldt_info
.flags
>> 7) & 1;
5301 if (contents
== 3) {
5302 if (seg_not_present
== 0)
5303 return -TARGET_EINVAL
;
5306 /* NOTE: same code as Linux kernel */
5307 /* Allow LDTs to be cleared by the user. */
5308 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5309 if ((contents
== 0 &&
5310 read_exec_only
== 1 &&
5312 limit_in_pages
== 0 &&
5313 seg_not_present
== 1 &&
5321 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5322 (ldt_info
.limit
& 0x0ffff);
5323 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5324 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5325 (ldt_info
.limit
& 0xf0000) |
5326 ((read_exec_only
^ 1) << 9) |
5328 ((seg_not_present
^ 1) << 15) |
5330 (limit_in_pages
<< 23) |
5335 /* Install the new entry ... */
5337 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
5338 lp
[0] = tswap32(entry_1
);
5339 lp
[1] = tswap32(entry_2
);
5343 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5345 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5346 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5347 uint32_t base_addr
, limit
, flags
;
5348 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
5349 int seg_not_present
, useable
, lm
;
5350 uint32_t *lp
, entry_1
, entry_2
;
5352 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5353 if (!target_ldt_info
)
5354 return -TARGET_EFAULT
;
5355 idx
= tswap32(target_ldt_info
->entry_number
);
5356 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
5357 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
5358 unlock_user_struct(target_ldt_info
, ptr
, 1);
5359 return -TARGET_EINVAL
;
5361 lp
= (uint32_t *)(gdt_table
+ idx
);
5362 entry_1
= tswap32(lp
[0]);
5363 entry_2
= tswap32(lp
[1]);
5365 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
5366 contents
= (entry_2
>> 10) & 3;
5367 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
5368 seg_32bit
= (entry_2
>> 22) & 1;
5369 limit_in_pages
= (entry_2
>> 23) & 1;
5370 useable
= (entry_2
>> 20) & 1;
5374 lm
= (entry_2
>> 21) & 1;
5376 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
5377 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
5378 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
5379 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
5380 base_addr
= (entry_1
>> 16) |
5381 (entry_2
& 0xff000000) |
5382 ((entry_2
& 0xff) << 16);
5383 target_ldt_info
->base_addr
= tswapal(base_addr
);
5384 target_ldt_info
->limit
= tswap32(limit
);
5385 target_ldt_info
->flags
= tswap32(flags
);
5386 unlock_user_struct(target_ldt_info
, ptr
, 1);
5389 #endif /* TARGET_I386 && TARGET_ABI32 */
5391 #ifndef TARGET_ABI32
5392 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
5399 case TARGET_ARCH_SET_GS
:
5400 case TARGET_ARCH_SET_FS
:
5401 if (code
== TARGET_ARCH_SET_GS
)
5405 cpu_x86_load_seg(env
, idx
, 0);
5406 env
->segs
[idx
].base
= addr
;
5408 case TARGET_ARCH_GET_GS
:
5409 case TARGET_ARCH_GET_FS
:
5410 if (code
== TARGET_ARCH_GET_GS
)
5414 val
= env
->segs
[idx
].base
;
5415 if (put_user(val
, addr
, abi_ulong
))
5416 ret
= -TARGET_EFAULT
;
5419 ret
= -TARGET_EINVAL
;
5426 #endif /* defined(TARGET_I386) */
5428 #define NEW_STACK_SIZE 0x40000
5431 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
5434 pthread_mutex_t mutex
;
5435 pthread_cond_t cond
;
5438 abi_ulong child_tidptr
;
5439 abi_ulong parent_tidptr
;
5443 static void *clone_func(void *arg
)
5445 new_thread_info
*info
= arg
;
5450 rcu_register_thread();
5452 cpu
= ENV_GET_CPU(env
);
5454 ts
= (TaskState
*)cpu
->opaque
;
5455 info
->tid
= gettid();
5456 cpu
->host_tid
= info
->tid
;
5458 if (info
->child_tidptr
)
5459 put_user_u32(info
->tid
, info
->child_tidptr
);
5460 if (info
->parent_tidptr
)
5461 put_user_u32(info
->tid
, info
->parent_tidptr
);
5462 /* Enable signals. */
5463 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
5464 /* Signal to the parent that we're ready. */
5465 pthread_mutex_lock(&info
->mutex
);
5466 pthread_cond_broadcast(&info
->cond
);
5467 pthread_mutex_unlock(&info
->mutex
);
5468 /* Wait until the parent has finshed initializing the tls state. */
5469 pthread_mutex_lock(&clone_lock
);
5470 pthread_mutex_unlock(&clone_lock
);
5476 /* do_fork() Must return host values and target errnos (unlike most
5477 do_*() functions). */
5478 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
5479 abi_ulong parent_tidptr
, target_ulong newtls
,
5480 abi_ulong child_tidptr
)
5482 CPUState
*cpu
= ENV_GET_CPU(env
);
5486 CPUArchState
*new_env
;
5487 unsigned int nptl_flags
;
5490 /* Emulate vfork() with fork() */
5491 if (flags
& CLONE_VFORK
)
5492 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
5494 if (flags
& CLONE_VM
) {
5495 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
5496 new_thread_info info
;
5497 pthread_attr_t attr
;
5499 ts
= g_new0(TaskState
, 1);
5500 init_task_state(ts
);
5501 /* we create a new CPU instance. */
5502 new_env
= cpu_copy(env
);
5503 /* Init regs that differ from the parent. */
5504 cpu_clone_regs(new_env
, newsp
);
5505 new_cpu
= ENV_GET_CPU(new_env
);
5506 new_cpu
->opaque
= ts
;
5507 ts
->bprm
= parent_ts
->bprm
;
5508 ts
->info
= parent_ts
->info
;
5509 ts
->signal_mask
= parent_ts
->signal_mask
;
5511 flags
&= ~CLONE_NPTL_FLAGS2
;
5513 if (nptl_flags
& CLONE_CHILD_CLEARTID
) {
5514 ts
->child_tidptr
= child_tidptr
;
5517 if (nptl_flags
& CLONE_SETTLS
)
5518 cpu_set_tls (new_env
, newtls
);
5520 /* Grab a mutex so that thread setup appears atomic. */
5521 pthread_mutex_lock(&clone_lock
);
5523 memset(&info
, 0, sizeof(info
));
5524 pthread_mutex_init(&info
.mutex
, NULL
);
5525 pthread_mutex_lock(&info
.mutex
);
5526 pthread_cond_init(&info
.cond
, NULL
);
5528 if (nptl_flags
& CLONE_CHILD_SETTID
)
5529 info
.child_tidptr
= child_tidptr
;
5530 if (nptl_flags
& CLONE_PARENT_SETTID
)
5531 info
.parent_tidptr
= parent_tidptr
;
5533 ret
= pthread_attr_init(&attr
);
5534 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
5535 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
5536 /* It is not safe to deliver signals until the child has finished
5537 initializing, so temporarily block all signals. */
5538 sigfillset(&sigmask
);
5539 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
5541 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
5542 /* TODO: Free new CPU state if thread creation failed. */
5544 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
5545 pthread_attr_destroy(&attr
);
5547 /* Wait for the child to initialize. */
5548 pthread_cond_wait(&info
.cond
, &info
.mutex
);
5550 if (flags
& CLONE_PARENT_SETTID
)
5551 put_user_u32(ret
, parent_tidptr
);
5555 pthread_mutex_unlock(&info
.mutex
);
5556 pthread_cond_destroy(&info
.cond
);
5557 pthread_mutex_destroy(&info
.mutex
);
5558 pthread_mutex_unlock(&clone_lock
);
5560 /* if no CLONE_VM, we consider it is a fork */
5561 if ((flags
& ~(CSIGNAL
| CLONE_NPTL_FLAGS2
)) != 0) {
5562 return -TARGET_EINVAL
;
5565 if (block_signals()) {
5566 return -TARGET_ERESTARTSYS
;
5572 /* Child Process. */
5574 cpu_clone_regs(env
, newsp
);
5576 /* There is a race condition here. The parent process could
5577 theoretically read the TID in the child process before the child
5578 tid is set. This would require using either ptrace
5579 (not implemented) or having *_tidptr to point at a shared memory
5580 mapping. We can't repeat the spinlock hack used above because
5581 the child process gets its own copy of the lock. */
5582 if (flags
& CLONE_CHILD_SETTID
)
5583 put_user_u32(gettid(), child_tidptr
);
5584 if (flags
& CLONE_PARENT_SETTID
)
5585 put_user_u32(gettid(), parent_tidptr
);
5586 ts
= (TaskState
*)cpu
->opaque
;
5587 if (flags
& CLONE_SETTLS
)
5588 cpu_set_tls (env
, newtls
);
5589 if (flags
& CLONE_CHILD_CLEARTID
)
5590 ts
->child_tidptr
= child_tidptr
;
5598 /* warning : doesn't handle linux specific flags... */
5599 static int target_to_host_fcntl_cmd(int cmd
)
5602 case TARGET_F_DUPFD
:
5603 case TARGET_F_GETFD
:
5604 case TARGET_F_SETFD
:
5605 case TARGET_F_GETFL
:
5606 case TARGET_F_SETFL
:
5608 case TARGET_F_GETLK
:
5610 case TARGET_F_SETLK
:
5612 case TARGET_F_SETLKW
:
5614 case TARGET_F_GETOWN
:
5616 case TARGET_F_SETOWN
:
5618 case TARGET_F_GETSIG
:
5620 case TARGET_F_SETSIG
:
5622 #if TARGET_ABI_BITS == 32
5623 case TARGET_F_GETLK64
:
5625 case TARGET_F_SETLK64
:
5627 case TARGET_F_SETLKW64
:
5630 case TARGET_F_SETLEASE
:
5632 case TARGET_F_GETLEASE
:
5634 #ifdef F_DUPFD_CLOEXEC
5635 case TARGET_F_DUPFD_CLOEXEC
:
5636 return F_DUPFD_CLOEXEC
;
5638 case TARGET_F_NOTIFY
:
5641 case TARGET_F_GETOWN_EX
:
5645 case TARGET_F_SETOWN_EX
:
5649 case TARGET_F_SETPIPE_SZ
:
5650 return F_SETPIPE_SZ
;
5651 case TARGET_F_GETPIPE_SZ
:
5652 return F_GETPIPE_SZ
;
5655 return -TARGET_EINVAL
;
5657 return -TARGET_EINVAL
;
5660 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
5661 static const bitmask_transtbl flock_tbl
[] = {
5662 TRANSTBL_CONVERT(F_RDLCK
),
5663 TRANSTBL_CONVERT(F_WRLCK
),
5664 TRANSTBL_CONVERT(F_UNLCK
),
5665 TRANSTBL_CONVERT(F_EXLCK
),
5666 TRANSTBL_CONVERT(F_SHLCK
),
5670 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
5671 abi_ulong target_flock_addr
)
5673 struct target_flock
*target_fl
;
5676 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
5677 return -TARGET_EFAULT
;
5680 __get_user(l_type
, &target_fl
->l_type
);
5681 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
5682 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
5683 __get_user(fl
->l_start
, &target_fl
->l_start
);
5684 __get_user(fl
->l_len
, &target_fl
->l_len
);
5685 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
5686 unlock_user_struct(target_fl
, target_flock_addr
, 0);
5690 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
5691 const struct flock64
*fl
)
5693 struct target_flock
*target_fl
;
5696 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
5697 return -TARGET_EFAULT
;
5700 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
5701 __put_user(l_type
, &target_fl
->l_type
);
5702 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
5703 __put_user(fl
->l_start
, &target_fl
->l_start
);
5704 __put_user(fl
->l_len
, &target_fl
->l_len
);
5705 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
5706 unlock_user_struct(target_fl
, target_flock_addr
, 1);
5710 typedef abi_long
from_flock64_fn(struct flock64
*fl
, abi_ulong target_addr
);
5711 typedef abi_long
to_flock64_fn(abi_ulong target_addr
, const struct flock64
*fl
);
5713 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
5714 static inline abi_long
copy_from_user_eabi_flock64(struct flock64
*fl
,
5715 abi_ulong target_flock_addr
)
5717 struct target_eabi_flock64
*target_fl
;
5720 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
5721 return -TARGET_EFAULT
;
5724 __get_user(l_type
, &target_fl
->l_type
);
5725 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
5726 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
5727 __get_user(fl
->l_start
, &target_fl
->l_start
);
5728 __get_user(fl
->l_len
, &target_fl
->l_len
);
5729 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
5730 unlock_user_struct(target_fl
, target_flock_addr
, 0);
5734 static inline abi_long
copy_to_user_eabi_flock64(abi_ulong target_flock_addr
,
5735 const struct flock64
*fl
)
5737 struct target_eabi_flock64
*target_fl
;
5740 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
5741 return -TARGET_EFAULT
;
5744 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
5745 __put_user(l_type
, &target_fl
->l_type
);
5746 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
5747 __put_user(fl
->l_start
, &target_fl
->l_start
);
5748 __put_user(fl
->l_len
, &target_fl
->l_len
);
5749 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
5750 unlock_user_struct(target_fl
, target_flock_addr
, 1);
5755 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
5756 abi_ulong target_flock_addr
)
5758 struct target_flock64
*target_fl
;
5761 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
5762 return -TARGET_EFAULT
;
5765 __get_user(l_type
, &target_fl
->l_type
);
5766 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
5767 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
5768 __get_user(fl
->l_start
, &target_fl
->l_start
);
5769 __get_user(fl
->l_len
, &target_fl
->l_len
);
5770 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
5771 unlock_user_struct(target_fl
, target_flock_addr
, 0);
5775 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
5776 const struct flock64
*fl
)
5778 struct target_flock64
*target_fl
;
5781 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
5782 return -TARGET_EFAULT
;
5785 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
5786 __put_user(l_type
, &target_fl
->l_type
);
5787 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
5788 __put_user(fl
->l_start
, &target_fl
->l_start
);
5789 __put_user(fl
->l_len
, &target_fl
->l_len
);
5790 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
5791 unlock_user_struct(target_fl
, target_flock_addr
, 1);
5795 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
5797 struct flock64 fl64
;
5799 struct f_owner_ex fox
;
5800 struct target_f_owner_ex
*target_fox
;
5803 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
5805 if (host_cmd
== -TARGET_EINVAL
)
5809 case TARGET_F_GETLK
:
5810 ret
= copy_from_user_flock(&fl64
, arg
);
5814 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
5816 ret
= copy_to_user_flock(arg
, &fl64
);
5820 case TARGET_F_SETLK
:
5821 case TARGET_F_SETLKW
:
5822 ret
= copy_from_user_flock(&fl64
, arg
);
5826 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
5829 case TARGET_F_GETLK64
:
5830 ret
= copy_from_user_flock64(&fl64
, arg
);
5834 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
5836 ret
= copy_to_user_flock64(arg
, &fl64
);
5839 case TARGET_F_SETLK64
:
5840 case TARGET_F_SETLKW64
:
5841 ret
= copy_from_user_flock64(&fl64
, arg
);
5845 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
5848 case TARGET_F_GETFL
:
5849 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
5851 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
5855 case TARGET_F_SETFL
:
5856 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
5857 target_to_host_bitmask(arg
,
5862 case TARGET_F_GETOWN_EX
:
5863 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
5865 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
5866 return -TARGET_EFAULT
;
5867 target_fox
->type
= tswap32(fox
.type
);
5868 target_fox
->pid
= tswap32(fox
.pid
);
5869 unlock_user_struct(target_fox
, arg
, 1);
5875 case TARGET_F_SETOWN_EX
:
5876 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
5877 return -TARGET_EFAULT
;
5878 fox
.type
= tswap32(target_fox
->type
);
5879 fox
.pid
= tswap32(target_fox
->pid
);
5880 unlock_user_struct(target_fox
, arg
, 0);
5881 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
5885 case TARGET_F_SETOWN
:
5886 case TARGET_F_GETOWN
:
5887 case TARGET_F_SETSIG
:
5888 case TARGET_F_GETSIG
:
5889 case TARGET_F_SETLEASE
:
5890 case TARGET_F_GETLEASE
:
5891 case TARGET_F_SETPIPE_SZ
:
5892 case TARGET_F_GETPIPE_SZ
:
5893 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
5897 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
#ifdef USE_UID16

/* 16-bit UID ABI helpers: clamp 32-bit ids into the 16-bit range
 * (overflow maps to 65534, the traditional "nobody") and widen 16-bit
 * ids back, preserving the -1 "unchanged" sentinel.
 * NOTE(review): the opening #ifdef USE_UID16 and several return lines
 * were dropped by the extraction and are restored here.
 */
static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}

static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}

static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}

static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}
static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */
/* 32-bit UID ABI: ids pass through unchanged. */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
5969 /* We must do direct syscalls for setting UID/GID, because we want to
5970 * implement the Linux system call semantics of "change only for this thread",
5971 * not the libc/POSIX semantics of "change for all threads in process".
5972 * (See http://ewontfix.com/17/ for more details.)
5973 * We use the 32-bit version of the syscalls if present; if it is not
5974 * then either the host architecture supports 32-bit UIDs natively with
5975 * the standard syscall, or the 16-bit UID is the best we can do.
5977 #ifdef __NR_setuid32
5978 #define __NR_sys_setuid __NR_setuid32
5980 #define __NR_sys_setuid __NR_setuid
5982 #ifdef __NR_setgid32
5983 #define __NR_sys_setgid __NR_setgid32
5985 #define __NR_sys_setgid __NR_setgid
5987 #ifdef __NR_setresuid32
5988 #define __NR_sys_setresuid __NR_setresuid32
5990 #define __NR_sys_setresuid __NR_setresuid
5992 #ifdef __NR_setresgid32
5993 #define __NR_sys_setresgid __NR_setresgid32
5995 #define __NR_sys_setresgid __NR_setresgid
5998 _syscall1(int, sys_setuid
, uid_t
, uid
)
5999 _syscall1(int, sys_setgid
, gid_t
, gid
)
6000 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
6001 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
6003 void syscall_init(void)
6006 const argtype
*arg_type
;
6010 thunk_init(STRUCT_MAX
);
6012 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6013 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6014 #include "syscall_types.h"
6016 #undef STRUCT_SPECIAL
6018 /* Build target_to_host_errno_table[] table from
6019 * host_to_target_errno_table[]. */
6020 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
6021 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
6024 /* we patch the ioctl size if necessary. We rely on the fact that
6025 no ioctl has all the bits at '1' in the size field */
6027 while (ie
->target_cmd
!= 0) {
6028 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
6029 TARGET_IOC_SIZEMASK
) {
6030 arg_type
= ie
->arg_type
;
6031 if (arg_type
[0] != TYPE_PTR
) {
6032 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
6037 size
= thunk_type_size(arg_type
, 0);
6038 ie
->target_cmd
= (ie
->target_cmd
&
6039 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
6040 (size
<< TARGET_IOC_SIZESHIFT
);
6043 /* automatic consistency check if same arch */
6044 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6045 (defined(__x86_64__) && defined(TARGET_X86_64))
6046 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
6047 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6048 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#if TARGET_ABI_BITS == 32
/* Assemble a 64-bit file offset from the two 32-bit register words a
 * 32-bit guest passes; word order depends on guest endianness. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit guests pass the whole offset in one register; the second
 * argument is ignored. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* truncate64(2) for 32-bit guests: the 64-bit length arrives as two
 * registers; on ABIs that align register pairs the pair is shifted up
 * by one argument slot, so re-slot before combining.
 * NOTE(review): the arg2/arg3 re-slotting statements were dropped by the
 * extraction and are reconstructed from the regpairs_aligned() check.
 */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* ftruncate64(2) for 32-bit guests; same register-pair re-slotting as
 * target_truncate64 above. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
6099 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
6100 abi_ulong target_addr
)
6102 struct target_timespec
*target_ts
;
6104 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
6105 return -TARGET_EFAULT
;
6106 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6107 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6108 unlock_user_struct(target_ts
, target_addr
, 0);
6112 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
6113 struct timespec
*host_ts
)
6115 struct target_timespec
*target_ts
;
6117 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
6118 return -TARGET_EFAULT
;
6119 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6120 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6121 unlock_user_struct(target_ts
, target_addr
, 1);
6125 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
6126 abi_ulong target_addr
)
6128 struct target_itimerspec
*target_itspec
;
6130 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
6131 return -TARGET_EFAULT
;
6134 host_itspec
->it_interval
.tv_sec
=
6135 tswapal(target_itspec
->it_interval
.tv_sec
);
6136 host_itspec
->it_interval
.tv_nsec
=
6137 tswapal(target_itspec
->it_interval
.tv_nsec
);
6138 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
6139 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
6141 unlock_user_struct(target_itspec
, target_addr
, 1);
6145 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
6146 struct itimerspec
*host_its
)
6148 struct target_itimerspec
*target_itspec
;
6150 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
6151 return -TARGET_EFAULT
;
6154 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
6155 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
6157 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
6158 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
6160 unlock_user_struct(target_itspec
, target_addr
, 0);
/* Copy a struct sigevent from guest memory at target_addr into *host_sevp,
 * byte-swapping fields and remapping the signal number to the host
 * numbering.  Returns 0 on success, -TARGET_EFAULT on a bad address.
 */
static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
                                               abi_ulong target_addr)
{
    struct target_sigevent *target_sevp;

    if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    /* This union is awkward on 64 bit systems because it has a 32 bit
     * integer and a pointer in it; we follow the conversion approach
     * used for handling sigval types in signal.c so the guest should get
     * the correct value back even if we did a 64 bit byteswap and it's
     * using the 32 bit integer.
     */
    host_sevp->sigev_value.sival_ptr =
        (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
    host_sevp->sigev_signo =
        target_to_host_signal(tswap32(target_sevp->sigev_signo));
    host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
    host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);

    unlock_user_struct(target_sevp, target_addr, 1);
    return 0;
}
6190 #if defined(TARGET_NR_mlockall)
6191 static inline int target_to_host_mlockall_arg(int arg
)
6195 if (arg
& TARGET_MLOCKALL_MCL_CURRENT
) {
6196 result
|= MCL_CURRENT
;
6198 if (arg
& TARGET_MLOCKALL_MCL_FUTURE
) {
6199 result
|= MCL_FUTURE
;
/* Write *host_st out to guest memory at target_addr in the layout the
 * guest's stat64-family syscalls expect.  On 32-bit ARM EABI guests a
 * differently padded structure is used.  Returns 0 on success,
 * -TARGET_EFAULT if the guest buffer cannot be locked.
 */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    /* ARM EABI uses its own (padded) stat64 layout.  */
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        /* Zero first: the target struct has padding the copies below skip. */
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        /* Some ABIs carry the inode in a second, differently-sized field. */
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  However they're probably useless because guest atomic
   operations won't work either.  */
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    /* Strip modifier bits (PRIVATE/CLOCK_REALTIME) to select on the
     * base command, but still pass the full op to the host.  */
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            target_to_host_timespec(pts, timeout);
        } else {
            pts = NULL;
        }
        /* val is compared against guest memory, so swap it to host order. */
        return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
                         pts, NULL, val3));
    case FUTEX_WAKE:
        return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_FD:
        return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return get_errno(safe_futex(g2h(uaddr), op, val, pts,
                                    g2h(uaddr2),
                                    /* val3 is compared to guest memory only
                                     * for CMP_REQUEUE, so only then swap it. */
                                    (base_op == FUTEX_CMP_REQUEUE
                                     ? tswap32(val3)
                                     : val3)));
    default:
        return -TARGET_ENOSYS;
    }
}
6319 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6320 static abi_long
do_name_to_handle_at(abi_long dirfd
, abi_long pathname
,
6321 abi_long handle
, abi_long mount_id
,
6324 struct file_handle
*target_fh
;
6325 struct file_handle
*fh
;
6329 unsigned int size
, total_size
;
6331 if (get_user_s32(size
, handle
)) {
6332 return -TARGET_EFAULT
;
6335 name
= lock_user_string(pathname
);
6337 return -TARGET_EFAULT
;
6340 total_size
= sizeof(struct file_handle
) + size
;
6341 target_fh
= lock_user(VERIFY_WRITE
, handle
, total_size
, 0);
6343 unlock_user(name
, pathname
, 0);
6344 return -TARGET_EFAULT
;
6347 fh
= g_malloc0(total_size
);
6348 fh
->handle_bytes
= size
;
6350 ret
= get_errno(name_to_handle_at(dirfd
, path(name
), fh
, &mid
, flags
));
6351 unlock_user(name
, pathname
, 0);
6353 /* man name_to_handle_at(2):
6354 * Other than the use of the handle_bytes field, the caller should treat
6355 * the file_handle structure as an opaque data type
6358 memcpy(target_fh
, fh
, total_size
);
6359 target_fh
->handle_bytes
= tswap32(fh
->handle_bytes
);
6360 target_fh
->handle_type
= tswap32(fh
->handle_type
);
6362 unlock_user(target_fh
, handle
, total_size
);
6364 if (put_user_s32(mid
, mount_id
)) {
6365 return -TARGET_EFAULT
;
6373 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6374 static abi_long
do_open_by_handle_at(abi_long mount_fd
, abi_long handle
,
6377 struct file_handle
*target_fh
;
6378 struct file_handle
*fh
;
6379 unsigned int size
, total_size
;
6382 if (get_user_s32(size
, handle
)) {
6383 return -TARGET_EFAULT
;
6386 total_size
= sizeof(struct file_handle
) + size
;
6387 target_fh
= lock_user(VERIFY_READ
, handle
, total_size
, 1);
6389 return -TARGET_EFAULT
;
6392 fh
= g_memdup(target_fh
, total_size
);
6393 fh
->handle_bytes
= size
;
6394 fh
->handle_type
= tswap32(target_fh
->handle_type
);
6396 ret
= get_errno(open_by_handle_at(mount_fd
, fh
,
6397 target_to_host_bitmask(flags
, fcntl_flags_tbl
)));
6401 unlock_user(target_fh
, handle
, total_size
);
6407 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
/* signalfd siginfo conversion */

/* Convert a host signalfd_siginfo record to the guest's byte order,
 * remapping the signal number to guest numbering.  The only caller
 * passes the same buffer as both tinfo and info (in-place conversion).
 */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */

#ifdef BUS_MCEERR_AO
    if (tinfo->ssi_signo == SIGBUS &&
        (tinfo->ssi_code == BUS_MCEERR_AR ||
         tinfo->ssi_code == BUS_MCEERR_AO)) {
        /* ssi_addr_lsb lives in the padding right after ssi_addr.  */
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    /* Fix: read ssi_errno from the source struct (info) like every other
     * field, instead of from the destination (tinfo).  Behavior is
     * unchanged for the existing in-place caller, but the code no longer
     * silently depends on tinfo == info.  */
    tinfo->ssi_errno = tswap32(info->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
6449 static abi_long
host_to_target_data_signalfd(void *buf
, size_t len
)
6453 for (i
= 0; i
< len
; i
+= sizeof(struct signalfd_siginfo
)) {
6454 host_to_target_signalfd_siginfo(buf
+ i
, buf
+ i
);
/* fd_trans entry registered on signalfd descriptors so that data read
 * from them is converted to the guest's byte order.  */
static TargetFdTrans target_signalfd_trans = {
    .host_to_target_data = host_to_target_data_signalfd,
};
/* Implement signalfd4(2) for the guest: validate the flags, convert the
 * guest sigset and flag bits to host form, create the signalfd, and
 * register an fd_trans so reads are byte-swapped for the guest.
 * Returns the new fd, -TARGET_EINVAL for unsupported flags, or
 * -TARGET_EFAULT for a bad mask address.
 */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    sigset_t host_mask;
    abi_long ret;
    target_sigset_t *target_mask;

    if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Low 7 bits hold the terminating signal; remap it and keep the
         * remaining bits (e.g. the core-dump flag) unchanged.  */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* The stop signal lives in bits 8-15; remap it, keep the low byte. */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
/* Back the guest's /proc/self/cmdline with the host's, minus the first
 * argv entry (which is the qemu binary's path, not the emulated
 * program's).  Copies the remainder of /proc/self/cmdline into fd.
 * Returns 0 on success, negative on error.
 */
static int open_self_cmdline(void *cpu_env, int fd)
{
    int fd_orig = -1;
    bool word_skipped = false;

    fd_orig = open("/proc/self/cmdline", O_RDONLY);
    if (fd_orig < 0) {
        return fd_orig;
    }

    while (true) {
        ssize_t nb_read;
        char buf[128];
        char *cp_buf = buf;

        nb_read = read(fd_orig, buf, sizeof(buf));
        if (nb_read < 0) {
            /* NOTE(review): the original error path closed fd_orig and
             * returned an error here -- confirm exact return value.  */
            fd_orig = close(fd_orig);
            return -1;
        } else if (nb_read == 0) {
            break;
        }

        if (!word_skipped) {
            /* Skip the first string, which is the path to qemu-*-static
               instead of the actual command. */
            cp_buf = memchr(buf, 0, sizeof(buf));
            if (cp_buf) {
                /* Null byte found, skip one string */
                cp_buf++;
                nb_read -= cp_buf - buf;
                word_skipped = true;
            }
        }

        if (word_skipped) {
            if (write(fd, cp_buf, nb_read) != nb_read) {
                close(fd_orig);
                return -1;
            }
        }
    }

    return close(fd_orig);
}
/* Back the guest's /proc/self/maps: parse the host's maps file and emit
 * only the regions that correspond to valid guest address space, with
 * addresses translated back to guest view (h2g) and the guest stack
 * labelled.  Returns 0 (host lines that fail to parse are skipped).
 */
static int open_self_maps(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/self/maps", "r");
    if (fp == NULL) {
        return -1;
    }

    while ((read = getline(&line, &len, fp)) != -1) {
        int fields, dev_maj, dev_min, inode;
        uint64_t min, max, offset;
        char flag_r, flag_w, flag_x, flag_p;
        char path[512] = "";
        fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
                        " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
                        &flag_p, &offset, &dev_maj, &dev_min, &inode, path);

        /* 10 fields when the path is empty (anonymous mapping), 11 with it. */
        if ((fields < 10) || (fields > 11)) {
            continue;
        }
        if (h2g_valid(min)) {
            int flags = page_get_flags(h2g(min));
            /* Clamp ranges that extend past the guest address space.  */
            max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
            if (page_check_range(h2g(min), max - min, flags) == -1) {
                continue;
            }
            if (h2g(min) == ts->info->stack_limit) {
                pstrcpy(path, sizeof(path), " [stack]");
            }
            dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
                    " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
                    h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
                    flag_x, flag_p, offset, dev_maj, dev_min, inode,
                    path[0] ? " " : "", path);
        }
    }

    free(line);
    fclose(fp);

    return 0;
}
/* Back the guest's /proc/self/stat: emit 44 space-separated fields, with
 * only pid (field 0), comm (field 1) and startstack (field 27) filled in
 * from the emulated task; all other fields are written as "0".
 * Returns 0 on success, -1 on a short write.
 */
static int open_self_stat(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong start_stack = ts->info->start_stack;
    int i;

    for (i = 0; i < 44; i++) {
        char buf[128];
        int len;
        uint64_t val = 0;

        if (i == 0) {
            /* pid */
            val = getpid();
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else if (i == 1) {
            /* app name */
            snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
        } else if (i == 27) {
            /* stack bottom */
            val = start_stack;
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else {
            /* for the rest, there is MasterCard */
            snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
        }

        len = strlen(buf);
        if (write(fd, buf, len) != len) {
            return -1;
        }
    }

    return 0;
}
/* Back the guest's /proc/self/auxv: copy the auxiliary vector saved on
 * the guest stack at exec time out to fd, then rewind fd so the reader
 * starts at offset 0.  Returns 0 (best effort: a short write just stops
 * the copy).
 */
static int open_self_auxv(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        /* NOTE(review): ptr/len were advanced by the loop, so this unlock
         * does not describe the original locked range -- harmless on
         * non-DEBUG_REMAP builds, but confirm.  */
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
/* Return nonzero iff FILENAME names ENTRY inside this process's own
 * /proc directory -- i.e. "/proc/self/ENTRY" or "/proc/<our pid>/ENTRY".
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    const char *rest = filename;

    if (strncmp(rest, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    rest += strlen("/proc/");

    if (strncmp(rest, "self/", strlen("self/")) == 0) {
        rest += strlen("self/");
    } else if (*rest >= '1' && *rest <= '9') {
        /* A numeric directory only counts if it is our own pid.  */
        char pid_prefix[80];

        snprintf(pid_prefix, sizeof(pid_prefix), "%d/", getpid());
        if (strncmp(rest, pid_prefix, strlen(pid_prefix)) != 0) {
            return 0;
        }
        rest += strlen(pid_prefix);
    } else {
        return 0;
    }

    return strcmp(rest, entry) == 0;
}
6697 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-match comparison of a pathname against a /proc entry name.  */
static int is_proc(const char *filename, const char *entry)
{
    if (strcmp(filename, entry) != 0) {
        return 0;
    }
    return 1;
}
/* Back the guest's /proc/net/route when host and guest endianness differ:
 * copy the header line verbatim, then re-emit each route with the
 * address/mask words byte-swapped into guest order.  Returns 0, or -1 if
 * the host file cannot be opened.
 */
static int open_net_route(void *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -1;
    }

    /* read header */

    read = getline(&line, &len, fp);
    dprintf(fd, "%s", line);

    /* read routes */

    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
               iface, &dest, &gw, &flags, &refcnt, &use, &metric,
               &mask, &mtu, &window, &irtt);
        /* Only the 32-bit address fields are endianness-sensitive.  */
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
/* openat(2) with /proc interception: paths that name this process's own
 * /proc entries (maps, stat, auxv, cmdline -- and /proc/net/route on
 * cross-endian builds) are redirected to an unlinked temp file filled
 * with emulated contents; "/proc/self/exe" is redirected to the guest
 * binary.  Everything else falls through to the real openat.
 */
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    /* Table of faked /proc entries: match predicate + content generator. */
    struct fake_open {
        const char *filename;
        int (*fill)(void *cpu_env, int fd);
        int (*cmp)(const char *s1, const char *s2);
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    if (is_proc_myself(pathname, "exe")) {
        /* Prefer the fd the loader kept open for the guest binary.  */
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir) {
            tmpdir = "/tmp";
        }
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        /* Unlink immediately: the fd keeps the file alive, nothing else
         * should see it in the filesystem.  */
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            close(fd);
            return r;
        }
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
6801 #define TIMER_MAGIC 0x0caf0000
6802 #define TIMER_MAGIC_MASK 0xffff0000
/* Convert QEMU provided timer ID back to internal 16bit index format */
static target_timer_t get_timer_id(abi_long arg)
{
    target_timer_t timerid = arg;

    /* Reject IDs that do not carry the TIMER_MAGIC tag in the top bits. */
    if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
        return -TARGET_EINVAL;
    }

    timerid &= 0xffff;

    /* The low 16 bits index g_posix_timers; bounds-check them.  */
    if (timerid >= ARRAY_SIZE(g_posix_timers)) {
        return -TARGET_EINVAL;
    }

    return timerid;
}
6822 /* do_syscall() should always have a single exit point at the end so
6823 that actions, such as logging of syscall results, can be performed.
6824 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
6825 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
6826 abi_long arg2
, abi_long arg3
, abi_long arg4
,
6827 abi_long arg5
, abi_long arg6
, abi_long arg7
,
6830 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
6836 #if defined(DEBUG_ERESTARTSYS)
6837 /* Debug-only code for exercising the syscall-restart code paths
6838 * in the per-architecture cpu main loops: restart every syscall
6839 * the guest makes once before letting it through.
6846 return -TARGET_ERESTARTSYS
;
6852 gemu_log("syscall %d", num
);
6854 trace_guest_user_syscall(cpu
, num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, arg7
, arg8
);
6856 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6859 case TARGET_NR_exit
:
6860 /* In old applications this may be used to implement _exit(2).
6861 However in threaded applictions it is used for thread termination,
6862 and _exit_group is used for application termination.
6863 Do thread termination if we have more then one thread. */
6865 if (block_signals()) {
6866 ret
= -TARGET_ERESTARTSYS
;
6870 if (CPU_NEXT(first_cpu
)) {
6874 /* Remove the CPU from the list. */
6875 QTAILQ_REMOVE(&cpus
, cpu
, node
);
6878 if (ts
->child_tidptr
) {
6879 put_user_u32(0, ts
->child_tidptr
);
6880 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
6884 object_unref(OBJECT(cpu
));
6886 rcu_unregister_thread();
6892 gdb_exit(cpu_env
, arg1
);
6894 ret
= 0; /* avoid warning */
6896 case TARGET_NR_read
:
6900 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
6902 ret
= get_errno(safe_read(arg1
, p
, arg3
));
6904 fd_trans_host_to_target_data(arg1
)) {
6905 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
6907 unlock_user(p
, arg2
, ret
);
6910 case TARGET_NR_write
:
6911 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
6913 ret
= get_errno(safe_write(arg1
, p
, arg3
));
6914 unlock_user(p
, arg2
, 0);
6916 #ifdef TARGET_NR_open
6917 case TARGET_NR_open
:
6918 if (!(p
= lock_user_string(arg1
)))
6920 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
6921 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
6923 fd_trans_unregister(ret
);
6924 unlock_user(p
, arg1
, 0);
6927 case TARGET_NR_openat
:
6928 if (!(p
= lock_user_string(arg2
)))
6930 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
6931 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
6933 fd_trans_unregister(ret
);
6934 unlock_user(p
, arg2
, 0);
6936 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6937 case TARGET_NR_name_to_handle_at
:
6938 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
6941 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6942 case TARGET_NR_open_by_handle_at
:
6943 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
6944 fd_trans_unregister(ret
);
6947 case TARGET_NR_close
:
6948 fd_trans_unregister(arg1
);
6949 ret
= get_errno(close(arg1
));
6954 #ifdef TARGET_NR_fork
6955 case TARGET_NR_fork
:
6956 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
6959 #ifdef TARGET_NR_waitpid
6960 case TARGET_NR_waitpid
:
6963 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
6964 if (!is_error(ret
) && arg2
&& ret
6965 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
6970 #ifdef TARGET_NR_waitid
6971 case TARGET_NR_waitid
:
6975 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
6976 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
6977 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
6979 host_to_target_siginfo(p
, &info
);
6980 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
6985 #ifdef TARGET_NR_creat /* not on alpha */
6986 case TARGET_NR_creat
:
6987 if (!(p
= lock_user_string(arg1
)))
6989 ret
= get_errno(creat(p
, arg2
));
6990 fd_trans_unregister(ret
);
6991 unlock_user(p
, arg1
, 0);
6994 #ifdef TARGET_NR_link
6995 case TARGET_NR_link
:
6998 p
= lock_user_string(arg1
);
6999 p2
= lock_user_string(arg2
);
7001 ret
= -TARGET_EFAULT
;
7003 ret
= get_errno(link(p
, p2
));
7004 unlock_user(p2
, arg2
, 0);
7005 unlock_user(p
, arg1
, 0);
7009 #if defined(TARGET_NR_linkat)
7010 case TARGET_NR_linkat
:
7015 p
= lock_user_string(arg2
);
7016 p2
= lock_user_string(arg4
);
7018 ret
= -TARGET_EFAULT
;
7020 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
7021 unlock_user(p
, arg2
, 0);
7022 unlock_user(p2
, arg4
, 0);
7026 #ifdef TARGET_NR_unlink
7027 case TARGET_NR_unlink
:
7028 if (!(p
= lock_user_string(arg1
)))
7030 ret
= get_errno(unlink(p
));
7031 unlock_user(p
, arg1
, 0);
7034 #if defined(TARGET_NR_unlinkat)
7035 case TARGET_NR_unlinkat
:
7036 if (!(p
= lock_user_string(arg2
)))
7038 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
7039 unlock_user(p
, arg2
, 0);
7042 case TARGET_NR_execve
:
7044 char **argp
, **envp
;
7047 abi_ulong guest_argp
;
7048 abi_ulong guest_envp
;
7055 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
7056 if (get_user_ual(addr
, gp
))
7064 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
7065 if (get_user_ual(addr
, gp
))
7072 argp
= alloca((argc
+ 1) * sizeof(void *));
7073 envp
= alloca((envc
+ 1) * sizeof(void *));
7075 for (gp
= guest_argp
, q
= argp
; gp
;
7076 gp
+= sizeof(abi_ulong
), q
++) {
7077 if (get_user_ual(addr
, gp
))
7081 if (!(*q
= lock_user_string(addr
)))
7083 total_size
+= strlen(*q
) + 1;
7087 for (gp
= guest_envp
, q
= envp
; gp
;
7088 gp
+= sizeof(abi_ulong
), q
++) {
7089 if (get_user_ual(addr
, gp
))
7093 if (!(*q
= lock_user_string(addr
)))
7095 total_size
+= strlen(*q
) + 1;
7099 if (!(p
= lock_user_string(arg1
)))
7101 /* Although execve() is not an interruptible syscall it is
7102 * a special case where we must use the safe_syscall wrapper:
7103 * if we allow a signal to happen before we make the host
7104 * syscall then we will 'lose' it, because at the point of
7105 * execve the process leaves QEMU's control. So we use the
7106 * safe syscall wrapper to ensure that we either take the
7107 * signal as a guest signal, or else it does not happen
7108 * before the execve completes and makes it the other
7109 * program's problem.
7111 ret
= get_errno(safe_execve(p
, argp
, envp
));
7112 unlock_user(p
, arg1
, 0);
7117 ret
= -TARGET_EFAULT
;
7120 for (gp
= guest_argp
, q
= argp
; *q
;
7121 gp
+= sizeof(abi_ulong
), q
++) {
7122 if (get_user_ual(addr
, gp
)
7125 unlock_user(*q
, addr
, 0);
7127 for (gp
= guest_envp
, q
= envp
; *q
;
7128 gp
+= sizeof(abi_ulong
), q
++) {
7129 if (get_user_ual(addr
, gp
)
7132 unlock_user(*q
, addr
, 0);
7136 case TARGET_NR_chdir
:
7137 if (!(p
= lock_user_string(arg1
)))
7139 ret
= get_errno(chdir(p
));
7140 unlock_user(p
, arg1
, 0);
7142 #ifdef TARGET_NR_time
7143 case TARGET_NR_time
:
7146 ret
= get_errno(time(&host_time
));
7149 && put_user_sal(host_time
, arg1
))
7154 #ifdef TARGET_NR_mknod
7155 case TARGET_NR_mknod
:
7156 if (!(p
= lock_user_string(arg1
)))
7158 ret
= get_errno(mknod(p
, arg2
, arg3
));
7159 unlock_user(p
, arg1
, 0);
7162 #if defined(TARGET_NR_mknodat)
7163 case TARGET_NR_mknodat
:
7164 if (!(p
= lock_user_string(arg2
)))
7166 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
7167 unlock_user(p
, arg2
, 0);
7170 #ifdef TARGET_NR_chmod
7171 case TARGET_NR_chmod
:
7172 if (!(p
= lock_user_string(arg1
)))
7174 ret
= get_errno(chmod(p
, arg2
));
7175 unlock_user(p
, arg1
, 0);
7178 #ifdef TARGET_NR_break
7179 case TARGET_NR_break
:
7182 #ifdef TARGET_NR_oldstat
7183 case TARGET_NR_oldstat
:
7186 case TARGET_NR_lseek
:
7187 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
7189 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7190 /* Alpha specific */
7191 case TARGET_NR_getxpid
:
7192 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
7193 ret
= get_errno(getpid());
7196 #ifdef TARGET_NR_getpid
7197 case TARGET_NR_getpid
:
7198 ret
= get_errno(getpid());
7201 case TARGET_NR_mount
:
7203 /* need to look at the data field */
7207 p
= lock_user_string(arg1
);
7215 p2
= lock_user_string(arg2
);
7218 unlock_user(p
, arg1
, 0);
7224 p3
= lock_user_string(arg3
);
7227 unlock_user(p
, arg1
, 0);
7229 unlock_user(p2
, arg2
, 0);
7236 /* FIXME - arg5 should be locked, but it isn't clear how to
7237 * do that since it's not guaranteed to be a NULL-terminated
7241 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
7243 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
7245 ret
= get_errno(ret
);
7248 unlock_user(p
, arg1
, 0);
7250 unlock_user(p2
, arg2
, 0);
7252 unlock_user(p3
, arg3
, 0);
7256 #ifdef TARGET_NR_umount
7257 case TARGET_NR_umount
:
7258 if (!(p
= lock_user_string(arg1
)))
7260 ret
= get_errno(umount(p
));
7261 unlock_user(p
, arg1
, 0);
7264 #ifdef TARGET_NR_stime /* not on alpha */
7265 case TARGET_NR_stime
:
7268 if (get_user_sal(host_time
, arg1
))
7270 ret
= get_errno(stime(&host_time
));
7274 case TARGET_NR_ptrace
:
7276 #ifdef TARGET_NR_alarm /* not on alpha */
7277 case TARGET_NR_alarm
:
7281 #ifdef TARGET_NR_oldfstat
7282 case TARGET_NR_oldfstat
:
7285 #ifdef TARGET_NR_pause /* not on alpha */
7286 case TARGET_NR_pause
:
7287 if (!block_signals()) {
7288 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
7290 ret
= -TARGET_EINTR
;
7293 #ifdef TARGET_NR_utime
7294 case TARGET_NR_utime
:
7296 struct utimbuf tbuf
, *host_tbuf
;
7297 struct target_utimbuf
*target_tbuf
;
7299 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
7301 tbuf
.actime
= tswapal(target_tbuf
->actime
);
7302 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
7303 unlock_user_struct(target_tbuf
, arg2
, 0);
7308 if (!(p
= lock_user_string(arg1
)))
7310 ret
= get_errno(utime(p
, host_tbuf
));
7311 unlock_user(p
, arg1
, 0);
7315 #ifdef TARGET_NR_utimes
7316 case TARGET_NR_utimes
:
7318 struct timeval
*tvp
, tv
[2];
7320 if (copy_from_user_timeval(&tv
[0], arg2
)
7321 || copy_from_user_timeval(&tv
[1],
7322 arg2
+ sizeof(struct target_timeval
)))
7328 if (!(p
= lock_user_string(arg1
)))
7330 ret
= get_errno(utimes(p
, tvp
));
7331 unlock_user(p
, arg1
, 0);
7335 #if defined(TARGET_NR_futimesat)
7336 case TARGET_NR_futimesat
:
7338 struct timeval
*tvp
, tv
[2];
7340 if (copy_from_user_timeval(&tv
[0], arg3
)
7341 || copy_from_user_timeval(&tv
[1],
7342 arg3
+ sizeof(struct target_timeval
)))
7348 if (!(p
= lock_user_string(arg2
)))
7350 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
7351 unlock_user(p
, arg2
, 0);
7355 #ifdef TARGET_NR_stty
7356 case TARGET_NR_stty
:
7359 #ifdef TARGET_NR_gtty
7360 case TARGET_NR_gtty
:
7363 #ifdef TARGET_NR_access
7364 case TARGET_NR_access
:
7365 if (!(p
= lock_user_string(arg1
)))
7367 ret
= get_errno(access(path(p
), arg2
));
7368 unlock_user(p
, arg1
, 0);
7371 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7372 case TARGET_NR_faccessat
:
7373 if (!(p
= lock_user_string(arg2
)))
7375 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
7376 unlock_user(p
, arg2
, 0);
7379 #ifdef TARGET_NR_nice /* not on alpha */
7380 case TARGET_NR_nice
:
7381 ret
= get_errno(nice(arg1
));
7384 #ifdef TARGET_NR_ftime
7385 case TARGET_NR_ftime
:
7388 case TARGET_NR_sync
:
7392 case TARGET_NR_kill
:
7393 ret
= get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
7395 #ifdef TARGET_NR_rename
7396 case TARGET_NR_rename
:
7399 p
= lock_user_string(arg1
);
7400 p2
= lock_user_string(arg2
);
7402 ret
= -TARGET_EFAULT
;
7404 ret
= get_errno(rename(p
, p2
));
7405 unlock_user(p2
, arg2
, 0);
7406 unlock_user(p
, arg1
, 0);
7410 #if defined(TARGET_NR_renameat)
7411 case TARGET_NR_renameat
:
7414 p
= lock_user_string(arg2
);
7415 p2
= lock_user_string(arg4
);
7417 ret
= -TARGET_EFAULT
;
7419 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
7420 unlock_user(p2
, arg4
, 0);
7421 unlock_user(p
, arg2
, 0);
7425 #ifdef TARGET_NR_mkdir
7426 case TARGET_NR_mkdir
:
7427 if (!(p
= lock_user_string(arg1
)))
7429 ret
= get_errno(mkdir(p
, arg2
));
7430 unlock_user(p
, arg1
, 0);
7433 #if defined(TARGET_NR_mkdirat)
7434 case TARGET_NR_mkdirat
:
7435 if (!(p
= lock_user_string(arg2
)))
7437 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
7438 unlock_user(p
, arg2
, 0);
7441 #ifdef TARGET_NR_rmdir
7442 case TARGET_NR_rmdir
:
7443 if (!(p
= lock_user_string(arg1
)))
7445 ret
= get_errno(rmdir(p
));
7446 unlock_user(p
, arg1
, 0);
7450 ret
= get_errno(dup(arg1
));
7452 fd_trans_dup(arg1
, ret
);
7455 #ifdef TARGET_NR_pipe
7456 case TARGET_NR_pipe
:
7457 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
7460 #ifdef TARGET_NR_pipe2
7461 case TARGET_NR_pipe2
:
7462 ret
= do_pipe(cpu_env
, arg1
,
7463 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
7466 case TARGET_NR_times
:
7468 struct target_tms
*tmsp
;
7470 ret
= get_errno(times(&tms
));
7472 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
7475 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
7476 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
7477 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
7478 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
7481 ret
= host_to_target_clock_t(ret
);
7484 #ifdef TARGET_NR_prof
7485 case TARGET_NR_prof
:
7488 #ifdef TARGET_NR_signal
7489 case TARGET_NR_signal
:
7492 case TARGET_NR_acct
:
7494 ret
= get_errno(acct(NULL
));
7496 if (!(p
= lock_user_string(arg1
)))
7498 ret
= get_errno(acct(path(p
)));
7499 unlock_user(p
, arg1
, 0);
7502 #ifdef TARGET_NR_umount2
7503 case TARGET_NR_umount2
:
7504 if (!(p
= lock_user_string(arg1
)))
7506 ret
= get_errno(umount2(p
, arg2
));
7507 unlock_user(p
, arg1
, 0);
7510 #ifdef TARGET_NR_lock
7511 case TARGET_NR_lock
:
7514 case TARGET_NR_ioctl
:
7515 ret
= do_ioctl(arg1
, arg2
, arg3
);
7517 case TARGET_NR_fcntl
:
7518 ret
= do_fcntl(arg1
, arg2
, arg3
);
7520 #ifdef TARGET_NR_mpx
7524 case TARGET_NR_setpgid
:
7525 ret
= get_errno(setpgid(arg1
, arg2
));
7527 #ifdef TARGET_NR_ulimit
7528 case TARGET_NR_ulimit
:
7531 #ifdef TARGET_NR_oldolduname
7532 case TARGET_NR_oldolduname
:
7535 case TARGET_NR_umask
:
7536 ret
= get_errno(umask(arg1
));
7538 case TARGET_NR_chroot
:
7539 if (!(p
= lock_user_string(arg1
)))
7541 ret
= get_errno(chroot(p
));
7542 unlock_user(p
, arg1
, 0);
7544 #ifdef TARGET_NR_ustat
7545 case TARGET_NR_ustat
:
7548 #ifdef TARGET_NR_dup2
7549 case TARGET_NR_dup2
:
7550 ret
= get_errno(dup2(arg1
, arg2
));
7552 fd_trans_dup(arg1
, arg2
);
7556 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7557 case TARGET_NR_dup3
:
7558 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
7560 fd_trans_dup(arg1
, arg2
);
7564 #ifdef TARGET_NR_getppid /* not on alpha */
7565 case TARGET_NR_getppid
:
7566 ret
= get_errno(getppid());
7569 #ifdef TARGET_NR_getpgrp
7570 case TARGET_NR_getpgrp
:
7571 ret
= get_errno(getpgrp());
7574 case TARGET_NR_setsid
:
7575 ret
= get_errno(setsid());
7577 #ifdef TARGET_NR_sigaction
7578 case TARGET_NR_sigaction
:
7580 #if defined(TARGET_ALPHA)
7581 struct target_sigaction act
, oact
, *pact
= 0;
7582 struct target_old_sigaction
*old_act
;
7584 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
7586 act
._sa_handler
= old_act
->_sa_handler
;
7587 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
7588 act
.sa_flags
= old_act
->sa_flags
;
7589 act
.sa_restorer
= 0;
7590 unlock_user_struct(old_act
, arg2
, 0);
7593 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7594 if (!is_error(ret
) && arg3
) {
7595 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
7597 old_act
->_sa_handler
= oact
._sa_handler
;
7598 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
7599 old_act
->sa_flags
= oact
.sa_flags
;
7600 unlock_user_struct(old_act
, arg3
, 1);
7602 #elif defined(TARGET_MIPS)
7603 struct target_sigaction act
, oact
, *pact
, *old_act
;
7606 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
7608 act
._sa_handler
= old_act
->_sa_handler
;
7609 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
7610 act
.sa_flags
= old_act
->sa_flags
;
7611 unlock_user_struct(old_act
, arg2
, 0);
7617 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7619 if (!is_error(ret
) && arg3
) {
7620 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
7622 old_act
->_sa_handler
= oact
._sa_handler
;
7623 old_act
->sa_flags
= oact
.sa_flags
;
7624 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
7625 old_act
->sa_mask
.sig
[1] = 0;
7626 old_act
->sa_mask
.sig
[2] = 0;
7627 old_act
->sa_mask
.sig
[3] = 0;
7628 unlock_user_struct(old_act
, arg3
, 1);
7631 struct target_old_sigaction
*old_act
;
7632 struct target_sigaction act
, oact
, *pact
;
7634 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
7636 act
._sa_handler
= old_act
->_sa_handler
;
7637 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
7638 act
.sa_flags
= old_act
->sa_flags
;
7639 act
.sa_restorer
= old_act
->sa_restorer
;
7640 unlock_user_struct(old_act
, arg2
, 0);
7645 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7646 if (!is_error(ret
) && arg3
) {
7647 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
7649 old_act
->_sa_handler
= oact
._sa_handler
;
7650 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
7651 old_act
->sa_flags
= oact
.sa_flags
;
7652 old_act
->sa_restorer
= oact
.sa_restorer
;
7653 unlock_user_struct(old_act
, arg3
, 1);
7659 case TARGET_NR_rt_sigaction
:
7661 #if defined(TARGET_ALPHA)
7662 struct target_sigaction act
, oact
, *pact
= 0;
7663 struct target_rt_sigaction
*rt_act
;
7664 /* ??? arg4 == sizeof(sigset_t). */
7666 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
7668 act
._sa_handler
= rt_act
->_sa_handler
;
7669 act
.sa_mask
= rt_act
->sa_mask
;
7670 act
.sa_flags
= rt_act
->sa_flags
;
7671 act
.sa_restorer
= arg5
;
7672 unlock_user_struct(rt_act
, arg2
, 0);
7675 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7676 if (!is_error(ret
) && arg3
) {
7677 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
7679 rt_act
->_sa_handler
= oact
._sa_handler
;
7680 rt_act
->sa_mask
= oact
.sa_mask
;
7681 rt_act
->sa_flags
= oact
.sa_flags
;
7682 unlock_user_struct(rt_act
, arg3
, 1);
7685 struct target_sigaction
*act
;
7686 struct target_sigaction
*oact
;
7689 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
7694 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
7695 ret
= -TARGET_EFAULT
;
7696 goto rt_sigaction_fail
;
7700 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
7703 unlock_user_struct(act
, arg2
, 0);
7705 unlock_user_struct(oact
, arg3
, 1);
7709 #ifdef TARGET_NR_sgetmask /* not on alpha */
7710 case TARGET_NR_sgetmask
:
7713 abi_ulong target_set
;
7714 ret
= do_sigprocmask(0, NULL
, &cur_set
);
7716 host_to_target_old_sigset(&target_set
, &cur_set
);
7722 #ifdef TARGET_NR_ssetmask /* not on alpha */
7723 case TARGET_NR_ssetmask
:
7725 sigset_t set
, oset
, cur_set
;
7726 abi_ulong target_set
= arg1
;
7727 /* We only have one word of the new mask so we must read
7728 * the rest of it with do_sigprocmask() and OR in this word.
7729 * We are guaranteed that a do_sigprocmask() that only queries
7730 * the signal mask will not fail.
7732 ret
= do_sigprocmask(0, NULL
, &cur_set
);
7734 target_to_host_old_sigset(&set
, &target_set
);
7735 sigorset(&set
, &set
, &cur_set
);
7736 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
7738 host_to_target_old_sigset(&target_set
, &oset
);
7744 #ifdef TARGET_NR_sigprocmask
7745 case TARGET_NR_sigprocmask
:
7747 #if defined(TARGET_ALPHA)
7748 sigset_t set
, oldset
;
7753 case TARGET_SIG_BLOCK
:
7756 case TARGET_SIG_UNBLOCK
:
7759 case TARGET_SIG_SETMASK
:
7763 ret
= -TARGET_EINVAL
;
7767 target_to_host_old_sigset(&set
, &mask
);
7769 ret
= do_sigprocmask(how
, &set
, &oldset
);
7770 if (!is_error(ret
)) {
7771 host_to_target_old_sigset(&mask
, &oldset
);
7773 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
7776 sigset_t set
, oldset
, *set_ptr
;
7781 case TARGET_SIG_BLOCK
:
7784 case TARGET_SIG_UNBLOCK
:
7787 case TARGET_SIG_SETMASK
:
7791 ret
= -TARGET_EINVAL
;
7794 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
7796 target_to_host_old_sigset(&set
, p
);
7797 unlock_user(p
, arg2
, 0);
7803 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
7804 if (!is_error(ret
) && arg3
) {
7805 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
7807 host_to_target_old_sigset(p
, &oldset
);
7808 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
7814 case TARGET_NR_rt_sigprocmask
:
7817 sigset_t set
, oldset
, *set_ptr
;
7821 case TARGET_SIG_BLOCK
:
7824 case TARGET_SIG_UNBLOCK
:
7827 case TARGET_SIG_SETMASK
:
7831 ret
= -TARGET_EINVAL
;
7834 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
7836 target_to_host_sigset(&set
, p
);
7837 unlock_user(p
, arg2
, 0);
7843 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
7844 if (!is_error(ret
) && arg3
) {
7845 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
7847 host_to_target_sigset(p
, &oldset
);
7848 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
7852 #ifdef TARGET_NR_sigpending
7853 case TARGET_NR_sigpending
:
7856 ret
= get_errno(sigpending(&set
));
7857 if (!is_error(ret
)) {
7858 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
7860 host_to_target_old_sigset(p
, &set
);
7861 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
7866 case TARGET_NR_rt_sigpending
:
7869 ret
= get_errno(sigpending(&set
));
7870 if (!is_error(ret
)) {
7871 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
7873 host_to_target_sigset(p
, &set
);
7874 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
7878 #ifdef TARGET_NR_sigsuspend
7879 case TARGET_NR_sigsuspend
:
7881 TaskState
*ts
= cpu
->opaque
;
7882 #if defined(TARGET_ALPHA)
7883 abi_ulong mask
= arg1
;
7884 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
7886 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
7888 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
7889 unlock_user(p
, arg1
, 0);
7891 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
7893 if (ret
!= -TARGET_ERESTARTSYS
) {
7894 ts
->in_sigsuspend
= 1;
7899 case TARGET_NR_rt_sigsuspend
:
7901 TaskState
*ts
= cpu
->opaque
;
7902 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
7904 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
7905 unlock_user(p
, arg1
, 0);
7906 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
7908 if (ret
!= -TARGET_ERESTARTSYS
) {
7909 ts
->in_sigsuspend
= 1;
7913 case TARGET_NR_rt_sigtimedwait
:
7916 struct timespec uts
, *puts
;
7919 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
7921 target_to_host_sigset(&set
, p
);
7922 unlock_user(p
, arg1
, 0);
7925 target_to_host_timespec(puts
, arg3
);
7929 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
7931 if (!is_error(ret
)) {
7933 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
7938 host_to_target_siginfo(p
, &uinfo
);
7939 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
7941 ret
= host_to_target_signal(ret
);
7945 case TARGET_NR_rt_sigqueueinfo
:
7949 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
7953 target_to_host_siginfo(&uinfo
, p
);
7954 unlock_user(p
, arg1
, 0);
7955 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
7958 #ifdef TARGET_NR_sigreturn
7959 case TARGET_NR_sigreturn
:
7960 if (block_signals()) {
7961 ret
= -TARGET_ERESTARTSYS
;
7963 ret
= do_sigreturn(cpu_env
);
7967 case TARGET_NR_rt_sigreturn
:
7968 if (block_signals()) {
7969 ret
= -TARGET_ERESTARTSYS
;
7971 ret
= do_rt_sigreturn(cpu_env
);
7974 case TARGET_NR_sethostname
:
7975 if (!(p
= lock_user_string(arg1
)))
7977 ret
= get_errno(sethostname(p
, arg2
));
7978 unlock_user(p
, arg1
, 0);
7980 case TARGET_NR_setrlimit
:
7982 int resource
= target_to_host_resource(arg1
);
7983 struct target_rlimit
*target_rlim
;
7985 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
7987 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
7988 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
7989 unlock_user_struct(target_rlim
, arg2
, 0);
7990 ret
= get_errno(setrlimit(resource
, &rlim
));
7993 case TARGET_NR_getrlimit
:
7995 int resource
= target_to_host_resource(arg1
);
7996 struct target_rlimit
*target_rlim
;
7999 ret
= get_errno(getrlimit(resource
, &rlim
));
8000 if (!is_error(ret
)) {
8001 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
8003 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
8004 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
8005 unlock_user_struct(target_rlim
, arg2
, 1);
8009 case TARGET_NR_getrusage
:
8011 struct rusage rusage
;
8012 ret
= get_errno(getrusage(arg1
, &rusage
));
8013 if (!is_error(ret
)) {
8014 ret
= host_to_target_rusage(arg2
, &rusage
);
8018 case TARGET_NR_gettimeofday
:
8021 ret
= get_errno(gettimeofday(&tv
, NULL
));
8022 if (!is_error(ret
)) {
8023 if (copy_to_user_timeval(arg1
, &tv
))
8028 case TARGET_NR_settimeofday
:
8030 struct timeval tv
, *ptv
= NULL
;
8031 struct timezone tz
, *ptz
= NULL
;
8034 if (copy_from_user_timeval(&tv
, arg1
)) {
8041 if (copy_from_user_timezone(&tz
, arg2
)) {
8047 ret
= get_errno(settimeofday(ptv
, ptz
));
8050 #if defined(TARGET_NR_select)
8051 case TARGET_NR_select
:
8052 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
8053 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
8056 struct target_sel_arg_struct
*sel
;
8057 abi_ulong inp
, outp
, exp
, tvp
;
8060 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1))
8062 nsel
= tswapal(sel
->n
);
8063 inp
= tswapal(sel
->inp
);
8064 outp
= tswapal(sel
->outp
);
8065 exp
= tswapal(sel
->exp
);
8066 tvp
= tswapal(sel
->tvp
);
8067 unlock_user_struct(sel
, arg1
, 0);
8068 ret
= do_select(nsel
, inp
, outp
, exp
, tvp
);
8073 #ifdef TARGET_NR_pselect6
8074 case TARGET_NR_pselect6
:
8076 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
8077 fd_set rfds
, wfds
, efds
;
8078 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
8079 struct timespec ts
, *ts_ptr
;
8082 * The 6th arg is actually two args smashed together,
8083 * so we cannot use the C library.
8091 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
8092 target_sigset_t
*target_sigset
;
8100 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
8104 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
8108 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
8114 * This takes a timespec, and not a timeval, so we cannot
8115 * use the do_select() helper ...
8118 if (target_to_host_timespec(&ts
, ts_addr
)) {
8126 /* Extract the two packed args for the sigset */
8129 sig
.size
= SIGSET_T_SIZE
;
8131 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
8135 arg_sigset
= tswapal(arg7
[0]);
8136 arg_sigsize
= tswapal(arg7
[1]);
8137 unlock_user(arg7
, arg6
, 0);
8141 if (arg_sigsize
!= sizeof(*target_sigset
)) {
8142 /* Like the kernel, we enforce correct size sigsets */
8143 ret
= -TARGET_EINVAL
;
8146 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
8147 sizeof(*target_sigset
), 1);
8148 if (!target_sigset
) {
8151 target_to_host_sigset(&set
, target_sigset
);
8152 unlock_user(target_sigset
, arg_sigset
, 0);
8160 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
8163 if (!is_error(ret
)) {
8164 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
8166 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
8168 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
8171 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
8177 #ifdef TARGET_NR_symlink
8178 case TARGET_NR_symlink
:
8181 p
= lock_user_string(arg1
);
8182 p2
= lock_user_string(arg2
);
8184 ret
= -TARGET_EFAULT
;
8186 ret
= get_errno(symlink(p
, p2
));
8187 unlock_user(p2
, arg2
, 0);
8188 unlock_user(p
, arg1
, 0);
8192 #if defined(TARGET_NR_symlinkat)
8193 case TARGET_NR_symlinkat
:
8196 p
= lock_user_string(arg1
);
8197 p2
= lock_user_string(arg3
);
8199 ret
= -TARGET_EFAULT
;
8201 ret
= get_errno(symlinkat(p
, arg2
, p2
));
8202 unlock_user(p2
, arg3
, 0);
8203 unlock_user(p
, arg1
, 0);
8207 #ifdef TARGET_NR_oldlstat
8208 case TARGET_NR_oldlstat
:
8211 #ifdef TARGET_NR_readlink
8212 case TARGET_NR_readlink
:
8215 p
= lock_user_string(arg1
);
8216 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8218 ret
= -TARGET_EFAULT
;
8220 /* Short circuit this for the magic exe check. */
8221 ret
= -TARGET_EINVAL
;
8222 } else if (is_proc_myself((const char *)p
, "exe")) {
8223 char real
[PATH_MAX
], *temp
;
8224 temp
= realpath(exec_path
, real
);
8225 /* Return value is # of bytes that we wrote to the buffer. */
8227 ret
= get_errno(-1);
8229 /* Don't worry about sign mismatch as earlier mapping
8230 * logic would have thrown a bad address error. */
8231 ret
= MIN(strlen(real
), arg3
);
8232 /* We cannot NUL terminate the string. */
8233 memcpy(p2
, real
, ret
);
8236 ret
= get_errno(readlink(path(p
), p2
, arg3
));
8238 unlock_user(p2
, arg2
, ret
);
8239 unlock_user(p
, arg1
, 0);
8243 #if defined(TARGET_NR_readlinkat)
8244 case TARGET_NR_readlinkat
:
8247 p
= lock_user_string(arg2
);
8248 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
8250 ret
= -TARGET_EFAULT
;
8251 } else if (is_proc_myself((const char *)p
, "exe")) {
8252 char real
[PATH_MAX
], *temp
;
8253 temp
= realpath(exec_path
, real
);
8254 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
8255 snprintf((char *)p2
, arg4
, "%s", real
);
8257 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
8259 unlock_user(p2
, arg3
, ret
);
8260 unlock_user(p
, arg2
, 0);
8264 #ifdef TARGET_NR_uselib
8265 case TARGET_NR_uselib
:
8268 #ifdef TARGET_NR_swapon
8269 case TARGET_NR_swapon
:
8270 if (!(p
= lock_user_string(arg1
)))
8272 ret
= get_errno(swapon(p
, arg2
));
8273 unlock_user(p
, arg1
, 0);
8276 case TARGET_NR_reboot
:
8277 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
8278 /* arg4 must be ignored in all other cases */
8279 p
= lock_user_string(arg4
);
8283 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
8284 unlock_user(p
, arg4
, 0);
8286 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
8289 #ifdef TARGET_NR_readdir
8290 case TARGET_NR_readdir
:
8293 #ifdef TARGET_NR_mmap
8294 case TARGET_NR_mmap
:
8295 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8296 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8297 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8298 || defined(TARGET_S390X)
8301 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
8302 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
8310 unlock_user(v
, arg1
, 0);
8311 ret
= get_errno(target_mmap(v1
, v2
, v3
,
8312 target_to_host_bitmask(v4
, mmap_flags_tbl
),
8316 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
8317 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
8323 #ifdef TARGET_NR_mmap2
8324 case TARGET_NR_mmap2
:
8326 #define MMAP_SHIFT 12
8328 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
8329 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
8331 arg6
<< MMAP_SHIFT
));
8334 case TARGET_NR_munmap
:
8335 ret
= get_errno(target_munmap(arg1
, arg2
));
8337 case TARGET_NR_mprotect
:
8339 TaskState
*ts
= cpu
->opaque
;
8340 /* Special hack to detect libc making the stack executable. */
8341 if ((arg3
& PROT_GROWSDOWN
)
8342 && arg1
>= ts
->info
->stack_limit
8343 && arg1
<= ts
->info
->start_stack
) {
8344 arg3
&= ~PROT_GROWSDOWN
;
8345 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
8346 arg1
= ts
->info
->stack_limit
;
8349 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
8351 #ifdef TARGET_NR_mremap
8352 case TARGET_NR_mremap
:
8353 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
8356 /* ??? msync/mlock/munlock are broken for softmmu. */
8357 #ifdef TARGET_NR_msync
8358 case TARGET_NR_msync
:
8359 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
8362 #ifdef TARGET_NR_mlock
8363 case TARGET_NR_mlock
:
8364 ret
= get_errno(mlock(g2h(arg1
), arg2
));
8367 #ifdef TARGET_NR_munlock
8368 case TARGET_NR_munlock
:
8369 ret
= get_errno(munlock(g2h(arg1
), arg2
));
8372 #ifdef TARGET_NR_mlockall
8373 case TARGET_NR_mlockall
:
8374 ret
= get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
8377 #ifdef TARGET_NR_munlockall
8378 case TARGET_NR_munlockall
:
8379 ret
= get_errno(munlockall());
8382 case TARGET_NR_truncate
:
8383 if (!(p
= lock_user_string(arg1
)))
8385 ret
= get_errno(truncate(p
, arg2
));
8386 unlock_user(p
, arg1
, 0);
8388 case TARGET_NR_ftruncate
:
8389 ret
= get_errno(ftruncate(arg1
, arg2
));
8391 case TARGET_NR_fchmod
:
8392 ret
= get_errno(fchmod(arg1
, arg2
));
8394 #if defined(TARGET_NR_fchmodat)
8395 case TARGET_NR_fchmodat
:
8396 if (!(p
= lock_user_string(arg2
)))
8398 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
8399 unlock_user(p
, arg2
, 0);
8402 case TARGET_NR_getpriority
:
8403 /* Note that negative values are valid for getpriority, so we must
8404 differentiate based on errno settings. */
8406 ret
= getpriority(arg1
, arg2
);
8407 if (ret
== -1 && errno
!= 0) {
8408 ret
= -host_to_target_errno(errno
);
8412 /* Return value is the unbiased priority. Signal no error. */
8413 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
8415 /* Return value is a biased priority to avoid negative numbers. */
8419 case TARGET_NR_setpriority
:
8420 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
8422 #ifdef TARGET_NR_profil
8423 case TARGET_NR_profil
:
8426 case TARGET_NR_statfs
:
8427 if (!(p
= lock_user_string(arg1
)))
8429 ret
= get_errno(statfs(path(p
), &stfs
));
8430 unlock_user(p
, arg1
, 0);
8432 if (!is_error(ret
)) {
8433 struct target_statfs
*target_stfs
;
8435 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
8437 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
8438 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
8439 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
8440 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
8441 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
8442 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
8443 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
8444 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
8445 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
8446 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
8447 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
8448 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
8449 unlock_user_struct(target_stfs
, arg2
, 1);
8452 case TARGET_NR_fstatfs
:
8453 ret
= get_errno(fstatfs(arg1
, &stfs
));
8454 goto convert_statfs
;
8455 #ifdef TARGET_NR_statfs64
8456 case TARGET_NR_statfs64
:
8457 if (!(p
= lock_user_string(arg1
)))
8459 ret
= get_errno(statfs(path(p
), &stfs
));
8460 unlock_user(p
, arg1
, 0);
8462 if (!is_error(ret
)) {
8463 struct target_statfs64
*target_stfs
;
8465 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
8467 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
8468 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
8469 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
8470 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
8471 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
8472 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
8473 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
8474 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
8475 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
8476 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
8477 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
8478 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
8479 unlock_user_struct(target_stfs
, arg3
, 1);
8482 case TARGET_NR_fstatfs64
:
8483 ret
= get_errno(fstatfs(arg1
, &stfs
));
8484 goto convert_statfs64
;
8486 #ifdef TARGET_NR_ioperm
8487 case TARGET_NR_ioperm
:
8490 #ifdef TARGET_NR_socketcall
8491 case TARGET_NR_socketcall
:
8492 ret
= do_socketcall(arg1
, arg2
);
8495 #ifdef TARGET_NR_accept
8496 case TARGET_NR_accept
:
8497 ret
= do_accept4(arg1
, arg2
, arg3
, 0);
8500 #ifdef TARGET_NR_accept4
8501 case TARGET_NR_accept4
:
8502 ret
= do_accept4(arg1
, arg2
, arg3
, arg4
);
8505 #ifdef TARGET_NR_bind
8506 case TARGET_NR_bind
:
8507 ret
= do_bind(arg1
, arg2
, arg3
);
8510 #ifdef TARGET_NR_connect
8511 case TARGET_NR_connect
:
8512 ret
= do_connect(arg1
, arg2
, arg3
);
8515 #ifdef TARGET_NR_getpeername
8516 case TARGET_NR_getpeername
:
8517 ret
= do_getpeername(arg1
, arg2
, arg3
);
8520 #ifdef TARGET_NR_getsockname
8521 case TARGET_NR_getsockname
:
8522 ret
= do_getsockname(arg1
, arg2
, arg3
);
8525 #ifdef TARGET_NR_getsockopt
8526 case TARGET_NR_getsockopt
:
8527 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
8530 #ifdef TARGET_NR_listen
8531 case TARGET_NR_listen
:
8532 ret
= get_errno(listen(arg1
, arg2
));
8535 #ifdef TARGET_NR_recv
8536 case TARGET_NR_recv
:
8537 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
8540 #ifdef TARGET_NR_recvfrom
8541 case TARGET_NR_recvfrom
:
8542 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8545 #ifdef TARGET_NR_recvmsg
8546 case TARGET_NR_recvmsg
:
8547 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
8550 #ifdef TARGET_NR_send
8551 case TARGET_NR_send
:
8552 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
8555 #ifdef TARGET_NR_sendmsg
8556 case TARGET_NR_sendmsg
:
8557 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
8560 #ifdef TARGET_NR_sendmmsg
8561 case TARGET_NR_sendmmsg
:
8562 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
8564 case TARGET_NR_recvmmsg
:
8565 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
8568 #ifdef TARGET_NR_sendto
8569 case TARGET_NR_sendto
:
8570 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8573 #ifdef TARGET_NR_shutdown
8574 case TARGET_NR_shutdown
:
8575 ret
= get_errno(shutdown(arg1
, arg2
));
8578 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8579 case TARGET_NR_getrandom
:
8580 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
8584 ret
= get_errno(getrandom(p
, arg2
, arg3
));
8585 unlock_user(p
, arg1
, ret
);
8588 #ifdef TARGET_NR_socket
8589 case TARGET_NR_socket
:
8590 ret
= do_socket(arg1
, arg2
, arg3
);
8591 fd_trans_unregister(ret
);
8594 #ifdef TARGET_NR_socketpair
8595 case TARGET_NR_socketpair
:
8596 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
8599 #ifdef TARGET_NR_setsockopt
8600 case TARGET_NR_setsockopt
:
8601 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
8605 case TARGET_NR_syslog
:
8606 if (!(p
= lock_user_string(arg2
)))
8608 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
8609 unlock_user(p
, arg2
, 0);
8612 case TARGET_NR_setitimer
:
8614 struct itimerval value
, ovalue
, *pvalue
;
8618 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
8619 || copy_from_user_timeval(&pvalue
->it_value
,
8620 arg2
+ sizeof(struct target_timeval
)))
8625 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
8626 if (!is_error(ret
) && arg3
) {
8627 if (copy_to_user_timeval(arg3
,
8628 &ovalue
.it_interval
)
8629 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
8635 case TARGET_NR_getitimer
:
8637 struct itimerval value
;
8639 ret
= get_errno(getitimer(arg1
, &value
));
8640 if (!is_error(ret
) && arg2
) {
8641 if (copy_to_user_timeval(arg2
,
8643 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
8649 #ifdef TARGET_NR_stat
8650 case TARGET_NR_stat
:
8651 if (!(p
= lock_user_string(arg1
)))
8653 ret
= get_errno(stat(path(p
), &st
));
8654 unlock_user(p
, arg1
, 0);
8657 #ifdef TARGET_NR_lstat
8658 case TARGET_NR_lstat
:
8659 if (!(p
= lock_user_string(arg1
)))
8661 ret
= get_errno(lstat(path(p
), &st
));
8662 unlock_user(p
, arg1
, 0);
8665 case TARGET_NR_fstat
:
8667 ret
= get_errno(fstat(arg1
, &st
));
8668 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
8671 if (!is_error(ret
)) {
8672 struct target_stat
*target_st
;
8674 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
8676 memset(target_st
, 0, sizeof(*target_st
));
8677 __put_user(st
.st_dev
, &target_st
->st_dev
);
8678 __put_user(st
.st_ino
, &target_st
->st_ino
);
8679 __put_user(st
.st_mode
, &target_st
->st_mode
);
8680 __put_user(st
.st_uid
, &target_st
->st_uid
);
8681 __put_user(st
.st_gid
, &target_st
->st_gid
);
8682 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
8683 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
8684 __put_user(st
.st_size
, &target_st
->st_size
);
8685 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
8686 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
8687 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
8688 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
8689 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
8690 unlock_user_struct(target_st
, arg2
, 1);
8694 #ifdef TARGET_NR_olduname
8695 case TARGET_NR_olduname
:
8698 #ifdef TARGET_NR_iopl
8699 case TARGET_NR_iopl
:
8702 case TARGET_NR_vhangup
:
8703 ret
= get_errno(vhangup());
8705 #ifdef TARGET_NR_idle
8706 case TARGET_NR_idle
:
8709 #ifdef TARGET_NR_syscall
8710 case TARGET_NR_syscall
:
8711 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
8712 arg6
, arg7
, arg8
, 0);
8715 case TARGET_NR_wait4
:
8718 abi_long status_ptr
= arg2
;
8719 struct rusage rusage
, *rusage_ptr
;
8720 abi_ulong target_rusage
= arg4
;
8721 abi_long rusage_err
;
8723 rusage_ptr
= &rusage
;
8726 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
8727 if (!is_error(ret
)) {
8728 if (status_ptr
&& ret
) {
8729 status
= host_to_target_waitstatus(status
);
8730 if (put_user_s32(status
, status_ptr
))
8733 if (target_rusage
) {
8734 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
8742 #ifdef TARGET_NR_swapoff
8743 case TARGET_NR_swapoff
:
8744 if (!(p
= lock_user_string(arg1
)))
8746 ret
= get_errno(swapoff(p
));
8747 unlock_user(p
, arg1
, 0);
8750 case TARGET_NR_sysinfo
:
8752 struct target_sysinfo
*target_value
;
8753 struct sysinfo value
;
8754 ret
= get_errno(sysinfo(&value
));
8755 if (!is_error(ret
) && arg1
)
8757 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
8759 __put_user(value
.uptime
, &target_value
->uptime
);
8760 __put_user(value
.loads
[0], &target_value
->loads
[0]);
8761 __put_user(value
.loads
[1], &target_value
->loads
[1]);
8762 __put_user(value
.loads
[2], &target_value
->loads
[2]);
8763 __put_user(value
.totalram
, &target_value
->totalram
);
8764 __put_user(value
.freeram
, &target_value
->freeram
);
8765 __put_user(value
.sharedram
, &target_value
->sharedram
);
8766 __put_user(value
.bufferram
, &target_value
->bufferram
);
8767 __put_user(value
.totalswap
, &target_value
->totalswap
);
8768 __put_user(value
.freeswap
, &target_value
->freeswap
);
8769 __put_user(value
.procs
, &target_value
->procs
);
8770 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
8771 __put_user(value
.freehigh
, &target_value
->freehigh
);
8772 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
8773 unlock_user_struct(target_value
, arg1
, 1);
8777 #ifdef TARGET_NR_ipc
8779 ret
= do_ipc(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8782 #ifdef TARGET_NR_semget
8783 case TARGET_NR_semget
:
8784 ret
= get_errno(semget(arg1
, arg2
, arg3
));
8787 #ifdef TARGET_NR_semop
8788 case TARGET_NR_semop
:
8789 ret
= do_semop(arg1
, arg2
, arg3
);
8792 #ifdef TARGET_NR_semctl
8793 case TARGET_NR_semctl
:
8794 ret
= do_semctl(arg1
, arg2
, arg3
, arg4
);
8797 #ifdef TARGET_NR_msgctl
8798 case TARGET_NR_msgctl
:
8799 ret
= do_msgctl(arg1
, arg2
, arg3
);
8802 #ifdef TARGET_NR_msgget
8803 case TARGET_NR_msgget
:
8804 ret
= get_errno(msgget(arg1
, arg2
));
8807 #ifdef TARGET_NR_msgrcv
8808 case TARGET_NR_msgrcv
:
8809 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
8812 #ifdef TARGET_NR_msgsnd
8813 case TARGET_NR_msgsnd
:
8814 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
8817 #ifdef TARGET_NR_shmget
8818 case TARGET_NR_shmget
:
8819 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
8822 #ifdef TARGET_NR_shmctl
8823 case TARGET_NR_shmctl
:
8824 ret
= do_shmctl(arg1
, arg2
, arg3
);
8827 #ifdef TARGET_NR_shmat
8828 case TARGET_NR_shmat
:
8829 ret
= do_shmat(arg1
, arg2
, arg3
);
8832 #ifdef TARGET_NR_shmdt
8833 case TARGET_NR_shmdt
:
8834 ret
= do_shmdt(arg1
);
8837 case TARGET_NR_fsync
:
8838 ret
= get_errno(fsync(arg1
));
8840 case TARGET_NR_clone
:
8841 /* Linux manages to have three different orderings for its
8842 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
8843 * match the kernel's CONFIG_CLONE_* settings.
8844 * Microblaze is further special in that it uses a sixth
8845 * implicit argument to clone for the TLS pointer.
8847 #if defined(TARGET_MICROBLAZE)
8848 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
8849 #elif defined(TARGET_CLONE_BACKWARDS)
8850 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
8851 #elif defined(TARGET_CLONE_BACKWARDS2)
8852 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
8854 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
8857 #ifdef __NR_exit_group
8858 /* new thread calls */
8859 case TARGET_NR_exit_group
:
8863 gdb_exit(cpu_env
, arg1
);
8864 ret
= get_errno(exit_group(arg1
));
8867 case TARGET_NR_setdomainname
:
8868 if (!(p
= lock_user_string(arg1
)))
8870 ret
= get_errno(setdomainname(p
, arg2
));
8871 unlock_user(p
, arg1
, 0);
8873 case TARGET_NR_uname
:
8874 /* no need to transcode because we use the linux syscall */
8876 struct new_utsname
* buf
;
8878 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
8880 ret
= get_errno(sys_uname(buf
));
8881 if (!is_error(ret
)) {
8882 /* Overrite the native machine name with whatever is being
8884 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
8885 /* Allow the user to override the reported release. */
8886 if (qemu_uname_release
&& *qemu_uname_release
)
8887 strcpy (buf
->release
, qemu_uname_release
);
8889 unlock_user_struct(buf
, arg1
, 1);
8893 case TARGET_NR_modify_ldt
:
8894 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
8896 #if !defined(TARGET_X86_64)
8897 case TARGET_NR_vm86old
:
8899 case TARGET_NR_vm86
:
8900 ret
= do_vm86(cpu_env
, arg1
, arg2
);
8904 case TARGET_NR_adjtimex
:
8906 #ifdef TARGET_NR_create_module
8907 case TARGET_NR_create_module
:
8909 case TARGET_NR_init_module
:
8910 case TARGET_NR_delete_module
:
8911 #ifdef TARGET_NR_get_kernel_syms
8912 case TARGET_NR_get_kernel_syms
:
8915 case TARGET_NR_quotactl
:
8917 case TARGET_NR_getpgid
:
8918 ret
= get_errno(getpgid(arg1
));
8920 case TARGET_NR_fchdir
:
8921 ret
= get_errno(fchdir(arg1
));
8923 #ifdef TARGET_NR_bdflush /* not on x86_64 */
8924 case TARGET_NR_bdflush
:
8927 #ifdef TARGET_NR_sysfs
8928 case TARGET_NR_sysfs
:
8931 case TARGET_NR_personality
:
8932 ret
= get_errno(personality(arg1
));
8934 #ifdef TARGET_NR_afs_syscall
8935 case TARGET_NR_afs_syscall
:
8938 #ifdef TARGET_NR__llseek /* Not on alpha */
8939 case TARGET_NR__llseek
:
8942 #if !defined(__NR_llseek)
8943 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | arg3
, arg5
);
8945 ret
= get_errno(res
);
8950 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
8952 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
8958 #ifdef TARGET_NR_getdents
8959 case TARGET_NR_getdents
:
8960 #ifdef __NR_getdents
8961 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
8963 struct target_dirent
*target_dirp
;
8964 struct linux_dirent
*dirp
;
8965 abi_long count
= arg3
;
8967 dirp
= g_try_malloc(count
);
8969 ret
= -TARGET_ENOMEM
;
8973 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
8974 if (!is_error(ret
)) {
8975 struct linux_dirent
*de
;
8976 struct target_dirent
*tde
;
8978 int reclen
, treclen
;
8979 int count1
, tnamelen
;
8983 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
8987 reclen
= de
->d_reclen
;
8988 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
8989 assert(tnamelen
>= 0);
8990 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
8991 assert(count1
+ treclen
<= count
);
8992 tde
->d_reclen
= tswap16(treclen
);
8993 tde
->d_ino
= tswapal(de
->d_ino
);
8994 tde
->d_off
= tswapal(de
->d_off
);
8995 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
8996 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
8998 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9002 unlock_user(target_dirp
, arg2
, ret
);
9008 struct linux_dirent
*dirp
;
9009 abi_long count
= arg3
;
9011 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9013 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9014 if (!is_error(ret
)) {
9015 struct linux_dirent
*de
;
9020 reclen
= de
->d_reclen
;
9023 de
->d_reclen
= tswap16(reclen
);
9024 tswapls(&de
->d_ino
);
9025 tswapls(&de
->d_off
);
9026 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9030 unlock_user(dirp
, arg2
, ret
);
9034 /* Implement getdents in terms of getdents64 */
9036 struct linux_dirent64
*dirp
;
9037 abi_long count
= arg3
;
9039 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
9043 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9044 if (!is_error(ret
)) {
9045 /* Convert the dirent64 structs to target dirent. We do this
9046 * in-place, since we can guarantee that a target_dirent is no
9047 * larger than a dirent64; however this means we have to be
9048 * careful to read everything before writing in the new format.
9050 struct linux_dirent64
*de
;
9051 struct target_dirent
*tde
;
9056 tde
= (struct target_dirent
*)dirp
;
9058 int namelen
, treclen
;
9059 int reclen
= de
->d_reclen
;
9060 uint64_t ino
= de
->d_ino
;
9061 int64_t off
= de
->d_off
;
9062 uint8_t type
= de
->d_type
;
9064 namelen
= strlen(de
->d_name
);
9065 treclen
= offsetof(struct target_dirent
, d_name
)
9067 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
9069 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
9070 tde
->d_ino
= tswapal(ino
);
9071 tde
->d_off
= tswapal(off
);
9072 tde
->d_reclen
= tswap16(treclen
);
9073 /* The target_dirent type is in what was formerly a padding
9074 * byte at the end of the structure:
9076 *(((char *)tde
) + treclen
- 1) = type
;
9078 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9079 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9085 unlock_user(dirp
, arg2
, ret
);
9089 #endif /* TARGET_NR_getdents */
9090 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9091 case TARGET_NR_getdents64
:
9093 struct linux_dirent64
*dirp
;
9094 abi_long count
= arg3
;
9095 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9097 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9098 if (!is_error(ret
)) {
9099 struct linux_dirent64
*de
;
9104 reclen
= de
->d_reclen
;
9107 de
->d_reclen
= tswap16(reclen
);
9108 tswap64s((uint64_t *)&de
->d_ino
);
9109 tswap64s((uint64_t *)&de
->d_off
);
9110 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9114 unlock_user(dirp
, arg2
, ret
);
9117 #endif /* TARGET_NR_getdents64 */
9118 #if defined(TARGET_NR__newselect)
9119 case TARGET_NR__newselect
:
9120 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9123 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9124 # ifdef TARGET_NR_poll
9125 case TARGET_NR_poll
:
9127 # ifdef TARGET_NR_ppoll
9128 case TARGET_NR_ppoll
:
9131 struct target_pollfd
*target_pfd
;
9132 unsigned int nfds
= arg2
;
9139 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
9140 sizeof(struct target_pollfd
) * nfds
, 1);
9145 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
9146 for (i
= 0; i
< nfds
; i
++) {
9147 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
9148 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
9153 # ifdef TARGET_NR_ppoll
9154 case TARGET_NR_ppoll
:
9156 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
9157 target_sigset_t
*target_set
;
9158 sigset_t _set
, *set
= &_set
;
9161 if (target_to_host_timespec(timeout_ts
, arg3
)) {
9162 unlock_user(target_pfd
, arg1
, 0);
9170 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
9172 unlock_user(target_pfd
, arg1
, 0);
9175 target_to_host_sigset(set
, target_set
);
9180 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
9181 set
, SIGSET_T_SIZE
));
9183 if (!is_error(ret
) && arg3
) {
9184 host_to_target_timespec(arg3
, timeout_ts
);
9187 unlock_user(target_set
, arg4
, 0);
9192 # ifdef TARGET_NR_poll
9193 case TARGET_NR_poll
:
9195 struct timespec ts
, *pts
;
9198 /* Convert ms to secs, ns */
9199 ts
.tv_sec
= arg3
/ 1000;
9200 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
9203 /* -ve poll() timeout means "infinite" */
9206 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
9211 g_assert_not_reached();
9214 if (!is_error(ret
)) {
9215 for(i
= 0; i
< nfds
; i
++) {
9216 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
9219 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
9223 case TARGET_NR_flock
:
9224 /* NOTE: the flock constant seems to be the same for every
9226 ret
= get_errno(safe_flock(arg1
, arg2
));
9228 case TARGET_NR_readv
:
9230 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
9232 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
9233 unlock_iovec(vec
, arg2
, arg3
, 1);
9235 ret
= -host_to_target_errno(errno
);
9239 case TARGET_NR_writev
:
9241 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
9243 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
9244 unlock_iovec(vec
, arg2
, arg3
, 0);
9246 ret
= -host_to_target_errno(errno
);
9250 case TARGET_NR_getsid
:
9251 ret
= get_errno(getsid(arg1
));
9253 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9254 case TARGET_NR_fdatasync
:
9255 ret
= get_errno(fdatasync(arg1
));
9258 #ifdef TARGET_NR__sysctl
9259 case TARGET_NR__sysctl
:
9260 /* We don't implement this, but ENOTDIR is always a safe
9262 ret
= -TARGET_ENOTDIR
;
9265 case TARGET_NR_sched_getaffinity
:
9267 unsigned int mask_size
;
9268 unsigned long *mask
;
9271 * sched_getaffinity needs multiples of ulong, so need to take
9272 * care of mismatches between target ulong and host ulong sizes.
9274 if (arg2
& (sizeof(abi_ulong
) - 1)) {
9275 ret
= -TARGET_EINVAL
;
9278 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
9280 mask
= alloca(mask_size
);
9281 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
9283 if (!is_error(ret
)) {
9285 /* More data returned than the caller's buffer will fit.
9286 * This only happens if sizeof(abi_long) < sizeof(long)
9287 * and the caller passed us a buffer holding an odd number
9288 * of abi_longs. If the host kernel is actually using the
9289 * extra 4 bytes then fail EINVAL; otherwise we can just
9290 * ignore them and only copy the interesting part.
9292 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
9293 if (numcpus
> arg2
* 8) {
9294 ret
= -TARGET_EINVAL
;
9300 if (copy_to_user(arg3
, mask
, ret
)) {
9306 case TARGET_NR_sched_setaffinity
:
9308 unsigned int mask_size
;
9309 unsigned long *mask
;
9312 * sched_setaffinity needs multiples of ulong, so need to take
9313 * care of mismatches between target ulong and host ulong sizes.
9315 if (arg2
& (sizeof(abi_ulong
) - 1)) {
9316 ret
= -TARGET_EINVAL
;
9319 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
9321 mask
= alloca(mask_size
);
9322 if (!lock_user_struct(VERIFY_READ
, p
, arg3
, 1)) {
9325 memcpy(mask
, p
, arg2
);
9326 unlock_user_struct(p
, arg2
, 0);
9328 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
9331 case TARGET_NR_sched_setparam
:
9333 struct sched_param
*target_schp
;
9334 struct sched_param schp
;
9337 return -TARGET_EINVAL
;
9339 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
9341 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
9342 unlock_user_struct(target_schp
, arg2
, 0);
9343 ret
= get_errno(sched_setparam(arg1
, &schp
));
9346 case TARGET_NR_sched_getparam
:
9348 struct sched_param
*target_schp
;
9349 struct sched_param schp
;
9352 return -TARGET_EINVAL
;
9354 ret
= get_errno(sched_getparam(arg1
, &schp
));
9355 if (!is_error(ret
)) {
9356 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
9358 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
9359 unlock_user_struct(target_schp
, arg2
, 1);
9363 case TARGET_NR_sched_setscheduler
:
9365 struct sched_param
*target_schp
;
9366 struct sched_param schp
;
9368 return -TARGET_EINVAL
;
9370 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
9372 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
9373 unlock_user_struct(target_schp
, arg3
, 0);
9374 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
9377 case TARGET_NR_sched_getscheduler
:
9378 ret
= get_errno(sched_getscheduler(arg1
));
9380 case TARGET_NR_sched_yield
:
9381 ret
= get_errno(sched_yield());
9383 case TARGET_NR_sched_get_priority_max
:
9384 ret
= get_errno(sched_get_priority_max(arg1
));
9386 case TARGET_NR_sched_get_priority_min
:
9387 ret
= get_errno(sched_get_priority_min(arg1
));
9389 case TARGET_NR_sched_rr_get_interval
:
9392 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
9393 if (!is_error(ret
)) {
9394 ret
= host_to_target_timespec(arg2
, &ts
);
9398 case TARGET_NR_nanosleep
:
9400 struct timespec req
, rem
;
9401 target_to_host_timespec(&req
, arg1
);
9402 ret
= get_errno(safe_nanosleep(&req
, &rem
));
9403 if (is_error(ret
) && arg2
) {
9404 host_to_target_timespec(arg2
, &rem
);
9408 #ifdef TARGET_NR_query_module
9409 case TARGET_NR_query_module
:
9412 #ifdef TARGET_NR_nfsservctl
9413 case TARGET_NR_nfsservctl
:
9416 case TARGET_NR_prctl
:
9418 case PR_GET_PDEATHSIG
:
9421 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
9422 if (!is_error(ret
) && arg2
9423 && put_user_ual(deathsig
, arg2
)) {
9431 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
9435 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
9437 unlock_user(name
, arg2
, 16);
9442 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
9446 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
9448 unlock_user(name
, arg2
, 0);
9453 /* Most prctl options have no pointer arguments */
9454 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
9458 #ifdef TARGET_NR_arch_prctl
9459 case TARGET_NR_arch_prctl
:
9460 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9461 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
9467 #ifdef TARGET_NR_pread64
9468 case TARGET_NR_pread64
:
9469 if (regpairs_aligned(cpu_env
)) {
9473 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
9475 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
9476 unlock_user(p
, arg2
, ret
);
9478 case TARGET_NR_pwrite64
:
9479 if (regpairs_aligned(cpu_env
)) {
9483 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
9485 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
9486 unlock_user(p
, arg2
, 0);
9489 case TARGET_NR_getcwd
:
9490 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
9492 ret
= get_errno(sys_getcwd1(p
, arg2
));
9493 unlock_user(p
, arg1
, ret
);
9495 case TARGET_NR_capget
:
9496 case TARGET_NR_capset
:
9498 struct target_user_cap_header
*target_header
;
9499 struct target_user_cap_data
*target_data
= NULL
;
9500 struct __user_cap_header_struct header
;
9501 struct __user_cap_data_struct data
[2];
9502 struct __user_cap_data_struct
*dataptr
= NULL
;
9503 int i
, target_datalen
;
9506 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
9509 header
.version
= tswap32(target_header
->version
);
9510 header
.pid
= tswap32(target_header
->pid
);
9512 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
9513 /* Version 2 and up takes pointer to two user_data structs */
9517 target_datalen
= sizeof(*target_data
) * data_items
;
9520 if (num
== TARGET_NR_capget
) {
9521 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
9523 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
9526 unlock_user_struct(target_header
, arg1
, 0);
9530 if (num
== TARGET_NR_capset
) {
9531 for (i
= 0; i
< data_items
; i
++) {
9532 data
[i
].effective
= tswap32(target_data
[i
].effective
);
9533 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
9534 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
9541 if (num
== TARGET_NR_capget
) {
9542 ret
= get_errno(capget(&header
, dataptr
));
9544 ret
= get_errno(capset(&header
, dataptr
));
9547 /* The kernel always updates version for both capget and capset */
9548 target_header
->version
= tswap32(header
.version
);
9549 unlock_user_struct(target_header
, arg1
, 1);
9552 if (num
== TARGET_NR_capget
) {
9553 for (i
= 0; i
< data_items
; i
++) {
9554 target_data
[i
].effective
= tswap32(data
[i
].effective
);
9555 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
9556 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
9558 unlock_user(target_data
, arg2
, target_datalen
);
9560 unlock_user(target_data
, arg2
, 0);
9565 case TARGET_NR_sigaltstack
:
9566 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUArchState
*)cpu_env
));
9569 #ifdef CONFIG_SENDFILE
9570 case TARGET_NR_sendfile
:
9575 ret
= get_user_sal(off
, arg3
);
9576 if (is_error(ret
)) {
9581 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
9582 if (!is_error(ret
) && arg3
) {
9583 abi_long ret2
= put_user_sal(off
, arg3
);
9584 if (is_error(ret2
)) {
9590 #ifdef TARGET_NR_sendfile64
9591 case TARGET_NR_sendfile64
:
9596 ret
= get_user_s64(off
, arg3
);
9597 if (is_error(ret
)) {
9602 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
9603 if (!is_error(ret
) && arg3
) {
9604 abi_long ret2
= put_user_s64(off
, arg3
);
9605 if (is_error(ret2
)) {
9613 case TARGET_NR_sendfile
:
9614 #ifdef TARGET_NR_sendfile64
9615 case TARGET_NR_sendfile64
:
9620 #ifdef TARGET_NR_getpmsg
9621 case TARGET_NR_getpmsg
:
9624 #ifdef TARGET_NR_putpmsg
9625 case TARGET_NR_putpmsg
:
9628 #ifdef TARGET_NR_vfork
9629 case TARGET_NR_vfork
:
9630 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
9634 #ifdef TARGET_NR_ugetrlimit
9635 case TARGET_NR_ugetrlimit
:
9638 int resource
= target_to_host_resource(arg1
);
9639 ret
= get_errno(getrlimit(resource
, &rlim
));
9640 if (!is_error(ret
)) {
9641 struct target_rlimit
*target_rlim
;
9642 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
9644 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
9645 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
9646 unlock_user_struct(target_rlim
, arg2
, 1);
9651 #ifdef TARGET_NR_truncate64
9652 case TARGET_NR_truncate64
:
9653 if (!(p
= lock_user_string(arg1
)))
9655 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
9656 unlock_user(p
, arg1
, 0);
9659 #ifdef TARGET_NR_ftruncate64
9660 case TARGET_NR_ftruncate64
:
9661 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
9664 #ifdef TARGET_NR_stat64
9665 case TARGET_NR_stat64
:
9666 if (!(p
= lock_user_string(arg1
)))
9668 ret
= get_errno(stat(path(p
), &st
));
9669 unlock_user(p
, arg1
, 0);
9671 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
9674 #ifdef TARGET_NR_lstat64
9675 case TARGET_NR_lstat64
:
9676 if (!(p
= lock_user_string(arg1
)))
9678 ret
= get_errno(lstat(path(p
), &st
));
9679 unlock_user(p
, arg1
, 0);
9681 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
9684 #ifdef TARGET_NR_fstat64
9685 case TARGET_NR_fstat64
:
9686 ret
= get_errno(fstat(arg1
, &st
));
9688 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
9691 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
9692 #ifdef TARGET_NR_fstatat64
9693 case TARGET_NR_fstatat64
:
9695 #ifdef TARGET_NR_newfstatat
9696 case TARGET_NR_newfstatat
:
9698 if (!(p
= lock_user_string(arg2
)))
9700 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
9702 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
9705 #ifdef TARGET_NR_lchown
9706 case TARGET_NR_lchown
:
9707 if (!(p
= lock_user_string(arg1
)))
9709 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
9710 unlock_user(p
, arg1
, 0);
9713 #ifdef TARGET_NR_getuid
9714 case TARGET_NR_getuid
:
9715 ret
= get_errno(high2lowuid(getuid()));
9718 #ifdef TARGET_NR_getgid
9719 case TARGET_NR_getgid
:
9720 ret
= get_errno(high2lowgid(getgid()));
9723 #ifdef TARGET_NR_geteuid
9724 case TARGET_NR_geteuid
:
9725 ret
= get_errno(high2lowuid(geteuid()));
9728 #ifdef TARGET_NR_getegid
9729 case TARGET_NR_getegid
:
9730 ret
= get_errno(high2lowgid(getegid()));
9733 case TARGET_NR_setreuid
:
9734 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
9736 case TARGET_NR_setregid
:
9737 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
9739 case TARGET_NR_getgroups
:
9741 int gidsetsize
= arg1
;
9742 target_id
*target_grouplist
;
9746 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
9747 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
9748 if (gidsetsize
== 0)
9750 if (!is_error(ret
)) {
9751 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
9752 if (!target_grouplist
)
9754 for(i
= 0;i
< ret
; i
++)
9755 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
9756 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
9760 case TARGET_NR_setgroups
:
9762 int gidsetsize
= arg1
;
9763 target_id
*target_grouplist
;
9764 gid_t
*grouplist
= NULL
;
9767 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
9768 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
9769 if (!target_grouplist
) {
9770 ret
= -TARGET_EFAULT
;
9773 for (i
= 0; i
< gidsetsize
; i
++) {
9774 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
9776 unlock_user(target_grouplist
, arg2
, 0);
9778 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
9781 case TARGET_NR_fchown
:
9782 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
9784 #if defined(TARGET_NR_fchownat)
9785 case TARGET_NR_fchownat
:
9786 if (!(p
= lock_user_string(arg2
)))
9788 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
9789 low2highgid(arg4
), arg5
));
9790 unlock_user(p
, arg2
, 0);
9793 #ifdef TARGET_NR_setresuid
9794 case TARGET_NR_setresuid
:
9795 ret
= get_errno(sys_setresuid(low2highuid(arg1
),
9797 low2highuid(arg3
)));
9800 #ifdef TARGET_NR_getresuid
9801 case TARGET_NR_getresuid
:
9803 uid_t ruid
, euid
, suid
;
9804 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
9805 if (!is_error(ret
)) {
9806 if (put_user_id(high2lowuid(ruid
), arg1
)
9807 || put_user_id(high2lowuid(euid
), arg2
)
9808 || put_user_id(high2lowuid(suid
), arg3
))
9814 #ifdef TARGET_NR_getresgid
9815 case TARGET_NR_setresgid
:
9816 ret
= get_errno(sys_setresgid(low2highgid(arg1
),
9818 low2highgid(arg3
)));
9821 #ifdef TARGET_NR_getresgid
9822 case TARGET_NR_getresgid
:
9824 gid_t rgid
, egid
, sgid
;
9825 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
9826 if (!is_error(ret
)) {
9827 if (put_user_id(high2lowgid(rgid
), arg1
)
9828 || put_user_id(high2lowgid(egid
), arg2
)
9829 || put_user_id(high2lowgid(sgid
), arg3
))
9835 #ifdef TARGET_NR_chown
9836 case TARGET_NR_chown
:
9837 if (!(p
= lock_user_string(arg1
)))
9839 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
9840 unlock_user(p
, arg1
, 0);
9843 case TARGET_NR_setuid
:
9844 ret
= get_errno(sys_setuid(low2highuid(arg1
)));
9846 case TARGET_NR_setgid
:
9847 ret
= get_errno(sys_setgid(low2highgid(arg1
)));
9849 case TARGET_NR_setfsuid
:
9850 ret
= get_errno(setfsuid(arg1
));
9852 case TARGET_NR_setfsgid
:
9853 ret
= get_errno(setfsgid(arg1
));
9856 #ifdef TARGET_NR_lchown32
9857 case TARGET_NR_lchown32
:
9858 if (!(p
= lock_user_string(arg1
)))
9860 ret
= get_errno(lchown(p
, arg2
, arg3
));
9861 unlock_user(p
, arg1
, 0);
9864 #ifdef TARGET_NR_getuid32
9865 case TARGET_NR_getuid32
:
9866 ret
= get_errno(getuid());
9870 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
9871 /* Alpha specific */
9872 case TARGET_NR_getxuid
:
9876 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
9878 ret
= get_errno(getuid());
9881 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
9882 /* Alpha specific */
9883 case TARGET_NR_getxgid
:
9887 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
9889 ret
= get_errno(getgid());
9892 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
9893 /* Alpha specific */
9894 case TARGET_NR_osf_getsysinfo
:
9895 ret
= -TARGET_EOPNOTSUPP
;
9897 case TARGET_GSI_IEEE_FP_CONTROL
:
9899 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
9901 /* Copied from linux ieee_fpcr_to_swcr. */
9902 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
9903 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
9904 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
9905 | SWCR_TRAP_ENABLE_DZE
9906 | SWCR_TRAP_ENABLE_OVF
);
9907 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
9908 | SWCR_TRAP_ENABLE_INE
);
9909 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
9910 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
9912 if (put_user_u64 (swcr
, arg2
))
9918 /* case GSI_IEEE_STATE_AT_SIGNAL:
9919 -- Not implemented in linux kernel.
9921 -- Retrieves current unaligned access state; not much used.
9923 -- Retrieves implver information; surely not used.
9925 -- Grabs a copy of the HWRPB; surely not used.
9930 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
9931 /* Alpha specific */
9932 case TARGET_NR_osf_setsysinfo
:
9933 ret
= -TARGET_EOPNOTSUPP
;
9935 case TARGET_SSI_IEEE_FP_CONTROL
:
9937 uint64_t swcr
, fpcr
, orig_fpcr
;
9939 if (get_user_u64 (swcr
, arg2
)) {
9942 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
9943 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
9945 /* Copied from linux ieee_swcr_to_fpcr. */
9946 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
9947 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
9948 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
9949 | SWCR_TRAP_ENABLE_DZE
9950 | SWCR_TRAP_ENABLE_OVF
)) << 48;
9951 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
9952 | SWCR_TRAP_ENABLE_INE
)) << 57;
9953 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
9954 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
9956 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
9961 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
9963 uint64_t exc
, fpcr
, orig_fpcr
;
9966 if (get_user_u64(exc
, arg2
)) {
9970 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
9972 /* We only add to the exception status here. */
9973 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
9975 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
9978 /* Old exceptions are not signaled. */
9979 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
9981 /* If any exceptions set by this call,
9982 and are unmasked, send a signal. */
9984 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
9985 si_code
= TARGET_FPE_FLTRES
;
9987 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
9988 si_code
= TARGET_FPE_FLTUND
;
9990 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
9991 si_code
= TARGET_FPE_FLTOVF
;
9993 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
9994 si_code
= TARGET_FPE_FLTDIV
;
9996 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
9997 si_code
= TARGET_FPE_FLTINV
;
10000 target_siginfo_t info
;
10001 info
.si_signo
= SIGFPE
;
10003 info
.si_code
= si_code
;
10004 info
._sifields
._sigfault
._addr
10005 = ((CPUArchState
*)cpu_env
)->pc
;
10006 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
10011 /* case SSI_NVPAIRS:
10012 -- Used with SSIN_UACPROC to enable unaligned accesses.
10013 case SSI_IEEE_STATE_AT_SIGNAL:
10014 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10015 -- Not implemented in linux kernel
10020 #ifdef TARGET_NR_osf_sigprocmask
10021 /* Alpha specific. */
10022 case TARGET_NR_osf_sigprocmask
:
10026 sigset_t set
, oldset
;
10029 case TARGET_SIG_BLOCK
:
10032 case TARGET_SIG_UNBLOCK
:
10035 case TARGET_SIG_SETMASK
:
10039 ret
= -TARGET_EINVAL
;
10043 target_to_host_old_sigset(&set
, &mask
);
10044 ret
= do_sigprocmask(how
, &set
, &oldset
);
10046 host_to_target_old_sigset(&mask
, &oldset
);
10053 #ifdef TARGET_NR_getgid32
10054 case TARGET_NR_getgid32
:
10055 ret
= get_errno(getgid());
10058 #ifdef TARGET_NR_geteuid32
10059 case TARGET_NR_geteuid32
:
10060 ret
= get_errno(geteuid());
10063 #ifdef TARGET_NR_getegid32
10064 case TARGET_NR_getegid32
:
10065 ret
= get_errno(getegid());
10068 #ifdef TARGET_NR_setreuid32
10069 case TARGET_NR_setreuid32
:
10070 ret
= get_errno(setreuid(arg1
, arg2
));
10073 #ifdef TARGET_NR_setregid32
10074 case TARGET_NR_setregid32
:
10075 ret
= get_errno(setregid(arg1
, arg2
));
10078 #ifdef TARGET_NR_getgroups32
10079 case TARGET_NR_getgroups32
:
10081 int gidsetsize
= arg1
;
10082 uint32_t *target_grouplist
;
10086 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10087 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10088 if (gidsetsize
== 0)
10090 if (!is_error(ret
)) {
10091 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
10092 if (!target_grouplist
) {
10093 ret
= -TARGET_EFAULT
;
10096 for(i
= 0;i
< ret
; i
++)
10097 target_grouplist
[i
] = tswap32(grouplist
[i
]);
10098 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
10103 #ifdef TARGET_NR_setgroups32
10104 case TARGET_NR_setgroups32
:
10106 int gidsetsize
= arg1
;
10107 uint32_t *target_grouplist
;
10111 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10112 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
10113 if (!target_grouplist
) {
10114 ret
= -TARGET_EFAULT
;
10117 for(i
= 0;i
< gidsetsize
; i
++)
10118 grouplist
[i
] = tswap32(target_grouplist
[i
]);
10119 unlock_user(target_grouplist
, arg2
, 0);
10120 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
10124 #ifdef TARGET_NR_fchown32
10125 case TARGET_NR_fchown32
:
10126 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
10129 #ifdef TARGET_NR_setresuid32
10130 case TARGET_NR_setresuid32
:
10131 ret
= get_errno(sys_setresuid(arg1
, arg2
, arg3
));
10134 #ifdef TARGET_NR_getresuid32
10135 case TARGET_NR_getresuid32
:
10137 uid_t ruid
, euid
, suid
;
10138 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
10139 if (!is_error(ret
)) {
10140 if (put_user_u32(ruid
, arg1
)
10141 || put_user_u32(euid
, arg2
)
10142 || put_user_u32(suid
, arg3
))
10148 #ifdef TARGET_NR_setresgid32
10149 case TARGET_NR_setresgid32
:
10150 ret
= get_errno(sys_setresgid(arg1
, arg2
, arg3
));
10153 #ifdef TARGET_NR_getresgid32
10154 case TARGET_NR_getresgid32
:
10156 gid_t rgid
, egid
, sgid
;
10157 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
10158 if (!is_error(ret
)) {
10159 if (put_user_u32(rgid
, arg1
)
10160 || put_user_u32(egid
, arg2
)
10161 || put_user_u32(sgid
, arg3
))
10167 #ifdef TARGET_NR_chown32
10168 case TARGET_NR_chown32
:
10169 if (!(p
= lock_user_string(arg1
)))
10171 ret
= get_errno(chown(p
, arg2
, arg3
));
10172 unlock_user(p
, arg1
, 0);
10175 #ifdef TARGET_NR_setuid32
10176 case TARGET_NR_setuid32
:
10177 ret
= get_errno(sys_setuid(arg1
));
10180 #ifdef TARGET_NR_setgid32
10181 case TARGET_NR_setgid32
:
10182 ret
= get_errno(sys_setgid(arg1
));
10185 #ifdef TARGET_NR_setfsuid32
10186 case TARGET_NR_setfsuid32
:
10187 ret
= get_errno(setfsuid(arg1
));
10190 #ifdef TARGET_NR_setfsgid32
10191 case TARGET_NR_setfsgid32
:
10192 ret
= get_errno(setfsgid(arg1
));
10196 case TARGET_NR_pivot_root
:
10197 goto unimplemented
;
10198 #ifdef TARGET_NR_mincore
10199 case TARGET_NR_mincore
:
10202 ret
= -TARGET_EFAULT
;
10203 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
10205 if (!(p
= lock_user_string(arg3
)))
10207 ret
= get_errno(mincore(a
, arg2
, p
));
10208 unlock_user(p
, arg3
, ret
);
10210 unlock_user(a
, arg1
, 0);
10214 #ifdef TARGET_NR_arm_fadvise64_64
10215 case TARGET_NR_arm_fadvise64_64
:
10216 /* arm_fadvise64_64 looks like fadvise64_64 but
10217 * with different argument order: fd, advice, offset, len
10218 * rather than the usual fd, offset, len, advice.
10219 * Note that offset and len are both 64-bit so appear as
10220 * pairs of 32-bit registers.
10222 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
10223 target_offset64(arg5
, arg6
), arg2
);
10224 ret
= -host_to_target_errno(ret
);
10228 #if TARGET_ABI_BITS == 32
10230 #ifdef TARGET_NR_fadvise64_64
10231 case TARGET_NR_fadvise64_64
:
10232 /* 6 args: fd, offset (high, low), len (high, low), advice */
10233 if (regpairs_aligned(cpu_env
)) {
10234 /* offset is in (3,4), len in (5,6) and advice in 7 */
10241 ret
= -host_to_target_errno(posix_fadvise(arg1
,
10242 target_offset64(arg2
, arg3
),
10243 target_offset64(arg4
, arg5
),
10248 #ifdef TARGET_NR_fadvise64
10249 case TARGET_NR_fadvise64
:
10250 /* 5 args: fd, offset (high, low), len, advice */
10251 if (regpairs_aligned(cpu_env
)) {
10252 /* offset is in (3,4), len in 5 and advice in 6 */
10258 ret
= -host_to_target_errno(posix_fadvise(arg1
,
10259 target_offset64(arg2
, arg3
),
10264 #else /* not a 32-bit ABI */
10265 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10266 #ifdef TARGET_NR_fadvise64_64
10267 case TARGET_NR_fadvise64_64
:
10269 #ifdef TARGET_NR_fadvise64
10270 case TARGET_NR_fadvise64
:
10272 #ifdef TARGET_S390X
10274 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
10275 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
10276 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
10277 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
10281 ret
= -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
10284 #endif /* end of 64-bit ABI fadvise handling */
10286 #ifdef TARGET_NR_madvise
10287 case TARGET_NR_madvise
:
10288 /* A straight passthrough may not be safe because qemu sometimes
10289 turns private file-backed mappings into anonymous mappings.
10290 This will break MADV_DONTNEED.
10291 This is a hint, so ignoring and returning success is ok. */
10292 ret
= get_errno(0);
10295 #if TARGET_ABI_BITS == 32
10296 case TARGET_NR_fcntl64
:
10300 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
10301 to_flock64_fn
*copyto
= copy_to_user_flock64
;
10304 if (((CPUARMState
*)cpu_env
)->eabi
) {
10305 copyfrom
= copy_from_user_eabi_flock64
;
10306 copyto
= copy_to_user_eabi_flock64
;
10310 cmd
= target_to_host_fcntl_cmd(arg2
);
10311 if (cmd
== -TARGET_EINVAL
) {
10317 case TARGET_F_GETLK64
:
10318 ret
= copyfrom(&fl
, arg3
);
10322 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
10324 ret
= copyto(arg3
, &fl
);
10328 case TARGET_F_SETLK64
:
10329 case TARGET_F_SETLKW64
:
10330 ret
= copyfrom(&fl
, arg3
);
10334 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
10337 ret
= do_fcntl(arg1
, arg2
, arg3
);
10343 #ifdef TARGET_NR_cacheflush
10344 case TARGET_NR_cacheflush
:
10345 /* self-modifying code is handled automatically, so nothing needed */
10349 #ifdef TARGET_NR_security
10350 case TARGET_NR_security
:
10351 goto unimplemented
;
10353 #ifdef TARGET_NR_getpagesize
10354 case TARGET_NR_getpagesize
:
10355 ret
= TARGET_PAGE_SIZE
;
10358 case TARGET_NR_gettid
:
10359 ret
= get_errno(gettid());
10361 #ifdef TARGET_NR_readahead
10362 case TARGET_NR_readahead
:
10363 #if TARGET_ABI_BITS == 32
10364 if (regpairs_aligned(cpu_env
)) {
10369 ret
= get_errno(readahead(arg1
, ((off64_t
)arg3
<< 32) | arg2
, arg4
));
10371 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
10376 #ifdef TARGET_NR_setxattr
10377 case TARGET_NR_listxattr
:
10378 case TARGET_NR_llistxattr
:
10382 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10384 ret
= -TARGET_EFAULT
;
10388 p
= lock_user_string(arg1
);
10390 if (num
== TARGET_NR_listxattr
) {
10391 ret
= get_errno(listxattr(p
, b
, arg3
));
10393 ret
= get_errno(llistxattr(p
, b
, arg3
));
10396 ret
= -TARGET_EFAULT
;
10398 unlock_user(p
, arg1
, 0);
10399 unlock_user(b
, arg2
, arg3
);
10402 case TARGET_NR_flistxattr
:
10406 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10408 ret
= -TARGET_EFAULT
;
10412 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
10413 unlock_user(b
, arg2
, arg3
);
10416 case TARGET_NR_setxattr
:
10417 case TARGET_NR_lsetxattr
:
10419 void *p
, *n
, *v
= 0;
10421 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
10423 ret
= -TARGET_EFAULT
;
10427 p
= lock_user_string(arg1
);
10428 n
= lock_user_string(arg2
);
10430 if (num
== TARGET_NR_setxattr
) {
10431 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
10433 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
10436 ret
= -TARGET_EFAULT
;
10438 unlock_user(p
, arg1
, 0);
10439 unlock_user(n
, arg2
, 0);
10440 unlock_user(v
, arg3
, 0);
10443 case TARGET_NR_fsetxattr
:
10447 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
10449 ret
= -TARGET_EFAULT
;
10453 n
= lock_user_string(arg2
);
10455 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
10457 ret
= -TARGET_EFAULT
;
10459 unlock_user(n
, arg2
, 0);
10460 unlock_user(v
, arg3
, 0);
10463 case TARGET_NR_getxattr
:
10464 case TARGET_NR_lgetxattr
:
10466 void *p
, *n
, *v
= 0;
10468 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
10470 ret
= -TARGET_EFAULT
;
10474 p
= lock_user_string(arg1
);
10475 n
= lock_user_string(arg2
);
10477 if (num
== TARGET_NR_getxattr
) {
10478 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
10480 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
10483 ret
= -TARGET_EFAULT
;
10485 unlock_user(p
, arg1
, 0);
10486 unlock_user(n
, arg2
, 0);
10487 unlock_user(v
, arg3
, arg4
);
10490 case TARGET_NR_fgetxattr
:
10494 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
10496 ret
= -TARGET_EFAULT
;
10500 n
= lock_user_string(arg2
);
10502 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
10504 ret
= -TARGET_EFAULT
;
10506 unlock_user(n
, arg2
, 0);
10507 unlock_user(v
, arg3
, arg4
);
10510 case TARGET_NR_removexattr
:
10511 case TARGET_NR_lremovexattr
:
10514 p
= lock_user_string(arg1
);
10515 n
= lock_user_string(arg2
);
10517 if (num
== TARGET_NR_removexattr
) {
10518 ret
= get_errno(removexattr(p
, n
));
10520 ret
= get_errno(lremovexattr(p
, n
));
10523 ret
= -TARGET_EFAULT
;
10525 unlock_user(p
, arg1
, 0);
10526 unlock_user(n
, arg2
, 0);
10529 case TARGET_NR_fremovexattr
:
10532 n
= lock_user_string(arg2
);
10534 ret
= get_errno(fremovexattr(arg1
, n
));
10536 ret
= -TARGET_EFAULT
;
10538 unlock_user(n
, arg2
, 0);
10542 #endif /* CONFIG_ATTR */
10543 #ifdef TARGET_NR_set_thread_area
10544 case TARGET_NR_set_thread_area
:
10545 #if defined(TARGET_MIPS)
10546 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
10549 #elif defined(TARGET_CRIS)
10551 ret
= -TARGET_EINVAL
;
10553 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
10557 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
10558 ret
= do_set_thread_area(cpu_env
, arg1
);
10560 #elif defined(TARGET_M68K)
10562 TaskState
*ts
= cpu
->opaque
;
10563 ts
->tp_value
= arg1
;
10568 goto unimplemented_nowarn
;
10571 #ifdef TARGET_NR_get_thread_area
10572 case TARGET_NR_get_thread_area
:
10573 #if defined(TARGET_I386) && defined(TARGET_ABI32)
10574 ret
= do_get_thread_area(cpu_env
, arg1
);
10576 #elif defined(TARGET_M68K)
10578 TaskState
*ts
= cpu
->opaque
;
10579 ret
= ts
->tp_value
;
10583 goto unimplemented_nowarn
;
10586 #ifdef TARGET_NR_getdomainname
10587 case TARGET_NR_getdomainname
:
10588 goto unimplemented_nowarn
;
10591 #ifdef TARGET_NR_clock_gettime
10592 case TARGET_NR_clock_gettime
:
10594 struct timespec ts
;
10595 ret
= get_errno(clock_gettime(arg1
, &ts
));
10596 if (!is_error(ret
)) {
10597 host_to_target_timespec(arg2
, &ts
);
10602 #ifdef TARGET_NR_clock_getres
10603 case TARGET_NR_clock_getres
:
10605 struct timespec ts
;
10606 ret
= get_errno(clock_getres(arg1
, &ts
));
10607 if (!is_error(ret
)) {
10608 host_to_target_timespec(arg2
, &ts
);
10613 #ifdef TARGET_NR_clock_nanosleep
10614 case TARGET_NR_clock_nanosleep
:
10616 struct timespec ts
;
10617 target_to_host_timespec(&ts
, arg3
);
10618 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
10619 &ts
, arg4
? &ts
: NULL
));
10621 host_to_target_timespec(arg4
, &ts
);
10623 #if defined(TARGET_PPC)
10624 /* clock_nanosleep is odd in that it returns positive errno values.
10625 * On PPC, CR0 bit 3 should be set in such a situation. */
10626 if (ret
&& ret
!= -TARGET_ERESTARTSYS
) {
10627 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
10634 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
10635 case TARGET_NR_set_tid_address
:
10636 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
10640 case TARGET_NR_tkill
:
10641 ret
= get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
10644 case TARGET_NR_tgkill
:
10645 ret
= get_errno(safe_tgkill((int)arg1
, (int)arg2
,
10646 target_to_host_signal(arg3
)));
10649 #ifdef TARGET_NR_set_robust_list
10650 case TARGET_NR_set_robust_list
:
10651 case TARGET_NR_get_robust_list
:
10652 /* The ABI for supporting robust futexes has userspace pass
10653 * the kernel a pointer to a linked list which is updated by
10654 * userspace after the syscall; the list is walked by the kernel
10655 * when the thread exits. Since the linked list in QEMU guest
10656 * memory isn't a valid linked list for the host and we have
10657 * no way to reliably intercept the thread-death event, we can't
10658 * support these. Silently return ENOSYS so that guest userspace
10659 * falls back to a non-robust futex implementation (which should
10660 * be OK except in the corner case of the guest crashing while
10661 * holding a mutex that is shared with another process via
10664 goto unimplemented_nowarn
;
10667 #if defined(TARGET_NR_utimensat)
10668 case TARGET_NR_utimensat
:
10670 struct timespec
*tsp
, ts
[2];
10674 target_to_host_timespec(ts
, arg3
);
10675 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
10679 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
10681 if (!(p
= lock_user_string(arg2
))) {
10682 ret
= -TARGET_EFAULT
;
10685 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
10686 unlock_user(p
, arg2
, 0);
10691 case TARGET_NR_futex
:
10692 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
10694 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
10695 case TARGET_NR_inotify_init
:
10696 ret
= get_errno(sys_inotify_init());
10699 #ifdef CONFIG_INOTIFY1
10700 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
10701 case TARGET_NR_inotify_init1
:
10702 ret
= get_errno(sys_inotify_init1(arg1
));
10706 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
10707 case TARGET_NR_inotify_add_watch
:
10708 p
= lock_user_string(arg2
);
10709 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
10710 unlock_user(p
, arg2
, 0);
10713 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
10714 case TARGET_NR_inotify_rm_watch
:
10715 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
10719 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
10720 case TARGET_NR_mq_open
:
10722 struct mq_attr posix_mq_attr
, *attrp
;
10724 p
= lock_user_string(arg1
- 1);
10726 copy_from_user_mq_attr (&posix_mq_attr
, arg4
);
10727 attrp
= &posix_mq_attr
;
10731 ret
= get_errno(mq_open(p
, arg2
, arg3
, attrp
));
10732 unlock_user (p
, arg1
, 0);
10736 case TARGET_NR_mq_unlink
:
10737 p
= lock_user_string(arg1
- 1);
10738 ret
= get_errno(mq_unlink(p
));
10739 unlock_user (p
, arg1
, 0);
10742 case TARGET_NR_mq_timedsend
:
10744 struct timespec ts
;
10746 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
10748 target_to_host_timespec(&ts
, arg5
);
10749 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
10750 host_to_target_timespec(arg5
, &ts
);
10752 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
10754 unlock_user (p
, arg2
, arg3
);
10758 case TARGET_NR_mq_timedreceive
:
10760 struct timespec ts
;
10763 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
10765 target_to_host_timespec(&ts
, arg5
);
10766 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
10768 host_to_target_timespec(arg5
, &ts
);
10770 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
10773 unlock_user (p
, arg2
, arg3
);
10775 put_user_u32(prio
, arg4
);
10779 /* Not implemented for now... */
10780 /* case TARGET_NR_mq_notify: */
10783 case TARGET_NR_mq_getsetattr
:
10785 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
10788 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
10789 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
10792 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
10793 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
10800 #ifdef CONFIG_SPLICE
10801 #ifdef TARGET_NR_tee
10802 case TARGET_NR_tee
:
10804 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
10808 #ifdef TARGET_NR_splice
10809 case TARGET_NR_splice
:
10811 loff_t loff_in
, loff_out
;
10812 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
10814 if (get_user_u64(loff_in
, arg2
)) {
10817 ploff_in
= &loff_in
;
10820 if (get_user_u64(loff_out
, arg4
)) {
10823 ploff_out
= &loff_out
;
10825 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
10827 if (put_user_u64(loff_in
, arg2
)) {
10832 if (put_user_u64(loff_out
, arg4
)) {
10839 #ifdef TARGET_NR_vmsplice
10840 case TARGET_NR_vmsplice
:
10842 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10844 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
10845 unlock_iovec(vec
, arg2
, arg3
, 0);
10847 ret
= -host_to_target_errno(errno
);
10852 #endif /* CONFIG_SPLICE */
10853 #ifdef CONFIG_EVENTFD
10854 #if defined(TARGET_NR_eventfd)
10855 case TARGET_NR_eventfd
:
10856 ret
= get_errno(eventfd(arg1
, 0));
10857 fd_trans_unregister(ret
);
10860 #if defined(TARGET_NR_eventfd2)
10861 case TARGET_NR_eventfd2
:
10863 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
10864 if (arg2
& TARGET_O_NONBLOCK
) {
10865 host_flags
|= O_NONBLOCK
;
10867 if (arg2
& TARGET_O_CLOEXEC
) {
10868 host_flags
|= O_CLOEXEC
;
10870 ret
= get_errno(eventfd(arg1
, host_flags
));
10871 fd_trans_unregister(ret
);
10875 #endif /* CONFIG_EVENTFD */
10876 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
10877 case TARGET_NR_fallocate
:
10878 #if TARGET_ABI_BITS == 32
10879 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
10880 target_offset64(arg5
, arg6
)));
10882 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
10886 #if defined(CONFIG_SYNC_FILE_RANGE)
10887 #if defined(TARGET_NR_sync_file_range)
10888 case TARGET_NR_sync_file_range
:
10889 #if TARGET_ABI_BITS == 32
10890 #if defined(TARGET_MIPS)
10891 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
10892 target_offset64(arg5
, arg6
), arg7
));
10894 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
10895 target_offset64(arg4
, arg5
), arg6
));
10896 #endif /* !TARGET_MIPS */
10898 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
10902 #if defined(TARGET_NR_sync_file_range2)
10903 case TARGET_NR_sync_file_range2
:
10904 /* This is like sync_file_range but the arguments are reordered */
10905 #if TARGET_ABI_BITS == 32
10906 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
10907 target_offset64(arg5
, arg6
), arg2
));
10909 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
10914 #if defined(TARGET_NR_signalfd4)
10915 case TARGET_NR_signalfd4
:
10916 ret
= do_signalfd4(arg1
, arg2
, arg4
);
10919 #if defined(TARGET_NR_signalfd)
10920 case TARGET_NR_signalfd
:
10921 ret
= do_signalfd4(arg1
, arg2
, 0);
10924 #if defined(CONFIG_EPOLL)
10925 #if defined(TARGET_NR_epoll_create)
10926 case TARGET_NR_epoll_create
:
10927 ret
= get_errno(epoll_create(arg1
));
10930 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
10931 case TARGET_NR_epoll_create1
:
10932 ret
= get_errno(epoll_create1(arg1
));
10935 #if defined(TARGET_NR_epoll_ctl)
10936 case TARGET_NR_epoll_ctl
:
10938 struct epoll_event ep
;
10939 struct epoll_event
*epp
= 0;
10941 struct target_epoll_event
*target_ep
;
10942 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
10945 ep
.events
= tswap32(target_ep
->events
);
10946 /* The epoll_data_t union is just opaque data to the kernel,
10947 * so we transfer all 64 bits across and need not worry what
10948 * actual data type it is.
10950 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
10951 unlock_user_struct(target_ep
, arg4
, 0);
10954 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
10959 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
10960 #if defined(TARGET_NR_epoll_wait)
10961 case TARGET_NR_epoll_wait
:
10963 #if defined(TARGET_NR_epoll_pwait)
10964 case TARGET_NR_epoll_pwait
:
10967 struct target_epoll_event
*target_ep
;
10968 struct epoll_event
*ep
;
10970 int maxevents
= arg3
;
10971 int timeout
= arg4
;
10973 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
10974 maxevents
* sizeof(struct target_epoll_event
), 1);
10979 ep
= alloca(maxevents
* sizeof(struct epoll_event
));
10982 #if defined(TARGET_NR_epoll_pwait)
10983 case TARGET_NR_epoll_pwait
:
10985 target_sigset_t
*target_set
;
10986 sigset_t _set
, *set
= &_set
;
10989 target_set
= lock_user(VERIFY_READ
, arg5
,
10990 sizeof(target_sigset_t
), 1);
10992 unlock_user(target_ep
, arg2
, 0);
10995 target_to_host_sigset(set
, target_set
);
10996 unlock_user(target_set
, arg5
, 0);
11001 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
11002 set
, SIGSET_T_SIZE
));
11006 #if defined(TARGET_NR_epoll_wait)
11007 case TARGET_NR_epoll_wait
:
11008 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
11013 ret
= -TARGET_ENOSYS
;
11015 if (!is_error(ret
)) {
11017 for (i
= 0; i
< ret
; i
++) {
11018 target_ep
[i
].events
= tswap32(ep
[i
].events
);
11019 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
11022 unlock_user(target_ep
, arg2
, ret
* sizeof(struct target_epoll_event
));
11027 #ifdef TARGET_NR_prlimit64
11028 case TARGET_NR_prlimit64
:
11030 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11031 struct target_rlimit64
*target_rnew
, *target_rold
;
11032 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
11033 int resource
= target_to_host_resource(arg2
);
11035 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
11038 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
11039 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
11040 unlock_user_struct(target_rnew
, arg3
, 0);
11044 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
11045 if (!is_error(ret
) && arg4
) {
11046 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
11049 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
11050 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
11051 unlock_user_struct(target_rold
, arg4
, 1);
11056 #ifdef TARGET_NR_gethostname
11057 case TARGET_NR_gethostname
:
11059 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
11061 ret
= get_errno(gethostname(name
, arg2
));
11062 unlock_user(name
, arg1
, arg2
);
11064 ret
= -TARGET_EFAULT
;
11069 #ifdef TARGET_NR_atomic_cmpxchg_32
11070 case TARGET_NR_atomic_cmpxchg_32
:
11072 /* should use start_exclusive from main.c */
11073 abi_ulong mem_value
;
11074 if (get_user_u32(mem_value
, arg6
)) {
11075 target_siginfo_t info
;
11076 info
.si_signo
= SIGSEGV
;
11078 info
.si_code
= TARGET_SEGV_MAPERR
;
11079 info
._sifields
._sigfault
._addr
= arg6
;
11080 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
11084 if (mem_value
== arg2
)
11085 put_user_u32(arg1
, arg6
);
11090 #ifdef TARGET_NR_atomic_barrier
11091 case TARGET_NR_atomic_barrier
:
11093 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
11099 #ifdef TARGET_NR_timer_create
11100 case TARGET_NR_timer_create
:
11102 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11104 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
11107 int timer_index
= next_free_host_timer();
11109 if (timer_index
< 0) {
11110 ret
= -TARGET_EAGAIN
;
11112 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
11115 phost_sevp
= &host_sevp
;
11116 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
11122 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
11126 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
11135 #ifdef TARGET_NR_timer_settime
11136 case TARGET_NR_timer_settime
:
11138 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11139 * struct itimerspec * old_value */
11140 target_timer_t timerid
= get_timer_id(arg1
);
11144 } else if (arg3
== 0) {
11145 ret
= -TARGET_EINVAL
;
11147 timer_t htimer
= g_posix_timers
[timerid
];
11148 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
11150 target_to_host_itimerspec(&hspec_new
, arg3
);
11152 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
11153 host_to_target_itimerspec(arg2
, &hspec_old
);
11159 #ifdef TARGET_NR_timer_gettime
11160 case TARGET_NR_timer_gettime
:
11162 /* args: timer_t timerid, struct itimerspec *curr_value */
11163 target_timer_t timerid
= get_timer_id(arg1
);
11167 } else if (!arg2
) {
11168 ret
= -TARGET_EFAULT
;
11170 timer_t htimer
= g_posix_timers
[timerid
];
11171 struct itimerspec hspec
;
11172 ret
= get_errno(timer_gettime(htimer
, &hspec
));
11174 if (host_to_target_itimerspec(arg2
, &hspec
)) {
11175 ret
= -TARGET_EFAULT
;
11182 #ifdef TARGET_NR_timer_getoverrun
11183 case TARGET_NR_timer_getoverrun
:
11185 /* args: timer_t timerid */
11186 target_timer_t timerid
= get_timer_id(arg1
);
11191 timer_t htimer
= g_posix_timers
[timerid
];
11192 ret
= get_errno(timer_getoverrun(htimer
));
11194 fd_trans_unregister(ret
);
11199 #ifdef TARGET_NR_timer_delete
11200 case TARGET_NR_timer_delete
:
11202 /* args: timer_t timerid */
11203 target_timer_t timerid
= get_timer_id(arg1
);
11208 timer_t htimer
= g_posix_timers
[timerid
];
11209 ret
= get_errno(timer_delete(htimer
));
11210 g_posix_timers
[timerid
] = 0;
11216 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11217 case TARGET_NR_timerfd_create
:
11218 ret
= get_errno(timerfd_create(arg1
,
11219 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
11223 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11224 case TARGET_NR_timerfd_gettime
:
11226 struct itimerspec its_curr
;
11228 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
11230 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
11237 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11238 case TARGET_NR_timerfd_settime
:
11240 struct itimerspec its_new
, its_old
, *p_new
;
11243 if (target_to_host_itimerspec(&its_new
, arg3
)) {
11251 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
11253 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
11260 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11261 case TARGET_NR_ioprio_get
:
11262 ret
= get_errno(ioprio_get(arg1
, arg2
));
11266 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11267 case TARGET_NR_ioprio_set
:
11268 ret
= get_errno(ioprio_set(arg1
, arg2
, arg3
));
11272 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11273 case TARGET_NR_setns
:
11274 ret
= get_errno(setns(arg1
, arg2
));
11277 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11278 case TARGET_NR_unshare
:
11279 ret
= get_errno(unshare(arg1
));
11285 gemu_log("qemu: Unsupported syscall: %d\n", num
);
11286 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
11287 unimplemented_nowarn
:
11289 ret
= -TARGET_ENOSYS
;
11294 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
11297 print_syscall_ret(num
, ret
);
11298 trace_guest_user_syscall_ret(cpu
, num
, ret
);
11301 ret
= -TARGET_EFAULT
;