4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
29 #include <sys/mount.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
37 #include <linux/capability.h>
40 int __clone2(int (*fn
)(void *), void *child_stack_base
,
41 size_t stack_size
, int flags
, void *arg
, ...);
43 #include <sys/socket.h>
47 #include <sys/times.h>
50 #include <sys/statfs.h>
52 #include <sys/sysinfo.h>
53 #include <sys/signalfd.h>
54 //#include <sys/user.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <linux/wireless.h>
58 #include <linux/icmp.h>
59 #include "qemu-common.h"
61 #include <sys/timerfd.h>
67 #include <sys/eventfd.h>
70 #include <sys/epoll.h>
73 #include "qemu/xattr.h"
75 #ifdef CONFIG_SENDFILE
76 #include <sys/sendfile.h>
79 #define termios host_termios
80 #define winsize host_winsize
81 #define termio host_termio
82 #define sgttyb host_sgttyb /* same as target */
83 #define tchars host_tchars /* same as target */
84 #define ltchars host_ltchars /* same as target */
86 #include <linux/termios.h>
87 #include <linux/unistd.h>
88 #include <linux/cdrom.h>
89 #include <linux/hdreg.h>
90 #include <linux/soundcard.h>
92 #include <linux/mtio.h>
94 #if defined(CONFIG_FIEMAP)
95 #include <linux/fiemap.h>
99 #include <linux/dm-ioctl.h>
100 #include <linux/reboot.h>
101 #include <linux/route.h>
102 #include <linux/filter.h>
103 #include <linux/blkpg.h>
104 #include "linux_loop.h"
109 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
110 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
113 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
114 * once. This exercises the codepaths for restart.
116 //#define DEBUG_ERESTARTSYS
118 //#include <linux/msdos_fs.h>
119 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
120 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
131 #define _syscall0(type,name) \
132 static type name (void) \
134 return syscall(__NR_##name); \
137 #define _syscall1(type,name,type1,arg1) \
138 static type name (type1 arg1) \
140 return syscall(__NR_##name, arg1); \
143 #define _syscall2(type,name,type1,arg1,type2,arg2) \
144 static type name (type1 arg1,type2 arg2) \
146 return syscall(__NR_##name, arg1, arg2); \
149 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
150 static type name (type1 arg1,type2 arg2,type3 arg3) \
152 return syscall(__NR_##name, arg1, arg2, arg3); \
155 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
156 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
158 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
161 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
163 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
165 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
169 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
170 type5,arg5,type6,arg6) \
171 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
174 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
178 #define __NR_sys_uname __NR_uname
179 #define __NR_sys_getcwd1 __NR_getcwd
180 #define __NR_sys_getdents __NR_getdents
181 #define __NR_sys_getdents64 __NR_getdents64
182 #define __NR_sys_getpriority __NR_getpriority
183 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
184 #define __NR_sys_syslog __NR_syslog
185 #define __NR_sys_tgkill __NR_tgkill
186 #define __NR_sys_tkill __NR_tkill
187 #define __NR_sys_futex __NR_futex
188 #define __NR_sys_inotify_init __NR_inotify_init
189 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
190 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
192 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
194 #define __NR__llseek __NR_lseek
197 /* Newer kernel ports have llseek() instead of _llseek() */
198 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
199 #define TARGET_NR__llseek TARGET_NR_llseek
203 _syscall0(int, gettid
)
205 /* This is a replacement for the host gettid() and must return a host
207 static int gettid(void) {
211 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
212 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
214 #if !defined(__NR_getdents) || \
215 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
216 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
218 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
219 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
220 loff_t
*, res
, uint
, wh
);
222 _syscall3(int,sys_rt_sigqueueinfo
,int,pid
,int,sig
,siginfo_t
*,uinfo
)
223 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
224 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
225 _syscall3(int,sys_tgkill
,int,tgid
,int,pid
,int,sig
)
227 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
228 _syscall2(int,sys_tkill
,int,tid
,int,sig
)
230 #ifdef __NR_exit_group
231 _syscall1(int,exit_group
,int,error_code
)
233 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
234 _syscall1(int,set_tid_address
,int *,tidptr
)
236 #if defined(TARGET_NR_futex) && defined(__NR_futex)
237 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
238 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
240 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
241 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
242 unsigned long *, user_mask_ptr
);
243 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
244 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
245 unsigned long *, user_mask_ptr
);
246 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
248 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
249 struct __user_cap_data_struct
*, data
);
250 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
251 struct __user_cap_data_struct
*, data
);
252 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
253 _syscall2(int, ioprio_get
, int, which
, int, who
)
255 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
256 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
258 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
259 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
262 static bitmask_transtbl fcntl_flags_tbl
[] = {
263 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
264 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
265 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
266 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
267 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
268 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
269 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
270 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
271 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
272 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
273 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
274 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
275 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
276 #if defined(O_DIRECT)
277 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
279 #if defined(O_NOATIME)
280 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
282 #if defined(O_CLOEXEC)
283 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
286 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
288 /* Don't terminate the list prematurely on 64-bit host+guest. */
289 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
290 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
295 typedef abi_long (*TargetFdDataFunc
)(void *, size_t);
296 typedef abi_long (*TargetFdAddrFunc
)(void *, abi_ulong
, socklen_t
);
297 typedef struct TargetFdTrans
{
298 TargetFdDataFunc host_to_target_data
;
299 TargetFdDataFunc target_to_host_data
;
300 TargetFdAddrFunc target_to_host_addr
;
303 static TargetFdTrans
**target_fd_trans
;
305 static unsigned int target_fd_max
;
307 static TargetFdDataFunc
fd_trans_host_to_target_data(int fd
)
309 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
310 return target_fd_trans
[fd
]->host_to_target_data
;
315 static TargetFdAddrFunc
fd_trans_target_to_host_addr(int fd
)
317 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
318 return target_fd_trans
[fd
]->target_to_host_addr
;
323 static void fd_trans_register(int fd
, TargetFdTrans
*trans
)
327 if (fd
>= target_fd_max
) {
328 oldmax
= target_fd_max
;
329 target_fd_max
= ((fd
>> 6) + 1) << 6; /* by slice of 64 entries */
330 target_fd_trans
= g_renew(TargetFdTrans
*,
331 target_fd_trans
, target_fd_max
);
332 memset((void *)(target_fd_trans
+ oldmax
), 0,
333 (target_fd_max
- oldmax
) * sizeof(TargetFdTrans
*));
335 target_fd_trans
[fd
] = trans
;
338 static void fd_trans_unregister(int fd
)
340 if (fd
>= 0 && fd
< target_fd_max
) {
341 target_fd_trans
[fd
] = NULL
;
345 static void fd_trans_dup(int oldfd
, int newfd
)
347 fd_trans_unregister(newfd
);
348 if (oldfd
< target_fd_max
&& target_fd_trans
[oldfd
]) {
349 fd_trans_register(newfd
, target_fd_trans
[oldfd
]);
/* Emulate the getcwd syscall via libc getcwd(): on success return the
 * number of bytes stored in @buf including the trailing NUL (the kernel
 * convention); on failure return -1 with errno already set by getcwd(). */
static int sys_getcwd1(char *buf, size_t size)
{
    char *cwd = getcwd(buf, size);

    if (cwd == NULL) {
        /* getcwd() sets errno */
        return -1;
    }
    return strlen(cwd) + 1;
}
/* Wrapper around openat(2).
 *
 * open(2)/openat(2) only consume their variadic 'mode' argument when
 * O_CREAT is present in @flags, so forward it only in that case. */
static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
{
    if (flags & O_CREAT) {
        return openat(dirfd, pathname, flags, mode);
    }
    return openat(dirfd, pathname, flags);
}
374 #ifdef TARGET_NR_utimensat
375 #ifdef CONFIG_UTIMENSAT
376 static int sys_utimensat(int dirfd
, const char *pathname
,
377 const struct timespec times
[2], int flags
)
379 if (pathname
== NULL
)
380 return futimens(dirfd
, times
);
382 return utimensat(dirfd
, pathname
, times
, flags
);
384 #elif defined(__NR_utimensat)
385 #define __NR_sys_utimensat __NR_utimensat
386 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
387 const struct timespec
*,tsp
,int,flags
)
389 static int sys_utimensat(int dirfd
, const char *pathname
,
390 const struct timespec times
[2], int flags
)
396 #endif /* TARGET_NR_utimensat */
398 #ifdef CONFIG_INOTIFY
399 #include <sys/inotify.h>
401 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
402 static int sys_inotify_init(void)
404 return (inotify_init());
407 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
408 static int sys_inotify_add_watch(int fd
,const char *pathname
, int32_t mask
)
410 return (inotify_add_watch(fd
, pathname
, mask
));
413 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
414 static int sys_inotify_rm_watch(int fd
, int32_t wd
)
416 return (inotify_rm_watch(fd
, wd
));
419 #ifdef CONFIG_INOTIFY1
420 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
421 static int sys_inotify_init1(int flags
)
423 return (inotify_init1(flags
));
428 /* Userspace can usually survive runtime without inotify */
429 #undef TARGET_NR_inotify_init
430 #undef TARGET_NR_inotify_init1
431 #undef TARGET_NR_inotify_add_watch
432 #undef TARGET_NR_inotify_rm_watch
433 #endif /* CONFIG_INOTIFY */
435 #if defined(TARGET_NR_ppoll)
437 # define __NR_ppoll -1
439 #define __NR_sys_ppoll __NR_ppoll
440 _syscall5(int, sys_ppoll
, struct pollfd
*, fds
, nfds_t
, nfds
,
441 struct timespec
*, timeout
, const sigset_t
*, sigmask
,
445 #if defined(TARGET_NR_pselect6)
446 #ifndef __NR_pselect6
447 # define __NR_pselect6 -1
449 #define __NR_sys_pselect6 __NR_pselect6
450 _syscall6(int, sys_pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
,
451 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
);
454 #if defined(TARGET_NR_prlimit64)
455 #ifndef __NR_prlimit64
456 # define __NR_prlimit64 -1
458 #define __NR_sys_prlimit64 __NR_prlimit64
459 /* The glibc rlimit structure may not be that used by the underlying syscall */
460 struct host_rlimit64
{
464 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
465 const struct host_rlimit64
*, new_limit
,
466 struct host_rlimit64
*, old_limit
)
470 #if defined(TARGET_NR_timer_create)
471 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
472 static timer_t g_posix_timers
[32] = { 0, } ;
474 static inline int next_free_host_timer(void)
477 /* FIXME: Does finding the next free slot require a lock? */
478 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
479 if (g_posix_timers
[k
] == 0) {
480 g_posix_timers
[k
] = (timer_t
) 1;
488 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
490 static inline int regpairs_aligned(void *cpu_env
) {
491 return ((((CPUARMState
*)cpu_env
)->eabi
) == 1) ;
493 #elif defined(TARGET_MIPS)
494 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
495 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
496 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
497 * of registers which translates to the same as ARM/MIPS, because we start with
499 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
501 static inline int regpairs_aligned(void *cpu_env
) { return 0; }
504 #define ERRNO_TABLE_SIZE 1200
506 /* target_to_host_errno_table[] is initialized from
507 * host_to_target_errno_table[] in syscall_init(). */
508 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
512 * This list is the union of errno values overridden in asm-<arch>/errno.h
513 * minus the errnos that are not actually generic to all archs.
515 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
516 [EAGAIN
] = TARGET_EAGAIN
,
517 [EIDRM
] = TARGET_EIDRM
,
518 [ECHRNG
] = TARGET_ECHRNG
,
519 [EL2NSYNC
] = TARGET_EL2NSYNC
,
520 [EL3HLT
] = TARGET_EL3HLT
,
521 [EL3RST
] = TARGET_EL3RST
,
522 [ELNRNG
] = TARGET_ELNRNG
,
523 [EUNATCH
] = TARGET_EUNATCH
,
524 [ENOCSI
] = TARGET_ENOCSI
,
525 [EL2HLT
] = TARGET_EL2HLT
,
526 [EDEADLK
] = TARGET_EDEADLK
,
527 [ENOLCK
] = TARGET_ENOLCK
,
528 [EBADE
] = TARGET_EBADE
,
529 [EBADR
] = TARGET_EBADR
,
530 [EXFULL
] = TARGET_EXFULL
,
531 [ENOANO
] = TARGET_ENOANO
,
532 [EBADRQC
] = TARGET_EBADRQC
,
533 [EBADSLT
] = TARGET_EBADSLT
,
534 [EBFONT
] = TARGET_EBFONT
,
535 [ENOSTR
] = TARGET_ENOSTR
,
536 [ENODATA
] = TARGET_ENODATA
,
537 [ETIME
] = TARGET_ETIME
,
538 [ENOSR
] = TARGET_ENOSR
,
539 [ENONET
] = TARGET_ENONET
,
540 [ENOPKG
] = TARGET_ENOPKG
,
541 [EREMOTE
] = TARGET_EREMOTE
,
542 [ENOLINK
] = TARGET_ENOLINK
,
543 [EADV
] = TARGET_EADV
,
544 [ESRMNT
] = TARGET_ESRMNT
,
545 [ECOMM
] = TARGET_ECOMM
,
546 [EPROTO
] = TARGET_EPROTO
,
547 [EDOTDOT
] = TARGET_EDOTDOT
,
548 [EMULTIHOP
] = TARGET_EMULTIHOP
,
549 [EBADMSG
] = TARGET_EBADMSG
,
550 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
551 [EOVERFLOW
] = TARGET_EOVERFLOW
,
552 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
553 [EBADFD
] = TARGET_EBADFD
,
554 [EREMCHG
] = TARGET_EREMCHG
,
555 [ELIBACC
] = TARGET_ELIBACC
,
556 [ELIBBAD
] = TARGET_ELIBBAD
,
557 [ELIBSCN
] = TARGET_ELIBSCN
,
558 [ELIBMAX
] = TARGET_ELIBMAX
,
559 [ELIBEXEC
] = TARGET_ELIBEXEC
,
560 [EILSEQ
] = TARGET_EILSEQ
,
561 [ENOSYS
] = TARGET_ENOSYS
,
562 [ELOOP
] = TARGET_ELOOP
,
563 [ERESTART
] = TARGET_ERESTART
,
564 [ESTRPIPE
] = TARGET_ESTRPIPE
,
565 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
566 [EUSERS
] = TARGET_EUSERS
,
567 [ENOTSOCK
] = TARGET_ENOTSOCK
,
568 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
569 [EMSGSIZE
] = TARGET_EMSGSIZE
,
570 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
571 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
572 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
573 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
574 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
575 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
576 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
577 [EADDRINUSE
] = TARGET_EADDRINUSE
,
578 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
579 [ENETDOWN
] = TARGET_ENETDOWN
,
580 [ENETUNREACH
] = TARGET_ENETUNREACH
,
581 [ENETRESET
] = TARGET_ENETRESET
,
582 [ECONNABORTED
] = TARGET_ECONNABORTED
,
583 [ECONNRESET
] = TARGET_ECONNRESET
,
584 [ENOBUFS
] = TARGET_ENOBUFS
,
585 [EISCONN
] = TARGET_EISCONN
,
586 [ENOTCONN
] = TARGET_ENOTCONN
,
587 [EUCLEAN
] = TARGET_EUCLEAN
,
588 [ENOTNAM
] = TARGET_ENOTNAM
,
589 [ENAVAIL
] = TARGET_ENAVAIL
,
590 [EISNAM
] = TARGET_EISNAM
,
591 [EREMOTEIO
] = TARGET_EREMOTEIO
,
592 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
593 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
594 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
595 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
596 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
597 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
598 [EALREADY
] = TARGET_EALREADY
,
599 [EINPROGRESS
] = TARGET_EINPROGRESS
,
600 [ESTALE
] = TARGET_ESTALE
,
601 [ECANCELED
] = TARGET_ECANCELED
,
602 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
603 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
605 [ENOKEY
] = TARGET_ENOKEY
,
608 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
611 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
614 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
617 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
619 #ifdef ENOTRECOVERABLE
620 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
624 static inline int host_to_target_errno(int err
)
626 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
627 host_to_target_errno_table
[err
]) {
628 return host_to_target_errno_table
[err
];
633 static inline int target_to_host_errno(int err
)
635 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
636 target_to_host_errno_table
[err
]) {
637 return target_to_host_errno_table
[err
];
642 static inline abi_long
get_errno(abi_long ret
)
645 return -host_to_target_errno(errno
);
650 static inline int is_error(abi_long ret
)
652 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
655 char *target_strerror(int err
)
657 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
660 return strerror(target_to_host_errno(err
));
663 #define safe_syscall0(type, name) \
664 static type safe_##name(void) \
666 return safe_syscall(__NR_##name); \
669 #define safe_syscall1(type, name, type1, arg1) \
670 static type safe_##name(type1 arg1) \
672 return safe_syscall(__NR_##name, arg1); \
675 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
676 static type safe_##name(type1 arg1, type2 arg2) \
678 return safe_syscall(__NR_##name, arg1, arg2); \
681 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
682 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
684 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
687 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
689 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
691 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
694 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
695 type4, arg4, type5, arg5) \
696 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
699 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
702 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
703 type4, arg4, type5, arg5, type6, arg6) \
704 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
705 type5 arg5, type6 arg6) \
707 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
710 static inline int host_to_target_sock_type(int host_type
)
714 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
716 target_type
= TARGET_SOCK_DGRAM
;
719 target_type
= TARGET_SOCK_STREAM
;
722 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
726 #if defined(SOCK_CLOEXEC)
727 if (host_type
& SOCK_CLOEXEC
) {
728 target_type
|= TARGET_SOCK_CLOEXEC
;
732 #if defined(SOCK_NONBLOCK)
733 if (host_type
& SOCK_NONBLOCK
) {
734 target_type
|= TARGET_SOCK_NONBLOCK
;
741 static abi_ulong target_brk
;
742 static abi_ulong target_original_brk
;
743 static abi_ulong brk_page
;
745 void target_set_brk(abi_ulong new_brk
)
747 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
748 brk_page
= HOST_PAGE_ALIGN(target_brk
);
751 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
752 #define DEBUGF_BRK(message, args...)
754 /* do_brk() must return target values and target errnos. */
755 abi_long
do_brk(abi_ulong new_brk
)
757 abi_long mapped_addr
;
760 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
763 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
766 if (new_brk
< target_original_brk
) {
767 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
772 /* If the new brk is less than the highest page reserved to the
773 * target heap allocation, set it and we're almost done... */
774 if (new_brk
<= brk_page
) {
775 /* Heap contents are initialized to zero, as for anonymous
777 if (new_brk
> target_brk
) {
778 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
780 target_brk
= new_brk
;
781 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
785 /* We need to allocate more memory after the brk... Note that
786 * we don't use MAP_FIXED because that will map over the top of
787 * any existing mapping (like the one with the host libc or qemu
788 * itself); instead we treat "mapped but at wrong address" as
789 * a failure and unmap again.
791 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
792 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
793 PROT_READ
|PROT_WRITE
,
794 MAP_ANON
|MAP_PRIVATE
, 0, 0));
796 if (mapped_addr
== brk_page
) {
797 /* Heap contents are initialized to zero, as for anonymous
798 * mapped pages. Technically the new pages are already
799 * initialized to zero since they *are* anonymous mapped
800 * pages, however we have to take care with the contents that
801 * come from the remaining part of the previous page: it may
802 * contains garbage data due to a previous heap usage (grown
804 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
806 target_brk
= new_brk
;
807 brk_page
= HOST_PAGE_ALIGN(target_brk
);
808 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
811 } else if (mapped_addr
!= -1) {
812 /* Mapped but at wrong address, meaning there wasn't actually
813 * enough space for this brk.
815 target_munmap(mapped_addr
, new_alloc_size
);
817 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
820 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
823 #if defined(TARGET_ALPHA)
824 /* We (partially) emulate OSF/1 on Alpha, which requires we
825 return a proper errno, not an unchanged brk value. */
826 return -TARGET_ENOMEM
;
828 /* For everything else, return the previous break. */
832 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
833 abi_ulong target_fds_addr
,
837 abi_ulong b
, *target_fds
;
839 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
840 if (!(target_fds
= lock_user(VERIFY_READ
,
842 sizeof(abi_ulong
) * nw
,
844 return -TARGET_EFAULT
;
848 for (i
= 0; i
< nw
; i
++) {
849 /* grab the abi_ulong */
850 __get_user(b
, &target_fds
[i
]);
851 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
852 /* check the bit inside the abi_ulong */
859 unlock_user(target_fds
, target_fds_addr
, 0);
864 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
865 abi_ulong target_fds_addr
,
868 if (target_fds_addr
) {
869 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
870 return -TARGET_EFAULT
;
878 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
884 abi_ulong
*target_fds
;
886 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
887 if (!(target_fds
= lock_user(VERIFY_WRITE
,
889 sizeof(abi_ulong
) * nw
,
891 return -TARGET_EFAULT
;
894 for (i
= 0; i
< nw
; i
++) {
896 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
897 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
900 __put_user(v
, &target_fds
[i
]);
903 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
908 #if defined(__alpha__)
914 static inline abi_long
host_to_target_clock_t(long ticks
)
916 #if HOST_HZ == TARGET_HZ
919 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
923 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
924 const struct rusage
*rusage
)
926 struct target_rusage
*target_rusage
;
928 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
929 return -TARGET_EFAULT
;
930 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
931 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
932 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
933 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
934 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
935 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
936 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
937 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
938 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
939 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
940 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
941 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
942 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
943 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
944 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
945 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
946 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
947 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
948 unlock_user_struct(target_rusage
, target_addr
, 1);
953 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
955 abi_ulong target_rlim_swap
;
958 target_rlim_swap
= tswapal(target_rlim
);
959 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
960 return RLIM_INFINITY
;
962 result
= target_rlim_swap
;
963 if (target_rlim_swap
!= (rlim_t
)result
)
964 return RLIM_INFINITY
;
969 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
971 abi_ulong target_rlim_swap
;
974 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
975 target_rlim_swap
= TARGET_RLIM_INFINITY
;
977 target_rlim_swap
= rlim
;
978 result
= tswapal(target_rlim_swap
);
983 static inline int target_to_host_resource(int code
)
986 case TARGET_RLIMIT_AS
:
988 case TARGET_RLIMIT_CORE
:
990 case TARGET_RLIMIT_CPU
:
992 case TARGET_RLIMIT_DATA
:
994 case TARGET_RLIMIT_FSIZE
:
996 case TARGET_RLIMIT_LOCKS
:
998 case TARGET_RLIMIT_MEMLOCK
:
999 return RLIMIT_MEMLOCK
;
1000 case TARGET_RLIMIT_MSGQUEUE
:
1001 return RLIMIT_MSGQUEUE
;
1002 case TARGET_RLIMIT_NICE
:
1004 case TARGET_RLIMIT_NOFILE
:
1005 return RLIMIT_NOFILE
;
1006 case TARGET_RLIMIT_NPROC
:
1007 return RLIMIT_NPROC
;
1008 case TARGET_RLIMIT_RSS
:
1010 case TARGET_RLIMIT_RTPRIO
:
1011 return RLIMIT_RTPRIO
;
1012 case TARGET_RLIMIT_SIGPENDING
:
1013 return RLIMIT_SIGPENDING
;
1014 case TARGET_RLIMIT_STACK
:
1015 return RLIMIT_STACK
;
1021 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1022 abi_ulong target_tv_addr
)
1024 struct target_timeval
*target_tv
;
1026 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
1027 return -TARGET_EFAULT
;
1029 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1030 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1032 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1037 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1038 const struct timeval
*tv
)
1040 struct target_timeval
*target_tv
;
1042 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
1043 return -TARGET_EFAULT
;
1045 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1046 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1048 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1053 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1054 abi_ulong target_tz_addr
)
1056 struct target_timezone
*target_tz
;
1058 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1059 return -TARGET_EFAULT
;
1062 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1063 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1065 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1070 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1073 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1074 abi_ulong target_mq_attr_addr
)
1076 struct target_mq_attr
*target_mq_attr
;
1078 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1079 target_mq_attr_addr
, 1))
1080 return -TARGET_EFAULT
;
1082 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1083 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1084 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1085 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1087 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1092 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1093 const struct mq_attr
*attr
)
1095 struct target_mq_attr
*target_mq_attr
;
1097 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1098 target_mq_attr_addr
, 0))
1099 return -TARGET_EFAULT
;
1101 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1102 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1103 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1104 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1106 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1112 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1113 /* do_select() must return target values and target errnos. */
1114 static abi_long
do_select(int n
,
1115 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1116 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1118 fd_set rfds
, wfds
, efds
;
1119 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1120 struct timeval tv
, *tv_ptr
;
1123 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1127 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1131 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1136 if (target_tv_addr
) {
1137 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1138 return -TARGET_EFAULT
;
1144 ret
= get_errno(select(n
, rfds_ptr
, wfds_ptr
, efds_ptr
, tv_ptr
));
1146 if (!is_error(ret
)) {
1147 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1148 return -TARGET_EFAULT
;
1149 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1150 return -TARGET_EFAULT
;
1151 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1152 return -TARGET_EFAULT
;
1154 if (target_tv_addr
&& copy_to_user_timeval(target_tv_addr
, &tv
))
1155 return -TARGET_EFAULT
;
1162 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1165 return pipe2(host_pipe
, flags
);
1171 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1172 int flags
, int is_pipe2
)
1176 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1179 return get_errno(ret
);
1181 /* Several targets have special calling conventions for the original
1182 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1184 #if defined(TARGET_ALPHA)
1185 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1186 return host_pipe
[0];
1187 #elif defined(TARGET_MIPS)
1188 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1189 return host_pipe
[0];
1190 #elif defined(TARGET_SH4)
1191 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1192 return host_pipe
[0];
1193 #elif defined(TARGET_SPARC)
1194 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1195 return host_pipe
[0];
1199 if (put_user_s32(host_pipe
[0], pipedes
)
1200 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1201 return -TARGET_EFAULT
;
1202 return get_errno(ret
);
1205 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1206 abi_ulong target_addr
,
1209 struct target_ip_mreqn
*target_smreqn
;
1211 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1213 return -TARGET_EFAULT
;
1214 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1215 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1216 if (len
== sizeof(struct target_ip_mreqn
))
1217 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1218 unlock_user(target_smreqn
, target_addr
, 0);
1223 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1224 abi_ulong target_addr
,
1227 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1228 sa_family_t sa_family
;
1229 struct target_sockaddr
*target_saddr
;
1231 if (fd_trans_target_to_host_addr(fd
)) {
1232 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1235 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1237 return -TARGET_EFAULT
;
1239 sa_family
= tswap16(target_saddr
->sa_family
);
1241 /* Oops. The caller might send a incomplete sun_path; sun_path
1242 * must be terminated by \0 (see the manual page), but
1243 * unfortunately it is quite common to specify sockaddr_un
1244 * length as "strlen(x->sun_path)" while it should be
1245 * "strlen(...) + 1". We'll fix that here if needed.
1246 * Linux kernel has a similar feature.
1249 if (sa_family
== AF_UNIX
) {
1250 if (len
< unix_maxlen
&& len
> 0) {
1251 char *cp
= (char*)target_saddr
;
1253 if ( cp
[len
-1] && !cp
[len
] )
1256 if (len
> unix_maxlen
)
1260 memcpy(addr
, target_saddr
, len
);
1261 addr
->sa_family
= sa_family
;
1262 if (sa_family
== AF_PACKET
) {
1263 struct target_sockaddr_ll
*lladdr
;
1265 lladdr
= (struct target_sockaddr_ll
*)addr
;
1266 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1267 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1269 unlock_user(target_saddr
, target_addr
, 0);
1274 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1275 struct sockaddr
*addr
,
1278 struct target_sockaddr
*target_saddr
;
1280 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1282 return -TARGET_EFAULT
;
1283 memcpy(target_saddr
, addr
, len
);
1284 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1285 unlock_user(target_saddr
, target_addr
, len
);
1290 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1291 struct target_msghdr
*target_msgh
)
1293 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1294 abi_long msg_controllen
;
1295 abi_ulong target_cmsg_addr
;
1296 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1297 socklen_t space
= 0;
1299 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1300 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1302 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1303 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1304 target_cmsg_start
= target_cmsg
;
1306 return -TARGET_EFAULT
;
1308 while (cmsg
&& target_cmsg
) {
1309 void *data
= CMSG_DATA(cmsg
);
1310 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1312 int len
= tswapal(target_cmsg
->cmsg_len
)
1313 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
1315 space
+= CMSG_SPACE(len
);
1316 if (space
> msgh
->msg_controllen
) {
1317 space
-= CMSG_SPACE(len
);
1318 /* This is a QEMU bug, since we allocated the payload
1319 * area ourselves (unlike overflow in host-to-target
1320 * conversion, which is just the guest giving us a buffer
1321 * that's too small). It can't happen for the payload types
1322 * we currently support; if it becomes an issue in future
1323 * we would need to improve our allocation strategy to
1324 * something more intelligent than "twice the size of the
1325 * target buffer we're reading from".
1327 gemu_log("Host cmsg overflow\n");
1331 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1332 cmsg
->cmsg_level
= SOL_SOCKET
;
1334 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1336 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1337 cmsg
->cmsg_len
= CMSG_LEN(len
);
1339 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1340 int *fd
= (int *)data
;
1341 int *target_fd
= (int *)target_data
;
1342 int i
, numfds
= len
/ sizeof(int);
1344 for (i
= 0; i
< numfds
; i
++) {
1345 __get_user(fd
[i
], target_fd
+ i
);
1347 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1348 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1349 struct ucred
*cred
= (struct ucred
*)data
;
1350 struct target_ucred
*target_cred
=
1351 (struct target_ucred
*)target_data
;
1353 __get_user(cred
->pid
, &target_cred
->pid
);
1354 __get_user(cred
->uid
, &target_cred
->uid
);
1355 __get_user(cred
->gid
, &target_cred
->gid
);
1357 gemu_log("Unsupported ancillary data: %d/%d\n",
1358 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1359 memcpy(data
, target_data
, len
);
1362 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1363 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1366 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1368 msgh
->msg_controllen
= space
;
1372 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1373 struct msghdr
*msgh
)
1375 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1376 abi_long msg_controllen
;
1377 abi_ulong target_cmsg_addr
;
1378 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1379 socklen_t space
= 0;
1381 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1382 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1384 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1385 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1386 target_cmsg_start
= target_cmsg
;
1388 return -TARGET_EFAULT
;
1390 while (cmsg
&& target_cmsg
) {
1391 void *data
= CMSG_DATA(cmsg
);
1392 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1394 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
1395 int tgt_len
, tgt_space
;
1397 /* We never copy a half-header but may copy half-data;
1398 * this is Linux's behaviour in put_cmsg(). Note that
1399 * truncation here is a guest problem (which we report
1400 * to the guest via the CTRUNC bit), unlike truncation
1401 * in target_to_host_cmsg, which is a QEMU bug.
1403 if (msg_controllen
< sizeof(struct cmsghdr
)) {
1404 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1408 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1409 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1411 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1413 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1415 tgt_len
= TARGET_CMSG_LEN(len
);
1417 /* Payload types which need a different size of payload on
1418 * the target must adjust tgt_len here.
1420 switch (cmsg
->cmsg_level
) {
1422 switch (cmsg
->cmsg_type
) {
1424 tgt_len
= sizeof(struct target_timeval
);
1433 if (msg_controllen
< tgt_len
) {
1434 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1435 tgt_len
= msg_controllen
;
1438 /* We must now copy-and-convert len bytes of payload
1439 * into tgt_len bytes of destination space. Bear in mind
1440 * that in both source and destination we may be dealing
1441 * with a truncated value!
1443 switch (cmsg
->cmsg_level
) {
1445 switch (cmsg
->cmsg_type
) {
1448 int *fd
= (int *)data
;
1449 int *target_fd
= (int *)target_data
;
1450 int i
, numfds
= tgt_len
/ sizeof(int);
1452 for (i
= 0; i
< numfds
; i
++) {
1453 __put_user(fd
[i
], target_fd
+ i
);
1459 struct timeval
*tv
= (struct timeval
*)data
;
1460 struct target_timeval
*target_tv
=
1461 (struct target_timeval
*)target_data
;
1463 if (len
!= sizeof(struct timeval
) ||
1464 tgt_len
!= sizeof(struct target_timeval
)) {
1468 /* copy struct timeval to target */
1469 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1470 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1473 case SCM_CREDENTIALS
:
1475 struct ucred
*cred
= (struct ucred
*)data
;
1476 struct target_ucred
*target_cred
=
1477 (struct target_ucred
*)target_data
;
1479 __put_user(cred
->pid
, &target_cred
->pid
);
1480 __put_user(cred
->uid
, &target_cred
->uid
);
1481 __put_user(cred
->gid
, &target_cred
->gid
);
1491 gemu_log("Unsupported ancillary data: %d/%d\n",
1492 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1493 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1494 if (tgt_len
> len
) {
1495 memset(target_data
+ len
, 0, tgt_len
- len
);
1499 target_cmsg
->cmsg_len
= tswapal(tgt_len
);
1500 tgt_space
= TARGET_CMSG_SPACE(len
);
1501 if (msg_controllen
< tgt_space
) {
1502 tgt_space
= msg_controllen
;
1504 msg_controllen
-= tgt_space
;
1506 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1507 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1510 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1512 target_msgh
->msg_controllen
= tswapal(space
);
1516 /* do_setsockopt() Must return target values and target errnos. */
1517 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
1518 abi_ulong optval_addr
, socklen_t optlen
)
1522 struct ip_mreqn
*ip_mreq
;
1523 struct ip_mreq_source
*ip_mreq_source
;
1527 /* TCP options all take an 'int' value. */
1528 if (optlen
< sizeof(uint32_t))
1529 return -TARGET_EINVAL
;
1531 if (get_user_u32(val
, optval_addr
))
1532 return -TARGET_EFAULT
;
1533 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1540 case IP_ROUTER_ALERT
:
1544 case IP_MTU_DISCOVER
:
1550 case IP_MULTICAST_TTL
:
1551 case IP_MULTICAST_LOOP
:
1553 if (optlen
>= sizeof(uint32_t)) {
1554 if (get_user_u32(val
, optval_addr
))
1555 return -TARGET_EFAULT
;
1556 } else if (optlen
>= 1) {
1557 if (get_user_u8(val
, optval_addr
))
1558 return -TARGET_EFAULT
;
1560 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1562 case IP_ADD_MEMBERSHIP
:
1563 case IP_DROP_MEMBERSHIP
:
1564 if (optlen
< sizeof (struct target_ip_mreq
) ||
1565 optlen
> sizeof (struct target_ip_mreqn
))
1566 return -TARGET_EINVAL
;
1568 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
1569 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
1570 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
1573 case IP_BLOCK_SOURCE
:
1574 case IP_UNBLOCK_SOURCE
:
1575 case IP_ADD_SOURCE_MEMBERSHIP
:
1576 case IP_DROP_SOURCE_MEMBERSHIP
:
1577 if (optlen
!= sizeof (struct target_ip_mreq_source
))
1578 return -TARGET_EINVAL
;
1580 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
1581 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
1582 unlock_user (ip_mreq_source
, optval_addr
, 0);
1591 case IPV6_MTU_DISCOVER
:
1594 case IPV6_RECVPKTINFO
:
1596 if (optlen
< sizeof(uint32_t)) {
1597 return -TARGET_EINVAL
;
1599 if (get_user_u32(val
, optval_addr
)) {
1600 return -TARGET_EFAULT
;
1602 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1603 &val
, sizeof(val
)));
1612 /* struct icmp_filter takes an u32 value */
1613 if (optlen
< sizeof(uint32_t)) {
1614 return -TARGET_EINVAL
;
1617 if (get_user_u32(val
, optval_addr
)) {
1618 return -TARGET_EFAULT
;
1620 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1621 &val
, sizeof(val
)));
1628 case TARGET_SOL_SOCKET
:
1630 case TARGET_SO_RCVTIMEO
:
1634 optname
= SO_RCVTIMEO
;
1637 if (optlen
!= sizeof(struct target_timeval
)) {
1638 return -TARGET_EINVAL
;
1641 if (copy_from_user_timeval(&tv
, optval_addr
)) {
1642 return -TARGET_EFAULT
;
1645 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
1649 case TARGET_SO_SNDTIMEO
:
1650 optname
= SO_SNDTIMEO
;
1652 case TARGET_SO_ATTACH_FILTER
:
1654 struct target_sock_fprog
*tfprog
;
1655 struct target_sock_filter
*tfilter
;
1656 struct sock_fprog fprog
;
1657 struct sock_filter
*filter
;
1660 if (optlen
!= sizeof(*tfprog
)) {
1661 return -TARGET_EINVAL
;
1663 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
1664 return -TARGET_EFAULT
;
1666 if (!lock_user_struct(VERIFY_READ
, tfilter
,
1667 tswapal(tfprog
->filter
), 0)) {
1668 unlock_user_struct(tfprog
, optval_addr
, 1);
1669 return -TARGET_EFAULT
;
1672 fprog
.len
= tswap16(tfprog
->len
);
1673 filter
= g_try_new(struct sock_filter
, fprog
.len
);
1674 if (filter
== NULL
) {
1675 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
1676 unlock_user_struct(tfprog
, optval_addr
, 1);
1677 return -TARGET_ENOMEM
;
1679 for (i
= 0; i
< fprog
.len
; i
++) {
1680 filter
[i
].code
= tswap16(tfilter
[i
].code
);
1681 filter
[i
].jt
= tfilter
[i
].jt
;
1682 filter
[i
].jf
= tfilter
[i
].jf
;
1683 filter
[i
].k
= tswap32(tfilter
[i
].k
);
1685 fprog
.filter
= filter
;
1687 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
1688 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
1691 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
1692 unlock_user_struct(tfprog
, optval_addr
, 1);
1695 case TARGET_SO_BINDTODEVICE
:
1697 char *dev_ifname
, *addr_ifname
;
1699 if (optlen
> IFNAMSIZ
- 1) {
1700 optlen
= IFNAMSIZ
- 1;
1702 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
1704 return -TARGET_EFAULT
;
1706 optname
= SO_BINDTODEVICE
;
1707 addr_ifname
= alloca(IFNAMSIZ
);
1708 memcpy(addr_ifname
, dev_ifname
, optlen
);
1709 addr_ifname
[optlen
] = 0;
1710 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
1711 addr_ifname
, optlen
));
1712 unlock_user (dev_ifname
, optval_addr
, 0);
1715 /* Options with 'int' argument. */
1716 case TARGET_SO_DEBUG
:
1719 case TARGET_SO_REUSEADDR
:
1720 optname
= SO_REUSEADDR
;
1722 case TARGET_SO_TYPE
:
1725 case TARGET_SO_ERROR
:
1728 case TARGET_SO_DONTROUTE
:
1729 optname
= SO_DONTROUTE
;
1731 case TARGET_SO_BROADCAST
:
1732 optname
= SO_BROADCAST
;
1734 case TARGET_SO_SNDBUF
:
1735 optname
= SO_SNDBUF
;
1737 case TARGET_SO_SNDBUFFORCE
:
1738 optname
= SO_SNDBUFFORCE
;
1740 case TARGET_SO_RCVBUF
:
1741 optname
= SO_RCVBUF
;
1743 case TARGET_SO_RCVBUFFORCE
:
1744 optname
= SO_RCVBUFFORCE
;
1746 case TARGET_SO_KEEPALIVE
:
1747 optname
= SO_KEEPALIVE
;
1749 case TARGET_SO_OOBINLINE
:
1750 optname
= SO_OOBINLINE
;
1752 case TARGET_SO_NO_CHECK
:
1753 optname
= SO_NO_CHECK
;
1755 case TARGET_SO_PRIORITY
:
1756 optname
= SO_PRIORITY
;
1759 case TARGET_SO_BSDCOMPAT
:
1760 optname
= SO_BSDCOMPAT
;
1763 case TARGET_SO_PASSCRED
:
1764 optname
= SO_PASSCRED
;
1766 case TARGET_SO_PASSSEC
:
1767 optname
= SO_PASSSEC
;
1769 case TARGET_SO_TIMESTAMP
:
1770 optname
= SO_TIMESTAMP
;
1772 case TARGET_SO_RCVLOWAT
:
1773 optname
= SO_RCVLOWAT
;
1779 if (optlen
< sizeof(uint32_t))
1780 return -TARGET_EINVAL
;
1782 if (get_user_u32(val
, optval_addr
))
1783 return -TARGET_EFAULT
;
1784 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
1788 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
1789 ret
= -TARGET_ENOPROTOOPT
;
1794 /* do_getsockopt() Must return target values and target errnos. */
1795 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
1796 abi_ulong optval_addr
, abi_ulong optlen
)
1803 case TARGET_SOL_SOCKET
:
1806 /* These don't just return a single integer */
1807 case TARGET_SO_LINGER
:
1808 case TARGET_SO_RCVTIMEO
:
1809 case TARGET_SO_SNDTIMEO
:
1810 case TARGET_SO_PEERNAME
:
1812 case TARGET_SO_PEERCRED
: {
1815 struct target_ucred
*tcr
;
1817 if (get_user_u32(len
, optlen
)) {
1818 return -TARGET_EFAULT
;
1821 return -TARGET_EINVAL
;
1825 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
1833 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
1834 return -TARGET_EFAULT
;
1836 __put_user(cr
.pid
, &tcr
->pid
);
1837 __put_user(cr
.uid
, &tcr
->uid
);
1838 __put_user(cr
.gid
, &tcr
->gid
);
1839 unlock_user_struct(tcr
, optval_addr
, 1);
1840 if (put_user_u32(len
, optlen
)) {
1841 return -TARGET_EFAULT
;
1845 /* Options with 'int' argument. */
1846 case TARGET_SO_DEBUG
:
1849 case TARGET_SO_REUSEADDR
:
1850 optname
= SO_REUSEADDR
;
1852 case TARGET_SO_TYPE
:
1855 case TARGET_SO_ERROR
:
1858 case TARGET_SO_DONTROUTE
:
1859 optname
= SO_DONTROUTE
;
1861 case TARGET_SO_BROADCAST
:
1862 optname
= SO_BROADCAST
;
1864 case TARGET_SO_SNDBUF
:
1865 optname
= SO_SNDBUF
;
1867 case TARGET_SO_RCVBUF
:
1868 optname
= SO_RCVBUF
;
1870 case TARGET_SO_KEEPALIVE
:
1871 optname
= SO_KEEPALIVE
;
1873 case TARGET_SO_OOBINLINE
:
1874 optname
= SO_OOBINLINE
;
1876 case TARGET_SO_NO_CHECK
:
1877 optname
= SO_NO_CHECK
;
1879 case TARGET_SO_PRIORITY
:
1880 optname
= SO_PRIORITY
;
1883 case TARGET_SO_BSDCOMPAT
:
1884 optname
= SO_BSDCOMPAT
;
1887 case TARGET_SO_PASSCRED
:
1888 optname
= SO_PASSCRED
;
1890 case TARGET_SO_TIMESTAMP
:
1891 optname
= SO_TIMESTAMP
;
1893 case TARGET_SO_RCVLOWAT
:
1894 optname
= SO_RCVLOWAT
;
1896 case TARGET_SO_ACCEPTCONN
:
1897 optname
= SO_ACCEPTCONN
;
1904 /* TCP options all take an 'int' value. */
1906 if (get_user_u32(len
, optlen
))
1907 return -TARGET_EFAULT
;
1909 return -TARGET_EINVAL
;
1911 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1914 if (optname
== SO_TYPE
) {
1915 val
= host_to_target_sock_type(val
);
1920 if (put_user_u32(val
, optval_addr
))
1921 return -TARGET_EFAULT
;
1923 if (put_user_u8(val
, optval_addr
))
1924 return -TARGET_EFAULT
;
1926 if (put_user_u32(len
, optlen
))
1927 return -TARGET_EFAULT
;
1934 case IP_ROUTER_ALERT
:
1938 case IP_MTU_DISCOVER
:
1944 case IP_MULTICAST_TTL
:
1945 case IP_MULTICAST_LOOP
:
1946 if (get_user_u32(len
, optlen
))
1947 return -TARGET_EFAULT
;
1949 return -TARGET_EINVAL
;
1951 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1954 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
1956 if (put_user_u32(len
, optlen
)
1957 || put_user_u8(val
, optval_addr
))
1958 return -TARGET_EFAULT
;
1960 if (len
> sizeof(int))
1962 if (put_user_u32(len
, optlen
)
1963 || put_user_u32(val
, optval_addr
))
1964 return -TARGET_EFAULT
;
1968 ret
= -TARGET_ENOPROTOOPT
;
1974 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1976 ret
= -TARGET_EOPNOTSUPP
;
1982 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
1983 int count
, int copy
)
1985 struct target_iovec
*target_vec
;
1987 abi_ulong total_len
, max_len
;
1990 bool bad_address
= false;
1996 if (count
< 0 || count
> IOV_MAX
) {
2001 vec
= g_try_new0(struct iovec
, count
);
2007 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2008 count
* sizeof(struct target_iovec
), 1);
2009 if (target_vec
== NULL
) {
2014 /* ??? If host page size > target page size, this will result in a
2015 value larger than what we can actually support. */
2016 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
2019 for (i
= 0; i
< count
; i
++) {
2020 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2021 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2026 } else if (len
== 0) {
2027 /* Zero length pointer is ignored. */
2028 vec
[i
].iov_base
= 0;
2030 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
2031 /* If the first buffer pointer is bad, this is a fault. But
2032 * subsequent bad buffers will result in a partial write; this
2033 * is realized by filling the vector with null pointers and
2035 if (!vec
[i
].iov_base
) {
2046 if (len
> max_len
- total_len
) {
2047 len
= max_len
- total_len
;
2050 vec
[i
].iov_len
= len
;
2054 unlock_user(target_vec
, target_addr
, 0);
2059 if (tswapal(target_vec
[i
].iov_len
) > 0) {
2060 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
2063 unlock_user(target_vec
, target_addr
, 0);
2070 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
2071 int count
, int copy
)
2073 struct target_iovec
*target_vec
;
2076 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2077 count
* sizeof(struct target_iovec
), 1);
2079 for (i
= 0; i
< count
; i
++) {
2080 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2081 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2085 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
2087 unlock_user(target_vec
, target_addr
, 0);
2093 static inline int target_to_host_sock_type(int *type
)
2096 int target_type
= *type
;
2098 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
2099 case TARGET_SOCK_DGRAM
:
2100 host_type
= SOCK_DGRAM
;
2102 case TARGET_SOCK_STREAM
:
2103 host_type
= SOCK_STREAM
;
2106 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
2109 if (target_type
& TARGET_SOCK_CLOEXEC
) {
2110 #if defined(SOCK_CLOEXEC)
2111 host_type
|= SOCK_CLOEXEC
;
2113 return -TARGET_EINVAL
;
2116 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2117 #if defined(SOCK_NONBLOCK)
2118 host_type
|= SOCK_NONBLOCK
;
2119 #elif !defined(O_NONBLOCK)
2120 return -TARGET_EINVAL
;
2127 /* Try to emulate socket type flags after socket creation. */
2128 static int sock_flags_fixup(int fd
, int target_type
)
2130 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2131 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2132 int flags
= fcntl(fd
, F_GETFL
);
2133 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
2135 return -TARGET_EINVAL
;
2142 static abi_long
packet_target_to_host_sockaddr(void *host_addr
,
2143 abi_ulong target_addr
,
2146 struct sockaddr
*addr
= host_addr
;
2147 struct target_sockaddr
*target_saddr
;
2149 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
2150 if (!target_saddr
) {
2151 return -TARGET_EFAULT
;
2154 memcpy(addr
, target_saddr
, len
);
2155 addr
->sa_family
= tswap16(target_saddr
->sa_family
);
2156 /* spkt_protocol is big-endian */
2158 unlock_user(target_saddr
, target_addr
, 0);
2162 static TargetFdTrans target_packet_trans
= {
2163 .target_to_host_addr
= packet_target_to_host_sockaddr
,
2166 /* do_socket() Must return target values and target errnos. */
2167 static abi_long
do_socket(int domain
, int type
, int protocol
)
2169 int target_type
= type
;
2172 ret
= target_to_host_sock_type(&type
);
2177 if (domain
== PF_NETLINK
)
2178 return -TARGET_EAFNOSUPPORT
;
2180 if (domain
== AF_PACKET
||
2181 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
2182 protocol
= tswap16(protocol
);
2185 ret
= get_errno(socket(domain
, type
, protocol
));
2187 ret
= sock_flags_fixup(ret
, target_type
);
2188 if (type
== SOCK_PACKET
) {
2189 /* Manage an obsolete case :
2190 * if socket type is SOCK_PACKET, bind by name
2192 fd_trans_register(ret
, &target_packet_trans
);
2198 /* do_bind() Must return target values and target errnos. */
2199 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
2205 if ((int)addrlen
< 0) {
2206 return -TARGET_EINVAL
;
2209 addr
= alloca(addrlen
+1);
2211 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
2215 return get_errno(bind(sockfd
, addr
, addrlen
));
2218 /* do_connect() Must return target values and target errnos. */
2219 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
2225 if ((int)addrlen
< 0) {
2226 return -TARGET_EINVAL
;
2229 addr
= alloca(addrlen
+1);
2231 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
2235 return get_errno(connect(sockfd
, addr
, addrlen
));
2238 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2239 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
2240 int flags
, int send
)
2246 abi_ulong target_vec
;
2248 if (msgp
->msg_name
) {
2249 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
2250 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
2251 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
2252 tswapal(msgp
->msg_name
),
2258 msg
.msg_name
= NULL
;
2259 msg
.msg_namelen
= 0;
2261 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
2262 msg
.msg_control
= alloca(msg
.msg_controllen
);
2263 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
2265 count
= tswapal(msgp
->msg_iovlen
);
2266 target_vec
= tswapal(msgp
->msg_iov
);
2267 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
2268 target_vec
, count
, send
);
2270 ret
= -host_to_target_errno(errno
);
2273 msg
.msg_iovlen
= count
;
2277 ret
= target_to_host_cmsg(&msg
, msgp
);
2279 ret
= get_errno(sendmsg(fd
, &msg
, flags
));
2281 ret
= get_errno(recvmsg(fd
, &msg
, flags
));
2282 if (!is_error(ret
)) {
2284 ret
= host_to_target_cmsg(msgp
, &msg
);
2285 if (!is_error(ret
)) {
2286 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
2287 if (msg
.msg_name
!= NULL
) {
2288 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
2289 msg
.msg_name
, msg
.msg_namelen
);
2301 unlock_iovec(vec
, target_vec
, count
, !send
);
2306 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
2307 int flags
, int send
)
2310 struct target_msghdr
*msgp
;
2312 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
2316 return -TARGET_EFAULT
;
2318 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
2319 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
2323 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2324 * so it might not have this *mmsg-specific flag either.
2326 #ifndef MSG_WAITFORONE
2327 #define MSG_WAITFORONE 0x10000
2330 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
2331 unsigned int vlen
, unsigned int flags
,
2334 struct target_mmsghdr
*mmsgp
;
2338 if (vlen
> UIO_MAXIOV
) {
2342 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
2344 return -TARGET_EFAULT
;
2347 for (i
= 0; i
< vlen
; i
++) {
2348 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
2349 if (is_error(ret
)) {
2352 mmsgp
[i
].msg_len
= tswap32(ret
);
2353 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2354 if (flags
& MSG_WAITFORONE
) {
2355 flags
|= MSG_DONTWAIT
;
2359 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
2361 /* Return number of datagrams sent if we sent any at all;
2362 * otherwise return the error.
/* If we don't have a system accept4() then just call accept.
 * The callsites to do_accept4() will ensure that they don't
 * pass a non-zero flags argument in this config.
 */
#ifndef CONFIG_ACCEPT4
static inline int accept4(int sockfd, struct sockaddr *addr,
                          socklen_t *addrlen, int flags)
{
    assert(flags == 0);
    return accept(sockfd, addr, addrlen);
}
#endif
2383 /* do_accept4() Must return target values and target errnos. */
2384 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
2385 abi_ulong target_addrlen_addr
, int flags
)
2392 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
2394 if (target_addr
== 0) {
2395 return get_errno(accept4(fd
, NULL
, NULL
, host_flags
));
2398 /* linux returns EINVAL if addrlen pointer is invalid */
2399 if (get_user_u32(addrlen
, target_addrlen_addr
))
2400 return -TARGET_EINVAL
;
2402 if ((int)addrlen
< 0) {
2403 return -TARGET_EINVAL
;
2406 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2407 return -TARGET_EINVAL
;
2409 addr
= alloca(addrlen
);
2411 ret
= get_errno(accept4(fd
, addr
, &addrlen
, host_flags
));
2412 if (!is_error(ret
)) {
2413 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2414 if (put_user_u32(addrlen
, target_addrlen_addr
))
2415 ret
= -TARGET_EFAULT
;
2420 /* do_getpeername() Must return target values and target errnos. */
2421 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
2422 abi_ulong target_addrlen_addr
)
2428 if (get_user_u32(addrlen
, target_addrlen_addr
))
2429 return -TARGET_EFAULT
;
2431 if ((int)addrlen
< 0) {
2432 return -TARGET_EINVAL
;
2435 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2436 return -TARGET_EFAULT
;
2438 addr
= alloca(addrlen
);
2440 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
2441 if (!is_error(ret
)) {
2442 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2443 if (put_user_u32(addrlen
, target_addrlen_addr
))
2444 ret
= -TARGET_EFAULT
;
2449 /* do_getsockname() Must return target values and target errnos. */
2450 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
2451 abi_ulong target_addrlen_addr
)
2457 if (get_user_u32(addrlen
, target_addrlen_addr
))
2458 return -TARGET_EFAULT
;
2460 if ((int)addrlen
< 0) {
2461 return -TARGET_EINVAL
;
2464 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2465 return -TARGET_EFAULT
;
2467 addr
= alloca(addrlen
);
2469 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
2470 if (!is_error(ret
)) {
2471 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2472 if (put_user_u32(addrlen
, target_addrlen_addr
))
2473 ret
= -TARGET_EFAULT
;
2478 /* do_socketpair() Must return target values and target errnos. */
2479 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
2480 abi_ulong target_tab_addr
)
2485 target_to_host_sock_type(&type
);
2487 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
2488 if (!is_error(ret
)) {
2489 if (put_user_s32(tab
[0], target_tab_addr
)
2490 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
2491 ret
= -TARGET_EFAULT
;
2496 /* do_sendto() Must return target values and target errnos. */
2497 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
2498 abi_ulong target_addr
, socklen_t addrlen
)
2504 if ((int)addrlen
< 0) {
2505 return -TARGET_EINVAL
;
2508 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
2510 return -TARGET_EFAULT
;
2512 addr
= alloca(addrlen
+1);
2513 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
2515 unlock_user(host_msg
, msg
, 0);
2518 ret
= get_errno(sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
2520 ret
= get_errno(send(fd
, host_msg
, len
, flags
));
2522 unlock_user(host_msg
, msg
, 0);
2526 /* do_recvfrom() Must return target values and target errnos. */
2527 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
2528 abi_ulong target_addr
,
2529 abi_ulong target_addrlen
)
2536 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
2538 return -TARGET_EFAULT
;
2540 if (get_user_u32(addrlen
, target_addrlen
)) {
2541 ret
= -TARGET_EFAULT
;
2544 if ((int)addrlen
< 0) {
2545 ret
= -TARGET_EINVAL
;
2548 addr
= alloca(addrlen
);
2549 ret
= get_errno(recvfrom(fd
, host_msg
, len
, flags
, addr
, &addrlen
));
2551 addr
= NULL
; /* To keep compiler quiet. */
2552 ret
= get_errno(qemu_recv(fd
, host_msg
, len
, flags
));
2554 if (!is_error(ret
)) {
2556 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2557 if (put_user_u32(addrlen
, target_addrlen
)) {
2558 ret
= -TARGET_EFAULT
;
2562 unlock_user(host_msg
, msg
, len
);
2565 unlock_user(host_msg
, msg
, 0);
2570 #ifdef TARGET_NR_socketcall
2571 /* do_socketcall() Must return target values and target errnos. */
2572 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
2574 static const unsigned ac
[] = { /* number of arguments per call */
2575 [SOCKOP_socket
] = 3, /* domain, type, protocol */
2576 [SOCKOP_bind
] = 3, /* sockfd, addr, addrlen */
2577 [SOCKOP_connect
] = 3, /* sockfd, addr, addrlen */
2578 [SOCKOP_listen
] = 2, /* sockfd, backlog */
2579 [SOCKOP_accept
] = 3, /* sockfd, addr, addrlen */
2580 [SOCKOP_accept4
] = 4, /* sockfd, addr, addrlen, flags */
2581 [SOCKOP_getsockname
] = 3, /* sockfd, addr, addrlen */
2582 [SOCKOP_getpeername
] = 3, /* sockfd, addr, addrlen */
2583 [SOCKOP_socketpair
] = 4, /* domain, type, protocol, tab */
2584 [SOCKOP_send
] = 4, /* sockfd, msg, len, flags */
2585 [SOCKOP_recv
] = 4, /* sockfd, msg, len, flags */
2586 [SOCKOP_sendto
] = 6, /* sockfd, msg, len, flags, addr, addrlen */
2587 [SOCKOP_recvfrom
] = 6, /* sockfd, msg, len, flags, addr, addrlen */
2588 [SOCKOP_shutdown
] = 2, /* sockfd, how */
2589 [SOCKOP_sendmsg
] = 3, /* sockfd, msg, flags */
2590 [SOCKOP_recvmsg
] = 3, /* sockfd, msg, flags */
2591 [SOCKOP_sendmmsg
] = 4, /* sockfd, msgvec, vlen, flags */
2592 [SOCKOP_recvmmsg
] = 4, /* sockfd, msgvec, vlen, flags */
2593 [SOCKOP_setsockopt
] = 5, /* sockfd, level, optname, optval, optlen */
2594 [SOCKOP_getsockopt
] = 5, /* sockfd, level, optname, optval, optlen */
2596 abi_long a
[6]; /* max 6 args */
2598 /* first, collect the arguments in a[] according to ac[] */
2599 if (num
>= 0 && num
< ARRAY_SIZE(ac
)) {
2601 assert(ARRAY_SIZE(a
) >= ac
[num
]); /* ensure we have space for args */
2602 for (i
= 0; i
< ac
[num
]; ++i
) {
2603 if (get_user_ual(a
[i
], vptr
+ i
* sizeof(abi_long
)) != 0) {
2604 return -TARGET_EFAULT
;
2609 /* now when we have the args, actually handle the call */
2611 case SOCKOP_socket
: /* domain, type, protocol */
2612 return do_socket(a
[0], a
[1], a
[2]);
2613 case SOCKOP_bind
: /* sockfd, addr, addrlen */
2614 return do_bind(a
[0], a
[1], a
[2]);
2615 case SOCKOP_connect
: /* sockfd, addr, addrlen */
2616 return do_connect(a
[0], a
[1], a
[2]);
2617 case SOCKOP_listen
: /* sockfd, backlog */
2618 return get_errno(listen(a
[0], a
[1]));
2619 case SOCKOP_accept
: /* sockfd, addr, addrlen */
2620 return do_accept4(a
[0], a
[1], a
[2], 0);
2621 case SOCKOP_accept4
: /* sockfd, addr, addrlen, flags */
2622 return do_accept4(a
[0], a
[1], a
[2], a
[3]);
2623 case SOCKOP_getsockname
: /* sockfd, addr, addrlen */
2624 return do_getsockname(a
[0], a
[1], a
[2]);
2625 case SOCKOP_getpeername
: /* sockfd, addr, addrlen */
2626 return do_getpeername(a
[0], a
[1], a
[2]);
2627 case SOCKOP_socketpair
: /* domain, type, protocol, tab */
2628 return do_socketpair(a
[0], a
[1], a
[2], a
[3]);
2629 case SOCKOP_send
: /* sockfd, msg, len, flags */
2630 return do_sendto(a
[0], a
[1], a
[2], a
[3], 0, 0);
2631 case SOCKOP_recv
: /* sockfd, msg, len, flags */
2632 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], 0, 0);
2633 case SOCKOP_sendto
: /* sockfd, msg, len, flags, addr, addrlen */
2634 return do_sendto(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
2635 case SOCKOP_recvfrom
: /* sockfd, msg, len, flags, addr, addrlen */
2636 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
2637 case SOCKOP_shutdown
: /* sockfd, how */
2638 return get_errno(shutdown(a
[0], a
[1]));
2639 case SOCKOP_sendmsg
: /* sockfd, msg, flags */
2640 return do_sendrecvmsg(a
[0], a
[1], a
[2], 1);
2641 case SOCKOP_recvmsg
: /* sockfd, msg, flags */
2642 return do_sendrecvmsg(a
[0], a
[1], a
[2], 0);
2643 case SOCKOP_sendmmsg
: /* sockfd, msgvec, vlen, flags */
2644 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 1);
2645 case SOCKOP_recvmmsg
: /* sockfd, msgvec, vlen, flags */
2646 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 0);
2647 case SOCKOP_setsockopt
: /* sockfd, level, optname, optval, optlen */
2648 return do_setsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
2649 case SOCKOP_getsockopt
: /* sockfd, level, optname, optval, optlen */
2650 return do_getsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
2652 gemu_log("Unsupported socketcall: %d\n", num
);
2653 return -TARGET_ENOSYS
;
2658 #define N_SHM_REGIONS 32
/* Bookkeeping table for guest shmat() attachments: do_shmat() records
 * each mapping here and do_shmdt() looks it up to clear page flags.
 * (The member declarations — start/size/in_use, used below — fall on
 * lines not visible in this extracted view.) */
2660 static struct shm_region
{
2664 } shm_regions
[N_SHM_REGIONS
];
/* Guest-ABI image of struct semid_ds.  Layout must match the guest
 * kernel exactly; the !TARGET_PPC64 pads mirror the 32-bit-time ABI
 * padding words.  (Opening brace and #endif lines are outside this
 * extracted view.) */
2666 struct target_semid_ds
2668 struct target_ipc_perm sem_perm
;
/* time of last semop() */
2669 abi_ulong sem_otime
;
2670 #if !defined(TARGET_PPC64)
2671 abi_ulong __unused1
;
/* time of last change */
2673 abi_ulong sem_ctime
;
2674 #if !defined(TARGET_PPC64)
2675 abi_ulong __unused2
;
/* number of semaphores in the set */
2677 abi_ulong sem_nsems
;
2678 abi_ulong __unused3
;
2679 abi_ulong __unused4
;
2682 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
2683 abi_ulong target_addr
)
2685 struct target_ipc_perm
*target_ip
;
2686 struct target_semid_ds
*target_sd
;
2688 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2689 return -TARGET_EFAULT
;
2690 target_ip
= &(target_sd
->sem_perm
);
2691 host_ip
->__key
= tswap32(target_ip
->__key
);
2692 host_ip
->uid
= tswap32(target_ip
->uid
);
2693 host_ip
->gid
= tswap32(target_ip
->gid
);
2694 host_ip
->cuid
= tswap32(target_ip
->cuid
);
2695 host_ip
->cgid
= tswap32(target_ip
->cgid
);
2696 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2697 host_ip
->mode
= tswap32(target_ip
->mode
);
2699 host_ip
->mode
= tswap16(target_ip
->mode
);
2701 #if defined(TARGET_PPC)
2702 host_ip
->__seq
= tswap32(target_ip
->__seq
);
2704 host_ip
->__seq
= tswap16(target_ip
->__seq
);
2706 unlock_user_struct(target_sd
, target_addr
, 0);
2710 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
2711 struct ipc_perm
*host_ip
)
2713 struct target_ipc_perm
*target_ip
;
2714 struct target_semid_ds
*target_sd
;
2716 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2717 return -TARGET_EFAULT
;
2718 target_ip
= &(target_sd
->sem_perm
);
2719 target_ip
->__key
= tswap32(host_ip
->__key
);
2720 target_ip
->uid
= tswap32(host_ip
->uid
);
2721 target_ip
->gid
= tswap32(host_ip
->gid
);
2722 target_ip
->cuid
= tswap32(host_ip
->cuid
);
2723 target_ip
->cgid
= tswap32(host_ip
->cgid
);
2724 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2725 target_ip
->mode
= tswap32(host_ip
->mode
);
2727 target_ip
->mode
= tswap16(host_ip
->mode
);
2729 #if defined(TARGET_PPC)
2730 target_ip
->__seq
= tswap32(host_ip
->__seq
);
2732 target_ip
->__seq
= tswap16(host_ip
->__seq
);
2734 unlock_user_struct(target_sd
, target_addr
, 1);
2738 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
2739 abi_ulong target_addr
)
2741 struct target_semid_ds
*target_sd
;
2743 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2744 return -TARGET_EFAULT
;
2745 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
2746 return -TARGET_EFAULT
;
2747 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
2748 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
2749 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
2750 unlock_user_struct(target_sd
, target_addr
, 0);
2754 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
2755 struct semid_ds
*host_sd
)
2757 struct target_semid_ds
*target_sd
;
2759 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2760 return -TARGET_EFAULT
;
2761 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
2762 return -TARGET_EFAULT
;
2763 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
2764 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
2765 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
2766 unlock_user_struct(target_sd
, target_addr
, 1);
/* Guest-ABI image of struct seminfo (semctl IPC_INFO/SEM_INFO result).
 * The field list (semmap..semaem, written by host_to_target_seminfo()
 * below) falls on lines not visible in this extracted view. */
2770 struct target_seminfo
{
2783 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
2784 struct seminfo
*host_seminfo
)
2786 struct target_seminfo
*target_seminfo
;
2787 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
2788 return -TARGET_EFAULT
;
2789 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
2790 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
2791 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
2792 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
2793 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
2794 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
2795 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
2796 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
2797 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
2798 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
2799 unlock_user_struct(target_seminfo
, target_addr
, 1);
/* Members of the host `union semun` (the fourth semctl() argument;
 * glibc requires the caller to declare it).  The union's opening line
 * and `int val;` member are outside this extracted view. */
2805 struct semid_ds
*buf
;
2806 unsigned short *array
;
2807 struct seminfo
*__buf
;
/* Guest-ABI counterpart: same members but as abi_ulong guest pointers.
 * Its member list is outside this extracted view. */
2810 union target_semun
{
2817 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
2818 abi_ulong target_addr
)
2821 unsigned short *array
;
2823 struct semid_ds semid_ds
;
2826 semun
.buf
= &semid_ds
;
2828 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
2830 return get_errno(ret
);
2832 nsems
= semid_ds
.sem_nsems
;
2834 *host_array
= g_try_new(unsigned short, nsems
);
2836 return -TARGET_ENOMEM
;
2838 array
= lock_user(VERIFY_READ
, target_addr
,
2839 nsems
*sizeof(unsigned short), 1);
2841 g_free(*host_array
);
2842 return -TARGET_EFAULT
;
2845 for(i
=0; i
<nsems
; i
++) {
2846 __get_user((*host_array
)[i
], &array
[i
]);
2848 unlock_user(array
, target_addr
, 0);
2853 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
2854 unsigned short **host_array
)
2857 unsigned short *array
;
2859 struct semid_ds semid_ds
;
2862 semun
.buf
= &semid_ds
;
2864 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
2866 return get_errno(ret
);
2868 nsems
= semid_ds
.sem_nsems
;
2870 array
= lock_user(VERIFY_WRITE
, target_addr
,
2871 nsems
*sizeof(unsigned short), 0);
2873 return -TARGET_EFAULT
;
2875 for(i
=0; i
<nsems
; i
++) {
2876 __put_user((*host_array
)[i
], &array
[i
]);
2878 g_free(*host_array
);
2879 unlock_user(array
, target_addr
, 1);
2884 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
2885 abi_ulong target_arg
)
2887 union target_semun target_su
= { .buf
= target_arg
};
2889 struct semid_ds dsarg
;
2890 unsigned short *array
= NULL
;
2891 struct seminfo seminfo
;
2892 abi_long ret
= -TARGET_EINVAL
;
2899 /* In 64 bit cross-endian situations, we will erroneously pick up
2900 * the wrong half of the union for the "val" element. To rectify
2901 * this, the entire 8-byte structure is byteswapped, followed by
2902 * a swap of the 4 byte val field. In other cases, the data is
2903 * already in proper host byte order. */
2904 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
2905 target_su
.buf
= tswapal(target_su
.buf
);
2906 arg
.val
= tswap32(target_su
.val
);
2908 arg
.val
= target_su
.val
;
2910 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2914 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
2918 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2919 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
2926 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
2930 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2931 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
2937 arg
.__buf
= &seminfo
;
2938 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2939 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
2947 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
/* Guest-ABI image of struct sembuf (one semop() operation).  The
 * sem_op/sem_flg members read by target_to_host_sembuf() below fall
 * on lines not visible in this extracted view. */
2954 struct target_sembuf
{
2955 unsigned short sem_num
;
2960 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
2961 abi_ulong target_addr
,
2964 struct target_sembuf
*target_sembuf
;
2967 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
2968 nsops
*sizeof(struct target_sembuf
), 1);
2970 return -TARGET_EFAULT
;
2972 for(i
=0; i
<nsops
; i
++) {
2973 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
2974 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
2975 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
2978 unlock_user(target_sembuf
, target_addr
, 0);
2983 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
2985 struct sembuf sops
[nsops
];
2987 if (target_to_host_sembuf(sops
, ptr
, nsops
))
2988 return -TARGET_EFAULT
;
2990 return get_errno(semop(semid
, sops
, nsops
));
/* Guest-ABI image of struct msqid_ds.  The TARGET_ABI_BITS==32 pads
 * mirror the 32-bit-time ABI padding words.  (Opening brace, #endif
 * lines and the msg_qnum member are outside this extracted view.) */
2993 struct target_msqid_ds
2995 struct target_ipc_perm msg_perm
;
/* last msgsnd() time */
2996 abi_ulong msg_stime
;
2997 #if TARGET_ABI_BITS == 32
2998 abi_ulong __unused1
;
/* last msgrcv() time */
3000 abi_ulong msg_rtime
;
3001 #if TARGET_ABI_BITS == 32
3002 abi_ulong __unused2
;
/* last change time */
3004 abi_ulong msg_ctime
;
3005 #if TARGET_ABI_BITS == 32
3006 abi_ulong __unused3
;
/* current number of bytes on queue */
3008 abi_ulong __msg_cbytes
;
/* max number of bytes allowed on queue */
3010 abi_ulong msg_qbytes
;
3011 abi_ulong msg_lspid
;
3012 abi_ulong msg_lrpid
;
3013 abi_ulong __unused4
;
3014 abi_ulong __unused5
;
3017 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
3018 abi_ulong target_addr
)
3020 struct target_msqid_ds
*target_md
;
3022 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
3023 return -TARGET_EFAULT
;
3024 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
3025 return -TARGET_EFAULT
;
3026 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
3027 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
3028 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
3029 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
3030 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
3031 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
3032 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
3033 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
3034 unlock_user_struct(target_md
, target_addr
, 0);
3038 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
3039 struct msqid_ds
*host_md
)
3041 struct target_msqid_ds
*target_md
;
3043 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
3044 return -TARGET_EFAULT
;
3045 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
3046 return -TARGET_EFAULT
;
3047 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
3048 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
3049 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
3050 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
3051 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
3052 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
3053 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
3054 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
3055 unlock_user_struct(target_md
, target_addr
, 1);
/* Guest-ABI image of struct msginfo (msgctl IPC_INFO/MSG_INFO result).
 * The leading int fields (msgpool..msgtql, written by
 * host_to_target_msginfo() below) are outside this extracted view. */
3059 struct target_msginfo
{
3067 unsigned short int msgseg
;
3070 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
3071 struct msginfo
*host_msginfo
)
3073 struct target_msginfo
*target_msginfo
;
3074 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
3075 return -TARGET_EFAULT
;
3076 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
3077 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
3078 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
3079 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
3080 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
3081 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
3082 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
3083 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
3084 unlock_user_struct(target_msginfo
, target_addr
, 1);
3088 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
3090 struct msqid_ds dsarg
;
3091 struct msginfo msginfo
;
3092 abi_long ret
= -TARGET_EINVAL
;
3100 if (target_to_host_msqid_ds(&dsarg
,ptr
))
3101 return -TARGET_EFAULT
;
3102 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
3103 if (host_to_target_msqid_ds(ptr
,&dsarg
))
3104 return -TARGET_EFAULT
;
3107 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
3111 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
3112 if (host_to_target_msginfo(ptr
, &msginfo
))
3113 return -TARGET_EFAULT
;
/* Guest-ABI image of struct msgbuf (mtype followed by mtext[], used
 * by do_msgsnd/do_msgrcv below).  Members are outside this extracted
 * view. */
3120 struct target_msgbuf
{
3125 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
3126 ssize_t msgsz
, int msgflg
)
3128 struct target_msgbuf
*target_mb
;
3129 struct msgbuf
*host_mb
;
3133 return -TARGET_EINVAL
;
3136 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
3137 return -TARGET_EFAULT
;
3138 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
3140 unlock_user_struct(target_mb
, msgp
, 0);
3141 return -TARGET_ENOMEM
;
3143 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
3144 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
3145 ret
= get_errno(msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
3147 unlock_user_struct(target_mb
, msgp
, 0);
3152 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
3153 unsigned int msgsz
, abi_long msgtyp
,
3156 struct target_msgbuf
*target_mb
;
3158 struct msgbuf
*host_mb
;
3161 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
3162 return -TARGET_EFAULT
;
3164 host_mb
= g_malloc(msgsz
+sizeof(long));
3165 ret
= get_errno(msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
3168 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
3169 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
3170 if (!target_mtext
) {
3171 ret
= -TARGET_EFAULT
;
3174 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
3175 unlock_user(target_mtext
, target_mtext_addr
, ret
);
3178 target_mb
->mtype
= tswapal(host_mb
->mtype
);
3182 unlock_user_struct(target_mb
, msgp
, 1);
3187 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
3188 abi_ulong target_addr
)
3190 struct target_shmid_ds
*target_sd
;
3192 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3193 return -TARGET_EFAULT
;
3194 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
3195 return -TARGET_EFAULT
;
3196 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
3197 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
3198 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
3199 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
3200 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
3201 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
3202 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
3203 unlock_user_struct(target_sd
, target_addr
, 0);
3207 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
3208 struct shmid_ds
*host_sd
)
3210 struct target_shmid_ds
*target_sd
;
3212 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3213 return -TARGET_EFAULT
;
3214 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
3215 return -TARGET_EFAULT
;
3216 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
3217 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
3218 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
3219 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
3220 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
3221 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
3222 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
3223 unlock_user_struct(target_sd
, target_addr
, 1);
/* Guest-ABI image of struct shminfo (shmctl IPC_INFO result).  The
 * abi_ulong fields (shmmax..shmall, written by host_to_target_shminfo()
 * below) are outside this extracted view. */
3227 struct target_shminfo
{
3235 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
3236 struct shminfo
*host_shminfo
)
3238 struct target_shminfo
*target_shminfo
;
3239 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
3240 return -TARGET_EFAULT
;
3241 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
3242 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
3243 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
3244 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
3245 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
3246 unlock_user_struct(target_shminfo
, target_addr
, 1);
/* Guest-ABI image of struct shm_info (shmctl SHM_INFO result).  The
 * used_ids/shm_tot/shm_rss/shm_swp members are outside this extracted
 * view. */
3250 struct target_shm_info
{
3255 abi_ulong swap_attempts
;
3256 abi_ulong swap_successes
;
3259 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
3260 struct shm_info
*host_shm_info
)
3262 struct target_shm_info
*target_shm_info
;
3263 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
3264 return -TARGET_EFAULT
;
3265 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
3266 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
3267 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
3268 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
3269 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
3270 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
3271 unlock_user_struct(target_shm_info
, target_addr
, 1);
3275 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
3277 struct shmid_ds dsarg
;
3278 struct shminfo shminfo
;
3279 struct shm_info shm_info
;
3280 abi_long ret
= -TARGET_EINVAL
;
3288 if (target_to_host_shmid_ds(&dsarg
, buf
))
3289 return -TARGET_EFAULT
;
3290 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
3291 if (host_to_target_shmid_ds(buf
, &dsarg
))
3292 return -TARGET_EFAULT
;
3295 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
3296 if (host_to_target_shminfo(buf
, &shminfo
))
3297 return -TARGET_EFAULT
;
3300 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
3301 if (host_to_target_shm_info(buf
, &shm_info
))
3302 return -TARGET_EFAULT
;
3307 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
3314 static inline abi_ulong
do_shmat(int shmid
, abi_ulong shmaddr
, int shmflg
)
3318 struct shmid_ds shm_info
;
3321 /* find out the length of the shared memory segment */
3322 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
3323 if (is_error(ret
)) {
3324 /* can't get length, bail out */
3331 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
3333 abi_ulong mmap_start
;
3335 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
3337 if (mmap_start
== -1) {
3339 host_raddr
= (void *)-1;
3341 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
3344 if (host_raddr
== (void *)-1) {
3346 return get_errno((long)host_raddr
);
3348 raddr
=h2g((unsigned long)host_raddr
);
3350 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
3351 PAGE_VALID
| PAGE_READ
|
3352 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
3354 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
3355 if (!shm_regions
[i
].in_use
) {
3356 shm_regions
[i
].in_use
= true;
3357 shm_regions
[i
].start
= raddr
;
3358 shm_regions
[i
].size
= shm_info
.shm_segsz
;
3368 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
3372 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
3373 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
3374 shm_regions
[i
].in_use
= false;
3375 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
3380 return get_errno(shmdt(g2h(shmaddr
)));
3383 #ifdef TARGET_NR_ipc
3384 /* ??? This only works with linear mappings. */
3385 /* do_ipc() must return target values and target errnos. */
3386 static abi_long
do_ipc(unsigned int call
, abi_long first
,
3387 abi_long second
, abi_long third
,
3388 abi_long ptr
, abi_long fifth
)
3393 version
= call
>> 16;
3398 ret
= do_semop(first
, ptr
, second
);
3402 ret
= get_errno(semget(first
, second
, third
));
3405 case IPCOP_semctl
: {
3406 /* The semun argument to semctl is passed by value, so dereference the
3409 get_user_ual(atptr
, ptr
);
3410 ret
= do_semctl(first
, second
, third
, atptr
);
3415 ret
= get_errno(msgget(first
, second
));
3419 ret
= do_msgsnd(first
, ptr
, second
, third
);
3423 ret
= do_msgctl(first
, second
, ptr
);
3430 struct target_ipc_kludge
{
3435 if (!lock_user_struct(VERIFY_READ
, tmp
, ptr
, 1)) {
3436 ret
= -TARGET_EFAULT
;
3440 ret
= do_msgrcv(first
, tswapal(tmp
->msgp
), second
, tswapal(tmp
->msgtyp
), third
);
3442 unlock_user_struct(tmp
, ptr
, 0);
3446 ret
= do_msgrcv(first
, ptr
, second
, fifth
, third
);
3455 raddr
= do_shmat(first
, ptr
, second
);
3456 if (is_error(raddr
))
3457 return get_errno(raddr
);
3458 if (put_user_ual(raddr
, third
))
3459 return -TARGET_EFAULT
;
3463 ret
= -TARGET_EINVAL
;
3468 ret
= do_shmdt(ptr
);
3472 /* IPC_* flag values are the same on all linux platforms */
3473 ret
= get_errno(shmget(first
, second
, third
));
3476 /* IPC_* and SHM_* command values are the same on all linux platforms */
3478 ret
= do_shmctl(first
, second
, ptr
);
3481 gemu_log("Unsupported ipc call: %d (version %d)\n", call
, version
);
3482 ret
= -TARGET_ENOSYS
;
3489 /* kernel structure types definitions */
3491 #define STRUCT(name, ...) STRUCT_ ## name,
3492 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3494 #include "syscall_types.h"
3498 #undef STRUCT_SPECIAL
3500 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3501 #define STRUCT_SPECIAL(name)
3502 #include "syscall_types.h"
3504 #undef STRUCT_SPECIAL
/* One row of the ioctl translation table: maps a target ioctl number
 * to the host number, an access mode (IOC_R/IOC_W/IOC_RW), an optional
 * custom handler, and the thunk type description of the argument.
 * (Other members — target_cmd, access, name — are outside this
 * extracted view.) */
3506 typedef struct IOCTLEntry IOCTLEntry
;
/* Signature of a custom per-ioctl handler. */
3508 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3509 int fd
, int cmd
, abi_long arg
);
/* host-side ioctl request number */
3513 unsigned int host_cmd
;
/* NULL for generic thunk-based conversion */
3516 do_ioctl_fn
*do_ioctl
;
/* thunk description of the ioctl argument */
3517 const argtype arg_type
[5];
3520 #define IOC_R 0x0001
3521 #define IOC_W 0x0002
3522 #define IOC_RW (IOC_R | IOC_W)
3524 #define MAX_STRUCT_SIZE 4096
3526 #ifdef CONFIG_FIEMAP
3527 /* So fiemap access checks don't overflow on 32 bit systems.
3528 * This is very slightly smaller than the limit imposed by
3529 * the underlying kernel.
3531 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3532 / sizeof(struct fiemap_extent))
3534 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3535 int fd
, int cmd
, abi_long arg
)
3537 /* The parameter for this ioctl is a struct fiemap followed
3538 * by an array of struct fiemap_extent whose size is set
3539 * in fiemap->fm_extent_count. The array is filled in by the
3542 int target_size_in
, target_size_out
;
3544 const argtype
*arg_type
= ie
->arg_type
;
3545 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
3548 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
3552 assert(arg_type
[0] == TYPE_PTR
);
3553 assert(ie
->access
== IOC_RW
);
3555 target_size_in
= thunk_type_size(arg_type
, 0);
3556 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
3558 return -TARGET_EFAULT
;
3560 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3561 unlock_user(argptr
, arg
, 0);
3562 fm
= (struct fiemap
*)buf_temp
;
3563 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
3564 return -TARGET_EINVAL
;
3567 outbufsz
= sizeof (*fm
) +
3568 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
3570 if (outbufsz
> MAX_STRUCT_SIZE
) {
3571 /* We can't fit all the extents into the fixed size buffer.
3572 * Allocate one that is large enough and use it instead.
3574 fm
= g_try_malloc(outbufsz
);
3576 return -TARGET_ENOMEM
;
3578 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
3581 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, fm
));
3582 if (!is_error(ret
)) {
3583 target_size_out
= target_size_in
;
3584 /* An extent_count of 0 means we were only counting the extents
3585 * so there are no structs to copy
3587 if (fm
->fm_extent_count
!= 0) {
3588 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
3590 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
3592 ret
= -TARGET_EFAULT
;
3594 /* Convert the struct fiemap */
3595 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
3596 if (fm
->fm_extent_count
!= 0) {
3597 p
= argptr
+ target_size_in
;
3598 /* ...and then all the struct fiemap_extents */
3599 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
3600 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
3605 unlock_user(argptr
, arg
, target_size_out
);
3615 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3616 int fd
, int cmd
, abi_long arg
)
3618 const argtype
*arg_type
= ie
->arg_type
;
3622 struct ifconf
*host_ifconf
;
3624 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
3625 int target_ifreq_size
;
3630 abi_long target_ifc_buf
;
3634 assert(arg_type
[0] == TYPE_PTR
);
3635 assert(ie
->access
== IOC_RW
);
3638 target_size
= thunk_type_size(arg_type
, 0);
3640 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3642 return -TARGET_EFAULT
;
3643 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3644 unlock_user(argptr
, arg
, 0);
3646 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
3647 target_ifc_len
= host_ifconf
->ifc_len
;
3648 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
3650 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
3651 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
3652 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
3654 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
3655 if (outbufsz
> MAX_STRUCT_SIZE
) {
3656 /* We can't fit all the extents into the fixed size buffer.
3657 * Allocate one that is large enough and use it instead.
3659 host_ifconf
= malloc(outbufsz
);
3661 return -TARGET_ENOMEM
;
3663 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
3666 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
3668 host_ifconf
->ifc_len
= host_ifc_len
;
3669 host_ifconf
->ifc_buf
= host_ifc_buf
;
3671 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, host_ifconf
));
3672 if (!is_error(ret
)) {
3673 /* convert host ifc_len to target ifc_len */
3675 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
3676 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
3677 host_ifconf
->ifc_len
= target_ifc_len
;
3679 /* restore target ifc_buf */
3681 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
3683 /* copy struct ifconf to target user */
3685 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3687 return -TARGET_EFAULT
;
3688 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
3689 unlock_user(argptr
, arg
, target_size
);
3691 /* copy ifreq[] to target user */
3693 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
3694 for (i
= 0; i
< nb_ifreq
; i
++) {
3695 thunk_convert(argptr
+ i
* target_ifreq_size
,
3696 host_ifc_buf
+ i
* sizeof(struct ifreq
),
3697 ifreq_arg_type
, THUNK_TARGET
);
3699 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
3709 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
3710 int cmd
, abi_long arg
)
3713 struct dm_ioctl
*host_dm
;
3714 abi_long guest_data
;
3715 uint32_t guest_data_size
;
3717 const argtype
*arg_type
= ie
->arg_type
;
3719 void *big_buf
= NULL
;
3723 target_size
= thunk_type_size(arg_type
, 0);
3724 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3726 ret
= -TARGET_EFAULT
;
3729 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3730 unlock_user(argptr
, arg
, 0);
3732 /* buf_temp is too small, so fetch things into a bigger buffer */
3733 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
3734 memcpy(big_buf
, buf_temp
, target_size
);
3738 guest_data
= arg
+ host_dm
->data_start
;
3739 if ((guest_data
- arg
) < 0) {
3743 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
3744 host_data
= (char*)host_dm
+ host_dm
->data_start
;
3746 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
3747 switch (ie
->host_cmd
) {
3749 case DM_LIST_DEVICES
:
3752 case DM_DEV_SUSPEND
:
3755 case DM_TABLE_STATUS
:
3756 case DM_TABLE_CLEAR
:
3758 case DM_LIST_VERSIONS
:
3762 case DM_DEV_SET_GEOMETRY
:
3763 /* data contains only strings */
3764 memcpy(host_data
, argptr
, guest_data_size
);
3767 memcpy(host_data
, argptr
, guest_data_size
);
3768 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
3772 void *gspec
= argptr
;
3773 void *cur_data
= host_data
;
3774 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
3775 int spec_size
= thunk_type_size(arg_type
, 0);
3778 for (i
= 0; i
< host_dm
->target_count
; i
++) {
3779 struct dm_target_spec
*spec
= cur_data
;
3783 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
3784 slen
= strlen((char*)gspec
+ spec_size
) + 1;
3786 spec
->next
= sizeof(*spec
) + slen
;
3787 strcpy((char*)&spec
[1], gspec
+ spec_size
);
3789 cur_data
+= spec
->next
;
3794 ret
= -TARGET_EINVAL
;
3795 unlock_user(argptr
, guest_data
, 0);
3798 unlock_user(argptr
, guest_data
, 0);
3800 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3801 if (!is_error(ret
)) {
3802 guest_data
= arg
+ host_dm
->data_start
;
3803 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
3804 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
3805 switch (ie
->host_cmd
) {
3810 case DM_DEV_SUSPEND
:
3813 case DM_TABLE_CLEAR
:
3815 case DM_DEV_SET_GEOMETRY
:
3816 /* no return data */
3818 case DM_LIST_DEVICES
:
3820 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
3821 uint32_t remaining_data
= guest_data_size
;
3822 void *cur_data
= argptr
;
3823 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
3824 int nl_size
= 12; /* can't use thunk_size due to alignment */
3827 uint32_t next
= nl
->next
;
3829 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
3831 if (remaining_data
< nl
->next
) {
3832 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3835 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
3836 strcpy(cur_data
+ nl_size
, nl
->name
);
3837 cur_data
+= nl
->next
;
3838 remaining_data
-= nl
->next
;
3842 nl
= (void*)nl
+ next
;
3847 case DM_TABLE_STATUS
:
3849 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
3850 void *cur_data
= argptr
;
3851 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
3852 int spec_size
= thunk_type_size(arg_type
, 0);
3855 for (i
= 0; i
< host_dm
->target_count
; i
++) {
3856 uint32_t next
= spec
->next
;
3857 int slen
= strlen((char*)&spec
[1]) + 1;
3858 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
3859 if (guest_data_size
< spec
->next
) {
3860 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3863 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
3864 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
3865 cur_data
= argptr
+ spec
->next
;
3866 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
3872 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
3873 int count
= *(uint32_t*)hdata
;
3874 uint64_t *hdev
= hdata
+ 8;
3875 uint64_t *gdev
= argptr
+ 8;
3878 *(uint32_t*)argptr
= tswap32(count
);
3879 for (i
= 0; i
< count
; i
++) {
3880 *gdev
= tswap64(*hdev
);
3886 case DM_LIST_VERSIONS
:
3888 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
3889 uint32_t remaining_data
= guest_data_size
;
3890 void *cur_data
= argptr
;
3891 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
3892 int vers_size
= thunk_type_size(arg_type
, 0);
3895 uint32_t next
= vers
->next
;
3897 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
3899 if (remaining_data
< vers
->next
) {
3900 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3903 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
3904 strcpy(cur_data
+ vers_size
, vers
->name
);
3905 cur_data
+= vers
->next
;
3906 remaining_data
-= vers
->next
;
3910 vers
= (void*)vers
+ next
;
3915 unlock_user(argptr
, guest_data
, 0);
3916 ret
= -TARGET_EINVAL
;
3919 unlock_user(argptr
, guest_data
, guest_data_size
);
3921 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3923 ret
= -TARGET_EFAULT
;
3926 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3927 unlock_user(argptr
, arg
, target_size
);
3934 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
3935 int cmd
, abi_long arg
)
3939 const argtype
*arg_type
= ie
->arg_type
;
3940 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
3943 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
3944 struct blkpg_partition host_part
;
3946 /* Read and convert blkpg */
3948 target_size
= thunk_type_size(arg_type
, 0);
3949 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3951 ret
= -TARGET_EFAULT
;
3954 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3955 unlock_user(argptr
, arg
, 0);
3957 switch (host_blkpg
->op
) {
3958 case BLKPG_ADD_PARTITION
:
3959 case BLKPG_DEL_PARTITION
:
3960 /* payload is struct blkpg_partition */
3963 /* Unknown opcode */
3964 ret
= -TARGET_EINVAL
;
3968 /* Read and convert blkpg->data */
3969 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
3970 target_size
= thunk_type_size(part_arg_type
, 0);
3971 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3973 ret
= -TARGET_EFAULT
;
3976 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
3977 unlock_user(argptr
, arg
, 0);
3979 /* Swizzle the data pointer to our local copy and call! */
3980 host_blkpg
->data
= &host_part
;
3981 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, host_blkpg
));
3987 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3988 int fd
, int cmd
, abi_long arg
)
3990 const argtype
*arg_type
= ie
->arg_type
;
3991 const StructEntry
*se
;
3992 const argtype
*field_types
;
3993 const int *dst_offsets
, *src_offsets
;
3996 abi_ulong
*target_rt_dev_ptr
;
3997 unsigned long *host_rt_dev_ptr
;
4001 assert(ie
->access
== IOC_W
);
4002 assert(*arg_type
== TYPE_PTR
);
4004 assert(*arg_type
== TYPE_STRUCT
);
4005 target_size
= thunk_type_size(arg_type
, 0);
4006 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4008 return -TARGET_EFAULT
;
4011 assert(*arg_type
== (int)STRUCT_rtentry
);
4012 se
= struct_entries
+ *arg_type
++;
4013 assert(se
->convert
[0] == NULL
);
4014 /* convert struct here to be able to catch rt_dev string */
4015 field_types
= se
->field_types
;
4016 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
4017 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
4018 for (i
= 0; i
< se
->nb_fields
; i
++) {
4019 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
4020 assert(*field_types
== TYPE_PTRVOID
);
4021 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
4022 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
4023 if (*target_rt_dev_ptr
!= 0) {
4024 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
4025 tswapal(*target_rt_dev_ptr
));
4026 if (!*host_rt_dev_ptr
) {
4027 unlock_user(argptr
, arg
, 0);
4028 return -TARGET_EFAULT
;
4031 *host_rt_dev_ptr
= 0;
4036 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
4037 argptr
+ src_offsets
[i
],
4038 field_types
, THUNK_HOST
);
4040 unlock_user(argptr
, arg
, 0);
4042 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
4043 if (*host_rt_dev_ptr
!= 0) {
4044 unlock_user((void *)*host_rt_dev_ptr
,
4045 *target_rt_dev_ptr
, 0);
4050 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4051 int fd
, int cmd
, abi_long arg
)
4053 int sig
= target_to_host_signal(arg
);
4054 return get_errno(ioctl(fd
, ie
->host_cmd
, sig
));
4057 static IOCTLEntry ioctl_entries
[] = {
4058 #define IOCTL(cmd, access, ...) \
4059 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
4060 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
4061 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
4066 /* ??? Implement proper locking for ioctls. */
4067 /* do_ioctl() Must return target values and target errnos. */
4068 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
4070 const IOCTLEntry
*ie
;
4071 const argtype
*arg_type
;
4073 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
4079 if (ie
->target_cmd
== 0) {
4080 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
4081 return -TARGET_ENOSYS
;
4083 if (ie
->target_cmd
== cmd
)
4087 arg_type
= ie
->arg_type
;
4089 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
4092 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
4095 switch(arg_type
[0]) {
4098 ret
= get_errno(ioctl(fd
, ie
->host_cmd
));
4102 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, arg
));
4106 target_size
= thunk_type_size(arg_type
, 0);
4107 switch(ie
->access
) {
4109 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
4110 if (!is_error(ret
)) {
4111 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4113 return -TARGET_EFAULT
;
4114 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
4115 unlock_user(argptr
, arg
, target_size
);
4119 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4121 return -TARGET_EFAULT
;
4122 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4123 unlock_user(argptr
, arg
, 0);
4124 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
4128 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4130 return -TARGET_EFAULT
;
4131 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4132 unlock_user(argptr
, arg
, 0);
4133 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
4134 if (!is_error(ret
)) {
4135 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4137 return -TARGET_EFAULT
;
4138 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
4139 unlock_user(argptr
, arg
, target_size
);
4145 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
4146 (long)cmd
, arg_type
[0]);
4147 ret
= -TARGET_ENOSYS
;
4153 static const bitmask_transtbl iflag_tbl
[] = {
4154 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
4155 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
4156 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
4157 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
4158 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
4159 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
4160 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
4161 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
4162 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
4163 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
4164 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
4165 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
4166 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
4167 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
4171 static const bitmask_transtbl oflag_tbl
[] = {
4172 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
4173 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
4174 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
4175 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
4176 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
4177 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
4178 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
4179 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
4180 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
4181 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
4182 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
4183 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
4184 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
4185 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
4186 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
4187 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
4188 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
4189 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
4190 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
4191 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
4192 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
4193 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
4194 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
4195 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
4199 static const bitmask_transtbl cflag_tbl
[] = {
4200 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
4201 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
4202 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
4203 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
4204 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
4205 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
4206 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
4207 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
4208 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
4209 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
4210 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
4211 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
4212 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
4213 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
4214 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
4215 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
4216 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
4217 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
4218 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
4219 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
4220 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
4221 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
4222 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
4223 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
4224 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
4225 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
4226 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
4227 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
4228 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
4229 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
4230 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
4234 static const bitmask_transtbl lflag_tbl
[] = {
4235 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
4236 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
4237 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
4238 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
4239 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
4240 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
4241 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
4242 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
4243 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
4244 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
4245 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
4246 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
4247 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
4248 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
4249 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
4253 static void target_to_host_termios (void *dst
, const void *src
)
4255 struct host_termios
*host
= dst
;
4256 const struct target_termios
*target
= src
;
4259 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
4261 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
4263 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
4265 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
4266 host
->c_line
= target
->c_line
;
4268 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
4269 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
4270 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
4271 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
4272 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
4273 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
4274 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
4275 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
4276 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
4277 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
4278 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
4279 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
4280 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
4281 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
4282 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
4283 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
4284 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
4285 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
4288 static void host_to_target_termios (void *dst
, const void *src
)
4290 struct target_termios
*target
= dst
;
4291 const struct host_termios
*host
= src
;
4294 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
4296 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
4298 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
4300 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
4301 target
->c_line
= host
->c_line
;
4303 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
4304 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
4305 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
4306 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
4307 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
4308 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
4309 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
4310 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
4311 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
4312 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
4313 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
4314 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
4315 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
4316 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
4317 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
4318 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
4319 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
4320 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
4323 static const StructEntry struct_termios_def
= {
4324 .convert
= { host_to_target_termios
, target_to_host_termios
},
4325 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
4326 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
4329 static bitmask_transtbl mmap_flags_tbl
[] = {
4330 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
4331 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
4332 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
4333 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
, MAP_ANONYMOUS
, MAP_ANONYMOUS
},
4334 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
, MAP_GROWSDOWN
, MAP_GROWSDOWN
},
4335 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
, MAP_DENYWRITE
, MAP_DENYWRITE
},
4336 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
, MAP_EXECUTABLE
, MAP_EXECUTABLE
},
4337 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
4338 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
, MAP_NORESERVE
,
4343 #if defined(TARGET_I386)
4345 /* NOTE: there is really one LDT for all the threads */
4346 static uint8_t *ldt_table
;
4348 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
4355 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
4356 if (size
> bytecount
)
4358 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
4360 return -TARGET_EFAULT
;
4361 /* ??? Should this by byteswapped? */
4362 memcpy(p
, ldt_table
, size
);
4363 unlock_user(p
, ptr
, size
);
4367 /* XXX: add locking support */
4368 static abi_long
write_ldt(CPUX86State
*env
,
4369 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
4371 struct target_modify_ldt_ldt_s ldt_info
;
4372 struct target_modify_ldt_ldt_s
*target_ldt_info
;
4373 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
4374 int seg_not_present
, useable
, lm
;
4375 uint32_t *lp
, entry_1
, entry_2
;
4377 if (bytecount
!= sizeof(ldt_info
))
4378 return -TARGET_EINVAL
;
4379 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
4380 return -TARGET_EFAULT
;
4381 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
4382 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
4383 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
4384 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
4385 unlock_user_struct(target_ldt_info
, ptr
, 0);
4387 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
4388 return -TARGET_EINVAL
;
4389 seg_32bit
= ldt_info
.flags
& 1;
4390 contents
= (ldt_info
.flags
>> 1) & 3;
4391 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
4392 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
4393 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
4394 useable
= (ldt_info
.flags
>> 6) & 1;
4398 lm
= (ldt_info
.flags
>> 7) & 1;
4400 if (contents
== 3) {
4402 return -TARGET_EINVAL
;
4403 if (seg_not_present
== 0)
4404 return -TARGET_EINVAL
;
4406 /* allocate the LDT */
4408 env
->ldt
.base
= target_mmap(0,
4409 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
4410 PROT_READ
|PROT_WRITE
,
4411 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
4412 if (env
->ldt
.base
== -1)
4413 return -TARGET_ENOMEM
;
4414 memset(g2h(env
->ldt
.base
), 0,
4415 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
4416 env
->ldt
.limit
= 0xffff;
4417 ldt_table
= g2h(env
->ldt
.base
);
4420 /* NOTE: same code as Linux kernel */
4421 /* Allow LDTs to be cleared by the user. */
4422 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
4425 read_exec_only
== 1 &&
4427 limit_in_pages
== 0 &&
4428 seg_not_present
== 1 &&
4436 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
4437 (ldt_info
.limit
& 0x0ffff);
4438 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
4439 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
4440 (ldt_info
.limit
& 0xf0000) |
4441 ((read_exec_only
^ 1) << 9) |
4443 ((seg_not_present
^ 1) << 15) |
4445 (limit_in_pages
<< 23) |
4449 entry_2
|= (useable
<< 20);
4451 /* Install the new entry ... */
4453 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
4454 lp
[0] = tswap32(entry_1
);
4455 lp
[1] = tswap32(entry_2
);
4459 /* specific and weird i386 syscalls */
4460 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
4461 unsigned long bytecount
)
4467 ret
= read_ldt(ptr
, bytecount
);
4470 ret
= write_ldt(env
, ptr
, bytecount
, 1);
4473 ret
= write_ldt(env
, ptr
, bytecount
, 0);
4476 ret
= -TARGET_ENOSYS
;
4482 #if defined(TARGET_I386) && defined(TARGET_ABI32)
4483 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
4485 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
4486 struct target_modify_ldt_ldt_s ldt_info
;
4487 struct target_modify_ldt_ldt_s
*target_ldt_info
;
4488 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
4489 int seg_not_present
, useable
, lm
;
4490 uint32_t *lp
, entry_1
, entry_2
;
4493 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
4494 if (!target_ldt_info
)
4495 return -TARGET_EFAULT
;
4496 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
4497 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
4498 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
4499 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
4500 if (ldt_info
.entry_number
== -1) {
4501 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
4502 if (gdt_table
[i
] == 0) {
4503 ldt_info
.entry_number
= i
;
4504 target_ldt_info
->entry_number
= tswap32(i
);
4509 unlock_user_struct(target_ldt_info
, ptr
, 1);
4511 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
4512 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
4513 return -TARGET_EINVAL
;
4514 seg_32bit
= ldt_info
.flags
& 1;
4515 contents
= (ldt_info
.flags
>> 1) & 3;
4516 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
4517 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
4518 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
4519 useable
= (ldt_info
.flags
>> 6) & 1;
4523 lm
= (ldt_info
.flags
>> 7) & 1;
4526 if (contents
== 3) {
4527 if (seg_not_present
== 0)
4528 return -TARGET_EINVAL
;
4531 /* NOTE: same code as Linux kernel */
4532 /* Allow LDTs to be cleared by the user. */
4533 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
4534 if ((contents
== 0 &&
4535 read_exec_only
== 1 &&
4537 limit_in_pages
== 0 &&
4538 seg_not_present
== 1 &&
4546 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
4547 (ldt_info
.limit
& 0x0ffff);
4548 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
4549 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
4550 (ldt_info
.limit
& 0xf0000) |
4551 ((read_exec_only
^ 1) << 9) |
4553 ((seg_not_present
^ 1) << 15) |
4555 (limit_in_pages
<< 23) |
4560 /* Install the new entry ... */
4562 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
4563 lp
[0] = tswap32(entry_1
);
4564 lp
[1] = tswap32(entry_2
);
4568 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
4570 struct target_modify_ldt_ldt_s
*target_ldt_info
;
4571 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
4572 uint32_t base_addr
, limit
, flags
;
4573 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
4574 int seg_not_present
, useable
, lm
;
4575 uint32_t *lp
, entry_1
, entry_2
;
4577 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
4578 if (!target_ldt_info
)
4579 return -TARGET_EFAULT
;
4580 idx
= tswap32(target_ldt_info
->entry_number
);
4581 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
4582 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
4583 unlock_user_struct(target_ldt_info
, ptr
, 1);
4584 return -TARGET_EINVAL
;
4586 lp
= (uint32_t *)(gdt_table
+ idx
);
4587 entry_1
= tswap32(lp
[0]);
4588 entry_2
= tswap32(lp
[1]);
4590 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
4591 contents
= (entry_2
>> 10) & 3;
4592 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
4593 seg_32bit
= (entry_2
>> 22) & 1;
4594 limit_in_pages
= (entry_2
>> 23) & 1;
4595 useable
= (entry_2
>> 20) & 1;
4599 lm
= (entry_2
>> 21) & 1;
4601 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
4602 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
4603 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
4604 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
4605 base_addr
= (entry_1
>> 16) |
4606 (entry_2
& 0xff000000) |
4607 ((entry_2
& 0xff) << 16);
4608 target_ldt_info
->base_addr
= tswapal(base_addr
);
4609 target_ldt_info
->limit
= tswap32(limit
);
4610 target_ldt_info
->flags
= tswap32(flags
);
4611 unlock_user_struct(target_ldt_info
, ptr
, 1);
4614 #endif /* TARGET_I386 && TARGET_ABI32 */
4616 #ifndef TARGET_ABI32
4617 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
4624 case TARGET_ARCH_SET_GS
:
4625 case TARGET_ARCH_SET_FS
:
4626 if (code
== TARGET_ARCH_SET_GS
)
4630 cpu_x86_load_seg(env
, idx
, 0);
4631 env
->segs
[idx
].base
= addr
;
4633 case TARGET_ARCH_GET_GS
:
4634 case TARGET_ARCH_GET_FS
:
4635 if (code
== TARGET_ARCH_GET_GS
)
4639 val
= env
->segs
[idx
].base
;
4640 if (put_user(val
, addr
, abi_ulong
))
4641 ret
= -TARGET_EFAULT
;
4644 ret
= -TARGET_EINVAL
;
4651 #endif /* defined(TARGET_I386) */
4653 #define NEW_STACK_SIZE 0x40000
4656 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
4659 pthread_mutex_t mutex
;
4660 pthread_cond_t cond
;
4663 abi_ulong child_tidptr
;
4664 abi_ulong parent_tidptr
;
4668 static void *clone_func(void *arg
)
4670 new_thread_info
*info
= arg
;
4675 rcu_register_thread();
4677 cpu
= ENV_GET_CPU(env
);
4679 ts
= (TaskState
*)cpu
->opaque
;
4680 info
->tid
= gettid();
4681 cpu
->host_tid
= info
->tid
;
4683 if (info
->child_tidptr
)
4684 put_user_u32(info
->tid
, info
->child_tidptr
);
4685 if (info
->parent_tidptr
)
4686 put_user_u32(info
->tid
, info
->parent_tidptr
);
4687 /* Enable signals. */
4688 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
4689 /* Signal to the parent that we're ready. */
4690 pthread_mutex_lock(&info
->mutex
);
4691 pthread_cond_broadcast(&info
->cond
);
4692 pthread_mutex_unlock(&info
->mutex
);
4693 /* Wait until the parent has finshed initializing the tls state. */
4694 pthread_mutex_lock(&clone_lock
);
4695 pthread_mutex_unlock(&clone_lock
);
4701 /* do_fork() Must return host values and target errnos (unlike most
4702 do_*() functions). */
4703 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
4704 abi_ulong parent_tidptr
, target_ulong newtls
,
4705 abi_ulong child_tidptr
)
4707 CPUState
*cpu
= ENV_GET_CPU(env
);
4711 CPUArchState
*new_env
;
4712 unsigned int nptl_flags
;
4715 /* Emulate vfork() with fork() */
4716 if (flags
& CLONE_VFORK
)
4717 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
4719 if (flags
& CLONE_VM
) {
4720 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
4721 new_thread_info info
;
4722 pthread_attr_t attr
;
4724 ts
= g_new0(TaskState
, 1);
4725 init_task_state(ts
);
4726 /* we create a new CPU instance. */
4727 new_env
= cpu_copy(env
);
4728 /* Init regs that differ from the parent. */
4729 cpu_clone_regs(new_env
, newsp
);
4730 new_cpu
= ENV_GET_CPU(new_env
);
4731 new_cpu
->opaque
= ts
;
4732 ts
->bprm
= parent_ts
->bprm
;
4733 ts
->info
= parent_ts
->info
;
4735 flags
&= ~CLONE_NPTL_FLAGS2
;
4737 if (nptl_flags
& CLONE_CHILD_CLEARTID
) {
4738 ts
->child_tidptr
= child_tidptr
;
4741 if (nptl_flags
& CLONE_SETTLS
)
4742 cpu_set_tls (new_env
, newtls
);
4744 /* Grab a mutex so that thread setup appears atomic. */
4745 pthread_mutex_lock(&clone_lock
);
4747 memset(&info
, 0, sizeof(info
));
4748 pthread_mutex_init(&info
.mutex
, NULL
);
4749 pthread_mutex_lock(&info
.mutex
);
4750 pthread_cond_init(&info
.cond
, NULL
);
4752 if (nptl_flags
& CLONE_CHILD_SETTID
)
4753 info
.child_tidptr
= child_tidptr
;
4754 if (nptl_flags
& CLONE_PARENT_SETTID
)
4755 info
.parent_tidptr
= parent_tidptr
;
4757 ret
= pthread_attr_init(&attr
);
4758 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
4759 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
4760 /* It is not safe to deliver signals until the child has finished
4761 initializing, so temporarily block all signals. */
4762 sigfillset(&sigmask
);
4763 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
4765 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
4766 /* TODO: Free new CPU state if thread creation failed. */
4768 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
4769 pthread_attr_destroy(&attr
);
4771 /* Wait for the child to initialize. */
4772 pthread_cond_wait(&info
.cond
, &info
.mutex
);
4774 if (flags
& CLONE_PARENT_SETTID
)
4775 put_user_u32(ret
, parent_tidptr
);
4779 pthread_mutex_unlock(&info
.mutex
);
4780 pthread_cond_destroy(&info
.cond
);
4781 pthread_mutex_destroy(&info
.mutex
);
4782 pthread_mutex_unlock(&clone_lock
);
4784 /* if no CLONE_VM, we consider it is a fork */
4785 if ((flags
& ~(CSIGNAL
| CLONE_NPTL_FLAGS2
)) != 0) {
4786 return -TARGET_EINVAL
;
4791 /* Child Process. */
4793 cpu_clone_regs(env
, newsp
);
4795 /* There is a race condition here. The parent process could
4796 theoretically read the TID in the child process before the child
4797 tid is set. This would require using either ptrace
4798 (not implemented) or having *_tidptr to point at a shared memory
4799 mapping. We can't repeat the spinlock hack used above because
4800 the child process gets its own copy of the lock. */
4801 if (flags
& CLONE_CHILD_SETTID
)
4802 put_user_u32(gettid(), child_tidptr
);
4803 if (flags
& CLONE_PARENT_SETTID
)
4804 put_user_u32(gettid(), parent_tidptr
);
4805 ts
= (TaskState
*)cpu
->opaque
;
4806 if (flags
& CLONE_SETTLS
)
4807 cpu_set_tls (env
, newtls
);
4808 if (flags
& CLONE_CHILD_CLEARTID
)
4809 ts
->child_tidptr
= child_tidptr
;
4817 /* warning : doesn't handle linux specific flags... */
4818 static int target_to_host_fcntl_cmd(int cmd
)
4821 case TARGET_F_DUPFD
:
4822 case TARGET_F_GETFD
:
4823 case TARGET_F_SETFD
:
4824 case TARGET_F_GETFL
:
4825 case TARGET_F_SETFL
:
4827 case TARGET_F_GETLK
:
4829 case TARGET_F_SETLK
:
4831 case TARGET_F_SETLKW
:
4833 case TARGET_F_GETOWN
:
4835 case TARGET_F_SETOWN
:
4837 case TARGET_F_GETSIG
:
4839 case TARGET_F_SETSIG
:
4841 #if TARGET_ABI_BITS == 32
4842 case TARGET_F_GETLK64
:
4844 case TARGET_F_SETLK64
:
4846 case TARGET_F_SETLKW64
:
4849 case TARGET_F_SETLEASE
:
4851 case TARGET_F_GETLEASE
:
4853 #ifdef F_DUPFD_CLOEXEC
4854 case TARGET_F_DUPFD_CLOEXEC
:
4855 return F_DUPFD_CLOEXEC
;
4857 case TARGET_F_NOTIFY
:
4860 case TARGET_F_GETOWN_EX
:
4864 case TARGET_F_SETOWN_EX
:
4868 return -TARGET_EINVAL
;
4870 return -TARGET_EINVAL
;
4873 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
4874 static const bitmask_transtbl flock_tbl
[] = {
4875 TRANSTBL_CONVERT(F_RDLCK
),
4876 TRANSTBL_CONVERT(F_WRLCK
),
4877 TRANSTBL_CONVERT(F_UNLCK
),
4878 TRANSTBL_CONVERT(F_EXLCK
),
4879 TRANSTBL_CONVERT(F_SHLCK
),
4883 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
4886 struct target_flock
*target_fl
;
4887 struct flock64 fl64
;
4888 struct target_flock64
*target_fl64
;
4890 struct f_owner_ex fox
;
4891 struct target_f_owner_ex
*target_fox
;
4894 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
4896 if (host_cmd
== -TARGET_EINVAL
)
4900 case TARGET_F_GETLK
:
4901 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
4902 return -TARGET_EFAULT
;
4904 target_to_host_bitmask(tswap16(target_fl
->l_type
), flock_tbl
);
4905 fl
.l_whence
= tswap16(target_fl
->l_whence
);
4906 fl
.l_start
= tswapal(target_fl
->l_start
);
4907 fl
.l_len
= tswapal(target_fl
->l_len
);
4908 fl
.l_pid
= tswap32(target_fl
->l_pid
);
4909 unlock_user_struct(target_fl
, arg
, 0);
4910 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
4912 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg
, 0))
4913 return -TARGET_EFAULT
;
4915 host_to_target_bitmask(tswap16(fl
.l_type
), flock_tbl
);
4916 target_fl
->l_whence
= tswap16(fl
.l_whence
);
4917 target_fl
->l_start
= tswapal(fl
.l_start
);
4918 target_fl
->l_len
= tswapal(fl
.l_len
);
4919 target_fl
->l_pid
= tswap32(fl
.l_pid
);
4920 unlock_user_struct(target_fl
, arg
, 1);
4924 case TARGET_F_SETLK
:
4925 case TARGET_F_SETLKW
:
4926 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
4927 return -TARGET_EFAULT
;
4929 target_to_host_bitmask(tswap16(target_fl
->l_type
), flock_tbl
);
4930 fl
.l_whence
= tswap16(target_fl
->l_whence
);
4931 fl
.l_start
= tswapal(target_fl
->l_start
);
4932 fl
.l_len
= tswapal(target_fl
->l_len
);
4933 fl
.l_pid
= tswap32(target_fl
->l_pid
);
4934 unlock_user_struct(target_fl
, arg
, 0);
4935 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
4938 case TARGET_F_GETLK64
:
4939 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
4940 return -TARGET_EFAULT
;
4942 target_to_host_bitmask(tswap16(target_fl64
->l_type
), flock_tbl
) >> 1;
4943 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
4944 fl64
.l_start
= tswap64(target_fl64
->l_start
);
4945 fl64
.l_len
= tswap64(target_fl64
->l_len
);
4946 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
4947 unlock_user_struct(target_fl64
, arg
, 0);
4948 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
4950 if (!lock_user_struct(VERIFY_WRITE
, target_fl64
, arg
, 0))
4951 return -TARGET_EFAULT
;
4952 target_fl64
->l_type
=
4953 host_to_target_bitmask(tswap16(fl64
.l_type
), flock_tbl
) >> 1;
4954 target_fl64
->l_whence
= tswap16(fl64
.l_whence
);
4955 target_fl64
->l_start
= tswap64(fl64
.l_start
);
4956 target_fl64
->l_len
= tswap64(fl64
.l_len
);
4957 target_fl64
->l_pid
= tswap32(fl64
.l_pid
);
4958 unlock_user_struct(target_fl64
, arg
, 1);
4961 case TARGET_F_SETLK64
:
4962 case TARGET_F_SETLKW64
:
4963 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
4964 return -TARGET_EFAULT
;
4966 target_to_host_bitmask(tswap16(target_fl64
->l_type
), flock_tbl
) >> 1;
4967 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
4968 fl64
.l_start
= tswap64(target_fl64
->l_start
);
4969 fl64
.l_len
= tswap64(target_fl64
->l_len
);
4970 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
4971 unlock_user_struct(target_fl64
, arg
, 0);
4972 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
4975 case TARGET_F_GETFL
:
4976 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
4978 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
4982 case TARGET_F_SETFL
:
4983 ret
= get_errno(fcntl(fd
, host_cmd
, target_to_host_bitmask(arg
, fcntl_flags_tbl
)));
4987 case TARGET_F_GETOWN_EX
:
4988 ret
= get_errno(fcntl(fd
, host_cmd
, &fox
));
4990 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
4991 return -TARGET_EFAULT
;
4992 target_fox
->type
= tswap32(fox
.type
);
4993 target_fox
->pid
= tswap32(fox
.pid
);
4994 unlock_user_struct(target_fox
, arg
, 1);
5000 case TARGET_F_SETOWN_EX
:
5001 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
5002 return -TARGET_EFAULT
;
5003 fox
.type
= tswap32(target_fox
->type
);
5004 fox
.pid
= tswap32(target_fox
->pid
);
5005 unlock_user_struct(target_fox
, arg
, 0);
5006 ret
= get_errno(fcntl(fd
, host_cmd
, &fox
));
5010 case TARGET_F_SETOWN
:
5011 case TARGET_F_GETOWN
:
5012 case TARGET_F_SETSIG
:
5013 case TARGET_F_GETSIG
:
5014 case TARGET_F_SETLEASE
:
5015 case TARGET_F_GETLEASE
:
5016 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
5020 ret
= get_errno(fcntl(fd
, cmd
, arg
));
5028 static inline int high2lowuid(int uid
)
5036 static inline int high2lowgid(int gid
)
5044 static inline int low2highuid(int uid
)
5046 if ((int16_t)uid
== -1)
5052 static inline int low2highgid(int gid
)
5054 if ((int16_t)gid
== -1)
5059 static inline int tswapid(int id
)
5064 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
5066 #else /* !USE_UID16 */
5067 static inline int high2lowuid(int uid
)
5071 static inline int high2lowgid(int gid
)
5075 static inline int low2highuid(int uid
)
5079 static inline int low2highgid(int gid
)
5083 static inline int tswapid(int id
)
5088 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
5090 #endif /* USE_UID16 */
5092 void syscall_init(void)
5095 const argtype
*arg_type
;
5099 thunk_init(STRUCT_MAX
);
5101 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
5102 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
5103 #include "syscall_types.h"
5105 #undef STRUCT_SPECIAL
5107 /* Build target_to_host_errno_table[] table from
5108 * host_to_target_errno_table[]. */
5109 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
5110 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
5113 /* we patch the ioctl size if necessary. We rely on the fact that
5114 no ioctl has all the bits at '1' in the size field */
5116 while (ie
->target_cmd
!= 0) {
5117 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
5118 TARGET_IOC_SIZEMASK
) {
5119 arg_type
= ie
->arg_type
;
5120 if (arg_type
[0] != TYPE_PTR
) {
5121 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
5126 size
= thunk_type_size(arg_type
, 0);
5127 ie
->target_cmd
= (ie
->target_cmd
&
5128 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
5129 (size
<< TARGET_IOC_SIZESHIFT
);
5132 /* automatic consistency check if same arch */
5133 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
5134 (defined(__x86_64__) && defined(TARGET_X86_64))
5135 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
5136 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
5137 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
5144 #if TARGET_ABI_BITS == 32
5145 static inline uint64_t target_offset64(uint32_t word0
, uint32_t word1
)
5147 #ifdef TARGET_WORDS_BIGENDIAN
5148 return ((uint64_t)word0
<< 32) | word1
;
5150 return ((uint64_t)word1
<< 32) | word0
;
5153 #else /* TARGET_ABI_BITS == 32 */
5154 static inline uint64_t target_offset64(uint64_t word0
, uint64_t word1
)
5158 #endif /* TARGET_ABI_BITS != 32 */
5160 #ifdef TARGET_NR_truncate64
5161 static inline abi_long
target_truncate64(void *cpu_env
, const char *arg1
,
5166 if (regpairs_aligned(cpu_env
)) {
5170 return get_errno(truncate64(arg1
, target_offset64(arg2
, arg3
)));
5174 #ifdef TARGET_NR_ftruncate64
5175 static inline abi_long
target_ftruncate64(void *cpu_env
, abi_long arg1
,
5180 if (regpairs_aligned(cpu_env
)) {
5184 return get_errno(ftruncate64(arg1
, target_offset64(arg2
, arg3
)));
5188 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
5189 abi_ulong target_addr
)
5191 struct target_timespec
*target_ts
;
5193 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
5194 return -TARGET_EFAULT
;
5195 host_ts
->tv_sec
= tswapal(target_ts
->tv_sec
);
5196 host_ts
->tv_nsec
= tswapal(target_ts
->tv_nsec
);
5197 unlock_user_struct(target_ts
, target_addr
, 0);
5201 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
5202 struct timespec
*host_ts
)
5204 struct target_timespec
*target_ts
;
5206 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
5207 return -TARGET_EFAULT
;
5208 target_ts
->tv_sec
= tswapal(host_ts
->tv_sec
);
5209 target_ts
->tv_nsec
= tswapal(host_ts
->tv_nsec
);
5210 unlock_user_struct(target_ts
, target_addr
, 1);
5214 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
5215 abi_ulong target_addr
)
5217 struct target_itimerspec
*target_itspec
;
5219 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
5220 return -TARGET_EFAULT
;
5223 host_itspec
->it_interval
.tv_sec
=
5224 tswapal(target_itspec
->it_interval
.tv_sec
);
5225 host_itspec
->it_interval
.tv_nsec
=
5226 tswapal(target_itspec
->it_interval
.tv_nsec
);
5227 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
5228 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
5230 unlock_user_struct(target_itspec
, target_addr
, 1);
5234 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
5235 struct itimerspec
*host_its
)
5237 struct target_itimerspec
*target_itspec
;
5239 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
5240 return -TARGET_EFAULT
;
5243 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
5244 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
5246 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
5247 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
5249 unlock_user_struct(target_itspec
, target_addr
, 0);
5253 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
5254 abi_ulong target_addr
)
5256 struct target_sigevent
*target_sevp
;
5258 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
5259 return -TARGET_EFAULT
;
5262 /* This union is awkward on 64 bit systems because it has a 32 bit
5263 * integer and a pointer in it; we follow the conversion approach
5264 * used for handling sigval types in signal.c so the guest should get
5265 * the correct value back even if we did a 64 bit byteswap and it's
5266 * using the 32 bit integer.
5268 host_sevp
->sigev_value
.sival_ptr
=
5269 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
5270 host_sevp
->sigev_signo
=
5271 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
5272 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
5273 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
5275 unlock_user_struct(target_sevp
, target_addr
, 1);
5279 #if defined(TARGET_NR_mlockall)
5280 static inline int target_to_host_mlockall_arg(int arg
)
5284 if (arg
& TARGET_MLOCKALL_MCL_CURRENT
) {
5285 result
|= MCL_CURRENT
;
5287 if (arg
& TARGET_MLOCKALL_MCL_FUTURE
) {
5288 result
|= MCL_FUTURE
;
5294 static inline abi_long
host_to_target_stat64(void *cpu_env
,
5295 abi_ulong target_addr
,
5296 struct stat
*host_st
)
5298 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
5299 if (((CPUARMState
*)cpu_env
)->eabi
) {
5300 struct target_eabi_stat64
*target_st
;
5302 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
5303 return -TARGET_EFAULT
;
5304 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
5305 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
5306 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
5307 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
5308 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
5310 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
5311 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
5312 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
5313 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
5314 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
5315 __put_user(host_st
->st_size
, &target_st
->st_size
);
5316 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
5317 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
5318 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
5319 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
5320 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
5321 unlock_user_struct(target_st
, target_addr
, 1);
5325 #if defined(TARGET_HAS_STRUCT_STAT64)
5326 struct target_stat64
*target_st
;
5328 struct target_stat
*target_st
;
5331 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
5332 return -TARGET_EFAULT
;
5333 memset(target_st
, 0, sizeof(*target_st
));
5334 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
5335 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
5336 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
5337 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
5339 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
5340 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
5341 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
5342 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
5343 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
5344 /* XXX: better use of kernel struct */
5345 __put_user(host_st
->st_size
, &target_st
->st_size
);
5346 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
5347 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
5348 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
5349 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
5350 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
5351 unlock_user_struct(target_st
, target_addr
, 1);
5357 /* ??? Using host futex calls even when target atomic operations
5358 are not really atomic probably breaks things. However implementing
5359 futexes locally would make futexes shared between multiple processes
5360 tricky. However they're probably useless because guest atomic
5361 operations won't work either. */
5362 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
5363 target_ulong uaddr2
, int val3
)
5365 struct timespec ts
, *pts
;
5368 /* ??? We assume FUTEX_* constants are the same on both host
5370 #ifdef FUTEX_CMD_MASK
5371 base_op
= op
& FUTEX_CMD_MASK
;
5377 case FUTEX_WAIT_BITSET
:
5380 target_to_host_timespec(pts
, timeout
);
5384 return get_errno(sys_futex(g2h(uaddr
), op
, tswap32(val
),
5387 return get_errno(sys_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
5389 return get_errno(sys_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
5391 case FUTEX_CMP_REQUEUE
:
5393 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
5394 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
5395 But the prototype takes a `struct timespec *'; insert casts
5396 to satisfy the compiler. We do not need to tswap TIMEOUT
5397 since it's not compared to guest memory. */
5398 pts
= (struct timespec
*)(uintptr_t) timeout
;
5399 return get_errno(sys_futex(g2h(uaddr
), op
, val
, pts
,
5401 (base_op
== FUTEX_CMP_REQUEUE
5405 return -TARGET_ENOSYS
;
5408 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
5409 static abi_long
do_name_to_handle_at(abi_long dirfd
, abi_long pathname
,
5410 abi_long handle
, abi_long mount_id
,
5413 struct file_handle
*target_fh
;
5414 struct file_handle
*fh
;
5418 unsigned int size
, total_size
;
5420 if (get_user_s32(size
, handle
)) {
5421 return -TARGET_EFAULT
;
5424 name
= lock_user_string(pathname
);
5426 return -TARGET_EFAULT
;
5429 total_size
= sizeof(struct file_handle
) + size
;
5430 target_fh
= lock_user(VERIFY_WRITE
, handle
, total_size
, 0);
5432 unlock_user(name
, pathname
, 0);
5433 return -TARGET_EFAULT
;
5436 fh
= g_malloc0(total_size
);
5437 fh
->handle_bytes
= size
;
5439 ret
= get_errno(name_to_handle_at(dirfd
, path(name
), fh
, &mid
, flags
));
5440 unlock_user(name
, pathname
, 0);
5442 /* man name_to_handle_at(2):
5443 * Other than the use of the handle_bytes field, the caller should treat
5444 * the file_handle structure as an opaque data type
5447 memcpy(target_fh
, fh
, total_size
);
5448 target_fh
->handle_bytes
= tswap32(fh
->handle_bytes
);
5449 target_fh
->handle_type
= tswap32(fh
->handle_type
);
5451 unlock_user(target_fh
, handle
, total_size
);
5453 if (put_user_s32(mid
, mount_id
)) {
5454 return -TARGET_EFAULT
;
5462 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
5463 static abi_long
do_open_by_handle_at(abi_long mount_fd
, abi_long handle
,
5466 struct file_handle
*target_fh
;
5467 struct file_handle
*fh
;
5468 unsigned int size
, total_size
;
5471 if (get_user_s32(size
, handle
)) {
5472 return -TARGET_EFAULT
;
5475 total_size
= sizeof(struct file_handle
) + size
;
5476 target_fh
= lock_user(VERIFY_READ
, handle
, total_size
, 1);
5478 return -TARGET_EFAULT
;
5481 fh
= g_memdup(target_fh
, total_size
);
5482 fh
->handle_bytes
= size
;
5483 fh
->handle_type
= tswap32(target_fh
->handle_type
);
5485 ret
= get_errno(open_by_handle_at(mount_fd
, fh
,
5486 target_to_host_bitmask(flags
, fcntl_flags_tbl
)));
5490 unlock_user(target_fh
, handle
, total_size
);
5496 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
5498 /* signalfd siginfo conversion */
5501 host_to_target_signalfd_siginfo(struct signalfd_siginfo
*tinfo
,
5502 const struct signalfd_siginfo
*info
)
5504 int sig
= host_to_target_signal(info
->ssi_signo
);
5506 /* linux/signalfd.h defines a ssi_addr_lsb
5507 * not defined in sys/signalfd.h but used by some kernels
5510 #ifdef BUS_MCEERR_AO
5511 if (tinfo
->ssi_signo
== SIGBUS
&&
5512 (tinfo
->ssi_code
== BUS_MCEERR_AR
||
5513 tinfo
->ssi_code
== BUS_MCEERR_AO
)) {
5514 uint16_t *ssi_addr_lsb
= (uint16_t *)(&info
->ssi_addr
+ 1);
5515 uint16_t *tssi_addr_lsb
= (uint16_t *)(&tinfo
->ssi_addr
+ 1);
5516 *tssi_addr_lsb
= tswap16(*ssi_addr_lsb
);
5520 tinfo
->ssi_signo
= tswap32(sig
);
5521 tinfo
->ssi_errno
= tswap32(tinfo
->ssi_errno
);
5522 tinfo
->ssi_code
= tswap32(info
->ssi_code
);
5523 tinfo
->ssi_pid
= tswap32(info
->ssi_pid
);
5524 tinfo
->ssi_uid
= tswap32(info
->ssi_uid
);
5525 tinfo
->ssi_fd
= tswap32(info
->ssi_fd
);
5526 tinfo
->ssi_tid
= tswap32(info
->ssi_tid
);
5527 tinfo
->ssi_band
= tswap32(info
->ssi_band
);
5528 tinfo
->ssi_overrun
= tswap32(info
->ssi_overrun
);
5529 tinfo
->ssi_trapno
= tswap32(info
->ssi_trapno
);
5530 tinfo
->ssi_status
= tswap32(info
->ssi_status
);
5531 tinfo
->ssi_int
= tswap32(info
->ssi_int
);
5532 tinfo
->ssi_ptr
= tswap64(info
->ssi_ptr
);
5533 tinfo
->ssi_utime
= tswap64(info
->ssi_utime
);
5534 tinfo
->ssi_stime
= tswap64(info
->ssi_stime
);
5535 tinfo
->ssi_addr
= tswap64(info
->ssi_addr
);
5538 static abi_long
host_to_target_data_signalfd(void *buf
, size_t len
)
5542 for (i
= 0; i
< len
; i
+= sizeof(struct signalfd_siginfo
)) {
5543 host_to_target_signalfd_siginfo(buf
+ i
, buf
+ i
);
5549 static TargetFdTrans target_signalfd_trans
= {
5550 .host_to_target_data
= host_to_target_data_signalfd
,
5553 static abi_long
do_signalfd4(int fd
, abi_long mask
, int flags
)
5556 target_sigset_t
*target_mask
;
5560 if (flags
& ~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
)) {
5561 return -TARGET_EINVAL
;
5563 if (!lock_user_struct(VERIFY_READ
, target_mask
, mask
, 1)) {
5564 return -TARGET_EFAULT
;
5567 target_to_host_sigset(&host_mask
, target_mask
);
5569 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
5571 ret
= get_errno(signalfd(fd
, &host_mask
, host_flags
));
5573 fd_trans_register(ret
, &target_signalfd_trans
);
5576 unlock_user_struct(target_mask
, mask
, 0);
5582 /* Map host to target signal numbers for the wait family of syscalls.
5583 Assume all other status bits are the same. */
5584 int host_to_target_waitstatus(int status
)
5586 if (WIFSIGNALED(status
)) {
5587 return host_to_target_signal(WTERMSIG(status
)) | (status
& ~0x7f);
5589 if (WIFSTOPPED(status
)) {
5590 return (host_to_target_signal(WSTOPSIG(status
)) << 8)
5596 static int open_self_cmdline(void *cpu_env
, int fd
)
5599 bool word_skipped
= false;
5601 fd_orig
= open("/proc/self/cmdline", O_RDONLY
);
5611 nb_read
= read(fd_orig
, buf
, sizeof(buf
));
5614 fd_orig
= close(fd_orig
);
5617 } else if (nb_read
== 0) {
5621 if (!word_skipped
) {
5622 /* Skip the first string, which is the path to qemu-*-static
5623 instead of the actual command. */
5624 cp_buf
= memchr(buf
, 0, sizeof(buf
));
5626 /* Null byte found, skip one string */
5628 nb_read
-= cp_buf
- buf
;
5629 word_skipped
= true;
5634 if (write(fd
, cp_buf
, nb_read
) != nb_read
) {
5643 return close(fd_orig
);
5646 static int open_self_maps(void *cpu_env
, int fd
)
5648 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
5649 TaskState
*ts
= cpu
->opaque
;
5655 fp
= fopen("/proc/self/maps", "r");
5660 while ((read
= getline(&line
, &len
, fp
)) != -1) {
5661 int fields
, dev_maj
, dev_min
, inode
;
5662 uint64_t min
, max
, offset
;
5663 char flag_r
, flag_w
, flag_x
, flag_p
;
5664 char path
[512] = "";
5665 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
5666 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
5667 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
5669 if ((fields
< 10) || (fields
> 11)) {
5672 if (h2g_valid(min
)) {
5673 int flags
= page_get_flags(h2g(min
));
5674 max
= h2g_valid(max
- 1) ? max
: (uintptr_t)g2h(GUEST_ADDR_MAX
);
5675 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
5678 if (h2g(min
) == ts
->info
->stack_limit
) {
5679 pstrcpy(path
, sizeof(path
), " [stack]");
5681 dprintf(fd
, TARGET_ABI_FMT_lx
"-" TARGET_ABI_FMT_lx
5682 " %c%c%c%c %08" PRIx64
" %02x:%02x %d %s%s\n",
5683 h2g(min
), h2g(max
- 1) + 1, flag_r
, flag_w
,
5684 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
5685 path
[0] ? " " : "", path
);
5695 static int open_self_stat(void *cpu_env
, int fd
)
5697 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
5698 TaskState
*ts
= cpu
->opaque
;
5699 abi_ulong start_stack
= ts
->info
->start_stack
;
5702 for (i
= 0; i
< 44; i
++) {
5710 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
5711 } else if (i
== 1) {
5713 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
5714 } else if (i
== 27) {
5717 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
5719 /* for the rest, there is MasterCard */
5720 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
5724 if (write(fd
, buf
, len
) != len
) {
5732 static int open_self_auxv(void *cpu_env
, int fd
)
5734 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
5735 TaskState
*ts
= cpu
->opaque
;
5736 abi_ulong auxv
= ts
->info
->saved_auxv
;
5737 abi_ulong len
= ts
->info
->auxv_len
;
5741 * Auxiliary vector is stored in target process stack.
5742 * read in whole auxv vector and copy it to file
5744 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
5748 r
= write(fd
, ptr
, len
);
5755 lseek(fd
, 0, SEEK_SET
);
5756 unlock_user(ptr
, auxv
, len
);
5762 static int is_proc_myself(const char *filename
, const char *entry
)
5764 if (!strncmp(filename
, "/proc/", strlen("/proc/"))) {
5765 filename
+= strlen("/proc/");
5766 if (!strncmp(filename
, "self/", strlen("self/"))) {
5767 filename
+= strlen("self/");
5768 } else if (*filename
>= '1' && *filename
<= '9') {
5770 snprintf(myself
, sizeof(myself
), "%d/", getpid());
5771 if (!strncmp(filename
, myself
, strlen(myself
))) {
5772 filename
+= strlen(myself
);
5779 if (!strcmp(filename
, entry
)) {
5786 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
5787 static int is_proc(const char *filename
, const char *entry
)
5789 return strcmp(filename
, entry
) == 0;
5792 static int open_net_route(void *cpu_env
, int fd
)
5799 fp
= fopen("/proc/net/route", "r");
5806 read
= getline(&line
, &len
, fp
);
5807 dprintf(fd
, "%s", line
);
5811 while ((read
= getline(&line
, &len
, fp
)) != -1) {
5813 uint32_t dest
, gw
, mask
;
5814 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
5815 sscanf(line
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5816 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
5817 &mask
, &mtu
, &window
, &irtt
);
5818 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5819 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
5820 metric
, tswap32(mask
), mtu
, window
, irtt
);
5830 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
5833 const char *filename
;
5834 int (*fill
)(void *cpu_env
, int fd
);
5835 int (*cmp
)(const char *s1
, const char *s2
);
5837 const struct fake_open
*fake_open
;
5838 static const struct fake_open fakes
[] = {
5839 { "maps", open_self_maps
, is_proc_myself
},
5840 { "stat", open_self_stat
, is_proc_myself
},
5841 { "auxv", open_self_auxv
, is_proc_myself
},
5842 { "cmdline", open_self_cmdline
, is_proc_myself
},
5843 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
5844 { "/proc/net/route", open_net_route
, is_proc
},
5846 { NULL
, NULL
, NULL
}
5849 if (is_proc_myself(pathname
, "exe")) {
5850 int execfd
= qemu_getauxval(AT_EXECFD
);
5851 return execfd
? execfd
: sys_openat(dirfd
, exec_path
, flags
, mode
);
5854 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
5855 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
5860 if (fake_open
->filename
) {
5862 char filename
[PATH_MAX
];
5865 /* create temporary file to map stat to */
5866 tmpdir
= getenv("TMPDIR");
5869 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
5870 fd
= mkstemp(filename
);
5876 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
5882 lseek(fd
, 0, SEEK_SET
);
5887 return sys_openat(dirfd
, path(pathname
), flags
, mode
);
5890 #define TIMER_MAGIC 0x0caf0000
5891 #define TIMER_MAGIC_MASK 0xffff0000
5893 /* Convert QEMU provided timer ID back to internal 16bit index format */
5894 static target_timer_t
get_timer_id(abi_long arg
)
5896 target_timer_t timerid
= arg
;
5898 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
5899 return -TARGET_EINVAL
;
5904 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
5905 return -TARGET_EINVAL
;
5911 /* do_syscall() should always have a single exit point at the end so
5912 that actions, such as logging of syscall results, can be performed.
5913 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5914 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
5915 abi_long arg2
, abi_long arg3
, abi_long arg4
,
5916 abi_long arg5
, abi_long arg6
, abi_long arg7
,
5919 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
5925 #if defined(DEBUG_ERESTARTSYS)
5926 /* Debug-only code for exercising the syscall-restart code paths
5927 * in the per-architecture cpu main loops: restart every syscall
5928 * the guest makes once before letting it through.
5935 return -TARGET_ERESTARTSYS
;
5941 gemu_log("syscall %d", num
);
5944 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
5947 case TARGET_NR_exit
:
5948 /* In old applications this may be used to implement _exit(2).
5949 However in threaded applictions it is used for thread termination,
5950 and _exit_group is used for application termination.
5951 Do thread termination if we have more then one thread. */
5952 /* FIXME: This probably breaks if a signal arrives. We should probably
5953 be disabling signals. */
5954 if (CPU_NEXT(first_cpu
)) {
5958 /* Remove the CPU from the list. */
5959 QTAILQ_REMOVE(&cpus
, cpu
, node
);
5962 if (ts
->child_tidptr
) {
5963 put_user_u32(0, ts
->child_tidptr
);
5964 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
5968 object_unref(OBJECT(cpu
));
5970 rcu_unregister_thread();
5976 gdb_exit(cpu_env
, arg1
);
5978 ret
= 0; /* avoid warning */
5980 case TARGET_NR_read
:
5984 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
5986 ret
= get_errno(read(arg1
, p
, arg3
));
5988 fd_trans_host_to_target_data(arg1
)) {
5989 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
5991 unlock_user(p
, arg2
, ret
);
5994 case TARGET_NR_write
:
5995 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
5997 ret
= get_errno(write(arg1
, p
, arg3
));
5998 unlock_user(p
, arg2
, 0);
6000 #ifdef TARGET_NR_open
6001 case TARGET_NR_open
:
6002 if (!(p
= lock_user_string(arg1
)))
6004 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
6005 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
6007 fd_trans_unregister(ret
);
6008 unlock_user(p
, arg1
, 0);
6011 case TARGET_NR_openat
:
6012 if (!(p
= lock_user_string(arg2
)))
6014 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
6015 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
6017 fd_trans_unregister(ret
);
6018 unlock_user(p
, arg2
, 0);
6020 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6021 case TARGET_NR_name_to_handle_at
:
6022 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
6025 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6026 case TARGET_NR_open_by_handle_at
:
6027 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
6028 fd_trans_unregister(ret
);
6031 case TARGET_NR_close
:
6032 fd_trans_unregister(arg1
);
6033 ret
= get_errno(close(arg1
));
6038 #ifdef TARGET_NR_fork
6039 case TARGET_NR_fork
:
6040 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
6043 #ifdef TARGET_NR_waitpid
6044 case TARGET_NR_waitpid
:
6047 ret
= get_errno(waitpid(arg1
, &status
, arg3
));
6048 if (!is_error(ret
) && arg2
&& ret
6049 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
6054 #ifdef TARGET_NR_waitid
6055 case TARGET_NR_waitid
:
6059 ret
= get_errno(waitid(arg1
, arg2
, &info
, arg4
));
6060 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
6061 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
6063 host_to_target_siginfo(p
, &info
);
6064 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
6069 #ifdef TARGET_NR_creat /* not on alpha */
6070 case TARGET_NR_creat
:
6071 if (!(p
= lock_user_string(arg1
)))
6073 ret
= get_errno(creat(p
, arg2
));
6074 fd_trans_unregister(ret
);
6075 unlock_user(p
, arg1
, 0);
6078 #ifdef TARGET_NR_link
6079 case TARGET_NR_link
:
6082 p
= lock_user_string(arg1
);
6083 p2
= lock_user_string(arg2
);
6085 ret
= -TARGET_EFAULT
;
6087 ret
= get_errno(link(p
, p2
));
6088 unlock_user(p2
, arg2
, 0);
6089 unlock_user(p
, arg1
, 0);
6093 #if defined(TARGET_NR_linkat)
6094 case TARGET_NR_linkat
:
6099 p
= lock_user_string(arg2
);
6100 p2
= lock_user_string(arg4
);
6102 ret
= -TARGET_EFAULT
;
6104 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
6105 unlock_user(p
, arg2
, 0);
6106 unlock_user(p2
, arg4
, 0);
6110 #ifdef TARGET_NR_unlink
6111 case TARGET_NR_unlink
:
6112 if (!(p
= lock_user_string(arg1
)))
6114 ret
= get_errno(unlink(p
));
6115 unlock_user(p
, arg1
, 0);
6118 #if defined(TARGET_NR_unlinkat)
6119 case TARGET_NR_unlinkat
:
6120 if (!(p
= lock_user_string(arg2
)))
6122 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
6123 unlock_user(p
, arg2
, 0);
6126 case TARGET_NR_execve
:
6128 char **argp
, **envp
;
6131 abi_ulong guest_argp
;
6132 abi_ulong guest_envp
;
6139 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
6140 if (get_user_ual(addr
, gp
))
6148 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
6149 if (get_user_ual(addr
, gp
))
6156 argp
= alloca((argc
+ 1) * sizeof(void *));
6157 envp
= alloca((envc
+ 1) * sizeof(void *));
6159 for (gp
= guest_argp
, q
= argp
; gp
;
6160 gp
+= sizeof(abi_ulong
), q
++) {
6161 if (get_user_ual(addr
, gp
))
6165 if (!(*q
= lock_user_string(addr
)))
6167 total_size
+= strlen(*q
) + 1;
6171 for (gp
= guest_envp
, q
= envp
; gp
;
6172 gp
+= sizeof(abi_ulong
), q
++) {
6173 if (get_user_ual(addr
, gp
))
6177 if (!(*q
= lock_user_string(addr
)))
6179 total_size
+= strlen(*q
) + 1;
6183 if (!(p
= lock_user_string(arg1
)))
6185 ret
= get_errno(execve(p
, argp
, envp
));
6186 unlock_user(p
, arg1
, 0);
6191 ret
= -TARGET_EFAULT
;
6194 for (gp
= guest_argp
, q
= argp
; *q
;
6195 gp
+= sizeof(abi_ulong
), q
++) {
6196 if (get_user_ual(addr
, gp
)
6199 unlock_user(*q
, addr
, 0);
6201 for (gp
= guest_envp
, q
= envp
; *q
;
6202 gp
+= sizeof(abi_ulong
), q
++) {
6203 if (get_user_ual(addr
, gp
)
6206 unlock_user(*q
, addr
, 0);
6210 case TARGET_NR_chdir
:
6211 if (!(p
= lock_user_string(arg1
)))
6213 ret
= get_errno(chdir(p
));
6214 unlock_user(p
, arg1
, 0);
6216 #ifdef TARGET_NR_time
6217 case TARGET_NR_time
:
6220 ret
= get_errno(time(&host_time
));
6223 && put_user_sal(host_time
, arg1
))
6228 #ifdef TARGET_NR_mknod
6229 case TARGET_NR_mknod
:
6230 if (!(p
= lock_user_string(arg1
)))
6232 ret
= get_errno(mknod(p
, arg2
, arg3
));
6233 unlock_user(p
, arg1
, 0);
6236 #if defined(TARGET_NR_mknodat)
6237 case TARGET_NR_mknodat
:
6238 if (!(p
= lock_user_string(arg2
)))
6240 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
6241 unlock_user(p
, arg2
, 0);
6244 #ifdef TARGET_NR_chmod
6245 case TARGET_NR_chmod
:
6246 if (!(p
= lock_user_string(arg1
)))
6248 ret
= get_errno(chmod(p
, arg2
));
6249 unlock_user(p
, arg1
, 0);
6252 #ifdef TARGET_NR_break
6253 case TARGET_NR_break
:
6256 #ifdef TARGET_NR_oldstat
6257 case TARGET_NR_oldstat
:
6260 case TARGET_NR_lseek
:
6261 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
6263 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
6264 /* Alpha specific */
6265 case TARGET_NR_getxpid
:
6266 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
6267 ret
= get_errno(getpid());
6270 #ifdef TARGET_NR_getpid
6271 case TARGET_NR_getpid
:
6272 ret
= get_errno(getpid());
6275 case TARGET_NR_mount
:
6277 /* need to look at the data field */
6281 p
= lock_user_string(arg1
);
6289 p2
= lock_user_string(arg2
);
6292 unlock_user(p
, arg1
, 0);
6298 p3
= lock_user_string(arg3
);
6301 unlock_user(p
, arg1
, 0);
6303 unlock_user(p2
, arg2
, 0);
6310 /* FIXME - arg5 should be locked, but it isn't clear how to
6311 * do that since it's not guaranteed to be a NULL-terminated
6315 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
6317 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
6319 ret
= get_errno(ret
);
6322 unlock_user(p
, arg1
, 0);
6324 unlock_user(p2
, arg2
, 0);
6326 unlock_user(p3
, arg3
, 0);
6330 #ifdef TARGET_NR_umount
6331 case TARGET_NR_umount
:
6332 if (!(p
= lock_user_string(arg1
)))
6334 ret
= get_errno(umount(p
));
6335 unlock_user(p
, arg1
, 0);
6338 #ifdef TARGET_NR_stime /* not on alpha */
6339 case TARGET_NR_stime
:
6342 if (get_user_sal(host_time
, arg1
))
6344 ret
= get_errno(stime(&host_time
));
6348 case TARGET_NR_ptrace
:
6350 #ifdef TARGET_NR_alarm /* not on alpha */
6351 case TARGET_NR_alarm
:
6355 #ifdef TARGET_NR_oldfstat
6356 case TARGET_NR_oldfstat
:
6359 #ifdef TARGET_NR_pause /* not on alpha */
6360 case TARGET_NR_pause
:
6361 ret
= get_errno(pause());
6364 #ifdef TARGET_NR_utime
6365 case TARGET_NR_utime
:
6367 struct utimbuf tbuf
, *host_tbuf
;
6368 struct target_utimbuf
*target_tbuf
;
6370 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
6372 tbuf
.actime
= tswapal(target_tbuf
->actime
);
6373 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
6374 unlock_user_struct(target_tbuf
, arg2
, 0);
6379 if (!(p
= lock_user_string(arg1
)))
6381 ret
= get_errno(utime(p
, host_tbuf
));
6382 unlock_user(p
, arg1
, 0);
6386 #ifdef TARGET_NR_utimes
6387 case TARGET_NR_utimes
:
6389 struct timeval
*tvp
, tv
[2];
6391 if (copy_from_user_timeval(&tv
[0], arg2
)
6392 || copy_from_user_timeval(&tv
[1],
6393 arg2
+ sizeof(struct target_timeval
)))
6399 if (!(p
= lock_user_string(arg1
)))
6401 ret
= get_errno(utimes(p
, tvp
));
6402 unlock_user(p
, arg1
, 0);
6406 #if defined(TARGET_NR_futimesat)
6407 case TARGET_NR_futimesat
:
6409 struct timeval
*tvp
, tv
[2];
6411 if (copy_from_user_timeval(&tv
[0], arg3
)
6412 || copy_from_user_timeval(&tv
[1],
6413 arg3
+ sizeof(struct target_timeval
)))
6419 if (!(p
= lock_user_string(arg2
)))
6421 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
6422 unlock_user(p
, arg2
, 0);
6426 #ifdef TARGET_NR_stty
6427 case TARGET_NR_stty
:
6430 #ifdef TARGET_NR_gtty
6431 case TARGET_NR_gtty
:
6434 #ifdef TARGET_NR_access
6435 case TARGET_NR_access
:
6436 if (!(p
= lock_user_string(arg1
)))
6438 ret
= get_errno(access(path(p
), arg2
));
6439 unlock_user(p
, arg1
, 0);
6442 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
6443 case TARGET_NR_faccessat
:
6444 if (!(p
= lock_user_string(arg2
)))
6446 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
6447 unlock_user(p
, arg2
, 0);
6450 #ifdef TARGET_NR_nice /* not on alpha */
6451 case TARGET_NR_nice
:
6452 ret
= get_errno(nice(arg1
));
6455 #ifdef TARGET_NR_ftime
6456 case TARGET_NR_ftime
:
6459 case TARGET_NR_sync
:
6463 case TARGET_NR_kill
:
6464 ret
= get_errno(kill(arg1
, target_to_host_signal(arg2
)));
6466 #ifdef TARGET_NR_rename
6467 case TARGET_NR_rename
:
6470 p
= lock_user_string(arg1
);
6471 p2
= lock_user_string(arg2
);
6473 ret
= -TARGET_EFAULT
;
6475 ret
= get_errno(rename(p
, p2
));
6476 unlock_user(p2
, arg2
, 0);
6477 unlock_user(p
, arg1
, 0);
6481 #if defined(TARGET_NR_renameat)
6482 case TARGET_NR_renameat
:
6485 p
= lock_user_string(arg2
);
6486 p2
= lock_user_string(arg4
);
6488 ret
= -TARGET_EFAULT
;
6490 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
6491 unlock_user(p2
, arg4
, 0);
6492 unlock_user(p
, arg2
, 0);
6496 #ifdef TARGET_NR_mkdir
6497 case TARGET_NR_mkdir
:
6498 if (!(p
= lock_user_string(arg1
)))
6500 ret
= get_errno(mkdir(p
, arg2
));
6501 unlock_user(p
, arg1
, 0);
6504 #if defined(TARGET_NR_mkdirat)
6505 case TARGET_NR_mkdirat
:
6506 if (!(p
= lock_user_string(arg2
)))
6508 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
6509 unlock_user(p
, arg2
, 0);
6512 #ifdef TARGET_NR_rmdir
6513 case TARGET_NR_rmdir
:
6514 if (!(p
= lock_user_string(arg1
)))
6516 ret
= get_errno(rmdir(p
));
6517 unlock_user(p
, arg1
, 0);
6521 ret
= get_errno(dup(arg1
));
6523 fd_trans_dup(arg1
, ret
);
6526 #ifdef TARGET_NR_pipe
6527 case TARGET_NR_pipe
:
6528 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
6531 #ifdef TARGET_NR_pipe2
6532 case TARGET_NR_pipe2
:
6533 ret
= do_pipe(cpu_env
, arg1
,
6534 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
6537 case TARGET_NR_times
:
6539 struct target_tms
*tmsp
;
6541 ret
= get_errno(times(&tms
));
6543 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
6546 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
6547 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
6548 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
6549 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
6552 ret
= host_to_target_clock_t(ret
);
6555 #ifdef TARGET_NR_prof
6556 case TARGET_NR_prof
:
6559 #ifdef TARGET_NR_signal
6560 case TARGET_NR_signal
:
6563 case TARGET_NR_acct
:
6565 ret
= get_errno(acct(NULL
));
6567 if (!(p
= lock_user_string(arg1
)))
6569 ret
= get_errno(acct(path(p
)));
6570 unlock_user(p
, arg1
, 0);
6573 #ifdef TARGET_NR_umount2
6574 case TARGET_NR_umount2
:
6575 if (!(p
= lock_user_string(arg1
)))
6577 ret
= get_errno(umount2(p
, arg2
));
6578 unlock_user(p
, arg1
, 0);
6581 #ifdef TARGET_NR_lock
6582 case TARGET_NR_lock
:
6585 case TARGET_NR_ioctl
:
6586 ret
= do_ioctl(arg1
, arg2
, arg3
);
6588 case TARGET_NR_fcntl
:
6589 ret
= do_fcntl(arg1
, arg2
, arg3
);
6591 #ifdef TARGET_NR_mpx
6595 case TARGET_NR_setpgid
:
6596 ret
= get_errno(setpgid(arg1
, arg2
));
6598 #ifdef TARGET_NR_ulimit
6599 case TARGET_NR_ulimit
:
6602 #ifdef TARGET_NR_oldolduname
6603 case TARGET_NR_oldolduname
:
6606 case TARGET_NR_umask
:
6607 ret
= get_errno(umask(arg1
));
6609 case TARGET_NR_chroot
:
6610 if (!(p
= lock_user_string(arg1
)))
6612 ret
= get_errno(chroot(p
));
6613 unlock_user(p
, arg1
, 0);
6615 #ifdef TARGET_NR_ustat
6616 case TARGET_NR_ustat
:
6619 #ifdef TARGET_NR_dup2
6620 case TARGET_NR_dup2
:
6621 ret
= get_errno(dup2(arg1
, arg2
));
6623 fd_trans_dup(arg1
, arg2
);
6627 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
6628 case TARGET_NR_dup3
:
6629 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
6631 fd_trans_dup(arg1
, arg2
);
6635 #ifdef TARGET_NR_getppid /* not on alpha */
6636 case TARGET_NR_getppid
:
6637 ret
= get_errno(getppid());
6640 #ifdef TARGET_NR_getpgrp
6641 case TARGET_NR_getpgrp
:
6642 ret
= get_errno(getpgrp());
6645 case TARGET_NR_setsid
:
6646 ret
= get_errno(setsid());
6648 #ifdef TARGET_NR_sigaction
6649 case TARGET_NR_sigaction
:
6651 #if defined(TARGET_ALPHA)
6652 struct target_sigaction act
, oact
, *pact
= 0;
6653 struct target_old_sigaction
*old_act
;
6655 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
6657 act
._sa_handler
= old_act
->_sa_handler
;
6658 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
6659 act
.sa_flags
= old_act
->sa_flags
;
6660 act
.sa_restorer
= 0;
6661 unlock_user_struct(old_act
, arg2
, 0);
6664 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
6665 if (!is_error(ret
) && arg3
) {
6666 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
6668 old_act
->_sa_handler
= oact
._sa_handler
;
6669 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
6670 old_act
->sa_flags
= oact
.sa_flags
;
6671 unlock_user_struct(old_act
, arg3
, 1);
6673 #elif defined(TARGET_MIPS)
6674 struct target_sigaction act
, oact
, *pact
, *old_act
;
6677 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
6679 act
._sa_handler
= old_act
->_sa_handler
;
6680 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
6681 act
.sa_flags
= old_act
->sa_flags
;
6682 unlock_user_struct(old_act
, arg2
, 0);
6688 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
6690 if (!is_error(ret
) && arg3
) {
6691 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
6693 old_act
->_sa_handler
= oact
._sa_handler
;
6694 old_act
->sa_flags
= oact
.sa_flags
;
6695 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
6696 old_act
->sa_mask
.sig
[1] = 0;
6697 old_act
->sa_mask
.sig
[2] = 0;
6698 old_act
->sa_mask
.sig
[3] = 0;
6699 unlock_user_struct(old_act
, arg3
, 1);
6702 struct target_old_sigaction
*old_act
;
6703 struct target_sigaction act
, oact
, *pact
;
6705 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
6707 act
._sa_handler
= old_act
->_sa_handler
;
6708 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
6709 act
.sa_flags
= old_act
->sa_flags
;
6710 act
.sa_restorer
= old_act
->sa_restorer
;
6711 unlock_user_struct(old_act
, arg2
, 0);
6716 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
6717 if (!is_error(ret
) && arg3
) {
6718 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
6720 old_act
->_sa_handler
= oact
._sa_handler
;
6721 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
6722 old_act
->sa_flags
= oact
.sa_flags
;
6723 old_act
->sa_restorer
= oact
.sa_restorer
;
6724 unlock_user_struct(old_act
, arg3
, 1);
6730 case TARGET_NR_rt_sigaction
:
6732 #if defined(TARGET_ALPHA)
6733 struct target_sigaction act
, oact
, *pact
= 0;
6734 struct target_rt_sigaction
*rt_act
;
6735 /* ??? arg4 == sizeof(sigset_t). */
6737 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
6739 act
._sa_handler
= rt_act
->_sa_handler
;
6740 act
.sa_mask
= rt_act
->sa_mask
;
6741 act
.sa_flags
= rt_act
->sa_flags
;
6742 act
.sa_restorer
= arg5
;
6743 unlock_user_struct(rt_act
, arg2
, 0);
6746 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
6747 if (!is_error(ret
) && arg3
) {
6748 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
6750 rt_act
->_sa_handler
= oact
._sa_handler
;
6751 rt_act
->sa_mask
= oact
.sa_mask
;
6752 rt_act
->sa_flags
= oact
.sa_flags
;
6753 unlock_user_struct(rt_act
, arg3
, 1);
6756 struct target_sigaction
*act
;
6757 struct target_sigaction
*oact
;
6760 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
6765 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
6766 ret
= -TARGET_EFAULT
;
6767 goto rt_sigaction_fail
;
6771 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
6774 unlock_user_struct(act
, arg2
, 0);
6776 unlock_user_struct(oact
, arg3
, 1);
6780 #ifdef TARGET_NR_sgetmask /* not on alpha */
6781 case TARGET_NR_sgetmask
:
6784 abi_ulong target_set
;
6785 do_sigprocmask(0, NULL
, &cur_set
);
6786 host_to_target_old_sigset(&target_set
, &cur_set
);
6791 #ifdef TARGET_NR_ssetmask /* not on alpha */
6792 case TARGET_NR_ssetmask
:
6794 sigset_t set
, oset
, cur_set
;
6795 abi_ulong target_set
= arg1
;
6796 do_sigprocmask(0, NULL
, &cur_set
);
6797 target_to_host_old_sigset(&set
, &target_set
);
6798 sigorset(&set
, &set
, &cur_set
);
6799 do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
6800 host_to_target_old_sigset(&target_set
, &oset
);
6805 #ifdef TARGET_NR_sigprocmask
6806 case TARGET_NR_sigprocmask
:
6808 #if defined(TARGET_ALPHA)
6809 sigset_t set
, oldset
;
6814 case TARGET_SIG_BLOCK
:
6817 case TARGET_SIG_UNBLOCK
:
6820 case TARGET_SIG_SETMASK
:
6824 ret
= -TARGET_EINVAL
;
6828 target_to_host_old_sigset(&set
, &mask
);
6830 ret
= get_errno(do_sigprocmask(how
, &set
, &oldset
));
6831 if (!is_error(ret
)) {
6832 host_to_target_old_sigset(&mask
, &oldset
);
6834 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
6837 sigset_t set
, oldset
, *set_ptr
;
6842 case TARGET_SIG_BLOCK
:
6845 case TARGET_SIG_UNBLOCK
:
6848 case TARGET_SIG_SETMASK
:
6852 ret
= -TARGET_EINVAL
;
6855 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
6857 target_to_host_old_sigset(&set
, p
);
6858 unlock_user(p
, arg2
, 0);
6864 ret
= get_errno(do_sigprocmask(how
, set_ptr
, &oldset
));
6865 if (!is_error(ret
) && arg3
) {
6866 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
6868 host_to_target_old_sigset(p
, &oldset
);
6869 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
6875 case TARGET_NR_rt_sigprocmask
:
6878 sigset_t set
, oldset
, *set_ptr
;
6882 case TARGET_SIG_BLOCK
:
6885 case TARGET_SIG_UNBLOCK
:
6888 case TARGET_SIG_SETMASK
:
6892 ret
= -TARGET_EINVAL
;
6895 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
6897 target_to_host_sigset(&set
, p
);
6898 unlock_user(p
, arg2
, 0);
6904 ret
= get_errno(do_sigprocmask(how
, set_ptr
, &oldset
));
6905 if (!is_error(ret
) && arg3
) {
6906 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
6908 host_to_target_sigset(p
, &oldset
);
6909 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
6913 #ifdef TARGET_NR_sigpending
6914 case TARGET_NR_sigpending
:
6917 ret
= get_errno(sigpending(&set
));
6918 if (!is_error(ret
)) {
6919 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
6921 host_to_target_old_sigset(p
, &set
);
6922 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
6927 case TARGET_NR_rt_sigpending
:
6930 ret
= get_errno(sigpending(&set
));
6931 if (!is_error(ret
)) {
6932 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
6934 host_to_target_sigset(p
, &set
);
6935 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
6939 #ifdef TARGET_NR_sigsuspend
6940 case TARGET_NR_sigsuspend
:
6943 #if defined(TARGET_ALPHA)
6944 abi_ulong mask
= arg1
;
6945 target_to_host_old_sigset(&set
, &mask
);
6947 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
6949 target_to_host_old_sigset(&set
, p
);
6950 unlock_user(p
, arg1
, 0);
6952 ret
= get_errno(sigsuspend(&set
));
6956 case TARGET_NR_rt_sigsuspend
:
6959 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
6961 target_to_host_sigset(&set
, p
);
6962 unlock_user(p
, arg1
, 0);
6963 ret
= get_errno(sigsuspend(&set
));
6966 case TARGET_NR_rt_sigtimedwait
:
6969 struct timespec uts
, *puts
;
6972 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
6974 target_to_host_sigset(&set
, p
);
6975 unlock_user(p
, arg1
, 0);
6978 target_to_host_timespec(puts
, arg3
);
6982 ret
= get_errno(sigtimedwait(&set
, &uinfo
, puts
));
6983 if (!is_error(ret
)) {
6985 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
6990 host_to_target_siginfo(p
, &uinfo
);
6991 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
6993 ret
= host_to_target_signal(ret
);
6997 case TARGET_NR_rt_sigqueueinfo
:
7000 if (!(p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_sigset_t
), 1)))
7002 target_to_host_siginfo(&uinfo
, p
);
7003 unlock_user(p
, arg1
, 0);
7004 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
7007 #ifdef TARGET_NR_sigreturn
7008 case TARGET_NR_sigreturn
:
7009 ret
= do_sigreturn(cpu_env
);
7012 case TARGET_NR_rt_sigreturn
:
7013 ret
= do_rt_sigreturn(cpu_env
);
7015 case TARGET_NR_sethostname
:
7016 if (!(p
= lock_user_string(arg1
)))
7018 ret
= get_errno(sethostname(p
, arg2
));
7019 unlock_user(p
, arg1
, 0);
7021 case TARGET_NR_setrlimit
:
7023 int resource
= target_to_host_resource(arg1
);
7024 struct target_rlimit
*target_rlim
;
7026 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
7028 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
7029 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
7030 unlock_user_struct(target_rlim
, arg2
, 0);
7031 ret
= get_errno(setrlimit(resource
, &rlim
));
7034 case TARGET_NR_getrlimit
:
7036 int resource
= target_to_host_resource(arg1
);
7037 struct target_rlimit
*target_rlim
;
7040 ret
= get_errno(getrlimit(resource
, &rlim
));
7041 if (!is_error(ret
)) {
7042 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
7044 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
7045 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
7046 unlock_user_struct(target_rlim
, arg2
, 1);
7050 case TARGET_NR_getrusage
:
7052 struct rusage rusage
;
7053 ret
= get_errno(getrusage(arg1
, &rusage
));
7054 if (!is_error(ret
)) {
7055 ret
= host_to_target_rusage(arg2
, &rusage
);
7059 case TARGET_NR_gettimeofday
:
7062 ret
= get_errno(gettimeofday(&tv
, NULL
));
7063 if (!is_error(ret
)) {
7064 if (copy_to_user_timeval(arg1
, &tv
))
7069 case TARGET_NR_settimeofday
:
7071 struct timeval tv
, *ptv
= NULL
;
7072 struct timezone tz
, *ptz
= NULL
;
7075 if (copy_from_user_timeval(&tv
, arg1
)) {
7082 if (copy_from_user_timezone(&tz
, arg2
)) {
7088 ret
= get_errno(settimeofday(ptv
, ptz
));
7091 #if defined(TARGET_NR_select)
7092 case TARGET_NR_select
:
7093 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
7094 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
7097 struct target_sel_arg_struct
*sel
;
7098 abi_ulong inp
, outp
, exp
, tvp
;
7101 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1))
7103 nsel
= tswapal(sel
->n
);
7104 inp
= tswapal(sel
->inp
);
7105 outp
= tswapal(sel
->outp
);
7106 exp
= tswapal(sel
->exp
);
7107 tvp
= tswapal(sel
->tvp
);
7108 unlock_user_struct(sel
, arg1
, 0);
7109 ret
= do_select(nsel
, inp
, outp
, exp
, tvp
);
7114 #ifdef TARGET_NR_pselect6
7115 case TARGET_NR_pselect6
:
7117 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
7118 fd_set rfds
, wfds
, efds
;
7119 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
7120 struct timespec ts
, *ts_ptr
;
7123 * The 6th arg is actually two args smashed together,
7124 * so we cannot use the C library.
7132 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
7133 target_sigset_t
*target_sigset
;
7141 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
7145 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
7149 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
7155 * This takes a timespec, and not a timeval, so we cannot
7156 * use the do_select() helper ...
7159 if (target_to_host_timespec(&ts
, ts_addr
)) {
7167 /* Extract the two packed args for the sigset */
7170 sig
.size
= _NSIG
/ 8;
7172 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
7176 arg_sigset
= tswapal(arg7
[0]);
7177 arg_sigsize
= tswapal(arg7
[1]);
7178 unlock_user(arg7
, arg6
, 0);
7182 if (arg_sigsize
!= sizeof(*target_sigset
)) {
7183 /* Like the kernel, we enforce correct size sigsets */
7184 ret
= -TARGET_EINVAL
;
7187 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
7188 sizeof(*target_sigset
), 1);
7189 if (!target_sigset
) {
7192 target_to_host_sigset(&set
, target_sigset
);
7193 unlock_user(target_sigset
, arg_sigset
, 0);
7201 ret
= get_errno(sys_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
7204 if (!is_error(ret
)) {
7205 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
7207 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
7209 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
7212 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
7218 #ifdef TARGET_NR_symlink
7219 case TARGET_NR_symlink
:
7222 p
= lock_user_string(arg1
);
7223 p2
= lock_user_string(arg2
);
7225 ret
= -TARGET_EFAULT
;
7227 ret
= get_errno(symlink(p
, p2
));
7228 unlock_user(p2
, arg2
, 0);
7229 unlock_user(p
, arg1
, 0);
7233 #if defined(TARGET_NR_symlinkat)
7234 case TARGET_NR_symlinkat
:
7237 p
= lock_user_string(arg1
);
7238 p2
= lock_user_string(arg3
);
7240 ret
= -TARGET_EFAULT
;
7242 ret
= get_errno(symlinkat(p
, arg2
, p2
));
7243 unlock_user(p2
, arg3
, 0);
7244 unlock_user(p
, arg1
, 0);
7248 #ifdef TARGET_NR_oldlstat
7249 case TARGET_NR_oldlstat
:
7252 #ifdef TARGET_NR_readlink
7253 case TARGET_NR_readlink
:
7256 p
= lock_user_string(arg1
);
7257 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
7259 ret
= -TARGET_EFAULT
;
7261 /* Short circuit this for the magic exe check. */
7262 ret
= -TARGET_EINVAL
;
7263 } else if (is_proc_myself((const char *)p
, "exe")) {
7264 char real
[PATH_MAX
], *temp
;
7265 temp
= realpath(exec_path
, real
);
7266 /* Return value is # of bytes that we wrote to the buffer. */
7268 ret
= get_errno(-1);
7270 /* Don't worry about sign mismatch as earlier mapping
7271 * logic would have thrown a bad address error. */
7272 ret
= MIN(strlen(real
), arg3
);
7273 /* We cannot NUL terminate the string. */
7274 memcpy(p2
, real
, ret
);
7277 ret
= get_errno(readlink(path(p
), p2
, arg3
));
7279 unlock_user(p2
, arg2
, ret
);
7280 unlock_user(p
, arg1
, 0);
7284 #if defined(TARGET_NR_readlinkat)
7285 case TARGET_NR_readlinkat
:
7288 p
= lock_user_string(arg2
);
7289 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
7291 ret
= -TARGET_EFAULT
;
7292 } else if (is_proc_myself((const char *)p
, "exe")) {
7293 char real
[PATH_MAX
], *temp
;
7294 temp
= realpath(exec_path
, real
);
7295 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
7296 snprintf((char *)p2
, arg4
, "%s", real
);
7298 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
7300 unlock_user(p2
, arg3
, ret
);
7301 unlock_user(p
, arg2
, 0);
7305 #ifdef TARGET_NR_uselib
7306 case TARGET_NR_uselib
:
7309 #ifdef TARGET_NR_swapon
7310 case TARGET_NR_swapon
:
7311 if (!(p
= lock_user_string(arg1
)))
7313 ret
= get_errno(swapon(p
, arg2
));
7314 unlock_user(p
, arg1
, 0);
7317 case TARGET_NR_reboot
:
7318 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
7319 /* arg4 must be ignored in all other cases */
7320 p
= lock_user_string(arg4
);
7324 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
7325 unlock_user(p
, arg4
, 0);
7327 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
7330 #ifdef TARGET_NR_readdir
7331 case TARGET_NR_readdir
:
7334 #ifdef TARGET_NR_mmap
7335 case TARGET_NR_mmap
:
7336 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7337 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
7338 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
7339 || defined(TARGET_S390X)
7342 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
7343 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
7351 unlock_user(v
, arg1
, 0);
7352 ret
= get_errno(target_mmap(v1
, v2
, v3
,
7353 target_to_host_bitmask(v4
, mmap_flags_tbl
),
7357 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
7358 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
7364 #ifdef TARGET_NR_mmap2
7365 case TARGET_NR_mmap2
:
7367 #define MMAP_SHIFT 12
7369 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
7370 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
7372 arg6
<< MMAP_SHIFT
));
7375 case TARGET_NR_munmap
:
7376 ret
= get_errno(target_munmap(arg1
, arg2
));
7378 case TARGET_NR_mprotect
:
7380 TaskState
*ts
= cpu
->opaque
;
7381 /* Special hack to detect libc making the stack executable. */
7382 if ((arg3
& PROT_GROWSDOWN
)
7383 && arg1
>= ts
->info
->stack_limit
7384 && arg1
<= ts
->info
->start_stack
) {
7385 arg3
&= ~PROT_GROWSDOWN
;
7386 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
7387 arg1
= ts
->info
->stack_limit
;
7390 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
7392 #ifdef TARGET_NR_mremap
7393 case TARGET_NR_mremap
:
7394 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
7397 /* ??? msync/mlock/munlock are broken for softmmu. */
7398 #ifdef TARGET_NR_msync
7399 case TARGET_NR_msync
:
7400 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
7403 #ifdef TARGET_NR_mlock
7404 case TARGET_NR_mlock
:
7405 ret
= get_errno(mlock(g2h(arg1
), arg2
));
7408 #ifdef TARGET_NR_munlock
7409 case TARGET_NR_munlock
:
7410 ret
= get_errno(munlock(g2h(arg1
), arg2
));
7413 #ifdef TARGET_NR_mlockall
7414 case TARGET_NR_mlockall
:
7415 ret
= get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
7418 #ifdef TARGET_NR_munlockall
7419 case TARGET_NR_munlockall
:
7420 ret
= get_errno(munlockall());
7423 case TARGET_NR_truncate
:
7424 if (!(p
= lock_user_string(arg1
)))
7426 ret
= get_errno(truncate(p
, arg2
));
7427 unlock_user(p
, arg1
, 0);
7429 case TARGET_NR_ftruncate
:
7430 ret
= get_errno(ftruncate(arg1
, arg2
));
7432 case TARGET_NR_fchmod
:
7433 ret
= get_errno(fchmod(arg1
, arg2
));
7435 #if defined(TARGET_NR_fchmodat)
7436 case TARGET_NR_fchmodat
:
7437 if (!(p
= lock_user_string(arg2
)))
7439 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
7440 unlock_user(p
, arg2
, 0);
7443 case TARGET_NR_getpriority
:
7444 /* Note that negative values are valid for getpriority, so we must
7445 differentiate based on errno settings. */
7447 ret
= getpriority(arg1
, arg2
);
7448 if (ret
== -1 && errno
!= 0) {
7449 ret
= -host_to_target_errno(errno
);
7453 /* Return value is the unbiased priority. Signal no error. */
7454 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
7456 /* Return value is a biased priority to avoid negative numbers. */
7460 case TARGET_NR_setpriority
:
7461 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
7463 #ifdef TARGET_NR_profil
7464 case TARGET_NR_profil
:
7467 case TARGET_NR_statfs
:
7468 if (!(p
= lock_user_string(arg1
)))
7470 ret
= get_errno(statfs(path(p
), &stfs
));
7471 unlock_user(p
, arg1
, 0);
7473 if (!is_error(ret
)) {
7474 struct target_statfs
*target_stfs
;
7476 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
7478 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
7479 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
7480 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
7481 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
7482 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
7483 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
7484 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
7485 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
7486 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
7487 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
7488 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
7489 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
7490 unlock_user_struct(target_stfs
, arg2
, 1);
7493 case TARGET_NR_fstatfs
:
7494 ret
= get_errno(fstatfs(arg1
, &stfs
));
7495 goto convert_statfs
;
7496 #ifdef TARGET_NR_statfs64
7497 case TARGET_NR_statfs64
:
7498 if (!(p
= lock_user_string(arg1
)))
7500 ret
= get_errno(statfs(path(p
), &stfs
));
7501 unlock_user(p
, arg1
, 0);
7503 if (!is_error(ret
)) {
7504 struct target_statfs64
*target_stfs
;
7506 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
7508 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
7509 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
7510 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
7511 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
7512 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
7513 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
7514 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
7515 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
7516 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
7517 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
7518 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
7519 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
7520 unlock_user_struct(target_stfs
, arg3
, 1);
7523 case TARGET_NR_fstatfs64
:
7524 ret
= get_errno(fstatfs(arg1
, &stfs
));
7525 goto convert_statfs64
;
7527 #ifdef TARGET_NR_ioperm
7528 case TARGET_NR_ioperm
:
7531 #ifdef TARGET_NR_socketcall
7532 case TARGET_NR_socketcall
:
7533 ret
= do_socketcall(arg1
, arg2
);
7536 #ifdef TARGET_NR_accept
7537 case TARGET_NR_accept
:
7538 ret
= do_accept4(arg1
, arg2
, arg3
, 0);
7541 #ifdef TARGET_NR_accept4
7542 case TARGET_NR_accept4
:
7543 #ifdef CONFIG_ACCEPT4
7544 ret
= do_accept4(arg1
, arg2
, arg3
, arg4
);
7550 #ifdef TARGET_NR_bind
7551 case TARGET_NR_bind
:
7552 ret
= do_bind(arg1
, arg2
, arg3
);
7555 #ifdef TARGET_NR_connect
7556 case TARGET_NR_connect
:
7557 ret
= do_connect(arg1
, arg2
, arg3
);
7560 #ifdef TARGET_NR_getpeername
7561 case TARGET_NR_getpeername
:
7562 ret
= do_getpeername(arg1
, arg2
, arg3
);
7565 #ifdef TARGET_NR_getsockname
7566 case TARGET_NR_getsockname
:
7567 ret
= do_getsockname(arg1
, arg2
, arg3
);
7570 #ifdef TARGET_NR_getsockopt
7571 case TARGET_NR_getsockopt
:
7572 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
7575 #ifdef TARGET_NR_listen
7576 case TARGET_NR_listen
:
7577 ret
= get_errno(listen(arg1
, arg2
));
7580 #ifdef TARGET_NR_recv
7581 case TARGET_NR_recv
:
7582 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
7585 #ifdef TARGET_NR_recvfrom
7586 case TARGET_NR_recvfrom
:
7587 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7590 #ifdef TARGET_NR_recvmsg
7591 case TARGET_NR_recvmsg
:
7592 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
7595 #ifdef TARGET_NR_send
7596 case TARGET_NR_send
:
7597 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
7600 #ifdef TARGET_NR_sendmsg
7601 case TARGET_NR_sendmsg
:
7602 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
7605 #ifdef TARGET_NR_sendmmsg
7606 case TARGET_NR_sendmmsg
:
7607 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
7609 case TARGET_NR_recvmmsg
:
7610 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
7613 #ifdef TARGET_NR_sendto
7614 case TARGET_NR_sendto
:
7615 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7618 #ifdef TARGET_NR_shutdown
7619 case TARGET_NR_shutdown
:
7620 ret
= get_errno(shutdown(arg1
, arg2
));
7623 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
7624 case TARGET_NR_getrandom
:
7625 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
7629 ret
= get_errno(getrandom(p
, arg2
, arg3
));
7630 unlock_user(p
, arg1
, ret
);
7633 #ifdef TARGET_NR_socket
7634 case TARGET_NR_socket
:
7635 ret
= do_socket(arg1
, arg2
, arg3
);
7636 fd_trans_unregister(ret
);
7639 #ifdef TARGET_NR_socketpair
7640 case TARGET_NR_socketpair
:
7641 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
7644 #ifdef TARGET_NR_setsockopt
7645 case TARGET_NR_setsockopt
:
7646 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
7650 case TARGET_NR_syslog
:
7651 if (!(p
= lock_user_string(arg2
)))
7653 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
7654 unlock_user(p
, arg2
, 0);
7657 case TARGET_NR_setitimer
:
7659 struct itimerval value
, ovalue
, *pvalue
;
7663 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
7664 || copy_from_user_timeval(&pvalue
->it_value
,
7665 arg2
+ sizeof(struct target_timeval
)))
7670 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
7671 if (!is_error(ret
) && arg3
) {
7672 if (copy_to_user_timeval(arg3
,
7673 &ovalue
.it_interval
)
7674 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
7680 case TARGET_NR_getitimer
:
7682 struct itimerval value
;
7684 ret
= get_errno(getitimer(arg1
, &value
));
7685 if (!is_error(ret
) && arg2
) {
7686 if (copy_to_user_timeval(arg2
,
7688 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
7694 #ifdef TARGET_NR_stat
7695 case TARGET_NR_stat
:
7696 if (!(p
= lock_user_string(arg1
)))
7698 ret
= get_errno(stat(path(p
), &st
));
7699 unlock_user(p
, arg1
, 0);
7702 #ifdef TARGET_NR_lstat
7703 case TARGET_NR_lstat
:
7704 if (!(p
= lock_user_string(arg1
)))
7706 ret
= get_errno(lstat(path(p
), &st
));
7707 unlock_user(p
, arg1
, 0);
7710 case TARGET_NR_fstat
:
7712 ret
= get_errno(fstat(arg1
, &st
));
7713 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
7716 if (!is_error(ret
)) {
7717 struct target_stat
*target_st
;
7719 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
7721 memset(target_st
, 0, sizeof(*target_st
));
7722 __put_user(st
.st_dev
, &target_st
->st_dev
);
7723 __put_user(st
.st_ino
, &target_st
->st_ino
);
7724 __put_user(st
.st_mode
, &target_st
->st_mode
);
7725 __put_user(st
.st_uid
, &target_st
->st_uid
);
7726 __put_user(st
.st_gid
, &target_st
->st_gid
);
7727 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
7728 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
7729 __put_user(st
.st_size
, &target_st
->st_size
);
7730 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
7731 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
7732 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
7733 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
7734 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
7735 unlock_user_struct(target_st
, arg2
, 1);
7739 #ifdef TARGET_NR_olduname
7740 case TARGET_NR_olduname
:
7743 #ifdef TARGET_NR_iopl
7744 case TARGET_NR_iopl
:
7747 case TARGET_NR_vhangup
:
7748 ret
= get_errno(vhangup());
7750 #ifdef TARGET_NR_idle
7751 case TARGET_NR_idle
:
7754 #ifdef TARGET_NR_syscall
7755 case TARGET_NR_syscall
:
7756 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
7757 arg6
, arg7
, arg8
, 0);
7760 case TARGET_NR_wait4
:
7763 abi_long status_ptr
= arg2
;
7764 struct rusage rusage
, *rusage_ptr
;
7765 abi_ulong target_rusage
= arg4
;
7766 abi_long rusage_err
;
7768 rusage_ptr
= &rusage
;
7771 ret
= get_errno(wait4(arg1
, &status
, arg3
, rusage_ptr
));
7772 if (!is_error(ret
)) {
7773 if (status_ptr
&& ret
) {
7774 status
= host_to_target_waitstatus(status
);
7775 if (put_user_s32(status
, status_ptr
))
7778 if (target_rusage
) {
7779 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
7787 #ifdef TARGET_NR_swapoff
7788 case TARGET_NR_swapoff
:
7789 if (!(p
= lock_user_string(arg1
)))
7791 ret
= get_errno(swapoff(p
));
7792 unlock_user(p
, arg1
, 0);
7795 case TARGET_NR_sysinfo
:
7797 struct target_sysinfo
*target_value
;
7798 struct sysinfo value
;
7799 ret
= get_errno(sysinfo(&value
));
7800 if (!is_error(ret
) && arg1
)
7802 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
7804 __put_user(value
.uptime
, &target_value
->uptime
);
7805 __put_user(value
.loads
[0], &target_value
->loads
[0]);
7806 __put_user(value
.loads
[1], &target_value
->loads
[1]);
7807 __put_user(value
.loads
[2], &target_value
->loads
[2]);
7808 __put_user(value
.totalram
, &target_value
->totalram
);
7809 __put_user(value
.freeram
, &target_value
->freeram
);
7810 __put_user(value
.sharedram
, &target_value
->sharedram
);
7811 __put_user(value
.bufferram
, &target_value
->bufferram
);
7812 __put_user(value
.totalswap
, &target_value
->totalswap
);
7813 __put_user(value
.freeswap
, &target_value
->freeswap
);
7814 __put_user(value
.procs
, &target_value
->procs
);
7815 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
7816 __put_user(value
.freehigh
, &target_value
->freehigh
);
7817 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
7818 unlock_user_struct(target_value
, arg1
, 1);
7822 #ifdef TARGET_NR_ipc
7824 ret
= do_ipc(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7827 #ifdef TARGET_NR_semget
7828 case TARGET_NR_semget
:
7829 ret
= get_errno(semget(arg1
, arg2
, arg3
));
7832 #ifdef TARGET_NR_semop
7833 case TARGET_NR_semop
:
7834 ret
= do_semop(arg1
, arg2
, arg3
);
7837 #ifdef TARGET_NR_semctl
7838 case TARGET_NR_semctl
:
7839 ret
= do_semctl(arg1
, arg2
, arg3
, arg4
);
7842 #ifdef TARGET_NR_msgctl
7843 case TARGET_NR_msgctl
:
7844 ret
= do_msgctl(arg1
, arg2
, arg3
);
7847 #ifdef TARGET_NR_msgget
7848 case TARGET_NR_msgget
:
7849 ret
= get_errno(msgget(arg1
, arg2
));
7852 #ifdef TARGET_NR_msgrcv
7853 case TARGET_NR_msgrcv
:
7854 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
7857 #ifdef TARGET_NR_msgsnd
7858 case TARGET_NR_msgsnd
:
7859 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
7862 #ifdef TARGET_NR_shmget
7863 case TARGET_NR_shmget
:
7864 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
7867 #ifdef TARGET_NR_shmctl
7868 case TARGET_NR_shmctl
:
7869 ret
= do_shmctl(arg1
, arg2
, arg3
);
7872 #ifdef TARGET_NR_shmat
7873 case TARGET_NR_shmat
:
7874 ret
= do_shmat(arg1
, arg2
, arg3
);
7877 #ifdef TARGET_NR_shmdt
7878 case TARGET_NR_shmdt
:
7879 ret
= do_shmdt(arg1
);
7882 case TARGET_NR_fsync
:
7883 ret
= get_errno(fsync(arg1
));
7885 case TARGET_NR_clone
:
7886 /* Linux manages to have three different orderings for its
7887 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
7888 * match the kernel's CONFIG_CLONE_* settings.
7889 * Microblaze is further special in that it uses a sixth
7890 * implicit argument to clone for the TLS pointer.
7892 #if defined(TARGET_MICROBLAZE)
7893 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
7894 #elif defined(TARGET_CLONE_BACKWARDS)
7895 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
7896 #elif defined(TARGET_CLONE_BACKWARDS2)
7897 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
7899 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
7902 #ifdef __NR_exit_group
7903 /* new thread calls */
7904 case TARGET_NR_exit_group
:
7908 gdb_exit(cpu_env
, arg1
);
7909 ret
= get_errno(exit_group(arg1
));
7912 case TARGET_NR_setdomainname
:
7913 if (!(p
= lock_user_string(arg1
)))
7915 ret
= get_errno(setdomainname(p
, arg2
));
7916 unlock_user(p
, arg1
, 0);
7918 case TARGET_NR_uname
:
7919 /* no need to transcode because we use the linux syscall */
7921 struct new_utsname
* buf
;
7923 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
7925 ret
= get_errno(sys_uname(buf
));
7926 if (!is_error(ret
)) {
7927 /* Overwrite the native machine name with whatever is being
7929 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
7930 /* Allow the user to override the reported release. */
7931 if (qemu_uname_release
&& *qemu_uname_release
)
7932 strcpy (buf
->release
, qemu_uname_release
);
7934 unlock_user_struct(buf
, arg1
, 1);
7938 case TARGET_NR_modify_ldt
:
7939 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
7941 #if !defined(TARGET_X86_64)
7942 case TARGET_NR_vm86old
:
7944 case TARGET_NR_vm86
:
7945 ret
= do_vm86(cpu_env
, arg1
, arg2
);
7949 case TARGET_NR_adjtimex
:
7951 #ifdef TARGET_NR_create_module
7952 case TARGET_NR_create_module
:
7954 case TARGET_NR_init_module
:
7955 case TARGET_NR_delete_module
:
7956 #ifdef TARGET_NR_get_kernel_syms
7957 case TARGET_NR_get_kernel_syms
:
7960 case TARGET_NR_quotactl
:
7962 case TARGET_NR_getpgid
:
7963 ret
= get_errno(getpgid(arg1
));
7965 case TARGET_NR_fchdir
:
7966 ret
= get_errno(fchdir(arg1
));
7968 #ifdef TARGET_NR_bdflush /* not on x86_64 */
7969 case TARGET_NR_bdflush
:
7972 #ifdef TARGET_NR_sysfs
7973 case TARGET_NR_sysfs
:
7976 case TARGET_NR_personality
:
7977 ret
= get_errno(personality(arg1
));
7979 #ifdef TARGET_NR_afs_syscall
7980 case TARGET_NR_afs_syscall
:
7983 #ifdef TARGET_NR__llseek /* Not on alpha */
7984 case TARGET_NR__llseek
:
7987 #if !defined(__NR_llseek)
7988 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | arg3
, arg5
);
7990 ret
= get_errno(res
);
7995 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
7997 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
8003 #ifdef TARGET_NR_getdents
8004 case TARGET_NR_getdents
:
8005 #ifdef __NR_getdents
8006 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
8008 struct target_dirent
*target_dirp
;
8009 struct linux_dirent
*dirp
;
8010 abi_long count
= arg3
;
8012 dirp
= g_try_malloc(count
);
8014 ret
= -TARGET_ENOMEM
;
8018 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
8019 if (!is_error(ret
)) {
8020 struct linux_dirent
*de
;
8021 struct target_dirent
*tde
;
8023 int reclen
, treclen
;
8024 int count1
, tnamelen
;
8028 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
8032 reclen
= de
->d_reclen
;
8033 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
8034 assert(tnamelen
>= 0);
8035 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
8036 assert(count1
+ treclen
<= count
);
8037 tde
->d_reclen
= tswap16(treclen
);
8038 tde
->d_ino
= tswapal(de
->d_ino
);
8039 tde
->d_off
= tswapal(de
->d_off
);
8040 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
8041 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
8043 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
8047 unlock_user(target_dirp
, arg2
, ret
);
8053 struct linux_dirent
*dirp
;
8054 abi_long count
= arg3
;
8056 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
8058 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
8059 if (!is_error(ret
)) {
8060 struct linux_dirent
*de
;
8065 reclen
= de
->d_reclen
;
8068 de
->d_reclen
= tswap16(reclen
);
8069 tswapls(&de
->d_ino
);
8070 tswapls(&de
->d_off
);
8071 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
8075 unlock_user(dirp
, arg2
, ret
);
8079 /* Implement getdents in terms of getdents64 */
8081 struct linux_dirent64
*dirp
;
8082 abi_long count
= arg3
;
8084 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
8088 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
8089 if (!is_error(ret
)) {
8090 /* Convert the dirent64 structs to target dirent. We do this
8091 * in-place, since we can guarantee that a target_dirent is no
8092 * larger than a dirent64; however this means we have to be
8093 * careful to read everything before writing in the new format.
8095 struct linux_dirent64
*de
;
8096 struct target_dirent
*tde
;
8101 tde
= (struct target_dirent
*)dirp
;
8103 int namelen
, treclen
;
8104 int reclen
= de
->d_reclen
;
8105 uint64_t ino
= de
->d_ino
;
8106 int64_t off
= de
->d_off
;
8107 uint8_t type
= de
->d_type
;
8109 namelen
= strlen(de
->d_name
);
8110 treclen
= offsetof(struct target_dirent
, d_name
)
8112 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
8114 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
8115 tde
->d_ino
= tswapal(ino
);
8116 tde
->d_off
= tswapal(off
);
8117 tde
->d_reclen
= tswap16(treclen
);
8118 /* The target_dirent type is in what was formerly a padding
8119 * byte at the end of the structure:
8121 *(((char *)tde
) + treclen
- 1) = type
;
8123 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
8124 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
8130 unlock_user(dirp
, arg2
, ret
);
8134 #endif /* TARGET_NR_getdents */
8135 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8136 case TARGET_NR_getdents64
:
8138 struct linux_dirent64
*dirp
;
8139 abi_long count
= arg3
;
8140 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
8142 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
8143 if (!is_error(ret
)) {
8144 struct linux_dirent64
*de
;
8149 reclen
= de
->d_reclen
;
8152 de
->d_reclen
= tswap16(reclen
);
8153 tswap64s((uint64_t *)&de
->d_ino
);
8154 tswap64s((uint64_t *)&de
->d_off
);
8155 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
8159 unlock_user(dirp
, arg2
, ret
);
8162 #endif /* TARGET_NR_getdents64 */
8163 #if defined(TARGET_NR__newselect)
8164 case TARGET_NR__newselect
:
8165 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
8168 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
8169 # ifdef TARGET_NR_poll
8170 case TARGET_NR_poll
:
8172 # ifdef TARGET_NR_ppoll
8173 case TARGET_NR_ppoll
:
8176 struct target_pollfd
*target_pfd
;
8177 unsigned int nfds
= arg2
;
8185 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
8186 sizeof(struct target_pollfd
) * nfds
, 1);
8191 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
8192 for (i
= 0; i
< nfds
; i
++) {
8193 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
8194 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
8198 # ifdef TARGET_NR_ppoll
8199 if (num
== TARGET_NR_ppoll
) {
8200 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
8201 target_sigset_t
*target_set
;
8202 sigset_t _set
, *set
= &_set
;
8205 if (target_to_host_timespec(timeout_ts
, arg3
)) {
8206 unlock_user(target_pfd
, arg1
, 0);
8214 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
8216 unlock_user(target_pfd
, arg1
, 0);
8219 target_to_host_sigset(set
, target_set
);
8224 ret
= get_errno(sys_ppoll(pfd
, nfds
, timeout_ts
, set
, _NSIG
/8));
8226 if (!is_error(ret
) && arg3
) {
8227 host_to_target_timespec(arg3
, timeout_ts
);
8230 unlock_user(target_set
, arg4
, 0);
8234 ret
= get_errno(poll(pfd
, nfds
, timeout
));
8236 if (!is_error(ret
)) {
8237 for(i
= 0; i
< nfds
; i
++) {
8238 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
8241 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
8245 case TARGET_NR_flock
:
8246 /* NOTE: the flock constant seems to be the same for every
8248 ret
= get_errno(flock(arg1
, arg2
));
8250 case TARGET_NR_readv
:
8252 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
8254 ret
= get_errno(readv(arg1
, vec
, arg3
));
8255 unlock_iovec(vec
, arg2
, arg3
, 1);
8257 ret
= -host_to_target_errno(errno
);
8261 case TARGET_NR_writev
:
8263 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
8265 ret
= get_errno(writev(arg1
, vec
, arg3
));
8266 unlock_iovec(vec
, arg2
, arg3
, 0);
8268 ret
= -host_to_target_errno(errno
);
8272 case TARGET_NR_getsid
:
8273 ret
= get_errno(getsid(arg1
));
8275 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
8276 case TARGET_NR_fdatasync
:
8277 ret
= get_errno(fdatasync(arg1
));
8280 #ifdef TARGET_NR__sysctl
8281 case TARGET_NR__sysctl
:
8282 /* We don't implement this, but ENOTDIR is always a safe
8284 ret
= -TARGET_ENOTDIR
;
8287 case TARGET_NR_sched_getaffinity
:
8289 unsigned int mask_size
;
8290 unsigned long *mask
;
8293 * sched_getaffinity needs multiples of ulong, so need to take
8294 * care of mismatches between target ulong and host ulong sizes.
8296 if (arg2
& (sizeof(abi_ulong
) - 1)) {
8297 ret
= -TARGET_EINVAL
;
8300 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
8302 mask
= alloca(mask_size
);
8303 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
8305 if (!is_error(ret
)) {
8307 /* More data returned than the caller's buffer will fit.
8308 * This only happens if sizeof(abi_long) < sizeof(long)
8309 * and the caller passed us a buffer holding an odd number
8310 * of abi_longs. If the host kernel is actually using the
8311 * extra 4 bytes then fail EINVAL; otherwise we can just
8312 * ignore them and only copy the interesting part.
8314 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
8315 if (numcpus
> arg2
* 8) {
8316 ret
= -TARGET_EINVAL
;
8322 if (copy_to_user(arg3
, mask
, ret
)) {
8328 case TARGET_NR_sched_setaffinity
:
8330 unsigned int mask_size
;
8331 unsigned long *mask
;
8334 * sched_setaffinity needs multiples of ulong, so need to take
8335 * care of mismatches between target ulong and host ulong sizes.
8337 if (arg2
& (sizeof(abi_ulong
) - 1)) {
8338 ret
= -TARGET_EINVAL
;
8341 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
8343 mask
= alloca(mask_size
);
8344 if (!lock_user_struct(VERIFY_READ
, p
, arg3
, 1)) {
8347 memcpy(mask
, p
, arg2
);
8348 unlock_user_struct(p
, arg2
, 0);
8350 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
8353 case TARGET_NR_sched_setparam
:
8355 struct sched_param
*target_schp
;
8356 struct sched_param schp
;
8359 return -TARGET_EINVAL
;
8361 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
8363 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
8364 unlock_user_struct(target_schp
, arg2
, 0);
8365 ret
= get_errno(sched_setparam(arg1
, &schp
));
8368 case TARGET_NR_sched_getparam
:
8370 struct sched_param
*target_schp
;
8371 struct sched_param schp
;
8374 return -TARGET_EINVAL
;
8376 ret
= get_errno(sched_getparam(arg1
, &schp
));
8377 if (!is_error(ret
)) {
8378 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
8380 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
8381 unlock_user_struct(target_schp
, arg2
, 1);
8385 case TARGET_NR_sched_setscheduler
:
8387 struct sched_param
*target_schp
;
8388 struct sched_param schp
;
8390 return -TARGET_EINVAL
;
8392 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
8394 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
8395 unlock_user_struct(target_schp
, arg3
, 0);
8396 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
8399 case TARGET_NR_sched_getscheduler
:
8400 ret
= get_errno(sched_getscheduler(arg1
));
8402 case TARGET_NR_sched_yield
:
8403 ret
= get_errno(sched_yield());
8405 case TARGET_NR_sched_get_priority_max
:
8406 ret
= get_errno(sched_get_priority_max(arg1
));
8408 case TARGET_NR_sched_get_priority_min
:
8409 ret
= get_errno(sched_get_priority_min(arg1
));
8411 case TARGET_NR_sched_rr_get_interval
:
8414 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
8415 if (!is_error(ret
)) {
8416 ret
= host_to_target_timespec(arg2
, &ts
);
8420 case TARGET_NR_nanosleep
:
8422 struct timespec req
, rem
;
8423 target_to_host_timespec(&req
, arg1
);
8424 ret
= get_errno(nanosleep(&req
, &rem
));
8425 if (is_error(ret
) && arg2
) {
8426 host_to_target_timespec(arg2
, &rem
);
8430 #ifdef TARGET_NR_query_module
8431 case TARGET_NR_query_module
:
8434 #ifdef TARGET_NR_nfsservctl
8435 case TARGET_NR_nfsservctl
:
8438 case TARGET_NR_prctl
:
8440 case PR_GET_PDEATHSIG
:
8443 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
8444 if (!is_error(ret
) && arg2
8445 && put_user_ual(deathsig
, arg2
)) {
8453 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
8457 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
8459 unlock_user(name
, arg2
, 16);
8464 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
8468 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
8470 unlock_user(name
, arg2
, 0);
8475 /* Most prctl options have no pointer arguments */
8476 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
8480 #ifdef TARGET_NR_arch_prctl
8481 case TARGET_NR_arch_prctl
:
8482 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
8483 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
8489 #ifdef TARGET_NR_pread64
8490 case TARGET_NR_pread64
:
8491 if (regpairs_aligned(cpu_env
)) {
8495 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
8497 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
8498 unlock_user(p
, arg2
, ret
);
8500 case TARGET_NR_pwrite64
:
8501 if (regpairs_aligned(cpu_env
)) {
8505 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
8507 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
8508 unlock_user(p
, arg2
, 0);
8511 case TARGET_NR_getcwd
:
8512 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
8514 ret
= get_errno(sys_getcwd1(p
, arg2
));
8515 unlock_user(p
, arg1
, ret
);
8517 case TARGET_NR_capget
:
8518 case TARGET_NR_capset
:
8520 struct target_user_cap_header
*target_header
;
8521 struct target_user_cap_data
*target_data
= NULL
;
8522 struct __user_cap_header_struct header
;
8523 struct __user_cap_data_struct data
[2];
8524 struct __user_cap_data_struct
*dataptr
= NULL
;
8525 int i
, target_datalen
;
8528 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
8531 header
.version
= tswap32(target_header
->version
);
8532 header
.pid
= tswap32(target_header
->pid
);
8534 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
8535 /* Version 2 and up takes pointer to two user_data structs */
8539 target_datalen
= sizeof(*target_data
) * data_items
;
8542 if (num
== TARGET_NR_capget
) {
8543 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
8545 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
8548 unlock_user_struct(target_header
, arg1
, 0);
8552 if (num
== TARGET_NR_capset
) {
8553 for (i
= 0; i
< data_items
; i
++) {
8554 data
[i
].effective
= tswap32(target_data
[i
].effective
);
8555 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
8556 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
8563 if (num
== TARGET_NR_capget
) {
8564 ret
= get_errno(capget(&header
, dataptr
));
8566 ret
= get_errno(capset(&header
, dataptr
));
8569 /* The kernel always updates version for both capget and capset */
8570 target_header
->version
= tswap32(header
.version
);
8571 unlock_user_struct(target_header
, arg1
, 1);
8574 if (num
== TARGET_NR_capget
) {
8575 for (i
= 0; i
< data_items
; i
++) {
8576 target_data
[i
].effective
= tswap32(data
[i
].effective
);
8577 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
8578 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
8580 unlock_user(target_data
, arg2
, target_datalen
);
8582 unlock_user(target_data
, arg2
, 0);
8587 case TARGET_NR_sigaltstack
:
8588 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUArchState
*)cpu_env
));
8591 #ifdef CONFIG_SENDFILE
8592 case TARGET_NR_sendfile
:
8597 ret
= get_user_sal(off
, arg3
);
8598 if (is_error(ret
)) {
8603 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
8604 if (!is_error(ret
) && arg3
) {
8605 abi_long ret2
= put_user_sal(off
, arg3
);
8606 if (is_error(ret2
)) {
8612 #ifdef TARGET_NR_sendfile64
8613 case TARGET_NR_sendfile64
:
8618 ret
= get_user_s64(off
, arg3
);
8619 if (is_error(ret
)) {
8624 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
8625 if (!is_error(ret
) && arg3
) {
8626 abi_long ret2
= put_user_s64(off
, arg3
);
8627 if (is_error(ret2
)) {
8635 case TARGET_NR_sendfile
:
8636 #ifdef TARGET_NR_sendfile64
8637 case TARGET_NR_sendfile64
:
8642 #ifdef TARGET_NR_getpmsg
8643 case TARGET_NR_getpmsg
:
8646 #ifdef TARGET_NR_putpmsg
8647 case TARGET_NR_putpmsg
:
8650 #ifdef TARGET_NR_vfork
8651 case TARGET_NR_vfork
:
8652 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
8656 #ifdef TARGET_NR_ugetrlimit
8657 case TARGET_NR_ugetrlimit
:
8660 int resource
= target_to_host_resource(arg1
);
8661 ret
= get_errno(getrlimit(resource
, &rlim
));
8662 if (!is_error(ret
)) {
8663 struct target_rlimit
*target_rlim
;
8664 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
8666 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
8667 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
8668 unlock_user_struct(target_rlim
, arg2
, 1);
8673 #ifdef TARGET_NR_truncate64
8674 case TARGET_NR_truncate64
:
8675 if (!(p
= lock_user_string(arg1
)))
8677 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
8678 unlock_user(p
, arg1
, 0);
8681 #ifdef TARGET_NR_ftruncate64
8682 case TARGET_NR_ftruncate64
:
8683 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
8686 #ifdef TARGET_NR_stat64
8687 case TARGET_NR_stat64
:
8688 if (!(p
= lock_user_string(arg1
)))
8690 ret
= get_errno(stat(path(p
), &st
));
8691 unlock_user(p
, arg1
, 0);
8693 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
8696 #ifdef TARGET_NR_lstat64
8697 case TARGET_NR_lstat64
:
8698 if (!(p
= lock_user_string(arg1
)))
8700 ret
= get_errno(lstat(path(p
), &st
));
8701 unlock_user(p
, arg1
, 0);
8703 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
8706 #ifdef TARGET_NR_fstat64
8707 case TARGET_NR_fstat64
:
8708 ret
= get_errno(fstat(arg1
, &st
));
8710 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
8713 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
8714 #ifdef TARGET_NR_fstatat64
8715 case TARGET_NR_fstatat64
:
8717 #ifdef TARGET_NR_newfstatat
8718 case TARGET_NR_newfstatat
:
8720 if (!(p
= lock_user_string(arg2
)))
8722 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
8724 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
8727 #ifdef TARGET_NR_lchown
8728 case TARGET_NR_lchown
:
8729 if (!(p
= lock_user_string(arg1
)))
8731 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
8732 unlock_user(p
, arg1
, 0);
8735 #ifdef TARGET_NR_getuid
8736 case TARGET_NR_getuid
:
8737 ret
= get_errno(high2lowuid(getuid()));
8740 #ifdef TARGET_NR_getgid
8741 case TARGET_NR_getgid
:
8742 ret
= get_errno(high2lowgid(getgid()));
8745 #ifdef TARGET_NR_geteuid
8746 case TARGET_NR_geteuid
:
8747 ret
= get_errno(high2lowuid(geteuid()));
8750 #ifdef TARGET_NR_getegid
8751 case TARGET_NR_getegid
:
8752 ret
= get_errno(high2lowgid(getegid()));
8755 case TARGET_NR_setreuid
:
8756 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
8758 case TARGET_NR_setregid
:
8759 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
8761 case TARGET_NR_getgroups
:
8763 int gidsetsize
= arg1
;
8764 target_id
*target_grouplist
;
8768 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
8769 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
8770 if (gidsetsize
== 0)
8772 if (!is_error(ret
)) {
8773 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
8774 if (!target_grouplist
)
8776 for(i
= 0;i
< ret
; i
++)
8777 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
8778 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
8782 case TARGET_NR_setgroups
:
8784 int gidsetsize
= arg1
;
8785 target_id
*target_grouplist
;
8786 gid_t
*grouplist
= NULL
;
8789 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
8790 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
8791 if (!target_grouplist
) {
8792 ret
= -TARGET_EFAULT
;
8795 for (i
= 0; i
< gidsetsize
; i
++) {
8796 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
8798 unlock_user(target_grouplist
, arg2
, 0);
8800 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
8803 case TARGET_NR_fchown
:
8804 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
8806 #if defined(TARGET_NR_fchownat)
8807 case TARGET_NR_fchownat
:
8808 if (!(p
= lock_user_string(arg2
)))
8810 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
8811 low2highgid(arg4
), arg5
));
8812 unlock_user(p
, arg2
, 0);
8815 #ifdef TARGET_NR_setresuid
8816 case TARGET_NR_setresuid
:
8817 ret
= get_errno(setresuid(low2highuid(arg1
),
8819 low2highuid(arg3
)));
8822 #ifdef TARGET_NR_getresuid
8823 case TARGET_NR_getresuid
:
8825 uid_t ruid
, euid
, suid
;
8826 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
8827 if (!is_error(ret
)) {
8828 if (put_user_id(high2lowuid(ruid
), arg1
)
8829 || put_user_id(high2lowuid(euid
), arg2
)
8830 || put_user_id(high2lowuid(suid
), arg3
))
8836 #ifdef TARGET_NR_getresgid
8837 case TARGET_NR_setresgid
:
8838 ret
= get_errno(setresgid(low2highgid(arg1
),
8840 low2highgid(arg3
)));
8843 #ifdef TARGET_NR_getresgid
8844 case TARGET_NR_getresgid
:
8846 gid_t rgid
, egid
, sgid
;
8847 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
8848 if (!is_error(ret
)) {
8849 if (put_user_id(high2lowgid(rgid
), arg1
)
8850 || put_user_id(high2lowgid(egid
), arg2
)
8851 || put_user_id(high2lowgid(sgid
), arg3
))
8857 #ifdef TARGET_NR_chown
8858 case TARGET_NR_chown
:
8859 if (!(p
= lock_user_string(arg1
)))
8861 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
8862 unlock_user(p
, arg1
, 0);
8865 case TARGET_NR_setuid
:
8866 ret
= get_errno(setuid(low2highuid(arg1
)));
8868 case TARGET_NR_setgid
:
8869 ret
= get_errno(setgid(low2highgid(arg1
)));
8871 case TARGET_NR_setfsuid
:
8872 ret
= get_errno(setfsuid(arg1
));
8874 case TARGET_NR_setfsgid
:
8875 ret
= get_errno(setfsgid(arg1
));
8878 #ifdef TARGET_NR_lchown32
8879 case TARGET_NR_lchown32
:
8880 if (!(p
= lock_user_string(arg1
)))
8882 ret
= get_errno(lchown(p
, arg2
, arg3
));
8883 unlock_user(p
, arg1
, 0);
8886 #ifdef TARGET_NR_getuid32
8887 case TARGET_NR_getuid32
:
8888 ret
= get_errno(getuid());
8892 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
8893 /* Alpha specific */
8894 case TARGET_NR_getxuid
:
8898 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
8900 ret
= get_errno(getuid());
8903 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
8904 /* Alpha specific */
8905 case TARGET_NR_getxgid
:
8909 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
8911 ret
= get_errno(getgid());
8914 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
8915 /* Alpha specific */
8916 case TARGET_NR_osf_getsysinfo
:
8917 ret
= -TARGET_EOPNOTSUPP
;
8919 case TARGET_GSI_IEEE_FP_CONTROL
:
8921 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
8923 /* Copied from linux ieee_fpcr_to_swcr. */
8924 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
8925 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
8926 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
8927 | SWCR_TRAP_ENABLE_DZE
8928 | SWCR_TRAP_ENABLE_OVF
);
8929 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
8930 | SWCR_TRAP_ENABLE_INE
);
8931 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
8932 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
8934 if (put_user_u64 (swcr
, arg2
))
8940 /* case GSI_IEEE_STATE_AT_SIGNAL:
8941 -- Not implemented in linux kernel.
8943 -- Retrieves current unaligned access state; not much used.
8945 -- Retrieves implver information; surely not used.
8947 -- Grabs a copy of the HWRPB; surely not used.
8952 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
8953 /* Alpha specific */
8954 case TARGET_NR_osf_setsysinfo
:
8955 ret
= -TARGET_EOPNOTSUPP
;
8957 case TARGET_SSI_IEEE_FP_CONTROL
:
8959 uint64_t swcr
, fpcr
, orig_fpcr
;
8961 if (get_user_u64 (swcr
, arg2
)) {
8964 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
8965 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
8967 /* Copied from linux ieee_swcr_to_fpcr. */
8968 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
8969 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
8970 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
8971 | SWCR_TRAP_ENABLE_DZE
8972 | SWCR_TRAP_ENABLE_OVF
)) << 48;
8973 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
8974 | SWCR_TRAP_ENABLE_INE
)) << 57;
8975 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
8976 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
8978 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
8983 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
8985 uint64_t exc
, fpcr
, orig_fpcr
;
8988 if (get_user_u64(exc
, arg2
)) {
8992 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
8994 /* We only add to the exception status here. */
8995 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
8997 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
9000 /* Old exceptions are not signaled. */
9001 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
9003 /* If any exceptions set by this call,
9004 and are unmasked, send a signal. */
9006 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
9007 si_code
= TARGET_FPE_FLTRES
;
9009 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
9010 si_code
= TARGET_FPE_FLTUND
;
9012 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
9013 si_code
= TARGET_FPE_FLTOVF
;
9015 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
9016 si_code
= TARGET_FPE_FLTDIV
;
9018 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
9019 si_code
= TARGET_FPE_FLTINV
;
9022 target_siginfo_t info
;
9023 info
.si_signo
= SIGFPE
;
9025 info
.si_code
= si_code
;
9026 info
._sifields
._sigfault
._addr
9027 = ((CPUArchState
*)cpu_env
)->pc
;
9028 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
9033 /* case SSI_NVPAIRS:
9034 -- Used with SSIN_UACPROC to enable unaligned accesses.
9035 case SSI_IEEE_STATE_AT_SIGNAL:
9036 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
9037 -- Not implemented in linux kernel
9042 #ifdef TARGET_NR_osf_sigprocmask
9043 /* Alpha specific. */
9044 case TARGET_NR_osf_sigprocmask
:
9048 sigset_t set
, oldset
;
9051 case TARGET_SIG_BLOCK
:
9054 case TARGET_SIG_UNBLOCK
:
9057 case TARGET_SIG_SETMASK
:
9061 ret
= -TARGET_EINVAL
;
9065 target_to_host_old_sigset(&set
, &mask
);
9066 do_sigprocmask(how
, &set
, &oldset
);
9067 host_to_target_old_sigset(&mask
, &oldset
);
9073 #ifdef TARGET_NR_getgid32
9074 case TARGET_NR_getgid32
:
9075 ret
= get_errno(getgid());
9078 #ifdef TARGET_NR_geteuid32
9079 case TARGET_NR_geteuid32
:
9080 ret
= get_errno(geteuid());
9083 #ifdef TARGET_NR_getegid32
9084 case TARGET_NR_getegid32
:
9085 ret
= get_errno(getegid());
9088 #ifdef TARGET_NR_setreuid32
9089 case TARGET_NR_setreuid32
:
9090 ret
= get_errno(setreuid(arg1
, arg2
));
9093 #ifdef TARGET_NR_setregid32
9094 case TARGET_NR_setregid32
:
9095 ret
= get_errno(setregid(arg1
, arg2
));
9098 #ifdef TARGET_NR_getgroups32
9099 case TARGET_NR_getgroups32
:
9101 int gidsetsize
= arg1
;
9102 uint32_t *target_grouplist
;
9106 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
9107 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
9108 if (gidsetsize
== 0)
9110 if (!is_error(ret
)) {
9111 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
9112 if (!target_grouplist
) {
9113 ret
= -TARGET_EFAULT
;
9116 for(i
= 0;i
< ret
; i
++)
9117 target_grouplist
[i
] = tswap32(grouplist
[i
]);
9118 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
9123 #ifdef TARGET_NR_setgroups32
9124 case TARGET_NR_setgroups32
:
9126 int gidsetsize
= arg1
;
9127 uint32_t *target_grouplist
;
9131 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
9132 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
9133 if (!target_grouplist
) {
9134 ret
= -TARGET_EFAULT
;
9137 for(i
= 0;i
< gidsetsize
; i
++)
9138 grouplist
[i
] = tswap32(target_grouplist
[i
]);
9139 unlock_user(target_grouplist
, arg2
, 0);
9140 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
9144 #ifdef TARGET_NR_fchown32
9145 case TARGET_NR_fchown32
:
9146 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
9149 #ifdef TARGET_NR_setresuid32
9150 case TARGET_NR_setresuid32
:
9151 ret
= get_errno(setresuid(arg1
, arg2
, arg3
));
9154 #ifdef TARGET_NR_getresuid32
9155 case TARGET_NR_getresuid32
:
9157 uid_t ruid
, euid
, suid
;
9158 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
9159 if (!is_error(ret
)) {
9160 if (put_user_u32(ruid
, arg1
)
9161 || put_user_u32(euid
, arg2
)
9162 || put_user_u32(suid
, arg3
))
9168 #ifdef TARGET_NR_setresgid32
9169 case TARGET_NR_setresgid32
:
9170 ret
= get_errno(setresgid(arg1
, arg2
, arg3
));
9173 #ifdef TARGET_NR_getresgid32
9174 case TARGET_NR_getresgid32
:
9176 gid_t rgid
, egid
, sgid
;
9177 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
9178 if (!is_error(ret
)) {
9179 if (put_user_u32(rgid
, arg1
)
9180 || put_user_u32(egid
, arg2
)
9181 || put_user_u32(sgid
, arg3
))
9187 #ifdef TARGET_NR_chown32
9188 case TARGET_NR_chown32
:
9189 if (!(p
= lock_user_string(arg1
)))
9191 ret
= get_errno(chown(p
, arg2
, arg3
));
9192 unlock_user(p
, arg1
, 0);
9195 #ifdef TARGET_NR_setuid32
9196 case TARGET_NR_setuid32
:
9197 ret
= get_errno(setuid(arg1
));
9200 #ifdef TARGET_NR_setgid32
9201 case TARGET_NR_setgid32
:
9202 ret
= get_errno(setgid(arg1
));
9205 #ifdef TARGET_NR_setfsuid32
9206 case TARGET_NR_setfsuid32
:
9207 ret
= get_errno(setfsuid(arg1
));
9210 #ifdef TARGET_NR_setfsgid32
9211 case TARGET_NR_setfsgid32
:
9212 ret
= get_errno(setfsgid(arg1
));
9216 case TARGET_NR_pivot_root
:
9218 #ifdef TARGET_NR_mincore
9219 case TARGET_NR_mincore
:
9222 ret
= -TARGET_EFAULT
;
9223 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
9225 if (!(p
= lock_user_string(arg3
)))
9227 ret
= get_errno(mincore(a
, arg2
, p
));
9228 unlock_user(p
, arg3
, ret
);
9230 unlock_user(a
, arg1
, 0);
9234 #ifdef TARGET_NR_arm_fadvise64_64
9235 case TARGET_NR_arm_fadvise64_64
:
9238 * arm_fadvise64_64 looks like fadvise64_64 but
9239 * with different argument order
9247 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
9248 #ifdef TARGET_NR_fadvise64_64
9249 case TARGET_NR_fadvise64_64
:
9251 #ifdef TARGET_NR_fadvise64
9252 case TARGET_NR_fadvise64
:
9256 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
9257 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
9258 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
9259 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
9263 ret
= -posix_fadvise(arg1
, arg2
, arg3
, arg4
);
9266 #ifdef TARGET_NR_madvise
9267 case TARGET_NR_madvise
:
9268 /* A straight passthrough may not be safe because qemu sometimes
9269 turns private file-backed mappings into anonymous mappings.
9270 This will break MADV_DONTNEED.
9271 This is a hint, so ignoring and returning success is ok. */
9275 #if TARGET_ABI_BITS == 32
9276 case TARGET_NR_fcntl64
:
9280 struct target_flock64
*target_fl
;
9282 struct target_eabi_flock64
*target_efl
;
9285 cmd
= target_to_host_fcntl_cmd(arg2
);
9286 if (cmd
== -TARGET_EINVAL
) {
9292 case TARGET_F_GETLK64
:
9294 if (((CPUARMState
*)cpu_env
)->eabi
) {
9295 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
9297 fl
.l_type
= tswap16(target_efl
->l_type
);
9298 fl
.l_whence
= tswap16(target_efl
->l_whence
);
9299 fl
.l_start
= tswap64(target_efl
->l_start
);
9300 fl
.l_len
= tswap64(target_efl
->l_len
);
9301 fl
.l_pid
= tswap32(target_efl
->l_pid
);
9302 unlock_user_struct(target_efl
, arg3
, 0);
9306 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
9308 fl
.l_type
= tswap16(target_fl
->l_type
);
9309 fl
.l_whence
= tswap16(target_fl
->l_whence
);
9310 fl
.l_start
= tswap64(target_fl
->l_start
);
9311 fl
.l_len
= tswap64(target_fl
->l_len
);
9312 fl
.l_pid
= tswap32(target_fl
->l_pid
);
9313 unlock_user_struct(target_fl
, arg3
, 0);
9315 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
9318 if (((CPUARMState
*)cpu_env
)->eabi
) {
9319 if (!lock_user_struct(VERIFY_WRITE
, target_efl
, arg3
, 0))
9321 target_efl
->l_type
= tswap16(fl
.l_type
);
9322 target_efl
->l_whence
= tswap16(fl
.l_whence
);
9323 target_efl
->l_start
= tswap64(fl
.l_start
);
9324 target_efl
->l_len
= tswap64(fl
.l_len
);
9325 target_efl
->l_pid
= tswap32(fl
.l_pid
);
9326 unlock_user_struct(target_efl
, arg3
, 1);
9330 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg3
, 0))
9332 target_fl
->l_type
= tswap16(fl
.l_type
);
9333 target_fl
->l_whence
= tswap16(fl
.l_whence
);
9334 target_fl
->l_start
= tswap64(fl
.l_start
);
9335 target_fl
->l_len
= tswap64(fl
.l_len
);
9336 target_fl
->l_pid
= tswap32(fl
.l_pid
);
9337 unlock_user_struct(target_fl
, arg3
, 1);
9342 case TARGET_F_SETLK64
:
9343 case TARGET_F_SETLKW64
:
9345 if (((CPUARMState
*)cpu_env
)->eabi
) {
9346 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
9348 fl
.l_type
= tswap16(target_efl
->l_type
);
9349 fl
.l_whence
= tswap16(target_efl
->l_whence
);
9350 fl
.l_start
= tswap64(target_efl
->l_start
);
9351 fl
.l_len
= tswap64(target_efl
->l_len
);
9352 fl
.l_pid
= tswap32(target_efl
->l_pid
);
9353 unlock_user_struct(target_efl
, arg3
, 0);
9357 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
9359 fl
.l_type
= tswap16(target_fl
->l_type
);
9360 fl
.l_whence
= tswap16(target_fl
->l_whence
);
9361 fl
.l_start
= tswap64(target_fl
->l_start
);
9362 fl
.l_len
= tswap64(target_fl
->l_len
);
9363 fl
.l_pid
= tswap32(target_fl
->l_pid
);
9364 unlock_user_struct(target_fl
, arg3
, 0);
9366 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
9369 ret
= do_fcntl(arg1
, arg2
, arg3
);
9375 #ifdef TARGET_NR_cacheflush
9376 case TARGET_NR_cacheflush
:
9377 /* self-modifying code is handled automatically, so nothing needed */
9381 #ifdef TARGET_NR_security
9382 case TARGET_NR_security
:
9385 #ifdef TARGET_NR_getpagesize
9386 case TARGET_NR_getpagesize
:
9387 ret
= TARGET_PAGE_SIZE
;
9390 case TARGET_NR_gettid
:
9391 ret
= get_errno(gettid());
9393 #ifdef TARGET_NR_readahead
9394 case TARGET_NR_readahead
:
9395 #if TARGET_ABI_BITS == 32
9396 if (regpairs_aligned(cpu_env
)) {
9401 ret
= get_errno(readahead(arg1
, ((off64_t
)arg3
<< 32) | arg2
, arg4
));
9403 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
9408 #ifdef TARGET_NR_setxattr
9409 case TARGET_NR_listxattr
:
9410 case TARGET_NR_llistxattr
:
9414 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9416 ret
= -TARGET_EFAULT
;
9420 p
= lock_user_string(arg1
);
9422 if (num
== TARGET_NR_listxattr
) {
9423 ret
= get_errno(listxattr(p
, b
, arg3
));
9425 ret
= get_errno(llistxattr(p
, b
, arg3
));
9428 ret
= -TARGET_EFAULT
;
9430 unlock_user(p
, arg1
, 0);
9431 unlock_user(b
, arg2
, arg3
);
9434 case TARGET_NR_flistxattr
:
9438 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9440 ret
= -TARGET_EFAULT
;
9444 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
9445 unlock_user(b
, arg2
, arg3
);
9448 case TARGET_NR_setxattr
:
9449 case TARGET_NR_lsetxattr
:
9451 void *p
, *n
, *v
= 0;
9453 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
9455 ret
= -TARGET_EFAULT
;
9459 p
= lock_user_string(arg1
);
9460 n
= lock_user_string(arg2
);
9462 if (num
== TARGET_NR_setxattr
) {
9463 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
9465 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
9468 ret
= -TARGET_EFAULT
;
9470 unlock_user(p
, arg1
, 0);
9471 unlock_user(n
, arg2
, 0);
9472 unlock_user(v
, arg3
, 0);
9475 case TARGET_NR_fsetxattr
:
9479 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
9481 ret
= -TARGET_EFAULT
;
9485 n
= lock_user_string(arg2
);
9487 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
9489 ret
= -TARGET_EFAULT
;
9491 unlock_user(n
, arg2
, 0);
9492 unlock_user(v
, arg3
, 0);
9495 case TARGET_NR_getxattr
:
9496 case TARGET_NR_lgetxattr
:
9498 void *p
, *n
, *v
= 0;
9500 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
9502 ret
= -TARGET_EFAULT
;
9506 p
= lock_user_string(arg1
);
9507 n
= lock_user_string(arg2
);
9509 if (num
== TARGET_NR_getxattr
) {
9510 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
9512 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
9515 ret
= -TARGET_EFAULT
;
9517 unlock_user(p
, arg1
, 0);
9518 unlock_user(n
, arg2
, 0);
9519 unlock_user(v
, arg3
, arg4
);
9522 case TARGET_NR_fgetxattr
:
9526 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
9528 ret
= -TARGET_EFAULT
;
9532 n
= lock_user_string(arg2
);
9534 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
9536 ret
= -TARGET_EFAULT
;
9538 unlock_user(n
, arg2
, 0);
9539 unlock_user(v
, arg3
, arg4
);
9542 case TARGET_NR_removexattr
:
9543 case TARGET_NR_lremovexattr
:
9546 p
= lock_user_string(arg1
);
9547 n
= lock_user_string(arg2
);
9549 if (num
== TARGET_NR_removexattr
) {
9550 ret
= get_errno(removexattr(p
, n
));
9552 ret
= get_errno(lremovexattr(p
, n
));
9555 ret
= -TARGET_EFAULT
;
9557 unlock_user(p
, arg1
, 0);
9558 unlock_user(n
, arg2
, 0);
9561 case TARGET_NR_fremovexattr
:
9564 n
= lock_user_string(arg2
);
9566 ret
= get_errno(fremovexattr(arg1
, n
));
9568 ret
= -TARGET_EFAULT
;
9570 unlock_user(n
, arg2
, 0);
9574 #endif /* CONFIG_ATTR */
9575 #ifdef TARGET_NR_set_thread_area
9576 case TARGET_NR_set_thread_area
:
9577 #if defined(TARGET_MIPS)
9578 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
9581 #elif defined(TARGET_CRIS)
9583 ret
= -TARGET_EINVAL
;
9585 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
9589 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
9590 ret
= do_set_thread_area(cpu_env
, arg1
);
9592 #elif defined(TARGET_M68K)
9594 TaskState
*ts
= cpu
->opaque
;
9595 ts
->tp_value
= arg1
;
9600 goto unimplemented_nowarn
;
9603 #ifdef TARGET_NR_get_thread_area
9604 case TARGET_NR_get_thread_area
:
9605 #if defined(TARGET_I386) && defined(TARGET_ABI32)
9606 ret
= do_get_thread_area(cpu_env
, arg1
);
9608 #elif defined(TARGET_M68K)
9610 TaskState
*ts
= cpu
->opaque
;
9615 goto unimplemented_nowarn
;
9618 #ifdef TARGET_NR_getdomainname
9619 case TARGET_NR_getdomainname
:
9620 goto unimplemented_nowarn
;
9623 #ifdef TARGET_NR_clock_gettime
9624 case TARGET_NR_clock_gettime
:
9627 ret
= get_errno(clock_gettime(arg1
, &ts
));
9628 if (!is_error(ret
)) {
9629 host_to_target_timespec(arg2
, &ts
);
9634 #ifdef TARGET_NR_clock_getres
9635 case TARGET_NR_clock_getres
:
9638 ret
= get_errno(clock_getres(arg1
, &ts
));
9639 if (!is_error(ret
)) {
9640 host_to_target_timespec(arg2
, &ts
);
9645 #ifdef TARGET_NR_clock_nanosleep
9646 case TARGET_NR_clock_nanosleep
:
9649 target_to_host_timespec(&ts
, arg3
);
9650 ret
= get_errno(clock_nanosleep(arg1
, arg2
, &ts
, arg4
? &ts
: NULL
));
9652 host_to_target_timespec(arg4
, &ts
);
9654 #if defined(TARGET_PPC)
9655 /* clock_nanosleep is odd in that it returns positive errno values.
9656 * On PPC, CR0 bit 3 should be set in such a situation. */
9658 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
9665 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
9666 case TARGET_NR_set_tid_address
:
9667 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
9671 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
9672 case TARGET_NR_tkill
:
9673 ret
= get_errno(sys_tkill((int)arg1
, target_to_host_signal(arg2
)));
9677 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
9678 case TARGET_NR_tgkill
:
9679 ret
= get_errno(sys_tgkill((int)arg1
, (int)arg2
,
9680 target_to_host_signal(arg3
)));
9684 #ifdef TARGET_NR_set_robust_list
9685 case TARGET_NR_set_robust_list
:
9686 case TARGET_NR_get_robust_list
:
9687 /* The ABI for supporting robust futexes has userspace pass
9688 * the kernel a pointer to a linked list which is updated by
9689 * userspace after the syscall; the list is walked by the kernel
9690 * when the thread exits. Since the linked list in QEMU guest
9691 * memory isn't a valid linked list for the host and we have
9692 * no way to reliably intercept the thread-death event, we can't
9693 * support these. Silently return ENOSYS so that guest userspace
9694 * falls back to a non-robust futex implementation (which should
9695 * be OK except in the corner case of the guest crashing while
9696 * holding a mutex that is shared with another process via
9699 goto unimplemented_nowarn
;
9702 #if defined(TARGET_NR_utimensat)
9703 case TARGET_NR_utimensat
:
9705 struct timespec
*tsp
, ts
[2];
9709 target_to_host_timespec(ts
, arg3
);
9710 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
9714 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
9716 if (!(p
= lock_user_string(arg2
))) {
9717 ret
= -TARGET_EFAULT
;
9720 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
9721 unlock_user(p
, arg2
, 0);
9726 case TARGET_NR_futex
:
9727 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9729 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
9730 case TARGET_NR_inotify_init
:
9731 ret
= get_errno(sys_inotify_init());
9734 #ifdef CONFIG_INOTIFY1
9735 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
9736 case TARGET_NR_inotify_init1
:
9737 ret
= get_errno(sys_inotify_init1(arg1
));
9741 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
9742 case TARGET_NR_inotify_add_watch
:
9743 p
= lock_user_string(arg2
);
9744 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
9745 unlock_user(p
, arg2
, 0);
9748 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
9749 case TARGET_NR_inotify_rm_watch
:
9750 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
9754 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
9755 case TARGET_NR_mq_open
:
9757 struct mq_attr posix_mq_attr
, *attrp
;
9759 p
= lock_user_string(arg1
- 1);
9761 copy_from_user_mq_attr (&posix_mq_attr
, arg4
);
9762 attrp
= &posix_mq_attr
;
9766 ret
= get_errno(mq_open(p
, arg2
, arg3
, attrp
));
9767 unlock_user (p
, arg1
, 0);
9771 case TARGET_NR_mq_unlink
:
9772 p
= lock_user_string(arg1
- 1);
9773 ret
= get_errno(mq_unlink(p
));
9774 unlock_user (p
, arg1
, 0);
9777 case TARGET_NR_mq_timedsend
:
9781 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
9783 target_to_host_timespec(&ts
, arg5
);
9784 ret
= get_errno(mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
9785 host_to_target_timespec(arg5
, &ts
);
9788 ret
= get_errno(mq_send(arg1
, p
, arg3
, arg4
));
9789 unlock_user (p
, arg2
, arg3
);
9793 case TARGET_NR_mq_timedreceive
:
9798 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
9800 target_to_host_timespec(&ts
, arg5
);
9801 ret
= get_errno(mq_timedreceive(arg1
, p
, arg3
, &prio
, &ts
));
9802 host_to_target_timespec(arg5
, &ts
);
9805 ret
= get_errno(mq_receive(arg1
, p
, arg3
, &prio
));
9806 unlock_user (p
, arg2
, arg3
);
9808 put_user_u32(prio
, arg4
);
9812 /* Not implemented for now... */
9813 /* case TARGET_NR_mq_notify: */
9816 case TARGET_NR_mq_getsetattr
:
9818 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
9821 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
9822 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
9825 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
9826 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
9833 #ifdef CONFIG_SPLICE
9834 #ifdef TARGET_NR_tee
9837 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
9841 #ifdef TARGET_NR_splice
9842 case TARGET_NR_splice
:
9844 loff_t loff_in
, loff_out
;
9845 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
9847 if (get_user_u64(loff_in
, arg2
)) {
9850 ploff_in
= &loff_in
;
9853 if (get_user_u64(loff_out
, arg4
)) {
9856 ploff_out
= &loff_out
;
9858 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
9860 if (put_user_u64(loff_in
, arg2
)) {
9865 if (put_user_u64(loff_out
, arg4
)) {
9872 #ifdef TARGET_NR_vmsplice
9873 case TARGET_NR_vmsplice
:
9875 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
9877 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
9878 unlock_iovec(vec
, arg2
, arg3
, 0);
9880 ret
= -host_to_target_errno(errno
);
9885 #endif /* CONFIG_SPLICE */
9886 #ifdef CONFIG_EVENTFD
9887 #if defined(TARGET_NR_eventfd)
9888 case TARGET_NR_eventfd
:
9889 ret
= get_errno(eventfd(arg1
, 0));
9890 fd_trans_unregister(ret
);
9893 #if defined(TARGET_NR_eventfd2)
9894 case TARGET_NR_eventfd2
:
9896 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
9897 if (arg2
& TARGET_O_NONBLOCK
) {
9898 host_flags
|= O_NONBLOCK
;
9900 if (arg2
& TARGET_O_CLOEXEC
) {
9901 host_flags
|= O_CLOEXEC
;
9903 ret
= get_errno(eventfd(arg1
, host_flags
));
9904 fd_trans_unregister(ret
);
9908 #endif /* CONFIG_EVENTFD */
9909 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
9910 case TARGET_NR_fallocate
:
9911 #if TARGET_ABI_BITS == 32
9912 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
9913 target_offset64(arg5
, arg6
)));
9915 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
9919 #if defined(CONFIG_SYNC_FILE_RANGE)
9920 #if defined(TARGET_NR_sync_file_range)
9921 case TARGET_NR_sync_file_range
:
9922 #if TARGET_ABI_BITS == 32
9923 #if defined(TARGET_MIPS)
9924 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
9925 target_offset64(arg5
, arg6
), arg7
));
9927 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
9928 target_offset64(arg4
, arg5
), arg6
));
9929 #endif /* !TARGET_MIPS */
9931 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
9935 #if defined(TARGET_NR_sync_file_range2)
9936 case TARGET_NR_sync_file_range2
:
9937 /* This is like sync_file_range but the arguments are reordered */
9938 #if TARGET_ABI_BITS == 32
9939 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
9940 target_offset64(arg5
, arg6
), arg2
));
9942 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
9947 #if defined(TARGET_NR_signalfd4)
9948 case TARGET_NR_signalfd4
:
9949 ret
= do_signalfd4(arg1
, arg2
, arg4
);
9952 #if defined(TARGET_NR_signalfd)
9953 case TARGET_NR_signalfd
:
9954 ret
= do_signalfd4(arg1
, arg2
, 0);
9957 #if defined(CONFIG_EPOLL)
9958 #if defined(TARGET_NR_epoll_create)
9959 case TARGET_NR_epoll_create
:
9960 ret
= get_errno(epoll_create(arg1
));
9963 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
9964 case TARGET_NR_epoll_create1
:
9965 ret
= get_errno(epoll_create1(arg1
));
9968 #if defined(TARGET_NR_epoll_ctl)
9969 case TARGET_NR_epoll_ctl
:
9971 struct epoll_event ep
;
9972 struct epoll_event
*epp
= 0;
9974 struct target_epoll_event
*target_ep
;
9975 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
9978 ep
.events
= tswap32(target_ep
->events
);
9979 /* The epoll_data_t union is just opaque data to the kernel,
9980 * so we transfer all 64 bits across and need not worry what
9981 * actual data type it is.
9983 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
9984 unlock_user_struct(target_ep
, arg4
, 0);
9987 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
9992 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
9993 #define IMPLEMENT_EPOLL_PWAIT
9995 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
9996 #if defined(TARGET_NR_epoll_wait)
9997 case TARGET_NR_epoll_wait
:
9999 #if defined(IMPLEMENT_EPOLL_PWAIT)
10000 case TARGET_NR_epoll_pwait
:
10003 struct target_epoll_event
*target_ep
;
10004 struct epoll_event
*ep
;
10006 int maxevents
= arg3
;
10007 int timeout
= arg4
;
10009 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
10010 maxevents
* sizeof(struct target_epoll_event
), 1);
10015 ep
= alloca(maxevents
* sizeof(struct epoll_event
));
10018 #if defined(IMPLEMENT_EPOLL_PWAIT)
10019 case TARGET_NR_epoll_pwait
:
10021 target_sigset_t
*target_set
;
10022 sigset_t _set
, *set
= &_set
;
10025 target_set
= lock_user(VERIFY_READ
, arg5
,
10026 sizeof(target_sigset_t
), 1);
10028 unlock_user(target_ep
, arg2
, 0);
10031 target_to_host_sigset(set
, target_set
);
10032 unlock_user(target_set
, arg5
, 0);
10037 ret
= get_errno(epoll_pwait(epfd
, ep
, maxevents
, timeout
, set
));
10041 #if defined(TARGET_NR_epoll_wait)
10042 case TARGET_NR_epoll_wait
:
10043 ret
= get_errno(epoll_wait(epfd
, ep
, maxevents
, timeout
));
10047 ret
= -TARGET_ENOSYS
;
10049 if (!is_error(ret
)) {
10051 for (i
= 0; i
< ret
; i
++) {
10052 target_ep
[i
].events
= tswap32(ep
[i
].events
);
10053 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
10056 unlock_user(target_ep
, arg2
, ret
* sizeof(struct target_epoll_event
));
10061 #ifdef TARGET_NR_prlimit64
10062 case TARGET_NR_prlimit64
:
10064 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
10065 struct target_rlimit64
*target_rnew
, *target_rold
;
10066 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
10067 int resource
= target_to_host_resource(arg2
);
10069 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
10072 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
10073 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
10074 unlock_user_struct(target_rnew
, arg3
, 0);
10078 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
10079 if (!is_error(ret
) && arg4
) {
10080 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
10083 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
10084 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
10085 unlock_user_struct(target_rold
, arg4
, 1);
10090 #ifdef TARGET_NR_gethostname
10091 case TARGET_NR_gethostname
:
10093 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
10095 ret
= get_errno(gethostname(name
, arg2
));
10096 unlock_user(name
, arg1
, arg2
);
10098 ret
= -TARGET_EFAULT
;
10103 #ifdef TARGET_NR_atomic_cmpxchg_32
10104 case TARGET_NR_atomic_cmpxchg_32
:
10106 /* should use start_exclusive from main.c */
10107 abi_ulong mem_value
;
10108 if (get_user_u32(mem_value
, arg6
)) {
10109 target_siginfo_t info
;
10110 info
.si_signo
= SIGSEGV
;
10112 info
.si_code
= TARGET_SEGV_MAPERR
;
10113 info
._sifields
._sigfault
._addr
= arg6
;
10114 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
10118 if (mem_value
== arg2
)
10119 put_user_u32(arg1
, arg6
);
10124 #ifdef TARGET_NR_atomic_barrier
10125 case TARGET_NR_atomic_barrier
:
10127 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
10133 #ifdef TARGET_NR_timer_create
10134 case TARGET_NR_timer_create
:
10136 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
10138 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
10141 int timer_index
= next_free_host_timer();
10143 if (timer_index
< 0) {
10144 ret
= -TARGET_EAGAIN
;
10146 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
10149 phost_sevp
= &host_sevp
;
10150 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
10156 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
10160 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
10169 #ifdef TARGET_NR_timer_settime
10170 case TARGET_NR_timer_settime
:
10172 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
10173 * struct itimerspec * old_value */
10174 target_timer_t timerid
= get_timer_id(arg1
);
10178 } else if (arg3
== 0) {
10179 ret
= -TARGET_EINVAL
;
10181 timer_t htimer
= g_posix_timers
[timerid
];
10182 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
10184 target_to_host_itimerspec(&hspec_new
, arg3
);
10186 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
10187 host_to_target_itimerspec(arg2
, &hspec_old
);
10193 #ifdef TARGET_NR_timer_gettime
10194 case TARGET_NR_timer_gettime
:
10196 /* args: timer_t timerid, struct itimerspec *curr_value */
10197 target_timer_t timerid
= get_timer_id(arg1
);
10201 } else if (!arg2
) {
10202 ret
= -TARGET_EFAULT
;
10204 timer_t htimer
= g_posix_timers
[timerid
];
10205 struct itimerspec hspec
;
10206 ret
= get_errno(timer_gettime(htimer
, &hspec
));
10208 if (host_to_target_itimerspec(arg2
, &hspec
)) {
10209 ret
= -TARGET_EFAULT
;
10216 #ifdef TARGET_NR_timer_getoverrun
10217 case TARGET_NR_timer_getoverrun
:
10219 /* args: timer_t timerid */
10220 target_timer_t timerid
= get_timer_id(arg1
);
10225 timer_t htimer
= g_posix_timers
[timerid
];
10226 ret
= get_errno(timer_getoverrun(htimer
));
10228 fd_trans_unregister(ret
);
10233 #ifdef TARGET_NR_timer_delete
10234 case TARGET_NR_timer_delete
:
10236 /* args: timer_t timerid */
10237 target_timer_t timerid
= get_timer_id(arg1
);
10242 timer_t htimer
= g_posix_timers
[timerid
];
10243 ret
= get_errno(timer_delete(htimer
));
10244 g_posix_timers
[timerid
] = 0;
10250 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
10251 case TARGET_NR_timerfd_create
:
10252 ret
= get_errno(timerfd_create(arg1
,
10253 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
10257 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
10258 case TARGET_NR_timerfd_gettime
:
10260 struct itimerspec its_curr
;
10262 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
10264 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
10271 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
10272 case TARGET_NR_timerfd_settime
:
10274 struct itimerspec its_new
, its_old
, *p_new
;
10277 if (target_to_host_itimerspec(&its_new
, arg3
)) {
10285 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
10287 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
10294 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
10295 case TARGET_NR_ioprio_get
:
10296 ret
= get_errno(ioprio_get(arg1
, arg2
));
10300 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
10301 case TARGET_NR_ioprio_set
:
10302 ret
= get_errno(ioprio_set(arg1
, arg2
, arg3
));
10306 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
10307 case TARGET_NR_setns
:
10308 ret
= get_errno(setns(arg1
, arg2
));
10311 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
10312 case TARGET_NR_unshare
:
10313 ret
= get_errno(unshare(arg1
));
10319 gemu_log("qemu: Unsupported syscall: %d\n", num
);
10320 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
10321 unimplemented_nowarn
:
10323 ret
= -TARGET_ENOSYS
;
10328 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
10331 print_syscall_ret(num
, ret
);
10334 ret
= -TARGET_EFAULT
;