/*
 *  Linux syscalls
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
29 #include <sys/mount.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
37 #include <linux/capability.h>
40 int __clone2(int (*fn
)(void *), void *child_stack_base
,
41 size_t stack_size
, int flags
, void *arg
, ...);
43 #include <sys/socket.h>
47 #include <sys/times.h>
50 #include <sys/statfs.h>
52 #include <sys/sysinfo.h>
53 #include <sys/signalfd.h>
54 //#include <sys/user.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <linux/wireless.h>
58 #include <linux/icmp.h>
59 #include "qemu-common.h"
61 #include <sys/timerfd.h>
67 #include <sys/eventfd.h>
70 #include <sys/epoll.h>
73 #include "qemu/xattr.h"
75 #ifdef CONFIG_SENDFILE
76 #include <sys/sendfile.h>
79 #define termios host_termios
80 #define winsize host_winsize
81 #define termio host_termio
82 #define sgttyb host_sgttyb /* same as target */
83 #define tchars host_tchars /* same as target */
84 #define ltchars host_ltchars /* same as target */
86 #include <linux/termios.h>
87 #include <linux/unistd.h>
88 #include <linux/cdrom.h>
89 #include <linux/hdreg.h>
90 #include <linux/soundcard.h>
92 #include <linux/mtio.h>
94 #if defined(CONFIG_FIEMAP)
95 #include <linux/fiemap.h>
99 #include <linux/dm-ioctl.h>
100 #include <linux/reboot.h>
101 #include <linux/route.h>
102 #include <linux/filter.h>
103 #include <linux/blkpg.h>
104 #include "linux_loop.h"
109 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
110 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
113 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
114 * once. This exercises the codepaths for restart.
116 //#define DEBUG_ERESTARTSYS
118 //#include <linux/msdos_fs.h>
119 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
120 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
/* Helper macros that define a static wrapper function invoking the raw
 * host syscall() with 0..6 arguments.  Each expansion yields
 * "static type name(args...) { return syscall(__NR_name, args...); }".
 */
#define _syscall0(type,name)		\
static type name (void)			\
{					\
	return syscall(__NR_##name);	\
}

#define _syscall1(type,name,type1,arg1)		\
static type name (type1 arg1)			\
{						\
	return syscall(__NR_##name, arg1);	\
}

#define _syscall2(type,name,type1,arg1,type2,arg2)	\
static type name (type1 arg1,type2 arg2)		\
{							\
	return syscall(__NR_##name, arg1, arg2);	\
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
static type name (type1 arg1,type2 arg2,type3 arg3)		\
{								\
	return syscall(__NR_##name, arg1, arg2, arg3);		\
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
{										\
	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
		  type5,arg5)							\
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
{										\
	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
		  type5,arg5,type6,arg6)					\
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
		  type6 arg6)							\
{										\
	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
}
178 #define __NR_sys_uname __NR_uname
179 #define __NR_sys_getcwd1 __NR_getcwd
180 #define __NR_sys_getdents __NR_getdents
181 #define __NR_sys_getdents64 __NR_getdents64
182 #define __NR_sys_getpriority __NR_getpriority
183 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
184 #define __NR_sys_syslog __NR_syslog
185 #define __NR_sys_tgkill __NR_tgkill
186 #define __NR_sys_tkill __NR_tkill
187 #define __NR_sys_futex __NR_futex
188 #define __NR_sys_inotify_init __NR_inotify_init
189 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
190 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
192 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
194 #define __NR__llseek __NR_lseek
197 /* Newer kernel ports have llseek() instead of _llseek() */
198 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
199 #define TARGET_NR__llseek TARGET_NR_llseek
203 _syscall0(int, gettid
)
205 /* This is a replacement for the host gettid() and must return a host
207 static int gettid(void) {
211 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
212 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
214 #if !defined(__NR_getdents) || \
215 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
216 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
218 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
219 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
220 loff_t
*, res
, uint
, wh
);
222 _syscall3(int,sys_rt_sigqueueinfo
,int,pid
,int,sig
,siginfo_t
*,uinfo
)
223 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
224 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
225 _syscall3(int,sys_tgkill
,int,tgid
,int,pid
,int,sig
)
227 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
228 _syscall2(int,sys_tkill
,int,tid
,int,sig
)
230 #ifdef __NR_exit_group
231 _syscall1(int,exit_group
,int,error_code
)
233 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
234 _syscall1(int,set_tid_address
,int *,tidptr
)
236 #if defined(TARGET_NR_futex) && defined(__NR_futex)
237 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
238 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
240 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
241 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
242 unsigned long *, user_mask_ptr
);
243 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
244 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
245 unsigned long *, user_mask_ptr
);
246 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
248 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
249 struct __user_cap_data_struct
*, data
);
250 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
251 struct __user_cap_data_struct
*, data
);
252 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
253 _syscall2(int, ioprio_get
, int, which
, int, who
)
255 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
256 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
258 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
259 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
262 static bitmask_transtbl fcntl_flags_tbl
[] = {
263 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
264 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
265 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
266 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
267 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
268 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
269 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
270 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
271 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
272 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
273 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
274 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
275 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
276 #if defined(O_DIRECT)
277 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
279 #if defined(O_NOATIME)
280 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
282 #if defined(O_CLOEXEC)
283 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
286 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
288 /* Don't terminate the list prematurely on 64-bit host+guest. */
289 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
290 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
295 typedef abi_long (*TargetFdDataFunc
)(void *, size_t);
296 typedef abi_long (*TargetFdAddrFunc
)(void *, abi_ulong
, socklen_t
);
297 typedef struct TargetFdTrans
{
298 TargetFdDataFunc host_to_target_data
;
299 TargetFdDataFunc target_to_host_data
;
300 TargetFdAddrFunc target_to_host_addr
;
303 static TargetFdTrans
**target_fd_trans
;
305 static unsigned int target_fd_max
;
307 static TargetFdDataFunc
fd_trans_host_to_target_data(int fd
)
309 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
310 return target_fd_trans
[fd
]->host_to_target_data
;
315 static TargetFdAddrFunc
fd_trans_target_to_host_addr(int fd
)
317 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
318 return target_fd_trans
[fd
]->target_to_host_addr
;
323 static void fd_trans_register(int fd
, TargetFdTrans
*trans
)
327 if (fd
>= target_fd_max
) {
328 oldmax
= target_fd_max
;
329 target_fd_max
= ((fd
>> 6) + 1) << 6; /* by slice of 64 entries */
330 target_fd_trans
= g_renew(TargetFdTrans
*,
331 target_fd_trans
, target_fd_max
);
332 memset((void *)(target_fd_trans
+ oldmax
), 0,
333 (target_fd_max
- oldmax
) * sizeof(TargetFdTrans
*));
335 target_fd_trans
[fd
] = trans
;
338 static void fd_trans_unregister(int fd
)
340 if (fd
>= 0 && fd
< target_fd_max
) {
341 target_fd_trans
[fd
] = NULL
;
345 static void fd_trans_dup(int oldfd
, int newfd
)
347 fd_trans_unregister(newfd
);
348 if (oldfd
< target_fd_max
&& target_fd_trans
[oldfd
]) {
349 fd_trans_register(newfd
, target_fd_trans
[oldfd
]);
/* getcwd(2)-style wrapper: fill buf with the current directory and
 * return the byte count including the trailing NUL, or -1 on failure
 * (errno is set by getcwd()).
 */
static int sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }
  return strlen(buf)+1;
}
362 #ifdef TARGET_NR_utimensat
363 #ifdef CONFIG_UTIMENSAT
364 static int sys_utimensat(int dirfd
, const char *pathname
,
365 const struct timespec times
[2], int flags
)
367 if (pathname
== NULL
)
368 return futimens(dirfd
, times
);
370 return utimensat(dirfd
, pathname
, times
, flags
);
372 #elif defined(__NR_utimensat)
373 #define __NR_sys_utimensat __NR_utimensat
374 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
375 const struct timespec
*,tsp
,int,flags
)
377 static int sys_utimensat(int dirfd
, const char *pathname
,
378 const struct timespec times
[2], int flags
)
384 #endif /* TARGET_NR_utimensat */
386 #ifdef CONFIG_INOTIFY
387 #include <sys/inotify.h>
389 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin wrapper over the libc inotify_init(). */
static int sys_inotify_init(void)
{
  return (inotify_init());
}
395 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Thin wrapper over the libc inotify_add_watch(). */
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
401 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Thin wrapper over the libc inotify_rm_watch(). */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
407 #ifdef CONFIG_INOTIFY1
408 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Thin wrapper over the libc inotify_init1(). */
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
416 /* Userspace can usually survive runtime without inotify */
417 #undef TARGET_NR_inotify_init
418 #undef TARGET_NR_inotify_init1
419 #undef TARGET_NR_inotify_add_watch
420 #undef TARGET_NR_inotify_rm_watch
421 #endif /* CONFIG_INOTIFY */
423 #if defined(TARGET_NR_ppoll)
425 # define __NR_ppoll -1
427 #define __NR_sys_ppoll __NR_ppoll
428 _syscall5(int, sys_ppoll
, struct pollfd
*, fds
, nfds_t
, nfds
,
429 struct timespec
*, timeout
, const sigset_t
*, sigmask
,
433 #if defined(TARGET_NR_pselect6)
434 #ifndef __NR_pselect6
435 # define __NR_pselect6 -1
437 #define __NR_sys_pselect6 __NR_pselect6
438 _syscall6(int, sys_pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
,
439 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
);
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying
 * syscall, so declare the kernel's fixed 64-bit layout explicitly.
 */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};

_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maxiumum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

/* Claim the first unused slot in g_posix_timers[] (marking it with a
 * dummy non-zero value) and return its index, or -1 when all 32 slots
 * are taken.
 */
static inline int next_free_host_timer(void)
{
    int k ;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
476 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
/* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
#if defined(TARGET_ARM)
static inline int regpairs_aligned(void *cpu_env) {
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
492 #define ERRNO_TABLE_SIZE 1200
494 /* target_to_host_errno_table[] is initialized from
495 * host_to_target_errno_table[] in syscall_init(). */
496 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
500 * This list is the union of errno values overridden in asm-<arch>/errno.h
501 * minus the errnos that are not actually generic to all archs.
503 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
504 [EAGAIN
] = TARGET_EAGAIN
,
505 [EIDRM
] = TARGET_EIDRM
,
506 [ECHRNG
] = TARGET_ECHRNG
,
507 [EL2NSYNC
] = TARGET_EL2NSYNC
,
508 [EL3HLT
] = TARGET_EL3HLT
,
509 [EL3RST
] = TARGET_EL3RST
,
510 [ELNRNG
] = TARGET_ELNRNG
,
511 [EUNATCH
] = TARGET_EUNATCH
,
512 [ENOCSI
] = TARGET_ENOCSI
,
513 [EL2HLT
] = TARGET_EL2HLT
,
514 [EDEADLK
] = TARGET_EDEADLK
,
515 [ENOLCK
] = TARGET_ENOLCK
,
516 [EBADE
] = TARGET_EBADE
,
517 [EBADR
] = TARGET_EBADR
,
518 [EXFULL
] = TARGET_EXFULL
,
519 [ENOANO
] = TARGET_ENOANO
,
520 [EBADRQC
] = TARGET_EBADRQC
,
521 [EBADSLT
] = TARGET_EBADSLT
,
522 [EBFONT
] = TARGET_EBFONT
,
523 [ENOSTR
] = TARGET_ENOSTR
,
524 [ENODATA
] = TARGET_ENODATA
,
525 [ETIME
] = TARGET_ETIME
,
526 [ENOSR
] = TARGET_ENOSR
,
527 [ENONET
] = TARGET_ENONET
,
528 [ENOPKG
] = TARGET_ENOPKG
,
529 [EREMOTE
] = TARGET_EREMOTE
,
530 [ENOLINK
] = TARGET_ENOLINK
,
531 [EADV
] = TARGET_EADV
,
532 [ESRMNT
] = TARGET_ESRMNT
,
533 [ECOMM
] = TARGET_ECOMM
,
534 [EPROTO
] = TARGET_EPROTO
,
535 [EDOTDOT
] = TARGET_EDOTDOT
,
536 [EMULTIHOP
] = TARGET_EMULTIHOP
,
537 [EBADMSG
] = TARGET_EBADMSG
,
538 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
539 [EOVERFLOW
] = TARGET_EOVERFLOW
,
540 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
541 [EBADFD
] = TARGET_EBADFD
,
542 [EREMCHG
] = TARGET_EREMCHG
,
543 [ELIBACC
] = TARGET_ELIBACC
,
544 [ELIBBAD
] = TARGET_ELIBBAD
,
545 [ELIBSCN
] = TARGET_ELIBSCN
,
546 [ELIBMAX
] = TARGET_ELIBMAX
,
547 [ELIBEXEC
] = TARGET_ELIBEXEC
,
548 [EILSEQ
] = TARGET_EILSEQ
,
549 [ENOSYS
] = TARGET_ENOSYS
,
550 [ELOOP
] = TARGET_ELOOP
,
551 [ERESTART
] = TARGET_ERESTART
,
552 [ESTRPIPE
] = TARGET_ESTRPIPE
,
553 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
554 [EUSERS
] = TARGET_EUSERS
,
555 [ENOTSOCK
] = TARGET_ENOTSOCK
,
556 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
557 [EMSGSIZE
] = TARGET_EMSGSIZE
,
558 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
559 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
560 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
561 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
562 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
563 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
564 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
565 [EADDRINUSE
] = TARGET_EADDRINUSE
,
566 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
567 [ENETDOWN
] = TARGET_ENETDOWN
,
568 [ENETUNREACH
] = TARGET_ENETUNREACH
,
569 [ENETRESET
] = TARGET_ENETRESET
,
570 [ECONNABORTED
] = TARGET_ECONNABORTED
,
571 [ECONNRESET
] = TARGET_ECONNRESET
,
572 [ENOBUFS
] = TARGET_ENOBUFS
,
573 [EISCONN
] = TARGET_EISCONN
,
574 [ENOTCONN
] = TARGET_ENOTCONN
,
575 [EUCLEAN
] = TARGET_EUCLEAN
,
576 [ENOTNAM
] = TARGET_ENOTNAM
,
577 [ENAVAIL
] = TARGET_ENAVAIL
,
578 [EISNAM
] = TARGET_EISNAM
,
579 [EREMOTEIO
] = TARGET_EREMOTEIO
,
580 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
581 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
582 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
583 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
584 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
585 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
586 [EALREADY
] = TARGET_EALREADY
,
587 [EINPROGRESS
] = TARGET_EINPROGRESS
,
588 [ESTALE
] = TARGET_ESTALE
,
589 [ECANCELED
] = TARGET_ECANCELED
,
590 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
591 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
593 [ENOKEY
] = TARGET_ENOKEY
,
596 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
599 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
602 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
605 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
607 #ifdef ENOTRECOVERABLE
608 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
612 static inline int host_to_target_errno(int err
)
614 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
615 host_to_target_errno_table
[err
]) {
616 return host_to_target_errno_table
[err
];
621 static inline int target_to_host_errno(int err
)
623 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
624 target_to_host_errno_table
[err
]) {
625 return target_to_host_errno_table
[err
];
630 static inline abi_long
get_errno(abi_long ret
)
633 return -host_to_target_errno(errno
);
638 static inline int is_error(abi_long ret
)
640 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
643 char *target_strerror(int err
)
645 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
648 return strerror(target_to_host_errno(err
));
/* Like the _syscallN macros but routed through safe_syscall(), which
 * cooperates with the signal code so guest syscalls can be restarted
 * correctly around pending signals.
 */
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
698 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
699 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
700 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
701 int, flags
, mode_t
, mode
)
702 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
703 struct rusage
*, rusage
)
704 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
705 int, options
, struct rusage
*, rusage
)
707 static inline int host_to_target_sock_type(int host_type
)
711 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
713 target_type
= TARGET_SOCK_DGRAM
;
716 target_type
= TARGET_SOCK_STREAM
;
719 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
723 #if defined(SOCK_CLOEXEC)
724 if (host_type
& SOCK_CLOEXEC
) {
725 target_type
|= TARGET_SOCK_CLOEXEC
;
729 #if defined(SOCK_NONBLOCK)
730 if (host_type
& SOCK_NONBLOCK
) {
731 target_type
|= TARGET_SOCK_NONBLOCK
;
738 static abi_ulong target_brk
;
739 static abi_ulong target_original_brk
;
740 static abi_ulong brk_page
;
742 void target_set_brk(abi_ulong new_brk
)
744 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
745 brk_page
= HOST_PAGE_ALIGN(target_brk
);
748 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
749 #define DEBUGF_BRK(message, args...)
751 /* do_brk() must return target values and target errnos. */
752 abi_long
do_brk(abi_ulong new_brk
)
754 abi_long mapped_addr
;
757 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
760 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
763 if (new_brk
< target_original_brk
) {
764 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
769 /* If the new brk is less than the highest page reserved to the
770 * target heap allocation, set it and we're almost done... */
771 if (new_brk
<= brk_page
) {
772 /* Heap contents are initialized to zero, as for anonymous
774 if (new_brk
> target_brk
) {
775 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
777 target_brk
= new_brk
;
778 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
782 /* We need to allocate more memory after the brk... Note that
783 * we don't use MAP_FIXED because that will map over the top of
784 * any existing mapping (like the one with the host libc or qemu
785 * itself); instead we treat "mapped but at wrong address" as
786 * a failure and unmap again.
788 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
789 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
790 PROT_READ
|PROT_WRITE
,
791 MAP_ANON
|MAP_PRIVATE
, 0, 0));
793 if (mapped_addr
== brk_page
) {
794 /* Heap contents are initialized to zero, as for anonymous
795 * mapped pages. Technically the new pages are already
796 * initialized to zero since they *are* anonymous mapped
797 * pages, however we have to take care with the contents that
798 * come from the remaining part of the previous page: it may
799 * contains garbage data due to a previous heap usage (grown
801 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
803 target_brk
= new_brk
;
804 brk_page
= HOST_PAGE_ALIGN(target_brk
);
805 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
808 } else if (mapped_addr
!= -1) {
809 /* Mapped but at wrong address, meaning there wasn't actually
810 * enough space for this brk.
812 target_munmap(mapped_addr
, new_alloc_size
);
814 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
817 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
820 #if defined(TARGET_ALPHA)
821 /* We (partially) emulate OSF/1 on Alpha, which requires we
822 return a proper errno, not an unchanged brk value. */
823 return -TARGET_ENOMEM
;
825 /* For everything else, return the previous break. */
829 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
830 abi_ulong target_fds_addr
,
834 abi_ulong b
, *target_fds
;
836 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
837 if (!(target_fds
= lock_user(VERIFY_READ
,
839 sizeof(abi_ulong
) * nw
,
841 return -TARGET_EFAULT
;
845 for (i
= 0; i
< nw
; i
++) {
846 /* grab the abi_ulong */
847 __get_user(b
, &target_fds
[i
]);
848 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
849 /* check the bit inside the abi_ulong */
856 unlock_user(target_fds
, target_fds_addr
, 0);
861 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
862 abi_ulong target_fds_addr
,
865 if (target_fds_addr
) {
866 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
867 return -TARGET_EFAULT
;
875 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
881 abi_ulong
*target_fds
;
883 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
884 if (!(target_fds
= lock_user(VERIFY_WRITE
,
886 sizeof(abi_ulong
) * nw
,
888 return -TARGET_EFAULT
;
891 for (i
= 0; i
< nw
; i
++) {
893 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
894 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
897 __put_user(v
, &target_fds
[i
]);
900 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
905 #if defined(__alpha__)
911 static inline abi_long
host_to_target_clock_t(long ticks
)
913 #if HOST_HZ == TARGET_HZ
916 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
920 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
921 const struct rusage
*rusage
)
923 struct target_rusage
*target_rusage
;
925 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
926 return -TARGET_EFAULT
;
927 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
928 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
929 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
930 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
931 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
932 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
933 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
934 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
935 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
936 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
937 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
938 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
939 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
940 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
941 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
942 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
943 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
944 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
945 unlock_user_struct(target_rusage
, target_addr
, 1);
950 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
952 abi_ulong target_rlim_swap
;
955 target_rlim_swap
= tswapal(target_rlim
);
956 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
957 return RLIM_INFINITY
;
959 result
= target_rlim_swap
;
960 if (target_rlim_swap
!= (rlim_t
)result
)
961 return RLIM_INFINITY
;
966 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
968 abi_ulong target_rlim_swap
;
971 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
972 target_rlim_swap
= TARGET_RLIM_INFINITY
;
974 target_rlim_swap
= rlim
;
975 result
= tswapal(target_rlim_swap
);
980 static inline int target_to_host_resource(int code
)
983 case TARGET_RLIMIT_AS
:
985 case TARGET_RLIMIT_CORE
:
987 case TARGET_RLIMIT_CPU
:
989 case TARGET_RLIMIT_DATA
:
991 case TARGET_RLIMIT_FSIZE
:
993 case TARGET_RLIMIT_LOCKS
:
995 case TARGET_RLIMIT_MEMLOCK
:
996 return RLIMIT_MEMLOCK
;
997 case TARGET_RLIMIT_MSGQUEUE
:
998 return RLIMIT_MSGQUEUE
;
999 case TARGET_RLIMIT_NICE
:
1001 case TARGET_RLIMIT_NOFILE
:
1002 return RLIMIT_NOFILE
;
1003 case TARGET_RLIMIT_NPROC
:
1004 return RLIMIT_NPROC
;
1005 case TARGET_RLIMIT_RSS
:
1007 case TARGET_RLIMIT_RTPRIO
:
1008 return RLIMIT_RTPRIO
;
1009 case TARGET_RLIMIT_SIGPENDING
:
1010 return RLIMIT_SIGPENDING
;
1011 case TARGET_RLIMIT_STACK
:
1012 return RLIMIT_STACK
;
1018 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1019 abi_ulong target_tv_addr
)
1021 struct target_timeval
*target_tv
;
1023 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
1024 return -TARGET_EFAULT
;
1026 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1027 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1029 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1034 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1035 const struct timeval
*tv
)
1037 struct target_timeval
*target_tv
;
1039 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
1040 return -TARGET_EFAULT
;
1042 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1043 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1045 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1050 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1051 abi_ulong target_tz_addr
)
1053 struct target_timezone
*target_tz
;
1055 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1056 return -TARGET_EFAULT
;
1059 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1060 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1062 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1067 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1070 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1071 abi_ulong target_mq_attr_addr
)
1073 struct target_mq_attr
*target_mq_attr
;
1075 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1076 target_mq_attr_addr
, 1))
1077 return -TARGET_EFAULT
;
1079 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1080 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1081 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1082 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1084 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1089 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1090 const struct mq_attr
*attr
)
1092 struct target_mq_attr
*target_mq_attr
;
1094 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1095 target_mq_attr_addr
, 0))
1096 return -TARGET_EFAULT
;
1098 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1099 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1100 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1101 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1103 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1109 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1110 /* do_select() must return target values and target errnos. */
1111 static abi_long
do_select(int n
,
1112 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1113 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1115 fd_set rfds
, wfds
, efds
;
1116 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1117 struct timeval tv
, *tv_ptr
;
1120 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1124 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1128 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1133 if (target_tv_addr
) {
1134 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1135 return -TARGET_EFAULT
;
1141 ret
= get_errno(select(n
, rfds_ptr
, wfds_ptr
, efds_ptr
, tv_ptr
));
1143 if (!is_error(ret
)) {
1144 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1145 return -TARGET_EFAULT
;
1146 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1147 return -TARGET_EFAULT
;
1148 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1149 return -TARGET_EFAULT
;
1151 if (target_tv_addr
&& copy_to_user_timeval(target_tv_addr
, &tv
))
1152 return -TARGET_EFAULT
;
1159 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1162 return pipe2(host_pipe
, flags
);
1168 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1169 int flags
, int is_pipe2
)
1173 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1176 return get_errno(ret
);
1178 /* Several targets have special calling conventions for the original
1179 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1181 #if defined(TARGET_ALPHA)
1182 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1183 return host_pipe
[0];
1184 #elif defined(TARGET_MIPS)
1185 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1186 return host_pipe
[0];
1187 #elif defined(TARGET_SH4)
1188 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1189 return host_pipe
[0];
1190 #elif defined(TARGET_SPARC)
1191 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1192 return host_pipe
[0];
1196 if (put_user_s32(host_pipe
[0], pipedes
)
1197 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1198 return -TARGET_EFAULT
;
1199 return get_errno(ret
);
1202 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1203 abi_ulong target_addr
,
1206 struct target_ip_mreqn
*target_smreqn
;
1208 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1210 return -TARGET_EFAULT
;
1211 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1212 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1213 if (len
== sizeof(struct target_ip_mreqn
))
1214 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1215 unlock_user(target_smreqn
, target_addr
, 0);
1220 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1221 abi_ulong target_addr
,
1224 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1225 sa_family_t sa_family
;
1226 struct target_sockaddr
*target_saddr
;
1228 if (fd_trans_target_to_host_addr(fd
)) {
1229 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1232 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1234 return -TARGET_EFAULT
;
1236 sa_family
= tswap16(target_saddr
->sa_family
);
1238 /* Oops. The caller might send a incomplete sun_path; sun_path
1239 * must be terminated by \0 (see the manual page), but
1240 * unfortunately it is quite common to specify sockaddr_un
1241 * length as "strlen(x->sun_path)" while it should be
1242 * "strlen(...) + 1". We'll fix that here if needed.
1243 * Linux kernel has a similar feature.
1246 if (sa_family
== AF_UNIX
) {
1247 if (len
< unix_maxlen
&& len
> 0) {
1248 char *cp
= (char*)target_saddr
;
1250 if ( cp
[len
-1] && !cp
[len
] )
1253 if (len
> unix_maxlen
)
1257 memcpy(addr
, target_saddr
, len
);
1258 addr
->sa_family
= sa_family
;
1259 if (sa_family
== AF_PACKET
) {
1260 struct target_sockaddr_ll
*lladdr
;
1262 lladdr
= (struct target_sockaddr_ll
*)addr
;
1263 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1264 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1266 unlock_user(target_saddr
, target_addr
, 0);
1271 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1272 struct sockaddr
*addr
,
1275 struct target_sockaddr
*target_saddr
;
1277 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1279 return -TARGET_EFAULT
;
1280 memcpy(target_saddr
, addr
, len
);
1281 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1282 unlock_user(target_saddr
, target_addr
, len
);
1287 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1288 struct target_msghdr
*target_msgh
)
1290 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1291 abi_long msg_controllen
;
1292 abi_ulong target_cmsg_addr
;
1293 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1294 socklen_t space
= 0;
1296 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1297 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1299 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1300 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1301 target_cmsg_start
= target_cmsg
;
1303 return -TARGET_EFAULT
;
1305 while (cmsg
&& target_cmsg
) {
1306 void *data
= CMSG_DATA(cmsg
);
1307 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1309 int len
= tswapal(target_cmsg
->cmsg_len
)
1310 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
1312 space
+= CMSG_SPACE(len
);
1313 if (space
> msgh
->msg_controllen
) {
1314 space
-= CMSG_SPACE(len
);
1315 /* This is a QEMU bug, since we allocated the payload
1316 * area ourselves (unlike overflow in host-to-target
1317 * conversion, which is just the guest giving us a buffer
1318 * that's too small). It can't happen for the payload types
1319 * we currently support; if it becomes an issue in future
1320 * we would need to improve our allocation strategy to
1321 * something more intelligent than "twice the size of the
1322 * target buffer we're reading from".
1324 gemu_log("Host cmsg overflow\n");
1328 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1329 cmsg
->cmsg_level
= SOL_SOCKET
;
1331 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1333 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1334 cmsg
->cmsg_len
= CMSG_LEN(len
);
1336 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1337 int *fd
= (int *)data
;
1338 int *target_fd
= (int *)target_data
;
1339 int i
, numfds
= len
/ sizeof(int);
1341 for (i
= 0; i
< numfds
; i
++) {
1342 __get_user(fd
[i
], target_fd
+ i
);
1344 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1345 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1346 struct ucred
*cred
= (struct ucred
*)data
;
1347 struct target_ucred
*target_cred
=
1348 (struct target_ucred
*)target_data
;
1350 __get_user(cred
->pid
, &target_cred
->pid
);
1351 __get_user(cred
->uid
, &target_cred
->uid
);
1352 __get_user(cred
->gid
, &target_cred
->gid
);
1354 gemu_log("Unsupported ancillary data: %d/%d\n",
1355 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1356 memcpy(data
, target_data
, len
);
1359 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1360 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1363 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1365 msgh
->msg_controllen
= space
;
1369 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1370 struct msghdr
*msgh
)
1372 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1373 abi_long msg_controllen
;
1374 abi_ulong target_cmsg_addr
;
1375 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1376 socklen_t space
= 0;
1378 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1379 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1381 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1382 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1383 target_cmsg_start
= target_cmsg
;
1385 return -TARGET_EFAULT
;
1387 while (cmsg
&& target_cmsg
) {
1388 void *data
= CMSG_DATA(cmsg
);
1389 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1391 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
1392 int tgt_len
, tgt_space
;
1394 /* We never copy a half-header but may copy half-data;
1395 * this is Linux's behaviour in put_cmsg(). Note that
1396 * truncation here is a guest problem (which we report
1397 * to the guest via the CTRUNC bit), unlike truncation
1398 * in target_to_host_cmsg, which is a QEMU bug.
1400 if (msg_controllen
< sizeof(struct cmsghdr
)) {
1401 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1405 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1406 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1408 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1410 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1412 tgt_len
= TARGET_CMSG_LEN(len
);
1414 /* Payload types which need a different size of payload on
1415 * the target must adjust tgt_len here.
1417 switch (cmsg
->cmsg_level
) {
1419 switch (cmsg
->cmsg_type
) {
1421 tgt_len
= sizeof(struct target_timeval
);
1430 if (msg_controllen
< tgt_len
) {
1431 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1432 tgt_len
= msg_controllen
;
1435 /* We must now copy-and-convert len bytes of payload
1436 * into tgt_len bytes of destination space. Bear in mind
1437 * that in both source and destination we may be dealing
1438 * with a truncated value!
1440 switch (cmsg
->cmsg_level
) {
1442 switch (cmsg
->cmsg_type
) {
1445 int *fd
= (int *)data
;
1446 int *target_fd
= (int *)target_data
;
1447 int i
, numfds
= tgt_len
/ sizeof(int);
1449 for (i
= 0; i
< numfds
; i
++) {
1450 __put_user(fd
[i
], target_fd
+ i
);
1456 struct timeval
*tv
= (struct timeval
*)data
;
1457 struct target_timeval
*target_tv
=
1458 (struct target_timeval
*)target_data
;
1460 if (len
!= sizeof(struct timeval
) ||
1461 tgt_len
!= sizeof(struct target_timeval
)) {
1465 /* copy struct timeval to target */
1466 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1467 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1470 case SCM_CREDENTIALS
:
1472 struct ucred
*cred
= (struct ucred
*)data
;
1473 struct target_ucred
*target_cred
=
1474 (struct target_ucred
*)target_data
;
1476 __put_user(cred
->pid
, &target_cred
->pid
);
1477 __put_user(cred
->uid
, &target_cred
->uid
);
1478 __put_user(cred
->gid
, &target_cred
->gid
);
1488 gemu_log("Unsupported ancillary data: %d/%d\n",
1489 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1490 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1491 if (tgt_len
> len
) {
1492 memset(target_data
+ len
, 0, tgt_len
- len
);
1496 target_cmsg
->cmsg_len
= tswapal(tgt_len
);
1497 tgt_space
= TARGET_CMSG_SPACE(len
);
1498 if (msg_controllen
< tgt_space
) {
1499 tgt_space
= msg_controllen
;
1501 msg_controllen
-= tgt_space
;
1503 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1504 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1507 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1509 target_msgh
->msg_controllen
= tswapal(space
);
1513 /* do_setsockopt() Must return target values and target errnos. */
1514 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
1515 abi_ulong optval_addr
, socklen_t optlen
)
1519 struct ip_mreqn
*ip_mreq
;
1520 struct ip_mreq_source
*ip_mreq_source
;
1524 /* TCP options all take an 'int' value. */
1525 if (optlen
< sizeof(uint32_t))
1526 return -TARGET_EINVAL
;
1528 if (get_user_u32(val
, optval_addr
))
1529 return -TARGET_EFAULT
;
1530 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1537 case IP_ROUTER_ALERT
:
1541 case IP_MTU_DISCOVER
:
1547 case IP_MULTICAST_TTL
:
1548 case IP_MULTICAST_LOOP
:
1550 if (optlen
>= sizeof(uint32_t)) {
1551 if (get_user_u32(val
, optval_addr
))
1552 return -TARGET_EFAULT
;
1553 } else if (optlen
>= 1) {
1554 if (get_user_u8(val
, optval_addr
))
1555 return -TARGET_EFAULT
;
1557 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1559 case IP_ADD_MEMBERSHIP
:
1560 case IP_DROP_MEMBERSHIP
:
1561 if (optlen
< sizeof (struct target_ip_mreq
) ||
1562 optlen
> sizeof (struct target_ip_mreqn
))
1563 return -TARGET_EINVAL
;
1565 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
1566 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
1567 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
1570 case IP_BLOCK_SOURCE
:
1571 case IP_UNBLOCK_SOURCE
:
1572 case IP_ADD_SOURCE_MEMBERSHIP
:
1573 case IP_DROP_SOURCE_MEMBERSHIP
:
1574 if (optlen
!= sizeof (struct target_ip_mreq_source
))
1575 return -TARGET_EINVAL
;
1577 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
1578 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
1579 unlock_user (ip_mreq_source
, optval_addr
, 0);
1588 case IPV6_MTU_DISCOVER
:
1591 case IPV6_RECVPKTINFO
:
1593 if (optlen
< sizeof(uint32_t)) {
1594 return -TARGET_EINVAL
;
1596 if (get_user_u32(val
, optval_addr
)) {
1597 return -TARGET_EFAULT
;
1599 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1600 &val
, sizeof(val
)));
1609 /* struct icmp_filter takes an u32 value */
1610 if (optlen
< sizeof(uint32_t)) {
1611 return -TARGET_EINVAL
;
1614 if (get_user_u32(val
, optval_addr
)) {
1615 return -TARGET_EFAULT
;
1617 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1618 &val
, sizeof(val
)));
1625 case TARGET_SOL_SOCKET
:
1627 case TARGET_SO_RCVTIMEO
:
1631 optname
= SO_RCVTIMEO
;
1634 if (optlen
!= sizeof(struct target_timeval
)) {
1635 return -TARGET_EINVAL
;
1638 if (copy_from_user_timeval(&tv
, optval_addr
)) {
1639 return -TARGET_EFAULT
;
1642 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
1646 case TARGET_SO_SNDTIMEO
:
1647 optname
= SO_SNDTIMEO
;
1649 case TARGET_SO_ATTACH_FILTER
:
1651 struct target_sock_fprog
*tfprog
;
1652 struct target_sock_filter
*tfilter
;
1653 struct sock_fprog fprog
;
1654 struct sock_filter
*filter
;
1657 if (optlen
!= sizeof(*tfprog
)) {
1658 return -TARGET_EINVAL
;
1660 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
1661 return -TARGET_EFAULT
;
1663 if (!lock_user_struct(VERIFY_READ
, tfilter
,
1664 tswapal(tfprog
->filter
), 0)) {
1665 unlock_user_struct(tfprog
, optval_addr
, 1);
1666 return -TARGET_EFAULT
;
1669 fprog
.len
= tswap16(tfprog
->len
);
1670 filter
= g_try_new(struct sock_filter
, fprog
.len
);
1671 if (filter
== NULL
) {
1672 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
1673 unlock_user_struct(tfprog
, optval_addr
, 1);
1674 return -TARGET_ENOMEM
;
1676 for (i
= 0; i
< fprog
.len
; i
++) {
1677 filter
[i
].code
= tswap16(tfilter
[i
].code
);
1678 filter
[i
].jt
= tfilter
[i
].jt
;
1679 filter
[i
].jf
= tfilter
[i
].jf
;
1680 filter
[i
].k
= tswap32(tfilter
[i
].k
);
1682 fprog
.filter
= filter
;
1684 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
1685 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
1688 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
1689 unlock_user_struct(tfprog
, optval_addr
, 1);
1692 case TARGET_SO_BINDTODEVICE
:
1694 char *dev_ifname
, *addr_ifname
;
1696 if (optlen
> IFNAMSIZ
- 1) {
1697 optlen
= IFNAMSIZ
- 1;
1699 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
1701 return -TARGET_EFAULT
;
1703 optname
= SO_BINDTODEVICE
;
1704 addr_ifname
= alloca(IFNAMSIZ
);
1705 memcpy(addr_ifname
, dev_ifname
, optlen
);
1706 addr_ifname
[optlen
] = 0;
1707 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
1708 addr_ifname
, optlen
));
1709 unlock_user (dev_ifname
, optval_addr
, 0);
1712 /* Options with 'int' argument. */
1713 case TARGET_SO_DEBUG
:
1716 case TARGET_SO_REUSEADDR
:
1717 optname
= SO_REUSEADDR
;
1719 case TARGET_SO_TYPE
:
1722 case TARGET_SO_ERROR
:
1725 case TARGET_SO_DONTROUTE
:
1726 optname
= SO_DONTROUTE
;
1728 case TARGET_SO_BROADCAST
:
1729 optname
= SO_BROADCAST
;
1731 case TARGET_SO_SNDBUF
:
1732 optname
= SO_SNDBUF
;
1734 case TARGET_SO_SNDBUFFORCE
:
1735 optname
= SO_SNDBUFFORCE
;
1737 case TARGET_SO_RCVBUF
:
1738 optname
= SO_RCVBUF
;
1740 case TARGET_SO_RCVBUFFORCE
:
1741 optname
= SO_RCVBUFFORCE
;
1743 case TARGET_SO_KEEPALIVE
:
1744 optname
= SO_KEEPALIVE
;
1746 case TARGET_SO_OOBINLINE
:
1747 optname
= SO_OOBINLINE
;
1749 case TARGET_SO_NO_CHECK
:
1750 optname
= SO_NO_CHECK
;
1752 case TARGET_SO_PRIORITY
:
1753 optname
= SO_PRIORITY
;
1756 case TARGET_SO_BSDCOMPAT
:
1757 optname
= SO_BSDCOMPAT
;
1760 case TARGET_SO_PASSCRED
:
1761 optname
= SO_PASSCRED
;
1763 case TARGET_SO_PASSSEC
:
1764 optname
= SO_PASSSEC
;
1766 case TARGET_SO_TIMESTAMP
:
1767 optname
= SO_TIMESTAMP
;
1769 case TARGET_SO_RCVLOWAT
:
1770 optname
= SO_RCVLOWAT
;
1776 if (optlen
< sizeof(uint32_t))
1777 return -TARGET_EINVAL
;
1779 if (get_user_u32(val
, optval_addr
))
1780 return -TARGET_EFAULT
;
1781 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
1785 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
1786 ret
= -TARGET_ENOPROTOOPT
;
1791 /* do_getsockopt() Must return target values and target errnos. */
1792 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
1793 abi_ulong optval_addr
, abi_ulong optlen
)
1800 case TARGET_SOL_SOCKET
:
1803 /* These don't just return a single integer */
1804 case TARGET_SO_LINGER
:
1805 case TARGET_SO_RCVTIMEO
:
1806 case TARGET_SO_SNDTIMEO
:
1807 case TARGET_SO_PEERNAME
:
1809 case TARGET_SO_PEERCRED
: {
1812 struct target_ucred
*tcr
;
1814 if (get_user_u32(len
, optlen
)) {
1815 return -TARGET_EFAULT
;
1818 return -TARGET_EINVAL
;
1822 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
1830 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
1831 return -TARGET_EFAULT
;
1833 __put_user(cr
.pid
, &tcr
->pid
);
1834 __put_user(cr
.uid
, &tcr
->uid
);
1835 __put_user(cr
.gid
, &tcr
->gid
);
1836 unlock_user_struct(tcr
, optval_addr
, 1);
1837 if (put_user_u32(len
, optlen
)) {
1838 return -TARGET_EFAULT
;
1842 /* Options with 'int' argument. */
1843 case TARGET_SO_DEBUG
:
1846 case TARGET_SO_REUSEADDR
:
1847 optname
= SO_REUSEADDR
;
1849 case TARGET_SO_TYPE
:
1852 case TARGET_SO_ERROR
:
1855 case TARGET_SO_DONTROUTE
:
1856 optname
= SO_DONTROUTE
;
1858 case TARGET_SO_BROADCAST
:
1859 optname
= SO_BROADCAST
;
1861 case TARGET_SO_SNDBUF
:
1862 optname
= SO_SNDBUF
;
1864 case TARGET_SO_RCVBUF
:
1865 optname
= SO_RCVBUF
;
1867 case TARGET_SO_KEEPALIVE
:
1868 optname
= SO_KEEPALIVE
;
1870 case TARGET_SO_OOBINLINE
:
1871 optname
= SO_OOBINLINE
;
1873 case TARGET_SO_NO_CHECK
:
1874 optname
= SO_NO_CHECK
;
1876 case TARGET_SO_PRIORITY
:
1877 optname
= SO_PRIORITY
;
1880 case TARGET_SO_BSDCOMPAT
:
1881 optname
= SO_BSDCOMPAT
;
1884 case TARGET_SO_PASSCRED
:
1885 optname
= SO_PASSCRED
;
1887 case TARGET_SO_TIMESTAMP
:
1888 optname
= SO_TIMESTAMP
;
1890 case TARGET_SO_RCVLOWAT
:
1891 optname
= SO_RCVLOWAT
;
1893 case TARGET_SO_ACCEPTCONN
:
1894 optname
= SO_ACCEPTCONN
;
1901 /* TCP options all take an 'int' value. */
1903 if (get_user_u32(len
, optlen
))
1904 return -TARGET_EFAULT
;
1906 return -TARGET_EINVAL
;
1908 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1911 if (optname
== SO_TYPE
) {
1912 val
= host_to_target_sock_type(val
);
1917 if (put_user_u32(val
, optval_addr
))
1918 return -TARGET_EFAULT
;
1920 if (put_user_u8(val
, optval_addr
))
1921 return -TARGET_EFAULT
;
1923 if (put_user_u32(len
, optlen
))
1924 return -TARGET_EFAULT
;
1931 case IP_ROUTER_ALERT
:
1935 case IP_MTU_DISCOVER
:
1941 case IP_MULTICAST_TTL
:
1942 case IP_MULTICAST_LOOP
:
1943 if (get_user_u32(len
, optlen
))
1944 return -TARGET_EFAULT
;
1946 return -TARGET_EINVAL
;
1948 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1951 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
1953 if (put_user_u32(len
, optlen
)
1954 || put_user_u8(val
, optval_addr
))
1955 return -TARGET_EFAULT
;
1957 if (len
> sizeof(int))
1959 if (put_user_u32(len
, optlen
)
1960 || put_user_u32(val
, optval_addr
))
1961 return -TARGET_EFAULT
;
1965 ret
= -TARGET_ENOPROTOOPT
;
1971 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1973 ret
= -TARGET_EOPNOTSUPP
;
1979 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
1980 int count
, int copy
)
1982 struct target_iovec
*target_vec
;
1984 abi_ulong total_len
, max_len
;
1987 bool bad_address
= false;
1993 if (count
< 0 || count
> IOV_MAX
) {
1998 vec
= g_try_new0(struct iovec
, count
);
2004 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2005 count
* sizeof(struct target_iovec
), 1);
2006 if (target_vec
== NULL
) {
2011 /* ??? If host page size > target page size, this will result in a
2012 value larger than what we can actually support. */
2013 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
2016 for (i
= 0; i
< count
; i
++) {
2017 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2018 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2023 } else if (len
== 0) {
2024 /* Zero length pointer is ignored. */
2025 vec
[i
].iov_base
= 0;
2027 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
2028 /* If the first buffer pointer is bad, this is a fault. But
2029 * subsequent bad buffers will result in a partial write; this
2030 * is realized by filling the vector with null pointers and
2032 if (!vec
[i
].iov_base
) {
2043 if (len
> max_len
- total_len
) {
2044 len
= max_len
- total_len
;
2047 vec
[i
].iov_len
= len
;
2051 unlock_user(target_vec
, target_addr
, 0);
2056 if (tswapal(target_vec
[i
].iov_len
) > 0) {
2057 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
2060 unlock_user(target_vec
, target_addr
, 0);
2067 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
2068 int count
, int copy
)
2070 struct target_iovec
*target_vec
;
2073 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2074 count
* sizeof(struct target_iovec
), 1);
2076 for (i
= 0; i
< count
; i
++) {
2077 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2078 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2082 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
2084 unlock_user(target_vec
, target_addr
, 0);
2090 static inline int target_to_host_sock_type(int *type
)
2093 int target_type
= *type
;
2095 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
2096 case TARGET_SOCK_DGRAM
:
2097 host_type
= SOCK_DGRAM
;
2099 case TARGET_SOCK_STREAM
:
2100 host_type
= SOCK_STREAM
;
2103 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
2106 if (target_type
& TARGET_SOCK_CLOEXEC
) {
2107 #if defined(SOCK_CLOEXEC)
2108 host_type
|= SOCK_CLOEXEC
;
2110 return -TARGET_EINVAL
;
2113 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2114 #if defined(SOCK_NONBLOCK)
2115 host_type
|= SOCK_NONBLOCK
;
2116 #elif !defined(O_NONBLOCK)
2117 return -TARGET_EINVAL
;
2124 /* Try to emulate socket type flags after socket creation. */
2125 static int sock_flags_fixup(int fd
, int target_type
)
2127 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2128 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2129 int flags
= fcntl(fd
, F_GETFL
);
2130 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
2132 return -TARGET_EINVAL
;
2139 static abi_long
packet_target_to_host_sockaddr(void *host_addr
,
2140 abi_ulong target_addr
,
2143 struct sockaddr
*addr
= host_addr
;
2144 struct target_sockaddr
*target_saddr
;
2146 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
2147 if (!target_saddr
) {
2148 return -TARGET_EFAULT
;
2151 memcpy(addr
, target_saddr
, len
);
2152 addr
->sa_family
= tswap16(target_saddr
->sa_family
);
2153 /* spkt_protocol is big-endian */
2155 unlock_user(target_saddr
, target_addr
, 0);
2159 static TargetFdTrans target_packet_trans
= {
2160 .target_to_host_addr
= packet_target_to_host_sockaddr
,
2163 /* do_socket() Must return target values and target errnos. */
2164 static abi_long
do_socket(int domain
, int type
, int protocol
)
2166 int target_type
= type
;
2169 ret
= target_to_host_sock_type(&type
);
2174 if (domain
== PF_NETLINK
)
2175 return -TARGET_EAFNOSUPPORT
;
2177 if (domain
== AF_PACKET
||
2178 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
2179 protocol
= tswap16(protocol
);
2182 ret
= get_errno(socket(domain
, type
, protocol
));
2184 ret
= sock_flags_fixup(ret
, target_type
);
2185 if (type
== SOCK_PACKET
) {
2186 /* Manage an obsolete case :
2187 * if socket type is SOCK_PACKET, bind by name
2189 fd_trans_register(ret
, &target_packet_trans
);
2195 /* do_bind() Must return target values and target errnos. */
2196 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
2202 if ((int)addrlen
< 0) {
2203 return -TARGET_EINVAL
;
2206 addr
= alloca(addrlen
+1);
2208 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
2212 return get_errno(bind(sockfd
, addr
, addrlen
));
2215 /* do_connect() Must return target values and target errnos. */
2216 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
2222 if ((int)addrlen
< 0) {
2223 return -TARGET_EINVAL
;
2226 addr
= alloca(addrlen
+1);
2228 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
2232 return get_errno(connect(sockfd
, addr
, addrlen
));
2235 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2236 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
2237 int flags
, int send
)
2243 abi_ulong target_vec
;
2245 if (msgp
->msg_name
) {
2246 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
2247 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
2248 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
2249 tswapal(msgp
->msg_name
),
2255 msg
.msg_name
= NULL
;
2256 msg
.msg_namelen
= 0;
2258 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
2259 msg
.msg_control
= alloca(msg
.msg_controllen
);
2260 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
2262 count
= tswapal(msgp
->msg_iovlen
);
2263 target_vec
= tswapal(msgp
->msg_iov
);
2264 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
2265 target_vec
, count
, send
);
2267 ret
= -host_to_target_errno(errno
);
2270 msg
.msg_iovlen
= count
;
2274 ret
= target_to_host_cmsg(&msg
, msgp
);
2276 ret
= get_errno(sendmsg(fd
, &msg
, flags
));
2278 ret
= get_errno(recvmsg(fd
, &msg
, flags
));
2279 if (!is_error(ret
)) {
2281 ret
= host_to_target_cmsg(msgp
, &msg
);
2282 if (!is_error(ret
)) {
2283 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
2284 if (msg
.msg_name
!= NULL
) {
2285 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
2286 msg
.msg_name
, msg
.msg_namelen
);
2298 unlock_iovec(vec
, target_vec
, count
, !send
);
2303 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
2304 int flags
, int send
)
2307 struct target_msghdr
*msgp
;
2309 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
2313 return -TARGET_EFAULT
;
2315 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
2316 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
2320 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2321 * so it might not have this *mmsg-specific flag either.
2323 #ifndef MSG_WAITFORONE
2324 #define MSG_WAITFORONE 0x10000
2327 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
2328 unsigned int vlen
, unsigned int flags
,
2331 struct target_mmsghdr
*mmsgp
;
2335 if (vlen
> UIO_MAXIOV
) {
2339 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
2341 return -TARGET_EFAULT
;
2344 for (i
= 0; i
< vlen
; i
++) {
2345 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
2346 if (is_error(ret
)) {
2349 mmsgp
[i
].msg_len
= tswap32(ret
);
2350 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2351 if (flags
& MSG_WAITFORONE
) {
2352 flags
|= MSG_DONTWAIT
;
2356 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
2358 /* Return number of datagrams sent if we sent any at all;
2359 * otherwise return the error.
/* If we don't have a system accept4() then just call accept.
 * The callsites to do_accept4() will ensure that they don't
 * pass a non-zero flags argument in this config.
 */
#ifndef CONFIG_ACCEPT4
static inline int accept4(int sockfd, struct sockaddr *addr,
                          socklen_t *addrlen, int flags)
{
    assert(flags == 0);
    return accept(sockfd, addr, addrlen);
}
#endif
2380 /* do_accept4() Must return target values and target errnos. */
2381 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
2382 abi_ulong target_addrlen_addr
, int flags
)
2389 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
2391 if (target_addr
== 0) {
2392 return get_errno(accept4(fd
, NULL
, NULL
, host_flags
));
2395 /* linux returns EINVAL if addrlen pointer is invalid */
2396 if (get_user_u32(addrlen
, target_addrlen_addr
))
2397 return -TARGET_EINVAL
;
2399 if ((int)addrlen
< 0) {
2400 return -TARGET_EINVAL
;
2403 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2404 return -TARGET_EINVAL
;
2406 addr
= alloca(addrlen
);
2408 ret
= get_errno(accept4(fd
, addr
, &addrlen
, host_flags
));
2409 if (!is_error(ret
)) {
2410 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2411 if (put_user_u32(addrlen
, target_addrlen_addr
))
2412 ret
= -TARGET_EFAULT
;
2417 /* do_getpeername() Must return target values and target errnos. */
2418 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
2419 abi_ulong target_addrlen_addr
)
2425 if (get_user_u32(addrlen
, target_addrlen_addr
))
2426 return -TARGET_EFAULT
;
2428 if ((int)addrlen
< 0) {
2429 return -TARGET_EINVAL
;
2432 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2433 return -TARGET_EFAULT
;
2435 addr
= alloca(addrlen
);
2437 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
2438 if (!is_error(ret
)) {
2439 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2440 if (put_user_u32(addrlen
, target_addrlen_addr
))
2441 ret
= -TARGET_EFAULT
;
2446 /* do_getsockname() Must return target values and target errnos. */
2447 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
2448 abi_ulong target_addrlen_addr
)
2454 if (get_user_u32(addrlen
, target_addrlen_addr
))
2455 return -TARGET_EFAULT
;
2457 if ((int)addrlen
< 0) {
2458 return -TARGET_EINVAL
;
2461 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2462 return -TARGET_EFAULT
;
2464 addr
= alloca(addrlen
);
2466 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
2467 if (!is_error(ret
)) {
2468 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2469 if (put_user_u32(addrlen
, target_addrlen_addr
))
2470 ret
= -TARGET_EFAULT
;
2475 /* do_socketpair() Must return target values and target errnos. */
2476 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
2477 abi_ulong target_tab_addr
)
2482 target_to_host_sock_type(&type
);
2484 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
2485 if (!is_error(ret
)) {
2486 if (put_user_s32(tab
[0], target_tab_addr
)
2487 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
2488 ret
= -TARGET_EFAULT
;
2493 /* do_sendto() Must return target values and target errnos. */
2494 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
2495 abi_ulong target_addr
, socklen_t addrlen
)
2501 if ((int)addrlen
< 0) {
2502 return -TARGET_EINVAL
;
2505 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
2507 return -TARGET_EFAULT
;
2509 addr
= alloca(addrlen
+1);
2510 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
2512 unlock_user(host_msg
, msg
, 0);
2515 ret
= get_errno(sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
2517 ret
= get_errno(send(fd
, host_msg
, len
, flags
));
2519 unlock_user(host_msg
, msg
, 0);
2523 /* do_recvfrom() Must return target values and target errnos. */
2524 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
2525 abi_ulong target_addr
,
2526 abi_ulong target_addrlen
)
2533 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
2535 return -TARGET_EFAULT
;
2537 if (get_user_u32(addrlen
, target_addrlen
)) {
2538 ret
= -TARGET_EFAULT
;
2541 if ((int)addrlen
< 0) {
2542 ret
= -TARGET_EINVAL
;
2545 addr
= alloca(addrlen
);
2546 ret
= get_errno(recvfrom(fd
, host_msg
, len
, flags
, addr
, &addrlen
));
2548 addr
= NULL
; /* To keep compiler quiet. */
2549 ret
= get_errno(qemu_recv(fd
, host_msg
, len
, flags
));
2551 if (!is_error(ret
)) {
2553 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2554 if (put_user_u32(addrlen
, target_addrlen
)) {
2555 ret
= -TARGET_EFAULT
;
2559 unlock_user(host_msg
, msg
, len
);
2562 unlock_user(host_msg
, msg
, 0);
#ifdef TARGET_NR_socketcall
/* do_socketcall() Must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    /* Number of guest arguments each multiplexed socket call takes;
     * indexed by SOCKOP_* number. */
    static const unsigned ac[] = { /* number of arguments per call */
        [SOCKOP_socket] = 3,      /* domain, type, protocol */
        [SOCKOP_bind] = 3,        /* sockfd, addr, addrlen */
        [SOCKOP_connect] = 3,     /* sockfd, addr, addrlen */
        [SOCKOP_listen] = 2,      /* sockfd, backlog */
        [SOCKOP_accept] = 3,      /* sockfd, addr, addrlen */
        [SOCKOP_accept4] = 4,     /* sockfd, addr, addrlen, flags */
        [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_socketpair] = 4,  /* domain, type, protocol, tab */
        [SOCKOP_send] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_recv] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_sendto] = 6,      /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_recvfrom] = 6,    /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_shutdown] = 2,    /* sockfd, how */
        [SOCKOP_sendmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_recvmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_sendmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_recvmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_setsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
        [SOCKOP_getsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* first, collect the arguments in a[] according to ac[] */
    if (num >= 0 && num < ARRAY_SIZE(ac)) {
        assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
        for (i = 0; i < ac[num]; ++i) {
            if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
                return -TARGET_EFAULT;
            }
        }
    }

    /* now when we have the args, actually handle the call */
    switch (num) {
    case SOCKOP_socket: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case SOCKOP_bind: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case SOCKOP_connect: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case SOCKOP_listen: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case SOCKOP_accept: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case SOCKOP_getsockname: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case SOCKOP_getpeername: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case SOCKOP_socketpair: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case SOCKOP_send: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_recv: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_shutdown: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case SOCKOP_sendmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case SOCKOP_recvmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case SOCKOP_sendmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    case SOCKOP_recvmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_ENOSYS;
    }
}
#endif
2655 #define N_SHM_REGIONS 32
2657 static struct shm_region
{
2661 } shm_regions
[N_SHM_REGIONS
];
2663 struct target_semid_ds
2665 struct target_ipc_perm sem_perm
;
2666 abi_ulong sem_otime
;
2667 #if !defined(TARGET_PPC64)
2668 abi_ulong __unused1
;
2670 abi_ulong sem_ctime
;
2671 #if !defined(TARGET_PPC64)
2672 abi_ulong __unused2
;
2674 abi_ulong sem_nsems
;
2675 abi_ulong __unused3
;
2676 abi_ulong __unused4
;
2679 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
2680 abi_ulong target_addr
)
2682 struct target_ipc_perm
*target_ip
;
2683 struct target_semid_ds
*target_sd
;
2685 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2686 return -TARGET_EFAULT
;
2687 target_ip
= &(target_sd
->sem_perm
);
2688 host_ip
->__key
= tswap32(target_ip
->__key
);
2689 host_ip
->uid
= tswap32(target_ip
->uid
);
2690 host_ip
->gid
= tswap32(target_ip
->gid
);
2691 host_ip
->cuid
= tswap32(target_ip
->cuid
);
2692 host_ip
->cgid
= tswap32(target_ip
->cgid
);
2693 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2694 host_ip
->mode
= tswap32(target_ip
->mode
);
2696 host_ip
->mode
= tswap16(target_ip
->mode
);
2698 #if defined(TARGET_PPC)
2699 host_ip
->__seq
= tswap32(target_ip
->__seq
);
2701 host_ip
->__seq
= tswap16(target_ip
->__seq
);
2703 unlock_user_struct(target_sd
, target_addr
, 0);
2707 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
2708 struct ipc_perm
*host_ip
)
2710 struct target_ipc_perm
*target_ip
;
2711 struct target_semid_ds
*target_sd
;
2713 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2714 return -TARGET_EFAULT
;
2715 target_ip
= &(target_sd
->sem_perm
);
2716 target_ip
->__key
= tswap32(host_ip
->__key
);
2717 target_ip
->uid
= tswap32(host_ip
->uid
);
2718 target_ip
->gid
= tswap32(host_ip
->gid
);
2719 target_ip
->cuid
= tswap32(host_ip
->cuid
);
2720 target_ip
->cgid
= tswap32(host_ip
->cgid
);
2721 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2722 target_ip
->mode
= tswap32(host_ip
->mode
);
2724 target_ip
->mode
= tswap16(host_ip
->mode
);
2726 #if defined(TARGET_PPC)
2727 target_ip
->__seq
= tswap32(host_ip
->__seq
);
2729 target_ip
->__seq
= tswap16(host_ip
->__seq
);
2731 unlock_user_struct(target_sd
, target_addr
, 1);
2735 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
2736 abi_ulong target_addr
)
2738 struct target_semid_ds
*target_sd
;
2740 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2741 return -TARGET_EFAULT
;
2742 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
2743 return -TARGET_EFAULT
;
2744 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
2745 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
2746 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
2747 unlock_user_struct(target_sd
, target_addr
, 0);
2751 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
2752 struct semid_ds
*host_sd
)
2754 struct target_semid_ds
*target_sd
;
2756 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2757 return -TARGET_EFAULT
;
2758 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
2759 return -TARGET_EFAULT
;
2760 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
2761 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
2762 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
2763 unlock_user_struct(target_sd
, target_addr
, 1);
/* Guest-ABI layout of struct seminfo (IPC_INFO/SEM_INFO result).
 * NOTE(review): member list reconstructed from the fields copied in
 * host_to_target_seminfo() — confirm against upstream. */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
2780 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
2781 struct seminfo
*host_seminfo
)
2783 struct target_seminfo
*target_seminfo
;
2784 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
2785 return -TARGET_EFAULT
;
2786 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
2787 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
2788 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
2789 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
2790 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
2791 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
2792 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
2793 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
2794 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
2795 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
2796 unlock_user_struct(target_seminfo
, target_addr
, 1);
2802 struct semid_ds
*buf
;
2803 unsigned short *array
;
2804 struct seminfo
*__buf
;
2807 union target_semun
{
2814 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
2815 abi_ulong target_addr
)
2818 unsigned short *array
;
2820 struct semid_ds semid_ds
;
2823 semun
.buf
= &semid_ds
;
2825 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
2827 return get_errno(ret
);
2829 nsems
= semid_ds
.sem_nsems
;
2831 *host_array
= g_try_new(unsigned short, nsems
);
2833 return -TARGET_ENOMEM
;
2835 array
= lock_user(VERIFY_READ
, target_addr
,
2836 nsems
*sizeof(unsigned short), 1);
2838 g_free(*host_array
);
2839 return -TARGET_EFAULT
;
2842 for(i
=0; i
<nsems
; i
++) {
2843 __get_user((*host_array
)[i
], &array
[i
]);
2845 unlock_user(array
, target_addr
, 0);
2850 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
2851 unsigned short **host_array
)
2854 unsigned short *array
;
2856 struct semid_ds semid_ds
;
2859 semun
.buf
= &semid_ds
;
2861 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
2863 return get_errno(ret
);
2865 nsems
= semid_ds
.sem_nsems
;
2867 array
= lock_user(VERIFY_WRITE
, target_addr
,
2868 nsems
*sizeof(unsigned short), 0);
2870 return -TARGET_EFAULT
;
2872 for(i
=0; i
<nsems
; i
++) {
2873 __put_user((*host_array
)[i
], &array
[i
]);
2875 g_free(*host_array
);
2876 unlock_user(array
, target_addr
, 1);
2881 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
2882 abi_ulong target_arg
)
2884 union target_semun target_su
= { .buf
= target_arg
};
2886 struct semid_ds dsarg
;
2887 unsigned short *array
= NULL
;
2888 struct seminfo seminfo
;
2889 abi_long ret
= -TARGET_EINVAL
;
2896 /* In 64 bit cross-endian situations, we will erroneously pick up
2897 * the wrong half of the union for the "val" element. To rectify
2898 * this, the entire 8-byte structure is byteswapped, followed by
2899 * a swap of the 4 byte val field. In other cases, the data is
2900 * already in proper host byte order. */
2901 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
2902 target_su
.buf
= tswapal(target_su
.buf
);
2903 arg
.val
= tswap32(target_su
.val
);
2905 arg
.val
= target_su
.val
;
2907 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2911 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
2915 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2916 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
2923 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
2927 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2928 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
2934 arg
.__buf
= &seminfo
;
2935 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2936 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
2944 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
/* Guest-ABI layout of struct sembuf.
 * NOTE(review): sem_op/sem_flg types reconstructed from the kernel ABI
 * (short) — confirm against upstream. */
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};
2957 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
2958 abi_ulong target_addr
,
2961 struct target_sembuf
*target_sembuf
;
2964 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
2965 nsops
*sizeof(struct target_sembuf
), 1);
2967 return -TARGET_EFAULT
;
2969 for(i
=0; i
<nsops
; i
++) {
2970 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
2971 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
2972 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
2975 unlock_user(target_sembuf
, target_addr
, 0);
2980 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
2982 struct sembuf sops
[nsops
];
2984 if (target_to_host_sembuf(sops
, ptr
, nsops
))
2985 return -TARGET_EFAULT
;
2987 return get_errno(semop(semid
, sops
, nsops
));
2990 struct target_msqid_ds
2992 struct target_ipc_perm msg_perm
;
2993 abi_ulong msg_stime
;
2994 #if TARGET_ABI_BITS == 32
2995 abi_ulong __unused1
;
2997 abi_ulong msg_rtime
;
2998 #if TARGET_ABI_BITS == 32
2999 abi_ulong __unused2
;
3001 abi_ulong msg_ctime
;
3002 #if TARGET_ABI_BITS == 32
3003 abi_ulong __unused3
;
3005 abi_ulong __msg_cbytes
;
3007 abi_ulong msg_qbytes
;
3008 abi_ulong msg_lspid
;
3009 abi_ulong msg_lrpid
;
3010 abi_ulong __unused4
;
3011 abi_ulong __unused5
;
3014 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
3015 abi_ulong target_addr
)
3017 struct target_msqid_ds
*target_md
;
3019 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
3020 return -TARGET_EFAULT
;
3021 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
3022 return -TARGET_EFAULT
;
3023 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
3024 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
3025 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
3026 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
3027 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
3028 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
3029 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
3030 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
3031 unlock_user_struct(target_md
, target_addr
, 0);
3035 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
3036 struct msqid_ds
*host_md
)
3038 struct target_msqid_ds
*target_md
;
3040 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
3041 return -TARGET_EFAULT
;
3042 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
3043 return -TARGET_EFAULT
;
3044 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
3045 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
3046 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
3047 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
3048 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
3049 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
3050 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
3051 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
3052 unlock_user_struct(target_md
, target_addr
, 1);
/* Guest-ABI layout of struct msginfo (IPC_INFO/MSG_INFO result).
 * NOTE(review): int fields reconstructed from the fields copied in
 * host_to_target_msginfo() — confirm against upstream. */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
3067 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
3068 struct msginfo
*host_msginfo
)
3070 struct target_msginfo
*target_msginfo
;
3071 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
3072 return -TARGET_EFAULT
;
3073 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
3074 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
3075 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
3076 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
3077 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
3078 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
3079 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
3080 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
3081 unlock_user_struct(target_msginfo
, target_addr
, 1);
3085 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
3087 struct msqid_ds dsarg
;
3088 struct msginfo msginfo
;
3089 abi_long ret
= -TARGET_EINVAL
;
3097 if (target_to_host_msqid_ds(&dsarg
,ptr
))
3098 return -TARGET_EFAULT
;
3099 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
3100 if (host_to_target_msqid_ds(ptr
,&dsarg
))
3101 return -TARGET_EFAULT
;
3104 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
3108 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
3109 if (host_to_target_msginfo(ptr
, &msginfo
))
3110 return -TARGET_EFAULT
;
3117 struct target_msgbuf
{
3122 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
3123 ssize_t msgsz
, int msgflg
)
3125 struct target_msgbuf
*target_mb
;
3126 struct msgbuf
*host_mb
;
3130 return -TARGET_EINVAL
;
3133 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
3134 return -TARGET_EFAULT
;
3135 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
3137 unlock_user_struct(target_mb
, msgp
, 0);
3138 return -TARGET_ENOMEM
;
3140 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
3141 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
3142 ret
= get_errno(msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
3144 unlock_user_struct(target_mb
, msgp
, 0);
3149 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
3150 unsigned int msgsz
, abi_long msgtyp
,
3153 struct target_msgbuf
*target_mb
;
3155 struct msgbuf
*host_mb
;
3158 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
3159 return -TARGET_EFAULT
;
3161 host_mb
= g_malloc(msgsz
+sizeof(long));
3162 ret
= get_errno(msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
3165 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
3166 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
3167 if (!target_mtext
) {
3168 ret
= -TARGET_EFAULT
;
3171 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
3172 unlock_user(target_mtext
, target_mtext_addr
, ret
);
3175 target_mb
->mtype
= tswapal(host_mb
->mtype
);
3179 unlock_user_struct(target_mb
, msgp
, 1);
3184 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
3185 abi_ulong target_addr
)
3187 struct target_shmid_ds
*target_sd
;
3189 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3190 return -TARGET_EFAULT
;
3191 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
3192 return -TARGET_EFAULT
;
3193 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
3194 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
3195 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
3196 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
3197 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
3198 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
3199 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
3200 unlock_user_struct(target_sd
, target_addr
, 0);
3204 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
3205 struct shmid_ds
*host_sd
)
3207 struct target_shmid_ds
*target_sd
;
3209 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3210 return -TARGET_EFAULT
;
3211 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
3212 return -TARGET_EFAULT
;
3213 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
3214 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
3215 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
3216 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
3217 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
3218 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
3219 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
3220 unlock_user_struct(target_sd
, target_addr
, 1);
3224 struct target_shminfo
{
3232 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
3233 struct shminfo
*host_shminfo
)
3235 struct target_shminfo
*target_shminfo
;
3236 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
3237 return -TARGET_EFAULT
;
3238 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
3239 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
3240 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
3241 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
3242 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
3243 unlock_user_struct(target_shminfo
, target_addr
, 1);
3247 struct target_shm_info
{
3252 abi_ulong swap_attempts
;
3253 abi_ulong swap_successes
;
3256 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
3257 struct shm_info
*host_shm_info
)
3259 struct target_shm_info
*target_shm_info
;
3260 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
3261 return -TARGET_EFAULT
;
3262 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
3263 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
3264 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
3265 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
3266 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
3267 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
3268 unlock_user_struct(target_shm_info
, target_addr
, 1);
3272 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
3274 struct shmid_ds dsarg
;
3275 struct shminfo shminfo
;
3276 struct shm_info shm_info
;
3277 abi_long ret
= -TARGET_EINVAL
;
3285 if (target_to_host_shmid_ds(&dsarg
, buf
))
3286 return -TARGET_EFAULT
;
3287 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
3288 if (host_to_target_shmid_ds(buf
, &dsarg
))
3289 return -TARGET_EFAULT
;
3292 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
3293 if (host_to_target_shminfo(buf
, &shminfo
))
3294 return -TARGET_EFAULT
;
3297 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
3298 if (host_to_target_shm_info(buf
, &shm_info
))
3299 return -TARGET_EFAULT
;
3304 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
3311 static inline abi_ulong
do_shmat(int shmid
, abi_ulong shmaddr
, int shmflg
)
3315 struct shmid_ds shm_info
;
3318 /* find out the length of the shared memory segment */
3319 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
3320 if (is_error(ret
)) {
3321 /* can't get length, bail out */
3328 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
3330 abi_ulong mmap_start
;
3332 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
3334 if (mmap_start
== -1) {
3336 host_raddr
= (void *)-1;
3338 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
3341 if (host_raddr
== (void *)-1) {
3343 return get_errno((long)host_raddr
);
3345 raddr
=h2g((unsigned long)host_raddr
);
3347 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
3348 PAGE_VALID
| PAGE_READ
|
3349 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
3351 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
3352 if (!shm_regions
[i
].in_use
) {
3353 shm_regions
[i
].in_use
= true;
3354 shm_regions
[i
].start
= raddr
;
3355 shm_regions
[i
].size
= shm_info
.shm_segsz
;
3365 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
3369 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
3370 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
3371 shm_regions
[i
].in_use
= false;
3372 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
3377 return get_errno(shmdt(g2h(shmaddr
)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos.  */
static abi_long do_ipc(unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old-style msgrcv passes a kludge struct holding the
                 * real msgp and msgtyp. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
3486 /* kernel structure types definitions */
3488 #define STRUCT(name, ...) STRUCT_ ## name,
3489 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3491 #include "syscall_types.h"
3495 #undef STRUCT_SPECIAL
3497 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3498 #define STRUCT_SPECIAL(name)
3499 #include "syscall_types.h"
3501 #undef STRUCT_SPECIAL
3503 typedef struct IOCTLEntry IOCTLEntry
;
3505 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3506 int fd
, int cmd
, abi_long arg
);
3510 unsigned int host_cmd
;
3513 do_ioctl_fn
*do_ioctl
;
3514 const argtype arg_type
[5];
3517 #define IOC_R 0x0001
3518 #define IOC_W 0x0002
3519 #define IOC_RW (IOC_R | IOC_W)
3521 #define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
3612 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3613 int fd
, int cmd
, abi_long arg
)
3615 const argtype
*arg_type
= ie
->arg_type
;
3619 struct ifconf
*host_ifconf
;
3621 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
3622 int target_ifreq_size
;
3627 abi_long target_ifc_buf
;
3631 assert(arg_type
[0] == TYPE_PTR
);
3632 assert(ie
->access
== IOC_RW
);
3635 target_size
= thunk_type_size(arg_type
, 0);
3637 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3639 return -TARGET_EFAULT
;
3640 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3641 unlock_user(argptr
, arg
, 0);
3643 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
3644 target_ifc_len
= host_ifconf
->ifc_len
;
3645 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
3647 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
3648 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
3649 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
3651 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
3652 if (outbufsz
> MAX_STRUCT_SIZE
) {
3653 /* We can't fit all the extents into the fixed size buffer.
3654 * Allocate one that is large enough and use it instead.
3656 host_ifconf
= malloc(outbufsz
);
3658 return -TARGET_ENOMEM
;
3660 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
3663 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
3665 host_ifconf
->ifc_len
= host_ifc_len
;
3666 host_ifconf
->ifc_buf
= host_ifc_buf
;
3668 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, host_ifconf
));
3669 if (!is_error(ret
)) {
3670 /* convert host ifc_len to target ifc_len */
3672 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
3673 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
3674 host_ifconf
->ifc_len
= target_ifc_len
;
3676 /* restore target ifc_buf */
3678 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
3680 /* copy struct ifconf to target user */
3682 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3684 return -TARGET_EFAULT
;
3685 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
3686 unlock_user(argptr
, arg
, target_size
);
3688 /* copy ifreq[] to target user */
3690 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
3691 for (i
= 0; i
< nb_ifreq
; i
++) {
3692 thunk_convert(argptr
+ i
* target_ifreq_size
,
3693 host_ifc_buf
+ i
* sizeof(struct ifreq
),
3694 ifreq_arg_type
, THUNK_TARGET
);
3696 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
3706 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
3707 int cmd
, abi_long arg
)
3710 struct dm_ioctl
*host_dm
;
3711 abi_long guest_data
;
3712 uint32_t guest_data_size
;
3714 const argtype
*arg_type
= ie
->arg_type
;
3716 void *big_buf
= NULL
;
3720 target_size
= thunk_type_size(arg_type
, 0);
3721 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3723 ret
= -TARGET_EFAULT
;
3726 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3727 unlock_user(argptr
, arg
, 0);
3729 /* buf_temp is too small, so fetch things into a bigger buffer */
3730 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
3731 memcpy(big_buf
, buf_temp
, target_size
);
3735 guest_data
= arg
+ host_dm
->data_start
;
3736 if ((guest_data
- arg
) < 0) {
3740 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
3741 host_data
= (char*)host_dm
+ host_dm
->data_start
;
3743 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
3744 switch (ie
->host_cmd
) {
3746 case DM_LIST_DEVICES
:
3749 case DM_DEV_SUSPEND
:
3752 case DM_TABLE_STATUS
:
3753 case DM_TABLE_CLEAR
:
3755 case DM_LIST_VERSIONS
:
3759 case DM_DEV_SET_GEOMETRY
:
3760 /* data contains only strings */
3761 memcpy(host_data
, argptr
, guest_data_size
);
3764 memcpy(host_data
, argptr
, guest_data_size
);
3765 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
3769 void *gspec
= argptr
;
3770 void *cur_data
= host_data
;
3771 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
3772 int spec_size
= thunk_type_size(arg_type
, 0);
3775 for (i
= 0; i
< host_dm
->target_count
; i
++) {
3776 struct dm_target_spec
*spec
= cur_data
;
3780 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
3781 slen
= strlen((char*)gspec
+ spec_size
) + 1;
3783 spec
->next
= sizeof(*spec
) + slen
;
3784 strcpy((char*)&spec
[1], gspec
+ spec_size
);
3786 cur_data
+= spec
->next
;
3791 ret
= -TARGET_EINVAL
;
3792 unlock_user(argptr
, guest_data
, 0);
3795 unlock_user(argptr
, guest_data
, 0);
3797 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3798 if (!is_error(ret
)) {
3799 guest_data
= arg
+ host_dm
->data_start
;
3800 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
3801 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
3802 switch (ie
->host_cmd
) {
3807 case DM_DEV_SUSPEND
:
3810 case DM_TABLE_CLEAR
:
3812 case DM_DEV_SET_GEOMETRY
:
3813 /* no return data */
3815 case DM_LIST_DEVICES
:
3817 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
3818 uint32_t remaining_data
= guest_data_size
;
3819 void *cur_data
= argptr
;
3820 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
3821 int nl_size
= 12; /* can't use thunk_size due to alignment */
3824 uint32_t next
= nl
->next
;
3826 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
3828 if (remaining_data
< nl
->next
) {
3829 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3832 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
3833 strcpy(cur_data
+ nl_size
, nl
->name
);
3834 cur_data
+= nl
->next
;
3835 remaining_data
-= nl
->next
;
3839 nl
= (void*)nl
+ next
;
3844 case DM_TABLE_STATUS
:
3846 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
3847 void *cur_data
= argptr
;
3848 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
3849 int spec_size
= thunk_type_size(arg_type
, 0);
3852 for (i
= 0; i
< host_dm
->target_count
; i
++) {
3853 uint32_t next
= spec
->next
;
3854 int slen
= strlen((char*)&spec
[1]) + 1;
3855 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
3856 if (guest_data_size
< spec
->next
) {
3857 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3860 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
3861 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
3862 cur_data
= argptr
+ spec
->next
;
3863 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
3869 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
3870 int count
= *(uint32_t*)hdata
;
3871 uint64_t *hdev
= hdata
+ 8;
3872 uint64_t *gdev
= argptr
+ 8;
3875 *(uint32_t*)argptr
= tswap32(count
);
3876 for (i
= 0; i
< count
; i
++) {
3877 *gdev
= tswap64(*hdev
);
3883 case DM_LIST_VERSIONS
:
3885 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
3886 uint32_t remaining_data
= guest_data_size
;
3887 void *cur_data
= argptr
;
3888 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
3889 int vers_size
= thunk_type_size(arg_type
, 0);
3892 uint32_t next
= vers
->next
;
3894 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
3896 if (remaining_data
< vers
->next
) {
3897 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3900 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
3901 strcpy(cur_data
+ vers_size
, vers
->name
);
3902 cur_data
+= vers
->next
;
3903 remaining_data
-= vers
->next
;
3907 vers
= (void*)vers
+ next
;
3912 unlock_user(argptr
, guest_data
, 0);
3913 ret
= -TARGET_EINVAL
;
3916 unlock_user(argptr
, guest_data
, guest_data_size
);
3918 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3920 ret
= -TARGET_EFAULT
;
3923 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3924 unlock_user(argptr
, arg
, target_size
);
3931 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
3932 int cmd
, abi_long arg
)
3936 const argtype
*arg_type
= ie
->arg_type
;
3937 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
3940 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
3941 struct blkpg_partition host_part
;
3943 /* Read and convert blkpg */
3945 target_size
= thunk_type_size(arg_type
, 0);
3946 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3948 ret
= -TARGET_EFAULT
;
3951 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3952 unlock_user(argptr
, arg
, 0);
3954 switch (host_blkpg
->op
) {
3955 case BLKPG_ADD_PARTITION
:
3956 case BLKPG_DEL_PARTITION
:
3957 /* payload is struct blkpg_partition */
3960 /* Unknown opcode */
3961 ret
= -TARGET_EINVAL
;
3965 /* Read and convert blkpg->data */
3966 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
3967 target_size
= thunk_type_size(part_arg_type
, 0);
3968 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3970 ret
= -TARGET_EFAULT
;
3973 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
3974 unlock_user(argptr
, arg
, 0);
3976 /* Swizzle the data pointer to our local copy and call! */
3977 host_blkpg
->data
= &host_part
;
3978 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, host_blkpg
));
3984 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3985 int fd
, int cmd
, abi_long arg
)
3987 const argtype
*arg_type
= ie
->arg_type
;
3988 const StructEntry
*se
;
3989 const argtype
*field_types
;
3990 const int *dst_offsets
, *src_offsets
;
3993 abi_ulong
*target_rt_dev_ptr
;
3994 unsigned long *host_rt_dev_ptr
;
3998 assert(ie
->access
== IOC_W
);
3999 assert(*arg_type
== TYPE_PTR
);
4001 assert(*arg_type
== TYPE_STRUCT
);
4002 target_size
= thunk_type_size(arg_type
, 0);
4003 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4005 return -TARGET_EFAULT
;
4008 assert(*arg_type
== (int)STRUCT_rtentry
);
4009 se
= struct_entries
+ *arg_type
++;
4010 assert(se
->convert
[0] == NULL
);
4011 /* convert struct here to be able to catch rt_dev string */
4012 field_types
= se
->field_types
;
4013 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
4014 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
4015 for (i
= 0; i
< se
->nb_fields
; i
++) {
4016 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
4017 assert(*field_types
== TYPE_PTRVOID
);
4018 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
4019 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
4020 if (*target_rt_dev_ptr
!= 0) {
4021 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
4022 tswapal(*target_rt_dev_ptr
));
4023 if (!*host_rt_dev_ptr
) {
4024 unlock_user(argptr
, arg
, 0);
4025 return -TARGET_EFAULT
;
4028 *host_rt_dev_ptr
= 0;
4033 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
4034 argptr
+ src_offsets
[i
],
4035 field_types
, THUNK_HOST
);
4037 unlock_user(argptr
, arg
, 0);
4039 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
4040 if (*host_rt_dev_ptr
!= 0) {
4041 unlock_user((void *)*host_rt_dev_ptr
,
4042 *target_rt_dev_ptr
, 0);
4047 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4048 int fd
, int cmd
, abi_long arg
)
4050 int sig
= target_to_host_signal(arg
);
4051 return get_errno(ioctl(fd
, ie
->host_cmd
, sig
));
4054 static IOCTLEntry ioctl_entries
[] = {
4055 #define IOCTL(cmd, access, ...) \
4056 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
4057 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
4058 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
4063 /* ??? Implement proper locking for ioctls. */
4064 /* do_ioctl() Must return target values and target errnos. */
4065 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
4067 const IOCTLEntry
*ie
;
4068 const argtype
*arg_type
;
4070 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
4076 if (ie
->target_cmd
== 0) {
4077 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
4078 return -TARGET_ENOSYS
;
4080 if (ie
->target_cmd
== cmd
)
4084 arg_type
= ie
->arg_type
;
4086 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
4089 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
4092 switch(arg_type
[0]) {
4095 ret
= get_errno(ioctl(fd
, ie
->host_cmd
));
4099 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, arg
));
4103 target_size
= thunk_type_size(arg_type
, 0);
4104 switch(ie
->access
) {
4106 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
4107 if (!is_error(ret
)) {
4108 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4110 return -TARGET_EFAULT
;
4111 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
4112 unlock_user(argptr
, arg
, target_size
);
4116 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4118 return -TARGET_EFAULT
;
4119 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4120 unlock_user(argptr
, arg
, 0);
4121 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
4125 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4127 return -TARGET_EFAULT
;
4128 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4129 unlock_user(argptr
, arg
, 0);
4130 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
4131 if (!is_error(ret
)) {
4132 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4134 return -TARGET_EFAULT
;
4135 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
4136 unlock_user(argptr
, arg
, target_size
);
4142 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
4143 (long)cmd
, arg_type
[0]);
4144 ret
= -TARGET_ENOSYS
;
4150 static const bitmask_transtbl iflag_tbl
[] = {
4151 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
4152 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
4153 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
4154 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
4155 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
4156 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
4157 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
4158 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
4159 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
4160 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
4161 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
4162 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
4163 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
4164 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
4168 static const bitmask_transtbl oflag_tbl
[] = {
4169 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
4170 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
4171 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
4172 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
4173 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
4174 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
4175 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
4176 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
4177 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
4178 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
4179 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
4180 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
4181 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
4182 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
4183 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
4184 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
4185 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
4186 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
4187 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
4188 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
4189 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
4190 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
4191 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
4192 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
4196 static const bitmask_transtbl cflag_tbl
[] = {
4197 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
4198 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
4199 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
4200 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
4201 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
4202 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
4203 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
4204 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
4205 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
4206 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
4207 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
4208 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
4209 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
4210 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
4211 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
4212 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
4213 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
4214 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
4215 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
4216 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
4217 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
4218 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
4219 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
4220 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
4221 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
4222 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
4223 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
4224 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
4225 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
4226 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
4227 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
4231 static const bitmask_transtbl lflag_tbl
[] = {
4232 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
4233 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
4234 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
4235 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
4236 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
4237 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
4238 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
4239 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
4240 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
4241 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
4242 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
4243 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
4244 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
4245 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
4246 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
4250 static void target_to_host_termios (void *dst
, const void *src
)
4252 struct host_termios
*host
= dst
;
4253 const struct target_termios
*target
= src
;
4256 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
4258 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
4260 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
4262 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
4263 host
->c_line
= target
->c_line
;
4265 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
4266 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
4267 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
4268 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
4269 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
4270 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
4271 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
4272 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
4273 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
4274 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
4275 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
4276 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
4277 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
4278 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
4279 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
4280 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
4281 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
4282 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
4285 static void host_to_target_termios (void *dst
, const void *src
)
4287 struct target_termios
*target
= dst
;
4288 const struct host_termios
*host
= src
;
4291 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
4293 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
4295 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
4297 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
4298 target
->c_line
= host
->c_line
;
4300 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
4301 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
4302 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
4303 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
4304 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
4305 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
4306 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
4307 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
4308 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
4309 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
4310 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
4311 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
4312 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
4313 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
4314 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
4315 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
4316 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
4317 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
4320 static const StructEntry struct_termios_def
= {
4321 .convert
= { host_to_target_termios
, target_to_host_termios
},
4322 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
4323 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
4326 static bitmask_transtbl mmap_flags_tbl
[] = {
4327 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
4328 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
4329 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
4330 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
, MAP_ANONYMOUS
, MAP_ANONYMOUS
},
4331 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
, MAP_GROWSDOWN
, MAP_GROWSDOWN
},
4332 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
, MAP_DENYWRITE
, MAP_DENYWRITE
},
4333 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
, MAP_EXECUTABLE
, MAP_EXECUTABLE
},
4334 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
4335 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
, MAP_NORESERVE
,
4340 #if defined(TARGET_I386)
4342 /* NOTE: there is really one LDT for all the threads */
4343 static uint8_t *ldt_table
;
4345 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
4352 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
4353 if (size
> bytecount
)
4355 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
4357 return -TARGET_EFAULT
;
4358 /* ??? Should this by byteswapped? */
4359 memcpy(p
, ldt_table
, size
);
4360 unlock_user(p
, ptr
, size
);
4364 /* XXX: add locking support */
4365 static abi_long
write_ldt(CPUX86State
*env
,
4366 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
4368 struct target_modify_ldt_ldt_s ldt_info
;
4369 struct target_modify_ldt_ldt_s
*target_ldt_info
;
4370 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
4371 int seg_not_present
, useable
, lm
;
4372 uint32_t *lp
, entry_1
, entry_2
;
4374 if (bytecount
!= sizeof(ldt_info
))
4375 return -TARGET_EINVAL
;
4376 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
4377 return -TARGET_EFAULT
;
4378 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
4379 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
4380 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
4381 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
4382 unlock_user_struct(target_ldt_info
, ptr
, 0);
4384 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
4385 return -TARGET_EINVAL
;
4386 seg_32bit
= ldt_info
.flags
& 1;
4387 contents
= (ldt_info
.flags
>> 1) & 3;
4388 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
4389 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
4390 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
4391 useable
= (ldt_info
.flags
>> 6) & 1;
4395 lm
= (ldt_info
.flags
>> 7) & 1;
4397 if (contents
== 3) {
4399 return -TARGET_EINVAL
;
4400 if (seg_not_present
== 0)
4401 return -TARGET_EINVAL
;
4403 /* allocate the LDT */
4405 env
->ldt
.base
= target_mmap(0,
4406 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
4407 PROT_READ
|PROT_WRITE
,
4408 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
4409 if (env
->ldt
.base
== -1)
4410 return -TARGET_ENOMEM
;
4411 memset(g2h(env
->ldt
.base
), 0,
4412 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
4413 env
->ldt
.limit
= 0xffff;
4414 ldt_table
= g2h(env
->ldt
.base
);
4417 /* NOTE: same code as Linux kernel */
4418 /* Allow LDTs to be cleared by the user. */
4419 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
4422 read_exec_only
== 1 &&
4424 limit_in_pages
== 0 &&
4425 seg_not_present
== 1 &&
4433 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
4434 (ldt_info
.limit
& 0x0ffff);
4435 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
4436 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
4437 (ldt_info
.limit
& 0xf0000) |
4438 ((read_exec_only
^ 1) << 9) |
4440 ((seg_not_present
^ 1) << 15) |
4442 (limit_in_pages
<< 23) |
4446 entry_2
|= (useable
<< 20);
4448 /* Install the new entry ... */
4450 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
4451 lp
[0] = tswap32(entry_1
);
4452 lp
[1] = tswap32(entry_2
);
4456 /* specific and weird i386 syscalls */
4457 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
4458 unsigned long bytecount
)
4464 ret
= read_ldt(ptr
, bytecount
);
4467 ret
= write_ldt(env
, ptr
, bytecount
, 1);
4470 ret
= write_ldt(env
, ptr
, bytecount
, 0);
4473 ret
= -TARGET_ENOSYS
;
4479 #if defined(TARGET_I386) && defined(TARGET_ABI32)
4480 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
4482 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
4483 struct target_modify_ldt_ldt_s ldt_info
;
4484 struct target_modify_ldt_ldt_s
*target_ldt_info
;
4485 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
4486 int seg_not_present
, useable
, lm
;
4487 uint32_t *lp
, entry_1
, entry_2
;
4490 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
4491 if (!target_ldt_info
)
4492 return -TARGET_EFAULT
;
4493 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
4494 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
4495 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
4496 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
4497 if (ldt_info
.entry_number
== -1) {
4498 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
4499 if (gdt_table
[i
] == 0) {
4500 ldt_info
.entry_number
= i
;
4501 target_ldt_info
->entry_number
= tswap32(i
);
4506 unlock_user_struct(target_ldt_info
, ptr
, 1);
4508 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
4509 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
4510 return -TARGET_EINVAL
;
4511 seg_32bit
= ldt_info
.flags
& 1;
4512 contents
= (ldt_info
.flags
>> 1) & 3;
4513 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
4514 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
4515 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
4516 useable
= (ldt_info
.flags
>> 6) & 1;
4520 lm
= (ldt_info
.flags
>> 7) & 1;
4523 if (contents
== 3) {
4524 if (seg_not_present
== 0)
4525 return -TARGET_EINVAL
;
4528 /* NOTE: same code as Linux kernel */
4529 /* Allow LDTs to be cleared by the user. */
4530 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
4531 if ((contents
== 0 &&
4532 read_exec_only
== 1 &&
4534 limit_in_pages
== 0 &&
4535 seg_not_present
== 1 &&
4543 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
4544 (ldt_info
.limit
& 0x0ffff);
4545 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
4546 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
4547 (ldt_info
.limit
& 0xf0000) |
4548 ((read_exec_only
^ 1) << 9) |
4550 ((seg_not_present
^ 1) << 15) |
4552 (limit_in_pages
<< 23) |
4557 /* Install the new entry ... */
4559 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
4560 lp
[0] = tswap32(entry_1
);
4561 lp
[1] = tswap32(entry_2
);
4565 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
4567 struct target_modify_ldt_ldt_s
*target_ldt_info
;
4568 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
4569 uint32_t base_addr
, limit
, flags
;
4570 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
4571 int seg_not_present
, useable
, lm
;
4572 uint32_t *lp
, entry_1
, entry_2
;
4574 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
4575 if (!target_ldt_info
)
4576 return -TARGET_EFAULT
;
4577 idx
= tswap32(target_ldt_info
->entry_number
);
4578 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
4579 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
4580 unlock_user_struct(target_ldt_info
, ptr
, 1);
4581 return -TARGET_EINVAL
;
4583 lp
= (uint32_t *)(gdt_table
+ idx
);
4584 entry_1
= tswap32(lp
[0]);
4585 entry_2
= tswap32(lp
[1]);
4587 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
4588 contents
= (entry_2
>> 10) & 3;
4589 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
4590 seg_32bit
= (entry_2
>> 22) & 1;
4591 limit_in_pages
= (entry_2
>> 23) & 1;
4592 useable
= (entry_2
>> 20) & 1;
4596 lm
= (entry_2
>> 21) & 1;
4598 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
4599 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
4600 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
4601 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
4602 base_addr
= (entry_1
>> 16) |
4603 (entry_2
& 0xff000000) |
4604 ((entry_2
& 0xff) << 16);
4605 target_ldt_info
->base_addr
= tswapal(base_addr
);
4606 target_ldt_info
->limit
= tswap32(limit
);
4607 target_ldt_info
->flags
= tswap32(flags
);
4608 unlock_user_struct(target_ldt_info
, ptr
, 1);
4611 #endif /* TARGET_I386 && TARGET_ABI32 */
4613 #ifndef TARGET_ABI32
4614 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
4621 case TARGET_ARCH_SET_GS
:
4622 case TARGET_ARCH_SET_FS
:
4623 if (code
== TARGET_ARCH_SET_GS
)
4627 cpu_x86_load_seg(env
, idx
, 0);
4628 env
->segs
[idx
].base
= addr
;
4630 case TARGET_ARCH_GET_GS
:
4631 case TARGET_ARCH_GET_FS
:
4632 if (code
== TARGET_ARCH_GET_GS
)
4636 val
= env
->segs
[idx
].base
;
4637 if (put_user(val
, addr
, abi_ulong
))
4638 ret
= -TARGET_EFAULT
;
4641 ret
= -TARGET_EINVAL
;
4648 #endif /* defined(TARGET_I386) */
4650 #define NEW_STACK_SIZE 0x40000
4653 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
4656 pthread_mutex_t mutex
;
4657 pthread_cond_t cond
;
4660 abi_ulong child_tidptr
;
4661 abi_ulong parent_tidptr
;
4665 static void *clone_func(void *arg
)
4667 new_thread_info
*info
= arg
;
4672 rcu_register_thread();
4674 cpu
= ENV_GET_CPU(env
);
4676 ts
= (TaskState
*)cpu
->opaque
;
4677 info
->tid
= gettid();
4678 cpu
->host_tid
= info
->tid
;
4680 if (info
->child_tidptr
)
4681 put_user_u32(info
->tid
, info
->child_tidptr
);
4682 if (info
->parent_tidptr
)
4683 put_user_u32(info
->tid
, info
->parent_tidptr
);
4684 /* Enable signals. */
4685 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
4686 /* Signal to the parent that we're ready. */
4687 pthread_mutex_lock(&info
->mutex
);
4688 pthread_cond_broadcast(&info
->cond
);
4689 pthread_mutex_unlock(&info
->mutex
);
4690 /* Wait until the parent has finshed initializing the tls state. */
4691 pthread_mutex_lock(&clone_lock
);
4692 pthread_mutex_unlock(&clone_lock
);
4698 /* do_fork() Must return host values and target errnos (unlike most
4699 do_*() functions). */
4700 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
4701 abi_ulong parent_tidptr
, target_ulong newtls
,
4702 abi_ulong child_tidptr
)
4704 CPUState
*cpu
= ENV_GET_CPU(env
);
4708 CPUArchState
*new_env
;
4709 unsigned int nptl_flags
;
4712 /* Emulate vfork() with fork() */
4713 if (flags
& CLONE_VFORK
)
4714 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
4716 if (flags
& CLONE_VM
) {
4717 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
4718 new_thread_info info
;
4719 pthread_attr_t attr
;
4721 ts
= g_new0(TaskState
, 1);
4722 init_task_state(ts
);
4723 /* we create a new CPU instance. */
4724 new_env
= cpu_copy(env
);
4725 /* Init regs that differ from the parent. */
4726 cpu_clone_regs(new_env
, newsp
);
4727 new_cpu
= ENV_GET_CPU(new_env
);
4728 new_cpu
->opaque
= ts
;
4729 ts
->bprm
= parent_ts
->bprm
;
4730 ts
->info
= parent_ts
->info
;
4732 flags
&= ~CLONE_NPTL_FLAGS2
;
4734 if (nptl_flags
& CLONE_CHILD_CLEARTID
) {
4735 ts
->child_tidptr
= child_tidptr
;
4738 if (nptl_flags
& CLONE_SETTLS
)
4739 cpu_set_tls (new_env
, newtls
);
4741 /* Grab a mutex so that thread setup appears atomic. */
4742 pthread_mutex_lock(&clone_lock
);
4744 memset(&info
, 0, sizeof(info
));
4745 pthread_mutex_init(&info
.mutex
, NULL
);
4746 pthread_mutex_lock(&info
.mutex
);
4747 pthread_cond_init(&info
.cond
, NULL
);
4749 if (nptl_flags
& CLONE_CHILD_SETTID
)
4750 info
.child_tidptr
= child_tidptr
;
4751 if (nptl_flags
& CLONE_PARENT_SETTID
)
4752 info
.parent_tidptr
= parent_tidptr
;
4754 ret
= pthread_attr_init(&attr
);
4755 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
4756 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
4757 /* It is not safe to deliver signals until the child has finished
4758 initializing, so temporarily block all signals. */
4759 sigfillset(&sigmask
);
4760 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
4762 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
4763 /* TODO: Free new CPU state if thread creation failed. */
4765 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
4766 pthread_attr_destroy(&attr
);
4768 /* Wait for the child to initialize. */
4769 pthread_cond_wait(&info
.cond
, &info
.mutex
);
4771 if (flags
& CLONE_PARENT_SETTID
)
4772 put_user_u32(ret
, parent_tidptr
);
4776 pthread_mutex_unlock(&info
.mutex
);
4777 pthread_cond_destroy(&info
.cond
);
4778 pthread_mutex_destroy(&info
.mutex
);
4779 pthread_mutex_unlock(&clone_lock
);
4781 /* if no CLONE_VM, we consider it is a fork */
4782 if ((flags
& ~(CSIGNAL
| CLONE_NPTL_FLAGS2
)) != 0) {
4783 return -TARGET_EINVAL
;
4788 /* Child Process. */
4790 cpu_clone_regs(env
, newsp
);
4792 /* There is a race condition here. The parent process could
4793 theoretically read the TID in the child process before the child
4794 tid is set. This would require using either ptrace
4795 (not implemented) or having *_tidptr to point at a shared memory
4796 mapping. We can't repeat the spinlock hack used above because
4797 the child process gets its own copy of the lock. */
4798 if (flags
& CLONE_CHILD_SETTID
)
4799 put_user_u32(gettid(), child_tidptr
);
4800 if (flags
& CLONE_PARENT_SETTID
)
4801 put_user_u32(gettid(), parent_tidptr
);
4802 ts
= (TaskState
*)cpu
->opaque
;
4803 if (flags
& CLONE_SETTLS
)
4804 cpu_set_tls (env
, newtls
);
4805 if (flags
& CLONE_CHILD_CLEARTID
)
4806 ts
->child_tidptr
= child_tidptr
;
4814 /* warning : doesn't handle linux specific flags... */
4815 static int target_to_host_fcntl_cmd(int cmd
)
4818 case TARGET_F_DUPFD
:
4819 case TARGET_F_GETFD
:
4820 case TARGET_F_SETFD
:
4821 case TARGET_F_GETFL
:
4822 case TARGET_F_SETFL
:
4824 case TARGET_F_GETLK
:
4826 case TARGET_F_SETLK
:
4828 case TARGET_F_SETLKW
:
4830 case TARGET_F_GETOWN
:
4832 case TARGET_F_SETOWN
:
4834 case TARGET_F_GETSIG
:
4836 case TARGET_F_SETSIG
:
4838 #if TARGET_ABI_BITS == 32
4839 case TARGET_F_GETLK64
:
4841 case TARGET_F_SETLK64
:
4843 case TARGET_F_SETLKW64
:
4846 case TARGET_F_SETLEASE
:
4848 case TARGET_F_GETLEASE
:
4850 #ifdef F_DUPFD_CLOEXEC
4851 case TARGET_F_DUPFD_CLOEXEC
:
4852 return F_DUPFD_CLOEXEC
;
4854 case TARGET_F_NOTIFY
:
4857 case TARGET_F_GETOWN_EX
:
4861 case TARGET_F_SETOWN_EX
:
4865 return -TARGET_EINVAL
;
4867 return -TARGET_EINVAL
;
4870 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
4871 static const bitmask_transtbl flock_tbl
[] = {
4872 TRANSTBL_CONVERT(F_RDLCK
),
4873 TRANSTBL_CONVERT(F_WRLCK
),
4874 TRANSTBL_CONVERT(F_UNLCK
),
4875 TRANSTBL_CONVERT(F_EXLCK
),
4876 TRANSTBL_CONVERT(F_SHLCK
),
4880 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
4883 struct target_flock
*target_fl
;
4884 struct flock64 fl64
;
4885 struct target_flock64
*target_fl64
;
4887 struct f_owner_ex fox
;
4888 struct target_f_owner_ex
*target_fox
;
4891 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
4893 if (host_cmd
== -TARGET_EINVAL
)
4897 case TARGET_F_GETLK
:
4898 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
4899 return -TARGET_EFAULT
;
4901 target_to_host_bitmask(tswap16(target_fl
->l_type
), flock_tbl
);
4902 fl
.l_whence
= tswap16(target_fl
->l_whence
);
4903 fl
.l_start
= tswapal(target_fl
->l_start
);
4904 fl
.l_len
= tswapal(target_fl
->l_len
);
4905 fl
.l_pid
= tswap32(target_fl
->l_pid
);
4906 unlock_user_struct(target_fl
, arg
, 0);
4907 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
4909 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg
, 0))
4910 return -TARGET_EFAULT
;
4912 host_to_target_bitmask(tswap16(fl
.l_type
), flock_tbl
);
4913 target_fl
->l_whence
= tswap16(fl
.l_whence
);
4914 target_fl
->l_start
= tswapal(fl
.l_start
);
4915 target_fl
->l_len
= tswapal(fl
.l_len
);
4916 target_fl
->l_pid
= tswap32(fl
.l_pid
);
4917 unlock_user_struct(target_fl
, arg
, 1);
4921 case TARGET_F_SETLK
:
4922 case TARGET_F_SETLKW
:
4923 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
4924 return -TARGET_EFAULT
;
4926 target_to_host_bitmask(tswap16(target_fl
->l_type
), flock_tbl
);
4927 fl
.l_whence
= tswap16(target_fl
->l_whence
);
4928 fl
.l_start
= tswapal(target_fl
->l_start
);
4929 fl
.l_len
= tswapal(target_fl
->l_len
);
4930 fl
.l_pid
= tswap32(target_fl
->l_pid
);
4931 unlock_user_struct(target_fl
, arg
, 0);
4932 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
4935 case TARGET_F_GETLK64
:
4936 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
4937 return -TARGET_EFAULT
;
4939 target_to_host_bitmask(tswap16(target_fl64
->l_type
), flock_tbl
) >> 1;
4940 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
4941 fl64
.l_start
= tswap64(target_fl64
->l_start
);
4942 fl64
.l_len
= tswap64(target_fl64
->l_len
);
4943 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
4944 unlock_user_struct(target_fl64
, arg
, 0);
4945 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
4947 if (!lock_user_struct(VERIFY_WRITE
, target_fl64
, arg
, 0))
4948 return -TARGET_EFAULT
;
4949 target_fl64
->l_type
=
4950 host_to_target_bitmask(tswap16(fl64
.l_type
), flock_tbl
) >> 1;
4951 target_fl64
->l_whence
= tswap16(fl64
.l_whence
);
4952 target_fl64
->l_start
= tswap64(fl64
.l_start
);
4953 target_fl64
->l_len
= tswap64(fl64
.l_len
);
4954 target_fl64
->l_pid
= tswap32(fl64
.l_pid
);
4955 unlock_user_struct(target_fl64
, arg
, 1);
4958 case TARGET_F_SETLK64
:
4959 case TARGET_F_SETLKW64
:
4960 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
4961 return -TARGET_EFAULT
;
4963 target_to_host_bitmask(tswap16(target_fl64
->l_type
), flock_tbl
) >> 1;
4964 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
4965 fl64
.l_start
= tswap64(target_fl64
->l_start
);
4966 fl64
.l_len
= tswap64(target_fl64
->l_len
);
4967 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
4968 unlock_user_struct(target_fl64
, arg
, 0);
4969 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
4972 case TARGET_F_GETFL
:
4973 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
4975 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
4979 case TARGET_F_SETFL
:
4980 ret
= get_errno(fcntl(fd
, host_cmd
, target_to_host_bitmask(arg
, fcntl_flags_tbl
)));
4984 case TARGET_F_GETOWN_EX
:
4985 ret
= get_errno(fcntl(fd
, host_cmd
, &fox
));
4987 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
4988 return -TARGET_EFAULT
;
4989 target_fox
->type
= tswap32(fox
.type
);
4990 target_fox
->pid
= tswap32(fox
.pid
);
4991 unlock_user_struct(target_fox
, arg
, 1);
4997 case TARGET_F_SETOWN_EX
:
4998 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
4999 return -TARGET_EFAULT
;
5000 fox
.type
= tswap32(target_fox
->type
);
5001 fox
.pid
= tswap32(target_fox
->pid
);
5002 unlock_user_struct(target_fox
, arg
, 0);
5003 ret
= get_errno(fcntl(fd
, host_cmd
, &fox
));
5007 case TARGET_F_SETOWN
:
5008 case TARGET_F_GETOWN
:
5009 case TARGET_F_SETSIG
:
5010 case TARGET_F_GETSIG
:
5011 case TARGET_F_SETLEASE
:
5012 case TARGET_F_GETLEASE
:
5013 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
5017 ret
= get_errno(fcntl(fd
, cmd
, arg
));
5025 static inline int high2lowuid(int uid
)
5033 static inline int high2lowgid(int gid
)
5041 static inline int low2highuid(int uid
)
5043 if ((int16_t)uid
== -1)
5049 static inline int low2highgid(int gid
)
5051 if ((int16_t)gid
== -1)
5056 static inline int tswapid(int id
)
5061 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
5063 #else /* !USE_UID16 */
5064 static inline int high2lowuid(int uid
)
5068 static inline int high2lowgid(int gid
)
5072 static inline int low2highuid(int uid
)
5076 static inline int low2highgid(int gid
)
5080 static inline int tswapid(int id
)
5085 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
5087 #endif /* USE_UID16 */
5089 void syscall_init(void)
5092 const argtype
*arg_type
;
5096 thunk_init(STRUCT_MAX
);
5098 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
5099 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
5100 #include "syscall_types.h"
5102 #undef STRUCT_SPECIAL
5104 /* Build target_to_host_errno_table[] table from
5105 * host_to_target_errno_table[]. */
5106 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
5107 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
5110 /* we patch the ioctl size if necessary. We rely on the fact that
5111 no ioctl has all the bits at '1' in the size field */
5113 while (ie
->target_cmd
!= 0) {
5114 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
5115 TARGET_IOC_SIZEMASK
) {
5116 arg_type
= ie
->arg_type
;
5117 if (arg_type
[0] != TYPE_PTR
) {
5118 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
5123 size
= thunk_type_size(arg_type
, 0);
5124 ie
->target_cmd
= (ie
->target_cmd
&
5125 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
5126 (size
<< TARGET_IOC_SIZESHIFT
);
5129 /* automatic consistency check if same arch */
5130 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
5131 (defined(__x86_64__) && defined(TARGET_X86_64))
5132 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
5133 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
5134 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#if TARGET_ABI_BITS == 32
/* Combine two 32-bit register halves into a 64-bit file offset, honouring
 * the target's register-pair endianness.
 */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* On 64-bit ABIs the offset already arrives in one register; the second
 * word is ignored.
 */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* truncate64(path, lo, hi): on ABIs that pass 64-bit values in aligned
 * register pairs the pair is shifted up by one argument slot, so shuffle
 * arg3/arg4 into arg2/arg3 first.
 */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* ftruncate64(fd, lo, hi): same register-pair alignment shuffle as
 * target_truncate64() above.
 */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
5185 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
5186 abi_ulong target_addr
)
5188 struct target_timespec
*target_ts
;
5190 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
5191 return -TARGET_EFAULT
;
5192 host_ts
->tv_sec
= tswapal(target_ts
->tv_sec
);
5193 host_ts
->tv_nsec
= tswapal(target_ts
->tv_nsec
);
5194 unlock_user_struct(target_ts
, target_addr
, 0);
5198 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
5199 struct timespec
*host_ts
)
5201 struct target_timespec
*target_ts
;
5203 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
5204 return -TARGET_EFAULT
;
5205 target_ts
->tv_sec
= tswapal(host_ts
->tv_sec
);
5206 target_ts
->tv_nsec
= tswapal(host_ts
->tv_nsec
);
5207 unlock_user_struct(target_ts
, target_addr
, 1);
5211 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
5212 abi_ulong target_addr
)
5214 struct target_itimerspec
*target_itspec
;
5216 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
5217 return -TARGET_EFAULT
;
5220 host_itspec
->it_interval
.tv_sec
=
5221 tswapal(target_itspec
->it_interval
.tv_sec
);
5222 host_itspec
->it_interval
.tv_nsec
=
5223 tswapal(target_itspec
->it_interval
.tv_nsec
);
5224 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
5225 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
5227 unlock_user_struct(target_itspec
, target_addr
, 1);
5231 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
5232 struct itimerspec
*host_its
)
5234 struct target_itimerspec
*target_itspec
;
5236 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
5237 return -TARGET_EFAULT
;
5240 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
5241 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
5243 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
5244 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
5246 unlock_user_struct(target_itspec
, target_addr
, 0);
5250 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
5251 abi_ulong target_addr
)
5253 struct target_sigevent
*target_sevp
;
5255 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
5256 return -TARGET_EFAULT
;
5259 /* This union is awkward on 64 bit systems because it has a 32 bit
5260 * integer and a pointer in it; we follow the conversion approach
5261 * used for handling sigval types in signal.c so the guest should get
5262 * the correct value back even if we did a 64 bit byteswap and it's
5263 * using the 32 bit integer.
5265 host_sevp
->sigev_value
.sival_ptr
=
5266 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
5267 host_sevp
->sigev_signo
=
5268 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
5269 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
5270 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
5272 unlock_user_struct(target_sevp
, target_addr
, 1);
#if defined(TARGET_NR_mlockall)
/* Translate the guest's mlockall() flag bits to the host's MCL_* bits. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }

    return result;
}
#endif
5291 static inline abi_long
host_to_target_stat64(void *cpu_env
,
5292 abi_ulong target_addr
,
5293 struct stat
*host_st
)
5295 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
5296 if (((CPUARMState
*)cpu_env
)->eabi
) {
5297 struct target_eabi_stat64
*target_st
;
5299 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
5300 return -TARGET_EFAULT
;
5301 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
5302 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
5303 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
5304 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
5305 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
5307 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
5308 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
5309 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
5310 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
5311 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
5312 __put_user(host_st
->st_size
, &target_st
->st_size
);
5313 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
5314 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
5315 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
5316 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
5317 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
5318 unlock_user_struct(target_st
, target_addr
, 1);
5322 #if defined(TARGET_HAS_STRUCT_STAT64)
5323 struct target_stat64
*target_st
;
5325 struct target_stat
*target_st
;
5328 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
5329 return -TARGET_EFAULT
;
5330 memset(target_st
, 0, sizeof(*target_st
));
5331 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
5332 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
5333 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
5334 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
5336 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
5337 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
5338 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
5339 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
5340 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
5341 /* XXX: better use of kernel struct */
5342 __put_user(host_st
->st_size
, &target_st
->st_size
);
5343 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
5344 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
5345 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
5346 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
5347 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
5348 unlock_user_struct(target_st
, target_addr
, 1);
5354 /* ??? Using host futex calls even when target atomic operations
5355 are not really atomic probably breaks things. However implementing
5356 futexes locally would make futexes shared between multiple processes
5357 tricky. However they're probably useless because guest atomic
5358 operations won't work either. */
5359 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
5360 target_ulong uaddr2
, int val3
)
5362 struct timespec ts
, *pts
;
5365 /* ??? We assume FUTEX_* constants are the same on both host
5367 #ifdef FUTEX_CMD_MASK
5368 base_op
= op
& FUTEX_CMD_MASK
;
5374 case FUTEX_WAIT_BITSET
:
5377 target_to_host_timespec(pts
, timeout
);
5381 return get_errno(sys_futex(g2h(uaddr
), op
, tswap32(val
),
5384 return get_errno(sys_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
5386 return get_errno(sys_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
5388 case FUTEX_CMP_REQUEUE
:
5390 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
5391 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
5392 But the prototype takes a `struct timespec *'; insert casts
5393 to satisfy the compiler. We do not need to tswap TIMEOUT
5394 since it's not compared to guest memory. */
5395 pts
= (struct timespec
*)(uintptr_t) timeout
;
5396 return get_errno(sys_futex(g2h(uaddr
), op
, val
, pts
,
5398 (base_op
== FUTEX_CMP_REQUEUE
5402 return -TARGET_ENOSYS
;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate name_to_handle_at(2): read the guest-supplied handle_bytes,
 * call the host syscall into a host-side buffer, then copy the opaque
 * handle back with handle_bytes/handle_type byte-swapped.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate open_by_handle_at(2): duplicate the guest handle into host
 * memory (fixing byte order of the header fields) and translate the open
 * flags before calling the host syscall.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
5493 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
5495 /* signalfd siginfo conversion */
5498 host_to_target_signalfd_siginfo(struct signalfd_siginfo
*tinfo
,
5499 const struct signalfd_siginfo
*info
)
5501 int sig
= host_to_target_signal(info
->ssi_signo
);
5503 /* linux/signalfd.h defines a ssi_addr_lsb
5504 * not defined in sys/signalfd.h but used by some kernels
5507 #ifdef BUS_MCEERR_AO
5508 if (tinfo
->ssi_signo
== SIGBUS
&&
5509 (tinfo
->ssi_code
== BUS_MCEERR_AR
||
5510 tinfo
->ssi_code
== BUS_MCEERR_AO
)) {
5511 uint16_t *ssi_addr_lsb
= (uint16_t *)(&info
->ssi_addr
+ 1);
5512 uint16_t *tssi_addr_lsb
= (uint16_t *)(&tinfo
->ssi_addr
+ 1);
5513 *tssi_addr_lsb
= tswap16(*ssi_addr_lsb
);
5517 tinfo
->ssi_signo
= tswap32(sig
);
5518 tinfo
->ssi_errno
= tswap32(tinfo
->ssi_errno
);
5519 tinfo
->ssi_code
= tswap32(info
->ssi_code
);
5520 tinfo
->ssi_pid
= tswap32(info
->ssi_pid
);
5521 tinfo
->ssi_uid
= tswap32(info
->ssi_uid
);
5522 tinfo
->ssi_fd
= tswap32(info
->ssi_fd
);
5523 tinfo
->ssi_tid
= tswap32(info
->ssi_tid
);
5524 tinfo
->ssi_band
= tswap32(info
->ssi_band
);
5525 tinfo
->ssi_overrun
= tswap32(info
->ssi_overrun
);
5526 tinfo
->ssi_trapno
= tswap32(info
->ssi_trapno
);
5527 tinfo
->ssi_status
= tswap32(info
->ssi_status
);
5528 tinfo
->ssi_int
= tswap32(info
->ssi_int
);
5529 tinfo
->ssi_ptr
= tswap64(info
->ssi_ptr
);
5530 tinfo
->ssi_utime
= tswap64(info
->ssi_utime
);
5531 tinfo
->ssi_stime
= tswap64(info
->ssi_stime
);
5532 tinfo
->ssi_addr
= tswap64(info
->ssi_addr
);
5535 static abi_long
host_to_target_data_signalfd(void *buf
, size_t len
)
5539 for (i
= 0; i
< len
; i
+= sizeof(struct signalfd_siginfo
)) {
5540 host_to_target_signalfd_siginfo(buf
+ i
, buf
+ i
);
5546 static TargetFdTrans target_signalfd_trans
= {
5547 .host_to_target_data
= host_to_target_data_signalfd
,
5550 static abi_long
do_signalfd4(int fd
, abi_long mask
, int flags
)
5553 target_sigset_t
*target_mask
;
5557 if (flags
& ~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
)) {
5558 return -TARGET_EINVAL
;
5560 if (!lock_user_struct(VERIFY_READ
, target_mask
, mask
, 1)) {
5561 return -TARGET_EFAULT
;
5564 target_to_host_sigset(&host_mask
, target_mask
);
5566 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
5568 ret
= get_errno(signalfd(fd
, &host_mask
, host_flags
));
5570 fd_trans_register(ret
, &target_signalfd_trans
);
5573 unlock_user_struct(target_mask
, mask
, 0);
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    /* exit status and other bits pass through unchanged */
    return status;
}
/* Emulate reads of /proc/self/cmdline: copy the real file but drop the
 * first argv string (which is the qemu binary's own path, not the guest
 * program's). Writes the result into fd; returns 0/-1 like the syscall.
 */
static int open_self_cmdline(void *cpu_env, int fd)
{
    int fd_orig = -1;
    bool word_skipped = false;

    fd_orig = open("/proc/self/cmdline", O_RDONLY);
    if (fd_orig < 0) {
        return fd_orig;
    }

    while (true) {
        ssize_t nb_read;
        char buf[128];
        char *cp_buf = buf;

        nb_read = read(fd_orig, buf, sizeof(buf));
        if (nb_read < 0) {
            int e = errno;
            fd_orig = close(fd_orig);
            errno = e;
            return -1;
        } else if (nb_read == 0) {
            break;
        }

        if (!word_skipped) {
            /* Skip the first string, which is the path to qemu-*-static
               instead of the actual command. */
            cp_buf = memchr(buf, 0, sizeof(buf));
            if (cp_buf) {
                /* Null byte found, skip one string */
                cp_buf++;
                nb_read -= cp_buf - buf;
                word_skipped = true;
            }
        }

        if (word_skipped) {
            if (write(fd, cp_buf, nb_read) != nb_read) {
                int e = errno;
                close(fd_orig);
                errno = e;
                return -1;
            }
        }
    }

    return close(fd_orig);
}
5643 static int open_self_maps(void *cpu_env
, int fd
)
5645 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
5646 TaskState
*ts
= cpu
->opaque
;
5652 fp
= fopen("/proc/self/maps", "r");
5657 while ((read
= getline(&line
, &len
, fp
)) != -1) {
5658 int fields
, dev_maj
, dev_min
, inode
;
5659 uint64_t min
, max
, offset
;
5660 char flag_r
, flag_w
, flag_x
, flag_p
;
5661 char path
[512] = "";
5662 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
5663 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
5664 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
5666 if ((fields
< 10) || (fields
> 11)) {
5669 if (h2g_valid(min
)) {
5670 int flags
= page_get_flags(h2g(min
));
5671 max
= h2g_valid(max
- 1) ? max
: (uintptr_t)g2h(GUEST_ADDR_MAX
);
5672 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
5675 if (h2g(min
) == ts
->info
->stack_limit
) {
5676 pstrcpy(path
, sizeof(path
), " [stack]");
5678 dprintf(fd
, TARGET_ABI_FMT_lx
"-" TARGET_ABI_FMT_lx
5679 " %c%c%c%c %08" PRIx64
" %02x:%02x %d %s%s\n",
5680 h2g(min
), h2g(max
- 1) + 1, flag_r
, flag_w
,
5681 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
5682 path
[0] ? " " : "", path
);
5692 static int open_self_stat(void *cpu_env
, int fd
)
5694 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
5695 TaskState
*ts
= cpu
->opaque
;
5696 abi_ulong start_stack
= ts
->info
->start_stack
;
5699 for (i
= 0; i
< 44; i
++) {
5707 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
5708 } else if (i
== 1) {
5710 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
5711 } else if (i
== 27) {
5714 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
5716 /* for the rest, there is MasterCard */
5717 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
5721 if (write(fd
, buf
, len
) != len
) {
5729 static int open_self_auxv(void *cpu_env
, int fd
)
5731 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
5732 TaskState
*ts
= cpu
->opaque
;
5733 abi_ulong auxv
= ts
->info
->saved_auxv
;
5734 abi_ulong len
= ts
->info
->auxv_len
;
5738 * Auxiliary vector is stored in target process stack.
5739 * read in whole auxv vector and copy it to file
5741 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
5745 r
= write(fd
, ptr
, len
);
5752 lseek(fd
, 0, SEEK_SET
);
5753 unlock_user(ptr
, auxv
, len
);
/* Return 1 if filename names the given entry of this process's /proc
 * directory — either "/proc/self/<entry>" or "/proc/<own-pid>/<entry>" —
 * and 0 otherwise.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-match comparator used for fake /proc entries keyed by full path. */
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}

/* Emulate reads of /proc/net/route when host and target endianness
 * differ: the address columns are raw 32-bit values in host byte order,
 * so byte-swap dest/gateway/mask while copying into fd.
 */
static int open_net_route(void *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -1;
    }

    /* read header */

    read = getline(&line, &len, fp);
    dprintf(fd, "%s", line);

    /* read routes */

    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
               iface, &dest, &gw, &flags, &refcnt, &use, &metric,
               &mask, &mtu, &window, &irtt);
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
#endif
5827 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
5830 const char *filename
;
5831 int (*fill
)(void *cpu_env
, int fd
);
5832 int (*cmp
)(const char *s1
, const char *s2
);
5834 const struct fake_open
*fake_open
;
5835 static const struct fake_open fakes
[] = {
5836 { "maps", open_self_maps
, is_proc_myself
},
5837 { "stat", open_self_stat
, is_proc_myself
},
5838 { "auxv", open_self_auxv
, is_proc_myself
},
5839 { "cmdline", open_self_cmdline
, is_proc_myself
},
5840 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
5841 { "/proc/net/route", open_net_route
, is_proc
},
5843 { NULL
, NULL
, NULL
}
5846 if (is_proc_myself(pathname
, "exe")) {
5847 int execfd
= qemu_getauxval(AT_EXECFD
);
5848 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
5851 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
5852 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
5857 if (fake_open
->filename
) {
5859 char filename
[PATH_MAX
];
5862 /* create temporary file to map stat to */
5863 tmpdir
= getenv("TMPDIR");
5866 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
5867 fd
= mkstemp(filename
);
5873 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
5879 lseek(fd
, 0, SEEK_SET
);
5884 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
5887 #define TIMER_MAGIC 0x0caf0000
5888 #define TIMER_MAGIC_MASK 0xffff0000
5890 /* Convert QEMU provided timer ID back to internal 16bit index format */
5891 static target_timer_t
get_timer_id(abi_long arg
)
5893 target_timer_t timerid
= arg
;
5895 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
5896 return -TARGET_EINVAL
;
5901 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
5902 return -TARGET_EINVAL
;
5908 /* do_syscall() should always have a single exit point at the end so
5909 that actions, such as logging of syscall results, can be performed.
5910 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5911 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
5912 abi_long arg2
, abi_long arg3
, abi_long arg4
,
5913 abi_long arg5
, abi_long arg6
, abi_long arg7
,
5916 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
5922 #if defined(DEBUG_ERESTARTSYS)
5923 /* Debug-only code for exercising the syscall-restart code paths
5924 * in the per-architecture cpu main loops: restart every syscall
5925 * the guest makes once before letting it through.
5932 return -TARGET_ERESTARTSYS
;
5938 gemu_log("syscall %d", num
);
5941 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
5944 case TARGET_NR_exit
:
5945 /* In old applications this may be used to implement _exit(2).
5946 However in threaded applictions it is used for thread termination,
5947 and _exit_group is used for application termination.
5948 Do thread termination if we have more then one thread. */
5949 /* FIXME: This probably breaks if a signal arrives. We should probably
5950 be disabling signals. */
5951 if (CPU_NEXT(first_cpu
)) {
5955 /* Remove the CPU from the list. */
5956 QTAILQ_REMOVE(&cpus
, cpu
, node
);
5959 if (ts
->child_tidptr
) {
5960 put_user_u32(0, ts
->child_tidptr
);
5961 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
5965 object_unref(OBJECT(cpu
));
5967 rcu_unregister_thread();
5973 gdb_exit(cpu_env
, arg1
);
5975 ret
= 0; /* avoid warning */
5977 case TARGET_NR_read
:
5981 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
5983 ret
= get_errno(safe_read(arg1
, p
, arg3
));
5985 fd_trans_host_to_target_data(arg1
)) {
5986 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
5988 unlock_user(p
, arg2
, ret
);
5991 case TARGET_NR_write
:
5992 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
5994 ret
= get_errno(safe_write(arg1
, p
, arg3
));
5995 unlock_user(p
, arg2
, 0);
5997 #ifdef TARGET_NR_open
5998 case TARGET_NR_open
:
5999 if (!(p
= lock_user_string(arg1
)))
6001 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
6002 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
6004 fd_trans_unregister(ret
);
6005 unlock_user(p
, arg1
, 0);
6008 case TARGET_NR_openat
:
6009 if (!(p
= lock_user_string(arg2
)))
6011 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
6012 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
6014 fd_trans_unregister(ret
);
6015 unlock_user(p
, arg2
, 0);
6017 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6018 case TARGET_NR_name_to_handle_at
:
6019 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
6022 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6023 case TARGET_NR_open_by_handle_at
:
6024 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
6025 fd_trans_unregister(ret
);
6028 case TARGET_NR_close
:
6029 fd_trans_unregister(arg1
);
6030 ret
= get_errno(close(arg1
));
6035 #ifdef TARGET_NR_fork
6036 case TARGET_NR_fork
:
6037 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
6040 #ifdef TARGET_NR_waitpid
6041 case TARGET_NR_waitpid
:
6044 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
6045 if (!is_error(ret
) && arg2
&& ret
6046 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
6051 #ifdef TARGET_NR_waitid
6052 case TARGET_NR_waitid
:
6056 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
6057 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
6058 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
6060 host_to_target_siginfo(p
, &info
);
6061 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
6066 #ifdef TARGET_NR_creat /* not on alpha */
6067 case TARGET_NR_creat
:
6068 if (!(p
= lock_user_string(arg1
)))
6070 ret
= get_errno(creat(p
, arg2
));
6071 fd_trans_unregister(ret
);
6072 unlock_user(p
, arg1
, 0);
6075 #ifdef TARGET_NR_link
6076 case TARGET_NR_link
:
6079 p
= lock_user_string(arg1
);
6080 p2
= lock_user_string(arg2
);
6082 ret
= -TARGET_EFAULT
;
6084 ret
= get_errno(link(p
, p2
));
6085 unlock_user(p2
, arg2
, 0);
6086 unlock_user(p
, arg1
, 0);
6090 #if defined(TARGET_NR_linkat)
6091 case TARGET_NR_linkat
:
6096 p
= lock_user_string(arg2
);
6097 p2
= lock_user_string(arg4
);
6099 ret
= -TARGET_EFAULT
;
6101 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
6102 unlock_user(p
, arg2
, 0);
6103 unlock_user(p2
, arg4
, 0);
6107 #ifdef TARGET_NR_unlink
6108 case TARGET_NR_unlink
:
6109 if (!(p
= lock_user_string(arg1
)))
6111 ret
= get_errno(unlink(p
));
6112 unlock_user(p
, arg1
, 0);
6115 #if defined(TARGET_NR_unlinkat)
6116 case TARGET_NR_unlinkat
:
6117 if (!(p
= lock_user_string(arg2
)))
6119 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
6120 unlock_user(p
, arg2
, 0);
6123 case TARGET_NR_execve
:
6125 char **argp
, **envp
;
6128 abi_ulong guest_argp
;
6129 abi_ulong guest_envp
;
6136 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
6137 if (get_user_ual(addr
, gp
))
6145 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
6146 if (get_user_ual(addr
, gp
))
6153 argp
= alloca((argc
+ 1) * sizeof(void *));
6154 envp
= alloca((envc
+ 1) * sizeof(void *));
6156 for (gp
= guest_argp
, q
= argp
; gp
;
6157 gp
+= sizeof(abi_ulong
), q
++) {
6158 if (get_user_ual(addr
, gp
))
6162 if (!(*q
= lock_user_string(addr
)))
6164 total_size
+= strlen(*q
) + 1;
6168 for (gp
= guest_envp
, q
= envp
; gp
;
6169 gp
+= sizeof(abi_ulong
), q
++) {
6170 if (get_user_ual(addr
, gp
))
6174 if (!(*q
= lock_user_string(addr
)))
6176 total_size
+= strlen(*q
) + 1;
6180 if (!(p
= lock_user_string(arg1
)))
6182 ret
= get_errno(execve(p
, argp
, envp
));
6183 unlock_user(p
, arg1
, 0);
6188 ret
= -TARGET_EFAULT
;
6191 for (gp
= guest_argp
, q
= argp
; *q
;
6192 gp
+= sizeof(abi_ulong
), q
++) {
6193 if (get_user_ual(addr
, gp
)
6196 unlock_user(*q
, addr
, 0);
6198 for (gp
= guest_envp
, q
= envp
; *q
;
6199 gp
+= sizeof(abi_ulong
), q
++) {
6200 if (get_user_ual(addr
, gp
)
6203 unlock_user(*q
, addr
, 0);
6207 case TARGET_NR_chdir
:
6208 if (!(p
= lock_user_string(arg1
)))
6210 ret
= get_errno(chdir(p
));
6211 unlock_user(p
, arg1
, 0);
6213 #ifdef TARGET_NR_time
6214 case TARGET_NR_time
:
6217 ret
= get_errno(time(&host_time
));
6220 && put_user_sal(host_time
, arg1
))
6225 #ifdef TARGET_NR_mknod
6226 case TARGET_NR_mknod
:
6227 if (!(p
= lock_user_string(arg1
)))
6229 ret
= get_errno(mknod(p
, arg2
, arg3
));
6230 unlock_user(p
, arg1
, 0);
6233 #if defined(TARGET_NR_mknodat)
6234 case TARGET_NR_mknodat
:
6235 if (!(p
= lock_user_string(arg2
)))
6237 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
6238 unlock_user(p
, arg2
, 0);
6241 #ifdef TARGET_NR_chmod
6242 case TARGET_NR_chmod
:
6243 if (!(p
= lock_user_string(arg1
)))
6245 ret
= get_errno(chmod(p
, arg2
));
6246 unlock_user(p
, arg1
, 0);
6249 #ifdef TARGET_NR_break
6250 case TARGET_NR_break
:
6253 #ifdef TARGET_NR_oldstat
6254 case TARGET_NR_oldstat
:
6257 case TARGET_NR_lseek
:
6258 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
6260 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
6261 /* Alpha specific */
6262 case TARGET_NR_getxpid
:
6263 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
6264 ret
= get_errno(getpid());
6267 #ifdef TARGET_NR_getpid
6268 case TARGET_NR_getpid
:
6269 ret
= get_errno(getpid());
6272 case TARGET_NR_mount
:
6274 /* need to look at the data field */
6278 p
= lock_user_string(arg1
);
6286 p2
= lock_user_string(arg2
);
6289 unlock_user(p
, arg1
, 0);
6295 p3
= lock_user_string(arg3
);
6298 unlock_user(p
, arg1
, 0);
6300 unlock_user(p2
, arg2
, 0);
6307 /* FIXME - arg5 should be locked, but it isn't clear how to
6308 * do that since it's not guaranteed to be a NULL-terminated
6312 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
6314 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
6316 ret
= get_errno(ret
);
6319 unlock_user(p
, arg1
, 0);
6321 unlock_user(p2
, arg2
, 0);
6323 unlock_user(p3
, arg3
, 0);
6327 #ifdef TARGET_NR_umount
6328 case TARGET_NR_umount
:
6329 if (!(p
= lock_user_string(arg1
)))
6331 ret
= get_errno(umount(p
));
6332 unlock_user(p
, arg1
, 0);
6335 #ifdef TARGET_NR_stime /* not on alpha */
6336 case TARGET_NR_stime
:
6339 if (get_user_sal(host_time
, arg1
))
6341 ret
= get_errno(stime(&host_time
));
6345 case TARGET_NR_ptrace
:
6347 #ifdef TARGET_NR_alarm /* not on alpha */
6348 case TARGET_NR_alarm
:
6352 #ifdef TARGET_NR_oldfstat
6353 case TARGET_NR_oldfstat
:
6356 #ifdef TARGET_NR_pause /* not on alpha */
6357 case TARGET_NR_pause
:
6358 ret
= get_errno(pause());
6361 #ifdef TARGET_NR_utime
6362 case TARGET_NR_utime
:
6364 struct utimbuf tbuf
, *host_tbuf
;
6365 struct target_utimbuf
*target_tbuf
;
6367 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
6369 tbuf
.actime
= tswapal(target_tbuf
->actime
);
6370 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
6371 unlock_user_struct(target_tbuf
, arg2
, 0);
6376 if (!(p
= lock_user_string(arg1
)))
6378 ret
= get_errno(utime(p
, host_tbuf
));
6379 unlock_user(p
, arg1
, 0);
6383 #ifdef TARGET_NR_utimes
6384 case TARGET_NR_utimes
:
6386 struct timeval
*tvp
, tv
[2];
6388 if (copy_from_user_timeval(&tv
[0], arg2
)
6389 || copy_from_user_timeval(&tv
[1],
6390 arg2
+ sizeof(struct target_timeval
)))
6396 if (!(p
= lock_user_string(arg1
)))
6398 ret
= get_errno(utimes(p
, tvp
));
6399 unlock_user(p
, arg1
, 0);
6403 #if defined(TARGET_NR_futimesat)
6404 case TARGET_NR_futimesat
:
6406 struct timeval
*tvp
, tv
[2];
6408 if (copy_from_user_timeval(&tv
[0], arg3
)
6409 || copy_from_user_timeval(&tv
[1],
6410 arg3
+ sizeof(struct target_timeval
)))
6416 if (!(p
= lock_user_string(arg2
)))
6418 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
6419 unlock_user(p
, arg2
, 0);
6423 #ifdef TARGET_NR_stty
6424 case TARGET_NR_stty
:
6427 #ifdef TARGET_NR_gtty
6428 case TARGET_NR_gtty
:
6431 #ifdef TARGET_NR_access
6432 case TARGET_NR_access
:
6433 if (!(p
= lock_user_string(arg1
)))
6435 ret
= get_errno(access(path(p
), arg2
));
6436 unlock_user(p
, arg1
, 0);
6439 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
6440 case TARGET_NR_faccessat
:
6441 if (!(p
= lock_user_string(arg2
)))
6443 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
6444 unlock_user(p
, arg2
, 0);
6447 #ifdef TARGET_NR_nice /* not on alpha */
6448 case TARGET_NR_nice
:
6449 ret
= get_errno(nice(arg1
));
6452 #ifdef TARGET_NR_ftime
6453 case TARGET_NR_ftime
:
6456 case TARGET_NR_sync
:
6460 case TARGET_NR_kill
:
6461 ret
= get_errno(kill(arg1
, target_to_host_signal(arg2
)));
6463 #ifdef TARGET_NR_rename
6464 case TARGET_NR_rename
:
6467 p
= lock_user_string(arg1
);
6468 p2
= lock_user_string(arg2
);
6470 ret
= -TARGET_EFAULT
;
6472 ret
= get_errno(rename(p
, p2
));
6473 unlock_user(p2
, arg2
, 0);
6474 unlock_user(p
, arg1
, 0);
6478 #if defined(TARGET_NR_renameat)
6479 case TARGET_NR_renameat
:
6482 p
= lock_user_string(arg2
);
6483 p2
= lock_user_string(arg4
);
6485 ret
= -TARGET_EFAULT
;
6487 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
6488 unlock_user(p2
, arg4
, 0);
6489 unlock_user(p
, arg2
, 0);
6493 #ifdef TARGET_NR_mkdir
6494 case TARGET_NR_mkdir
:
6495 if (!(p
= lock_user_string(arg1
)))
6497 ret
= get_errno(mkdir(p
, arg2
));
6498 unlock_user(p
, arg1
, 0);
6501 #if defined(TARGET_NR_mkdirat)
6502 case TARGET_NR_mkdirat
:
6503 if (!(p
= lock_user_string(arg2
)))
6505 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
6506 unlock_user(p
, arg2
, 0);
6509 #ifdef TARGET_NR_rmdir
6510 case TARGET_NR_rmdir
:
6511 if (!(p
= lock_user_string(arg1
)))
6513 ret
= get_errno(rmdir(p
));
6514 unlock_user(p
, arg1
, 0);
6518 ret
= get_errno(dup(arg1
));
6520 fd_trans_dup(arg1
, ret
);
6523 #ifdef TARGET_NR_pipe
6524 case TARGET_NR_pipe
:
6525 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
6528 #ifdef TARGET_NR_pipe2
6529 case TARGET_NR_pipe2
:
6530 ret
= do_pipe(cpu_env
, arg1
,
6531 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
6534 case TARGET_NR_times
:
6536 struct target_tms
*tmsp
;
6538 ret
= get_errno(times(&tms
));
6540 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
6543 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
6544 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
6545 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
6546 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
6549 ret
= host_to_target_clock_t(ret
);
6552 #ifdef TARGET_NR_prof
6553 case TARGET_NR_prof
:
6556 #ifdef TARGET_NR_signal
6557 case TARGET_NR_signal
:
6560 case TARGET_NR_acct
:
6562 ret
= get_errno(acct(NULL
));
6564 if (!(p
= lock_user_string(arg1
)))
6566 ret
= get_errno(acct(path(p
)));
6567 unlock_user(p
, arg1
, 0);
6570 #ifdef TARGET_NR_umount2
6571 case TARGET_NR_umount2
:
6572 if (!(p
= lock_user_string(arg1
)))
6574 ret
= get_errno(umount2(p
, arg2
));
6575 unlock_user(p
, arg1
, 0);
6578 #ifdef TARGET_NR_lock
6579 case TARGET_NR_lock
:
6582 case TARGET_NR_ioctl
:
6583 ret
= do_ioctl(arg1
, arg2
, arg3
);
6585 case TARGET_NR_fcntl
:
6586 ret
= do_fcntl(arg1
, arg2
, arg3
);
6588 #ifdef TARGET_NR_mpx
6592 case TARGET_NR_setpgid
:
6593 ret
= get_errno(setpgid(arg1
, arg2
));
6595 #ifdef TARGET_NR_ulimit
6596 case TARGET_NR_ulimit
:
6599 #ifdef TARGET_NR_oldolduname
6600 case TARGET_NR_oldolduname
:
6603 case TARGET_NR_umask
:
6604 ret
= get_errno(umask(arg1
));
6606 case TARGET_NR_chroot
:
6607 if (!(p
= lock_user_string(arg1
)))
6609 ret
= get_errno(chroot(p
));
6610 unlock_user(p
, arg1
, 0);
6612 #ifdef TARGET_NR_ustat
6613 case TARGET_NR_ustat
:
6616 #ifdef TARGET_NR_dup2
6617 case TARGET_NR_dup2
:
6618 ret
= get_errno(dup2(arg1
, arg2
));
6620 fd_trans_dup(arg1
, arg2
);
6624 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
6625 case TARGET_NR_dup3
:
6626 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
6628 fd_trans_dup(arg1
, arg2
);
6632 #ifdef TARGET_NR_getppid /* not on alpha */
6633 case TARGET_NR_getppid
:
6634 ret
= get_errno(getppid());
6637 #ifdef TARGET_NR_getpgrp
6638 case TARGET_NR_getpgrp
:
6639 ret
= get_errno(getpgrp());
6642 case TARGET_NR_setsid
:
6643 ret
= get_errno(setsid());
6645 #ifdef TARGET_NR_sigaction
6646 case TARGET_NR_sigaction
:
6648 #if defined(TARGET_ALPHA)
6649 struct target_sigaction act
, oact
, *pact
= 0;
6650 struct target_old_sigaction
*old_act
;
6652 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
6654 act
._sa_handler
= old_act
->_sa_handler
;
6655 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
6656 act
.sa_flags
= old_act
->sa_flags
;
6657 act
.sa_restorer
= 0;
6658 unlock_user_struct(old_act
, arg2
, 0);
6661 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
6662 if (!is_error(ret
) && arg3
) {
6663 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
6665 old_act
->_sa_handler
= oact
._sa_handler
;
6666 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
6667 old_act
->sa_flags
= oact
.sa_flags
;
6668 unlock_user_struct(old_act
, arg3
, 1);
6670 #elif defined(TARGET_MIPS)
6671 struct target_sigaction act
, oact
, *pact
, *old_act
;
6674 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
6676 act
._sa_handler
= old_act
->_sa_handler
;
6677 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
6678 act
.sa_flags
= old_act
->sa_flags
;
6679 unlock_user_struct(old_act
, arg2
, 0);
6685 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
6687 if (!is_error(ret
) && arg3
) {
6688 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
6690 old_act
->_sa_handler
= oact
._sa_handler
;
6691 old_act
->sa_flags
= oact
.sa_flags
;
6692 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
6693 old_act
->sa_mask
.sig
[1] = 0;
6694 old_act
->sa_mask
.sig
[2] = 0;
6695 old_act
->sa_mask
.sig
[3] = 0;
6696 unlock_user_struct(old_act
, arg3
, 1);
6699 struct target_old_sigaction
*old_act
;
6700 struct target_sigaction act
, oact
, *pact
;
6702 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
6704 act
._sa_handler
= old_act
->_sa_handler
;
6705 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
6706 act
.sa_flags
= old_act
->sa_flags
;
6707 act
.sa_restorer
= old_act
->sa_restorer
;
6708 unlock_user_struct(old_act
, arg2
, 0);
6713 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
6714 if (!is_error(ret
) && arg3
) {
6715 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
6717 old_act
->_sa_handler
= oact
._sa_handler
;
6718 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
6719 old_act
->sa_flags
= oact
.sa_flags
;
6720 old_act
->sa_restorer
= oact
.sa_restorer
;
6721 unlock_user_struct(old_act
, arg3
, 1);
6727 case TARGET_NR_rt_sigaction
:
6729 #if defined(TARGET_ALPHA)
6730 struct target_sigaction act
, oact
, *pact
= 0;
6731 struct target_rt_sigaction
*rt_act
;
6732 /* ??? arg4 == sizeof(sigset_t). */
6734 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
6736 act
._sa_handler
= rt_act
->_sa_handler
;
6737 act
.sa_mask
= rt_act
->sa_mask
;
6738 act
.sa_flags
= rt_act
->sa_flags
;
6739 act
.sa_restorer
= arg5
;
6740 unlock_user_struct(rt_act
, arg2
, 0);
6743 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
6744 if (!is_error(ret
) && arg3
) {
6745 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
6747 rt_act
->_sa_handler
= oact
._sa_handler
;
6748 rt_act
->sa_mask
= oact
.sa_mask
;
6749 rt_act
->sa_flags
= oact
.sa_flags
;
6750 unlock_user_struct(rt_act
, arg3
, 1);
6753 struct target_sigaction
*act
;
6754 struct target_sigaction
*oact
;
6757 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
6762 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
6763 ret
= -TARGET_EFAULT
;
6764 goto rt_sigaction_fail
;
6768 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
6771 unlock_user_struct(act
, arg2
, 0);
6773 unlock_user_struct(oact
, arg3
, 1);
6777 #ifdef TARGET_NR_sgetmask /* not on alpha */
6778 case TARGET_NR_sgetmask
:
6781 abi_ulong target_set
;
6782 do_sigprocmask(0, NULL
, &cur_set
);
6783 host_to_target_old_sigset(&target_set
, &cur_set
);
6788 #ifdef TARGET_NR_ssetmask /* not on alpha */
6789 case TARGET_NR_ssetmask
:
6791 sigset_t set
, oset
, cur_set
;
6792 abi_ulong target_set
= arg1
;
6793 do_sigprocmask(0, NULL
, &cur_set
);
6794 target_to_host_old_sigset(&set
, &target_set
);
6795 sigorset(&set
, &set
, &cur_set
);
6796 do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
6797 host_to_target_old_sigset(&target_set
, &oset
);
6802 #ifdef TARGET_NR_sigprocmask
6803 case TARGET_NR_sigprocmask
:
6805 #if defined(TARGET_ALPHA)
6806 sigset_t set
, oldset
;
6811 case TARGET_SIG_BLOCK
:
6814 case TARGET_SIG_UNBLOCK
:
6817 case TARGET_SIG_SETMASK
:
6821 ret
= -TARGET_EINVAL
;
6825 target_to_host_old_sigset(&set
, &mask
);
6827 ret
= get_errno(do_sigprocmask(how
, &set
, &oldset
));
6828 if (!is_error(ret
)) {
6829 host_to_target_old_sigset(&mask
, &oldset
);
6831 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
6834 sigset_t set
, oldset
, *set_ptr
;
6839 case TARGET_SIG_BLOCK
:
6842 case TARGET_SIG_UNBLOCK
:
6845 case TARGET_SIG_SETMASK
:
6849 ret
= -TARGET_EINVAL
;
6852 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
6854 target_to_host_old_sigset(&set
, p
);
6855 unlock_user(p
, arg2
, 0);
6861 ret
= get_errno(do_sigprocmask(how
, set_ptr
, &oldset
));
6862 if (!is_error(ret
) && arg3
) {
6863 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
6865 host_to_target_old_sigset(p
, &oldset
);
6866 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
6872 case TARGET_NR_rt_sigprocmask
:
6875 sigset_t set
, oldset
, *set_ptr
;
6879 case TARGET_SIG_BLOCK
:
6882 case TARGET_SIG_UNBLOCK
:
6885 case TARGET_SIG_SETMASK
:
6889 ret
= -TARGET_EINVAL
;
6892 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
6894 target_to_host_sigset(&set
, p
);
6895 unlock_user(p
, arg2
, 0);
6901 ret
= get_errno(do_sigprocmask(how
, set_ptr
, &oldset
));
6902 if (!is_error(ret
) && arg3
) {
6903 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
6905 host_to_target_sigset(p
, &oldset
);
6906 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
6910 #ifdef TARGET_NR_sigpending
6911 case TARGET_NR_sigpending
:
6914 ret
= get_errno(sigpending(&set
));
6915 if (!is_error(ret
)) {
6916 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
6918 host_to_target_old_sigset(p
, &set
);
6919 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
6924 case TARGET_NR_rt_sigpending
:
6927 ret
= get_errno(sigpending(&set
));
6928 if (!is_error(ret
)) {
6929 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
6931 host_to_target_sigset(p
, &set
);
6932 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
6936 #ifdef TARGET_NR_sigsuspend
6937 case TARGET_NR_sigsuspend
:
6940 #if defined(TARGET_ALPHA)
6941 abi_ulong mask
= arg1
;
6942 target_to_host_old_sigset(&set
, &mask
);
6944 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
6946 target_to_host_old_sigset(&set
, p
);
6947 unlock_user(p
, arg1
, 0);
6949 ret
= get_errno(sigsuspend(&set
));
6953 case TARGET_NR_rt_sigsuspend
:
6956 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
6958 target_to_host_sigset(&set
, p
);
6959 unlock_user(p
, arg1
, 0);
6960 ret
= get_errno(sigsuspend(&set
));
6963 case TARGET_NR_rt_sigtimedwait
:
6966 struct timespec uts
, *puts
;
6969 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
6971 target_to_host_sigset(&set
, p
);
6972 unlock_user(p
, arg1
, 0);
6975 target_to_host_timespec(puts
, arg3
);
6979 ret
= get_errno(sigtimedwait(&set
, &uinfo
, puts
));
6980 if (!is_error(ret
)) {
6982 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
6987 host_to_target_siginfo(p
, &uinfo
);
6988 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
6990 ret
= host_to_target_signal(ret
);
6994 case TARGET_NR_rt_sigqueueinfo
:
6997 if (!(p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_sigset_t
), 1)))
6999 target_to_host_siginfo(&uinfo
, p
);
7000 unlock_user(p
, arg1
, 0);
7001 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
7004 #ifdef TARGET_NR_sigreturn
7005 case TARGET_NR_sigreturn
:
7006 ret
= do_sigreturn(cpu_env
);
7009 case TARGET_NR_rt_sigreturn
:
7010 ret
= do_rt_sigreturn(cpu_env
);
7012 case TARGET_NR_sethostname
:
7013 if (!(p
= lock_user_string(arg1
)))
7015 ret
= get_errno(sethostname(p
, arg2
));
7016 unlock_user(p
, arg1
, 0);
7018 case TARGET_NR_setrlimit
:
7020 int resource
= target_to_host_resource(arg1
);
7021 struct target_rlimit
*target_rlim
;
7023 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
7025 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
7026 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
7027 unlock_user_struct(target_rlim
, arg2
, 0);
7028 ret
= get_errno(setrlimit(resource
, &rlim
));
7031 case TARGET_NR_getrlimit
:
7033 int resource
= target_to_host_resource(arg1
);
7034 struct target_rlimit
*target_rlim
;
7037 ret
= get_errno(getrlimit(resource
, &rlim
));
7038 if (!is_error(ret
)) {
7039 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
7041 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
7042 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
7043 unlock_user_struct(target_rlim
, arg2
, 1);
7047 case TARGET_NR_getrusage
:
7049 struct rusage rusage
;
7050 ret
= get_errno(getrusage(arg1
, &rusage
));
7051 if (!is_error(ret
)) {
7052 ret
= host_to_target_rusage(arg2
, &rusage
);
7056 case TARGET_NR_gettimeofday
:
7059 ret
= get_errno(gettimeofday(&tv
, NULL
));
7060 if (!is_error(ret
)) {
7061 if (copy_to_user_timeval(arg1
, &tv
))
7066 case TARGET_NR_settimeofday
:
7068 struct timeval tv
, *ptv
= NULL
;
7069 struct timezone tz
, *ptz
= NULL
;
7072 if (copy_from_user_timeval(&tv
, arg1
)) {
7079 if (copy_from_user_timezone(&tz
, arg2
)) {
7085 ret
= get_errno(settimeofday(ptv
, ptz
));
7088 #if defined(TARGET_NR_select)
7089 case TARGET_NR_select
:
7090 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
7091 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
7094 struct target_sel_arg_struct
*sel
;
7095 abi_ulong inp
, outp
, exp
, tvp
;
7098 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1))
7100 nsel
= tswapal(sel
->n
);
7101 inp
= tswapal(sel
->inp
);
7102 outp
= tswapal(sel
->outp
);
7103 exp
= tswapal(sel
->exp
);
7104 tvp
= tswapal(sel
->tvp
);
7105 unlock_user_struct(sel
, arg1
, 0);
7106 ret
= do_select(nsel
, inp
, outp
, exp
, tvp
);
7111 #ifdef TARGET_NR_pselect6
7112 case TARGET_NR_pselect6
:
7114 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
7115 fd_set rfds
, wfds
, efds
;
7116 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
7117 struct timespec ts
, *ts_ptr
;
7120 * The 6th arg is actually two args smashed together,
7121 * so we cannot use the C library.
7129 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
7130 target_sigset_t
*target_sigset
;
7138 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
7142 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
7146 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
7152 * This takes a timespec, and not a timeval, so we cannot
7153 * use the do_select() helper ...
7156 if (target_to_host_timespec(&ts
, ts_addr
)) {
7164 /* Extract the two packed args for the sigset */
7167 sig
.size
= _NSIG
/ 8;
7169 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
7173 arg_sigset
= tswapal(arg7
[0]);
7174 arg_sigsize
= tswapal(arg7
[1]);
7175 unlock_user(arg7
, arg6
, 0);
7179 if (arg_sigsize
!= sizeof(*target_sigset
)) {
7180 /* Like the kernel, we enforce correct size sigsets */
7181 ret
= -TARGET_EINVAL
;
7184 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
7185 sizeof(*target_sigset
), 1);
7186 if (!target_sigset
) {
7189 target_to_host_sigset(&set
, target_sigset
);
7190 unlock_user(target_sigset
, arg_sigset
, 0);
7198 ret
= get_errno(sys_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
7201 if (!is_error(ret
)) {
7202 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
7204 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
7206 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
7209 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
7215 #ifdef TARGET_NR_symlink
7216 case TARGET_NR_symlink
:
7219 p
= lock_user_string(arg1
);
7220 p2
= lock_user_string(arg2
);
7222 ret
= -TARGET_EFAULT
;
7224 ret
= get_errno(symlink(p
, p2
));
7225 unlock_user(p2
, arg2
, 0);
7226 unlock_user(p
, arg1
, 0);
7230 #if defined(TARGET_NR_symlinkat)
7231 case TARGET_NR_symlinkat
:
7234 p
= lock_user_string(arg1
);
7235 p2
= lock_user_string(arg3
);
7237 ret
= -TARGET_EFAULT
;
7239 ret
= get_errno(symlinkat(p
, arg2
, p2
));
7240 unlock_user(p2
, arg3
, 0);
7241 unlock_user(p
, arg1
, 0);
7245 #ifdef TARGET_NR_oldlstat
7246 case TARGET_NR_oldlstat
:
7249 #ifdef TARGET_NR_readlink
7250 case TARGET_NR_readlink
:
7253 p
= lock_user_string(arg1
);
7254 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
7256 ret
= -TARGET_EFAULT
;
7258 /* Short circuit this for the magic exe check. */
7259 ret
= -TARGET_EINVAL
;
7260 } else if (is_proc_myself((const char *)p
, "exe")) {
7261 char real
[PATH_MAX
], *temp
;
7262 temp
= realpath(exec_path
, real
);
7263 /* Return value is # of bytes that we wrote to the buffer. */
7265 ret
= get_errno(-1);
7267 /* Don't worry about sign mismatch as earlier mapping
7268 * logic would have thrown a bad address error. */
7269 ret
= MIN(strlen(real
), arg3
);
7270 /* We cannot NUL terminate the string. */
7271 memcpy(p2
, real
, ret
);
7274 ret
= get_errno(readlink(path(p
), p2
, arg3
));
7276 unlock_user(p2
, arg2
, ret
);
7277 unlock_user(p
, arg1
, 0);
7281 #if defined(TARGET_NR_readlinkat)
7282 case TARGET_NR_readlinkat
:
7285 p
= lock_user_string(arg2
);
7286 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
7288 ret
= -TARGET_EFAULT
;
7289 } else if (is_proc_myself((const char *)p
, "exe")) {
7290 char real
[PATH_MAX
], *temp
;
7291 temp
= realpath(exec_path
, real
);
7292 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
7293 snprintf((char *)p2
, arg4
, "%s", real
);
7295 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
7297 unlock_user(p2
, arg3
, ret
);
7298 unlock_user(p
, arg2
, 0);
7302 #ifdef TARGET_NR_uselib
7303 case TARGET_NR_uselib
:
7306 #ifdef TARGET_NR_swapon
7307 case TARGET_NR_swapon
:
7308 if (!(p
= lock_user_string(arg1
)))
7310 ret
= get_errno(swapon(p
, arg2
));
7311 unlock_user(p
, arg1
, 0);
7314 case TARGET_NR_reboot
:
7315 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
7316 /* arg4 must be ignored in all other cases */
7317 p
= lock_user_string(arg4
);
7321 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
7322 unlock_user(p
, arg4
, 0);
7324 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
7327 #ifdef TARGET_NR_readdir
7328 case TARGET_NR_readdir
:
7331 #ifdef TARGET_NR_mmap
7332 case TARGET_NR_mmap
:
7333 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7334 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
7335 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
7336 || defined(TARGET_S390X)
7339 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
7340 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
7348 unlock_user(v
, arg1
, 0);
7349 ret
= get_errno(target_mmap(v1
, v2
, v3
,
7350 target_to_host_bitmask(v4
, mmap_flags_tbl
),
7354 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
7355 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
7361 #ifdef TARGET_NR_mmap2
7362 case TARGET_NR_mmap2
:
7364 #define MMAP_SHIFT 12
7366 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
7367 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
7369 arg6
<< MMAP_SHIFT
));
7372 case TARGET_NR_munmap
:
7373 ret
= get_errno(target_munmap(arg1
, arg2
));
7375 case TARGET_NR_mprotect
:
7377 TaskState
*ts
= cpu
->opaque
;
7378 /* Special hack to detect libc making the stack executable. */
7379 if ((arg3
& PROT_GROWSDOWN
)
7380 && arg1
>= ts
->info
->stack_limit
7381 && arg1
<= ts
->info
->start_stack
) {
7382 arg3
&= ~PROT_GROWSDOWN
;
7383 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
7384 arg1
= ts
->info
->stack_limit
;
7387 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
7389 #ifdef TARGET_NR_mremap
7390 case TARGET_NR_mremap
:
7391 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
7394 /* ??? msync/mlock/munlock are broken for softmmu. */
7395 #ifdef TARGET_NR_msync
7396 case TARGET_NR_msync
:
7397 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
7400 #ifdef TARGET_NR_mlock
7401 case TARGET_NR_mlock
:
7402 ret
= get_errno(mlock(g2h(arg1
), arg2
));
7405 #ifdef TARGET_NR_munlock
7406 case TARGET_NR_munlock
:
7407 ret
= get_errno(munlock(g2h(arg1
), arg2
));
7410 #ifdef TARGET_NR_mlockall
7411 case TARGET_NR_mlockall
:
7412 ret
= get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
7415 #ifdef TARGET_NR_munlockall
7416 case TARGET_NR_munlockall
:
7417 ret
= get_errno(munlockall());
7420 case TARGET_NR_truncate
:
7421 if (!(p
= lock_user_string(arg1
)))
7423 ret
= get_errno(truncate(p
, arg2
));
7424 unlock_user(p
, arg1
, 0);
7426 case TARGET_NR_ftruncate
:
7427 ret
= get_errno(ftruncate(arg1
, arg2
));
7429 case TARGET_NR_fchmod
:
7430 ret
= get_errno(fchmod(arg1
, arg2
));
7432 #if defined(TARGET_NR_fchmodat)
7433 case TARGET_NR_fchmodat
:
7434 if (!(p
= lock_user_string(arg2
)))
7436 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
7437 unlock_user(p
, arg2
, 0);
7440 case TARGET_NR_getpriority
:
7441 /* Note that negative values are valid for getpriority, so we must
7442 differentiate based on errno settings. */
7444 ret
= getpriority(arg1
, arg2
);
7445 if (ret
== -1 && errno
!= 0) {
7446 ret
= -host_to_target_errno(errno
);
7450 /* Return value is the unbiased priority. Signal no error. */
7451 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
7453 /* Return value is a biased priority to avoid negative numbers. */
7457 case TARGET_NR_setpriority
:
7458 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
7460 #ifdef TARGET_NR_profil
7461 case TARGET_NR_profil
:
7464 case TARGET_NR_statfs
:
7465 if (!(p
= lock_user_string(arg1
)))
7467 ret
= get_errno(statfs(path(p
), &stfs
));
7468 unlock_user(p
, arg1
, 0);
7470 if (!is_error(ret
)) {
7471 struct target_statfs
*target_stfs
;
7473 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
7475 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
7476 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
7477 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
7478 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
7479 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
7480 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
7481 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
7482 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
7483 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
7484 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
7485 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
7486 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
7487 unlock_user_struct(target_stfs
, arg2
, 1);
7490 case TARGET_NR_fstatfs
:
7491 ret
= get_errno(fstatfs(arg1
, &stfs
));
7492 goto convert_statfs
;
7493 #ifdef TARGET_NR_statfs64
7494 case TARGET_NR_statfs64
:
7495 if (!(p
= lock_user_string(arg1
)))
7497 ret
= get_errno(statfs(path(p
), &stfs
));
7498 unlock_user(p
, arg1
, 0);
7500 if (!is_error(ret
)) {
7501 struct target_statfs64
*target_stfs
;
7503 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
7505 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
7506 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
7507 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
7508 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
7509 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
7510 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
7511 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
7512 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
7513 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
7514 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
7515 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
7516 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
7517 unlock_user_struct(target_stfs
, arg3
, 1);
7520 case TARGET_NR_fstatfs64
:
7521 ret
= get_errno(fstatfs(arg1
, &stfs
));
7522 goto convert_statfs64
;
7524 #ifdef TARGET_NR_ioperm
7525 case TARGET_NR_ioperm
:
7528 #ifdef TARGET_NR_socketcall
7529 case TARGET_NR_socketcall
:
7530 ret
= do_socketcall(arg1
, arg2
);
7533 #ifdef TARGET_NR_accept
7534 case TARGET_NR_accept
:
7535 ret
= do_accept4(arg1
, arg2
, arg3
, 0);
7538 #ifdef TARGET_NR_accept4
7539 case TARGET_NR_accept4
:
7540 #ifdef CONFIG_ACCEPT4
7541 ret
= do_accept4(arg1
, arg2
, arg3
, arg4
);
7547 #ifdef TARGET_NR_bind
7548 case TARGET_NR_bind
:
7549 ret
= do_bind(arg1
, arg2
, arg3
);
7552 #ifdef TARGET_NR_connect
7553 case TARGET_NR_connect
:
7554 ret
= do_connect(arg1
, arg2
, arg3
);
7557 #ifdef TARGET_NR_getpeername
7558 case TARGET_NR_getpeername
:
7559 ret
= do_getpeername(arg1
, arg2
, arg3
);
7562 #ifdef TARGET_NR_getsockname
7563 case TARGET_NR_getsockname
:
7564 ret
= do_getsockname(arg1
, arg2
, arg3
);
7567 #ifdef TARGET_NR_getsockopt
7568 case TARGET_NR_getsockopt
:
7569 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
7572 #ifdef TARGET_NR_listen
7573 case TARGET_NR_listen
:
7574 ret
= get_errno(listen(arg1
, arg2
));
7577 #ifdef TARGET_NR_recv
7578 case TARGET_NR_recv
:
7579 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
7582 #ifdef TARGET_NR_recvfrom
7583 case TARGET_NR_recvfrom
:
7584 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7587 #ifdef TARGET_NR_recvmsg
7588 case TARGET_NR_recvmsg
:
7589 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
7592 #ifdef TARGET_NR_send
7593 case TARGET_NR_send
:
7594 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
7597 #ifdef TARGET_NR_sendmsg
7598 case TARGET_NR_sendmsg
:
7599 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
7602 #ifdef TARGET_NR_sendmmsg
7603 case TARGET_NR_sendmmsg
:
7604 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
7606 case TARGET_NR_recvmmsg
:
7607 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
7610 #ifdef TARGET_NR_sendto
7611 case TARGET_NR_sendto
:
7612 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7615 #ifdef TARGET_NR_shutdown
7616 case TARGET_NR_shutdown
:
7617 ret
= get_errno(shutdown(arg1
, arg2
));
7620 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
7621 case TARGET_NR_getrandom
:
7622 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
7626 ret
= get_errno(getrandom(p
, arg2
, arg3
));
7627 unlock_user(p
, arg1
, ret
);
7630 #ifdef TARGET_NR_socket
7631 case TARGET_NR_socket
:
7632 ret
= do_socket(arg1
, arg2
, arg3
);
7633 fd_trans_unregister(ret
);
7636 #ifdef TARGET_NR_socketpair
7637 case TARGET_NR_socketpair
:
7638 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
7641 #ifdef TARGET_NR_setsockopt
7642 case TARGET_NR_setsockopt
:
7643 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
7647 case TARGET_NR_syslog
:
7648 if (!(p
= lock_user_string(arg2
)))
7650 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
7651 unlock_user(p
, arg2
, 0);
7654 case TARGET_NR_setitimer
:
7656 struct itimerval value
, ovalue
, *pvalue
;
7660 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
7661 || copy_from_user_timeval(&pvalue
->it_value
,
7662 arg2
+ sizeof(struct target_timeval
)))
7667 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
7668 if (!is_error(ret
) && arg3
) {
7669 if (copy_to_user_timeval(arg3
,
7670 &ovalue
.it_interval
)
7671 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
7677 case TARGET_NR_getitimer
:
7679 struct itimerval value
;
7681 ret
= get_errno(getitimer(arg1
, &value
));
7682 if (!is_error(ret
) && arg2
) {
7683 if (copy_to_user_timeval(arg2
,
7685 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
7691 #ifdef TARGET_NR_stat
7692 case TARGET_NR_stat
:
7693 if (!(p
= lock_user_string(arg1
)))
7695 ret
= get_errno(stat(path(p
), &st
));
7696 unlock_user(p
, arg1
, 0);
7699 #ifdef TARGET_NR_lstat
7700 case TARGET_NR_lstat
:
7701 if (!(p
= lock_user_string(arg1
)))
7703 ret
= get_errno(lstat(path(p
), &st
));
7704 unlock_user(p
, arg1
, 0);
7707 case TARGET_NR_fstat
:
7709 ret
= get_errno(fstat(arg1
, &st
));
7710 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
7713 if (!is_error(ret
)) {
7714 struct target_stat
*target_st
;
7716 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
7718 memset(target_st
, 0, sizeof(*target_st
));
7719 __put_user(st
.st_dev
, &target_st
->st_dev
);
7720 __put_user(st
.st_ino
, &target_st
->st_ino
);
7721 __put_user(st
.st_mode
, &target_st
->st_mode
);
7722 __put_user(st
.st_uid
, &target_st
->st_uid
);
7723 __put_user(st
.st_gid
, &target_st
->st_gid
);
7724 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
7725 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
7726 __put_user(st
.st_size
, &target_st
->st_size
);
7727 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
7728 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
7729 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
7730 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
7731 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
7732 unlock_user_struct(target_st
, arg2
, 1);
7736 #ifdef TARGET_NR_olduname
7737 case TARGET_NR_olduname
:
7740 #ifdef TARGET_NR_iopl
7741 case TARGET_NR_iopl
:
7744 case TARGET_NR_vhangup
:
7745 ret
= get_errno(vhangup());
7747 #ifdef TARGET_NR_idle
7748 case TARGET_NR_idle
:
7751 #ifdef TARGET_NR_syscall
7752 case TARGET_NR_syscall
:
7753 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
7754 arg6
, arg7
, arg8
, 0);
7757 case TARGET_NR_wait4
:
7760 abi_long status_ptr
= arg2
;
7761 struct rusage rusage
, *rusage_ptr
;
7762 abi_ulong target_rusage
= arg4
;
7763 abi_long rusage_err
;
7765 rusage_ptr
= &rusage
;
7768 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
7769 if (!is_error(ret
)) {
7770 if (status_ptr
&& ret
) {
7771 status
= host_to_target_waitstatus(status
);
7772 if (put_user_s32(status
, status_ptr
))
7775 if (target_rusage
) {
7776 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
7784 #ifdef TARGET_NR_swapoff
7785 case TARGET_NR_swapoff
:
7786 if (!(p
= lock_user_string(arg1
)))
7788 ret
= get_errno(swapoff(p
));
7789 unlock_user(p
, arg1
, 0);
7792 case TARGET_NR_sysinfo
:
7794 struct target_sysinfo
*target_value
;
7795 struct sysinfo value
;
7796 ret
= get_errno(sysinfo(&value
));
7797 if (!is_error(ret
) && arg1
)
7799 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
7801 __put_user(value
.uptime
, &target_value
->uptime
);
7802 __put_user(value
.loads
[0], &target_value
->loads
[0]);
7803 __put_user(value
.loads
[1], &target_value
->loads
[1]);
7804 __put_user(value
.loads
[2], &target_value
->loads
[2]);
7805 __put_user(value
.totalram
, &target_value
->totalram
);
7806 __put_user(value
.freeram
, &target_value
->freeram
);
7807 __put_user(value
.sharedram
, &target_value
->sharedram
);
7808 __put_user(value
.bufferram
, &target_value
->bufferram
);
7809 __put_user(value
.totalswap
, &target_value
->totalswap
);
7810 __put_user(value
.freeswap
, &target_value
->freeswap
);
7811 __put_user(value
.procs
, &target_value
->procs
);
7812 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
7813 __put_user(value
.freehigh
, &target_value
->freehigh
);
7814 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
7815 unlock_user_struct(target_value
, arg1
, 1);
7819 #ifdef TARGET_NR_ipc
7821 ret
= do_ipc(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7824 #ifdef TARGET_NR_semget
7825 case TARGET_NR_semget
:
7826 ret
= get_errno(semget(arg1
, arg2
, arg3
));
7829 #ifdef TARGET_NR_semop
7830 case TARGET_NR_semop
:
7831 ret
= do_semop(arg1
, arg2
, arg3
);
7834 #ifdef TARGET_NR_semctl
7835 case TARGET_NR_semctl
:
7836 ret
= do_semctl(arg1
, arg2
, arg3
, arg4
);
7839 #ifdef TARGET_NR_msgctl
7840 case TARGET_NR_msgctl
:
7841 ret
= do_msgctl(arg1
, arg2
, arg3
);
7844 #ifdef TARGET_NR_msgget
7845 case TARGET_NR_msgget
:
7846 ret
= get_errno(msgget(arg1
, arg2
));
7849 #ifdef TARGET_NR_msgrcv
7850 case TARGET_NR_msgrcv
:
7851 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
7854 #ifdef TARGET_NR_msgsnd
7855 case TARGET_NR_msgsnd
:
7856 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
7859 #ifdef TARGET_NR_shmget
7860 case TARGET_NR_shmget
:
7861 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
7864 #ifdef TARGET_NR_shmctl
7865 case TARGET_NR_shmctl
:
7866 ret
= do_shmctl(arg1
, arg2
, arg3
);
7869 #ifdef TARGET_NR_shmat
7870 case TARGET_NR_shmat
:
7871 ret
= do_shmat(arg1
, arg2
, arg3
);
7874 #ifdef TARGET_NR_shmdt
7875 case TARGET_NR_shmdt
:
7876 ret
= do_shmdt(arg1
);
7879 case TARGET_NR_fsync
:
7880 ret
= get_errno(fsync(arg1
));
7882 case TARGET_NR_clone
:
7883 /* Linux manages to have three different orderings for its
7884 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
7885 * match the kernel's CONFIG_CLONE_* settings.
7886 * Microblaze is further special in that it uses a sixth
7887 * implicit argument to clone for the TLS pointer.
7889 #if defined(TARGET_MICROBLAZE)
7890 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
7891 #elif defined(TARGET_CLONE_BACKWARDS)
7892 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
7893 #elif defined(TARGET_CLONE_BACKWARDS2)
7894 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
7896 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
7899 #ifdef __NR_exit_group
7900 /* new thread calls */
7901 case TARGET_NR_exit_group
:
7905 gdb_exit(cpu_env
, arg1
);
7906 ret
= get_errno(exit_group(arg1
));
7909 case TARGET_NR_setdomainname
:
7910 if (!(p
= lock_user_string(arg1
)))
7912 ret
= get_errno(setdomainname(p
, arg2
));
7913 unlock_user(p
, arg1
, 0);
7915 case TARGET_NR_uname
:
7916 /* no need to transcode because we use the linux syscall */
7918 struct new_utsname
* buf
;
7920 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
7922 ret
= get_errno(sys_uname(buf
));
7923 if (!is_error(ret
)) {
7924 /* Overrite the native machine name with whatever is being
7926 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
7927 /* Allow the user to override the reported release. */
7928 if (qemu_uname_release
&& *qemu_uname_release
)
7929 strcpy (buf
->release
, qemu_uname_release
);
7931 unlock_user_struct(buf
, arg1
, 1);
7935 case TARGET_NR_modify_ldt
:
7936 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
7938 #if !defined(TARGET_X86_64)
7939 case TARGET_NR_vm86old
:
7941 case TARGET_NR_vm86
:
7942 ret
= do_vm86(cpu_env
, arg1
, arg2
);
7946 case TARGET_NR_adjtimex
:
7948 #ifdef TARGET_NR_create_module
7949 case TARGET_NR_create_module
:
7951 case TARGET_NR_init_module
:
7952 case TARGET_NR_delete_module
:
7953 #ifdef TARGET_NR_get_kernel_syms
7954 case TARGET_NR_get_kernel_syms
:
7957 case TARGET_NR_quotactl
:
7959 case TARGET_NR_getpgid
:
7960 ret
= get_errno(getpgid(arg1
));
7962 case TARGET_NR_fchdir
:
7963 ret
= get_errno(fchdir(arg1
));
7965 #ifdef TARGET_NR_bdflush /* not on x86_64 */
7966 case TARGET_NR_bdflush
:
7969 #ifdef TARGET_NR_sysfs
7970 case TARGET_NR_sysfs
:
7973 case TARGET_NR_personality
:
7974 ret
= get_errno(personality(arg1
));
7976 #ifdef TARGET_NR_afs_syscall
7977 case TARGET_NR_afs_syscall
:
7980 #ifdef TARGET_NR__llseek /* Not on alpha */
7981 case TARGET_NR__llseek
:
7984 #if !defined(__NR_llseek)
7985 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | arg3
, arg5
);
7987 ret
= get_errno(res
);
7992 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
7994 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
8000 #ifdef TARGET_NR_getdents
8001 case TARGET_NR_getdents
:
8002 #ifdef __NR_getdents
8003 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
8005 struct target_dirent
*target_dirp
;
8006 struct linux_dirent
*dirp
;
8007 abi_long count
= arg3
;
8009 dirp
= g_try_malloc(count
);
8011 ret
= -TARGET_ENOMEM
;
8015 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
8016 if (!is_error(ret
)) {
8017 struct linux_dirent
*de
;
8018 struct target_dirent
*tde
;
8020 int reclen
, treclen
;
8021 int count1
, tnamelen
;
8025 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
8029 reclen
= de
->d_reclen
;
8030 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
8031 assert(tnamelen
>= 0);
8032 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
8033 assert(count1
+ treclen
<= count
);
8034 tde
->d_reclen
= tswap16(treclen
);
8035 tde
->d_ino
= tswapal(de
->d_ino
);
8036 tde
->d_off
= tswapal(de
->d_off
);
8037 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
8038 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
8040 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
8044 unlock_user(target_dirp
, arg2
, ret
);
8050 struct linux_dirent
*dirp
;
8051 abi_long count
= arg3
;
8053 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
8055 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
8056 if (!is_error(ret
)) {
8057 struct linux_dirent
*de
;
8062 reclen
= de
->d_reclen
;
8065 de
->d_reclen
= tswap16(reclen
);
8066 tswapls(&de
->d_ino
);
8067 tswapls(&de
->d_off
);
8068 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
8072 unlock_user(dirp
, arg2
, ret
);
8076 /* Implement getdents in terms of getdents64 */
8078 struct linux_dirent64
*dirp
;
8079 abi_long count
= arg3
;
8081 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
8085 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
8086 if (!is_error(ret
)) {
8087 /* Convert the dirent64 structs to target dirent. We do this
8088 * in-place, since we can guarantee that a target_dirent is no
8089 * larger than a dirent64; however this means we have to be
8090 * careful to read everything before writing in the new format.
8092 struct linux_dirent64
*de
;
8093 struct target_dirent
*tde
;
8098 tde
= (struct target_dirent
*)dirp
;
8100 int namelen
, treclen
;
8101 int reclen
= de
->d_reclen
;
8102 uint64_t ino
= de
->d_ino
;
8103 int64_t off
= de
->d_off
;
8104 uint8_t type
= de
->d_type
;
8106 namelen
= strlen(de
->d_name
);
8107 treclen
= offsetof(struct target_dirent
, d_name
)
8109 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
8111 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
8112 tde
->d_ino
= tswapal(ino
);
8113 tde
->d_off
= tswapal(off
);
8114 tde
->d_reclen
= tswap16(treclen
);
8115 /* The target_dirent type is in what was formerly a padding
8116 * byte at the end of the structure:
8118 *(((char *)tde
) + treclen
- 1) = type
;
8120 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
8121 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
8127 unlock_user(dirp
, arg2
, ret
);
8131 #endif /* TARGET_NR_getdents */
8132 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8133 case TARGET_NR_getdents64
:
8135 struct linux_dirent64
*dirp
;
8136 abi_long count
= arg3
;
8137 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
8139 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
8140 if (!is_error(ret
)) {
8141 struct linux_dirent64
*de
;
8146 reclen
= de
->d_reclen
;
8149 de
->d_reclen
= tswap16(reclen
);
8150 tswap64s((uint64_t *)&de
->d_ino
);
8151 tswap64s((uint64_t *)&de
->d_off
);
8152 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
8156 unlock_user(dirp
, arg2
, ret
);
8159 #endif /* TARGET_NR_getdents64 */
8160 #if defined(TARGET_NR__newselect)
8161 case TARGET_NR__newselect
:
8162 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
8165 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
8166 # ifdef TARGET_NR_poll
8167 case TARGET_NR_poll
:
8169 # ifdef TARGET_NR_ppoll
8170 case TARGET_NR_ppoll
:
8173 struct target_pollfd
*target_pfd
;
8174 unsigned int nfds
= arg2
;
8182 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
8183 sizeof(struct target_pollfd
) * nfds
, 1);
8188 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
8189 for (i
= 0; i
< nfds
; i
++) {
8190 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
8191 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
8195 # ifdef TARGET_NR_ppoll
8196 if (num
== TARGET_NR_ppoll
) {
8197 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
8198 target_sigset_t
*target_set
;
8199 sigset_t _set
, *set
= &_set
;
8202 if (target_to_host_timespec(timeout_ts
, arg3
)) {
8203 unlock_user(target_pfd
, arg1
, 0);
8211 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
8213 unlock_user(target_pfd
, arg1
, 0);
8216 target_to_host_sigset(set
, target_set
);
8221 ret
= get_errno(sys_ppoll(pfd
, nfds
, timeout_ts
, set
, _NSIG
/8));
8223 if (!is_error(ret
) && arg3
) {
8224 host_to_target_timespec(arg3
, timeout_ts
);
8227 unlock_user(target_set
, arg4
, 0);
8231 ret
= get_errno(poll(pfd
, nfds
, timeout
));
8233 if (!is_error(ret
)) {
8234 for(i
= 0; i
< nfds
; i
++) {
8235 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
8238 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
8242 case TARGET_NR_flock
:
8243 /* NOTE: the flock constant seems to be the same for every
8245 ret
= get_errno(flock(arg1
, arg2
));
8247 case TARGET_NR_readv
:
8249 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
8251 ret
= get_errno(readv(arg1
, vec
, arg3
));
8252 unlock_iovec(vec
, arg2
, arg3
, 1);
8254 ret
= -host_to_target_errno(errno
);
8258 case TARGET_NR_writev
:
8260 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
8262 ret
= get_errno(writev(arg1
, vec
, arg3
));
8263 unlock_iovec(vec
, arg2
, arg3
, 0);
8265 ret
= -host_to_target_errno(errno
);
8269 case TARGET_NR_getsid
:
8270 ret
= get_errno(getsid(arg1
));
8272 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
8273 case TARGET_NR_fdatasync
:
8274 ret
= get_errno(fdatasync(arg1
));
8277 #ifdef TARGET_NR__sysctl
8278 case TARGET_NR__sysctl
:
8279 /* We don't implement this, but ENOTDIR is always a safe
8281 ret
= -TARGET_ENOTDIR
;
8284 case TARGET_NR_sched_getaffinity
:
8286 unsigned int mask_size
;
8287 unsigned long *mask
;
8290 * sched_getaffinity needs multiples of ulong, so need to take
8291 * care of mismatches between target ulong and host ulong sizes.
8293 if (arg2
& (sizeof(abi_ulong
) - 1)) {
8294 ret
= -TARGET_EINVAL
;
8297 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
8299 mask
= alloca(mask_size
);
8300 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
8302 if (!is_error(ret
)) {
8304 /* More data returned than the caller's buffer will fit.
8305 * This only happens if sizeof(abi_long) < sizeof(long)
8306 * and the caller passed us a buffer holding an odd number
8307 * of abi_longs. If the host kernel is actually using the
8308 * extra 4 bytes then fail EINVAL; otherwise we can just
8309 * ignore them and only copy the interesting part.
8311 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
8312 if (numcpus
> arg2
* 8) {
8313 ret
= -TARGET_EINVAL
;
8319 if (copy_to_user(arg3
, mask
, ret
)) {
8325 case TARGET_NR_sched_setaffinity
:
8327 unsigned int mask_size
;
8328 unsigned long *mask
;
8331 * sched_setaffinity needs multiples of ulong, so need to take
8332 * care of mismatches between target ulong and host ulong sizes.
8334 if (arg2
& (sizeof(abi_ulong
) - 1)) {
8335 ret
= -TARGET_EINVAL
;
8338 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
8340 mask
= alloca(mask_size
);
8341 if (!lock_user_struct(VERIFY_READ
, p
, arg3
, 1)) {
8344 memcpy(mask
, p
, arg2
);
8345 unlock_user_struct(p
, arg2
, 0);
8347 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
8350 case TARGET_NR_sched_setparam
:
8352 struct sched_param
*target_schp
;
8353 struct sched_param schp
;
8356 return -TARGET_EINVAL
;
8358 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
8360 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
8361 unlock_user_struct(target_schp
, arg2
, 0);
8362 ret
= get_errno(sched_setparam(arg1
, &schp
));
8365 case TARGET_NR_sched_getparam
:
8367 struct sched_param
*target_schp
;
8368 struct sched_param schp
;
8371 return -TARGET_EINVAL
;
8373 ret
= get_errno(sched_getparam(arg1
, &schp
));
8374 if (!is_error(ret
)) {
8375 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
8377 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
8378 unlock_user_struct(target_schp
, arg2
, 1);
8382 case TARGET_NR_sched_setscheduler
:
8384 struct sched_param
*target_schp
;
8385 struct sched_param schp
;
8387 return -TARGET_EINVAL
;
8389 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
8391 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
8392 unlock_user_struct(target_schp
, arg3
, 0);
8393 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
8396 case TARGET_NR_sched_getscheduler
:
8397 ret
= get_errno(sched_getscheduler(arg1
));
8399 case TARGET_NR_sched_yield
:
8400 ret
= get_errno(sched_yield());
8402 case TARGET_NR_sched_get_priority_max
:
8403 ret
= get_errno(sched_get_priority_max(arg1
));
8405 case TARGET_NR_sched_get_priority_min
:
8406 ret
= get_errno(sched_get_priority_min(arg1
));
8408 case TARGET_NR_sched_rr_get_interval
:
8411 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
8412 if (!is_error(ret
)) {
8413 ret
= host_to_target_timespec(arg2
, &ts
);
8417 case TARGET_NR_nanosleep
:
8419 struct timespec req
, rem
;
8420 target_to_host_timespec(&req
, arg1
);
8421 ret
= get_errno(nanosleep(&req
, &rem
));
8422 if (is_error(ret
) && arg2
) {
8423 host_to_target_timespec(arg2
, &rem
);
8427 #ifdef TARGET_NR_query_module
8428 case TARGET_NR_query_module
:
8431 #ifdef TARGET_NR_nfsservctl
8432 case TARGET_NR_nfsservctl
:
8435 case TARGET_NR_prctl
:
8437 case PR_GET_PDEATHSIG
:
8440 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
8441 if (!is_error(ret
) && arg2
8442 && put_user_ual(deathsig
, arg2
)) {
8450 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
8454 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
8456 unlock_user(name
, arg2
, 16);
8461 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
8465 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
8467 unlock_user(name
, arg2
, 0);
8472 /* Most prctl options have no pointer arguments */
8473 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
8477 #ifdef TARGET_NR_arch_prctl
8478 case TARGET_NR_arch_prctl
:
8479 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
8480 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
8486 #ifdef TARGET_NR_pread64
8487 case TARGET_NR_pread64
:
8488 if (regpairs_aligned(cpu_env
)) {
8492 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
8494 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
8495 unlock_user(p
, arg2
, ret
);
8497 case TARGET_NR_pwrite64
:
8498 if (regpairs_aligned(cpu_env
)) {
8502 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
8504 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
8505 unlock_user(p
, arg2
, 0);
8508 case TARGET_NR_getcwd
:
8509 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
8511 ret
= get_errno(sys_getcwd1(p
, arg2
));
8512 unlock_user(p
, arg1
, ret
);
8514 case TARGET_NR_capget
:
8515 case TARGET_NR_capset
:
8517 struct target_user_cap_header
*target_header
;
8518 struct target_user_cap_data
*target_data
= NULL
;
8519 struct __user_cap_header_struct header
;
8520 struct __user_cap_data_struct data
[2];
8521 struct __user_cap_data_struct
*dataptr
= NULL
;
8522 int i
, target_datalen
;
8525 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
8528 header
.version
= tswap32(target_header
->version
);
8529 header
.pid
= tswap32(target_header
->pid
);
8531 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
8532 /* Version 2 and up takes pointer to two user_data structs */
8536 target_datalen
= sizeof(*target_data
) * data_items
;
8539 if (num
== TARGET_NR_capget
) {
8540 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
8542 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
8545 unlock_user_struct(target_header
, arg1
, 0);
8549 if (num
== TARGET_NR_capset
) {
8550 for (i
= 0; i
< data_items
; i
++) {
8551 data
[i
].effective
= tswap32(target_data
[i
].effective
);
8552 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
8553 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
8560 if (num
== TARGET_NR_capget
) {
8561 ret
= get_errno(capget(&header
, dataptr
));
8563 ret
= get_errno(capset(&header
, dataptr
));
8566 /* The kernel always updates version for both capget and capset */
8567 target_header
->version
= tswap32(header
.version
);
8568 unlock_user_struct(target_header
, arg1
, 1);
8571 if (num
== TARGET_NR_capget
) {
8572 for (i
= 0; i
< data_items
; i
++) {
8573 target_data
[i
].effective
= tswap32(data
[i
].effective
);
8574 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
8575 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
8577 unlock_user(target_data
, arg2
, target_datalen
);
8579 unlock_user(target_data
, arg2
, 0);
8584 case TARGET_NR_sigaltstack
:
8585 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUArchState
*)cpu_env
));
8588 #ifdef CONFIG_SENDFILE
8589 case TARGET_NR_sendfile
:
8594 ret
= get_user_sal(off
, arg3
);
8595 if (is_error(ret
)) {
8600 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
8601 if (!is_error(ret
) && arg3
) {
8602 abi_long ret2
= put_user_sal(off
, arg3
);
8603 if (is_error(ret2
)) {
8609 #ifdef TARGET_NR_sendfile64
8610 case TARGET_NR_sendfile64
:
8615 ret
= get_user_s64(off
, arg3
);
8616 if (is_error(ret
)) {
8621 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
8622 if (!is_error(ret
) && arg3
) {
8623 abi_long ret2
= put_user_s64(off
, arg3
);
8624 if (is_error(ret2
)) {
8632 case TARGET_NR_sendfile
:
8633 #ifdef TARGET_NR_sendfile64
8634 case TARGET_NR_sendfile64
:
8639 #ifdef TARGET_NR_getpmsg
8640 case TARGET_NR_getpmsg
:
8643 #ifdef TARGET_NR_putpmsg
8644 case TARGET_NR_putpmsg
:
8647 #ifdef TARGET_NR_vfork
8648 case TARGET_NR_vfork
:
8649 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
8653 #ifdef TARGET_NR_ugetrlimit
8654 case TARGET_NR_ugetrlimit
:
8657 int resource
= target_to_host_resource(arg1
);
8658 ret
= get_errno(getrlimit(resource
, &rlim
));
8659 if (!is_error(ret
)) {
8660 struct target_rlimit
*target_rlim
;
8661 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
8663 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
8664 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
8665 unlock_user_struct(target_rlim
, arg2
, 1);
8670 #ifdef TARGET_NR_truncate64
8671 case TARGET_NR_truncate64
:
8672 if (!(p
= lock_user_string(arg1
)))
8674 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
8675 unlock_user(p
, arg1
, 0);
8678 #ifdef TARGET_NR_ftruncate64
8679 case TARGET_NR_ftruncate64
:
8680 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
8683 #ifdef TARGET_NR_stat64
8684 case TARGET_NR_stat64
:
8685 if (!(p
= lock_user_string(arg1
)))
8687 ret
= get_errno(stat(path(p
), &st
));
8688 unlock_user(p
, arg1
, 0);
8690 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
8693 #ifdef TARGET_NR_lstat64
8694 case TARGET_NR_lstat64
:
8695 if (!(p
= lock_user_string(arg1
)))
8697 ret
= get_errno(lstat(path(p
), &st
));
8698 unlock_user(p
, arg1
, 0);
8700 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
8703 #ifdef TARGET_NR_fstat64
8704 case TARGET_NR_fstat64
:
8705 ret
= get_errno(fstat(arg1
, &st
));
8707 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
8710 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
8711 #ifdef TARGET_NR_fstatat64
8712 case TARGET_NR_fstatat64
:
8714 #ifdef TARGET_NR_newfstatat
8715 case TARGET_NR_newfstatat
:
8717 if (!(p
= lock_user_string(arg2
)))
8719 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
8721 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
8724 #ifdef TARGET_NR_lchown
8725 case TARGET_NR_lchown
:
8726 if (!(p
= lock_user_string(arg1
)))
8728 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
8729 unlock_user(p
, arg1
, 0);
8732 #ifdef TARGET_NR_getuid
8733 case TARGET_NR_getuid
:
8734 ret
= get_errno(high2lowuid(getuid()));
8737 #ifdef TARGET_NR_getgid
8738 case TARGET_NR_getgid
:
8739 ret
= get_errno(high2lowgid(getgid()));
8742 #ifdef TARGET_NR_geteuid
8743 case TARGET_NR_geteuid
:
8744 ret
= get_errno(high2lowuid(geteuid()));
8747 #ifdef TARGET_NR_getegid
8748 case TARGET_NR_getegid
:
8749 ret
= get_errno(high2lowgid(getegid()));
8752 case TARGET_NR_setreuid
:
8753 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
8755 case TARGET_NR_setregid
:
8756 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
8758 case TARGET_NR_getgroups
:
8760 int gidsetsize
= arg1
;
8761 target_id
*target_grouplist
;
8765 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
8766 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
8767 if (gidsetsize
== 0)
8769 if (!is_error(ret
)) {
8770 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
8771 if (!target_grouplist
)
8773 for(i
= 0;i
< ret
; i
++)
8774 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
8775 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
8779 case TARGET_NR_setgroups
:
8781 int gidsetsize
= arg1
;
8782 target_id
*target_grouplist
;
8783 gid_t
*grouplist
= NULL
;
8786 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
8787 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
8788 if (!target_grouplist
) {
8789 ret
= -TARGET_EFAULT
;
8792 for (i
= 0; i
< gidsetsize
; i
++) {
8793 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
8795 unlock_user(target_grouplist
, arg2
, 0);
8797 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
8800 case TARGET_NR_fchown
:
8801 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
8803 #if defined(TARGET_NR_fchownat)
8804 case TARGET_NR_fchownat
:
8805 if (!(p
= lock_user_string(arg2
)))
8807 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
8808 low2highgid(arg4
), arg5
));
8809 unlock_user(p
, arg2
, 0);
8812 #ifdef TARGET_NR_setresuid
8813 case TARGET_NR_setresuid
:
8814 ret
= get_errno(setresuid(low2highuid(arg1
),
8816 low2highuid(arg3
)));
8819 #ifdef TARGET_NR_getresuid
8820 case TARGET_NR_getresuid
:
8822 uid_t ruid
, euid
, suid
;
8823 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
8824 if (!is_error(ret
)) {
8825 if (put_user_id(high2lowuid(ruid
), arg1
)
8826 || put_user_id(high2lowuid(euid
), arg2
)
8827 || put_user_id(high2lowuid(suid
), arg3
))
8833 #ifdef TARGET_NR_getresgid
8834 case TARGET_NR_setresgid
:
8835 ret
= get_errno(setresgid(low2highgid(arg1
),
8837 low2highgid(arg3
)));
8840 #ifdef TARGET_NR_getresgid
8841 case TARGET_NR_getresgid
:
8843 gid_t rgid
, egid
, sgid
;
8844 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
8845 if (!is_error(ret
)) {
8846 if (put_user_id(high2lowgid(rgid
), arg1
)
8847 || put_user_id(high2lowgid(egid
), arg2
)
8848 || put_user_id(high2lowgid(sgid
), arg3
))
8854 #ifdef TARGET_NR_chown
8855 case TARGET_NR_chown
:
8856 if (!(p
= lock_user_string(arg1
)))
8858 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
8859 unlock_user(p
, arg1
, 0);
8862 case TARGET_NR_setuid
:
8863 ret
= get_errno(setuid(low2highuid(arg1
)));
8865 case TARGET_NR_setgid
:
8866 ret
= get_errno(setgid(low2highgid(arg1
)));
8868 case TARGET_NR_setfsuid
:
8869 ret
= get_errno(setfsuid(arg1
));
8871 case TARGET_NR_setfsgid
:
8872 ret
= get_errno(setfsgid(arg1
));
8875 #ifdef TARGET_NR_lchown32
8876 case TARGET_NR_lchown32
:
8877 if (!(p
= lock_user_string(arg1
)))
8879 ret
= get_errno(lchown(p
, arg2
, arg3
));
8880 unlock_user(p
, arg1
, 0);
8883 #ifdef TARGET_NR_getuid32
8884 case TARGET_NR_getuid32
:
8885 ret
= get_errno(getuid());
8889 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
8890 /* Alpha specific */
8891 case TARGET_NR_getxuid
:
8895 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
8897 ret
= get_errno(getuid());
8900 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
8901 /* Alpha specific */
8902 case TARGET_NR_getxgid
:
8906 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
8908 ret
= get_errno(getgid());
8911 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
8912 /* Alpha specific */
8913 case TARGET_NR_osf_getsysinfo
:
8914 ret
= -TARGET_EOPNOTSUPP
;
8916 case TARGET_GSI_IEEE_FP_CONTROL
:
8918 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
8920 /* Copied from linux ieee_fpcr_to_swcr. */
8921 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
8922 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
8923 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
8924 | SWCR_TRAP_ENABLE_DZE
8925 | SWCR_TRAP_ENABLE_OVF
);
8926 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
8927 | SWCR_TRAP_ENABLE_INE
);
8928 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
8929 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
8931 if (put_user_u64 (swcr
, arg2
))
8937 /* case GSI_IEEE_STATE_AT_SIGNAL:
8938 -- Not implemented in linux kernel.
8940 -- Retrieves current unaligned access state; not much used.
8942 -- Retrieves implver information; surely not used.
8944 -- Grabs a copy of the HWRPB; surely not used.
8949 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
8950 /* Alpha specific */
8951 case TARGET_NR_osf_setsysinfo
:
8952 ret
= -TARGET_EOPNOTSUPP
;
8954 case TARGET_SSI_IEEE_FP_CONTROL
:
8956 uint64_t swcr
, fpcr
, orig_fpcr
;
8958 if (get_user_u64 (swcr
, arg2
)) {
8961 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
8962 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
8964 /* Copied from linux ieee_swcr_to_fpcr. */
8965 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
8966 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
8967 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
8968 | SWCR_TRAP_ENABLE_DZE
8969 | SWCR_TRAP_ENABLE_OVF
)) << 48;
8970 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
8971 | SWCR_TRAP_ENABLE_INE
)) << 57;
8972 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
8973 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
8975 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
8980 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
8982 uint64_t exc
, fpcr
, orig_fpcr
;
8985 if (get_user_u64(exc
, arg2
)) {
8989 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
8991 /* We only add to the exception status here. */
8992 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
8994 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
8997 /* Old exceptions are not signaled. */
8998 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
9000 /* If any exceptions set by this call,
9001 and are unmasked, send a signal. */
9003 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
9004 si_code
= TARGET_FPE_FLTRES
;
9006 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
9007 si_code
= TARGET_FPE_FLTUND
;
9009 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
9010 si_code
= TARGET_FPE_FLTOVF
;
9012 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
9013 si_code
= TARGET_FPE_FLTDIV
;
9015 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
9016 si_code
= TARGET_FPE_FLTINV
;
9019 target_siginfo_t info
;
9020 info
.si_signo
= SIGFPE
;
9022 info
.si_code
= si_code
;
9023 info
._sifields
._sigfault
._addr
9024 = ((CPUArchState
*)cpu_env
)->pc
;
9025 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
9030 /* case SSI_NVPAIRS:
9031 -- Used with SSIN_UACPROC to enable unaligned accesses.
9032 case SSI_IEEE_STATE_AT_SIGNAL:
9033 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
9034 -- Not implemented in linux kernel
9039 #ifdef TARGET_NR_osf_sigprocmask
9040 /* Alpha specific. */
9041 case TARGET_NR_osf_sigprocmask
:
9045 sigset_t set
, oldset
;
9048 case TARGET_SIG_BLOCK
:
9051 case TARGET_SIG_UNBLOCK
:
9054 case TARGET_SIG_SETMASK
:
9058 ret
= -TARGET_EINVAL
;
9062 target_to_host_old_sigset(&set
, &mask
);
9063 do_sigprocmask(how
, &set
, &oldset
);
9064 host_to_target_old_sigset(&mask
, &oldset
);
9070 #ifdef TARGET_NR_getgid32
9071 case TARGET_NR_getgid32
:
9072 ret
= get_errno(getgid());
9075 #ifdef TARGET_NR_geteuid32
9076 case TARGET_NR_geteuid32
:
9077 ret
= get_errno(geteuid());
9080 #ifdef TARGET_NR_getegid32
9081 case TARGET_NR_getegid32
:
9082 ret
= get_errno(getegid());
9085 #ifdef TARGET_NR_setreuid32
9086 case TARGET_NR_setreuid32
:
9087 ret
= get_errno(setreuid(arg1
, arg2
));
9090 #ifdef TARGET_NR_setregid32
9091 case TARGET_NR_setregid32
:
9092 ret
= get_errno(setregid(arg1
, arg2
));
9095 #ifdef TARGET_NR_getgroups32
9096 case TARGET_NR_getgroups32
:
9098 int gidsetsize
= arg1
;
9099 uint32_t *target_grouplist
;
9103 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
9104 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
9105 if (gidsetsize
== 0)
9107 if (!is_error(ret
)) {
9108 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
9109 if (!target_grouplist
) {
9110 ret
= -TARGET_EFAULT
;
9113 for(i
= 0;i
< ret
; i
++)
9114 target_grouplist
[i
] = tswap32(grouplist
[i
]);
9115 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
9120 #ifdef TARGET_NR_setgroups32
9121 case TARGET_NR_setgroups32
:
9123 int gidsetsize
= arg1
;
9124 uint32_t *target_grouplist
;
9128 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
9129 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
9130 if (!target_grouplist
) {
9131 ret
= -TARGET_EFAULT
;
9134 for(i
= 0;i
< gidsetsize
; i
++)
9135 grouplist
[i
] = tswap32(target_grouplist
[i
]);
9136 unlock_user(target_grouplist
, arg2
, 0);
9137 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
9141 #ifdef TARGET_NR_fchown32
9142 case TARGET_NR_fchown32
:
9143 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
9146 #ifdef TARGET_NR_setresuid32
9147 case TARGET_NR_setresuid32
:
9148 ret
= get_errno(setresuid(arg1
, arg2
, arg3
));
9151 #ifdef TARGET_NR_getresuid32
9152 case TARGET_NR_getresuid32
:
9154 uid_t ruid
, euid
, suid
;
9155 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
9156 if (!is_error(ret
)) {
9157 if (put_user_u32(ruid
, arg1
)
9158 || put_user_u32(euid
, arg2
)
9159 || put_user_u32(suid
, arg3
))
9165 #ifdef TARGET_NR_setresgid32
9166 case TARGET_NR_setresgid32
:
9167 ret
= get_errno(setresgid(arg1
, arg2
, arg3
));
9170 #ifdef TARGET_NR_getresgid32
9171 case TARGET_NR_getresgid32
:
9173 gid_t rgid
, egid
, sgid
;
9174 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
9175 if (!is_error(ret
)) {
9176 if (put_user_u32(rgid
, arg1
)
9177 || put_user_u32(egid
, arg2
)
9178 || put_user_u32(sgid
, arg3
))
9184 #ifdef TARGET_NR_chown32
9185 case TARGET_NR_chown32
:
9186 if (!(p
= lock_user_string(arg1
)))
9188 ret
= get_errno(chown(p
, arg2
, arg3
));
9189 unlock_user(p
, arg1
, 0);
9192 #ifdef TARGET_NR_setuid32
9193 case TARGET_NR_setuid32
:
9194 ret
= get_errno(setuid(arg1
));
9197 #ifdef TARGET_NR_setgid32
9198 case TARGET_NR_setgid32
:
9199 ret
= get_errno(setgid(arg1
));
9202 #ifdef TARGET_NR_setfsuid32
9203 case TARGET_NR_setfsuid32
:
9204 ret
= get_errno(setfsuid(arg1
));
9207 #ifdef TARGET_NR_setfsgid32
9208 case TARGET_NR_setfsgid32
:
9209 ret
= get_errno(setfsgid(arg1
));
9213 case TARGET_NR_pivot_root
:
9215 #ifdef TARGET_NR_mincore
9216 case TARGET_NR_mincore
:
9219 ret
= -TARGET_EFAULT
;
9220 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
9222 if (!(p
= lock_user_string(arg3
)))
9224 ret
= get_errno(mincore(a
, arg2
, p
));
9225 unlock_user(p
, arg3
, ret
);
9227 unlock_user(a
, arg1
, 0);
9231 #ifdef TARGET_NR_arm_fadvise64_64
9232 case TARGET_NR_arm_fadvise64_64
:
9235 * arm_fadvise64_64 looks like fadvise64_64 but
9236 * with different argument order
9244 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
9245 #ifdef TARGET_NR_fadvise64_64
9246 case TARGET_NR_fadvise64_64
:
9248 #ifdef TARGET_NR_fadvise64
9249 case TARGET_NR_fadvise64
:
9253 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
9254 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
9255 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
9256 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
9260 ret
= -posix_fadvise(arg1
, arg2
, arg3
, arg4
);
9263 #ifdef TARGET_NR_madvise
9264 case TARGET_NR_madvise
:
9265 /* A straight passthrough may not be safe because qemu sometimes
9266 turns private file-backed mappings into anonymous mappings.
9267 This will break MADV_DONTNEED.
9268 This is a hint, so ignoring and returning success is ok. */
9272 #if TARGET_ABI_BITS == 32
9273 case TARGET_NR_fcntl64
:
9277 struct target_flock64
*target_fl
;
9279 struct target_eabi_flock64
*target_efl
;
9282 cmd
= target_to_host_fcntl_cmd(arg2
);
9283 if (cmd
== -TARGET_EINVAL
) {
9289 case TARGET_F_GETLK64
:
9291 if (((CPUARMState
*)cpu_env
)->eabi
) {
9292 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
9294 fl
.l_type
= tswap16(target_efl
->l_type
);
9295 fl
.l_whence
= tswap16(target_efl
->l_whence
);
9296 fl
.l_start
= tswap64(target_efl
->l_start
);
9297 fl
.l_len
= tswap64(target_efl
->l_len
);
9298 fl
.l_pid
= tswap32(target_efl
->l_pid
);
9299 unlock_user_struct(target_efl
, arg3
, 0);
9303 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
9305 fl
.l_type
= tswap16(target_fl
->l_type
);
9306 fl
.l_whence
= tswap16(target_fl
->l_whence
);
9307 fl
.l_start
= tswap64(target_fl
->l_start
);
9308 fl
.l_len
= tswap64(target_fl
->l_len
);
9309 fl
.l_pid
= tswap32(target_fl
->l_pid
);
9310 unlock_user_struct(target_fl
, arg3
, 0);
9312 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
9315 if (((CPUARMState
*)cpu_env
)->eabi
) {
9316 if (!lock_user_struct(VERIFY_WRITE
, target_efl
, arg3
, 0))
9318 target_efl
->l_type
= tswap16(fl
.l_type
);
9319 target_efl
->l_whence
= tswap16(fl
.l_whence
);
9320 target_efl
->l_start
= tswap64(fl
.l_start
);
9321 target_efl
->l_len
= tswap64(fl
.l_len
);
9322 target_efl
->l_pid
= tswap32(fl
.l_pid
);
9323 unlock_user_struct(target_efl
, arg3
, 1);
9327 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg3
, 0))
9329 target_fl
->l_type
= tswap16(fl
.l_type
);
9330 target_fl
->l_whence
= tswap16(fl
.l_whence
);
9331 target_fl
->l_start
= tswap64(fl
.l_start
);
9332 target_fl
->l_len
= tswap64(fl
.l_len
);
9333 target_fl
->l_pid
= tswap32(fl
.l_pid
);
9334 unlock_user_struct(target_fl
, arg3
, 1);
9339 case TARGET_F_SETLK64
:
9340 case TARGET_F_SETLKW64
:
9342 if (((CPUARMState
*)cpu_env
)->eabi
) {
9343 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
9345 fl
.l_type
= tswap16(target_efl
->l_type
);
9346 fl
.l_whence
= tswap16(target_efl
->l_whence
);
9347 fl
.l_start
= tswap64(target_efl
->l_start
);
9348 fl
.l_len
= tswap64(target_efl
->l_len
);
9349 fl
.l_pid
= tswap32(target_efl
->l_pid
);
9350 unlock_user_struct(target_efl
, arg3
, 0);
9354 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
9356 fl
.l_type
= tswap16(target_fl
->l_type
);
9357 fl
.l_whence
= tswap16(target_fl
->l_whence
);
9358 fl
.l_start
= tswap64(target_fl
->l_start
);
9359 fl
.l_len
= tswap64(target_fl
->l_len
);
9360 fl
.l_pid
= tswap32(target_fl
->l_pid
);
9361 unlock_user_struct(target_fl
, arg3
, 0);
9363 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
9366 ret
= do_fcntl(arg1
, arg2
, arg3
);
9372 #ifdef TARGET_NR_cacheflush
9373 case TARGET_NR_cacheflush
:
9374 /* self-modifying code is handled automatically, so nothing needed */
9378 #ifdef TARGET_NR_security
9379 case TARGET_NR_security
:
9382 #ifdef TARGET_NR_getpagesize
9383 case TARGET_NR_getpagesize
:
9384 ret
= TARGET_PAGE_SIZE
;
9387 case TARGET_NR_gettid
:
9388 ret
= get_errno(gettid());
9390 #ifdef TARGET_NR_readahead
9391 case TARGET_NR_readahead
:
9392 #if TARGET_ABI_BITS == 32
9393 if (regpairs_aligned(cpu_env
)) {
9398 ret
= get_errno(readahead(arg1
, ((off64_t
)arg3
<< 32) | arg2
, arg4
));
9400 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
9405 #ifdef TARGET_NR_setxattr
9406 case TARGET_NR_listxattr
:
9407 case TARGET_NR_llistxattr
:
9411 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9413 ret
= -TARGET_EFAULT
;
9417 p
= lock_user_string(arg1
);
9419 if (num
== TARGET_NR_listxattr
) {
9420 ret
= get_errno(listxattr(p
, b
, arg3
));
9422 ret
= get_errno(llistxattr(p
, b
, arg3
));
9425 ret
= -TARGET_EFAULT
;
9427 unlock_user(p
, arg1
, 0);
9428 unlock_user(b
, arg2
, arg3
);
9431 case TARGET_NR_flistxattr
:
9435 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9437 ret
= -TARGET_EFAULT
;
9441 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
9442 unlock_user(b
, arg2
, arg3
);
9445 case TARGET_NR_setxattr
:
9446 case TARGET_NR_lsetxattr
:
9448 void *p
, *n
, *v
= 0;
9450 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
9452 ret
= -TARGET_EFAULT
;
9456 p
= lock_user_string(arg1
);
9457 n
= lock_user_string(arg2
);
9459 if (num
== TARGET_NR_setxattr
) {
9460 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
9462 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
9465 ret
= -TARGET_EFAULT
;
9467 unlock_user(p
, arg1
, 0);
9468 unlock_user(n
, arg2
, 0);
9469 unlock_user(v
, arg3
, 0);
9472 case TARGET_NR_fsetxattr
:
9476 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
9478 ret
= -TARGET_EFAULT
;
9482 n
= lock_user_string(arg2
);
9484 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
9486 ret
= -TARGET_EFAULT
;
9488 unlock_user(n
, arg2
, 0);
9489 unlock_user(v
, arg3
, 0);
9492 case TARGET_NR_getxattr
:
9493 case TARGET_NR_lgetxattr
:
9495 void *p
, *n
, *v
= 0;
9497 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
9499 ret
= -TARGET_EFAULT
;
9503 p
= lock_user_string(arg1
);
9504 n
= lock_user_string(arg2
);
9506 if (num
== TARGET_NR_getxattr
) {
9507 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
9509 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
9512 ret
= -TARGET_EFAULT
;
9514 unlock_user(p
, arg1
, 0);
9515 unlock_user(n
, arg2
, 0);
9516 unlock_user(v
, arg3
, arg4
);
9519 case TARGET_NR_fgetxattr
:
9523 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
9525 ret
= -TARGET_EFAULT
;
9529 n
= lock_user_string(arg2
);
9531 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
9533 ret
= -TARGET_EFAULT
;
9535 unlock_user(n
, arg2
, 0);
9536 unlock_user(v
, arg3
, arg4
);
9539 case TARGET_NR_removexattr
:
9540 case TARGET_NR_lremovexattr
:
9543 p
= lock_user_string(arg1
);
9544 n
= lock_user_string(arg2
);
9546 if (num
== TARGET_NR_removexattr
) {
9547 ret
= get_errno(removexattr(p
, n
));
9549 ret
= get_errno(lremovexattr(p
, n
));
9552 ret
= -TARGET_EFAULT
;
9554 unlock_user(p
, arg1
, 0);
9555 unlock_user(n
, arg2
, 0);
9558 case TARGET_NR_fremovexattr
:
9561 n
= lock_user_string(arg2
);
9563 ret
= get_errno(fremovexattr(arg1
, n
));
9565 ret
= -TARGET_EFAULT
;
9567 unlock_user(n
, arg2
, 0);
9571 #endif /* CONFIG_ATTR */
9572 #ifdef TARGET_NR_set_thread_area
9573 case TARGET_NR_set_thread_area
:
9574 #if defined(TARGET_MIPS)
9575 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
9578 #elif defined(TARGET_CRIS)
9580 ret
= -TARGET_EINVAL
;
9582 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
9586 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
9587 ret
= do_set_thread_area(cpu_env
, arg1
);
9589 #elif defined(TARGET_M68K)
9591 TaskState
*ts
= cpu
->opaque
;
9592 ts
->tp_value
= arg1
;
9597 goto unimplemented_nowarn
;
9600 #ifdef TARGET_NR_get_thread_area
9601 case TARGET_NR_get_thread_area
:
9602 #if defined(TARGET_I386) && defined(TARGET_ABI32)
9603 ret
= do_get_thread_area(cpu_env
, arg1
);
9605 #elif defined(TARGET_M68K)
9607 TaskState
*ts
= cpu
->opaque
;
9612 goto unimplemented_nowarn
;
9615 #ifdef TARGET_NR_getdomainname
9616 case TARGET_NR_getdomainname
:
9617 goto unimplemented_nowarn
;
9620 #ifdef TARGET_NR_clock_gettime
9621 case TARGET_NR_clock_gettime
:
9624 ret
= get_errno(clock_gettime(arg1
, &ts
));
9625 if (!is_error(ret
)) {
9626 host_to_target_timespec(arg2
, &ts
);
9631 #ifdef TARGET_NR_clock_getres
9632 case TARGET_NR_clock_getres
:
9635 ret
= get_errno(clock_getres(arg1
, &ts
));
9636 if (!is_error(ret
)) {
9637 host_to_target_timespec(arg2
, &ts
);
9642 #ifdef TARGET_NR_clock_nanosleep
9643 case TARGET_NR_clock_nanosleep
:
9646 target_to_host_timespec(&ts
, arg3
);
9647 ret
= get_errno(clock_nanosleep(arg1
, arg2
, &ts
, arg4
? &ts
: NULL
));
9649 host_to_target_timespec(arg4
, &ts
);
9651 #if defined(TARGET_PPC)
9652 /* clock_nanosleep is odd in that it returns positive errno values.
9653 * On PPC, CR0 bit 3 should be set in such a situation. */
9655 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
9662 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
9663 case TARGET_NR_set_tid_address
:
9664 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
9668 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
9669 case TARGET_NR_tkill
:
9670 ret
= get_errno(sys_tkill((int)arg1
, target_to_host_signal(arg2
)));
9674 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
9675 case TARGET_NR_tgkill
:
9676 ret
= get_errno(sys_tgkill((int)arg1
, (int)arg2
,
9677 target_to_host_signal(arg3
)));
9681 #ifdef TARGET_NR_set_robust_list
9682 case TARGET_NR_set_robust_list
:
9683 case TARGET_NR_get_robust_list
:
9684 /* The ABI for supporting robust futexes has userspace pass
9685 * the kernel a pointer to a linked list which is updated by
9686 * userspace after the syscall; the list is walked by the kernel
9687 * when the thread exits. Since the linked list in QEMU guest
9688 * memory isn't a valid linked list for the host and we have
9689 * no way to reliably intercept the thread-death event, we can't
9690 * support these. Silently return ENOSYS so that guest userspace
9691 * falls back to a non-robust futex implementation (which should
9692 * be OK except in the corner case of the guest crashing while
9693 * holding a mutex that is shared with another process via
9696 goto unimplemented_nowarn
;
9699 #if defined(TARGET_NR_utimensat)
9700 case TARGET_NR_utimensat
:
9702 struct timespec
*tsp
, ts
[2];
9706 target_to_host_timespec(ts
, arg3
);
9707 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
9711 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
9713 if (!(p
= lock_user_string(arg2
))) {
9714 ret
= -TARGET_EFAULT
;
9717 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
9718 unlock_user(p
, arg2
, 0);
9723 case TARGET_NR_futex
:
9724 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9726 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
9727 case TARGET_NR_inotify_init
:
9728 ret
= get_errno(sys_inotify_init());
9731 #ifdef CONFIG_INOTIFY1
9732 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
9733 case TARGET_NR_inotify_init1
:
9734 ret
= get_errno(sys_inotify_init1(arg1
));
9738 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
9739 case TARGET_NR_inotify_add_watch
:
9740 p
= lock_user_string(arg2
);
9741 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
9742 unlock_user(p
, arg2
, 0);
9745 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
9746 case TARGET_NR_inotify_rm_watch
:
9747 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
9751 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
9752 case TARGET_NR_mq_open
:
9754 struct mq_attr posix_mq_attr
, *attrp
;
9756 p
= lock_user_string(arg1
- 1);
9758 copy_from_user_mq_attr (&posix_mq_attr
, arg4
);
9759 attrp
= &posix_mq_attr
;
9763 ret
= get_errno(mq_open(p
, arg2
, arg3
, attrp
));
9764 unlock_user (p
, arg1
, 0);
9768 case TARGET_NR_mq_unlink
:
9769 p
= lock_user_string(arg1
- 1);
9770 ret
= get_errno(mq_unlink(p
));
9771 unlock_user (p
, arg1
, 0);
9774 case TARGET_NR_mq_timedsend
:
9778 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
9780 target_to_host_timespec(&ts
, arg5
);
9781 ret
= get_errno(mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
9782 host_to_target_timespec(arg5
, &ts
);
9785 ret
= get_errno(mq_send(arg1
, p
, arg3
, arg4
));
9786 unlock_user (p
, arg2
, arg3
);
9790 case TARGET_NR_mq_timedreceive
:
9795 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
9797 target_to_host_timespec(&ts
, arg5
);
9798 ret
= get_errno(mq_timedreceive(arg1
, p
, arg3
, &prio
, &ts
));
9799 host_to_target_timespec(arg5
, &ts
);
9802 ret
= get_errno(mq_receive(arg1
, p
, arg3
, &prio
));
9803 unlock_user (p
, arg2
, arg3
);
9805 put_user_u32(prio
, arg4
);
9809 /* Not implemented for now... */
9810 /* case TARGET_NR_mq_notify: */
9813 case TARGET_NR_mq_getsetattr
:
9815 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
9818 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
9819 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
9822 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
9823 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
9830 #ifdef CONFIG_SPLICE
9831 #ifdef TARGET_NR_tee
9834 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
9838 #ifdef TARGET_NR_splice
9839 case TARGET_NR_splice
:
9841 loff_t loff_in
, loff_out
;
9842 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
9844 if (get_user_u64(loff_in
, arg2
)) {
9847 ploff_in
= &loff_in
;
9850 if (get_user_u64(loff_out
, arg4
)) {
9853 ploff_out
= &loff_out
;
9855 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
9857 if (put_user_u64(loff_in
, arg2
)) {
9862 if (put_user_u64(loff_out
, arg4
)) {
9869 #ifdef TARGET_NR_vmsplice
9870 case TARGET_NR_vmsplice
:
9872 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
9874 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
9875 unlock_iovec(vec
, arg2
, arg3
, 0);
9877 ret
= -host_to_target_errno(errno
);
9882 #endif /* CONFIG_SPLICE */
9883 #ifdef CONFIG_EVENTFD
9884 #if defined(TARGET_NR_eventfd)
9885 case TARGET_NR_eventfd
:
9886 ret
= get_errno(eventfd(arg1
, 0));
9887 fd_trans_unregister(ret
);
9890 #if defined(TARGET_NR_eventfd2)
9891 case TARGET_NR_eventfd2
:
9893 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
9894 if (arg2
& TARGET_O_NONBLOCK
) {
9895 host_flags
|= O_NONBLOCK
;
9897 if (arg2
& TARGET_O_CLOEXEC
) {
9898 host_flags
|= O_CLOEXEC
;
9900 ret
= get_errno(eventfd(arg1
, host_flags
));
9901 fd_trans_unregister(ret
);
9905 #endif /* CONFIG_EVENTFD */
9906 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
9907 case TARGET_NR_fallocate
:
9908 #if TARGET_ABI_BITS == 32
9909 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
9910 target_offset64(arg5
, arg6
)));
9912 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
9916 #if defined(CONFIG_SYNC_FILE_RANGE)
9917 #if defined(TARGET_NR_sync_file_range)
9918 case TARGET_NR_sync_file_range
:
9919 #if TARGET_ABI_BITS == 32
9920 #if defined(TARGET_MIPS)
9921 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
9922 target_offset64(arg5
, arg6
), arg7
));
9924 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
9925 target_offset64(arg4
, arg5
), arg6
));
9926 #endif /* !TARGET_MIPS */
9928 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
9932 #if defined(TARGET_NR_sync_file_range2)
9933 case TARGET_NR_sync_file_range2
:
9934 /* This is like sync_file_range but the arguments are reordered */
9935 #if TARGET_ABI_BITS == 32
9936 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
9937 target_offset64(arg5
, arg6
), arg2
));
9939 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
9944 #if defined(TARGET_NR_signalfd4)
9945 case TARGET_NR_signalfd4
:
9946 ret
= do_signalfd4(arg1
, arg2
, arg4
);
9949 #if defined(TARGET_NR_signalfd)
9950 case TARGET_NR_signalfd
:
9951 ret
= do_signalfd4(arg1
, arg2
, 0);
9954 #if defined(CONFIG_EPOLL)
9955 #if defined(TARGET_NR_epoll_create)
9956 case TARGET_NR_epoll_create
:
9957 ret
= get_errno(epoll_create(arg1
));
9960 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
9961 case TARGET_NR_epoll_create1
:
9962 ret
= get_errno(epoll_create1(arg1
));
9965 #if defined(TARGET_NR_epoll_ctl)
9966 case TARGET_NR_epoll_ctl
:
9968 struct epoll_event ep
;
9969 struct epoll_event
*epp
= 0;
9971 struct target_epoll_event
*target_ep
;
9972 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
9975 ep
.events
= tswap32(target_ep
->events
);
9976 /* The epoll_data_t union is just opaque data to the kernel,
9977 * so we transfer all 64 bits across and need not worry what
9978 * actual data type it is.
9980 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
9981 unlock_user_struct(target_ep
, arg4
, 0);
9984 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
9989 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
9990 #define IMPLEMENT_EPOLL_PWAIT
9992 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
9993 #if defined(TARGET_NR_epoll_wait)
9994 case TARGET_NR_epoll_wait
:
9996 #if defined(IMPLEMENT_EPOLL_PWAIT)
9997 case TARGET_NR_epoll_pwait
:
10000 struct target_epoll_event
*target_ep
;
10001 struct epoll_event
*ep
;
10003 int maxevents
= arg3
;
10004 int timeout
= arg4
;
10006 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
10007 maxevents
* sizeof(struct target_epoll_event
), 1);
10012 ep
= alloca(maxevents
* sizeof(struct epoll_event
));
10015 #if defined(IMPLEMENT_EPOLL_PWAIT)
10016 case TARGET_NR_epoll_pwait
:
10018 target_sigset_t
*target_set
;
10019 sigset_t _set
, *set
= &_set
;
10022 target_set
= lock_user(VERIFY_READ
, arg5
,
10023 sizeof(target_sigset_t
), 1);
10025 unlock_user(target_ep
, arg2
, 0);
10028 target_to_host_sigset(set
, target_set
);
10029 unlock_user(target_set
, arg5
, 0);
10034 ret
= get_errno(epoll_pwait(epfd
, ep
, maxevents
, timeout
, set
));
10038 #if defined(TARGET_NR_epoll_wait)
10039 case TARGET_NR_epoll_wait
:
10040 ret
= get_errno(epoll_wait(epfd
, ep
, maxevents
, timeout
));
10044 ret
= -TARGET_ENOSYS
;
10046 if (!is_error(ret
)) {
10048 for (i
= 0; i
< ret
; i
++) {
10049 target_ep
[i
].events
= tswap32(ep
[i
].events
);
10050 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
10053 unlock_user(target_ep
, arg2
, ret
* sizeof(struct target_epoll_event
));
10058 #ifdef TARGET_NR_prlimit64
10059 case TARGET_NR_prlimit64
:
10061 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
10062 struct target_rlimit64
*target_rnew
, *target_rold
;
10063 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
10064 int resource
= target_to_host_resource(arg2
);
10066 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
10069 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
10070 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
10071 unlock_user_struct(target_rnew
, arg3
, 0);
10075 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
10076 if (!is_error(ret
) && arg4
) {
10077 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
10080 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
10081 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
10082 unlock_user_struct(target_rold
, arg4
, 1);
10087 #ifdef TARGET_NR_gethostname
10088 case TARGET_NR_gethostname
:
10090 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
10092 ret
= get_errno(gethostname(name
, arg2
));
10093 unlock_user(name
, arg1
, arg2
);
10095 ret
= -TARGET_EFAULT
;
10100 #ifdef TARGET_NR_atomic_cmpxchg_32
10101 case TARGET_NR_atomic_cmpxchg_32
:
10103 /* should use start_exclusive from main.c */
10104 abi_ulong mem_value
;
10105 if (get_user_u32(mem_value
, arg6
)) {
10106 target_siginfo_t info
;
10107 info
.si_signo
= SIGSEGV
;
10109 info
.si_code
= TARGET_SEGV_MAPERR
;
10110 info
._sifields
._sigfault
._addr
= arg6
;
10111 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
10115 if (mem_value
== arg2
)
10116 put_user_u32(arg1
, arg6
);
10121 #ifdef TARGET_NR_atomic_barrier
10122 case TARGET_NR_atomic_barrier
:
10124 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
10130 #ifdef TARGET_NR_timer_create
10131 case TARGET_NR_timer_create
:
10133 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
10135 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
10138 int timer_index
= next_free_host_timer();
10140 if (timer_index
< 0) {
10141 ret
= -TARGET_EAGAIN
;
10143 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
10146 phost_sevp
= &host_sevp
;
10147 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
10153 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
10157 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
10166 #ifdef TARGET_NR_timer_settime
10167 case TARGET_NR_timer_settime
:
10169 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
10170 * struct itimerspec * old_value */
10171 target_timer_t timerid
= get_timer_id(arg1
);
10175 } else if (arg3
== 0) {
10176 ret
= -TARGET_EINVAL
;
10178 timer_t htimer
= g_posix_timers
[timerid
];
10179 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
10181 target_to_host_itimerspec(&hspec_new
, arg3
);
10183 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
10184 host_to_target_itimerspec(arg2
, &hspec_old
);
10190 #ifdef TARGET_NR_timer_gettime
10191 case TARGET_NR_timer_gettime
:
10193 /* args: timer_t timerid, struct itimerspec *curr_value */
10194 target_timer_t timerid
= get_timer_id(arg1
);
10198 } else if (!arg2
) {
10199 ret
= -TARGET_EFAULT
;
10201 timer_t htimer
= g_posix_timers
[timerid
];
10202 struct itimerspec hspec
;
10203 ret
= get_errno(timer_gettime(htimer
, &hspec
));
10205 if (host_to_target_itimerspec(arg2
, &hspec
)) {
10206 ret
= -TARGET_EFAULT
;
10213 #ifdef TARGET_NR_timer_getoverrun
10214 case TARGET_NR_timer_getoverrun
:
10216 /* args: timer_t timerid */
10217 target_timer_t timerid
= get_timer_id(arg1
);
10222 timer_t htimer
= g_posix_timers
[timerid
];
10223 ret
= get_errno(timer_getoverrun(htimer
));
10225 fd_trans_unregister(ret
);
10230 #ifdef TARGET_NR_timer_delete
10231 case TARGET_NR_timer_delete
:
10233 /* args: timer_t timerid */
10234 target_timer_t timerid
= get_timer_id(arg1
);
10239 timer_t htimer
= g_posix_timers
[timerid
];
10240 ret
= get_errno(timer_delete(htimer
));
10241 g_posix_timers
[timerid
] = 0;
10247 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
10248 case TARGET_NR_timerfd_create
:
10249 ret
= get_errno(timerfd_create(arg1
,
10250 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
10254 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
10255 case TARGET_NR_timerfd_gettime
:
10257 struct itimerspec its_curr
;
10259 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
10261 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
10268 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
10269 case TARGET_NR_timerfd_settime
:
10271 struct itimerspec its_new
, its_old
, *p_new
;
10274 if (target_to_host_itimerspec(&its_new
, arg3
)) {
10282 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
10284 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
10291 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
10292 case TARGET_NR_ioprio_get
:
10293 ret
= get_errno(ioprio_get(arg1
, arg2
));
10297 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
10298 case TARGET_NR_ioprio_set
:
10299 ret
= get_errno(ioprio_set(arg1
, arg2
, arg3
));
10303 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
10304 case TARGET_NR_setns
:
10305 ret
= get_errno(setns(arg1
, arg2
));
10308 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
10309 case TARGET_NR_unshare
:
10310 ret
= get_errno(unshare(arg1
));
10316 gemu_log("qemu: Unsupported syscall: %d\n", num
);
10317 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
10318 unimplemented_nowarn
:
10320 ret
= -TARGET_ENOSYS
;
10325 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
10328 print_syscall_ret(num
, ret
);
10331 ret
= -TARGET_EFAULT
;