4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
29 #include <sys/mount.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
37 #include <linux/capability.h>
40 int __clone2(int (*fn
)(void *), void *child_stack_base
,
41 size_t stack_size
, int flags
, void *arg
, ...);
43 #include <sys/socket.h>
47 #include <sys/times.h>
50 #include <sys/statfs.h>
52 #include <sys/sysinfo.h>
53 #include <sys/signalfd.h>
54 //#include <sys/user.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <linux/wireless.h>
58 #include <linux/icmp.h>
59 #include "qemu-common.h"
61 #include <sys/timerfd.h>
67 #include <sys/eventfd.h>
70 #include <sys/epoll.h>
73 #include "qemu/xattr.h"
75 #ifdef CONFIG_SENDFILE
76 #include <sys/sendfile.h>
79 #define termios host_termios
80 #define winsize host_winsize
81 #define termio host_termio
82 #define sgttyb host_sgttyb /* same as target */
83 #define tchars host_tchars /* same as target */
84 #define ltchars host_ltchars /* same as target */
86 #include <linux/termios.h>
87 #include <linux/unistd.h>
88 #include <linux/cdrom.h>
89 #include <linux/hdreg.h>
90 #include <linux/soundcard.h>
92 #include <linux/mtio.h>
94 #if defined(CONFIG_FIEMAP)
95 #include <linux/fiemap.h>
99 #include <linux/dm-ioctl.h>
100 #include <linux/reboot.h>
101 #include <linux/route.h>
102 #include <linux/filter.h>
103 #include <linux/blkpg.h>
104 #include "linux_loop.h"
109 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
110 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
113 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
114 * once. This exercises the codepaths for restart.
116 //#define DEBUG_ERESTARTSYS
118 //#include <linux/msdos_fs.h>
119 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
120 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
131 #define _syscall0(type,name) \
132 static type name (void) \
134 return syscall(__NR_##name); \
137 #define _syscall1(type,name,type1,arg1) \
138 static type name (type1 arg1) \
140 return syscall(__NR_##name, arg1); \
143 #define _syscall2(type,name,type1,arg1,type2,arg2) \
144 static type name (type1 arg1,type2 arg2) \
146 return syscall(__NR_##name, arg1, arg2); \
149 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
150 static type name (type1 arg1,type2 arg2,type3 arg3) \
152 return syscall(__NR_##name, arg1, arg2, arg3); \
155 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
156 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
158 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
161 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
163 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
165 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
169 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
170 type5,arg5,type6,arg6) \
171 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
174 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
178 #define __NR_sys_uname __NR_uname
179 #define __NR_sys_getcwd1 __NR_getcwd
180 #define __NR_sys_getdents __NR_getdents
181 #define __NR_sys_getdents64 __NR_getdents64
182 #define __NR_sys_getpriority __NR_getpriority
183 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
184 #define __NR_sys_syslog __NR_syslog
185 #define __NR_sys_tgkill __NR_tgkill
186 #define __NR_sys_tkill __NR_tkill
187 #define __NR_sys_futex __NR_futex
188 #define __NR_sys_inotify_init __NR_inotify_init
189 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
190 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
192 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
194 #define __NR__llseek __NR_lseek
197 /* Newer kernel ports have llseek() instead of _llseek() */
198 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
199 #define TARGET_NR__llseek TARGET_NR_llseek
203 _syscall0(int, gettid
)
205 /* This is a replacement for the host gettid() and must return a host
207 static int gettid(void) {
211 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
212 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
214 #if !defined(__NR_getdents) || \
215 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
216 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
218 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
219 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
220 loff_t
*, res
, uint
, wh
);
222 _syscall3(int,sys_rt_sigqueueinfo
,int,pid
,int,sig
,siginfo_t
*,uinfo
)
223 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
224 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
225 _syscall3(int,sys_tgkill
,int,tgid
,int,pid
,int,sig
)
227 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
228 _syscall2(int,sys_tkill
,int,tid
,int,sig
)
230 #ifdef __NR_exit_group
231 _syscall1(int,exit_group
,int,error_code
)
233 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
234 _syscall1(int,set_tid_address
,int *,tidptr
)
236 #if defined(TARGET_NR_futex) && defined(__NR_futex)
237 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
238 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
240 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
241 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
242 unsigned long *, user_mask_ptr
);
243 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
244 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
245 unsigned long *, user_mask_ptr
);
246 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
248 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
249 struct __user_cap_data_struct
*, data
);
250 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
251 struct __user_cap_data_struct
*, data
);
252 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
253 _syscall2(int, ioprio_get
, int, which
, int, who
)
255 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
256 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
258 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
259 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
262 static bitmask_transtbl fcntl_flags_tbl
[] = {
263 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
264 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
265 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
266 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
267 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
268 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
269 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
270 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
271 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
272 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
273 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
274 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
275 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
276 #if defined(O_DIRECT)
277 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
279 #if defined(O_NOATIME)
280 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
282 #if defined(O_CLOEXEC)
283 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
286 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
288 /* Don't terminate the list prematurely on 64-bit host+guest. */
289 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
290 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
295 typedef abi_long (*TargetFdDataFunc
)(void *, size_t);
296 typedef abi_long (*TargetFdAddrFunc
)(void *, abi_ulong
, socklen_t
);
297 typedef struct TargetFdTrans
{
298 TargetFdDataFunc host_to_target_data
;
299 TargetFdDataFunc target_to_host_data
;
300 TargetFdAddrFunc target_to_host_addr
;
303 static TargetFdTrans
**target_fd_trans
;
305 static unsigned int target_fd_max
;
307 static TargetFdDataFunc
fd_trans_host_to_target_data(int fd
)
309 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
310 return target_fd_trans
[fd
]->host_to_target_data
;
315 static TargetFdAddrFunc
fd_trans_target_to_host_addr(int fd
)
317 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
318 return target_fd_trans
[fd
]->target_to_host_addr
;
323 static void fd_trans_register(int fd
, TargetFdTrans
*trans
)
327 if (fd
>= target_fd_max
) {
328 oldmax
= target_fd_max
;
329 target_fd_max
= ((fd
>> 6) + 1) << 6; /* by slice of 64 entries */
330 target_fd_trans
= g_renew(TargetFdTrans
*,
331 target_fd_trans
, target_fd_max
);
332 memset((void *)(target_fd_trans
+ oldmax
), 0,
333 (target_fd_max
- oldmax
) * sizeof(TargetFdTrans
*));
335 target_fd_trans
[fd
] = trans
;
338 static void fd_trans_unregister(int fd
)
340 if (fd
>= 0 && fd
< target_fd_max
) {
341 target_fd_trans
[fd
] = NULL
;
345 static void fd_trans_dup(int oldfd
, int newfd
)
347 fd_trans_unregister(newfd
);
348 if (oldfd
< target_fd_max
&& target_fd_trans
[oldfd
]) {
349 fd_trans_register(newfd
, target_fd_trans
[oldfd
]);
/* getcwd() wrapper returning strlen(buf)+1 (the byte count including the
 * NUL terminator) on success, or -1 with errno set by getcwd() on failure.
 * This matches the return convention of the raw getcwd syscall. */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return -1;
    }
    return strlen(buf) + 1;
}
362 #ifdef TARGET_NR_utimensat
363 #ifdef CONFIG_UTIMENSAT
/* Wrapper selecting between futimens() and utimensat():
 * a NULL pathname means "operate on dirfd itself" (futimens semantics);
 * otherwise resolve pathname relative to dirfd.  Returns 0 on success,
 * -1 with errno set on failure. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    if (pathname == NULL) {
        return futimens(dirfd, times);
    }
    return utimensat(dirfd, pathname, times, flags);
}
372 #elif defined(__NR_utimensat)
373 #define __NR_sys_utimensat __NR_utimensat
374 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
375 const struct timespec
*,tsp
,int,flags
)
377 static int sys_utimensat(int dirfd
, const char *pathname
,
378 const struct timespec times
[2], int flags
)
384 #endif /* TARGET_NR_utimensat */
386 #ifdef CONFIG_INOTIFY
387 #include <sys/inotify.h>
389 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin wrapper over the libc inotify_init(); returns a new inotify fd
 * (>= 0) or -1 with errno set. */
static int sys_inotify_init(void)
{
    return (inotify_init());
}
395 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Thin wrapper over inotify_add_watch(); returns the watch descriptor
 * (>= 0) or -1 with errno set. */
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return (inotify_add_watch(fd, pathname, mask));
}
401 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Thin wrapper over inotify_rm_watch(); returns 0 on success or -1 with
 * errno set. */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return (inotify_rm_watch(fd, wd));
}
407 #ifdef CONFIG_INOTIFY1
408 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Thin wrapper over inotify_init1(); flags may include IN_NONBLOCK /
 * IN_CLOEXEC.  Returns a new inotify fd (>= 0) or -1 with errno set. */
static int sys_inotify_init1(int flags)
{
    return (inotify_init1(flags));
}
416 /* Userspace can usually survive runtime without inotify */
417 #undef TARGET_NR_inotify_init
418 #undef TARGET_NR_inotify_init1
419 #undef TARGET_NR_inotify_add_watch
420 #undef TARGET_NR_inotify_rm_watch
421 #endif /* CONFIG_INOTIFY */
423 #if defined(TARGET_NR_ppoll)
425 # define __NR_ppoll -1
427 #define __NR_sys_ppoll __NR_ppoll
428 _syscall5(int, sys_ppoll
, struct pollfd
*, fds
, nfds_t
, nfds
,
429 struct timespec
*, timeout
, const sigset_t
*, sigmask
,
433 #if defined(TARGET_NR_prlimit64)
434 #ifndef __NR_prlimit64
435 # define __NR_prlimit64 -1
437 #define __NR_sys_prlimit64 __NR_prlimit64
438 /* The glibc rlimit structure may not be that used by the underlying syscall */
439 struct host_rlimit64
{
443 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
444 const struct host_rlimit64
*, new_limit
,
445 struct host_rlimit64
*, old_limit
)
449 #if defined(TARGET_NR_timer_create)
450 /* Maximum of 32 active POSIX timers allowed at any one time. */
451 static timer_t g_posix_timers
[32] = { 0, } ;
453 static inline int next_free_host_timer(void)
456 /* FIXME: Does finding the next free slot require a lock? */
457 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
458 if (g_posix_timers
[k
] == 0) {
459 g_posix_timers
[k
] = (timer_t
) 1;
467 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
469 static inline int regpairs_aligned(void *cpu_env
) {
470 return ((((CPUARMState
*)cpu_env
)->eabi
) == 1) ;
472 #elif defined(TARGET_MIPS)
473 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
474 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
475 /* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
476 * of registers which translates to the same as ARM/MIPS, because we start with
478 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
480 static inline int regpairs_aligned(void *cpu_env
) { return 0; }
483 #define ERRNO_TABLE_SIZE 1200
485 /* target_to_host_errno_table[] is initialized from
486 * host_to_target_errno_table[] in syscall_init(). */
487 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
491 * This list is the union of errno values overridden in asm-<arch>/errno.h
492 * minus the errnos that are not actually generic to all archs.
494 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
495 [EAGAIN
] = TARGET_EAGAIN
,
496 [EIDRM
] = TARGET_EIDRM
,
497 [ECHRNG
] = TARGET_ECHRNG
,
498 [EL2NSYNC
] = TARGET_EL2NSYNC
,
499 [EL3HLT
] = TARGET_EL3HLT
,
500 [EL3RST
] = TARGET_EL3RST
,
501 [ELNRNG
] = TARGET_ELNRNG
,
502 [EUNATCH
] = TARGET_EUNATCH
,
503 [ENOCSI
] = TARGET_ENOCSI
,
504 [EL2HLT
] = TARGET_EL2HLT
,
505 [EDEADLK
] = TARGET_EDEADLK
,
506 [ENOLCK
] = TARGET_ENOLCK
,
507 [EBADE
] = TARGET_EBADE
,
508 [EBADR
] = TARGET_EBADR
,
509 [EXFULL
] = TARGET_EXFULL
,
510 [ENOANO
] = TARGET_ENOANO
,
511 [EBADRQC
] = TARGET_EBADRQC
,
512 [EBADSLT
] = TARGET_EBADSLT
,
513 [EBFONT
] = TARGET_EBFONT
,
514 [ENOSTR
] = TARGET_ENOSTR
,
515 [ENODATA
] = TARGET_ENODATA
,
516 [ETIME
] = TARGET_ETIME
,
517 [ENOSR
] = TARGET_ENOSR
,
518 [ENONET
] = TARGET_ENONET
,
519 [ENOPKG
] = TARGET_ENOPKG
,
520 [EREMOTE
] = TARGET_EREMOTE
,
521 [ENOLINK
] = TARGET_ENOLINK
,
522 [EADV
] = TARGET_EADV
,
523 [ESRMNT
] = TARGET_ESRMNT
,
524 [ECOMM
] = TARGET_ECOMM
,
525 [EPROTO
] = TARGET_EPROTO
,
526 [EDOTDOT
] = TARGET_EDOTDOT
,
527 [EMULTIHOP
] = TARGET_EMULTIHOP
,
528 [EBADMSG
] = TARGET_EBADMSG
,
529 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
530 [EOVERFLOW
] = TARGET_EOVERFLOW
,
531 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
532 [EBADFD
] = TARGET_EBADFD
,
533 [EREMCHG
] = TARGET_EREMCHG
,
534 [ELIBACC
] = TARGET_ELIBACC
,
535 [ELIBBAD
] = TARGET_ELIBBAD
,
536 [ELIBSCN
] = TARGET_ELIBSCN
,
537 [ELIBMAX
] = TARGET_ELIBMAX
,
538 [ELIBEXEC
] = TARGET_ELIBEXEC
,
539 [EILSEQ
] = TARGET_EILSEQ
,
540 [ENOSYS
] = TARGET_ENOSYS
,
541 [ELOOP
] = TARGET_ELOOP
,
542 [ERESTART
] = TARGET_ERESTART
,
543 [ESTRPIPE
] = TARGET_ESTRPIPE
,
544 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
545 [EUSERS
] = TARGET_EUSERS
,
546 [ENOTSOCK
] = TARGET_ENOTSOCK
,
547 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
548 [EMSGSIZE
] = TARGET_EMSGSIZE
,
549 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
550 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
551 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
552 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
553 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
554 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
555 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
556 [EADDRINUSE
] = TARGET_EADDRINUSE
,
557 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
558 [ENETDOWN
] = TARGET_ENETDOWN
,
559 [ENETUNREACH
] = TARGET_ENETUNREACH
,
560 [ENETRESET
] = TARGET_ENETRESET
,
561 [ECONNABORTED
] = TARGET_ECONNABORTED
,
562 [ECONNRESET
] = TARGET_ECONNRESET
,
563 [ENOBUFS
] = TARGET_ENOBUFS
,
564 [EISCONN
] = TARGET_EISCONN
,
565 [ENOTCONN
] = TARGET_ENOTCONN
,
566 [EUCLEAN
] = TARGET_EUCLEAN
,
567 [ENOTNAM
] = TARGET_ENOTNAM
,
568 [ENAVAIL
] = TARGET_ENAVAIL
,
569 [EISNAM
] = TARGET_EISNAM
,
570 [EREMOTEIO
] = TARGET_EREMOTEIO
,
571 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
572 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
573 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
574 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
575 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
576 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
577 [EALREADY
] = TARGET_EALREADY
,
578 [EINPROGRESS
] = TARGET_EINPROGRESS
,
579 [ESTALE
] = TARGET_ESTALE
,
580 [ECANCELED
] = TARGET_ECANCELED
,
581 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
582 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
584 [ENOKEY
] = TARGET_ENOKEY
,
587 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
590 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
593 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
596 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
598 #ifdef ENOTRECOVERABLE
599 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
603 static inline int host_to_target_errno(int err
)
605 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
606 host_to_target_errno_table
[err
]) {
607 return host_to_target_errno_table
[err
];
612 static inline int target_to_host_errno(int err
)
614 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
615 target_to_host_errno_table
[err
]) {
616 return target_to_host_errno_table
[err
];
621 static inline abi_long
get_errno(abi_long ret
)
624 return -host_to_target_errno(errno
);
629 static inline int is_error(abi_long ret
)
631 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
634 char *target_strerror(int err
)
636 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
639 return strerror(target_to_host_errno(err
));
642 #define safe_syscall0(type, name) \
643 static type safe_##name(void) \
645 return safe_syscall(__NR_##name); \
648 #define safe_syscall1(type, name, type1, arg1) \
649 static type safe_##name(type1 arg1) \
651 return safe_syscall(__NR_##name, arg1); \
654 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
655 static type safe_##name(type1 arg1, type2 arg2) \
657 return safe_syscall(__NR_##name, arg1, arg2); \
660 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
661 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
663 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
666 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
668 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
670 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
673 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
674 type4, arg4, type5, arg5) \
675 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
678 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
681 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
682 type4, arg4, type5, arg5, type6, arg6) \
683 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
684 type5 arg5, type6 arg6) \
686 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
689 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
690 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
691 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
692 int, flags
, mode_t
, mode
)
693 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
694 struct rusage
*, rusage
)
695 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
696 int, options
, struct rusage
*, rusage
)
697 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
698 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
699 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
700 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
701 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
703 static inline int host_to_target_sock_type(int host_type
)
707 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
709 target_type
= TARGET_SOCK_DGRAM
;
712 target_type
= TARGET_SOCK_STREAM
;
715 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
719 #if defined(SOCK_CLOEXEC)
720 if (host_type
& SOCK_CLOEXEC
) {
721 target_type
|= TARGET_SOCK_CLOEXEC
;
725 #if defined(SOCK_NONBLOCK)
726 if (host_type
& SOCK_NONBLOCK
) {
727 target_type
|= TARGET_SOCK_NONBLOCK
;
734 static abi_ulong target_brk
;
735 static abi_ulong target_original_brk
;
736 static abi_ulong brk_page
;
738 void target_set_brk(abi_ulong new_brk
)
740 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
741 brk_page
= HOST_PAGE_ALIGN(target_brk
);
744 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
745 #define DEBUGF_BRK(message, args...)
747 /* do_brk() must return target values and target errnos. */
748 abi_long
do_brk(abi_ulong new_brk
)
750 abi_long mapped_addr
;
753 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
756 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
759 if (new_brk
< target_original_brk
) {
760 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
765 /* If the new brk is less than the highest page reserved to the
766 * target heap allocation, set it and we're almost done... */
767 if (new_brk
<= brk_page
) {
768 /* Heap contents are initialized to zero, as for anonymous
770 if (new_brk
> target_brk
) {
771 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
773 target_brk
= new_brk
;
774 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
778 /* We need to allocate more memory after the brk... Note that
779 * we don't use MAP_FIXED because that will map over the top of
780 * any existing mapping (like the one with the host libc or qemu
781 * itself); instead we treat "mapped but at wrong address" as
782 * a failure and unmap again.
784 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
785 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
786 PROT_READ
|PROT_WRITE
,
787 MAP_ANON
|MAP_PRIVATE
, 0, 0));
789 if (mapped_addr
== brk_page
) {
790 /* Heap contents are initialized to zero, as for anonymous
791 * mapped pages. Technically the new pages are already
792 * initialized to zero since they *are* anonymous mapped
793 * pages, however we have to take care with the contents that
794 * come from the remaining part of the previous page: it may
795 * contain garbage data due to a previous heap usage (grown
797 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
799 target_brk
= new_brk
;
800 brk_page
= HOST_PAGE_ALIGN(target_brk
);
801 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
804 } else if (mapped_addr
!= -1) {
805 /* Mapped but at wrong address, meaning there wasn't actually
806 * enough space for this brk.
808 target_munmap(mapped_addr
, new_alloc_size
);
810 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
813 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
816 #if defined(TARGET_ALPHA)
817 /* We (partially) emulate OSF/1 on Alpha, which requires we
818 return a proper errno, not an unchanged brk value. */
819 return -TARGET_ENOMEM
;
821 /* For everything else, return the previous break. */
825 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
826 abi_ulong target_fds_addr
,
830 abi_ulong b
, *target_fds
;
832 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
833 if (!(target_fds
= lock_user(VERIFY_READ
,
835 sizeof(abi_ulong
) * nw
,
837 return -TARGET_EFAULT
;
841 for (i
= 0; i
< nw
; i
++) {
842 /* grab the abi_ulong */
843 __get_user(b
, &target_fds
[i
]);
844 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
845 /* check the bit inside the abi_ulong */
852 unlock_user(target_fds
, target_fds_addr
, 0);
857 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
858 abi_ulong target_fds_addr
,
861 if (target_fds_addr
) {
862 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
863 return -TARGET_EFAULT
;
871 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
877 abi_ulong
*target_fds
;
879 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
880 if (!(target_fds
= lock_user(VERIFY_WRITE
,
882 sizeof(abi_ulong
) * nw
,
884 return -TARGET_EFAULT
;
887 for (i
= 0; i
< nw
; i
++) {
889 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
890 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
893 __put_user(v
, &target_fds
[i
]);
896 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
901 #if defined(__alpha__)
907 static inline abi_long
host_to_target_clock_t(long ticks
)
909 #if HOST_HZ == TARGET_HZ
912 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
916 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
917 const struct rusage
*rusage
)
919 struct target_rusage
*target_rusage
;
921 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
922 return -TARGET_EFAULT
;
923 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
924 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
925 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
926 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
927 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
928 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
929 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
930 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
931 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
932 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
933 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
934 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
935 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
936 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
937 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
938 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
939 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
940 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
941 unlock_user_struct(target_rusage
, target_addr
, 1);
946 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
948 abi_ulong target_rlim_swap
;
951 target_rlim_swap
= tswapal(target_rlim
);
952 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
953 return RLIM_INFINITY
;
955 result
= target_rlim_swap
;
956 if (target_rlim_swap
!= (rlim_t
)result
)
957 return RLIM_INFINITY
;
962 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
964 abi_ulong target_rlim_swap
;
967 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
968 target_rlim_swap
= TARGET_RLIM_INFINITY
;
970 target_rlim_swap
= rlim
;
971 result
= tswapal(target_rlim_swap
);
976 static inline int target_to_host_resource(int code
)
979 case TARGET_RLIMIT_AS
:
981 case TARGET_RLIMIT_CORE
:
983 case TARGET_RLIMIT_CPU
:
985 case TARGET_RLIMIT_DATA
:
987 case TARGET_RLIMIT_FSIZE
:
989 case TARGET_RLIMIT_LOCKS
:
991 case TARGET_RLIMIT_MEMLOCK
:
992 return RLIMIT_MEMLOCK
;
993 case TARGET_RLIMIT_MSGQUEUE
:
994 return RLIMIT_MSGQUEUE
;
995 case TARGET_RLIMIT_NICE
:
997 case TARGET_RLIMIT_NOFILE
:
998 return RLIMIT_NOFILE
;
999 case TARGET_RLIMIT_NPROC
:
1000 return RLIMIT_NPROC
;
1001 case TARGET_RLIMIT_RSS
:
1003 case TARGET_RLIMIT_RTPRIO
:
1004 return RLIMIT_RTPRIO
;
1005 case TARGET_RLIMIT_SIGPENDING
:
1006 return RLIMIT_SIGPENDING
;
1007 case TARGET_RLIMIT_STACK
:
1008 return RLIMIT_STACK
;
1014 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1015 abi_ulong target_tv_addr
)
1017 struct target_timeval
*target_tv
;
1019 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
1020 return -TARGET_EFAULT
;
1022 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1023 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1025 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1030 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1031 const struct timeval
*tv
)
1033 struct target_timeval
*target_tv
;
1035 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
1036 return -TARGET_EFAULT
;
1038 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1039 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1041 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1046 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1047 abi_ulong target_tz_addr
)
1049 struct target_timezone
*target_tz
;
1051 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1052 return -TARGET_EFAULT
;
1055 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1056 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1058 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1063 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1066 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1067 abi_ulong target_mq_attr_addr
)
1069 struct target_mq_attr
*target_mq_attr
;
1071 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1072 target_mq_attr_addr
, 1))
1073 return -TARGET_EFAULT
;
1075 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1076 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1077 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1078 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1080 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1085 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1086 const struct mq_attr
*attr
)
1088 struct target_mq_attr
*target_mq_attr
;
1090 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1091 target_mq_attr_addr
, 0))
1092 return -TARGET_EFAULT
;
1094 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1095 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1096 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1097 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1099 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1105 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1106 /* do_select() must return target values and target errnos. */
1107 static abi_long
do_select(int n
,
1108 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1109 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1111 fd_set rfds
, wfds
, efds
;
1112 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1114 struct timespec ts
, *ts_ptr
;
1117 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1121 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1125 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1130 if (target_tv_addr
) {
1131 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1132 return -TARGET_EFAULT
;
1133 ts
.tv_sec
= tv
.tv_sec
;
1134 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1140 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1143 if (!is_error(ret
)) {
1144 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1145 return -TARGET_EFAULT
;
1146 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1147 return -TARGET_EFAULT
;
1148 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1149 return -TARGET_EFAULT
;
1151 if (target_tv_addr
) {
1152 tv
.tv_sec
= ts
.tv_sec
;
1153 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1154 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1155 return -TARGET_EFAULT
;
1164 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1167 return pipe2(host_pipe
, flags
);
1173 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1174 int flags
, int is_pipe2
)
1178 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1181 return get_errno(ret
);
1183 /* Several targets have special calling conventions for the original
1184 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1186 #if defined(TARGET_ALPHA)
1187 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1188 return host_pipe
[0];
1189 #elif defined(TARGET_MIPS)
1190 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1191 return host_pipe
[0];
1192 #elif defined(TARGET_SH4)
1193 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1194 return host_pipe
[0];
1195 #elif defined(TARGET_SPARC)
1196 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1197 return host_pipe
[0];
1201 if (put_user_s32(host_pipe
[0], pipedes
)
1202 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1203 return -TARGET_EFAULT
;
1204 return get_errno(ret
);
1207 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1208 abi_ulong target_addr
,
1211 struct target_ip_mreqn
*target_smreqn
;
1213 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1215 return -TARGET_EFAULT
;
1216 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1217 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1218 if (len
== sizeof(struct target_ip_mreqn
))
1219 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1220 unlock_user(target_smreqn
, target_addr
, 0);
1225 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1226 abi_ulong target_addr
,
1229 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1230 sa_family_t sa_family
;
1231 struct target_sockaddr
*target_saddr
;
1233 if (fd_trans_target_to_host_addr(fd
)) {
1234 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1237 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1239 return -TARGET_EFAULT
;
1241 sa_family
= tswap16(target_saddr
->sa_family
);
1243 /* Oops. The caller might send a incomplete sun_path; sun_path
1244 * must be terminated by \0 (see the manual page), but
1245 * unfortunately it is quite common to specify sockaddr_un
1246 * length as "strlen(x->sun_path)" while it should be
1247 * "strlen(...) + 1". We'll fix that here if needed.
1248 * Linux kernel has a similar feature.
1251 if (sa_family
== AF_UNIX
) {
1252 if (len
< unix_maxlen
&& len
> 0) {
1253 char *cp
= (char*)target_saddr
;
1255 if ( cp
[len
-1] && !cp
[len
] )
1258 if (len
> unix_maxlen
)
1262 memcpy(addr
, target_saddr
, len
);
1263 addr
->sa_family
= sa_family
;
1264 if (sa_family
== AF_PACKET
) {
1265 struct target_sockaddr_ll
*lladdr
;
1267 lladdr
= (struct target_sockaddr_ll
*)addr
;
1268 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1269 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1271 unlock_user(target_saddr
, target_addr
, 0);
1276 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1277 struct sockaddr
*addr
,
1280 struct target_sockaddr
*target_saddr
;
1282 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1284 return -TARGET_EFAULT
;
1285 memcpy(target_saddr
, addr
, len
);
1286 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1287 unlock_user(target_saddr
, target_addr
, len
);
1292 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1293 struct target_msghdr
*target_msgh
)
1295 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1296 abi_long msg_controllen
;
1297 abi_ulong target_cmsg_addr
;
1298 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1299 socklen_t space
= 0;
1301 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1302 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1304 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1305 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1306 target_cmsg_start
= target_cmsg
;
1308 return -TARGET_EFAULT
;
1310 while (cmsg
&& target_cmsg
) {
1311 void *data
= CMSG_DATA(cmsg
);
1312 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1314 int len
= tswapal(target_cmsg
->cmsg_len
)
1315 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
1317 space
+= CMSG_SPACE(len
);
1318 if (space
> msgh
->msg_controllen
) {
1319 space
-= CMSG_SPACE(len
);
1320 /* This is a QEMU bug, since we allocated the payload
1321 * area ourselves (unlike overflow in host-to-target
1322 * conversion, which is just the guest giving us a buffer
1323 * that's too small). It can't happen for the payload types
1324 * we currently support; if it becomes an issue in future
1325 * we would need to improve our allocation strategy to
1326 * something more intelligent than "twice the size of the
1327 * target buffer we're reading from".
1329 gemu_log("Host cmsg overflow\n");
1333 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1334 cmsg
->cmsg_level
= SOL_SOCKET
;
1336 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1338 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1339 cmsg
->cmsg_len
= CMSG_LEN(len
);
1341 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1342 int *fd
= (int *)data
;
1343 int *target_fd
= (int *)target_data
;
1344 int i
, numfds
= len
/ sizeof(int);
1346 for (i
= 0; i
< numfds
; i
++) {
1347 __get_user(fd
[i
], target_fd
+ i
);
1349 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1350 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1351 struct ucred
*cred
= (struct ucred
*)data
;
1352 struct target_ucred
*target_cred
=
1353 (struct target_ucred
*)target_data
;
1355 __get_user(cred
->pid
, &target_cred
->pid
);
1356 __get_user(cred
->uid
, &target_cred
->uid
);
1357 __get_user(cred
->gid
, &target_cred
->gid
);
1359 gemu_log("Unsupported ancillary data: %d/%d\n",
1360 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1361 memcpy(data
, target_data
, len
);
1364 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1365 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1368 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1370 msgh
->msg_controllen
= space
;
1374 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1375 struct msghdr
*msgh
)
1377 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1378 abi_long msg_controllen
;
1379 abi_ulong target_cmsg_addr
;
1380 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1381 socklen_t space
= 0;
1383 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1384 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1386 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1387 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1388 target_cmsg_start
= target_cmsg
;
1390 return -TARGET_EFAULT
;
1392 while (cmsg
&& target_cmsg
) {
1393 void *data
= CMSG_DATA(cmsg
);
1394 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1396 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
1397 int tgt_len
, tgt_space
;
1399 /* We never copy a half-header but may copy half-data;
1400 * this is Linux's behaviour in put_cmsg(). Note that
1401 * truncation here is a guest problem (which we report
1402 * to the guest via the CTRUNC bit), unlike truncation
1403 * in target_to_host_cmsg, which is a QEMU bug.
1405 if (msg_controllen
< sizeof(struct cmsghdr
)) {
1406 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1410 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1411 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1413 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1415 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1417 tgt_len
= TARGET_CMSG_LEN(len
);
1419 /* Payload types which need a different size of payload on
1420 * the target must adjust tgt_len here.
1422 switch (cmsg
->cmsg_level
) {
1424 switch (cmsg
->cmsg_type
) {
1426 tgt_len
= sizeof(struct target_timeval
);
1435 if (msg_controllen
< tgt_len
) {
1436 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1437 tgt_len
= msg_controllen
;
1440 /* We must now copy-and-convert len bytes of payload
1441 * into tgt_len bytes of destination space. Bear in mind
1442 * that in both source and destination we may be dealing
1443 * with a truncated value!
1445 switch (cmsg
->cmsg_level
) {
1447 switch (cmsg
->cmsg_type
) {
1450 int *fd
= (int *)data
;
1451 int *target_fd
= (int *)target_data
;
1452 int i
, numfds
= tgt_len
/ sizeof(int);
1454 for (i
= 0; i
< numfds
; i
++) {
1455 __put_user(fd
[i
], target_fd
+ i
);
1461 struct timeval
*tv
= (struct timeval
*)data
;
1462 struct target_timeval
*target_tv
=
1463 (struct target_timeval
*)target_data
;
1465 if (len
!= sizeof(struct timeval
) ||
1466 tgt_len
!= sizeof(struct target_timeval
)) {
1470 /* copy struct timeval to target */
1471 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1472 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1475 case SCM_CREDENTIALS
:
1477 struct ucred
*cred
= (struct ucred
*)data
;
1478 struct target_ucred
*target_cred
=
1479 (struct target_ucred
*)target_data
;
1481 __put_user(cred
->pid
, &target_cred
->pid
);
1482 __put_user(cred
->uid
, &target_cred
->uid
);
1483 __put_user(cred
->gid
, &target_cred
->gid
);
1493 gemu_log("Unsupported ancillary data: %d/%d\n",
1494 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1495 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1496 if (tgt_len
> len
) {
1497 memset(target_data
+ len
, 0, tgt_len
- len
);
1501 target_cmsg
->cmsg_len
= tswapal(tgt_len
);
1502 tgt_space
= TARGET_CMSG_SPACE(len
);
1503 if (msg_controllen
< tgt_space
) {
1504 tgt_space
= msg_controllen
;
1506 msg_controllen
-= tgt_space
;
1508 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1509 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1512 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1514 target_msgh
->msg_controllen
= tswapal(space
);
1518 /* do_setsockopt() Must return target values and target errnos. */
1519 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
1520 abi_ulong optval_addr
, socklen_t optlen
)
1524 struct ip_mreqn
*ip_mreq
;
1525 struct ip_mreq_source
*ip_mreq_source
;
1529 /* TCP options all take an 'int' value. */
1530 if (optlen
< sizeof(uint32_t))
1531 return -TARGET_EINVAL
;
1533 if (get_user_u32(val
, optval_addr
))
1534 return -TARGET_EFAULT
;
1535 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1542 case IP_ROUTER_ALERT
:
1546 case IP_MTU_DISCOVER
:
1552 case IP_MULTICAST_TTL
:
1553 case IP_MULTICAST_LOOP
:
1555 if (optlen
>= sizeof(uint32_t)) {
1556 if (get_user_u32(val
, optval_addr
))
1557 return -TARGET_EFAULT
;
1558 } else if (optlen
>= 1) {
1559 if (get_user_u8(val
, optval_addr
))
1560 return -TARGET_EFAULT
;
1562 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1564 case IP_ADD_MEMBERSHIP
:
1565 case IP_DROP_MEMBERSHIP
:
1566 if (optlen
< sizeof (struct target_ip_mreq
) ||
1567 optlen
> sizeof (struct target_ip_mreqn
))
1568 return -TARGET_EINVAL
;
1570 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
1571 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
1572 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
1575 case IP_BLOCK_SOURCE
:
1576 case IP_UNBLOCK_SOURCE
:
1577 case IP_ADD_SOURCE_MEMBERSHIP
:
1578 case IP_DROP_SOURCE_MEMBERSHIP
:
1579 if (optlen
!= sizeof (struct target_ip_mreq_source
))
1580 return -TARGET_EINVAL
;
1582 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
1583 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
1584 unlock_user (ip_mreq_source
, optval_addr
, 0);
1593 case IPV6_MTU_DISCOVER
:
1596 case IPV6_RECVPKTINFO
:
1598 if (optlen
< sizeof(uint32_t)) {
1599 return -TARGET_EINVAL
;
1601 if (get_user_u32(val
, optval_addr
)) {
1602 return -TARGET_EFAULT
;
1604 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1605 &val
, sizeof(val
)));
1614 /* struct icmp_filter takes an u32 value */
1615 if (optlen
< sizeof(uint32_t)) {
1616 return -TARGET_EINVAL
;
1619 if (get_user_u32(val
, optval_addr
)) {
1620 return -TARGET_EFAULT
;
1622 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1623 &val
, sizeof(val
)));
1630 case TARGET_SOL_SOCKET
:
1632 case TARGET_SO_RCVTIMEO
:
1636 optname
= SO_RCVTIMEO
;
1639 if (optlen
!= sizeof(struct target_timeval
)) {
1640 return -TARGET_EINVAL
;
1643 if (copy_from_user_timeval(&tv
, optval_addr
)) {
1644 return -TARGET_EFAULT
;
1647 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
1651 case TARGET_SO_SNDTIMEO
:
1652 optname
= SO_SNDTIMEO
;
1654 case TARGET_SO_ATTACH_FILTER
:
1656 struct target_sock_fprog
*tfprog
;
1657 struct target_sock_filter
*tfilter
;
1658 struct sock_fprog fprog
;
1659 struct sock_filter
*filter
;
1662 if (optlen
!= sizeof(*tfprog
)) {
1663 return -TARGET_EINVAL
;
1665 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
1666 return -TARGET_EFAULT
;
1668 if (!lock_user_struct(VERIFY_READ
, tfilter
,
1669 tswapal(tfprog
->filter
), 0)) {
1670 unlock_user_struct(tfprog
, optval_addr
, 1);
1671 return -TARGET_EFAULT
;
1674 fprog
.len
= tswap16(tfprog
->len
);
1675 filter
= g_try_new(struct sock_filter
, fprog
.len
);
1676 if (filter
== NULL
) {
1677 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
1678 unlock_user_struct(tfprog
, optval_addr
, 1);
1679 return -TARGET_ENOMEM
;
1681 for (i
= 0; i
< fprog
.len
; i
++) {
1682 filter
[i
].code
= tswap16(tfilter
[i
].code
);
1683 filter
[i
].jt
= tfilter
[i
].jt
;
1684 filter
[i
].jf
= tfilter
[i
].jf
;
1685 filter
[i
].k
= tswap32(tfilter
[i
].k
);
1687 fprog
.filter
= filter
;
1689 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
1690 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
1693 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
1694 unlock_user_struct(tfprog
, optval_addr
, 1);
1697 case TARGET_SO_BINDTODEVICE
:
1699 char *dev_ifname
, *addr_ifname
;
1701 if (optlen
> IFNAMSIZ
- 1) {
1702 optlen
= IFNAMSIZ
- 1;
1704 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
1706 return -TARGET_EFAULT
;
1708 optname
= SO_BINDTODEVICE
;
1709 addr_ifname
= alloca(IFNAMSIZ
);
1710 memcpy(addr_ifname
, dev_ifname
, optlen
);
1711 addr_ifname
[optlen
] = 0;
1712 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
1713 addr_ifname
, optlen
));
1714 unlock_user (dev_ifname
, optval_addr
, 0);
1717 /* Options with 'int' argument. */
1718 case TARGET_SO_DEBUG
:
1721 case TARGET_SO_REUSEADDR
:
1722 optname
= SO_REUSEADDR
;
1724 case TARGET_SO_TYPE
:
1727 case TARGET_SO_ERROR
:
1730 case TARGET_SO_DONTROUTE
:
1731 optname
= SO_DONTROUTE
;
1733 case TARGET_SO_BROADCAST
:
1734 optname
= SO_BROADCAST
;
1736 case TARGET_SO_SNDBUF
:
1737 optname
= SO_SNDBUF
;
1739 case TARGET_SO_SNDBUFFORCE
:
1740 optname
= SO_SNDBUFFORCE
;
1742 case TARGET_SO_RCVBUF
:
1743 optname
= SO_RCVBUF
;
1745 case TARGET_SO_RCVBUFFORCE
:
1746 optname
= SO_RCVBUFFORCE
;
1748 case TARGET_SO_KEEPALIVE
:
1749 optname
= SO_KEEPALIVE
;
1751 case TARGET_SO_OOBINLINE
:
1752 optname
= SO_OOBINLINE
;
1754 case TARGET_SO_NO_CHECK
:
1755 optname
= SO_NO_CHECK
;
1757 case TARGET_SO_PRIORITY
:
1758 optname
= SO_PRIORITY
;
1761 case TARGET_SO_BSDCOMPAT
:
1762 optname
= SO_BSDCOMPAT
;
1765 case TARGET_SO_PASSCRED
:
1766 optname
= SO_PASSCRED
;
1768 case TARGET_SO_PASSSEC
:
1769 optname
= SO_PASSSEC
;
1771 case TARGET_SO_TIMESTAMP
:
1772 optname
= SO_TIMESTAMP
;
1774 case TARGET_SO_RCVLOWAT
:
1775 optname
= SO_RCVLOWAT
;
1781 if (optlen
< sizeof(uint32_t))
1782 return -TARGET_EINVAL
;
1784 if (get_user_u32(val
, optval_addr
))
1785 return -TARGET_EFAULT
;
1786 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
1790 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
1791 ret
= -TARGET_ENOPROTOOPT
;
1796 /* do_getsockopt() Must return target values and target errnos. */
1797 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
1798 abi_ulong optval_addr
, abi_ulong optlen
)
1805 case TARGET_SOL_SOCKET
:
1808 /* These don't just return a single integer */
1809 case TARGET_SO_LINGER
:
1810 case TARGET_SO_RCVTIMEO
:
1811 case TARGET_SO_SNDTIMEO
:
1812 case TARGET_SO_PEERNAME
:
1814 case TARGET_SO_PEERCRED
: {
1817 struct target_ucred
*tcr
;
1819 if (get_user_u32(len
, optlen
)) {
1820 return -TARGET_EFAULT
;
1823 return -TARGET_EINVAL
;
1827 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
1835 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
1836 return -TARGET_EFAULT
;
1838 __put_user(cr
.pid
, &tcr
->pid
);
1839 __put_user(cr
.uid
, &tcr
->uid
);
1840 __put_user(cr
.gid
, &tcr
->gid
);
1841 unlock_user_struct(tcr
, optval_addr
, 1);
1842 if (put_user_u32(len
, optlen
)) {
1843 return -TARGET_EFAULT
;
1847 /* Options with 'int' argument. */
1848 case TARGET_SO_DEBUG
:
1851 case TARGET_SO_REUSEADDR
:
1852 optname
= SO_REUSEADDR
;
1854 case TARGET_SO_TYPE
:
1857 case TARGET_SO_ERROR
:
1860 case TARGET_SO_DONTROUTE
:
1861 optname
= SO_DONTROUTE
;
1863 case TARGET_SO_BROADCAST
:
1864 optname
= SO_BROADCAST
;
1866 case TARGET_SO_SNDBUF
:
1867 optname
= SO_SNDBUF
;
1869 case TARGET_SO_RCVBUF
:
1870 optname
= SO_RCVBUF
;
1872 case TARGET_SO_KEEPALIVE
:
1873 optname
= SO_KEEPALIVE
;
1875 case TARGET_SO_OOBINLINE
:
1876 optname
= SO_OOBINLINE
;
1878 case TARGET_SO_NO_CHECK
:
1879 optname
= SO_NO_CHECK
;
1881 case TARGET_SO_PRIORITY
:
1882 optname
= SO_PRIORITY
;
1885 case TARGET_SO_BSDCOMPAT
:
1886 optname
= SO_BSDCOMPAT
;
1889 case TARGET_SO_PASSCRED
:
1890 optname
= SO_PASSCRED
;
1892 case TARGET_SO_TIMESTAMP
:
1893 optname
= SO_TIMESTAMP
;
1895 case TARGET_SO_RCVLOWAT
:
1896 optname
= SO_RCVLOWAT
;
1898 case TARGET_SO_ACCEPTCONN
:
1899 optname
= SO_ACCEPTCONN
;
1906 /* TCP options all take an 'int' value. */
1908 if (get_user_u32(len
, optlen
))
1909 return -TARGET_EFAULT
;
1911 return -TARGET_EINVAL
;
1913 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1916 if (optname
== SO_TYPE
) {
1917 val
= host_to_target_sock_type(val
);
1922 if (put_user_u32(val
, optval_addr
))
1923 return -TARGET_EFAULT
;
1925 if (put_user_u8(val
, optval_addr
))
1926 return -TARGET_EFAULT
;
1928 if (put_user_u32(len
, optlen
))
1929 return -TARGET_EFAULT
;
1936 case IP_ROUTER_ALERT
:
1940 case IP_MTU_DISCOVER
:
1946 case IP_MULTICAST_TTL
:
1947 case IP_MULTICAST_LOOP
:
1948 if (get_user_u32(len
, optlen
))
1949 return -TARGET_EFAULT
;
1951 return -TARGET_EINVAL
;
1953 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1956 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
1958 if (put_user_u32(len
, optlen
)
1959 || put_user_u8(val
, optval_addr
))
1960 return -TARGET_EFAULT
;
1962 if (len
> sizeof(int))
1964 if (put_user_u32(len
, optlen
)
1965 || put_user_u32(val
, optval_addr
))
1966 return -TARGET_EFAULT
;
1970 ret
= -TARGET_ENOPROTOOPT
;
1976 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1978 ret
= -TARGET_EOPNOTSUPP
;
1984 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
1985 int count
, int copy
)
1987 struct target_iovec
*target_vec
;
1989 abi_ulong total_len
, max_len
;
1992 bool bad_address
= false;
1998 if (count
< 0 || count
> IOV_MAX
) {
2003 vec
= g_try_new0(struct iovec
, count
);
2009 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2010 count
* sizeof(struct target_iovec
), 1);
2011 if (target_vec
== NULL
) {
2016 /* ??? If host page size > target page size, this will result in a
2017 value larger than what we can actually support. */
2018 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
2021 for (i
= 0; i
< count
; i
++) {
2022 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2023 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2028 } else if (len
== 0) {
2029 /* Zero length pointer is ignored. */
2030 vec
[i
].iov_base
= 0;
2032 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
2033 /* If the first buffer pointer is bad, this is a fault. But
2034 * subsequent bad buffers will result in a partial write; this
2035 * is realized by filling the vector with null pointers and
2037 if (!vec
[i
].iov_base
) {
2048 if (len
> max_len
- total_len
) {
2049 len
= max_len
- total_len
;
2052 vec
[i
].iov_len
= len
;
2056 unlock_user(target_vec
, target_addr
, 0);
2061 if (tswapal(target_vec
[i
].iov_len
) > 0) {
2062 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
2065 unlock_user(target_vec
, target_addr
, 0);
2072 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
2073 int count
, int copy
)
2075 struct target_iovec
*target_vec
;
2078 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2079 count
* sizeof(struct target_iovec
), 1);
2081 for (i
= 0; i
< count
; i
++) {
2082 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2083 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2087 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
2089 unlock_user(target_vec
, target_addr
, 0);
2095 static inline int target_to_host_sock_type(int *type
)
2098 int target_type
= *type
;
2100 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
2101 case TARGET_SOCK_DGRAM
:
2102 host_type
= SOCK_DGRAM
;
2104 case TARGET_SOCK_STREAM
:
2105 host_type
= SOCK_STREAM
;
2108 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
2111 if (target_type
& TARGET_SOCK_CLOEXEC
) {
2112 #if defined(SOCK_CLOEXEC)
2113 host_type
|= SOCK_CLOEXEC
;
2115 return -TARGET_EINVAL
;
2118 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2119 #if defined(SOCK_NONBLOCK)
2120 host_type
|= SOCK_NONBLOCK
;
2121 #elif !defined(O_NONBLOCK)
2122 return -TARGET_EINVAL
;
2129 /* Try to emulate socket type flags after socket creation. */
2130 static int sock_flags_fixup(int fd
, int target_type
)
2132 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2133 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2134 int flags
= fcntl(fd
, F_GETFL
);
2135 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
2137 return -TARGET_EINVAL
;
2144 static abi_long
packet_target_to_host_sockaddr(void *host_addr
,
2145 abi_ulong target_addr
,
2148 struct sockaddr
*addr
= host_addr
;
2149 struct target_sockaddr
*target_saddr
;
2151 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
2152 if (!target_saddr
) {
2153 return -TARGET_EFAULT
;
2156 memcpy(addr
, target_saddr
, len
);
2157 addr
->sa_family
= tswap16(target_saddr
->sa_family
);
2158 /* spkt_protocol is big-endian */
2160 unlock_user(target_saddr
, target_addr
, 0);
/* fd translation hooks for SOCK_PACKET fds: only the target->host
 * sockaddr conversion is overridden (see packet_target_to_host_sockaddr).
 */
static TargetFdTrans target_packet_trans = {
    .target_to_host_addr = packet_target_to_host_sockaddr,
};
2168 /* do_socket() Must return target values and target errnos. */
2169 static abi_long
do_socket(int domain
, int type
, int protocol
)
2171 int target_type
= type
;
2174 ret
= target_to_host_sock_type(&type
);
2179 if (domain
== PF_NETLINK
)
2180 return -TARGET_EAFNOSUPPORT
;
2182 if (domain
== AF_PACKET
||
2183 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
2184 protocol
= tswap16(protocol
);
2187 ret
= get_errno(socket(domain
, type
, protocol
));
2189 ret
= sock_flags_fixup(ret
, target_type
);
2190 if (type
== SOCK_PACKET
) {
2191 /* Manage an obsolete case :
2192 * if socket type is SOCK_PACKET, bind by name
2194 fd_trans_register(ret
, &target_packet_trans
);
2200 /* do_bind() Must return target values and target errnos. */
2201 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
2207 if ((int)addrlen
< 0) {
2208 return -TARGET_EINVAL
;
2211 addr
= alloca(addrlen
+1);
2213 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
2217 return get_errno(bind(sockfd
, addr
, addrlen
));
2220 /* do_connect() Must return target values and target errnos. */
2221 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
2227 if ((int)addrlen
< 0) {
2228 return -TARGET_EINVAL
;
2231 addr
= alloca(addrlen
+1);
2233 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
2237 return get_errno(connect(sockfd
, addr
, addrlen
));
2240 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2241 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
2242 int flags
, int send
)
2248 abi_ulong target_vec
;
2250 if (msgp
->msg_name
) {
2251 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
2252 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
2253 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
2254 tswapal(msgp
->msg_name
),
2260 msg
.msg_name
= NULL
;
2261 msg
.msg_namelen
= 0;
2263 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
2264 msg
.msg_control
= alloca(msg
.msg_controllen
);
2265 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
2267 count
= tswapal(msgp
->msg_iovlen
);
2268 target_vec
= tswapal(msgp
->msg_iov
);
2269 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
2270 target_vec
, count
, send
);
2272 ret
= -host_to_target_errno(errno
);
2275 msg
.msg_iovlen
= count
;
2279 ret
= target_to_host_cmsg(&msg
, msgp
);
2281 ret
= get_errno(sendmsg(fd
, &msg
, flags
));
2283 ret
= get_errno(recvmsg(fd
, &msg
, flags
));
2284 if (!is_error(ret
)) {
2286 ret
= host_to_target_cmsg(msgp
, &msg
);
2287 if (!is_error(ret
)) {
2288 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
2289 if (msg
.msg_name
!= NULL
) {
2290 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
2291 msg
.msg_name
, msg
.msg_namelen
);
2303 unlock_iovec(vec
, target_vec
, count
, !send
);
2308 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
2309 int flags
, int send
)
2312 struct target_msghdr
*msgp
;
2314 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
2318 return -TARGET_EFAULT
;
2320 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
2321 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
2325 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2326 * so it might not have this *mmsg-specific flag either.
2328 #ifndef MSG_WAITFORONE
2329 #define MSG_WAITFORONE 0x10000
2332 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
2333 unsigned int vlen
, unsigned int flags
,
2336 struct target_mmsghdr
*mmsgp
;
2340 if (vlen
> UIO_MAXIOV
) {
2344 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
2346 return -TARGET_EFAULT
;
2349 for (i
= 0; i
< vlen
; i
++) {
2350 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
2351 if (is_error(ret
)) {
2354 mmsgp
[i
].msg_len
= tswap32(ret
);
2355 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2356 if (flags
& MSG_WAITFORONE
) {
2357 flags
|= MSG_DONTWAIT
;
2361 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
2363 /* Return number of datagrams sent if we sent any at all;
2364 * otherwise return the error.
2372 /* If we don't have a system accept4() then just call accept.
2373 * The callsites to do_accept4() will ensure that they don't
2374 * pass a non-zero flags argument in this config.
2376 #ifndef CONFIG_ACCEPT4
/* Fallback for hosts without accept4(): callers guarantee flags == 0
 * in this configuration, so plain accept() is equivalent.
 */
static inline int accept4(int sockfd, struct sockaddr *addr,
                          socklen_t *addrlen, int flags)
{
    assert(flags == 0);
    return accept(sockfd, addr, addrlen);
}
2385 /* do_accept4() Must return target values and target errnos. */
2386 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
2387 abi_ulong target_addrlen_addr
, int flags
)
2394 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
2396 if (target_addr
== 0) {
2397 return get_errno(accept4(fd
, NULL
, NULL
, host_flags
));
2400 /* linux returns EINVAL if addrlen pointer is invalid */
2401 if (get_user_u32(addrlen
, target_addrlen_addr
))
2402 return -TARGET_EINVAL
;
2404 if ((int)addrlen
< 0) {
2405 return -TARGET_EINVAL
;
2408 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2409 return -TARGET_EINVAL
;
2411 addr
= alloca(addrlen
);
2413 ret
= get_errno(accept4(fd
, addr
, &addrlen
, host_flags
));
2414 if (!is_error(ret
)) {
2415 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2416 if (put_user_u32(addrlen
, target_addrlen_addr
))
2417 ret
= -TARGET_EFAULT
;
2422 /* do_getpeername() Must return target values and target errnos. */
2423 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
2424 abi_ulong target_addrlen_addr
)
2430 if (get_user_u32(addrlen
, target_addrlen_addr
))
2431 return -TARGET_EFAULT
;
2433 if ((int)addrlen
< 0) {
2434 return -TARGET_EINVAL
;
2437 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2438 return -TARGET_EFAULT
;
2440 addr
= alloca(addrlen
);
2442 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
2443 if (!is_error(ret
)) {
2444 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2445 if (put_user_u32(addrlen
, target_addrlen_addr
))
2446 ret
= -TARGET_EFAULT
;
2451 /* do_getsockname() Must return target values and target errnos. */
2452 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
2453 abi_ulong target_addrlen_addr
)
2459 if (get_user_u32(addrlen
, target_addrlen_addr
))
2460 return -TARGET_EFAULT
;
2462 if ((int)addrlen
< 0) {
2463 return -TARGET_EINVAL
;
2466 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2467 return -TARGET_EFAULT
;
2469 addr
= alloca(addrlen
);
2471 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
2472 if (!is_error(ret
)) {
2473 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2474 if (put_user_u32(addrlen
, target_addrlen_addr
))
2475 ret
= -TARGET_EFAULT
;
2480 /* do_socketpair() Must return target values and target errnos. */
2481 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
2482 abi_ulong target_tab_addr
)
2487 target_to_host_sock_type(&type
);
2489 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
2490 if (!is_error(ret
)) {
2491 if (put_user_s32(tab
[0], target_tab_addr
)
2492 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
2493 ret
= -TARGET_EFAULT
;
2498 /* do_sendto() Must return target values and target errnos. */
2499 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
2500 abi_ulong target_addr
, socklen_t addrlen
)
2506 if ((int)addrlen
< 0) {
2507 return -TARGET_EINVAL
;
2510 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
2512 return -TARGET_EFAULT
;
2514 addr
= alloca(addrlen
+1);
2515 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
2517 unlock_user(host_msg
, msg
, 0);
2520 ret
= get_errno(sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
2522 ret
= get_errno(send(fd
, host_msg
, len
, flags
));
2524 unlock_user(host_msg
, msg
, 0);
2528 /* do_recvfrom() Must return target values and target errnos. */
2529 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
2530 abi_ulong target_addr
,
2531 abi_ulong target_addrlen
)
2538 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
2540 return -TARGET_EFAULT
;
2542 if (get_user_u32(addrlen
, target_addrlen
)) {
2543 ret
= -TARGET_EFAULT
;
2546 if ((int)addrlen
< 0) {
2547 ret
= -TARGET_EINVAL
;
2550 addr
= alloca(addrlen
);
2551 ret
= get_errno(recvfrom(fd
, host_msg
, len
, flags
, addr
, &addrlen
));
2553 addr
= NULL
; /* To keep compiler quiet. */
2554 ret
= get_errno(qemu_recv(fd
, host_msg
, len
, flags
));
2556 if (!is_error(ret
)) {
2558 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2559 if (put_user_u32(addrlen
, target_addrlen
)) {
2560 ret
= -TARGET_EFAULT
;
2564 unlock_user(host_msg
, msg
, len
);
2567 unlock_user(host_msg
, msg
, 0);
2572 #ifdef TARGET_NR_socketcall
2573 /* do_socketcall() Must return target values and target errnos. */
2574 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
2576 static const unsigned ac
[] = { /* number of arguments per call */
2577 [SOCKOP_socket
] = 3, /* domain, type, protocol */
2578 [SOCKOP_bind
] = 3, /* sockfd, addr, addrlen */
2579 [SOCKOP_connect
] = 3, /* sockfd, addr, addrlen */
2580 [SOCKOP_listen
] = 2, /* sockfd, backlog */
2581 [SOCKOP_accept
] = 3, /* sockfd, addr, addrlen */
2582 [SOCKOP_accept4
] = 4, /* sockfd, addr, addrlen, flags */
2583 [SOCKOP_getsockname
] = 3, /* sockfd, addr, addrlen */
2584 [SOCKOP_getpeername
] = 3, /* sockfd, addr, addrlen */
2585 [SOCKOP_socketpair
] = 4, /* domain, type, protocol, tab */
2586 [SOCKOP_send
] = 4, /* sockfd, msg, len, flags */
2587 [SOCKOP_recv
] = 4, /* sockfd, msg, len, flags */
2588 [SOCKOP_sendto
] = 6, /* sockfd, msg, len, flags, addr, addrlen */
2589 [SOCKOP_recvfrom
] = 6, /* sockfd, msg, len, flags, addr, addrlen */
2590 [SOCKOP_shutdown
] = 2, /* sockfd, how */
2591 [SOCKOP_sendmsg
] = 3, /* sockfd, msg, flags */
2592 [SOCKOP_recvmsg
] = 3, /* sockfd, msg, flags */
2593 [SOCKOP_sendmmsg
] = 4, /* sockfd, msgvec, vlen, flags */
2594 [SOCKOP_recvmmsg
] = 4, /* sockfd, msgvec, vlen, flags */
2595 [SOCKOP_setsockopt
] = 5, /* sockfd, level, optname, optval, optlen */
2596 [SOCKOP_getsockopt
] = 5, /* sockfd, level, optname, optval, optlen */
2598 abi_long a
[6]; /* max 6 args */
2600 /* first, collect the arguments in a[] according to ac[] */
2601 if (num
>= 0 && num
< ARRAY_SIZE(ac
)) {
2603 assert(ARRAY_SIZE(a
) >= ac
[num
]); /* ensure we have space for args */
2604 for (i
= 0; i
< ac
[num
]; ++i
) {
2605 if (get_user_ual(a
[i
], vptr
+ i
* sizeof(abi_long
)) != 0) {
2606 return -TARGET_EFAULT
;
2611 /* now when we have the args, actually handle the call */
2613 case SOCKOP_socket
: /* domain, type, protocol */
2614 return do_socket(a
[0], a
[1], a
[2]);
2615 case SOCKOP_bind
: /* sockfd, addr, addrlen */
2616 return do_bind(a
[0], a
[1], a
[2]);
2617 case SOCKOP_connect
: /* sockfd, addr, addrlen */
2618 return do_connect(a
[0], a
[1], a
[2]);
2619 case SOCKOP_listen
: /* sockfd, backlog */
2620 return get_errno(listen(a
[0], a
[1]));
2621 case SOCKOP_accept
: /* sockfd, addr, addrlen */
2622 return do_accept4(a
[0], a
[1], a
[2], 0);
2623 case SOCKOP_accept4
: /* sockfd, addr, addrlen, flags */
2624 return do_accept4(a
[0], a
[1], a
[2], a
[3]);
2625 case SOCKOP_getsockname
: /* sockfd, addr, addrlen */
2626 return do_getsockname(a
[0], a
[1], a
[2]);
2627 case SOCKOP_getpeername
: /* sockfd, addr, addrlen */
2628 return do_getpeername(a
[0], a
[1], a
[2]);
2629 case SOCKOP_socketpair
: /* domain, type, protocol, tab */
2630 return do_socketpair(a
[0], a
[1], a
[2], a
[3]);
2631 case SOCKOP_send
: /* sockfd, msg, len, flags */
2632 return do_sendto(a
[0], a
[1], a
[2], a
[3], 0, 0);
2633 case SOCKOP_recv
: /* sockfd, msg, len, flags */
2634 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], 0, 0);
2635 case SOCKOP_sendto
: /* sockfd, msg, len, flags, addr, addrlen */
2636 return do_sendto(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
2637 case SOCKOP_recvfrom
: /* sockfd, msg, len, flags, addr, addrlen */
2638 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
2639 case SOCKOP_shutdown
: /* sockfd, how */
2640 return get_errno(shutdown(a
[0], a
[1]));
2641 case SOCKOP_sendmsg
: /* sockfd, msg, flags */
2642 return do_sendrecvmsg(a
[0], a
[1], a
[2], 1);
2643 case SOCKOP_recvmsg
: /* sockfd, msg, flags */
2644 return do_sendrecvmsg(a
[0], a
[1], a
[2], 0);
2645 case SOCKOP_sendmmsg
: /* sockfd, msgvec, vlen, flags */
2646 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 1);
2647 case SOCKOP_recvmmsg
: /* sockfd, msgvec, vlen, flags */
2648 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 0);
2649 case SOCKOP_setsockopt
: /* sockfd, level, optname, optval, optlen */
2650 return do_setsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
2651 case SOCKOP_getsockopt
: /* sockfd, level, optname, optval, optlen */
2652 return do_getsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
2654 gemu_log("Unsupported socketcall: %d\n", num
);
2655 return -TARGET_ENOSYS
;
2660 #define N_SHM_REGIONS 32
2662 static struct shm_region
{
2666 } shm_regions
[N_SHM_REGIONS
];
2668 struct target_semid_ds
2670 struct target_ipc_perm sem_perm
;
2671 abi_ulong sem_otime
;
2672 #if !defined(TARGET_PPC64)
2673 abi_ulong __unused1
;
2675 abi_ulong sem_ctime
;
2676 #if !defined(TARGET_PPC64)
2677 abi_ulong __unused2
;
2679 abi_ulong sem_nsems
;
2680 abi_ulong __unused3
;
2681 abi_ulong __unused4
;
2684 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
2685 abi_ulong target_addr
)
2687 struct target_ipc_perm
*target_ip
;
2688 struct target_semid_ds
*target_sd
;
2690 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2691 return -TARGET_EFAULT
;
2692 target_ip
= &(target_sd
->sem_perm
);
2693 host_ip
->__key
= tswap32(target_ip
->__key
);
2694 host_ip
->uid
= tswap32(target_ip
->uid
);
2695 host_ip
->gid
= tswap32(target_ip
->gid
);
2696 host_ip
->cuid
= tswap32(target_ip
->cuid
);
2697 host_ip
->cgid
= tswap32(target_ip
->cgid
);
2698 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2699 host_ip
->mode
= tswap32(target_ip
->mode
);
2701 host_ip
->mode
= tswap16(target_ip
->mode
);
2703 #if defined(TARGET_PPC)
2704 host_ip
->__seq
= tswap32(target_ip
->__seq
);
2706 host_ip
->__seq
= tswap16(target_ip
->__seq
);
2708 unlock_user_struct(target_sd
, target_addr
, 0);
2712 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
2713 struct ipc_perm
*host_ip
)
2715 struct target_ipc_perm
*target_ip
;
2716 struct target_semid_ds
*target_sd
;
2718 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2719 return -TARGET_EFAULT
;
2720 target_ip
= &(target_sd
->sem_perm
);
2721 target_ip
->__key
= tswap32(host_ip
->__key
);
2722 target_ip
->uid
= tswap32(host_ip
->uid
);
2723 target_ip
->gid
= tswap32(host_ip
->gid
);
2724 target_ip
->cuid
= tswap32(host_ip
->cuid
);
2725 target_ip
->cgid
= tswap32(host_ip
->cgid
);
2726 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2727 target_ip
->mode
= tswap32(host_ip
->mode
);
2729 target_ip
->mode
= tswap16(host_ip
->mode
);
2731 #if defined(TARGET_PPC)
2732 target_ip
->__seq
= tswap32(host_ip
->__seq
);
2734 target_ip
->__seq
= tswap16(host_ip
->__seq
);
2736 unlock_user_struct(target_sd
, target_addr
, 1);
2740 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
2741 abi_ulong target_addr
)
2743 struct target_semid_ds
*target_sd
;
2745 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2746 return -TARGET_EFAULT
;
2747 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
2748 return -TARGET_EFAULT
;
2749 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
2750 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
2751 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
2752 unlock_user_struct(target_sd
, target_addr
, 0);
2756 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
2757 struct semid_ds
*host_sd
)
2759 struct target_semid_ds
*target_sd
;
2761 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2762 return -TARGET_EFAULT
;
2763 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
2764 return -TARGET_EFAULT
;
2765 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
2766 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
2767 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
2768 unlock_user_struct(target_sd
, target_addr
, 1);
2772 struct target_seminfo
{
2785 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
2786 struct seminfo
*host_seminfo
)
2788 struct target_seminfo
*target_seminfo
;
2789 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
2790 return -TARGET_EFAULT
;
2791 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
2792 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
2793 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
2794 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
2795 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
2796 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
2797 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
2798 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
2799 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
2800 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
2801 unlock_user_struct(target_seminfo
, target_addr
, 1);
2807 struct semid_ds
*buf
;
2808 unsigned short *array
;
2809 struct seminfo
*__buf
;
2812 union target_semun
{
2819 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
2820 abi_ulong target_addr
)
2823 unsigned short *array
;
2825 struct semid_ds semid_ds
;
2828 semun
.buf
= &semid_ds
;
2830 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
2832 return get_errno(ret
);
2834 nsems
= semid_ds
.sem_nsems
;
2836 *host_array
= g_try_new(unsigned short, nsems
);
2838 return -TARGET_ENOMEM
;
2840 array
= lock_user(VERIFY_READ
, target_addr
,
2841 nsems
*sizeof(unsigned short), 1);
2843 g_free(*host_array
);
2844 return -TARGET_EFAULT
;
2847 for(i
=0; i
<nsems
; i
++) {
2848 __get_user((*host_array
)[i
], &array
[i
]);
2850 unlock_user(array
, target_addr
, 0);
2855 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
2856 unsigned short **host_array
)
2859 unsigned short *array
;
2861 struct semid_ds semid_ds
;
2864 semun
.buf
= &semid_ds
;
2866 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
2868 return get_errno(ret
);
2870 nsems
= semid_ds
.sem_nsems
;
2872 array
= lock_user(VERIFY_WRITE
, target_addr
,
2873 nsems
*sizeof(unsigned short), 0);
2875 return -TARGET_EFAULT
;
2877 for(i
=0; i
<nsems
; i
++) {
2878 __put_user((*host_array
)[i
], &array
[i
]);
2880 g_free(*host_array
);
2881 unlock_user(array
, target_addr
, 1);
2886 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
2887 abi_ulong target_arg
)
2889 union target_semun target_su
= { .buf
= target_arg
};
2891 struct semid_ds dsarg
;
2892 unsigned short *array
= NULL
;
2893 struct seminfo seminfo
;
2894 abi_long ret
= -TARGET_EINVAL
;
2901 /* In 64 bit cross-endian situations, we will erroneously pick up
2902 * the wrong half of the union for the "val" element. To rectify
2903 * this, the entire 8-byte structure is byteswapped, followed by
2904 * a swap of the 4 byte val field. In other cases, the data is
2905 * already in proper host byte order. */
2906 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
2907 target_su
.buf
= tswapal(target_su
.buf
);
2908 arg
.val
= tswap32(target_su
.val
);
2910 arg
.val
= target_su
.val
;
2912 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2916 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
2920 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2921 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
2928 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
2932 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2933 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
2939 arg
.__buf
= &seminfo
;
2940 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2941 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
2949 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
2956 struct target_sembuf
{
2957 unsigned short sem_num
;
2962 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
2963 abi_ulong target_addr
,
2966 struct target_sembuf
*target_sembuf
;
2969 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
2970 nsops
*sizeof(struct target_sembuf
), 1);
2972 return -TARGET_EFAULT
;
2974 for(i
=0; i
<nsops
; i
++) {
2975 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
2976 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
2977 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
2980 unlock_user(target_sembuf
, target_addr
, 0);
2985 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
2987 struct sembuf sops
[nsops
];
2989 if (target_to_host_sembuf(sops
, ptr
, nsops
))
2990 return -TARGET_EFAULT
;
2992 return get_errno(semop(semid
, sops
, nsops
));
2995 struct target_msqid_ds
2997 struct target_ipc_perm msg_perm
;
2998 abi_ulong msg_stime
;
2999 #if TARGET_ABI_BITS == 32
3000 abi_ulong __unused1
;
3002 abi_ulong msg_rtime
;
3003 #if TARGET_ABI_BITS == 32
3004 abi_ulong __unused2
;
3006 abi_ulong msg_ctime
;
3007 #if TARGET_ABI_BITS == 32
3008 abi_ulong __unused3
;
3010 abi_ulong __msg_cbytes
;
3012 abi_ulong msg_qbytes
;
3013 abi_ulong msg_lspid
;
3014 abi_ulong msg_lrpid
;
3015 abi_ulong __unused4
;
3016 abi_ulong __unused5
;
3019 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
3020 abi_ulong target_addr
)
3022 struct target_msqid_ds
*target_md
;
3024 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
3025 return -TARGET_EFAULT
;
3026 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
3027 return -TARGET_EFAULT
;
3028 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
3029 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
3030 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
3031 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
3032 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
3033 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
3034 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
3035 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
3036 unlock_user_struct(target_md
, target_addr
, 0);
3040 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
3041 struct msqid_ds
*host_md
)
3043 struct target_msqid_ds
*target_md
;
3045 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
3046 return -TARGET_EFAULT
;
3047 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
3048 return -TARGET_EFAULT
;
3049 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
3050 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
3051 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
3052 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
3053 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
3054 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
3055 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
3056 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
3057 unlock_user_struct(target_md
, target_addr
, 1);
3061 struct target_msginfo
{
3069 unsigned short int msgseg
;
3072 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
3073 struct msginfo
*host_msginfo
)
3075 struct target_msginfo
*target_msginfo
;
3076 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
3077 return -TARGET_EFAULT
;
3078 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
3079 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
3080 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
3081 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
3082 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
3083 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
3084 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
3085 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
3086 unlock_user_struct(target_msginfo
, target_addr
, 1);
3090 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
3092 struct msqid_ds dsarg
;
3093 struct msginfo msginfo
;
3094 abi_long ret
= -TARGET_EINVAL
;
3102 if (target_to_host_msqid_ds(&dsarg
,ptr
))
3103 return -TARGET_EFAULT
;
3104 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
3105 if (host_to_target_msqid_ds(ptr
,&dsarg
))
3106 return -TARGET_EFAULT
;
3109 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
3113 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
3114 if (host_to_target_msginfo(ptr
, &msginfo
))
3115 return -TARGET_EFAULT
;
3122 struct target_msgbuf
{
3127 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
3128 ssize_t msgsz
, int msgflg
)
3130 struct target_msgbuf
*target_mb
;
3131 struct msgbuf
*host_mb
;
3135 return -TARGET_EINVAL
;
3138 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
3139 return -TARGET_EFAULT
;
3140 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
3142 unlock_user_struct(target_mb
, msgp
, 0);
3143 return -TARGET_ENOMEM
;
3145 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
3146 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
3147 ret
= get_errno(msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
3149 unlock_user_struct(target_mb
, msgp
, 0);
3154 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
3155 ssize_t msgsz
, abi_long msgtyp
,
3158 struct target_msgbuf
*target_mb
;
3160 struct msgbuf
*host_mb
;
3164 return -TARGET_EINVAL
;
3167 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
3168 return -TARGET_EFAULT
;
3170 host_mb
= g_malloc(msgsz
+sizeof(long));
3171 ret
= get_errno(msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
3174 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
3175 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
3176 if (!target_mtext
) {
3177 ret
= -TARGET_EFAULT
;
3180 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
3181 unlock_user(target_mtext
, target_mtext_addr
, ret
);
3184 target_mb
->mtype
= tswapal(host_mb
->mtype
);
3188 unlock_user_struct(target_mb
, msgp
, 1);
3193 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
3194 abi_ulong target_addr
)
3196 struct target_shmid_ds
*target_sd
;
3198 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3199 return -TARGET_EFAULT
;
3200 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
3201 return -TARGET_EFAULT
;
3202 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
3203 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
3204 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
3205 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
3206 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
3207 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
3208 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
3209 unlock_user_struct(target_sd
, target_addr
, 0);
3213 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
3214 struct shmid_ds
*host_sd
)
3216 struct target_shmid_ds
*target_sd
;
3218 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3219 return -TARGET_EFAULT
;
3220 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
3221 return -TARGET_EFAULT
;
3222 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
3223 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
3224 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
3225 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
3226 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
3227 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
3228 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
3229 unlock_user_struct(target_sd
, target_addr
, 1);
3233 struct target_shminfo
{
3241 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
3242 struct shminfo
*host_shminfo
)
3244 struct target_shminfo
*target_shminfo
;
3245 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
3246 return -TARGET_EFAULT
;
3247 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
3248 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
3249 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
3250 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
3251 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
3252 unlock_user_struct(target_shminfo
, target_addr
, 1);
3256 struct target_shm_info
{
3261 abi_ulong swap_attempts
;
3262 abi_ulong swap_successes
;
3265 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
3266 struct shm_info
*host_shm_info
)
3268 struct target_shm_info
*target_shm_info
;
3269 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
3270 return -TARGET_EFAULT
;
3271 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
3272 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
3273 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
3274 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
3275 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
3276 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
3277 unlock_user_struct(target_shm_info
, target_addr
, 1);
3281 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
3283 struct shmid_ds dsarg
;
3284 struct shminfo shminfo
;
3285 struct shm_info shm_info
;
3286 abi_long ret
= -TARGET_EINVAL
;
3294 if (target_to_host_shmid_ds(&dsarg
, buf
))
3295 return -TARGET_EFAULT
;
3296 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
3297 if (host_to_target_shmid_ds(buf
, &dsarg
))
3298 return -TARGET_EFAULT
;
3301 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
3302 if (host_to_target_shminfo(buf
, &shminfo
))
3303 return -TARGET_EFAULT
;
3306 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
3307 if (host_to_target_shm_info(buf
, &shm_info
))
3308 return -TARGET_EFAULT
;
3313 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
3320 static inline abi_ulong
do_shmat(int shmid
, abi_ulong shmaddr
, int shmflg
)
3324 struct shmid_ds shm_info
;
3327 /* find out the length of the shared memory segment */
3328 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
3329 if (is_error(ret
)) {
3330 /* can't get length, bail out */
3337 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
3339 abi_ulong mmap_start
;
3341 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
3343 if (mmap_start
== -1) {
3345 host_raddr
= (void *)-1;
3347 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
3350 if (host_raddr
== (void *)-1) {
3352 return get_errno((long)host_raddr
);
3354 raddr
=h2g((unsigned long)host_raddr
);
3356 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
3357 PAGE_VALID
| PAGE_READ
|
3358 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
3360 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
3361 if (!shm_regions
[i
].in_use
) {
3362 shm_regions
[i
].in_use
= true;
3363 shm_regions
[i
].start
= raddr
;
3364 shm_regions
[i
].size
= shm_info
.shm_segsz
;
3374 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
3378 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
3379 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
3380 shm_regions
[i
].in_use
= false;
3381 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
3386 return get_errno(shmdt(g2h(shmaddr
)));
3389 #ifdef TARGET_NR_ipc
3390 /* ??? This only works with linear mappings. */
3391 /* do_ipc() must return target values and target errnos. */
3392 static abi_long
do_ipc(unsigned int call
, abi_long first
,
3393 abi_long second
, abi_long third
,
3394 abi_long ptr
, abi_long fifth
)
3399 version
= call
>> 16;
3404 ret
= do_semop(first
, ptr
, second
);
3408 ret
= get_errno(semget(first
, second
, third
));
3411 case IPCOP_semctl
: {
3412 /* The semun argument to semctl is passed by value, so dereference the
3415 get_user_ual(atptr
, ptr
);
3416 ret
= do_semctl(first
, second
, third
, atptr
);
3421 ret
= get_errno(msgget(first
, second
));
3425 ret
= do_msgsnd(first
, ptr
, second
, third
);
3429 ret
= do_msgctl(first
, second
, ptr
);
3436 struct target_ipc_kludge
{
3441 if (!lock_user_struct(VERIFY_READ
, tmp
, ptr
, 1)) {
3442 ret
= -TARGET_EFAULT
;
3446 ret
= do_msgrcv(first
, tswapal(tmp
->msgp
), second
, tswapal(tmp
->msgtyp
), third
);
3448 unlock_user_struct(tmp
, ptr
, 0);
3452 ret
= do_msgrcv(first
, ptr
, second
, fifth
, third
);
3461 raddr
= do_shmat(first
, ptr
, second
);
3462 if (is_error(raddr
))
3463 return get_errno(raddr
);
3464 if (put_user_ual(raddr
, third
))
3465 return -TARGET_EFAULT
;
3469 ret
= -TARGET_EINVAL
;
3474 ret
= do_shmdt(ptr
);
3478 /* IPC_* flag values are the same on all linux platforms */
3479 ret
= get_errno(shmget(first
, second
, third
));
3482 /* IPC_* and SHM_* command values are the same on all linux platforms */
3484 ret
= do_shmctl(first
, second
, ptr
);
3487 gemu_log("Unsupported ipc call: %d (version %d)\n", call
, version
);
3488 ret
= -TARGET_ENOSYS
;
3495 /* kernel structure types definitions */
3497 #define STRUCT(name, ...) STRUCT_ ## name,
3498 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3500 #include "syscall_types.h"
3504 #undef STRUCT_SPECIAL
3506 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3507 #define STRUCT_SPECIAL(name)
3508 #include "syscall_types.h"
3510 #undef STRUCT_SPECIAL
3512 typedef struct IOCTLEntry IOCTLEntry
;
3514 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3515 int fd
, int cmd
, abi_long arg
);
3519 unsigned int host_cmd
;
3522 do_ioctl_fn
*do_ioctl
;
3523 const argtype arg_type
[5];
3526 #define IOC_R 0x0001
3527 #define IOC_W 0x0002
3528 #define IOC_RW (IOC_R | IOC_W)
3530 #define MAX_STRUCT_SIZE 4096
3532 #ifdef CONFIG_FIEMAP
3533 /* So fiemap access checks don't overflow on 32 bit systems.
3534 * This is very slightly smaller than the limit imposed by
3535 * the underlying kernel.
3537 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3538 / sizeof(struct fiemap_extent))
3540 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3541 int fd
, int cmd
, abi_long arg
)
3543 /* The parameter for this ioctl is a struct fiemap followed
3544 * by an array of struct fiemap_extent whose size is set
3545 * in fiemap->fm_extent_count. The array is filled in by the
3548 int target_size_in
, target_size_out
;
3550 const argtype
*arg_type
= ie
->arg_type
;
3551 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
3554 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
3558 assert(arg_type
[0] == TYPE_PTR
);
3559 assert(ie
->access
== IOC_RW
);
3561 target_size_in
= thunk_type_size(arg_type
, 0);
3562 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
3564 return -TARGET_EFAULT
;
3566 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3567 unlock_user(argptr
, arg
, 0);
3568 fm
= (struct fiemap
*)buf_temp
;
3569 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
3570 return -TARGET_EINVAL
;
3573 outbufsz
= sizeof (*fm
) +
3574 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
3576 if (outbufsz
> MAX_STRUCT_SIZE
) {
3577 /* We can't fit all the extents into the fixed size buffer.
3578 * Allocate one that is large enough and use it instead.
3580 fm
= g_try_malloc(outbufsz
);
3582 return -TARGET_ENOMEM
;
3584 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
3587 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, fm
));
3588 if (!is_error(ret
)) {
3589 target_size_out
= target_size_in
;
3590 /* An extent_count of 0 means we were only counting the extents
3591 * so there are no structs to copy
3593 if (fm
->fm_extent_count
!= 0) {
3594 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
3596 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
3598 ret
= -TARGET_EFAULT
;
3600 /* Convert the struct fiemap */
3601 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
3602 if (fm
->fm_extent_count
!= 0) {
3603 p
= argptr
+ target_size_in
;
3604 /* ...and then all the struct fiemap_extents */
3605 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
3606 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
3611 unlock_user(argptr
, arg
, target_size_out
);
3621 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3622 int fd
, int cmd
, abi_long arg
)
3624 const argtype
*arg_type
= ie
->arg_type
;
3628 struct ifconf
*host_ifconf
;
3630 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
3631 int target_ifreq_size
;
3636 abi_long target_ifc_buf
;
3640 assert(arg_type
[0] == TYPE_PTR
);
3641 assert(ie
->access
== IOC_RW
);
3644 target_size
= thunk_type_size(arg_type
, 0);
3646 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3648 return -TARGET_EFAULT
;
3649 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3650 unlock_user(argptr
, arg
, 0);
3652 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
3653 target_ifc_len
= host_ifconf
->ifc_len
;
3654 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
3656 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
3657 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
3658 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
3660 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
3661 if (outbufsz
> MAX_STRUCT_SIZE
) {
3662 /* We can't fit all the extents into the fixed size buffer.
3663 * Allocate one that is large enough and use it instead.
3665 host_ifconf
= malloc(outbufsz
);
3667 return -TARGET_ENOMEM
;
3669 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
3672 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
3674 host_ifconf
->ifc_len
= host_ifc_len
;
3675 host_ifconf
->ifc_buf
= host_ifc_buf
;
3677 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, host_ifconf
));
3678 if (!is_error(ret
)) {
3679 /* convert host ifc_len to target ifc_len */
3681 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
3682 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
3683 host_ifconf
->ifc_len
= target_ifc_len
;
3685 /* restore target ifc_buf */
3687 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
3689 /* copy struct ifconf to target user */
3691 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3693 return -TARGET_EFAULT
;
3694 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
3695 unlock_user(argptr
, arg
, target_size
);
3697 /* copy ifreq[] to target user */
3699 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
3700 for (i
= 0; i
< nb_ifreq
; i
++) {
3701 thunk_convert(argptr
+ i
* target_ifreq_size
,
3702 host_ifc_buf
+ i
* sizeof(struct ifreq
),
3703 ifreq_arg_type
, THUNK_TARGET
);
3705 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
3715 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
3716 int cmd
, abi_long arg
)
3719 struct dm_ioctl
*host_dm
;
3720 abi_long guest_data
;
3721 uint32_t guest_data_size
;
3723 const argtype
*arg_type
= ie
->arg_type
;
3725 void *big_buf
= NULL
;
3729 target_size
= thunk_type_size(arg_type
, 0);
3730 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3732 ret
= -TARGET_EFAULT
;
3735 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3736 unlock_user(argptr
, arg
, 0);
3738 /* buf_temp is too small, so fetch things into a bigger buffer */
3739 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
3740 memcpy(big_buf
, buf_temp
, target_size
);
3744 guest_data
= arg
+ host_dm
->data_start
;
3745 if ((guest_data
- arg
) < 0) {
3749 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
3750 host_data
= (char*)host_dm
+ host_dm
->data_start
;
3752 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
3753 switch (ie
->host_cmd
) {
3755 case DM_LIST_DEVICES
:
3758 case DM_DEV_SUSPEND
:
3761 case DM_TABLE_STATUS
:
3762 case DM_TABLE_CLEAR
:
3764 case DM_LIST_VERSIONS
:
3768 case DM_DEV_SET_GEOMETRY
:
3769 /* data contains only strings */
3770 memcpy(host_data
, argptr
, guest_data_size
);
3773 memcpy(host_data
, argptr
, guest_data_size
);
3774 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
3778 void *gspec
= argptr
;
3779 void *cur_data
= host_data
;
3780 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
3781 int spec_size
= thunk_type_size(arg_type
, 0);
3784 for (i
= 0; i
< host_dm
->target_count
; i
++) {
3785 struct dm_target_spec
*spec
= cur_data
;
3789 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
3790 slen
= strlen((char*)gspec
+ spec_size
) + 1;
3792 spec
->next
= sizeof(*spec
) + slen
;
3793 strcpy((char*)&spec
[1], gspec
+ spec_size
);
3795 cur_data
+= spec
->next
;
3800 ret
= -TARGET_EINVAL
;
3801 unlock_user(argptr
, guest_data
, 0);
3804 unlock_user(argptr
, guest_data
, 0);
3806 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3807 if (!is_error(ret
)) {
3808 guest_data
= arg
+ host_dm
->data_start
;
3809 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
3810 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
3811 switch (ie
->host_cmd
) {
3816 case DM_DEV_SUSPEND
:
3819 case DM_TABLE_CLEAR
:
3821 case DM_DEV_SET_GEOMETRY
:
3822 /* no return data */
3824 case DM_LIST_DEVICES
:
3826 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
3827 uint32_t remaining_data
= guest_data_size
;
3828 void *cur_data
= argptr
;
3829 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
3830 int nl_size
= 12; /* can't use thunk_size due to alignment */
3833 uint32_t next
= nl
->next
;
3835 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
3837 if (remaining_data
< nl
->next
) {
3838 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3841 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
3842 strcpy(cur_data
+ nl_size
, nl
->name
);
3843 cur_data
+= nl
->next
;
3844 remaining_data
-= nl
->next
;
3848 nl
= (void*)nl
+ next
;
3853 case DM_TABLE_STATUS
:
3855 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
3856 void *cur_data
= argptr
;
3857 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
3858 int spec_size
= thunk_type_size(arg_type
, 0);
3861 for (i
= 0; i
< host_dm
->target_count
; i
++) {
3862 uint32_t next
= spec
->next
;
3863 int slen
= strlen((char*)&spec
[1]) + 1;
3864 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
3865 if (guest_data_size
< spec
->next
) {
3866 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3869 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
3870 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
3871 cur_data
= argptr
+ spec
->next
;
3872 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
3878 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
3879 int count
= *(uint32_t*)hdata
;
3880 uint64_t *hdev
= hdata
+ 8;
3881 uint64_t *gdev
= argptr
+ 8;
3884 *(uint32_t*)argptr
= tswap32(count
);
3885 for (i
= 0; i
< count
; i
++) {
3886 *gdev
= tswap64(*hdev
);
3892 case DM_LIST_VERSIONS
:
3894 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
3895 uint32_t remaining_data
= guest_data_size
;
3896 void *cur_data
= argptr
;
3897 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
3898 int vers_size
= thunk_type_size(arg_type
, 0);
3901 uint32_t next
= vers
->next
;
3903 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
3905 if (remaining_data
< vers
->next
) {
3906 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3909 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
3910 strcpy(cur_data
+ vers_size
, vers
->name
);
3911 cur_data
+= vers
->next
;
3912 remaining_data
-= vers
->next
;
3916 vers
= (void*)vers
+ next
;
3921 unlock_user(argptr
, guest_data
, 0);
3922 ret
= -TARGET_EINVAL
;
3925 unlock_user(argptr
, guest_data
, guest_data_size
);
3927 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3929 ret
= -TARGET_EFAULT
;
3932 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3933 unlock_user(argptr
, arg
, target_size
);
3940 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
3941 int cmd
, abi_long arg
)
3945 const argtype
*arg_type
= ie
->arg_type
;
3946 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
3949 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
3950 struct blkpg_partition host_part
;
3952 /* Read and convert blkpg */
3954 target_size
= thunk_type_size(arg_type
, 0);
3955 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3957 ret
= -TARGET_EFAULT
;
3960 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3961 unlock_user(argptr
, arg
, 0);
3963 switch (host_blkpg
->op
) {
3964 case BLKPG_ADD_PARTITION
:
3965 case BLKPG_DEL_PARTITION
:
3966 /* payload is struct blkpg_partition */
3969 /* Unknown opcode */
3970 ret
= -TARGET_EINVAL
;
3974 /* Read and convert blkpg->data */
3975 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
3976 target_size
= thunk_type_size(part_arg_type
, 0);
3977 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3979 ret
= -TARGET_EFAULT
;
3982 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
3983 unlock_user(argptr
, arg
, 0);
3985 /* Swizzle the data pointer to our local copy and call! */
3986 host_blkpg
->data
= &host_part
;
3987 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, host_blkpg
));
3993 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3994 int fd
, int cmd
, abi_long arg
)
3996 const argtype
*arg_type
= ie
->arg_type
;
3997 const StructEntry
*se
;
3998 const argtype
*field_types
;
3999 const int *dst_offsets
, *src_offsets
;
4002 abi_ulong
*target_rt_dev_ptr
;
4003 unsigned long *host_rt_dev_ptr
;
4007 assert(ie
->access
== IOC_W
);
4008 assert(*arg_type
== TYPE_PTR
);
4010 assert(*arg_type
== TYPE_STRUCT
);
4011 target_size
= thunk_type_size(arg_type
, 0);
4012 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4014 return -TARGET_EFAULT
;
4017 assert(*arg_type
== (int)STRUCT_rtentry
);
4018 se
= struct_entries
+ *arg_type
++;
4019 assert(se
->convert
[0] == NULL
);
4020 /* convert struct here to be able to catch rt_dev string */
4021 field_types
= se
->field_types
;
4022 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
4023 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
4024 for (i
= 0; i
< se
->nb_fields
; i
++) {
4025 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
4026 assert(*field_types
== TYPE_PTRVOID
);
4027 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
4028 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
4029 if (*target_rt_dev_ptr
!= 0) {
4030 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
4031 tswapal(*target_rt_dev_ptr
));
4032 if (!*host_rt_dev_ptr
) {
4033 unlock_user(argptr
, arg
, 0);
4034 return -TARGET_EFAULT
;
4037 *host_rt_dev_ptr
= 0;
4042 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
4043 argptr
+ src_offsets
[i
],
4044 field_types
, THUNK_HOST
);
4046 unlock_user(argptr
, arg
, 0);
4048 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
4049 if (*host_rt_dev_ptr
!= 0) {
4050 unlock_user((void *)*host_rt_dev_ptr
,
4051 *target_rt_dev_ptr
, 0);
4056 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4057 int fd
, int cmd
, abi_long arg
)
4059 int sig
= target_to_host_signal(arg
);
4060 return get_errno(ioctl(fd
, ie
->host_cmd
, sig
));
4063 static IOCTLEntry ioctl_entries
[] = {
4064 #define IOCTL(cmd, access, ...) \
4065 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
4066 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
4067 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
4072 /* ??? Implement proper locking for ioctls. */
4073 /* do_ioctl() Must return target values and target errnos. */
4074 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
4076 const IOCTLEntry
*ie
;
4077 const argtype
*arg_type
;
4079 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
4085 if (ie
->target_cmd
== 0) {
4086 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
4087 return -TARGET_ENOSYS
;
4089 if (ie
->target_cmd
== cmd
)
4093 arg_type
= ie
->arg_type
;
4095 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
4098 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
4101 switch(arg_type
[0]) {
4104 ret
= get_errno(ioctl(fd
, ie
->host_cmd
));
4108 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, arg
));
4112 target_size
= thunk_type_size(arg_type
, 0);
4113 switch(ie
->access
) {
4115 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
4116 if (!is_error(ret
)) {
4117 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4119 return -TARGET_EFAULT
;
4120 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
4121 unlock_user(argptr
, arg
, target_size
);
4125 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4127 return -TARGET_EFAULT
;
4128 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4129 unlock_user(argptr
, arg
, 0);
4130 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
4134 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4136 return -TARGET_EFAULT
;
4137 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4138 unlock_user(argptr
, arg
, 0);
4139 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
4140 if (!is_error(ret
)) {
4141 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4143 return -TARGET_EFAULT
;
4144 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
4145 unlock_user(argptr
, arg
, target_size
);
4151 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
4152 (long)cmd
, arg_type
[0]);
4153 ret
= -TARGET_ENOSYS
;
4159 static const bitmask_transtbl iflag_tbl
[] = {
4160 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
4161 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
4162 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
4163 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
4164 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
4165 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
4166 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
4167 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
4168 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
4169 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
4170 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
4171 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
4172 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
4173 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
4177 static const bitmask_transtbl oflag_tbl
[] = {
4178 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
4179 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
4180 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
4181 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
4182 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
4183 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
4184 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
4185 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
4186 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
4187 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
4188 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
4189 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
4190 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
4191 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
4192 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
4193 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
4194 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
4195 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
4196 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
4197 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
4198 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
4199 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
4200 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
4201 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
4205 static const bitmask_transtbl cflag_tbl
[] = {
4206 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
4207 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
4208 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
4209 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
4210 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
4211 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
4212 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
4213 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
4214 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
4215 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
4216 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
4217 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
4218 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
4219 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
4220 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
4221 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
4222 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
4223 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
4224 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
4225 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
4226 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
4227 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
4228 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
4229 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
4230 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
4231 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
4232 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
4233 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
4234 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
4235 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
4236 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
4240 static const bitmask_transtbl lflag_tbl
[] = {
4241 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
4242 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
4243 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
4244 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
4245 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
4246 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
4247 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
4248 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
4249 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
4250 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
4251 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
4252 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
4253 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
4254 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
4255 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
4259 static void target_to_host_termios (void *dst
, const void *src
)
4261 struct host_termios
*host
= dst
;
4262 const struct target_termios
*target
= src
;
4265 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
4267 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
4269 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
4271 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
4272 host
->c_line
= target
->c_line
;
4274 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
4275 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
4276 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
4277 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
4278 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
4279 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
4280 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
4281 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
4282 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
4283 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
4284 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
4285 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
4286 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
4287 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
4288 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
4289 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
4290 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
4291 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
4294 static void host_to_target_termios (void *dst
, const void *src
)
4296 struct target_termios
*target
= dst
;
4297 const struct host_termios
*host
= src
;
4300 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
4302 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
4304 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
4306 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
4307 target
->c_line
= host
->c_line
;
4309 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
4310 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
4311 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
4312 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
4313 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
4314 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
4315 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
4316 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
4317 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
4318 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
4319 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
4320 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
4321 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
4322 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
4323 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
4324 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
4325 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
4326 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
4329 static const StructEntry struct_termios_def
= {
4330 .convert
= { host_to_target_termios
, target_to_host_termios
},
4331 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
4332 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
4335 static bitmask_transtbl mmap_flags_tbl
[] = {
4336 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
4337 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
4338 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
4339 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
, MAP_ANONYMOUS
, MAP_ANONYMOUS
},
4340 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
, MAP_GROWSDOWN
, MAP_GROWSDOWN
},
4341 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
, MAP_DENYWRITE
, MAP_DENYWRITE
},
4342 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
, MAP_EXECUTABLE
, MAP_EXECUTABLE
},
4343 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
4344 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
, MAP_NORESERVE
,
4349 #if defined(TARGET_I386)
4351 /* NOTE: there is really one LDT for all the threads */
4352 static uint8_t *ldt_table
;
4354 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
4361 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
4362 if (size
> bytecount
)
4364 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
4366 return -TARGET_EFAULT
;
4367 /* ??? Should this by byteswapped? */
4368 memcpy(p
, ldt_table
, size
);
4369 unlock_user(p
, ptr
, size
);
4373 /* XXX: add locking support */
4374 static abi_long
write_ldt(CPUX86State
*env
,
4375 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
4377 struct target_modify_ldt_ldt_s ldt_info
;
4378 struct target_modify_ldt_ldt_s
*target_ldt_info
;
4379 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
4380 int seg_not_present
, useable
, lm
;
4381 uint32_t *lp
, entry_1
, entry_2
;
4383 if (bytecount
!= sizeof(ldt_info
))
4384 return -TARGET_EINVAL
;
4385 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
4386 return -TARGET_EFAULT
;
4387 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
4388 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
4389 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
4390 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
4391 unlock_user_struct(target_ldt_info
, ptr
, 0);
4393 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
4394 return -TARGET_EINVAL
;
4395 seg_32bit
= ldt_info
.flags
& 1;
4396 contents
= (ldt_info
.flags
>> 1) & 3;
4397 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
4398 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
4399 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
4400 useable
= (ldt_info
.flags
>> 6) & 1;
4404 lm
= (ldt_info
.flags
>> 7) & 1;
4406 if (contents
== 3) {
4408 return -TARGET_EINVAL
;
4409 if (seg_not_present
== 0)
4410 return -TARGET_EINVAL
;
4412 /* allocate the LDT */
4414 env
->ldt
.base
= target_mmap(0,
4415 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
4416 PROT_READ
|PROT_WRITE
,
4417 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
4418 if (env
->ldt
.base
== -1)
4419 return -TARGET_ENOMEM
;
4420 memset(g2h(env
->ldt
.base
), 0,
4421 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
4422 env
->ldt
.limit
= 0xffff;
4423 ldt_table
= g2h(env
->ldt
.base
);
4426 /* NOTE: same code as Linux kernel */
4427 /* Allow LDTs to be cleared by the user. */
4428 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
4431 read_exec_only
== 1 &&
4433 limit_in_pages
== 0 &&
4434 seg_not_present
== 1 &&
4442 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
4443 (ldt_info
.limit
& 0x0ffff);
4444 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
4445 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
4446 (ldt_info
.limit
& 0xf0000) |
4447 ((read_exec_only
^ 1) << 9) |
4449 ((seg_not_present
^ 1) << 15) |
4451 (limit_in_pages
<< 23) |
4455 entry_2
|= (useable
<< 20);
4457 /* Install the new entry ... */
4459 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
4460 lp
[0] = tswap32(entry_1
);
4461 lp
[1] = tswap32(entry_2
);
4465 /* specific and weird i386 syscalls */
4466 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
4467 unsigned long bytecount
)
4473 ret
= read_ldt(ptr
, bytecount
);
4476 ret
= write_ldt(env
, ptr
, bytecount
, 1);
4479 ret
= write_ldt(env
, ptr
, bytecount
, 0);
4482 ret
= -TARGET_ENOSYS
;
4488 #if defined(TARGET_I386) && defined(TARGET_ABI32)
4489 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
4491 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
4492 struct target_modify_ldt_ldt_s ldt_info
;
4493 struct target_modify_ldt_ldt_s
*target_ldt_info
;
4494 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
4495 int seg_not_present
, useable
, lm
;
4496 uint32_t *lp
, entry_1
, entry_2
;
4499 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
4500 if (!target_ldt_info
)
4501 return -TARGET_EFAULT
;
4502 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
4503 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
4504 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
4505 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
4506 if (ldt_info
.entry_number
== -1) {
4507 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
4508 if (gdt_table
[i
] == 0) {
4509 ldt_info
.entry_number
= i
;
4510 target_ldt_info
->entry_number
= tswap32(i
);
4515 unlock_user_struct(target_ldt_info
, ptr
, 1);
4517 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
4518 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
4519 return -TARGET_EINVAL
;
4520 seg_32bit
= ldt_info
.flags
& 1;
4521 contents
= (ldt_info
.flags
>> 1) & 3;
4522 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
4523 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
4524 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
4525 useable
= (ldt_info
.flags
>> 6) & 1;
4529 lm
= (ldt_info
.flags
>> 7) & 1;
4532 if (contents
== 3) {
4533 if (seg_not_present
== 0)
4534 return -TARGET_EINVAL
;
4537 /* NOTE: same code as Linux kernel */
4538 /* Allow LDTs to be cleared by the user. */
4539 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
4540 if ((contents
== 0 &&
4541 read_exec_only
== 1 &&
4543 limit_in_pages
== 0 &&
4544 seg_not_present
== 1 &&
4552 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
4553 (ldt_info
.limit
& 0x0ffff);
4554 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
4555 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
4556 (ldt_info
.limit
& 0xf0000) |
4557 ((read_exec_only
^ 1) << 9) |
4559 ((seg_not_present
^ 1) << 15) |
4561 (limit_in_pages
<< 23) |
4566 /* Install the new entry ... */
4568 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
4569 lp
[0] = tswap32(entry_1
);
4570 lp
[1] = tswap32(entry_2
);
4574 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
4576 struct target_modify_ldt_ldt_s
*target_ldt_info
;
4577 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
4578 uint32_t base_addr
, limit
, flags
;
4579 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
4580 int seg_not_present
, useable
, lm
;
4581 uint32_t *lp
, entry_1
, entry_2
;
4583 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
4584 if (!target_ldt_info
)
4585 return -TARGET_EFAULT
;
4586 idx
= tswap32(target_ldt_info
->entry_number
);
4587 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
4588 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
4589 unlock_user_struct(target_ldt_info
, ptr
, 1);
4590 return -TARGET_EINVAL
;
4592 lp
= (uint32_t *)(gdt_table
+ idx
);
4593 entry_1
= tswap32(lp
[0]);
4594 entry_2
= tswap32(lp
[1]);
4596 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
4597 contents
= (entry_2
>> 10) & 3;
4598 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
4599 seg_32bit
= (entry_2
>> 22) & 1;
4600 limit_in_pages
= (entry_2
>> 23) & 1;
4601 useable
= (entry_2
>> 20) & 1;
4605 lm
= (entry_2
>> 21) & 1;
4607 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
4608 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
4609 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
4610 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
4611 base_addr
= (entry_1
>> 16) |
4612 (entry_2
& 0xff000000) |
4613 ((entry_2
& 0xff) << 16);
4614 target_ldt_info
->base_addr
= tswapal(base_addr
);
4615 target_ldt_info
->limit
= tswap32(limit
);
4616 target_ldt_info
->flags
= tswap32(flags
);
4617 unlock_user_struct(target_ldt_info
, ptr
, 1);
4620 #endif /* TARGET_I386 && TARGET_ABI32 */
4622 #ifndef TARGET_ABI32
4623 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
4630 case TARGET_ARCH_SET_GS
:
4631 case TARGET_ARCH_SET_FS
:
4632 if (code
== TARGET_ARCH_SET_GS
)
4636 cpu_x86_load_seg(env
, idx
, 0);
4637 env
->segs
[idx
].base
= addr
;
4639 case TARGET_ARCH_GET_GS
:
4640 case TARGET_ARCH_GET_FS
:
4641 if (code
== TARGET_ARCH_GET_GS
)
4645 val
= env
->segs
[idx
].base
;
4646 if (put_user(val
, addr
, abi_ulong
))
4647 ret
= -TARGET_EFAULT
;
4650 ret
= -TARGET_EINVAL
;
4657 #endif /* defined(TARGET_I386) */
4659 #define NEW_STACK_SIZE 0x40000
4662 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
4665 pthread_mutex_t mutex
;
4666 pthread_cond_t cond
;
4669 abi_ulong child_tidptr
;
4670 abi_ulong parent_tidptr
;
4674 static void *clone_func(void *arg
)
4676 new_thread_info
*info
= arg
;
4681 rcu_register_thread();
4683 cpu
= ENV_GET_CPU(env
);
4685 ts
= (TaskState
*)cpu
->opaque
;
4686 info
->tid
= gettid();
4687 cpu
->host_tid
= info
->tid
;
4689 if (info
->child_tidptr
)
4690 put_user_u32(info
->tid
, info
->child_tidptr
);
4691 if (info
->parent_tidptr
)
4692 put_user_u32(info
->tid
, info
->parent_tidptr
);
4693 /* Enable signals. */
4694 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
4695 /* Signal to the parent that we're ready. */
4696 pthread_mutex_lock(&info
->mutex
);
4697 pthread_cond_broadcast(&info
->cond
);
4698 pthread_mutex_unlock(&info
->mutex
);
4699 /* Wait until the parent has finshed initializing the tls state. */
4700 pthread_mutex_lock(&clone_lock
);
4701 pthread_mutex_unlock(&clone_lock
);
4707 /* do_fork() Must return host values and target errnos (unlike most
4708 do_*() functions). */
4709 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
4710 abi_ulong parent_tidptr
, target_ulong newtls
,
4711 abi_ulong child_tidptr
)
4713 CPUState
*cpu
= ENV_GET_CPU(env
);
4717 CPUArchState
*new_env
;
4718 unsigned int nptl_flags
;
4721 /* Emulate vfork() with fork() */
4722 if (flags
& CLONE_VFORK
)
4723 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
4725 if (flags
& CLONE_VM
) {
4726 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
4727 new_thread_info info
;
4728 pthread_attr_t attr
;
4730 ts
= g_new0(TaskState
, 1);
4731 init_task_state(ts
);
4732 /* we create a new CPU instance. */
4733 new_env
= cpu_copy(env
);
4734 /* Init regs that differ from the parent. */
4735 cpu_clone_regs(new_env
, newsp
);
4736 new_cpu
= ENV_GET_CPU(new_env
);
4737 new_cpu
->opaque
= ts
;
4738 ts
->bprm
= parent_ts
->bprm
;
4739 ts
->info
= parent_ts
->info
;
4741 flags
&= ~CLONE_NPTL_FLAGS2
;
4743 if (nptl_flags
& CLONE_CHILD_CLEARTID
) {
4744 ts
->child_tidptr
= child_tidptr
;
4747 if (nptl_flags
& CLONE_SETTLS
)
4748 cpu_set_tls (new_env
, newtls
);
4750 /* Grab a mutex so that thread setup appears atomic. */
4751 pthread_mutex_lock(&clone_lock
);
4753 memset(&info
, 0, sizeof(info
));
4754 pthread_mutex_init(&info
.mutex
, NULL
);
4755 pthread_mutex_lock(&info
.mutex
);
4756 pthread_cond_init(&info
.cond
, NULL
);
4758 if (nptl_flags
& CLONE_CHILD_SETTID
)
4759 info
.child_tidptr
= child_tidptr
;
4760 if (nptl_flags
& CLONE_PARENT_SETTID
)
4761 info
.parent_tidptr
= parent_tidptr
;
4763 ret
= pthread_attr_init(&attr
);
4764 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
4765 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
4766 /* It is not safe to deliver signals until the child has finished
4767 initializing, so temporarily block all signals. */
4768 sigfillset(&sigmask
);
4769 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
4771 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
4772 /* TODO: Free new CPU state if thread creation failed. */
4774 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
4775 pthread_attr_destroy(&attr
);
4777 /* Wait for the child to initialize. */
4778 pthread_cond_wait(&info
.cond
, &info
.mutex
);
4780 if (flags
& CLONE_PARENT_SETTID
)
4781 put_user_u32(ret
, parent_tidptr
);
4785 pthread_mutex_unlock(&info
.mutex
);
4786 pthread_cond_destroy(&info
.cond
);
4787 pthread_mutex_destroy(&info
.mutex
);
4788 pthread_mutex_unlock(&clone_lock
);
4790 /* if no CLONE_VM, we consider it is a fork */
4791 if ((flags
& ~(CSIGNAL
| CLONE_NPTL_FLAGS2
)) != 0) {
4792 return -TARGET_EINVAL
;
4797 /* Child Process. */
4799 cpu_clone_regs(env
, newsp
);
4801 /* There is a race condition here. The parent process could
4802 theoretically read the TID in the child process before the child
4803 tid is set. This would require using either ptrace
4804 (not implemented) or having *_tidptr to point at a shared memory
4805 mapping. We can't repeat the spinlock hack used above because
4806 the child process gets its own copy of the lock. */
4807 if (flags
& CLONE_CHILD_SETTID
)
4808 put_user_u32(gettid(), child_tidptr
);
4809 if (flags
& CLONE_PARENT_SETTID
)
4810 put_user_u32(gettid(), parent_tidptr
);
4811 ts
= (TaskState
*)cpu
->opaque
;
4812 if (flags
& CLONE_SETTLS
)
4813 cpu_set_tls (env
, newtls
);
4814 if (flags
& CLONE_CHILD_CLEARTID
)
4815 ts
->child_tidptr
= child_tidptr
;
4823 /* warning : doesn't handle linux specific flags... */
4824 static int target_to_host_fcntl_cmd(int cmd
)
4827 case TARGET_F_DUPFD
:
4828 case TARGET_F_GETFD
:
4829 case TARGET_F_SETFD
:
4830 case TARGET_F_GETFL
:
4831 case TARGET_F_SETFL
:
4833 case TARGET_F_GETLK
:
4835 case TARGET_F_SETLK
:
4837 case TARGET_F_SETLKW
:
4839 case TARGET_F_GETOWN
:
4841 case TARGET_F_SETOWN
:
4843 case TARGET_F_GETSIG
:
4845 case TARGET_F_SETSIG
:
4847 #if TARGET_ABI_BITS == 32
4848 case TARGET_F_GETLK64
:
4850 case TARGET_F_SETLK64
:
4852 case TARGET_F_SETLKW64
:
4855 case TARGET_F_SETLEASE
:
4857 case TARGET_F_GETLEASE
:
4859 #ifdef F_DUPFD_CLOEXEC
4860 case TARGET_F_DUPFD_CLOEXEC
:
4861 return F_DUPFD_CLOEXEC
;
4863 case TARGET_F_NOTIFY
:
4866 case TARGET_F_GETOWN_EX
:
4870 case TARGET_F_SETOWN_EX
:
4874 return -TARGET_EINVAL
;
4876 return -TARGET_EINVAL
;
4879 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
4880 static const bitmask_transtbl flock_tbl
[] = {
4881 TRANSTBL_CONVERT(F_RDLCK
),
4882 TRANSTBL_CONVERT(F_WRLCK
),
4883 TRANSTBL_CONVERT(F_UNLCK
),
4884 TRANSTBL_CONVERT(F_EXLCK
),
4885 TRANSTBL_CONVERT(F_SHLCK
),
4889 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
4892 struct target_flock
*target_fl
;
4893 struct flock64 fl64
;
4894 struct target_flock64
*target_fl64
;
4896 struct f_owner_ex fox
;
4897 struct target_f_owner_ex
*target_fox
;
4900 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
4902 if (host_cmd
== -TARGET_EINVAL
)
4906 case TARGET_F_GETLK
:
4907 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
4908 return -TARGET_EFAULT
;
4910 target_to_host_bitmask(tswap16(target_fl
->l_type
), flock_tbl
);
4911 fl
.l_whence
= tswap16(target_fl
->l_whence
);
4912 fl
.l_start
= tswapal(target_fl
->l_start
);
4913 fl
.l_len
= tswapal(target_fl
->l_len
);
4914 fl
.l_pid
= tswap32(target_fl
->l_pid
);
4915 unlock_user_struct(target_fl
, arg
, 0);
4916 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
4918 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg
, 0))
4919 return -TARGET_EFAULT
;
4921 host_to_target_bitmask(tswap16(fl
.l_type
), flock_tbl
);
4922 target_fl
->l_whence
= tswap16(fl
.l_whence
);
4923 target_fl
->l_start
= tswapal(fl
.l_start
);
4924 target_fl
->l_len
= tswapal(fl
.l_len
);
4925 target_fl
->l_pid
= tswap32(fl
.l_pid
);
4926 unlock_user_struct(target_fl
, arg
, 1);
4930 case TARGET_F_SETLK
:
4931 case TARGET_F_SETLKW
:
4932 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
4933 return -TARGET_EFAULT
;
4935 target_to_host_bitmask(tswap16(target_fl
->l_type
), flock_tbl
);
4936 fl
.l_whence
= tswap16(target_fl
->l_whence
);
4937 fl
.l_start
= tswapal(target_fl
->l_start
);
4938 fl
.l_len
= tswapal(target_fl
->l_len
);
4939 fl
.l_pid
= tswap32(target_fl
->l_pid
);
4940 unlock_user_struct(target_fl
, arg
, 0);
4941 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
4944 case TARGET_F_GETLK64
:
4945 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
4946 return -TARGET_EFAULT
;
4948 target_to_host_bitmask(tswap16(target_fl64
->l_type
), flock_tbl
) >> 1;
4949 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
4950 fl64
.l_start
= tswap64(target_fl64
->l_start
);
4951 fl64
.l_len
= tswap64(target_fl64
->l_len
);
4952 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
4953 unlock_user_struct(target_fl64
, arg
, 0);
4954 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
4956 if (!lock_user_struct(VERIFY_WRITE
, target_fl64
, arg
, 0))
4957 return -TARGET_EFAULT
;
4958 target_fl64
->l_type
=
4959 host_to_target_bitmask(tswap16(fl64
.l_type
), flock_tbl
) >> 1;
4960 target_fl64
->l_whence
= tswap16(fl64
.l_whence
);
4961 target_fl64
->l_start
= tswap64(fl64
.l_start
);
4962 target_fl64
->l_len
= tswap64(fl64
.l_len
);
4963 target_fl64
->l_pid
= tswap32(fl64
.l_pid
);
4964 unlock_user_struct(target_fl64
, arg
, 1);
4967 case TARGET_F_SETLK64
:
4968 case TARGET_F_SETLKW64
:
4969 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
4970 return -TARGET_EFAULT
;
4972 target_to_host_bitmask(tswap16(target_fl64
->l_type
), flock_tbl
) >> 1;
4973 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
4974 fl64
.l_start
= tswap64(target_fl64
->l_start
);
4975 fl64
.l_len
= tswap64(target_fl64
->l_len
);
4976 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
4977 unlock_user_struct(target_fl64
, arg
, 0);
4978 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
4981 case TARGET_F_GETFL
:
4982 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
4984 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
4988 case TARGET_F_SETFL
:
4989 ret
= get_errno(fcntl(fd
, host_cmd
, target_to_host_bitmask(arg
, fcntl_flags_tbl
)));
4993 case TARGET_F_GETOWN_EX
:
4994 ret
= get_errno(fcntl(fd
, host_cmd
, &fox
));
4996 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
4997 return -TARGET_EFAULT
;
4998 target_fox
->type
= tswap32(fox
.type
);
4999 target_fox
->pid
= tswap32(fox
.pid
);
5000 unlock_user_struct(target_fox
, arg
, 1);
5006 case TARGET_F_SETOWN_EX
:
5007 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
5008 return -TARGET_EFAULT
;
5009 fox
.type
= tswap32(target_fox
->type
);
5010 fox
.pid
= tswap32(target_fox
->pid
);
5011 unlock_user_struct(target_fox
, arg
, 0);
5012 ret
= get_errno(fcntl(fd
, host_cmd
, &fox
));
5016 case TARGET_F_SETOWN
:
5017 case TARGET_F_GETOWN
:
5018 case TARGET_F_SETSIG
:
5019 case TARGET_F_GETSIG
:
5020 case TARGET_F_SETLEASE
:
5021 case TARGET_F_GETLEASE
:
5022 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
5026 ret
= get_errno(fcntl(fd
, cmd
, arg
));
5034 static inline int high2lowuid(int uid
)
5042 static inline int high2lowgid(int gid
)
5050 static inline int low2highuid(int uid
)
5052 if ((int16_t)uid
== -1)
5058 static inline int low2highgid(int gid
)
5060 if ((int16_t)gid
== -1)
5065 static inline int tswapid(int id
)
5070 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
5072 #else /* !USE_UID16 */
5073 static inline int high2lowuid(int uid
)
5077 static inline int high2lowgid(int gid
)
5081 static inline int low2highuid(int uid
)
5085 static inline int low2highgid(int gid
)
5089 static inline int tswapid(int id
)
5094 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
5096 #endif /* USE_UID16 */
5098 void syscall_init(void)
5101 const argtype
*arg_type
;
5105 thunk_init(STRUCT_MAX
);
5107 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
5108 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
5109 #include "syscall_types.h"
5111 #undef STRUCT_SPECIAL
5113 /* Build target_to_host_errno_table[] table from
5114 * host_to_target_errno_table[]. */
5115 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
5116 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
5119 /* we patch the ioctl size if necessary. We rely on the fact that
5120 no ioctl has all the bits at '1' in the size field */
5122 while (ie
->target_cmd
!= 0) {
5123 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
5124 TARGET_IOC_SIZEMASK
) {
5125 arg_type
= ie
->arg_type
;
5126 if (arg_type
[0] != TYPE_PTR
) {
5127 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
5132 size
= thunk_type_size(arg_type
, 0);
5133 ie
->target_cmd
= (ie
->target_cmd
&
5134 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
5135 (size
<< TARGET_IOC_SIZESHIFT
);
5138 /* automatic consistency check if same arch */
5139 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
5140 (defined(__x86_64__) && defined(TARGET_X86_64))
5141 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
5142 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
5143 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#if TARGET_ABI_BITS == 32
/* Reassemble a 64-bit file offset from the two 32-bit register halves a
 * 32-bit guest ABI passes it in, honouring the guest's endian-dependent
 * pair ordering.
 */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS != 32 */
/* 64-bit ABIs pass the whole offset in one register; word1 is ignored. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* truncate64() wrapper: some ABIs require 64-bit syscall argument pairs
 * to start on an even register, in which case the offset halves arrive
 * shifted up by one argument slot.
 */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        /* Pair was aligned: the real offset words are in arg3/arg4. */
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* ftruncate64() wrapper: mirror of target_truncate64() for the fd-based
 * variant; compensates for ABIs that align 64-bit register pairs.
 */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        /* Pair was aligned: the real offset words are in arg3/arg4. */
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
5194 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
5195 abi_ulong target_addr
)
5197 struct target_timespec
*target_ts
;
5199 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
5200 return -TARGET_EFAULT
;
5201 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
5202 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
5203 unlock_user_struct(target_ts
, target_addr
, 0);
5207 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
5208 struct timespec
*host_ts
)
5210 struct target_timespec
*target_ts
;
5212 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
5213 return -TARGET_EFAULT
;
5214 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
5215 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
5216 unlock_user_struct(target_ts
, target_addr
, 1);
5220 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
5221 abi_ulong target_addr
)
5223 struct target_itimerspec
*target_itspec
;
5225 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
5226 return -TARGET_EFAULT
;
5229 host_itspec
->it_interval
.tv_sec
=
5230 tswapal(target_itspec
->it_interval
.tv_sec
);
5231 host_itspec
->it_interval
.tv_nsec
=
5232 tswapal(target_itspec
->it_interval
.tv_nsec
);
5233 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
5234 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
5236 unlock_user_struct(target_itspec
, target_addr
, 1);
5240 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
5241 struct itimerspec
*host_its
)
5243 struct target_itimerspec
*target_itspec
;
5245 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
5246 return -TARGET_EFAULT
;
5249 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
5250 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
5252 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
5253 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
5255 unlock_user_struct(target_itspec
, target_addr
, 0);
5259 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
5260 abi_ulong target_addr
)
5262 struct target_sigevent
*target_sevp
;
5264 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
5265 return -TARGET_EFAULT
;
5268 /* This union is awkward on 64 bit systems because it has a 32 bit
5269 * integer and a pointer in it; we follow the conversion approach
5270 * used for handling sigval types in signal.c so the guest should get
5271 * the correct value back even if we did a 64 bit byteswap and it's
5272 * using the 32 bit integer.
5274 host_sevp
->sigev_value
.sival_ptr
=
5275 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
5276 host_sevp
->sigev_signo
=
5277 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
5278 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
5279 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
5281 unlock_user_struct(target_sevp
, target_addr
, 1);
#if defined(TARGET_NR_mlockall)
/* Translate the guest's mlockall() flag bits into the host's
 * MCL_CURRENT/MCL_FUTURE encoding; unknown bits are dropped.
 */
static inline int target_to_host_mlockall_arg(int arg)
{
    int host_flags = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        host_flags |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        host_flags |= MCL_FUTURE;
    }
    return host_flags;
}
#endif
5300 static inline abi_long
host_to_target_stat64(void *cpu_env
,
5301 abi_ulong target_addr
,
5302 struct stat
*host_st
)
5304 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
5305 if (((CPUARMState
*)cpu_env
)->eabi
) {
5306 struct target_eabi_stat64
*target_st
;
5308 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
5309 return -TARGET_EFAULT
;
5310 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
5311 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
5312 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
5313 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
5314 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
5316 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
5317 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
5318 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
5319 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
5320 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
5321 __put_user(host_st
->st_size
, &target_st
->st_size
);
5322 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
5323 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
5324 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
5325 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
5326 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
5327 unlock_user_struct(target_st
, target_addr
, 1);
5331 #if defined(TARGET_HAS_STRUCT_STAT64)
5332 struct target_stat64
*target_st
;
5334 struct target_stat
*target_st
;
5337 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
5338 return -TARGET_EFAULT
;
5339 memset(target_st
, 0, sizeof(*target_st
));
5340 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
5341 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
5342 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
5343 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
5345 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
5346 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
5347 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
5348 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
5349 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
5350 /* XXX: better use of kernel struct */
5351 __put_user(host_st
->st_size
, &target_st
->st_size
);
5352 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
5353 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
5354 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
5355 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
5356 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
5357 unlock_user_struct(target_st
, target_addr
, 1);
5363 /* ??? Using host futex calls even when target atomic operations
5364 are not really atomic probably breaks things. However implementing
5365 futexes locally would make futexes shared between multiple processes
5366 tricky. However they're probably useless because guest atomic
5367 operations won't work either. */
5368 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
5369 target_ulong uaddr2
, int val3
)
5371 struct timespec ts
, *pts
;
5374 /* ??? We assume FUTEX_* constants are the same on both host
5376 #ifdef FUTEX_CMD_MASK
5377 base_op
= op
& FUTEX_CMD_MASK
;
5383 case FUTEX_WAIT_BITSET
:
5386 target_to_host_timespec(pts
, timeout
);
5390 return get_errno(safe_futex(g2h(uaddr
), op
, tswap32(val
),
5393 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
5395 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
5397 case FUTEX_CMP_REQUEUE
:
5399 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
5400 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
5401 But the prototype takes a `struct timespec *'; insert casts
5402 to satisfy the compiler. We do not need to tswap TIMEOUT
5403 since it's not compared to guest memory. */
5404 pts
= (struct timespec
*)(uintptr_t) timeout
;
5405 return get_errno(safe_futex(g2h(uaddr
), op
, val
, pts
,
5407 (base_op
== FUTEX_CMP_REQUEUE
5411 return -TARGET_ENOSYS
;
5414 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
5415 static abi_long
do_name_to_handle_at(abi_long dirfd
, abi_long pathname
,
5416 abi_long handle
, abi_long mount_id
,
5419 struct file_handle
*target_fh
;
5420 struct file_handle
*fh
;
5424 unsigned int size
, total_size
;
5426 if (get_user_s32(size
, handle
)) {
5427 return -TARGET_EFAULT
;
5430 name
= lock_user_string(pathname
);
5432 return -TARGET_EFAULT
;
5435 total_size
= sizeof(struct file_handle
) + size
;
5436 target_fh
= lock_user(VERIFY_WRITE
, handle
, total_size
, 0);
5438 unlock_user(name
, pathname
, 0);
5439 return -TARGET_EFAULT
;
5442 fh
= g_malloc0(total_size
);
5443 fh
->handle_bytes
= size
;
5445 ret
= get_errno(name_to_handle_at(dirfd
, path(name
), fh
, &mid
, flags
));
5446 unlock_user(name
, pathname
, 0);
5448 /* man name_to_handle_at(2):
5449 * Other than the use of the handle_bytes field, the caller should treat
5450 * the file_handle structure as an opaque data type
5453 memcpy(target_fh
, fh
, total_size
);
5454 target_fh
->handle_bytes
= tswap32(fh
->handle_bytes
);
5455 target_fh
->handle_type
= tswap32(fh
->handle_type
);
5457 unlock_user(target_fh
, handle
, total_size
);
5459 if (put_user_s32(mid
, mount_id
)) {
5460 return -TARGET_EFAULT
;
5468 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
5469 static abi_long
do_open_by_handle_at(abi_long mount_fd
, abi_long handle
,
5472 struct file_handle
*target_fh
;
5473 struct file_handle
*fh
;
5474 unsigned int size
, total_size
;
5477 if (get_user_s32(size
, handle
)) {
5478 return -TARGET_EFAULT
;
5481 total_size
= sizeof(struct file_handle
) + size
;
5482 target_fh
= lock_user(VERIFY_READ
, handle
, total_size
, 1);
5484 return -TARGET_EFAULT
;
5487 fh
= g_memdup(target_fh
, total_size
);
5488 fh
->handle_bytes
= size
;
5489 fh
->handle_type
= tswap32(target_fh
->handle_type
);
5491 ret
= get_errno(open_by_handle_at(mount_fd
, fh
,
5492 target_to_host_bitmask(flags
, fcntl_flags_tbl
)));
5496 unlock_user(target_fh
, handle
, total_size
);
5502 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
5504 /* signalfd siginfo conversion */
5507 host_to_target_signalfd_siginfo(struct signalfd_siginfo
*tinfo
,
5508 const struct signalfd_siginfo
*info
)
5510 int sig
= host_to_target_signal(info
->ssi_signo
);
5512 /* linux/signalfd.h defines a ssi_addr_lsb
5513 * not defined in sys/signalfd.h but used by some kernels
5516 #ifdef BUS_MCEERR_AO
5517 if (tinfo
->ssi_signo
== SIGBUS
&&
5518 (tinfo
->ssi_code
== BUS_MCEERR_AR
||
5519 tinfo
->ssi_code
== BUS_MCEERR_AO
)) {
5520 uint16_t *ssi_addr_lsb
= (uint16_t *)(&info
->ssi_addr
+ 1);
5521 uint16_t *tssi_addr_lsb
= (uint16_t *)(&tinfo
->ssi_addr
+ 1);
5522 *tssi_addr_lsb
= tswap16(*ssi_addr_lsb
);
5526 tinfo
->ssi_signo
= tswap32(sig
);
5527 tinfo
->ssi_errno
= tswap32(tinfo
->ssi_errno
);
5528 tinfo
->ssi_code
= tswap32(info
->ssi_code
);
5529 tinfo
->ssi_pid
= tswap32(info
->ssi_pid
);
5530 tinfo
->ssi_uid
= tswap32(info
->ssi_uid
);
5531 tinfo
->ssi_fd
= tswap32(info
->ssi_fd
);
5532 tinfo
->ssi_tid
= tswap32(info
->ssi_tid
);
5533 tinfo
->ssi_band
= tswap32(info
->ssi_band
);
5534 tinfo
->ssi_overrun
= tswap32(info
->ssi_overrun
);
5535 tinfo
->ssi_trapno
= tswap32(info
->ssi_trapno
);
5536 tinfo
->ssi_status
= tswap32(info
->ssi_status
);
5537 tinfo
->ssi_int
= tswap32(info
->ssi_int
);
5538 tinfo
->ssi_ptr
= tswap64(info
->ssi_ptr
);
5539 tinfo
->ssi_utime
= tswap64(info
->ssi_utime
);
5540 tinfo
->ssi_stime
= tswap64(info
->ssi_stime
);
5541 tinfo
->ssi_addr
= tswap64(info
->ssi_addr
);
5544 static abi_long
host_to_target_data_signalfd(void *buf
, size_t len
)
5548 for (i
= 0; i
< len
; i
+= sizeof(struct signalfd_siginfo
)) {
5549 host_to_target_signalfd_siginfo(buf
+ i
, buf
+ i
);
5555 static TargetFdTrans target_signalfd_trans
= {
5556 .host_to_target_data
= host_to_target_data_signalfd
,
5559 static abi_long
do_signalfd4(int fd
, abi_long mask
, int flags
)
5562 target_sigset_t
*target_mask
;
5566 if (flags
& ~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
)) {
5567 return -TARGET_EINVAL
;
5569 if (!lock_user_struct(VERIFY_READ
, target_mask
, mask
, 1)) {
5570 return -TARGET_EFAULT
;
5573 target_to_host_sigset(&host_mask
, target_mask
);
5575 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
5577 ret
= get_errno(signalfd(fd
, &host_mask
, host_flags
));
5579 fd_trans_register(ret
, &target_signalfd_trans
);
5582 unlock_user_struct(target_mask
, mask
, 0);
5588 /* Map host to target signal numbers for the wait family of syscalls.
5589 Assume all other status bits are the same. */
/* Map host to target signal numbers inside a wait-family status word.
 * Only the signal field is translated; all other status bits are
 * assumed identical between host and target.
 */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Terminating signal lives in the low 7 bits. */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* Stop signal lives in bits 8..15. */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
5602 static int open_self_cmdline(void *cpu_env
, int fd
)
5605 bool word_skipped
= false;
5607 fd_orig
= open("/proc/self/cmdline", O_RDONLY
);
5617 nb_read
= read(fd_orig
, buf
, sizeof(buf
));
5620 fd_orig
= close(fd_orig
);
5623 } else if (nb_read
== 0) {
5627 if (!word_skipped
) {
5628 /* Skip the first string, which is the path to qemu-*-static
5629 instead of the actual command. */
5630 cp_buf
= memchr(buf
, 0, sizeof(buf
));
5632 /* Null byte found, skip one string */
5634 nb_read
-= cp_buf
- buf
;
5635 word_skipped
= true;
5640 if (write(fd
, cp_buf
, nb_read
) != nb_read
) {
5649 return close(fd_orig
);
5652 static int open_self_maps(void *cpu_env
, int fd
)
5654 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
5655 TaskState
*ts
= cpu
->opaque
;
5661 fp
= fopen("/proc/self/maps", "r");
5666 while ((read
= getline(&line
, &len
, fp
)) != -1) {
5667 int fields
, dev_maj
, dev_min
, inode
;
5668 uint64_t min
, max
, offset
;
5669 char flag_r
, flag_w
, flag_x
, flag_p
;
5670 char path
[512] = "";
5671 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
5672 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
5673 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
5675 if ((fields
< 10) || (fields
> 11)) {
5678 if (h2g_valid(min
)) {
5679 int flags
= page_get_flags(h2g(min
));
5680 max
= h2g_valid(max
- 1) ? max
: (uintptr_t)g2h(GUEST_ADDR_MAX
);
5681 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
5684 if (h2g(min
) == ts
->info
->stack_limit
) {
5685 pstrcpy(path
, sizeof(path
), " [stack]");
5687 dprintf(fd
, TARGET_ABI_FMT_lx
"-" TARGET_ABI_FMT_lx
5688 " %c%c%c%c %08" PRIx64
" %02x:%02x %d %s%s\n",
5689 h2g(min
), h2g(max
- 1) + 1, flag_r
, flag_w
,
5690 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
5691 path
[0] ? " " : "", path
);
5701 static int open_self_stat(void *cpu_env
, int fd
)
5703 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
5704 TaskState
*ts
= cpu
->opaque
;
5705 abi_ulong start_stack
= ts
->info
->start_stack
;
5708 for (i
= 0; i
< 44; i
++) {
5716 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
5717 } else if (i
== 1) {
5719 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
5720 } else if (i
== 27) {
5723 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
5725 /* for the rest, there is MasterCard */
5726 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
5730 if (write(fd
, buf
, len
) != len
) {
5738 static int open_self_auxv(void *cpu_env
, int fd
)
5740 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
5741 TaskState
*ts
= cpu
->opaque
;
5742 abi_ulong auxv
= ts
->info
->saved_auxv
;
5743 abi_ulong len
= ts
->info
->auxv_len
;
5747 * Auxiliary vector is stored in target process stack.
5748 * read in whole auxv vector and copy it to file
5750 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
5754 r
= write(fd
, ptr
, len
);
5761 lseek(fd
, 0, SEEK_SET
);
5762 unlock_user(ptr
, auxv
, len
);
/* Return 1 if filename names the given /proc entry for the current
 * process, either via "/proc/self/<entry>" or "/proc/<our pid>/<entry>";
 * return 0 otherwise.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    static const char proc_prefix[] = "/proc/";
    static const char self_prefix[] = "self/";

    if (strncmp(filename, proc_prefix, strlen(proc_prefix)) != 0) {
        return 0;
    }
    filename += strlen(proc_prefix);

    if (strncmp(filename, self_prefix, strlen(self_prefix)) == 0) {
        filename += strlen(self_prefix);
    } else if (*filename >= '1' && *filename <= '9') {
        /* A numeric pid only counts when it is our own pid. */
        char myself[80];
        snprintf(myself, sizeof(myself), "%d/", getpid());
        if (strncmp(filename, myself, strlen(myself)) != 0) {
            return 0;
        }
        filename += strlen(myself);
    } else {
        return 0;
    }

    return strcmp(filename, entry) == 0;
}
5792 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-path matcher used by the fake_open table: unlike
 * is_proc_myself(), the entry must match the full filename.
 */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
5798 static int open_net_route(void *cpu_env
, int fd
)
5805 fp
= fopen("/proc/net/route", "r");
5812 read
= getline(&line
, &len
, fp
);
5813 dprintf(fd
, "%s", line
);
5817 while ((read
= getline(&line
, &len
, fp
)) != -1) {
5819 uint32_t dest
, gw
, mask
;
5820 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
5821 sscanf(line
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5822 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
5823 &mask
, &mtu
, &window
, &irtt
);
5824 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5825 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
5826 metric
, tswap32(mask
), mtu
, window
, irtt
);
5836 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
5839 const char *filename
;
5840 int (*fill
)(void *cpu_env
, int fd
);
5841 int (*cmp
)(const char *s1
, const char *s2
);
5843 const struct fake_open
*fake_open
;
5844 static const struct fake_open fakes
[] = {
5845 { "maps", open_self_maps
, is_proc_myself
},
5846 { "stat", open_self_stat
, is_proc_myself
},
5847 { "auxv", open_self_auxv
, is_proc_myself
},
5848 { "cmdline", open_self_cmdline
, is_proc_myself
},
5849 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
5850 { "/proc/net/route", open_net_route
, is_proc
},
5852 { NULL
, NULL
, NULL
}
5855 if (is_proc_myself(pathname
, "exe")) {
5856 int execfd
= qemu_getauxval(AT_EXECFD
);
5857 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
5860 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
5861 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
5866 if (fake_open
->filename
) {
5868 char filename
[PATH_MAX
];
5871 /* create temporary file to map stat to */
5872 tmpdir
= getenv("TMPDIR");
5875 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
5876 fd
= mkstemp(filename
);
5882 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
5888 lseek(fd
, 0, SEEK_SET
);
5893 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
5896 #define TIMER_MAGIC 0x0caf0000
5897 #define TIMER_MAGIC_MASK 0xffff0000
5899 /* Convert QEMU provided timer ID back to internal 16bit index format */
5900 static target_timer_t
get_timer_id(abi_long arg
)
5902 target_timer_t timerid
= arg
;
5904 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
5905 return -TARGET_EINVAL
;
5910 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
5911 return -TARGET_EINVAL
;
5917 /* do_syscall() should always have a single exit point at the end so
5918 that actions, such as logging of syscall results, can be performed.
5919 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5920 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
5921 abi_long arg2
, abi_long arg3
, abi_long arg4
,
5922 abi_long arg5
, abi_long arg6
, abi_long arg7
,
5925 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
5931 #if defined(DEBUG_ERESTARTSYS)
5932 /* Debug-only code for exercising the syscall-restart code paths
5933 * in the per-architecture cpu main loops: restart every syscall
5934 * the guest makes once before letting it through.
5941 return -TARGET_ERESTARTSYS
;
5947 gemu_log("syscall %d", num
);
5950 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
5953 case TARGET_NR_exit
:
5954 /* In old applications this may be used to implement _exit(2).
5955 However in threaded applictions it is used for thread termination,
5956 and _exit_group is used for application termination.
5957 Do thread termination if we have more then one thread. */
5958 /* FIXME: This probably breaks if a signal arrives. We should probably
5959 be disabling signals. */
5960 if (CPU_NEXT(first_cpu
)) {
5964 /* Remove the CPU from the list. */
5965 QTAILQ_REMOVE(&cpus
, cpu
, node
);
5968 if (ts
->child_tidptr
) {
5969 put_user_u32(0, ts
->child_tidptr
);
5970 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
5974 object_unref(OBJECT(cpu
));
5976 rcu_unregister_thread();
5982 gdb_exit(cpu_env
, arg1
);
5984 ret
= 0; /* avoid warning */
5986 case TARGET_NR_read
:
5990 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
5992 ret
= get_errno(safe_read(arg1
, p
, arg3
));
5994 fd_trans_host_to_target_data(arg1
)) {
5995 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
5997 unlock_user(p
, arg2
, ret
);
6000 case TARGET_NR_write
:
6001 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
6003 ret
= get_errno(safe_write(arg1
, p
, arg3
));
6004 unlock_user(p
, arg2
, 0);
6006 #ifdef TARGET_NR_open
6007 case TARGET_NR_open
:
6008 if (!(p
= lock_user_string(arg1
)))
6010 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
6011 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
6013 fd_trans_unregister(ret
);
6014 unlock_user(p
, arg1
, 0);
6017 case TARGET_NR_openat
:
6018 if (!(p
= lock_user_string(arg2
)))
6020 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
6021 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
6023 fd_trans_unregister(ret
);
6024 unlock_user(p
, arg2
, 0);
6026 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6027 case TARGET_NR_name_to_handle_at
:
6028 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
6031 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6032 case TARGET_NR_open_by_handle_at
:
6033 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
6034 fd_trans_unregister(ret
);
6037 case TARGET_NR_close
:
6038 fd_trans_unregister(arg1
);
6039 ret
= get_errno(close(arg1
));
6044 #ifdef TARGET_NR_fork
6045 case TARGET_NR_fork
:
6046 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
6049 #ifdef TARGET_NR_waitpid
6050 case TARGET_NR_waitpid
:
6053 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
6054 if (!is_error(ret
) && arg2
&& ret
6055 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
6060 #ifdef TARGET_NR_waitid
6061 case TARGET_NR_waitid
:
6065 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
6066 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
6067 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
6069 host_to_target_siginfo(p
, &info
);
6070 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
6075 #ifdef TARGET_NR_creat /* not on alpha */
6076 case TARGET_NR_creat
:
6077 if (!(p
= lock_user_string(arg1
)))
6079 ret
= get_errno(creat(p
, arg2
));
6080 fd_trans_unregister(ret
);
6081 unlock_user(p
, arg1
, 0);
6084 #ifdef TARGET_NR_link
6085 case TARGET_NR_link
:
6088 p
= lock_user_string(arg1
);
6089 p2
= lock_user_string(arg2
);
6091 ret
= -TARGET_EFAULT
;
6093 ret
= get_errno(link(p
, p2
));
6094 unlock_user(p2
, arg2
, 0);
6095 unlock_user(p
, arg1
, 0);
6099 #if defined(TARGET_NR_linkat)
6100 case TARGET_NR_linkat
:
6105 p
= lock_user_string(arg2
);
6106 p2
= lock_user_string(arg4
);
6108 ret
= -TARGET_EFAULT
;
6110 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
6111 unlock_user(p
, arg2
, 0);
6112 unlock_user(p2
, arg4
, 0);
6116 #ifdef TARGET_NR_unlink
6117 case TARGET_NR_unlink
:
6118 if (!(p
= lock_user_string(arg1
)))
6120 ret
= get_errno(unlink(p
));
6121 unlock_user(p
, arg1
, 0);
6124 #if defined(TARGET_NR_unlinkat)
6125 case TARGET_NR_unlinkat
:
6126 if (!(p
= lock_user_string(arg2
)))
6128 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
6129 unlock_user(p
, arg2
, 0);
6132 case TARGET_NR_execve
:
6134 char **argp
, **envp
;
6137 abi_ulong guest_argp
;
6138 abi_ulong guest_envp
;
6145 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
6146 if (get_user_ual(addr
, gp
))
6154 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
6155 if (get_user_ual(addr
, gp
))
6162 argp
= alloca((argc
+ 1) * sizeof(void *));
6163 envp
= alloca((envc
+ 1) * sizeof(void *));
6165 for (gp
= guest_argp
, q
= argp
; gp
;
6166 gp
+= sizeof(abi_ulong
), q
++) {
6167 if (get_user_ual(addr
, gp
))
6171 if (!(*q
= lock_user_string(addr
)))
6173 total_size
+= strlen(*q
) + 1;
6177 for (gp
= guest_envp
, q
= envp
; gp
;
6178 gp
+= sizeof(abi_ulong
), q
++) {
6179 if (get_user_ual(addr
, gp
))
6183 if (!(*q
= lock_user_string(addr
)))
6185 total_size
+= strlen(*q
) + 1;
6189 if (!(p
= lock_user_string(arg1
)))
6191 /* Although execve() is not an interruptible syscall it is
6192 * a special case where we must use the safe_syscall wrapper:
6193 * if we allow a signal to happen before we make the host
6194 * syscall then we will 'lose' it, because at the point of
6195 * execve the process leaves QEMU's control. So we use the
6196 * safe syscall wrapper to ensure that we either take the
6197 * signal as a guest signal, or else it does not happen
6198 * before the execve completes and makes it the other
6199 * program's problem.
6201 ret
= get_errno(safe_execve(p
, argp
, envp
));
6202 unlock_user(p
, arg1
, 0);
6207 ret
= -TARGET_EFAULT
;
6210 for (gp
= guest_argp
, q
= argp
; *q
;
6211 gp
+= sizeof(abi_ulong
), q
++) {
6212 if (get_user_ual(addr
, gp
)
6215 unlock_user(*q
, addr
, 0);
6217 for (gp
= guest_envp
, q
= envp
; *q
;
6218 gp
+= sizeof(abi_ulong
), q
++) {
6219 if (get_user_ual(addr
, gp
)
6222 unlock_user(*q
, addr
, 0);
6226 case TARGET_NR_chdir
:
6227 if (!(p
= lock_user_string(arg1
)))
6229 ret
= get_errno(chdir(p
));
6230 unlock_user(p
, arg1
, 0);
6232 #ifdef TARGET_NR_time
6233 case TARGET_NR_time
:
6236 ret
= get_errno(time(&host_time
));
6239 && put_user_sal(host_time
, arg1
))
6244 #ifdef TARGET_NR_mknod
6245 case TARGET_NR_mknod
:
6246 if (!(p
= lock_user_string(arg1
)))
6248 ret
= get_errno(mknod(p
, arg2
, arg3
));
6249 unlock_user(p
, arg1
, 0);
6252 #if defined(TARGET_NR_mknodat)
6253 case TARGET_NR_mknodat
:
6254 if (!(p
= lock_user_string(arg2
)))
6256 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
6257 unlock_user(p
, arg2
, 0);
6260 #ifdef TARGET_NR_chmod
6261 case TARGET_NR_chmod
:
6262 if (!(p
= lock_user_string(arg1
)))
6264 ret
= get_errno(chmod(p
, arg2
));
6265 unlock_user(p
, arg1
, 0);
6268 #ifdef TARGET_NR_break
6269 case TARGET_NR_break
:
6272 #ifdef TARGET_NR_oldstat
6273 case TARGET_NR_oldstat
:
6276 case TARGET_NR_lseek
:
6277 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
6279 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
6280 /* Alpha specific */
6281 case TARGET_NR_getxpid
:
6282 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
6283 ret
= get_errno(getpid());
6286 #ifdef TARGET_NR_getpid
6287 case TARGET_NR_getpid
:
6288 ret
= get_errno(getpid());
6291 case TARGET_NR_mount
:
6293 /* need to look at the data field */
6297 p
= lock_user_string(arg1
);
6305 p2
= lock_user_string(arg2
);
6308 unlock_user(p
, arg1
, 0);
6314 p3
= lock_user_string(arg3
);
6317 unlock_user(p
, arg1
, 0);
6319 unlock_user(p2
, arg2
, 0);
6326 /* FIXME - arg5 should be locked, but it isn't clear how to
6327 * do that since it's not guaranteed to be a NULL-terminated
6331 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
6333 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
6335 ret
= get_errno(ret
);
6338 unlock_user(p
, arg1
, 0);
6340 unlock_user(p2
, arg2
, 0);
6342 unlock_user(p3
, arg3
, 0);
6346 #ifdef TARGET_NR_umount
6347 case TARGET_NR_umount
:
6348 if (!(p
= lock_user_string(arg1
)))
6350 ret
= get_errno(umount(p
));
6351 unlock_user(p
, arg1
, 0);
6354 #ifdef TARGET_NR_stime /* not on alpha */
6355 case TARGET_NR_stime
:
6358 if (get_user_sal(host_time
, arg1
))
6360 ret
= get_errno(stime(&host_time
));
6364 case TARGET_NR_ptrace
:
6366 #ifdef TARGET_NR_alarm /* not on alpha */
6367 case TARGET_NR_alarm
:
6371 #ifdef TARGET_NR_oldfstat
6372 case TARGET_NR_oldfstat
:
6375 #ifdef TARGET_NR_pause /* not on alpha */
6376 case TARGET_NR_pause
:
6377 ret
= get_errno(pause());
6380 #ifdef TARGET_NR_utime
6381 case TARGET_NR_utime
:
6383 struct utimbuf tbuf
, *host_tbuf
;
6384 struct target_utimbuf
*target_tbuf
;
6386 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
6388 tbuf
.actime
= tswapal(target_tbuf
->actime
);
6389 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
6390 unlock_user_struct(target_tbuf
, arg2
, 0);
6395 if (!(p
= lock_user_string(arg1
)))
6397 ret
= get_errno(utime(p
, host_tbuf
));
6398 unlock_user(p
, arg1
, 0);
6402 #ifdef TARGET_NR_utimes
6403 case TARGET_NR_utimes
:
6405 struct timeval
*tvp
, tv
[2];
6407 if (copy_from_user_timeval(&tv
[0], arg2
)
6408 || copy_from_user_timeval(&tv
[1],
6409 arg2
+ sizeof(struct target_timeval
)))
6415 if (!(p
= lock_user_string(arg1
)))
6417 ret
= get_errno(utimes(p
, tvp
));
6418 unlock_user(p
, arg1
, 0);
6422 #if defined(TARGET_NR_futimesat)
6423 case TARGET_NR_futimesat
:
6425 struct timeval
*tvp
, tv
[2];
6427 if (copy_from_user_timeval(&tv
[0], arg3
)
6428 || copy_from_user_timeval(&tv
[1],
6429 arg3
+ sizeof(struct target_timeval
)))
6435 if (!(p
= lock_user_string(arg2
)))
6437 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
6438 unlock_user(p
, arg2
, 0);
6442 #ifdef TARGET_NR_stty
6443 case TARGET_NR_stty
:
6446 #ifdef TARGET_NR_gtty
6447 case TARGET_NR_gtty
:
6450 #ifdef TARGET_NR_access
6451 case TARGET_NR_access
:
6452 if (!(p
= lock_user_string(arg1
)))
6454 ret
= get_errno(access(path(p
), arg2
));
6455 unlock_user(p
, arg1
, 0);
6458 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
6459 case TARGET_NR_faccessat
:
6460 if (!(p
= lock_user_string(arg2
)))
6462 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
6463 unlock_user(p
, arg2
, 0);
6466 #ifdef TARGET_NR_nice /* not on alpha */
6467 case TARGET_NR_nice
:
6468 ret
= get_errno(nice(arg1
));
6471 #ifdef TARGET_NR_ftime
6472 case TARGET_NR_ftime
:
6475 case TARGET_NR_sync
:
6479 case TARGET_NR_kill
:
6480 ret
= get_errno(kill(arg1
, target_to_host_signal(arg2
)));
6482 #ifdef TARGET_NR_rename
6483 case TARGET_NR_rename
:
6486 p
= lock_user_string(arg1
);
6487 p2
= lock_user_string(arg2
);
6489 ret
= -TARGET_EFAULT
;
6491 ret
= get_errno(rename(p
, p2
));
6492 unlock_user(p2
, arg2
, 0);
6493 unlock_user(p
, arg1
, 0);
6497 #if defined(TARGET_NR_renameat)
6498 case TARGET_NR_renameat
:
6501 p
= lock_user_string(arg2
);
6502 p2
= lock_user_string(arg4
);
6504 ret
= -TARGET_EFAULT
;
6506 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
6507 unlock_user(p2
, arg4
, 0);
6508 unlock_user(p
, arg2
, 0);
6512 #ifdef TARGET_NR_mkdir
6513 case TARGET_NR_mkdir
:
6514 if (!(p
= lock_user_string(arg1
)))
6516 ret
= get_errno(mkdir(p
, arg2
));
6517 unlock_user(p
, arg1
, 0);
6520 #if defined(TARGET_NR_mkdirat)
6521 case TARGET_NR_mkdirat
:
6522 if (!(p
= lock_user_string(arg2
)))
6524 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
6525 unlock_user(p
, arg2
, 0);
6528 #ifdef TARGET_NR_rmdir
6529 case TARGET_NR_rmdir
:
6530 if (!(p
= lock_user_string(arg1
)))
6532 ret
= get_errno(rmdir(p
));
6533 unlock_user(p
, arg1
, 0);
6537 ret
= get_errno(dup(arg1
));
6539 fd_trans_dup(arg1
, ret
);
6542 #ifdef TARGET_NR_pipe
6543 case TARGET_NR_pipe
:
6544 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
6547 #ifdef TARGET_NR_pipe2
6548 case TARGET_NR_pipe2
:
6549 ret
= do_pipe(cpu_env
, arg1
,
6550 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
6553 case TARGET_NR_times
:
6555 struct target_tms
*tmsp
;
6557 ret
= get_errno(times(&tms
));
6559 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
6562 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
6563 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
6564 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
6565 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
6568 ret
= host_to_target_clock_t(ret
);
6571 #ifdef TARGET_NR_prof
6572 case TARGET_NR_prof
:
6575 #ifdef TARGET_NR_signal
6576 case TARGET_NR_signal
:
6579 case TARGET_NR_acct
:
6581 ret
= get_errno(acct(NULL
));
6583 if (!(p
= lock_user_string(arg1
)))
6585 ret
= get_errno(acct(path(p
)));
6586 unlock_user(p
, arg1
, 0);
6589 #ifdef TARGET_NR_umount2
6590 case TARGET_NR_umount2
:
6591 if (!(p
= lock_user_string(arg1
)))
6593 ret
= get_errno(umount2(p
, arg2
));
6594 unlock_user(p
, arg1
, 0);
6597 #ifdef TARGET_NR_lock
6598 case TARGET_NR_lock
:
6601 case TARGET_NR_ioctl
:
6602 ret
= do_ioctl(arg1
, arg2
, arg3
);
6604 case TARGET_NR_fcntl
:
6605 ret
= do_fcntl(arg1
, arg2
, arg3
);
6607 #ifdef TARGET_NR_mpx
6611 case TARGET_NR_setpgid
:
6612 ret
= get_errno(setpgid(arg1
, arg2
));
6614 #ifdef TARGET_NR_ulimit
6615 case TARGET_NR_ulimit
:
6618 #ifdef TARGET_NR_oldolduname
6619 case TARGET_NR_oldolduname
:
6622 case TARGET_NR_umask
:
6623 ret
= get_errno(umask(arg1
));
6625 case TARGET_NR_chroot
:
6626 if (!(p
= lock_user_string(arg1
)))
6628 ret
= get_errno(chroot(p
));
6629 unlock_user(p
, arg1
, 0);
6631 #ifdef TARGET_NR_ustat
6632 case TARGET_NR_ustat
:
6635 #ifdef TARGET_NR_dup2
6636 case TARGET_NR_dup2
:
6637 ret
= get_errno(dup2(arg1
, arg2
));
6639 fd_trans_dup(arg1
, arg2
);
6643 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
6644 case TARGET_NR_dup3
:
6645 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
6647 fd_trans_dup(arg1
, arg2
);
6651 #ifdef TARGET_NR_getppid /* not on alpha */
6652 case TARGET_NR_getppid
:
6653 ret
= get_errno(getppid());
6656 #ifdef TARGET_NR_getpgrp
6657 case TARGET_NR_getpgrp
:
6658 ret
= get_errno(getpgrp());
6661 case TARGET_NR_setsid
:
6662 ret
= get_errno(setsid());
6664 #ifdef TARGET_NR_sigaction
6665 case TARGET_NR_sigaction
:
6667 #if defined(TARGET_ALPHA)
6668 struct target_sigaction act
, oact
, *pact
= 0;
6669 struct target_old_sigaction
*old_act
;
6671 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
6673 act
._sa_handler
= old_act
->_sa_handler
;
6674 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
6675 act
.sa_flags
= old_act
->sa_flags
;
6676 act
.sa_restorer
= 0;
6677 unlock_user_struct(old_act
, arg2
, 0);
6680 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
6681 if (!is_error(ret
) && arg3
) {
6682 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
6684 old_act
->_sa_handler
= oact
._sa_handler
;
6685 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
6686 old_act
->sa_flags
= oact
.sa_flags
;
6687 unlock_user_struct(old_act
, arg3
, 1);
6689 #elif defined(TARGET_MIPS)
6690 struct target_sigaction act
, oact
, *pact
, *old_act
;
6693 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
6695 act
._sa_handler
= old_act
->_sa_handler
;
6696 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
6697 act
.sa_flags
= old_act
->sa_flags
;
6698 unlock_user_struct(old_act
, arg2
, 0);
6704 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
6706 if (!is_error(ret
) && arg3
) {
6707 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
6709 old_act
->_sa_handler
= oact
._sa_handler
;
6710 old_act
->sa_flags
= oact
.sa_flags
;
6711 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
6712 old_act
->sa_mask
.sig
[1] = 0;
6713 old_act
->sa_mask
.sig
[2] = 0;
6714 old_act
->sa_mask
.sig
[3] = 0;
6715 unlock_user_struct(old_act
, arg3
, 1);
6718 struct target_old_sigaction
*old_act
;
6719 struct target_sigaction act
, oact
, *pact
;
6721 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
6723 act
._sa_handler
= old_act
->_sa_handler
;
6724 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
6725 act
.sa_flags
= old_act
->sa_flags
;
6726 act
.sa_restorer
= old_act
->sa_restorer
;
6727 unlock_user_struct(old_act
, arg2
, 0);
6732 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
6733 if (!is_error(ret
) && arg3
) {
6734 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
6736 old_act
->_sa_handler
= oact
._sa_handler
;
6737 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
6738 old_act
->sa_flags
= oact
.sa_flags
;
6739 old_act
->sa_restorer
= oact
.sa_restorer
;
6740 unlock_user_struct(old_act
, arg3
, 1);
6746 case TARGET_NR_rt_sigaction
:
6748 #if defined(TARGET_ALPHA)
6749 struct target_sigaction act
, oact
, *pact
= 0;
6750 struct target_rt_sigaction
*rt_act
;
6751 /* ??? arg4 == sizeof(sigset_t). */
6753 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
6755 act
._sa_handler
= rt_act
->_sa_handler
;
6756 act
.sa_mask
= rt_act
->sa_mask
;
6757 act
.sa_flags
= rt_act
->sa_flags
;
6758 act
.sa_restorer
= arg5
;
6759 unlock_user_struct(rt_act
, arg2
, 0);
6762 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
6763 if (!is_error(ret
) && arg3
) {
6764 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
6766 rt_act
->_sa_handler
= oact
._sa_handler
;
6767 rt_act
->sa_mask
= oact
.sa_mask
;
6768 rt_act
->sa_flags
= oact
.sa_flags
;
6769 unlock_user_struct(rt_act
, arg3
, 1);
6772 struct target_sigaction
*act
;
6773 struct target_sigaction
*oact
;
6776 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
6781 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
6782 ret
= -TARGET_EFAULT
;
6783 goto rt_sigaction_fail
;
6787 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
6790 unlock_user_struct(act
, arg2
, 0);
6792 unlock_user_struct(oact
, arg3
, 1);
6796 #ifdef TARGET_NR_sgetmask /* not on alpha */
6797 case TARGET_NR_sgetmask
:
6800 abi_ulong target_set
;
6801 do_sigprocmask(0, NULL
, &cur_set
);
6802 host_to_target_old_sigset(&target_set
, &cur_set
);
6807 #ifdef TARGET_NR_ssetmask /* not on alpha */
6808 case TARGET_NR_ssetmask
:
6810 sigset_t set
, oset
, cur_set
;
6811 abi_ulong target_set
= arg1
;
6812 do_sigprocmask(0, NULL
, &cur_set
);
6813 target_to_host_old_sigset(&set
, &target_set
);
6814 sigorset(&set
, &set
, &cur_set
);
6815 do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
6816 host_to_target_old_sigset(&target_set
, &oset
);
6821 #ifdef TARGET_NR_sigprocmask
6822 case TARGET_NR_sigprocmask
:
6824 #if defined(TARGET_ALPHA)
6825 sigset_t set
, oldset
;
6830 case TARGET_SIG_BLOCK
:
6833 case TARGET_SIG_UNBLOCK
:
6836 case TARGET_SIG_SETMASK
:
6840 ret
= -TARGET_EINVAL
;
6844 target_to_host_old_sigset(&set
, &mask
);
6846 ret
= get_errno(do_sigprocmask(how
, &set
, &oldset
));
6847 if (!is_error(ret
)) {
6848 host_to_target_old_sigset(&mask
, &oldset
);
6850 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
6853 sigset_t set
, oldset
, *set_ptr
;
6858 case TARGET_SIG_BLOCK
:
6861 case TARGET_SIG_UNBLOCK
:
6864 case TARGET_SIG_SETMASK
:
6868 ret
= -TARGET_EINVAL
;
6871 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
6873 target_to_host_old_sigset(&set
, p
);
6874 unlock_user(p
, arg2
, 0);
6880 ret
= get_errno(do_sigprocmask(how
, set_ptr
, &oldset
));
6881 if (!is_error(ret
) && arg3
) {
6882 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
6884 host_to_target_old_sigset(p
, &oldset
);
6885 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
6891 case TARGET_NR_rt_sigprocmask
:
6894 sigset_t set
, oldset
, *set_ptr
;
6898 case TARGET_SIG_BLOCK
:
6901 case TARGET_SIG_UNBLOCK
:
6904 case TARGET_SIG_SETMASK
:
6908 ret
= -TARGET_EINVAL
;
6911 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
6913 target_to_host_sigset(&set
, p
);
6914 unlock_user(p
, arg2
, 0);
6920 ret
= get_errno(do_sigprocmask(how
, set_ptr
, &oldset
));
6921 if (!is_error(ret
) && arg3
) {
6922 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
6924 host_to_target_sigset(p
, &oldset
);
6925 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
6929 #ifdef TARGET_NR_sigpending
6930 case TARGET_NR_sigpending
:
6933 ret
= get_errno(sigpending(&set
));
6934 if (!is_error(ret
)) {
6935 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
6937 host_to_target_old_sigset(p
, &set
);
6938 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
6943 case TARGET_NR_rt_sigpending
:
6946 ret
= get_errno(sigpending(&set
));
6947 if (!is_error(ret
)) {
6948 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
6950 host_to_target_sigset(p
, &set
);
6951 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
6955 #ifdef TARGET_NR_sigsuspend
6956 case TARGET_NR_sigsuspend
:
6959 #if defined(TARGET_ALPHA)
6960 abi_ulong mask
= arg1
;
6961 target_to_host_old_sigset(&set
, &mask
);
6963 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
6965 target_to_host_old_sigset(&set
, p
);
6966 unlock_user(p
, arg1
, 0);
6968 ret
= get_errno(sigsuspend(&set
));
6972 case TARGET_NR_rt_sigsuspend
:
6975 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
6977 target_to_host_sigset(&set
, p
);
6978 unlock_user(p
, arg1
, 0);
6979 ret
= get_errno(sigsuspend(&set
));
6982 case TARGET_NR_rt_sigtimedwait
:
6985 struct timespec uts
, *puts
;
6988 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
6990 target_to_host_sigset(&set
, p
);
6991 unlock_user(p
, arg1
, 0);
6994 target_to_host_timespec(puts
, arg3
);
6998 ret
= get_errno(sigtimedwait(&set
, &uinfo
, puts
));
6999 if (!is_error(ret
)) {
7001 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
7006 host_to_target_siginfo(p
, &uinfo
);
7007 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
7009 ret
= host_to_target_signal(ret
);
7013 case TARGET_NR_rt_sigqueueinfo
:
7016 if (!(p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_sigset_t
), 1)))
7018 target_to_host_siginfo(&uinfo
, p
);
7019 unlock_user(p
, arg1
, 0);
7020 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
7023 #ifdef TARGET_NR_sigreturn
7024 case TARGET_NR_sigreturn
:
7025 ret
= do_sigreturn(cpu_env
);
7028 case TARGET_NR_rt_sigreturn
:
7029 ret
= do_rt_sigreturn(cpu_env
);
7031 case TARGET_NR_sethostname
:
7032 if (!(p
= lock_user_string(arg1
)))
7034 ret
= get_errno(sethostname(p
, arg2
));
7035 unlock_user(p
, arg1
, 0);
7037 case TARGET_NR_setrlimit
:
7039 int resource
= target_to_host_resource(arg1
);
7040 struct target_rlimit
*target_rlim
;
7042 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
7044 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
7045 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
7046 unlock_user_struct(target_rlim
, arg2
, 0);
7047 ret
= get_errno(setrlimit(resource
, &rlim
));
7050 case TARGET_NR_getrlimit
:
7052 int resource
= target_to_host_resource(arg1
);
7053 struct target_rlimit
*target_rlim
;
7056 ret
= get_errno(getrlimit(resource
, &rlim
));
7057 if (!is_error(ret
)) {
7058 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
7060 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
7061 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
7062 unlock_user_struct(target_rlim
, arg2
, 1);
7066 case TARGET_NR_getrusage
:
7068 struct rusage rusage
;
7069 ret
= get_errno(getrusage(arg1
, &rusage
));
7070 if (!is_error(ret
)) {
7071 ret
= host_to_target_rusage(arg2
, &rusage
);
7075 case TARGET_NR_gettimeofday
:
7078 ret
= get_errno(gettimeofday(&tv
, NULL
));
7079 if (!is_error(ret
)) {
7080 if (copy_to_user_timeval(arg1
, &tv
))
7085 case TARGET_NR_settimeofday
:
7087 struct timeval tv
, *ptv
= NULL
;
7088 struct timezone tz
, *ptz
= NULL
;
7091 if (copy_from_user_timeval(&tv
, arg1
)) {
7098 if (copy_from_user_timezone(&tz
, arg2
)) {
7104 ret
= get_errno(settimeofday(ptv
, ptz
));
7107 #if defined(TARGET_NR_select)
7108 case TARGET_NR_select
:
7109 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
7110 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
7113 struct target_sel_arg_struct
*sel
;
7114 abi_ulong inp
, outp
, exp
, tvp
;
7117 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1))
7119 nsel
= tswapal(sel
->n
);
7120 inp
= tswapal(sel
->inp
);
7121 outp
= tswapal(sel
->outp
);
7122 exp
= tswapal(sel
->exp
);
7123 tvp
= tswapal(sel
->tvp
);
7124 unlock_user_struct(sel
, arg1
, 0);
7125 ret
= do_select(nsel
, inp
, outp
, exp
, tvp
);
7130 #ifdef TARGET_NR_pselect6
7131 case TARGET_NR_pselect6
:
7133 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
7134 fd_set rfds
, wfds
, efds
;
7135 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
7136 struct timespec ts
, *ts_ptr
;
7139 * The 6th arg is actually two args smashed together,
7140 * so we cannot use the C library.
7148 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
7149 target_sigset_t
*target_sigset
;
7157 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
7161 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
7165 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
7171 * This takes a timespec, and not a timeval, so we cannot
7172 * use the do_select() helper ...
7175 if (target_to_host_timespec(&ts
, ts_addr
)) {
7183 /* Extract the two packed args for the sigset */
7186 sig
.size
= _NSIG
/ 8;
7188 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
7192 arg_sigset
= tswapal(arg7
[0]);
7193 arg_sigsize
= tswapal(arg7
[1]);
7194 unlock_user(arg7
, arg6
, 0);
7198 if (arg_sigsize
!= sizeof(*target_sigset
)) {
7199 /* Like the kernel, we enforce correct size sigsets */
7200 ret
= -TARGET_EINVAL
;
7203 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
7204 sizeof(*target_sigset
), 1);
7205 if (!target_sigset
) {
7208 target_to_host_sigset(&set
, target_sigset
);
7209 unlock_user(target_sigset
, arg_sigset
, 0);
7217 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
7220 if (!is_error(ret
)) {
7221 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
7223 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
7225 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
7228 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
7234 #ifdef TARGET_NR_symlink
7235 case TARGET_NR_symlink
:
7238 p
= lock_user_string(arg1
);
7239 p2
= lock_user_string(arg2
);
7241 ret
= -TARGET_EFAULT
;
7243 ret
= get_errno(symlink(p
, p2
));
7244 unlock_user(p2
, arg2
, 0);
7245 unlock_user(p
, arg1
, 0);
7249 #if defined(TARGET_NR_symlinkat)
7250 case TARGET_NR_symlinkat
:
7253 p
= lock_user_string(arg1
);
7254 p2
= lock_user_string(arg3
);
7256 ret
= -TARGET_EFAULT
;
7258 ret
= get_errno(symlinkat(p
, arg2
, p2
));
7259 unlock_user(p2
, arg3
, 0);
7260 unlock_user(p
, arg1
, 0);
7264 #ifdef TARGET_NR_oldlstat
7265 case TARGET_NR_oldlstat
:
7268 #ifdef TARGET_NR_readlink
7269 case TARGET_NR_readlink
:
7272 p
= lock_user_string(arg1
);
7273 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
7275 ret
= -TARGET_EFAULT
;
7277 /* Short circuit this for the magic exe check. */
7278 ret
= -TARGET_EINVAL
;
7279 } else if (is_proc_myself((const char *)p
, "exe")) {
7280 char real
[PATH_MAX
], *temp
;
7281 temp
= realpath(exec_path
, real
);
7282 /* Return value is # of bytes that we wrote to the buffer. */
7284 ret
= get_errno(-1);
7286 /* Don't worry about sign mismatch as earlier mapping
7287 * logic would have thrown a bad address error. */
7288 ret
= MIN(strlen(real
), arg3
);
7289 /* We cannot NUL terminate the string. */
7290 memcpy(p2
, real
, ret
);
7293 ret
= get_errno(readlink(path(p
), p2
, arg3
));
7295 unlock_user(p2
, arg2
, ret
);
7296 unlock_user(p
, arg1
, 0);
7300 #if defined(TARGET_NR_readlinkat)
7301 case TARGET_NR_readlinkat
:
7304 p
= lock_user_string(arg2
);
7305 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
7307 ret
= -TARGET_EFAULT
;
7308 } else if (is_proc_myself((const char *)p
, "exe")) {
7309 char real
[PATH_MAX
], *temp
;
7310 temp
= realpath(exec_path
, real
);
7311 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
7312 snprintf((char *)p2
, arg4
, "%s", real
);
7314 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
7316 unlock_user(p2
, arg3
, ret
);
7317 unlock_user(p
, arg2
, 0);
7321 #ifdef TARGET_NR_uselib
7322 case TARGET_NR_uselib
:
7325 #ifdef TARGET_NR_swapon
7326 case TARGET_NR_swapon
:
7327 if (!(p
= lock_user_string(arg1
)))
7329 ret
= get_errno(swapon(p
, arg2
));
7330 unlock_user(p
, arg1
, 0);
7333 case TARGET_NR_reboot
:
7334 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
7335 /* arg4 must be ignored in all other cases */
7336 p
= lock_user_string(arg4
);
7340 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
7341 unlock_user(p
, arg4
, 0);
7343 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
7346 #ifdef TARGET_NR_readdir
7347 case TARGET_NR_readdir
:
7350 #ifdef TARGET_NR_mmap
7351 case TARGET_NR_mmap
:
7352 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7353 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
7354 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
7355 || defined(TARGET_S390X)
7358 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
7359 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
7367 unlock_user(v
, arg1
, 0);
7368 ret
= get_errno(target_mmap(v1
, v2
, v3
,
7369 target_to_host_bitmask(v4
, mmap_flags_tbl
),
7373 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
7374 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
7380 #ifdef TARGET_NR_mmap2
7381 case TARGET_NR_mmap2
:
7383 #define MMAP_SHIFT 12
7385 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
7386 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
7388 arg6
<< MMAP_SHIFT
));
7391 case TARGET_NR_munmap
:
7392 ret
= get_errno(target_munmap(arg1
, arg2
));
7394 case TARGET_NR_mprotect
:
7396 TaskState
*ts
= cpu
->opaque
;
7397 /* Special hack to detect libc making the stack executable. */
7398 if ((arg3
& PROT_GROWSDOWN
)
7399 && arg1
>= ts
->info
->stack_limit
7400 && arg1
<= ts
->info
->start_stack
) {
7401 arg3
&= ~PROT_GROWSDOWN
;
7402 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
7403 arg1
= ts
->info
->stack_limit
;
7406 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
7408 #ifdef TARGET_NR_mremap
7409 case TARGET_NR_mremap
:
7410 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
7413 /* ??? msync/mlock/munlock are broken for softmmu. */
7414 #ifdef TARGET_NR_msync
7415 case TARGET_NR_msync
:
7416 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
7419 #ifdef TARGET_NR_mlock
7420 case TARGET_NR_mlock
:
7421 ret
= get_errno(mlock(g2h(arg1
), arg2
));
7424 #ifdef TARGET_NR_munlock
7425 case TARGET_NR_munlock
:
7426 ret
= get_errno(munlock(g2h(arg1
), arg2
));
7429 #ifdef TARGET_NR_mlockall
7430 case TARGET_NR_mlockall
:
7431 ret
= get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
7434 #ifdef TARGET_NR_munlockall
7435 case TARGET_NR_munlockall
:
7436 ret
= get_errno(munlockall());
7439 case TARGET_NR_truncate
:
7440 if (!(p
= lock_user_string(arg1
)))
7442 ret
= get_errno(truncate(p
, arg2
));
7443 unlock_user(p
, arg1
, 0);
7445 case TARGET_NR_ftruncate
:
7446 ret
= get_errno(ftruncate(arg1
, arg2
));
7448 case TARGET_NR_fchmod
:
7449 ret
= get_errno(fchmod(arg1
, arg2
));
7451 #if defined(TARGET_NR_fchmodat)
7452 case TARGET_NR_fchmodat
:
7453 if (!(p
= lock_user_string(arg2
)))
7455 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
7456 unlock_user(p
, arg2
, 0);
7459 case TARGET_NR_getpriority
:
7460 /* Note that negative values are valid for getpriority, so we must
7461 differentiate based on errno settings. */
7463 ret
= getpriority(arg1
, arg2
);
7464 if (ret
== -1 && errno
!= 0) {
7465 ret
= -host_to_target_errno(errno
);
7469 /* Return value is the unbiased priority. Signal no error. */
7470 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
7472 /* Return value is a biased priority to avoid negative numbers. */
7476 case TARGET_NR_setpriority
:
7477 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
7479 #ifdef TARGET_NR_profil
7480 case TARGET_NR_profil
:
7483 case TARGET_NR_statfs
:
7484 if (!(p
= lock_user_string(arg1
)))
7486 ret
= get_errno(statfs(path(p
), &stfs
));
7487 unlock_user(p
, arg1
, 0);
7489 if (!is_error(ret
)) {
7490 struct target_statfs
*target_stfs
;
7492 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
7494 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
7495 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
7496 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
7497 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
7498 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
7499 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
7500 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
7501 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
7502 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
7503 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
7504 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
7505 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
7506 unlock_user_struct(target_stfs
, arg2
, 1);
7509 case TARGET_NR_fstatfs
:
7510 ret
= get_errno(fstatfs(arg1
, &stfs
));
7511 goto convert_statfs
;
7512 #ifdef TARGET_NR_statfs64
7513 case TARGET_NR_statfs64
:
7514 if (!(p
= lock_user_string(arg1
)))
7516 ret
= get_errno(statfs(path(p
), &stfs
));
7517 unlock_user(p
, arg1
, 0);
7519 if (!is_error(ret
)) {
7520 struct target_statfs64
*target_stfs
;
7522 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
7524 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
7525 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
7526 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
7527 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
7528 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
7529 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
7530 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
7531 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
7532 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
7533 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
7534 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
7535 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
7536 unlock_user_struct(target_stfs
, arg3
, 1);
7539 case TARGET_NR_fstatfs64
:
7540 ret
= get_errno(fstatfs(arg1
, &stfs
));
7541 goto convert_statfs64
;
7543 #ifdef TARGET_NR_ioperm
7544 case TARGET_NR_ioperm
:
7547 #ifdef TARGET_NR_socketcall
7548 case TARGET_NR_socketcall
:
7549 ret
= do_socketcall(arg1
, arg2
);
7552 #ifdef TARGET_NR_accept
7553 case TARGET_NR_accept
:
7554 ret
= do_accept4(arg1
, arg2
, arg3
, 0);
7557 #ifdef TARGET_NR_accept4
7558 case TARGET_NR_accept4
:
7559 #ifdef CONFIG_ACCEPT4
7560 ret
= do_accept4(arg1
, arg2
, arg3
, arg4
);
7566 #ifdef TARGET_NR_bind
7567 case TARGET_NR_bind
:
7568 ret
= do_bind(arg1
, arg2
, arg3
);
7571 #ifdef TARGET_NR_connect
7572 case TARGET_NR_connect
:
7573 ret
= do_connect(arg1
, arg2
, arg3
);
7576 #ifdef TARGET_NR_getpeername
7577 case TARGET_NR_getpeername
:
7578 ret
= do_getpeername(arg1
, arg2
, arg3
);
7581 #ifdef TARGET_NR_getsockname
7582 case TARGET_NR_getsockname
:
7583 ret
= do_getsockname(arg1
, arg2
, arg3
);
7586 #ifdef TARGET_NR_getsockopt
7587 case TARGET_NR_getsockopt
:
7588 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
7591 #ifdef TARGET_NR_listen
7592 case TARGET_NR_listen
:
7593 ret
= get_errno(listen(arg1
, arg2
));
7596 #ifdef TARGET_NR_recv
7597 case TARGET_NR_recv
:
7598 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
7601 #ifdef TARGET_NR_recvfrom
7602 case TARGET_NR_recvfrom
:
7603 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7606 #ifdef TARGET_NR_recvmsg
7607 case TARGET_NR_recvmsg
:
7608 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
7611 #ifdef TARGET_NR_send
7612 case TARGET_NR_send
:
7613 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
7616 #ifdef TARGET_NR_sendmsg
7617 case TARGET_NR_sendmsg
:
7618 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
7621 #ifdef TARGET_NR_sendmmsg
7622 case TARGET_NR_sendmmsg
:
7623 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
7625 case TARGET_NR_recvmmsg
:
7626 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
7629 #ifdef TARGET_NR_sendto
7630 case TARGET_NR_sendto
:
7631 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7634 #ifdef TARGET_NR_shutdown
7635 case TARGET_NR_shutdown
:
7636 ret
= get_errno(shutdown(arg1
, arg2
));
7639 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
7640 case TARGET_NR_getrandom
:
7641 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
7645 ret
= get_errno(getrandom(p
, arg2
, arg3
));
7646 unlock_user(p
, arg1
, ret
);
7649 #ifdef TARGET_NR_socket
7650 case TARGET_NR_socket
:
7651 ret
= do_socket(arg1
, arg2
, arg3
);
7652 fd_trans_unregister(ret
);
7655 #ifdef TARGET_NR_socketpair
7656 case TARGET_NR_socketpair
:
7657 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
7660 #ifdef TARGET_NR_setsockopt
7661 case TARGET_NR_setsockopt
:
7662 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
7666 case TARGET_NR_syslog
:
7667 if (!(p
= lock_user_string(arg2
)))
7669 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
7670 unlock_user(p
, arg2
, 0);
7673 case TARGET_NR_setitimer
:
7675 struct itimerval value
, ovalue
, *pvalue
;
7679 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
7680 || copy_from_user_timeval(&pvalue
->it_value
,
7681 arg2
+ sizeof(struct target_timeval
)))
7686 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
7687 if (!is_error(ret
) && arg3
) {
7688 if (copy_to_user_timeval(arg3
,
7689 &ovalue
.it_interval
)
7690 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
7696 case TARGET_NR_getitimer
:
7698 struct itimerval value
;
7700 ret
= get_errno(getitimer(arg1
, &value
));
7701 if (!is_error(ret
) && arg2
) {
7702 if (copy_to_user_timeval(arg2
,
7704 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
7710 #ifdef TARGET_NR_stat
7711 case TARGET_NR_stat
:
7712 if (!(p
= lock_user_string(arg1
)))
7714 ret
= get_errno(stat(path(p
), &st
));
7715 unlock_user(p
, arg1
, 0);
7718 #ifdef TARGET_NR_lstat
7719 case TARGET_NR_lstat
:
7720 if (!(p
= lock_user_string(arg1
)))
7722 ret
= get_errno(lstat(path(p
), &st
));
7723 unlock_user(p
, arg1
, 0);
7726 case TARGET_NR_fstat
:
7728 ret
= get_errno(fstat(arg1
, &st
));
7729 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
7732 if (!is_error(ret
)) {
7733 struct target_stat
*target_st
;
7735 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
7737 memset(target_st
, 0, sizeof(*target_st
));
7738 __put_user(st
.st_dev
, &target_st
->st_dev
);
7739 __put_user(st
.st_ino
, &target_st
->st_ino
);
7740 __put_user(st
.st_mode
, &target_st
->st_mode
);
7741 __put_user(st
.st_uid
, &target_st
->st_uid
);
7742 __put_user(st
.st_gid
, &target_st
->st_gid
);
7743 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
7744 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
7745 __put_user(st
.st_size
, &target_st
->st_size
);
7746 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
7747 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
7748 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
7749 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
7750 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
7751 unlock_user_struct(target_st
, arg2
, 1);
7755 #ifdef TARGET_NR_olduname
7756 case TARGET_NR_olduname
:
7759 #ifdef TARGET_NR_iopl
7760 case TARGET_NR_iopl
:
7763 case TARGET_NR_vhangup
:
7764 ret
= get_errno(vhangup());
7766 #ifdef TARGET_NR_idle
7767 case TARGET_NR_idle
:
7770 #ifdef TARGET_NR_syscall
7771 case TARGET_NR_syscall
:
7772 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
7773 arg6
, arg7
, arg8
, 0);
7776 case TARGET_NR_wait4
:
7779 abi_long status_ptr
= arg2
;
7780 struct rusage rusage
, *rusage_ptr
;
7781 abi_ulong target_rusage
= arg4
;
7782 abi_long rusage_err
;
7784 rusage_ptr
= &rusage
;
7787 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
7788 if (!is_error(ret
)) {
7789 if (status_ptr
&& ret
) {
7790 status
= host_to_target_waitstatus(status
);
7791 if (put_user_s32(status
, status_ptr
))
7794 if (target_rusage
) {
7795 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
7803 #ifdef TARGET_NR_swapoff
7804 case TARGET_NR_swapoff
:
7805 if (!(p
= lock_user_string(arg1
)))
7807 ret
= get_errno(swapoff(p
));
7808 unlock_user(p
, arg1
, 0);
7811 case TARGET_NR_sysinfo
:
7813 struct target_sysinfo
*target_value
;
7814 struct sysinfo value
;
7815 ret
= get_errno(sysinfo(&value
));
7816 if (!is_error(ret
) && arg1
)
7818 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
7820 __put_user(value
.uptime
, &target_value
->uptime
);
7821 __put_user(value
.loads
[0], &target_value
->loads
[0]);
7822 __put_user(value
.loads
[1], &target_value
->loads
[1]);
7823 __put_user(value
.loads
[2], &target_value
->loads
[2]);
7824 __put_user(value
.totalram
, &target_value
->totalram
);
7825 __put_user(value
.freeram
, &target_value
->freeram
);
7826 __put_user(value
.sharedram
, &target_value
->sharedram
);
7827 __put_user(value
.bufferram
, &target_value
->bufferram
);
7828 __put_user(value
.totalswap
, &target_value
->totalswap
);
7829 __put_user(value
.freeswap
, &target_value
->freeswap
);
7830 __put_user(value
.procs
, &target_value
->procs
);
7831 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
7832 __put_user(value
.freehigh
, &target_value
->freehigh
);
7833 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
7834 unlock_user_struct(target_value
, arg1
, 1);
7838 #ifdef TARGET_NR_ipc
7840 ret
= do_ipc(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7843 #ifdef TARGET_NR_semget
7844 case TARGET_NR_semget
:
7845 ret
= get_errno(semget(arg1
, arg2
, arg3
));
7848 #ifdef TARGET_NR_semop
7849 case TARGET_NR_semop
:
7850 ret
= do_semop(arg1
, arg2
, arg3
);
7853 #ifdef TARGET_NR_semctl
7854 case TARGET_NR_semctl
:
7855 ret
= do_semctl(arg1
, arg2
, arg3
, arg4
);
7858 #ifdef TARGET_NR_msgctl
7859 case TARGET_NR_msgctl
:
7860 ret
= do_msgctl(arg1
, arg2
, arg3
);
7863 #ifdef TARGET_NR_msgget
7864 case TARGET_NR_msgget
:
7865 ret
= get_errno(msgget(arg1
, arg2
));
7868 #ifdef TARGET_NR_msgrcv
7869 case TARGET_NR_msgrcv
:
7870 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
7873 #ifdef TARGET_NR_msgsnd
7874 case TARGET_NR_msgsnd
:
7875 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
7878 #ifdef TARGET_NR_shmget
7879 case TARGET_NR_shmget
:
7880 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
7883 #ifdef TARGET_NR_shmctl
7884 case TARGET_NR_shmctl
:
7885 ret
= do_shmctl(arg1
, arg2
, arg3
);
7888 #ifdef TARGET_NR_shmat
7889 case TARGET_NR_shmat
:
7890 ret
= do_shmat(arg1
, arg2
, arg3
);
7893 #ifdef TARGET_NR_shmdt
7894 case TARGET_NR_shmdt
:
7895 ret
= do_shmdt(arg1
);
7898 case TARGET_NR_fsync
:
7899 ret
= get_errno(fsync(arg1
));
7901 case TARGET_NR_clone
:
7902 /* Linux manages to have three different orderings for its
7903 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
7904 * match the kernel's CONFIG_CLONE_* settings.
7905 * Microblaze is further special in that it uses a sixth
7906 * implicit argument to clone for the TLS pointer.
7908 #if defined(TARGET_MICROBLAZE)
7909 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
7910 #elif defined(TARGET_CLONE_BACKWARDS)
7911 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
7912 #elif defined(TARGET_CLONE_BACKWARDS2)
7913 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
7915 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
7918 #ifdef __NR_exit_group
7919 /* new thread calls */
7920 case TARGET_NR_exit_group
:
7924 gdb_exit(cpu_env
, arg1
);
7925 ret
= get_errno(exit_group(arg1
));
7928 case TARGET_NR_setdomainname
:
7929 if (!(p
= lock_user_string(arg1
)))
7931 ret
= get_errno(setdomainname(p
, arg2
));
7932 unlock_user(p
, arg1
, 0);
7934 case TARGET_NR_uname
:
7935 /* no need to transcode because we use the linux syscall */
7937 struct new_utsname
* buf
;
7939 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
7941 ret
= get_errno(sys_uname(buf
));
7942 if (!is_error(ret
)) {
7943 /* Overrite the native machine name with whatever is being
7945 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
7946 /* Allow the user to override the reported release. */
7947 if (qemu_uname_release
&& *qemu_uname_release
)
7948 strcpy (buf
->release
, qemu_uname_release
);
7950 unlock_user_struct(buf
, arg1
, 1);
7954 case TARGET_NR_modify_ldt
:
7955 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
7957 #if !defined(TARGET_X86_64)
7958 case TARGET_NR_vm86old
:
7960 case TARGET_NR_vm86
:
7961 ret
= do_vm86(cpu_env
, arg1
, arg2
);
7965 case TARGET_NR_adjtimex
:
7967 #ifdef TARGET_NR_create_module
7968 case TARGET_NR_create_module
:
7970 case TARGET_NR_init_module
:
7971 case TARGET_NR_delete_module
:
7972 #ifdef TARGET_NR_get_kernel_syms
7973 case TARGET_NR_get_kernel_syms
:
7976 case TARGET_NR_quotactl
:
7978 case TARGET_NR_getpgid
:
7979 ret
= get_errno(getpgid(arg1
));
7981 case TARGET_NR_fchdir
:
7982 ret
= get_errno(fchdir(arg1
));
7984 #ifdef TARGET_NR_bdflush /* not on x86_64 */
7985 case TARGET_NR_bdflush
:
7988 #ifdef TARGET_NR_sysfs
7989 case TARGET_NR_sysfs
:
7992 case TARGET_NR_personality
:
7993 ret
= get_errno(personality(arg1
));
7995 #ifdef TARGET_NR_afs_syscall
7996 case TARGET_NR_afs_syscall
:
7999 #ifdef TARGET_NR__llseek /* Not on alpha */
8000 case TARGET_NR__llseek
:
8003 #if !defined(__NR_llseek)
8004 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | arg3
, arg5
);
8006 ret
= get_errno(res
);
8011 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
8013 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
8019 #ifdef TARGET_NR_getdents
8020 case TARGET_NR_getdents
:
8021 #ifdef __NR_getdents
8022 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
8024 struct target_dirent
*target_dirp
;
8025 struct linux_dirent
*dirp
;
8026 abi_long count
= arg3
;
8028 dirp
= g_try_malloc(count
);
8030 ret
= -TARGET_ENOMEM
;
8034 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
8035 if (!is_error(ret
)) {
8036 struct linux_dirent
*de
;
8037 struct target_dirent
*tde
;
8039 int reclen
, treclen
;
8040 int count1
, tnamelen
;
8044 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
8048 reclen
= de
->d_reclen
;
8049 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
8050 assert(tnamelen
>= 0);
8051 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
8052 assert(count1
+ treclen
<= count
);
8053 tde
->d_reclen
= tswap16(treclen
);
8054 tde
->d_ino
= tswapal(de
->d_ino
);
8055 tde
->d_off
= tswapal(de
->d_off
);
8056 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
8057 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
8059 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
8063 unlock_user(target_dirp
, arg2
, ret
);
8069 struct linux_dirent
*dirp
;
8070 abi_long count
= arg3
;
8072 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
8074 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
8075 if (!is_error(ret
)) {
8076 struct linux_dirent
*de
;
8081 reclen
= de
->d_reclen
;
8084 de
->d_reclen
= tswap16(reclen
);
8085 tswapls(&de
->d_ino
);
8086 tswapls(&de
->d_off
);
8087 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
8091 unlock_user(dirp
, arg2
, ret
);
8095 /* Implement getdents in terms of getdents64 */
8097 struct linux_dirent64
*dirp
;
8098 abi_long count
= arg3
;
8100 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
8104 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
8105 if (!is_error(ret
)) {
8106 /* Convert the dirent64 structs to target dirent. We do this
8107 * in-place, since we can guarantee that a target_dirent is no
8108 * larger than a dirent64; however this means we have to be
8109 * careful to read everything before writing in the new format.
8111 struct linux_dirent64
*de
;
8112 struct target_dirent
*tde
;
8117 tde
= (struct target_dirent
*)dirp
;
8119 int namelen
, treclen
;
8120 int reclen
= de
->d_reclen
;
8121 uint64_t ino
= de
->d_ino
;
8122 int64_t off
= de
->d_off
;
8123 uint8_t type
= de
->d_type
;
8125 namelen
= strlen(de
->d_name
);
8126 treclen
= offsetof(struct target_dirent
, d_name
)
8128 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
8130 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
8131 tde
->d_ino
= tswapal(ino
);
8132 tde
->d_off
= tswapal(off
);
8133 tde
->d_reclen
= tswap16(treclen
);
8134 /* The target_dirent type is in what was formerly a padding
8135 * byte at the end of the structure:
8137 *(((char *)tde
) + treclen
- 1) = type
;
8139 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
8140 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
8146 unlock_user(dirp
, arg2
, ret
);
8150 #endif /* TARGET_NR_getdents */
8151 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8152 case TARGET_NR_getdents64
:
8154 struct linux_dirent64
*dirp
;
8155 abi_long count
= arg3
;
8156 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
8158 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
8159 if (!is_error(ret
)) {
8160 struct linux_dirent64
*de
;
8165 reclen
= de
->d_reclen
;
8168 de
->d_reclen
= tswap16(reclen
);
8169 tswap64s((uint64_t *)&de
->d_ino
);
8170 tswap64s((uint64_t *)&de
->d_off
);
8171 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
8175 unlock_user(dirp
, arg2
, ret
);
8178 #endif /* TARGET_NR_getdents64 */
8179 #if defined(TARGET_NR__newselect)
8180 case TARGET_NR__newselect
:
8181 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
8184 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
8185 # ifdef TARGET_NR_poll
8186 case TARGET_NR_poll
:
8188 # ifdef TARGET_NR_ppoll
8189 case TARGET_NR_ppoll
:
8192 struct target_pollfd
*target_pfd
;
8193 unsigned int nfds
= arg2
;
8201 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
8202 sizeof(struct target_pollfd
) * nfds
, 1);
8207 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
8208 for (i
= 0; i
< nfds
; i
++) {
8209 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
8210 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
8214 # ifdef TARGET_NR_ppoll
8215 if (num
== TARGET_NR_ppoll
) {
8216 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
8217 target_sigset_t
*target_set
;
8218 sigset_t _set
, *set
= &_set
;
8221 if (target_to_host_timespec(timeout_ts
, arg3
)) {
8222 unlock_user(target_pfd
, arg1
, 0);
8230 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
8232 unlock_user(target_pfd
, arg1
, 0);
8235 target_to_host_sigset(set
, target_set
);
8240 ret
= get_errno(sys_ppoll(pfd
, nfds
, timeout_ts
, set
, _NSIG
/8));
8242 if (!is_error(ret
) && arg3
) {
8243 host_to_target_timespec(arg3
, timeout_ts
);
8246 unlock_user(target_set
, arg4
, 0);
8250 ret
= get_errno(poll(pfd
, nfds
, timeout
));
8252 if (!is_error(ret
)) {
8253 for(i
= 0; i
< nfds
; i
++) {
8254 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
8257 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
8261 case TARGET_NR_flock
:
8262 /* NOTE: the flock constant seems to be the same for every
8264 ret
= get_errno(flock(arg1
, arg2
));
8266 case TARGET_NR_readv
:
8268 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
8270 ret
= get_errno(readv(arg1
, vec
, arg3
));
8271 unlock_iovec(vec
, arg2
, arg3
, 1);
8273 ret
= -host_to_target_errno(errno
);
8277 case TARGET_NR_writev
:
8279 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
8281 ret
= get_errno(writev(arg1
, vec
, arg3
));
8282 unlock_iovec(vec
, arg2
, arg3
, 0);
8284 ret
= -host_to_target_errno(errno
);
8288 case TARGET_NR_getsid
:
8289 ret
= get_errno(getsid(arg1
));
8291 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
8292 case TARGET_NR_fdatasync
:
8293 ret
= get_errno(fdatasync(arg1
));
8296 #ifdef TARGET_NR__sysctl
8297 case TARGET_NR__sysctl
:
8298 /* We don't implement this, but ENOTDIR is always a safe
8300 ret
= -TARGET_ENOTDIR
;
8303 case TARGET_NR_sched_getaffinity
:
8305 unsigned int mask_size
;
8306 unsigned long *mask
;
8309 * sched_getaffinity needs multiples of ulong, so need to take
8310 * care of mismatches between target ulong and host ulong sizes.
8312 if (arg2
& (sizeof(abi_ulong
) - 1)) {
8313 ret
= -TARGET_EINVAL
;
8316 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
8318 mask
= alloca(mask_size
);
8319 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
8321 if (!is_error(ret
)) {
8323 /* More data returned than the caller's buffer will fit.
8324 * This only happens if sizeof(abi_long) < sizeof(long)
8325 * and the caller passed us a buffer holding an odd number
8326 * of abi_longs. If the host kernel is actually using the
8327 * extra 4 bytes then fail EINVAL; otherwise we can just
8328 * ignore them and only copy the interesting part.
8330 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
8331 if (numcpus
> arg2
* 8) {
8332 ret
= -TARGET_EINVAL
;
8338 if (copy_to_user(arg3
, mask
, ret
)) {
8344 case TARGET_NR_sched_setaffinity
:
8346 unsigned int mask_size
;
8347 unsigned long *mask
;
8350 * sched_setaffinity needs multiples of ulong, so need to take
8351 * care of mismatches between target ulong and host ulong sizes.
8353 if (arg2
& (sizeof(abi_ulong
) - 1)) {
8354 ret
= -TARGET_EINVAL
;
8357 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
8359 mask
= alloca(mask_size
);
8360 if (!lock_user_struct(VERIFY_READ
, p
, arg3
, 1)) {
8363 memcpy(mask
, p
, arg2
);
8364 unlock_user_struct(p
, arg2
, 0);
8366 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
8369 case TARGET_NR_sched_setparam
:
8371 struct sched_param
*target_schp
;
8372 struct sched_param schp
;
8375 return -TARGET_EINVAL
;
8377 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
8379 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
8380 unlock_user_struct(target_schp
, arg2
, 0);
8381 ret
= get_errno(sched_setparam(arg1
, &schp
));
8384 case TARGET_NR_sched_getparam
:
8386 struct sched_param
*target_schp
;
8387 struct sched_param schp
;
8390 return -TARGET_EINVAL
;
8392 ret
= get_errno(sched_getparam(arg1
, &schp
));
8393 if (!is_error(ret
)) {
8394 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
8396 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
8397 unlock_user_struct(target_schp
, arg2
, 1);
8401 case TARGET_NR_sched_setscheduler
:
8403 struct sched_param
*target_schp
;
8404 struct sched_param schp
;
8406 return -TARGET_EINVAL
;
8408 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
8410 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
8411 unlock_user_struct(target_schp
, arg3
, 0);
8412 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
8415 case TARGET_NR_sched_getscheduler
:
8416 ret
= get_errno(sched_getscheduler(arg1
));
8418 case TARGET_NR_sched_yield
:
8419 ret
= get_errno(sched_yield());
8421 case TARGET_NR_sched_get_priority_max
:
8422 ret
= get_errno(sched_get_priority_max(arg1
));
8424 case TARGET_NR_sched_get_priority_min
:
8425 ret
= get_errno(sched_get_priority_min(arg1
));
8427 case TARGET_NR_sched_rr_get_interval
:
8430 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
8431 if (!is_error(ret
)) {
8432 ret
= host_to_target_timespec(arg2
, &ts
);
8436 case TARGET_NR_nanosleep
:
8438 struct timespec req
, rem
;
8439 target_to_host_timespec(&req
, arg1
);
8440 ret
= get_errno(nanosleep(&req
, &rem
));
8441 if (is_error(ret
) && arg2
) {
8442 host_to_target_timespec(arg2
, &rem
);
8446 #ifdef TARGET_NR_query_module
8447 case TARGET_NR_query_module
:
8450 #ifdef TARGET_NR_nfsservctl
8451 case TARGET_NR_nfsservctl
:
8454 case TARGET_NR_prctl
:
8456 case PR_GET_PDEATHSIG
:
8459 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
8460 if (!is_error(ret
) && arg2
8461 && put_user_ual(deathsig
, arg2
)) {
8469 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
8473 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
8475 unlock_user(name
, arg2
, 16);
8480 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
8484 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
8486 unlock_user(name
, arg2
, 0);
8491 /* Most prctl options have no pointer arguments */
8492 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
8496 #ifdef TARGET_NR_arch_prctl
8497 case TARGET_NR_arch_prctl
:
8498 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
8499 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
8505 #ifdef TARGET_NR_pread64
8506 case TARGET_NR_pread64
:
8507 if (regpairs_aligned(cpu_env
)) {
8511 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
8513 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
8514 unlock_user(p
, arg2
, ret
);
8516 case TARGET_NR_pwrite64
:
8517 if (regpairs_aligned(cpu_env
)) {
8521 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
8523 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
8524 unlock_user(p
, arg2
, 0);
8527 case TARGET_NR_getcwd
:
8528 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
8530 ret
= get_errno(sys_getcwd1(p
, arg2
));
8531 unlock_user(p
, arg1
, ret
);
8533 case TARGET_NR_capget
:
8534 case TARGET_NR_capset
:
8536 struct target_user_cap_header
*target_header
;
8537 struct target_user_cap_data
*target_data
= NULL
;
8538 struct __user_cap_header_struct header
;
8539 struct __user_cap_data_struct data
[2];
8540 struct __user_cap_data_struct
*dataptr
= NULL
;
8541 int i
, target_datalen
;
8544 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
8547 header
.version
= tswap32(target_header
->version
);
8548 header
.pid
= tswap32(target_header
->pid
);
8550 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
8551 /* Version 2 and up takes pointer to two user_data structs */
8555 target_datalen
= sizeof(*target_data
) * data_items
;
8558 if (num
== TARGET_NR_capget
) {
8559 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
8561 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
8564 unlock_user_struct(target_header
, arg1
, 0);
8568 if (num
== TARGET_NR_capset
) {
8569 for (i
= 0; i
< data_items
; i
++) {
8570 data
[i
].effective
= tswap32(target_data
[i
].effective
);
8571 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
8572 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
8579 if (num
== TARGET_NR_capget
) {
8580 ret
= get_errno(capget(&header
, dataptr
));
8582 ret
= get_errno(capset(&header
, dataptr
));
8585 /* The kernel always updates version for both capget and capset */
8586 target_header
->version
= tswap32(header
.version
);
8587 unlock_user_struct(target_header
, arg1
, 1);
8590 if (num
== TARGET_NR_capget
) {
8591 for (i
= 0; i
< data_items
; i
++) {
8592 target_data
[i
].effective
= tswap32(data
[i
].effective
);
8593 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
8594 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
8596 unlock_user(target_data
, arg2
, target_datalen
);
8598 unlock_user(target_data
, arg2
, 0);
8603 case TARGET_NR_sigaltstack
:
8604 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUArchState
*)cpu_env
));
8607 #ifdef CONFIG_SENDFILE
8608 case TARGET_NR_sendfile
:
8613 ret
= get_user_sal(off
, arg3
);
8614 if (is_error(ret
)) {
8619 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
8620 if (!is_error(ret
) && arg3
) {
8621 abi_long ret2
= put_user_sal(off
, arg3
);
8622 if (is_error(ret2
)) {
8628 #ifdef TARGET_NR_sendfile64
8629 case TARGET_NR_sendfile64
:
8634 ret
= get_user_s64(off
, arg3
);
8635 if (is_error(ret
)) {
8640 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
8641 if (!is_error(ret
) && arg3
) {
8642 abi_long ret2
= put_user_s64(off
, arg3
);
8643 if (is_error(ret2
)) {
8651 case TARGET_NR_sendfile
:
8652 #ifdef TARGET_NR_sendfile64
8653 case TARGET_NR_sendfile64
:
8658 #ifdef TARGET_NR_getpmsg
8659 case TARGET_NR_getpmsg
:
8662 #ifdef TARGET_NR_putpmsg
8663 case TARGET_NR_putpmsg
:
8666 #ifdef TARGET_NR_vfork
8667 case TARGET_NR_vfork
:
8668 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
8672 #ifdef TARGET_NR_ugetrlimit
8673 case TARGET_NR_ugetrlimit
:
8676 int resource
= target_to_host_resource(arg1
);
8677 ret
= get_errno(getrlimit(resource
, &rlim
));
8678 if (!is_error(ret
)) {
8679 struct target_rlimit
*target_rlim
;
8680 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
8682 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
8683 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
8684 unlock_user_struct(target_rlim
, arg2
, 1);
8689 #ifdef TARGET_NR_truncate64
8690 case TARGET_NR_truncate64
:
8691 if (!(p
= lock_user_string(arg1
)))
8693 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
8694 unlock_user(p
, arg1
, 0);
8697 #ifdef TARGET_NR_ftruncate64
8698 case TARGET_NR_ftruncate64
:
8699 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
8702 #ifdef TARGET_NR_stat64
8703 case TARGET_NR_stat64
:
8704 if (!(p
= lock_user_string(arg1
)))
8706 ret
= get_errno(stat(path(p
), &st
));
8707 unlock_user(p
, arg1
, 0);
8709 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
8712 #ifdef TARGET_NR_lstat64
8713 case TARGET_NR_lstat64
:
8714 if (!(p
= lock_user_string(arg1
)))
8716 ret
= get_errno(lstat(path(p
), &st
));
8717 unlock_user(p
, arg1
, 0);
8719 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
8722 #ifdef TARGET_NR_fstat64
8723 case TARGET_NR_fstat64
:
8724 ret
= get_errno(fstat(arg1
, &st
));
8726 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
8729 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
8730 #ifdef TARGET_NR_fstatat64
8731 case TARGET_NR_fstatat64
:
8733 #ifdef TARGET_NR_newfstatat
8734 case TARGET_NR_newfstatat
:
8736 if (!(p
= lock_user_string(arg2
)))
8738 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
8740 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
8743 #ifdef TARGET_NR_lchown
8744 case TARGET_NR_lchown
:
8745 if (!(p
= lock_user_string(arg1
)))
8747 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
8748 unlock_user(p
, arg1
, 0);
8751 #ifdef TARGET_NR_getuid
8752 case TARGET_NR_getuid
:
8753 ret
= get_errno(high2lowuid(getuid()));
8756 #ifdef TARGET_NR_getgid
8757 case TARGET_NR_getgid
:
8758 ret
= get_errno(high2lowgid(getgid()));
8761 #ifdef TARGET_NR_geteuid
8762 case TARGET_NR_geteuid
:
8763 ret
= get_errno(high2lowuid(geteuid()));
8766 #ifdef TARGET_NR_getegid
8767 case TARGET_NR_getegid
:
8768 ret
= get_errno(high2lowgid(getegid()));
8771 case TARGET_NR_setreuid
:
8772 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
8774 case TARGET_NR_setregid
:
8775 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
8777 case TARGET_NR_getgroups
:
8779 int gidsetsize
= arg1
;
8780 target_id
*target_grouplist
;
8784 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
8785 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
8786 if (gidsetsize
== 0)
8788 if (!is_error(ret
)) {
8789 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
8790 if (!target_grouplist
)
8792 for(i
= 0;i
< ret
; i
++)
8793 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
8794 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
8798 case TARGET_NR_setgroups
:
8800 int gidsetsize
= arg1
;
8801 target_id
*target_grouplist
;
8802 gid_t
*grouplist
= NULL
;
8805 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
8806 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
8807 if (!target_grouplist
) {
8808 ret
= -TARGET_EFAULT
;
8811 for (i
= 0; i
< gidsetsize
; i
++) {
8812 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
8814 unlock_user(target_grouplist
, arg2
, 0);
8816 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
8819 case TARGET_NR_fchown
:
8820 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
8822 #if defined(TARGET_NR_fchownat)
8823 case TARGET_NR_fchownat
:
8824 if (!(p
= lock_user_string(arg2
)))
8826 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
8827 low2highgid(arg4
), arg5
));
8828 unlock_user(p
, arg2
, 0);
8831 #ifdef TARGET_NR_setresuid
8832 case TARGET_NR_setresuid
:
8833 ret
= get_errno(setresuid(low2highuid(arg1
),
8835 low2highuid(arg3
)));
8838 #ifdef TARGET_NR_getresuid
8839 case TARGET_NR_getresuid
:
8841 uid_t ruid
, euid
, suid
;
8842 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
8843 if (!is_error(ret
)) {
8844 if (put_user_id(high2lowuid(ruid
), arg1
)
8845 || put_user_id(high2lowuid(euid
), arg2
)
8846 || put_user_id(high2lowuid(suid
), arg3
))
8852 #ifdef TARGET_NR_getresgid
8853 case TARGET_NR_setresgid
:
8854 ret
= get_errno(setresgid(low2highgid(arg1
),
8856 low2highgid(arg3
)));
8859 #ifdef TARGET_NR_getresgid
8860 case TARGET_NR_getresgid
:
8862 gid_t rgid
, egid
, sgid
;
8863 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
8864 if (!is_error(ret
)) {
8865 if (put_user_id(high2lowgid(rgid
), arg1
)
8866 || put_user_id(high2lowgid(egid
), arg2
)
8867 || put_user_id(high2lowgid(sgid
), arg3
))
8873 #ifdef TARGET_NR_chown
8874 case TARGET_NR_chown
:
8875 if (!(p
= lock_user_string(arg1
)))
8877 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
8878 unlock_user(p
, arg1
, 0);
8881 case TARGET_NR_setuid
:
8882 ret
= get_errno(setuid(low2highuid(arg1
)));
8884 case TARGET_NR_setgid
:
8885 ret
= get_errno(setgid(low2highgid(arg1
)));
8887 case TARGET_NR_setfsuid
:
8888 ret
= get_errno(setfsuid(arg1
));
8890 case TARGET_NR_setfsgid
:
8891 ret
= get_errno(setfsgid(arg1
));
8894 #ifdef TARGET_NR_lchown32
8895 case TARGET_NR_lchown32
:
8896 if (!(p
= lock_user_string(arg1
)))
8898 ret
= get_errno(lchown(p
, arg2
, arg3
));
8899 unlock_user(p
, arg1
, 0);
8902 #ifdef TARGET_NR_getuid32
8903 case TARGET_NR_getuid32
:
8904 ret
= get_errno(getuid());
8908 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
8909 /* Alpha specific */
8910 case TARGET_NR_getxuid
:
8914 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
8916 ret
= get_errno(getuid());
8919 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
8920 /* Alpha specific */
8921 case TARGET_NR_getxgid
:
8925 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
8927 ret
= get_errno(getgid());
8930 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
8931 /* Alpha specific */
8932 case TARGET_NR_osf_getsysinfo
:
8933 ret
= -TARGET_EOPNOTSUPP
;
8935 case TARGET_GSI_IEEE_FP_CONTROL
:
8937 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
8939 /* Copied from linux ieee_fpcr_to_swcr. */
8940 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
8941 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
8942 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
8943 | SWCR_TRAP_ENABLE_DZE
8944 | SWCR_TRAP_ENABLE_OVF
);
8945 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
8946 | SWCR_TRAP_ENABLE_INE
);
8947 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
8948 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
8950 if (put_user_u64 (swcr
, arg2
))
8956 /* case GSI_IEEE_STATE_AT_SIGNAL:
8957 -- Not implemented in linux kernel.
8959 -- Retrieves current unaligned access state; not much used.
8961 -- Retrieves implver information; surely not used.
8963 -- Grabs a copy of the HWRPB; surely not used.
8968 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
8969 /* Alpha specific */
8970 case TARGET_NR_osf_setsysinfo
:
8971 ret
= -TARGET_EOPNOTSUPP
;
8973 case TARGET_SSI_IEEE_FP_CONTROL
:
8975 uint64_t swcr
, fpcr
, orig_fpcr
;
8977 if (get_user_u64 (swcr
, arg2
)) {
8980 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
8981 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
8983 /* Copied from linux ieee_swcr_to_fpcr. */
8984 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
8985 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
8986 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
8987 | SWCR_TRAP_ENABLE_DZE
8988 | SWCR_TRAP_ENABLE_OVF
)) << 48;
8989 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
8990 | SWCR_TRAP_ENABLE_INE
)) << 57;
8991 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
8992 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
8994 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
8999 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
9001 uint64_t exc
, fpcr
, orig_fpcr
;
9004 if (get_user_u64(exc
, arg2
)) {
9008 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
9010 /* We only add to the exception status here. */
9011 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
9013 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
9016 /* Old exceptions are not signaled. */
9017 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
9019 /* If any exceptions set by this call,
9020 and are unmasked, send a signal. */
9022 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
9023 si_code
= TARGET_FPE_FLTRES
;
9025 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
9026 si_code
= TARGET_FPE_FLTUND
;
9028 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
9029 si_code
= TARGET_FPE_FLTOVF
;
9031 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
9032 si_code
= TARGET_FPE_FLTDIV
;
9034 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
9035 si_code
= TARGET_FPE_FLTINV
;
9038 target_siginfo_t info
;
9039 info
.si_signo
= SIGFPE
;
9041 info
.si_code
= si_code
;
9042 info
._sifields
._sigfault
._addr
9043 = ((CPUArchState
*)cpu_env
)->pc
;
9044 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
9049 /* case SSI_NVPAIRS:
9050 -- Used with SSIN_UACPROC to enable unaligned accesses.
9051 case SSI_IEEE_STATE_AT_SIGNAL:
9052 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
9053 -- Not implemented in linux kernel
9058 #ifdef TARGET_NR_osf_sigprocmask
9059 /* Alpha specific. */
9060 case TARGET_NR_osf_sigprocmask
:
9064 sigset_t set
, oldset
;
9067 case TARGET_SIG_BLOCK
:
9070 case TARGET_SIG_UNBLOCK
:
9073 case TARGET_SIG_SETMASK
:
9077 ret
= -TARGET_EINVAL
;
9081 target_to_host_old_sigset(&set
, &mask
);
9082 do_sigprocmask(how
, &set
, &oldset
);
9083 host_to_target_old_sigset(&mask
, &oldset
);
9089 #ifdef TARGET_NR_getgid32
9090 case TARGET_NR_getgid32
:
9091 ret
= get_errno(getgid());
9094 #ifdef TARGET_NR_geteuid32
9095 case TARGET_NR_geteuid32
:
9096 ret
= get_errno(geteuid());
9099 #ifdef TARGET_NR_getegid32
9100 case TARGET_NR_getegid32
:
9101 ret
= get_errno(getegid());
9104 #ifdef TARGET_NR_setreuid32
9105 case TARGET_NR_setreuid32
:
9106 ret
= get_errno(setreuid(arg1
, arg2
));
9109 #ifdef TARGET_NR_setregid32
9110 case TARGET_NR_setregid32
:
9111 ret
= get_errno(setregid(arg1
, arg2
));
9114 #ifdef TARGET_NR_getgroups32
9115 case TARGET_NR_getgroups32
:
9117 int gidsetsize
= arg1
;
9118 uint32_t *target_grouplist
;
9122 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
9123 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
9124 if (gidsetsize
== 0)
9126 if (!is_error(ret
)) {
9127 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
9128 if (!target_grouplist
) {
9129 ret
= -TARGET_EFAULT
;
9132 for(i
= 0;i
< ret
; i
++)
9133 target_grouplist
[i
] = tswap32(grouplist
[i
]);
9134 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
9139 #ifdef TARGET_NR_setgroups32
9140 case TARGET_NR_setgroups32
:
9142 int gidsetsize
= arg1
;
9143 uint32_t *target_grouplist
;
9147 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
9148 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
9149 if (!target_grouplist
) {
9150 ret
= -TARGET_EFAULT
;
9153 for(i
= 0;i
< gidsetsize
; i
++)
9154 grouplist
[i
] = tswap32(target_grouplist
[i
]);
9155 unlock_user(target_grouplist
, arg2
, 0);
9156 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
9160 #ifdef TARGET_NR_fchown32
9161 case TARGET_NR_fchown32
:
9162 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
9165 #ifdef TARGET_NR_setresuid32
9166 case TARGET_NR_setresuid32
:
9167 ret
= get_errno(setresuid(arg1
, arg2
, arg3
));
9170 #ifdef TARGET_NR_getresuid32
9171 case TARGET_NR_getresuid32
:
9173 uid_t ruid
, euid
, suid
;
9174 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
9175 if (!is_error(ret
)) {
9176 if (put_user_u32(ruid
, arg1
)
9177 || put_user_u32(euid
, arg2
)
9178 || put_user_u32(suid
, arg3
))
9184 #ifdef TARGET_NR_setresgid32
9185 case TARGET_NR_setresgid32
:
9186 ret
= get_errno(setresgid(arg1
, arg2
, arg3
));
9189 #ifdef TARGET_NR_getresgid32
9190 case TARGET_NR_getresgid32
:
9192 gid_t rgid
, egid
, sgid
;
9193 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
9194 if (!is_error(ret
)) {
9195 if (put_user_u32(rgid
, arg1
)
9196 || put_user_u32(egid
, arg2
)
9197 || put_user_u32(sgid
, arg3
))
9203 #ifdef TARGET_NR_chown32
9204 case TARGET_NR_chown32
:
9205 if (!(p
= lock_user_string(arg1
)))
9207 ret
= get_errno(chown(p
, arg2
, arg3
));
9208 unlock_user(p
, arg1
, 0);
9211 #ifdef TARGET_NR_setuid32
9212 case TARGET_NR_setuid32
:
9213 ret
= get_errno(setuid(arg1
));
9216 #ifdef TARGET_NR_setgid32
9217 case TARGET_NR_setgid32
:
9218 ret
= get_errno(setgid(arg1
));
9221 #ifdef TARGET_NR_setfsuid32
9222 case TARGET_NR_setfsuid32
:
9223 ret
= get_errno(setfsuid(arg1
));
9226 #ifdef TARGET_NR_setfsgid32
9227 case TARGET_NR_setfsgid32
:
9228 ret
= get_errno(setfsgid(arg1
));
9232 case TARGET_NR_pivot_root
:
9234 #ifdef TARGET_NR_mincore
9235 case TARGET_NR_mincore
:
9238 ret
= -TARGET_EFAULT
;
9239 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
9241 if (!(p
= lock_user_string(arg3
)))
9243 ret
= get_errno(mincore(a
, arg2
, p
));
9244 unlock_user(p
, arg3
, ret
);
9246 unlock_user(a
, arg1
, 0);
9250 #ifdef TARGET_NR_arm_fadvise64_64
9251 case TARGET_NR_arm_fadvise64_64
:
9254 * arm_fadvise64_64 looks like fadvise64_64 but
9255 * with different argument order
9263 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
9264 #ifdef TARGET_NR_fadvise64_64
9265 case TARGET_NR_fadvise64_64
:
9267 #ifdef TARGET_NR_fadvise64
9268 case TARGET_NR_fadvise64
:
9272 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
9273 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
9274 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
9275 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
9279 ret
= -posix_fadvise(arg1
, arg2
, arg3
, arg4
);
9282 #ifdef TARGET_NR_madvise
9283 case TARGET_NR_madvise
:
9284 /* A straight passthrough may not be safe because qemu sometimes
9285 turns private file-backed mappings into anonymous mappings.
9286 This will break MADV_DONTNEED.
9287 This is a hint, so ignoring and returning success is ok. */
9291 #if TARGET_ABI_BITS == 32
9292 case TARGET_NR_fcntl64
:
9296 struct target_flock64
*target_fl
;
9298 struct target_eabi_flock64
*target_efl
;
9301 cmd
= target_to_host_fcntl_cmd(arg2
);
9302 if (cmd
== -TARGET_EINVAL
) {
9308 case TARGET_F_GETLK64
:
9310 if (((CPUARMState
*)cpu_env
)->eabi
) {
9311 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
9313 fl
.l_type
= tswap16(target_efl
->l_type
);
9314 fl
.l_whence
= tswap16(target_efl
->l_whence
);
9315 fl
.l_start
= tswap64(target_efl
->l_start
);
9316 fl
.l_len
= tswap64(target_efl
->l_len
);
9317 fl
.l_pid
= tswap32(target_efl
->l_pid
);
9318 unlock_user_struct(target_efl
, arg3
, 0);
9322 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
9324 fl
.l_type
= tswap16(target_fl
->l_type
);
9325 fl
.l_whence
= tswap16(target_fl
->l_whence
);
9326 fl
.l_start
= tswap64(target_fl
->l_start
);
9327 fl
.l_len
= tswap64(target_fl
->l_len
);
9328 fl
.l_pid
= tswap32(target_fl
->l_pid
);
9329 unlock_user_struct(target_fl
, arg3
, 0);
9331 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
9334 if (((CPUARMState
*)cpu_env
)->eabi
) {
9335 if (!lock_user_struct(VERIFY_WRITE
, target_efl
, arg3
, 0))
9337 target_efl
->l_type
= tswap16(fl
.l_type
);
9338 target_efl
->l_whence
= tswap16(fl
.l_whence
);
9339 target_efl
->l_start
= tswap64(fl
.l_start
);
9340 target_efl
->l_len
= tswap64(fl
.l_len
);
9341 target_efl
->l_pid
= tswap32(fl
.l_pid
);
9342 unlock_user_struct(target_efl
, arg3
, 1);
9346 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg3
, 0))
9348 target_fl
->l_type
= tswap16(fl
.l_type
);
9349 target_fl
->l_whence
= tswap16(fl
.l_whence
);
9350 target_fl
->l_start
= tswap64(fl
.l_start
);
9351 target_fl
->l_len
= tswap64(fl
.l_len
);
9352 target_fl
->l_pid
= tswap32(fl
.l_pid
);
9353 unlock_user_struct(target_fl
, arg3
, 1);
9358 case TARGET_F_SETLK64
:
9359 case TARGET_F_SETLKW64
:
9361 if (((CPUARMState
*)cpu_env
)->eabi
) {
9362 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
9364 fl
.l_type
= tswap16(target_efl
->l_type
);
9365 fl
.l_whence
= tswap16(target_efl
->l_whence
);
9366 fl
.l_start
= tswap64(target_efl
->l_start
);
9367 fl
.l_len
= tswap64(target_efl
->l_len
);
9368 fl
.l_pid
= tswap32(target_efl
->l_pid
);
9369 unlock_user_struct(target_efl
, arg3
, 0);
9373 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
9375 fl
.l_type
= tswap16(target_fl
->l_type
);
9376 fl
.l_whence
= tswap16(target_fl
->l_whence
);
9377 fl
.l_start
= tswap64(target_fl
->l_start
);
9378 fl
.l_len
= tswap64(target_fl
->l_len
);
9379 fl
.l_pid
= tswap32(target_fl
->l_pid
);
9380 unlock_user_struct(target_fl
, arg3
, 0);
9382 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
9385 ret
= do_fcntl(arg1
, arg2
, arg3
);
9391 #ifdef TARGET_NR_cacheflush
9392 case TARGET_NR_cacheflush
:
9393 /* self-modifying code is handled automatically, so nothing needed */
9397 #ifdef TARGET_NR_security
9398 case TARGET_NR_security
:
9401 #ifdef TARGET_NR_getpagesize
9402 case TARGET_NR_getpagesize
:
9403 ret
= TARGET_PAGE_SIZE
;
9406 case TARGET_NR_gettid
:
9407 ret
= get_errno(gettid());
9409 #ifdef TARGET_NR_readahead
9410 case TARGET_NR_readahead
:
9411 #if TARGET_ABI_BITS == 32
9412 if (regpairs_aligned(cpu_env
)) {
9417 ret
= get_errno(readahead(arg1
, ((off64_t
)arg3
<< 32) | arg2
, arg4
));
9419 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
9424 #ifdef TARGET_NR_setxattr
9425 case TARGET_NR_listxattr
:
9426 case TARGET_NR_llistxattr
:
9430 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9432 ret
= -TARGET_EFAULT
;
9436 p
= lock_user_string(arg1
);
9438 if (num
== TARGET_NR_listxattr
) {
9439 ret
= get_errno(listxattr(p
, b
, arg3
));
9441 ret
= get_errno(llistxattr(p
, b
, arg3
));
9444 ret
= -TARGET_EFAULT
;
9446 unlock_user(p
, arg1
, 0);
9447 unlock_user(b
, arg2
, arg3
);
9450 case TARGET_NR_flistxattr
:
9454 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9456 ret
= -TARGET_EFAULT
;
9460 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
9461 unlock_user(b
, arg2
, arg3
);
9464 case TARGET_NR_setxattr
:
9465 case TARGET_NR_lsetxattr
:
9467 void *p
, *n
, *v
= 0;
9469 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
9471 ret
= -TARGET_EFAULT
;
9475 p
= lock_user_string(arg1
);
9476 n
= lock_user_string(arg2
);
9478 if (num
== TARGET_NR_setxattr
) {
9479 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
9481 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
9484 ret
= -TARGET_EFAULT
;
9486 unlock_user(p
, arg1
, 0);
9487 unlock_user(n
, arg2
, 0);
9488 unlock_user(v
, arg3
, 0);
9491 case TARGET_NR_fsetxattr
:
9495 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
9497 ret
= -TARGET_EFAULT
;
9501 n
= lock_user_string(arg2
);
9503 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
9505 ret
= -TARGET_EFAULT
;
9507 unlock_user(n
, arg2
, 0);
9508 unlock_user(v
, arg3
, 0);
9511 case TARGET_NR_getxattr
:
9512 case TARGET_NR_lgetxattr
:
9514 void *p
, *n
, *v
= 0;
9516 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
9518 ret
= -TARGET_EFAULT
;
9522 p
= lock_user_string(arg1
);
9523 n
= lock_user_string(arg2
);
9525 if (num
== TARGET_NR_getxattr
) {
9526 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
9528 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
9531 ret
= -TARGET_EFAULT
;
9533 unlock_user(p
, arg1
, 0);
9534 unlock_user(n
, arg2
, 0);
9535 unlock_user(v
, arg3
, arg4
);
9538 case TARGET_NR_fgetxattr
:
9542 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
9544 ret
= -TARGET_EFAULT
;
9548 n
= lock_user_string(arg2
);
9550 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
9552 ret
= -TARGET_EFAULT
;
9554 unlock_user(n
, arg2
, 0);
9555 unlock_user(v
, arg3
, arg4
);
9558 case TARGET_NR_removexattr
:
9559 case TARGET_NR_lremovexattr
:
9562 p
= lock_user_string(arg1
);
9563 n
= lock_user_string(arg2
);
9565 if (num
== TARGET_NR_removexattr
) {
9566 ret
= get_errno(removexattr(p
, n
));
9568 ret
= get_errno(lremovexattr(p
, n
));
9571 ret
= -TARGET_EFAULT
;
9573 unlock_user(p
, arg1
, 0);
9574 unlock_user(n
, arg2
, 0);
9577 case TARGET_NR_fremovexattr
:
9580 n
= lock_user_string(arg2
);
9582 ret
= get_errno(fremovexattr(arg1
, n
));
9584 ret
= -TARGET_EFAULT
;
9586 unlock_user(n
, arg2
, 0);
9590 #endif /* CONFIG_ATTR */
9591 #ifdef TARGET_NR_set_thread_area
9592 case TARGET_NR_set_thread_area
:
9593 #if defined(TARGET_MIPS)
9594 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
9597 #elif defined(TARGET_CRIS)
9599 ret
= -TARGET_EINVAL
;
9601 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
9605 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
9606 ret
= do_set_thread_area(cpu_env
, arg1
);
9608 #elif defined(TARGET_M68K)
9610 TaskState
*ts
= cpu
->opaque
;
9611 ts
->tp_value
= arg1
;
9616 goto unimplemented_nowarn
;
9619 #ifdef TARGET_NR_get_thread_area
9620 case TARGET_NR_get_thread_area
:
9621 #if defined(TARGET_I386) && defined(TARGET_ABI32)
9622 ret
= do_get_thread_area(cpu_env
, arg1
);
9624 #elif defined(TARGET_M68K)
9626 TaskState
*ts
= cpu
->opaque
;
9631 goto unimplemented_nowarn
;
9634 #ifdef TARGET_NR_getdomainname
9635 case TARGET_NR_getdomainname
:
9636 goto unimplemented_nowarn
;
9639 #ifdef TARGET_NR_clock_gettime
9640 case TARGET_NR_clock_gettime
:
9643 ret
= get_errno(clock_gettime(arg1
, &ts
));
9644 if (!is_error(ret
)) {
9645 host_to_target_timespec(arg2
, &ts
);
9650 #ifdef TARGET_NR_clock_getres
9651 case TARGET_NR_clock_getres
:
9654 ret
= get_errno(clock_getres(arg1
, &ts
));
9655 if (!is_error(ret
)) {
9656 host_to_target_timespec(arg2
, &ts
);
9661 #ifdef TARGET_NR_clock_nanosleep
9662 case TARGET_NR_clock_nanosleep
:
9665 target_to_host_timespec(&ts
, arg3
);
9666 ret
= get_errno(clock_nanosleep(arg1
, arg2
, &ts
, arg4
? &ts
: NULL
));
9668 host_to_target_timespec(arg4
, &ts
);
9670 #if defined(TARGET_PPC)
9671 /* clock_nanosleep is odd in that it returns positive errno values.
9672 * On PPC, CR0 bit 3 should be set in such a situation. */
9674 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
9681 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
9682 case TARGET_NR_set_tid_address
:
9683 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
9687 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
9688 case TARGET_NR_tkill
:
9689 ret
= get_errno(sys_tkill((int)arg1
, target_to_host_signal(arg2
)));
9693 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
9694 case TARGET_NR_tgkill
:
9695 ret
= get_errno(sys_tgkill((int)arg1
, (int)arg2
,
9696 target_to_host_signal(arg3
)));
9700 #ifdef TARGET_NR_set_robust_list
9701 case TARGET_NR_set_robust_list
:
9702 case TARGET_NR_get_robust_list
:
9703 /* The ABI for supporting robust futexes has userspace pass
9704 * the kernel a pointer to a linked list which is updated by
9705 * userspace after the syscall; the list is walked by the kernel
9706 * when the thread exits. Since the linked list in QEMU guest
9707 * memory isn't a valid linked list for the host and we have
9708 * no way to reliably intercept the thread-death event, we can't
9709 * support these. Silently return ENOSYS so that guest userspace
9710 * falls back to a non-robust futex implementation (which should
9711 * be OK except in the corner case of the guest crashing while
9712 * holding a mutex that is shared with another process via
9715 goto unimplemented_nowarn
;
9718 #if defined(TARGET_NR_utimensat)
9719 case TARGET_NR_utimensat
:
9721 struct timespec
*tsp
, ts
[2];
9725 target_to_host_timespec(ts
, arg3
);
9726 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
9730 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
9732 if (!(p
= lock_user_string(arg2
))) {
9733 ret
= -TARGET_EFAULT
;
9736 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
9737 unlock_user(p
, arg2
, 0);
9742 case TARGET_NR_futex
:
9743 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9745 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
9746 case TARGET_NR_inotify_init
:
9747 ret
= get_errno(sys_inotify_init());
9750 #ifdef CONFIG_INOTIFY1
9751 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
9752 case TARGET_NR_inotify_init1
:
9753 ret
= get_errno(sys_inotify_init1(arg1
));
9757 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
9758 case TARGET_NR_inotify_add_watch
:
9759 p
= lock_user_string(arg2
);
9760 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
9761 unlock_user(p
, arg2
, 0);
9764 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
9765 case TARGET_NR_inotify_rm_watch
:
9766 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
9770 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
9771 case TARGET_NR_mq_open
:
9773 struct mq_attr posix_mq_attr
, *attrp
;
9775 p
= lock_user_string(arg1
- 1);
9777 copy_from_user_mq_attr (&posix_mq_attr
, arg4
);
9778 attrp
= &posix_mq_attr
;
9782 ret
= get_errno(mq_open(p
, arg2
, arg3
, attrp
));
9783 unlock_user (p
, arg1
, 0);
9787 case TARGET_NR_mq_unlink
:
9788 p
= lock_user_string(arg1
- 1);
9789 ret
= get_errno(mq_unlink(p
));
9790 unlock_user (p
, arg1
, 0);
9793 case TARGET_NR_mq_timedsend
:
9797 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
9799 target_to_host_timespec(&ts
, arg5
);
9800 ret
= get_errno(mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
9801 host_to_target_timespec(arg5
, &ts
);
9804 ret
= get_errno(mq_send(arg1
, p
, arg3
, arg4
));
9805 unlock_user (p
, arg2
, arg3
);
9809 case TARGET_NR_mq_timedreceive
:
9814 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
9816 target_to_host_timespec(&ts
, arg5
);
9817 ret
= get_errno(mq_timedreceive(arg1
, p
, arg3
, &prio
, &ts
));
9818 host_to_target_timespec(arg5
, &ts
);
9821 ret
= get_errno(mq_receive(arg1
, p
, arg3
, &prio
));
9822 unlock_user (p
, arg2
, arg3
);
9824 put_user_u32(prio
, arg4
);
9828 /* Not implemented for now... */
9829 /* case TARGET_NR_mq_notify: */
9832 case TARGET_NR_mq_getsetattr
:
9834 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
9837 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
9838 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
9841 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
9842 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
9849 #ifdef CONFIG_SPLICE
9850 #ifdef TARGET_NR_tee
9853 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
9857 #ifdef TARGET_NR_splice
9858 case TARGET_NR_splice
:
9860 loff_t loff_in
, loff_out
;
9861 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
9863 if (get_user_u64(loff_in
, arg2
)) {
9866 ploff_in
= &loff_in
;
9869 if (get_user_u64(loff_out
, arg4
)) {
9872 ploff_out
= &loff_out
;
9874 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
9876 if (put_user_u64(loff_in
, arg2
)) {
9881 if (put_user_u64(loff_out
, arg4
)) {
9888 #ifdef TARGET_NR_vmsplice
9889 case TARGET_NR_vmsplice
:
9891 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
9893 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
9894 unlock_iovec(vec
, arg2
, arg3
, 0);
9896 ret
= -host_to_target_errno(errno
);
9901 #endif /* CONFIG_SPLICE */
9902 #ifdef CONFIG_EVENTFD
9903 #if defined(TARGET_NR_eventfd)
9904 case TARGET_NR_eventfd
:
9905 ret
= get_errno(eventfd(arg1
, 0));
9906 fd_trans_unregister(ret
);
9909 #if defined(TARGET_NR_eventfd2)
9910 case TARGET_NR_eventfd2
:
9912 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
9913 if (arg2
& TARGET_O_NONBLOCK
) {
9914 host_flags
|= O_NONBLOCK
;
9916 if (arg2
& TARGET_O_CLOEXEC
) {
9917 host_flags
|= O_CLOEXEC
;
9919 ret
= get_errno(eventfd(arg1
, host_flags
));
9920 fd_trans_unregister(ret
);
9924 #endif /* CONFIG_EVENTFD */
9925 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
9926 case TARGET_NR_fallocate
:
9927 #if TARGET_ABI_BITS == 32
9928 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
9929 target_offset64(arg5
, arg6
)));
9931 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
9935 #if defined(CONFIG_SYNC_FILE_RANGE)
9936 #if defined(TARGET_NR_sync_file_range)
9937 case TARGET_NR_sync_file_range
:
9938 #if TARGET_ABI_BITS == 32
9939 #if defined(TARGET_MIPS)
9940 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
9941 target_offset64(arg5
, arg6
), arg7
));
9943 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
9944 target_offset64(arg4
, arg5
), arg6
));
9945 #endif /* !TARGET_MIPS */
9947 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
9951 #if defined(TARGET_NR_sync_file_range2)
9952 case TARGET_NR_sync_file_range2
:
9953 /* This is like sync_file_range but the arguments are reordered */
9954 #if TARGET_ABI_BITS == 32
9955 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
9956 target_offset64(arg5
, arg6
), arg2
));
9958 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
9963 #if defined(TARGET_NR_signalfd4)
9964 case TARGET_NR_signalfd4
:
9965 ret
= do_signalfd4(arg1
, arg2
, arg4
);
9968 #if defined(TARGET_NR_signalfd)
9969 case TARGET_NR_signalfd
:
9970 ret
= do_signalfd4(arg1
, arg2
, 0);
9973 #if defined(CONFIG_EPOLL)
9974 #if defined(TARGET_NR_epoll_create)
9975 case TARGET_NR_epoll_create
:
9976 ret
= get_errno(epoll_create(arg1
));
9979 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
9980 case TARGET_NR_epoll_create1
:
9981 ret
= get_errno(epoll_create1(arg1
));
9984 #if defined(TARGET_NR_epoll_ctl)
9985 case TARGET_NR_epoll_ctl
:
9987 struct epoll_event ep
;
9988 struct epoll_event
*epp
= 0;
9990 struct target_epoll_event
*target_ep
;
9991 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
9994 ep
.events
= tswap32(target_ep
->events
);
9995 /* The epoll_data_t union is just opaque data to the kernel,
9996 * so we transfer all 64 bits across and need not worry what
9997 * actual data type it is.
9999 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
10000 unlock_user_struct(target_ep
, arg4
, 0);
10003 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
10008 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
10009 #define IMPLEMENT_EPOLL_PWAIT
10011 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
10012 #if defined(TARGET_NR_epoll_wait)
10013 case TARGET_NR_epoll_wait
:
10015 #if defined(IMPLEMENT_EPOLL_PWAIT)
10016 case TARGET_NR_epoll_pwait
:
10019 struct target_epoll_event
*target_ep
;
10020 struct epoll_event
*ep
;
10022 int maxevents
= arg3
;
10023 int timeout
= arg4
;
10025 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
10026 maxevents
* sizeof(struct target_epoll_event
), 1);
10031 ep
= alloca(maxevents
* sizeof(struct epoll_event
));
10034 #if defined(IMPLEMENT_EPOLL_PWAIT)
10035 case TARGET_NR_epoll_pwait
:
10037 target_sigset_t
*target_set
;
10038 sigset_t _set
, *set
= &_set
;
10041 target_set
= lock_user(VERIFY_READ
, arg5
,
10042 sizeof(target_sigset_t
), 1);
10044 unlock_user(target_ep
, arg2
, 0);
10047 target_to_host_sigset(set
, target_set
);
10048 unlock_user(target_set
, arg5
, 0);
10053 ret
= get_errno(epoll_pwait(epfd
, ep
, maxevents
, timeout
, set
));
10057 #if defined(TARGET_NR_epoll_wait)
10058 case TARGET_NR_epoll_wait
:
10059 ret
= get_errno(epoll_wait(epfd
, ep
, maxevents
, timeout
));
10063 ret
= -TARGET_ENOSYS
;
10065 if (!is_error(ret
)) {
10067 for (i
= 0; i
< ret
; i
++) {
10068 target_ep
[i
].events
= tswap32(ep
[i
].events
);
10069 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
10072 unlock_user(target_ep
, arg2
, ret
* sizeof(struct target_epoll_event
));
10077 #ifdef TARGET_NR_prlimit64
10078 case TARGET_NR_prlimit64
:
10080 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
10081 struct target_rlimit64
*target_rnew
, *target_rold
;
10082 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
10083 int resource
= target_to_host_resource(arg2
);
10085 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
10088 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
10089 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
10090 unlock_user_struct(target_rnew
, arg3
, 0);
10094 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
10095 if (!is_error(ret
) && arg4
) {
10096 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
10099 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
10100 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
10101 unlock_user_struct(target_rold
, arg4
, 1);
10106 #ifdef TARGET_NR_gethostname
10107 case TARGET_NR_gethostname
:
10109 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
10111 ret
= get_errno(gethostname(name
, arg2
));
10112 unlock_user(name
, arg1
, arg2
);
10114 ret
= -TARGET_EFAULT
;
10119 #ifdef TARGET_NR_atomic_cmpxchg_32
10120 case TARGET_NR_atomic_cmpxchg_32
:
10122 /* should use start_exclusive from main.c */
10123 abi_ulong mem_value
;
10124 if (get_user_u32(mem_value
, arg6
)) {
10125 target_siginfo_t info
;
10126 info
.si_signo
= SIGSEGV
;
10128 info
.si_code
= TARGET_SEGV_MAPERR
;
10129 info
._sifields
._sigfault
._addr
= arg6
;
10130 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
10134 if (mem_value
== arg2
)
10135 put_user_u32(arg1
, arg6
);
10140 #ifdef TARGET_NR_atomic_barrier
10141 case TARGET_NR_atomic_barrier
:
10143 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
10149 #ifdef TARGET_NR_timer_create
10150 case TARGET_NR_timer_create
:
10152 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
10154 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
10157 int timer_index
= next_free_host_timer();
10159 if (timer_index
< 0) {
10160 ret
= -TARGET_EAGAIN
;
10162 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
10165 phost_sevp
= &host_sevp
;
10166 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
10172 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
10176 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
10185 #ifdef TARGET_NR_timer_settime
10186 case TARGET_NR_timer_settime
:
10188 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
10189 * struct itimerspec * old_value */
10190 target_timer_t timerid
= get_timer_id(arg1
);
10194 } else if (arg3
== 0) {
10195 ret
= -TARGET_EINVAL
;
10197 timer_t htimer
= g_posix_timers
[timerid
];
10198 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
10200 target_to_host_itimerspec(&hspec_new
, arg3
);
10202 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
10203 host_to_target_itimerspec(arg2
, &hspec_old
);
10209 #ifdef TARGET_NR_timer_gettime
10210 case TARGET_NR_timer_gettime
:
10212 /* args: timer_t timerid, struct itimerspec *curr_value */
10213 target_timer_t timerid
= get_timer_id(arg1
);
10217 } else if (!arg2
) {
10218 ret
= -TARGET_EFAULT
;
10220 timer_t htimer
= g_posix_timers
[timerid
];
10221 struct itimerspec hspec
;
10222 ret
= get_errno(timer_gettime(htimer
, &hspec
));
10224 if (host_to_target_itimerspec(arg2
, &hspec
)) {
10225 ret
= -TARGET_EFAULT
;
10232 #ifdef TARGET_NR_timer_getoverrun
10233 case TARGET_NR_timer_getoverrun
:
10235 /* args: timer_t timerid */
10236 target_timer_t timerid
= get_timer_id(arg1
);
10241 timer_t htimer
= g_posix_timers
[timerid
];
10242 ret
= get_errno(timer_getoverrun(htimer
));
10244 fd_trans_unregister(ret
);
10249 #ifdef TARGET_NR_timer_delete
10250 case TARGET_NR_timer_delete
:
10252 /* args: timer_t timerid */
10253 target_timer_t timerid
= get_timer_id(arg1
);
10258 timer_t htimer
= g_posix_timers
[timerid
];
10259 ret
= get_errno(timer_delete(htimer
));
10260 g_posix_timers
[timerid
] = 0;
10266 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
10267 case TARGET_NR_timerfd_create
:
10268 ret
= get_errno(timerfd_create(arg1
,
10269 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
10273 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
10274 case TARGET_NR_timerfd_gettime
:
10276 struct itimerspec its_curr
;
10278 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
10280 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
10287 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
10288 case TARGET_NR_timerfd_settime
:
10290 struct itimerspec its_new
, its_old
, *p_new
;
10293 if (target_to_host_itimerspec(&its_new
, arg3
)) {
10301 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
10303 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
10310 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
10311 case TARGET_NR_ioprio_get
:
10312 ret
= get_errno(ioprio_get(arg1
, arg2
));
10316 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
10317 case TARGET_NR_ioprio_set
:
10318 ret
= get_errno(ioprio_set(arg1
, arg2
, arg3
));
10322 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
10323 case TARGET_NR_setns
:
10324 ret
= get_errno(setns(arg1
, arg2
));
10327 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
10328 case TARGET_NR_unshare
:
10329 ret
= get_errno(unshare(arg1
));
10335 gemu_log("qemu: Unsupported syscall: %d\n", num
);
10336 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
10337 unimplemented_nowarn
:
10339 ret
= -TARGET_ENOSYS
;
10344 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
10347 print_syscall_ret(num
, ret
);
10350 ret
= -TARGET_EFAULT
;