4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
27 #include <sys/mount.h>
29 #include <sys/fsuid.h>
30 #include <sys/personality.h>
31 #include <sys/prctl.h>
32 #include <sys/resource.h>
35 #include <linux/capability.h>
38 int __clone2(int (*fn
)(void *), void *child_stack_base
,
39 size_t stack_size
, int flags
, void *arg
, ...);
41 #include <sys/socket.h>
45 #include <sys/times.h>
48 #include <sys/statfs.h>
50 #include <sys/sysinfo.h>
51 #include <sys/signalfd.h>
52 //#include <sys/user.h>
53 #include <netinet/ip.h>
54 #include <netinet/tcp.h>
55 #include <linux/wireless.h>
56 #include <linux/icmp.h>
57 #include "qemu-common.h"
59 #include <sys/timerfd.h>
65 #include <sys/eventfd.h>
68 #include <sys/epoll.h>
71 #include "qemu/xattr.h"
73 #ifdef CONFIG_SENDFILE
74 #include <sys/sendfile.h>
77 #define termios host_termios
78 #define winsize host_winsize
79 #define termio host_termio
80 #define sgttyb host_sgttyb /* same as target */
81 #define tchars host_tchars /* same as target */
82 #define ltchars host_ltchars /* same as target */
84 #include <linux/termios.h>
85 #include <linux/unistd.h>
86 #include <linux/cdrom.h>
87 #include <linux/hdreg.h>
88 #include <linux/soundcard.h>
90 #include <linux/mtio.h>
92 #if defined(CONFIG_FIEMAP)
93 #include <linux/fiemap.h>
97 #include <linux/dm-ioctl.h>
98 #include <linux/reboot.h>
99 #include <linux/route.h>
100 #include <linux/filter.h>
101 #include <linux/blkpg.h>
102 #include "linux_loop.h"
107 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
108 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
112 //#include <linux/msdos_fs.h>
113 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
114 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
125 #define _syscall0(type,name) \
126 static type name (void) \
128 return syscall(__NR_##name); \
131 #define _syscall1(type,name,type1,arg1) \
132 static type name (type1 arg1) \
134 return syscall(__NR_##name, arg1); \
137 #define _syscall2(type,name,type1,arg1,type2,arg2) \
138 static type name (type1 arg1,type2 arg2) \
140 return syscall(__NR_##name, arg1, arg2); \
143 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
144 static type name (type1 arg1,type2 arg2,type3 arg3) \
146 return syscall(__NR_##name, arg1, arg2, arg3); \
149 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
150 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
152 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
155 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
157 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
159 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
163 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
164 type5,arg5,type6,arg6) \
165 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
168 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
172 #define __NR_sys_uname __NR_uname
173 #define __NR_sys_getcwd1 __NR_getcwd
174 #define __NR_sys_getdents __NR_getdents
175 #define __NR_sys_getdents64 __NR_getdents64
176 #define __NR_sys_getpriority __NR_getpriority
177 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
178 #define __NR_sys_syslog __NR_syslog
179 #define __NR_sys_tgkill __NR_tgkill
180 #define __NR_sys_tkill __NR_tkill
181 #define __NR_sys_futex __NR_futex
182 #define __NR_sys_inotify_init __NR_inotify_init
183 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
184 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
186 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
188 #define __NR__llseek __NR_lseek
191 /* Newer kernel ports have llseek() instead of _llseek() */
192 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
193 #define TARGET_NR__llseek TARGET_NR_llseek
197 _syscall0(int, gettid
)
/* Fallback for hosts whose libc does not provide gettid(); it must
 * report failure as a *host* errno value, hence the bare -ENOSYS. */
static int gettid(void)
{
    return -ENOSYS;
}
205 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
206 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
208 #if !defined(__NR_getdents) || \
209 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
210 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
212 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
213 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
214 loff_t
*, res
, uint
, wh
);
216 _syscall3(int,sys_rt_sigqueueinfo
,int,pid
,int,sig
,siginfo_t
*,uinfo
)
217 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
218 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
219 _syscall3(int,sys_tgkill
,int,tgid
,int,pid
,int,sig
)
221 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
222 _syscall2(int,sys_tkill
,int,tid
,int,sig
)
224 #ifdef __NR_exit_group
225 _syscall1(int,exit_group
,int,error_code
)
227 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
228 _syscall1(int,set_tid_address
,int *,tidptr
)
230 #if defined(TARGET_NR_futex) && defined(__NR_futex)
231 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
232 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
234 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
235 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
236 unsigned long *, user_mask_ptr
);
237 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
238 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
239 unsigned long *, user_mask_ptr
);
240 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
242 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
243 struct __user_cap_data_struct
*, data
);
244 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
245 struct __user_cap_data_struct
*, data
);
246 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
247 _syscall2(int, ioprio_get
, int, which
, int, who
)
249 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
250 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
252 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
253 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
256 static bitmask_transtbl fcntl_flags_tbl
[] = {
257 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
258 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
259 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
260 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
261 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
262 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
263 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
264 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
265 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
266 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
267 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
268 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
269 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
270 #if defined(O_DIRECT)
271 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
273 #if defined(O_NOATIME)
274 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
276 #if defined(O_CLOEXEC)
277 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
280 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
282 /* Don't terminate the list prematurely on 64-bit host+guest. */
283 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
284 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
289 typedef abi_long (*TargetFdDataFunc
)(void *, size_t);
290 typedef abi_long (*TargetFdAddrFunc
)(void *, abi_ulong
, socklen_t
);
291 typedef struct TargetFdTrans
{
292 TargetFdDataFunc host_to_target_data
;
293 TargetFdDataFunc target_to_host_data
;
294 TargetFdAddrFunc target_to_host_addr
;
297 static TargetFdTrans
**target_fd_trans
;
299 static unsigned int target_fd_max
;
301 static TargetFdDataFunc
fd_trans_host_to_target_data(int fd
)
303 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
304 return target_fd_trans
[fd
]->host_to_target_data
;
309 static TargetFdAddrFunc
fd_trans_target_to_host_addr(int fd
)
311 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
312 return target_fd_trans
[fd
]->target_to_host_addr
;
317 static void fd_trans_register(int fd
, TargetFdTrans
*trans
)
321 if (fd
>= target_fd_max
) {
322 oldmax
= target_fd_max
;
323 target_fd_max
= ((fd
>> 6) + 1) << 6; /* by slice of 64 entries */
324 target_fd_trans
= g_renew(TargetFdTrans
*,
325 target_fd_trans
, target_fd_max
);
326 memset((void *)(target_fd_trans
+ oldmax
), 0,
327 (target_fd_max
- oldmax
) * sizeof(TargetFdTrans
*));
329 target_fd_trans
[fd
] = trans
;
332 static void fd_trans_unregister(int fd
)
334 if (fd
>= 0 && fd
< target_fd_max
) {
335 target_fd_trans
[fd
] = NULL
;
339 static void fd_trans_dup(int oldfd
, int newfd
)
341 fd_trans_unregister(newfd
);
342 if (oldfd
< target_fd_max
&& target_fd_trans
[oldfd
]) {
343 fd_trans_register(newfd
, target_fd_trans
[oldfd
]);
/* getcwd wrapper following the raw syscall's convention: on success the
 * return value is the length of the path *including* the trailing NUL;
 * on failure -1 is returned and errno has been set by getcwd(). */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return -1;
    }
    return strlen(buf) + 1;
}
356 static int sys_openat(int dirfd
, const char *pathname
, int flags
, mode_t mode
)
359 * open(2) has extra parameter 'mode' when called with
362 if ((flags
& O_CREAT
) != 0) {
363 return (openat(dirfd
, pathname
, flags
, mode
));
365 return (openat(dirfd
, pathname
, flags
));
368 #ifdef TARGET_NR_utimensat
369 #ifdef CONFIG_UTIMENSAT
/* utimensat wrapper: a NULL pathname means "apply the timestamps to the
 * file referenced by dirfd itself", which maps onto futimens(). */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    if (pathname == NULL) {
        return futimens(dirfd, times);
    }
    return utimensat(dirfd, pathname, times, flags);
}
378 #elif defined(__NR_utimensat)
379 #define __NR_sys_utimensat __NR_utimensat
380 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
381 const struct timespec
*,tsp
,int,flags
)
383 static int sys_utimensat(int dirfd
, const char *pathname
,
384 const struct timespec times
[2], int flags
)
390 #endif /* TARGET_NR_utimensat */
392 #ifdef CONFIG_INOTIFY
393 #include <sys/inotify.h>
395 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin wrapper so the syscall dispatcher can refer to every host call
 * through a uniform sys_* name. */
static int sys_inotify_init(void)
{
    return inotify_init();
}
401 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Wrapper around inotify_add_watch(); returns the new watch descriptor
 * or -1 with errno set. */
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return inotify_add_watch(fd, pathname, mask);
}
407 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Wrapper around inotify_rm_watch(); removes watch descriptor 'wd'
 * from inotify instance 'fd'. */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return inotify_rm_watch(fd, wd);
}
413 #ifdef CONFIG_INOTIFY1
414 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Wrapper around inotify_init1(), the flags-taking variant of
 * inotify_init(). */
static int sys_inotify_init1(int flags)
{
    return inotify_init1(flags);
}
422 /* Userspace can usually survive runtime without inotify */
423 #undef TARGET_NR_inotify_init
424 #undef TARGET_NR_inotify_init1
425 #undef TARGET_NR_inotify_add_watch
426 #undef TARGET_NR_inotify_rm_watch
427 #endif /* CONFIG_INOTIFY */
429 #if defined(TARGET_NR_ppoll)
431 # define __NR_ppoll -1
433 #define __NR_sys_ppoll __NR_ppoll
434 _syscall5(int, sys_ppoll
, struct pollfd
*, fds
, nfds_t
, nfds
,
435 struct timespec
*, timeout
, const sigset_t
*, sigmask
,
439 #if defined(TARGET_NR_pselect6)
440 #ifndef __NR_pselect6
441 # define __NR_pselect6 -1
443 #define __NR_sys_pselect6 __NR_pselect6
444 _syscall6(int, sys_pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
,
445 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
);
448 #if defined(TARGET_NR_prlimit64)
449 #ifndef __NR_prlimit64
450 # define __NR_prlimit64 -1
452 #define __NR_sys_prlimit64 __NR_prlimit64
453 /* The glibc rlimit structure may not be that used by the underlying syscall */
454 struct host_rlimit64
{
458 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
459 const struct host_rlimit64
*, new_limit
,
460 struct host_rlimit64
*, old_limit
)
464 #if defined(TARGET_NR_timer_create)
465 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
466 static timer_t g_posix_timers
[32] = { 0, } ;
468 static inline int next_free_host_timer(void)
471 /* FIXME: Does finding the next free slot require a lock? */
472 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
473 if (g_posix_timers
[k
] == 0) {
474 g_posix_timers
[k
] = (timer_t
) 1;
482 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
484 static inline int regpairs_aligned(void *cpu_env
) {
485 return ((((CPUARMState
*)cpu_env
)->eabi
) == 1) ;
487 #elif defined(TARGET_MIPS)
488 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
489 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
490 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
491 * of registers which translates to the same as ARM/MIPS, because we start with
493 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
495 static inline int regpairs_aligned(void *cpu_env
) { return 0; }
498 #define ERRNO_TABLE_SIZE 1200
500 /* target_to_host_errno_table[] is initialized from
501 * host_to_target_errno_table[] in syscall_init(). */
502 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
506 * This list is the union of errno values overridden in asm-<arch>/errno.h
507 * minus the errnos that are not actually generic to all archs.
509 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
510 [EAGAIN
] = TARGET_EAGAIN
,
511 [EIDRM
] = TARGET_EIDRM
,
512 [ECHRNG
] = TARGET_ECHRNG
,
513 [EL2NSYNC
] = TARGET_EL2NSYNC
,
514 [EL3HLT
] = TARGET_EL3HLT
,
515 [EL3RST
] = TARGET_EL3RST
,
516 [ELNRNG
] = TARGET_ELNRNG
,
517 [EUNATCH
] = TARGET_EUNATCH
,
518 [ENOCSI
] = TARGET_ENOCSI
,
519 [EL2HLT
] = TARGET_EL2HLT
,
520 [EDEADLK
] = TARGET_EDEADLK
,
521 [ENOLCK
] = TARGET_ENOLCK
,
522 [EBADE
] = TARGET_EBADE
,
523 [EBADR
] = TARGET_EBADR
,
524 [EXFULL
] = TARGET_EXFULL
,
525 [ENOANO
] = TARGET_ENOANO
,
526 [EBADRQC
] = TARGET_EBADRQC
,
527 [EBADSLT
] = TARGET_EBADSLT
,
528 [EBFONT
] = TARGET_EBFONT
,
529 [ENOSTR
] = TARGET_ENOSTR
,
530 [ENODATA
] = TARGET_ENODATA
,
531 [ETIME
] = TARGET_ETIME
,
532 [ENOSR
] = TARGET_ENOSR
,
533 [ENONET
] = TARGET_ENONET
,
534 [ENOPKG
] = TARGET_ENOPKG
,
535 [EREMOTE
] = TARGET_EREMOTE
,
536 [ENOLINK
] = TARGET_ENOLINK
,
537 [EADV
] = TARGET_EADV
,
538 [ESRMNT
] = TARGET_ESRMNT
,
539 [ECOMM
] = TARGET_ECOMM
,
540 [EPROTO
] = TARGET_EPROTO
,
541 [EDOTDOT
] = TARGET_EDOTDOT
,
542 [EMULTIHOP
] = TARGET_EMULTIHOP
,
543 [EBADMSG
] = TARGET_EBADMSG
,
544 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
545 [EOVERFLOW
] = TARGET_EOVERFLOW
,
546 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
547 [EBADFD
] = TARGET_EBADFD
,
548 [EREMCHG
] = TARGET_EREMCHG
,
549 [ELIBACC
] = TARGET_ELIBACC
,
550 [ELIBBAD
] = TARGET_ELIBBAD
,
551 [ELIBSCN
] = TARGET_ELIBSCN
,
552 [ELIBMAX
] = TARGET_ELIBMAX
,
553 [ELIBEXEC
] = TARGET_ELIBEXEC
,
554 [EILSEQ
] = TARGET_EILSEQ
,
555 [ENOSYS
] = TARGET_ENOSYS
,
556 [ELOOP
] = TARGET_ELOOP
,
557 [ERESTART
] = TARGET_ERESTART
,
558 [ESTRPIPE
] = TARGET_ESTRPIPE
,
559 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
560 [EUSERS
] = TARGET_EUSERS
,
561 [ENOTSOCK
] = TARGET_ENOTSOCK
,
562 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
563 [EMSGSIZE
] = TARGET_EMSGSIZE
,
564 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
565 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
566 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
567 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
568 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
569 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
570 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
571 [EADDRINUSE
] = TARGET_EADDRINUSE
,
572 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
573 [ENETDOWN
] = TARGET_ENETDOWN
,
574 [ENETUNREACH
] = TARGET_ENETUNREACH
,
575 [ENETRESET
] = TARGET_ENETRESET
,
576 [ECONNABORTED
] = TARGET_ECONNABORTED
,
577 [ECONNRESET
] = TARGET_ECONNRESET
,
578 [ENOBUFS
] = TARGET_ENOBUFS
,
579 [EISCONN
] = TARGET_EISCONN
,
580 [ENOTCONN
] = TARGET_ENOTCONN
,
581 [EUCLEAN
] = TARGET_EUCLEAN
,
582 [ENOTNAM
] = TARGET_ENOTNAM
,
583 [ENAVAIL
] = TARGET_ENAVAIL
,
584 [EISNAM
] = TARGET_EISNAM
,
585 [EREMOTEIO
] = TARGET_EREMOTEIO
,
586 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
587 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
588 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
589 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
590 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
591 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
592 [EALREADY
] = TARGET_EALREADY
,
593 [EINPROGRESS
] = TARGET_EINPROGRESS
,
594 [ESTALE
] = TARGET_ESTALE
,
595 [ECANCELED
] = TARGET_ECANCELED
,
596 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
597 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
599 [ENOKEY
] = TARGET_ENOKEY
,
602 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
605 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
608 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
611 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
613 #ifdef ENOTRECOVERABLE
614 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
618 static inline int host_to_target_errno(int err
)
620 if(host_to_target_errno_table
[err
])
621 return host_to_target_errno_table
[err
];
625 static inline int target_to_host_errno(int err
)
627 if (target_to_host_errno_table
[err
])
628 return target_to_host_errno_table
[err
];
632 static inline abi_long
get_errno(abi_long ret
)
635 return -host_to_target_errno(errno
);
640 static inline int is_error(abi_long ret
)
642 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
645 char *target_strerror(int err
)
647 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
650 return strerror(target_to_host_errno(err
));
653 static inline int host_to_target_sock_type(int host_type
)
657 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
659 target_type
= TARGET_SOCK_DGRAM
;
662 target_type
= TARGET_SOCK_STREAM
;
665 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
669 #if defined(SOCK_CLOEXEC)
670 if (host_type
& SOCK_CLOEXEC
) {
671 target_type
|= TARGET_SOCK_CLOEXEC
;
675 #if defined(SOCK_NONBLOCK)
676 if (host_type
& SOCK_NONBLOCK
) {
677 target_type
|= TARGET_SOCK_NONBLOCK
;
684 static abi_ulong target_brk
;
685 static abi_ulong target_original_brk
;
686 static abi_ulong brk_page
;
688 void target_set_brk(abi_ulong new_brk
)
690 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
691 brk_page
= HOST_PAGE_ALIGN(target_brk
);
694 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
695 #define DEBUGF_BRK(message, args...)
697 /* do_brk() must return target values and target errnos. */
698 abi_long
do_brk(abi_ulong new_brk
)
700 abi_long mapped_addr
;
703 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
706 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
709 if (new_brk
< target_original_brk
) {
710 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
715 /* If the new brk is less than the highest page reserved to the
716 * target heap allocation, set it and we're almost done... */
717 if (new_brk
<= brk_page
) {
718 /* Heap contents are initialized to zero, as for anonymous
720 if (new_brk
> target_brk
) {
721 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
723 target_brk
= new_brk
;
724 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
728 /* We need to allocate more memory after the brk... Note that
729 * we don't use MAP_FIXED because that will map over the top of
730 * any existing mapping (like the one with the host libc or qemu
731 * itself); instead we treat "mapped but at wrong address" as
732 * a failure and unmap again.
734 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
735 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
736 PROT_READ
|PROT_WRITE
,
737 MAP_ANON
|MAP_PRIVATE
, 0, 0));
739 if (mapped_addr
== brk_page
) {
740 /* Heap contents are initialized to zero, as for anonymous
741 * mapped pages. Technically the new pages are already
742 * initialized to zero since they *are* anonymous mapped
743 * pages, however we have to take care with the contents that
744 * come from the remaining part of the previous page: it may
745 * contains garbage data due to a previous heap usage (grown
747 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
749 target_brk
= new_brk
;
750 brk_page
= HOST_PAGE_ALIGN(target_brk
);
751 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
754 } else if (mapped_addr
!= -1) {
755 /* Mapped but at wrong address, meaning there wasn't actually
756 * enough space for this brk.
758 target_munmap(mapped_addr
, new_alloc_size
);
760 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
763 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
766 #if defined(TARGET_ALPHA)
767 /* We (partially) emulate OSF/1 on Alpha, which requires we
768 return a proper errno, not an unchanged brk value. */
769 return -TARGET_ENOMEM
;
771 /* For everything else, return the previous break. */
775 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
776 abi_ulong target_fds_addr
,
780 abi_ulong b
, *target_fds
;
782 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
783 if (!(target_fds
= lock_user(VERIFY_READ
,
785 sizeof(abi_ulong
) * nw
,
787 return -TARGET_EFAULT
;
791 for (i
= 0; i
< nw
; i
++) {
792 /* grab the abi_ulong */
793 __get_user(b
, &target_fds
[i
]);
794 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
795 /* check the bit inside the abi_ulong */
802 unlock_user(target_fds
, target_fds_addr
, 0);
807 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
808 abi_ulong target_fds_addr
,
811 if (target_fds_addr
) {
812 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
813 return -TARGET_EFAULT
;
821 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
827 abi_ulong
*target_fds
;
829 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
830 if (!(target_fds
= lock_user(VERIFY_WRITE
,
832 sizeof(abi_ulong
) * nw
,
834 return -TARGET_EFAULT
;
837 for (i
= 0; i
< nw
; i
++) {
839 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
840 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
843 __put_user(v
, &target_fds
[i
]);
846 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
851 #if defined(__alpha__)
857 static inline abi_long
host_to_target_clock_t(long ticks
)
859 #if HOST_HZ == TARGET_HZ
862 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
866 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
867 const struct rusage
*rusage
)
869 struct target_rusage
*target_rusage
;
871 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
872 return -TARGET_EFAULT
;
873 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
874 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
875 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
876 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
877 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
878 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
879 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
880 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
881 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
882 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
883 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
884 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
885 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
886 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
887 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
888 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
889 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
890 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
891 unlock_user_struct(target_rusage
, target_addr
, 1);
896 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
898 abi_ulong target_rlim_swap
;
901 target_rlim_swap
= tswapal(target_rlim
);
902 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
903 return RLIM_INFINITY
;
905 result
= target_rlim_swap
;
906 if (target_rlim_swap
!= (rlim_t
)result
)
907 return RLIM_INFINITY
;
912 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
914 abi_ulong target_rlim_swap
;
917 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
918 target_rlim_swap
= TARGET_RLIM_INFINITY
;
920 target_rlim_swap
= rlim
;
921 result
= tswapal(target_rlim_swap
);
926 static inline int target_to_host_resource(int code
)
929 case TARGET_RLIMIT_AS
:
931 case TARGET_RLIMIT_CORE
:
933 case TARGET_RLIMIT_CPU
:
935 case TARGET_RLIMIT_DATA
:
937 case TARGET_RLIMIT_FSIZE
:
939 case TARGET_RLIMIT_LOCKS
:
941 case TARGET_RLIMIT_MEMLOCK
:
942 return RLIMIT_MEMLOCK
;
943 case TARGET_RLIMIT_MSGQUEUE
:
944 return RLIMIT_MSGQUEUE
;
945 case TARGET_RLIMIT_NICE
:
947 case TARGET_RLIMIT_NOFILE
:
948 return RLIMIT_NOFILE
;
949 case TARGET_RLIMIT_NPROC
:
951 case TARGET_RLIMIT_RSS
:
953 case TARGET_RLIMIT_RTPRIO
:
954 return RLIMIT_RTPRIO
;
955 case TARGET_RLIMIT_SIGPENDING
:
956 return RLIMIT_SIGPENDING
;
957 case TARGET_RLIMIT_STACK
:
964 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
965 abi_ulong target_tv_addr
)
967 struct target_timeval
*target_tv
;
969 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
970 return -TARGET_EFAULT
;
972 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
973 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
975 unlock_user_struct(target_tv
, target_tv_addr
, 0);
980 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
981 const struct timeval
*tv
)
983 struct target_timeval
*target_tv
;
985 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
986 return -TARGET_EFAULT
;
988 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
989 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
991 unlock_user_struct(target_tv
, target_tv_addr
, 1);
996 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
997 abi_ulong target_tz_addr
)
999 struct target_timezone
*target_tz
;
1001 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1002 return -TARGET_EFAULT
;
1005 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1006 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1008 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1013 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1016 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1017 abi_ulong target_mq_attr_addr
)
1019 struct target_mq_attr
*target_mq_attr
;
1021 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1022 target_mq_attr_addr
, 1))
1023 return -TARGET_EFAULT
;
1025 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1026 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1027 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1028 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1030 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1035 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1036 const struct mq_attr
*attr
)
1038 struct target_mq_attr
*target_mq_attr
;
1040 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1041 target_mq_attr_addr
, 0))
1042 return -TARGET_EFAULT
;
1044 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1045 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1046 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1047 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1049 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1055 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1056 /* do_select() must return target values and target errnos. */
1057 static abi_long
do_select(int n
,
1058 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1059 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1061 fd_set rfds
, wfds
, efds
;
1062 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1063 struct timeval tv
, *tv_ptr
;
1066 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1070 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1074 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1079 if (target_tv_addr
) {
1080 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1081 return -TARGET_EFAULT
;
1087 ret
= get_errno(select(n
, rfds_ptr
, wfds_ptr
, efds_ptr
, tv_ptr
));
1089 if (!is_error(ret
)) {
1090 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1091 return -TARGET_EFAULT
;
1092 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1093 return -TARGET_EFAULT
;
1094 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1095 return -TARGET_EFAULT
;
1097 if (target_tv_addr
&& copy_to_user_timeval(target_tv_addr
, &tv
))
1098 return -TARGET_EFAULT
;
1105 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1108 return pipe2(host_pipe
, flags
);
1114 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1115 int flags
, int is_pipe2
)
1119 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1122 return get_errno(ret
);
1124 /* Several targets have special calling conventions for the original
1125 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1127 #if defined(TARGET_ALPHA)
1128 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1129 return host_pipe
[0];
1130 #elif defined(TARGET_MIPS)
1131 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1132 return host_pipe
[0];
1133 #elif defined(TARGET_SH4)
1134 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1135 return host_pipe
[0];
1136 #elif defined(TARGET_SPARC)
1137 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1138 return host_pipe
[0];
1142 if (put_user_s32(host_pipe
[0], pipedes
)
1143 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1144 return -TARGET_EFAULT
;
1145 return get_errno(ret
);
1148 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1149 abi_ulong target_addr
,
1152 struct target_ip_mreqn
*target_smreqn
;
1154 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1156 return -TARGET_EFAULT
;
1157 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1158 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1159 if (len
== sizeof(struct target_ip_mreqn
))
1160 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1161 unlock_user(target_smreqn
, target_addr
, 0);
1166 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1167 abi_ulong target_addr
,
1170 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1171 sa_family_t sa_family
;
1172 struct target_sockaddr
*target_saddr
;
1174 if (fd_trans_target_to_host_addr(fd
)) {
1175 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1178 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1180 return -TARGET_EFAULT
;
1182 sa_family
= tswap16(target_saddr
->sa_family
);
1184 /* Oops. The caller might send a incomplete sun_path; sun_path
1185 * must be terminated by \0 (see the manual page), but
1186 * unfortunately it is quite common to specify sockaddr_un
1187 * length as "strlen(x->sun_path)" while it should be
1188 * "strlen(...) + 1". We'll fix that here if needed.
1189 * Linux kernel has a similar feature.
1192 if (sa_family
== AF_UNIX
) {
1193 if (len
< unix_maxlen
&& len
> 0) {
1194 char *cp
= (char*)target_saddr
;
1196 if ( cp
[len
-1] && !cp
[len
] )
1199 if (len
> unix_maxlen
)
1203 memcpy(addr
, target_saddr
, len
);
1204 addr
->sa_family
= sa_family
;
1205 if (sa_family
== AF_PACKET
) {
1206 struct target_sockaddr_ll
*lladdr
;
1208 lladdr
= (struct target_sockaddr_ll
*)addr
;
1209 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1210 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1212 unlock_user(target_saddr
, target_addr
, 0);
1217 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1218 struct sockaddr
*addr
,
1221 struct target_sockaddr
*target_saddr
;
1223 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1225 return -TARGET_EFAULT
;
1226 memcpy(target_saddr
, addr
, len
);
1227 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1228 unlock_user(target_saddr
, target_addr
, len
);
1233 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1234 struct target_msghdr
*target_msgh
)
1236 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1237 abi_long msg_controllen
;
1238 abi_ulong target_cmsg_addr
;
1239 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1240 socklen_t space
= 0;
1242 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1243 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1245 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1246 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1247 target_cmsg_start
= target_cmsg
;
1249 return -TARGET_EFAULT
;
1251 while (cmsg
&& target_cmsg
) {
1252 void *data
= CMSG_DATA(cmsg
);
1253 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1255 int len
= tswapal(target_cmsg
->cmsg_len
)
1256 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
1258 space
+= CMSG_SPACE(len
);
1259 if (space
> msgh
->msg_controllen
) {
1260 space
-= CMSG_SPACE(len
);
1261 /* This is a QEMU bug, since we allocated the payload
1262 * area ourselves (unlike overflow in host-to-target
1263 * conversion, which is just the guest giving us a buffer
1264 * that's too small). It can't happen for the payload types
1265 * we currently support; if it becomes an issue in future
1266 * we would need to improve our allocation strategy to
1267 * something more intelligent than "twice the size of the
1268 * target buffer we're reading from".
1270 gemu_log("Host cmsg overflow\n");
1274 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1275 cmsg
->cmsg_level
= SOL_SOCKET
;
1277 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1279 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1280 cmsg
->cmsg_len
= CMSG_LEN(len
);
1282 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1283 int *fd
= (int *)data
;
1284 int *target_fd
= (int *)target_data
;
1285 int i
, numfds
= len
/ sizeof(int);
1287 for (i
= 0; i
< numfds
; i
++) {
1288 __get_user(fd
[i
], target_fd
+ i
);
1290 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1291 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1292 struct ucred
*cred
= (struct ucred
*)data
;
1293 struct target_ucred
*target_cred
=
1294 (struct target_ucred
*)target_data
;
1296 __get_user(cred
->pid
, &target_cred
->pid
);
1297 __get_user(cred
->uid
, &target_cred
->uid
);
1298 __get_user(cred
->gid
, &target_cred
->gid
);
1300 gemu_log("Unsupported ancillary data: %d/%d\n",
1301 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1302 memcpy(data
, target_data
, len
);
1305 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1306 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1309 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1311 msgh
->msg_controllen
= space
;
1315 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1316 struct msghdr
*msgh
)
1318 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1319 abi_long msg_controllen
;
1320 abi_ulong target_cmsg_addr
;
1321 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1322 socklen_t space
= 0;
1324 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1325 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1327 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1328 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1329 target_cmsg_start
= target_cmsg
;
1331 return -TARGET_EFAULT
;
1333 while (cmsg
&& target_cmsg
) {
1334 void *data
= CMSG_DATA(cmsg
);
1335 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1337 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
1338 int tgt_len
, tgt_space
;
1340 /* We never copy a half-header but may copy half-data;
1341 * this is Linux's behaviour in put_cmsg(). Note that
1342 * truncation here is a guest problem (which we report
1343 * to the guest via the CTRUNC bit), unlike truncation
1344 * in target_to_host_cmsg, which is a QEMU bug.
1346 if (msg_controllen
< sizeof(struct cmsghdr
)) {
1347 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1351 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1352 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1354 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1356 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1358 tgt_len
= TARGET_CMSG_LEN(len
);
1360 /* Payload types which need a different size of payload on
1361 * the target must adjust tgt_len here.
1363 switch (cmsg
->cmsg_level
) {
1365 switch (cmsg
->cmsg_type
) {
1367 tgt_len
= sizeof(struct target_timeval
);
1376 if (msg_controllen
< tgt_len
) {
1377 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1378 tgt_len
= msg_controllen
;
1381 /* We must now copy-and-convert len bytes of payload
1382 * into tgt_len bytes of destination space. Bear in mind
1383 * that in both source and destination we may be dealing
1384 * with a truncated value!
1386 switch (cmsg
->cmsg_level
) {
1388 switch (cmsg
->cmsg_type
) {
1391 int *fd
= (int *)data
;
1392 int *target_fd
= (int *)target_data
;
1393 int i
, numfds
= tgt_len
/ sizeof(int);
1395 for (i
= 0; i
< numfds
; i
++) {
1396 __put_user(fd
[i
], target_fd
+ i
);
1402 struct timeval
*tv
= (struct timeval
*)data
;
1403 struct target_timeval
*target_tv
=
1404 (struct target_timeval
*)target_data
;
1406 if (len
!= sizeof(struct timeval
) ||
1407 tgt_len
!= sizeof(struct target_timeval
)) {
1411 /* copy struct timeval to target */
1412 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1413 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1416 case SCM_CREDENTIALS
:
1418 struct ucred
*cred
= (struct ucred
*)data
;
1419 struct target_ucred
*target_cred
=
1420 (struct target_ucred
*)target_data
;
1422 __put_user(cred
->pid
, &target_cred
->pid
);
1423 __put_user(cred
->uid
, &target_cred
->uid
);
1424 __put_user(cred
->gid
, &target_cred
->gid
);
1434 gemu_log("Unsupported ancillary data: %d/%d\n",
1435 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1436 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1437 if (tgt_len
> len
) {
1438 memset(target_data
+ len
, 0, tgt_len
- len
);
1442 target_cmsg
->cmsg_len
= tswapal(tgt_len
);
1443 tgt_space
= TARGET_CMSG_SPACE(len
);
1444 if (msg_controllen
< tgt_space
) {
1445 tgt_space
= msg_controllen
;
1447 msg_controllen
-= tgt_space
;
1449 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1450 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1453 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1455 target_msgh
->msg_controllen
= tswapal(space
);
1459 /* do_setsockopt() Must return target values and target errnos. */
1460 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
1461 abi_ulong optval_addr
, socklen_t optlen
)
1465 struct ip_mreqn
*ip_mreq
;
1466 struct ip_mreq_source
*ip_mreq_source
;
1470 /* TCP options all take an 'int' value. */
1471 if (optlen
< sizeof(uint32_t))
1472 return -TARGET_EINVAL
;
1474 if (get_user_u32(val
, optval_addr
))
1475 return -TARGET_EFAULT
;
1476 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1483 case IP_ROUTER_ALERT
:
1487 case IP_MTU_DISCOVER
:
1493 case IP_MULTICAST_TTL
:
1494 case IP_MULTICAST_LOOP
:
1496 if (optlen
>= sizeof(uint32_t)) {
1497 if (get_user_u32(val
, optval_addr
))
1498 return -TARGET_EFAULT
;
1499 } else if (optlen
>= 1) {
1500 if (get_user_u8(val
, optval_addr
))
1501 return -TARGET_EFAULT
;
1503 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1505 case IP_ADD_MEMBERSHIP
:
1506 case IP_DROP_MEMBERSHIP
:
1507 if (optlen
< sizeof (struct target_ip_mreq
) ||
1508 optlen
> sizeof (struct target_ip_mreqn
))
1509 return -TARGET_EINVAL
;
1511 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
1512 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
1513 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
1516 case IP_BLOCK_SOURCE
:
1517 case IP_UNBLOCK_SOURCE
:
1518 case IP_ADD_SOURCE_MEMBERSHIP
:
1519 case IP_DROP_SOURCE_MEMBERSHIP
:
1520 if (optlen
!= sizeof (struct target_ip_mreq_source
))
1521 return -TARGET_EINVAL
;
1523 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
1524 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
1525 unlock_user (ip_mreq_source
, optval_addr
, 0);
1534 case IPV6_MTU_DISCOVER
:
1537 case IPV6_RECVPKTINFO
:
1539 if (optlen
< sizeof(uint32_t)) {
1540 return -TARGET_EINVAL
;
1542 if (get_user_u32(val
, optval_addr
)) {
1543 return -TARGET_EFAULT
;
1545 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1546 &val
, sizeof(val
)));
1555 /* struct icmp_filter takes an u32 value */
1556 if (optlen
< sizeof(uint32_t)) {
1557 return -TARGET_EINVAL
;
1560 if (get_user_u32(val
, optval_addr
)) {
1561 return -TARGET_EFAULT
;
1563 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1564 &val
, sizeof(val
)));
1571 case TARGET_SOL_SOCKET
:
1573 case TARGET_SO_RCVTIMEO
:
1577 optname
= SO_RCVTIMEO
;
1580 if (optlen
!= sizeof(struct target_timeval
)) {
1581 return -TARGET_EINVAL
;
1584 if (copy_from_user_timeval(&tv
, optval_addr
)) {
1585 return -TARGET_EFAULT
;
1588 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
1592 case TARGET_SO_SNDTIMEO
:
1593 optname
= SO_SNDTIMEO
;
1595 case TARGET_SO_ATTACH_FILTER
:
1597 struct target_sock_fprog
*tfprog
;
1598 struct target_sock_filter
*tfilter
;
1599 struct sock_fprog fprog
;
1600 struct sock_filter
*filter
;
1603 if (optlen
!= sizeof(*tfprog
)) {
1604 return -TARGET_EINVAL
;
1606 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
1607 return -TARGET_EFAULT
;
1609 if (!lock_user_struct(VERIFY_READ
, tfilter
,
1610 tswapal(tfprog
->filter
), 0)) {
1611 unlock_user_struct(tfprog
, optval_addr
, 1);
1612 return -TARGET_EFAULT
;
1615 fprog
.len
= tswap16(tfprog
->len
);
1616 filter
= g_try_new(struct sock_filter
, fprog
.len
);
1617 if (filter
== NULL
) {
1618 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
1619 unlock_user_struct(tfprog
, optval_addr
, 1);
1620 return -TARGET_ENOMEM
;
1622 for (i
= 0; i
< fprog
.len
; i
++) {
1623 filter
[i
].code
= tswap16(tfilter
[i
].code
);
1624 filter
[i
].jt
= tfilter
[i
].jt
;
1625 filter
[i
].jf
= tfilter
[i
].jf
;
1626 filter
[i
].k
= tswap32(tfilter
[i
].k
);
1628 fprog
.filter
= filter
;
1630 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
1631 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
1634 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
1635 unlock_user_struct(tfprog
, optval_addr
, 1);
1638 case TARGET_SO_BINDTODEVICE
:
1640 char *dev_ifname
, *addr_ifname
;
1642 if (optlen
> IFNAMSIZ
- 1) {
1643 optlen
= IFNAMSIZ
- 1;
1645 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
1647 return -TARGET_EFAULT
;
1649 optname
= SO_BINDTODEVICE
;
1650 addr_ifname
= alloca(IFNAMSIZ
);
1651 memcpy(addr_ifname
, dev_ifname
, optlen
);
1652 addr_ifname
[optlen
] = 0;
1653 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
1654 addr_ifname
, optlen
));
1655 unlock_user (dev_ifname
, optval_addr
, 0);
1658 /* Options with 'int' argument. */
1659 case TARGET_SO_DEBUG
:
1662 case TARGET_SO_REUSEADDR
:
1663 optname
= SO_REUSEADDR
;
1665 case TARGET_SO_TYPE
:
1668 case TARGET_SO_ERROR
:
1671 case TARGET_SO_DONTROUTE
:
1672 optname
= SO_DONTROUTE
;
1674 case TARGET_SO_BROADCAST
:
1675 optname
= SO_BROADCAST
;
1677 case TARGET_SO_SNDBUF
:
1678 optname
= SO_SNDBUF
;
1680 case TARGET_SO_SNDBUFFORCE
:
1681 optname
= SO_SNDBUFFORCE
;
1683 case TARGET_SO_RCVBUF
:
1684 optname
= SO_RCVBUF
;
1686 case TARGET_SO_RCVBUFFORCE
:
1687 optname
= SO_RCVBUFFORCE
;
1689 case TARGET_SO_KEEPALIVE
:
1690 optname
= SO_KEEPALIVE
;
1692 case TARGET_SO_OOBINLINE
:
1693 optname
= SO_OOBINLINE
;
1695 case TARGET_SO_NO_CHECK
:
1696 optname
= SO_NO_CHECK
;
1698 case TARGET_SO_PRIORITY
:
1699 optname
= SO_PRIORITY
;
1702 case TARGET_SO_BSDCOMPAT
:
1703 optname
= SO_BSDCOMPAT
;
1706 case TARGET_SO_PASSCRED
:
1707 optname
= SO_PASSCRED
;
1709 case TARGET_SO_PASSSEC
:
1710 optname
= SO_PASSSEC
;
1712 case TARGET_SO_TIMESTAMP
:
1713 optname
= SO_TIMESTAMP
;
1715 case TARGET_SO_RCVLOWAT
:
1716 optname
= SO_RCVLOWAT
;
1722 if (optlen
< sizeof(uint32_t))
1723 return -TARGET_EINVAL
;
1725 if (get_user_u32(val
, optval_addr
))
1726 return -TARGET_EFAULT
;
1727 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
1731 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
1732 ret
= -TARGET_ENOPROTOOPT
;
1737 /* do_getsockopt() Must return target values and target errnos. */
1738 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
1739 abi_ulong optval_addr
, abi_ulong optlen
)
1746 case TARGET_SOL_SOCKET
:
1749 /* These don't just return a single integer */
1750 case TARGET_SO_LINGER
:
1751 case TARGET_SO_RCVTIMEO
:
1752 case TARGET_SO_SNDTIMEO
:
1753 case TARGET_SO_PEERNAME
:
1755 case TARGET_SO_PEERCRED
: {
1758 struct target_ucred
*tcr
;
1760 if (get_user_u32(len
, optlen
)) {
1761 return -TARGET_EFAULT
;
1764 return -TARGET_EINVAL
;
1768 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
1776 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
1777 return -TARGET_EFAULT
;
1779 __put_user(cr
.pid
, &tcr
->pid
);
1780 __put_user(cr
.uid
, &tcr
->uid
);
1781 __put_user(cr
.gid
, &tcr
->gid
);
1782 unlock_user_struct(tcr
, optval_addr
, 1);
1783 if (put_user_u32(len
, optlen
)) {
1784 return -TARGET_EFAULT
;
1788 /* Options with 'int' argument. */
1789 case TARGET_SO_DEBUG
:
1792 case TARGET_SO_REUSEADDR
:
1793 optname
= SO_REUSEADDR
;
1795 case TARGET_SO_TYPE
:
1798 case TARGET_SO_ERROR
:
1801 case TARGET_SO_DONTROUTE
:
1802 optname
= SO_DONTROUTE
;
1804 case TARGET_SO_BROADCAST
:
1805 optname
= SO_BROADCAST
;
1807 case TARGET_SO_SNDBUF
:
1808 optname
= SO_SNDBUF
;
1810 case TARGET_SO_RCVBUF
:
1811 optname
= SO_RCVBUF
;
1813 case TARGET_SO_KEEPALIVE
:
1814 optname
= SO_KEEPALIVE
;
1816 case TARGET_SO_OOBINLINE
:
1817 optname
= SO_OOBINLINE
;
1819 case TARGET_SO_NO_CHECK
:
1820 optname
= SO_NO_CHECK
;
1822 case TARGET_SO_PRIORITY
:
1823 optname
= SO_PRIORITY
;
1826 case TARGET_SO_BSDCOMPAT
:
1827 optname
= SO_BSDCOMPAT
;
1830 case TARGET_SO_PASSCRED
:
1831 optname
= SO_PASSCRED
;
1833 case TARGET_SO_TIMESTAMP
:
1834 optname
= SO_TIMESTAMP
;
1836 case TARGET_SO_RCVLOWAT
:
1837 optname
= SO_RCVLOWAT
;
1839 case TARGET_SO_ACCEPTCONN
:
1840 optname
= SO_ACCEPTCONN
;
1847 /* TCP options all take an 'int' value. */
1849 if (get_user_u32(len
, optlen
))
1850 return -TARGET_EFAULT
;
1852 return -TARGET_EINVAL
;
1854 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1857 if (optname
== SO_TYPE
) {
1858 val
= host_to_target_sock_type(val
);
1863 if (put_user_u32(val
, optval_addr
))
1864 return -TARGET_EFAULT
;
1866 if (put_user_u8(val
, optval_addr
))
1867 return -TARGET_EFAULT
;
1869 if (put_user_u32(len
, optlen
))
1870 return -TARGET_EFAULT
;
1877 case IP_ROUTER_ALERT
:
1881 case IP_MTU_DISCOVER
:
1887 case IP_MULTICAST_TTL
:
1888 case IP_MULTICAST_LOOP
:
1889 if (get_user_u32(len
, optlen
))
1890 return -TARGET_EFAULT
;
1892 return -TARGET_EINVAL
;
1894 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1897 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
1899 if (put_user_u32(len
, optlen
)
1900 || put_user_u8(val
, optval_addr
))
1901 return -TARGET_EFAULT
;
1903 if (len
> sizeof(int))
1905 if (put_user_u32(len
, optlen
)
1906 || put_user_u32(val
, optval_addr
))
1907 return -TARGET_EFAULT
;
1911 ret
= -TARGET_ENOPROTOOPT
;
1917 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1919 ret
= -TARGET_EOPNOTSUPP
;
1925 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
1926 int count
, int copy
)
1928 struct target_iovec
*target_vec
;
1930 abi_ulong total_len
, max_len
;
1933 bool bad_address
= false;
1939 if (count
< 0 || count
> IOV_MAX
) {
1944 vec
= g_try_new0(struct iovec
, count
);
1950 target_vec
= lock_user(VERIFY_READ
, target_addr
,
1951 count
* sizeof(struct target_iovec
), 1);
1952 if (target_vec
== NULL
) {
1957 /* ??? If host page size > target page size, this will result in a
1958 value larger than what we can actually support. */
1959 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
1962 for (i
= 0; i
< count
; i
++) {
1963 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
1964 abi_long len
= tswapal(target_vec
[i
].iov_len
);
1969 } else if (len
== 0) {
1970 /* Zero length pointer is ignored. */
1971 vec
[i
].iov_base
= 0;
1973 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
1974 /* If the first buffer pointer is bad, this is a fault. But
1975 * subsequent bad buffers will result in a partial write; this
1976 * is realized by filling the vector with null pointers and
1978 if (!vec
[i
].iov_base
) {
1989 if (len
> max_len
- total_len
) {
1990 len
= max_len
- total_len
;
1993 vec
[i
].iov_len
= len
;
1997 unlock_user(target_vec
, target_addr
, 0);
2002 if (tswapal(target_vec
[i
].iov_len
) > 0) {
2003 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
2006 unlock_user(target_vec
, target_addr
, 0);
2013 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
2014 int count
, int copy
)
2016 struct target_iovec
*target_vec
;
2019 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2020 count
* sizeof(struct target_iovec
), 1);
2022 for (i
= 0; i
< count
; i
++) {
2023 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2024 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2028 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
2030 unlock_user(target_vec
, target_addr
, 0);
2036 static inline int target_to_host_sock_type(int *type
)
2039 int target_type
= *type
;
2041 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
2042 case TARGET_SOCK_DGRAM
:
2043 host_type
= SOCK_DGRAM
;
2045 case TARGET_SOCK_STREAM
:
2046 host_type
= SOCK_STREAM
;
2049 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
2052 if (target_type
& TARGET_SOCK_CLOEXEC
) {
2053 #if defined(SOCK_CLOEXEC)
2054 host_type
|= SOCK_CLOEXEC
;
2056 return -TARGET_EINVAL
;
2059 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2060 #if defined(SOCK_NONBLOCK)
2061 host_type
|= SOCK_NONBLOCK
;
2062 #elif !defined(O_NONBLOCK)
2063 return -TARGET_EINVAL
;
2070 /* Try to emulate socket type flags after socket creation. */
2071 static int sock_flags_fixup(int fd
, int target_type
)
2073 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2074 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2075 int flags
= fcntl(fd
, F_GETFL
);
2076 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
2078 return -TARGET_EINVAL
;
2085 static abi_long
packet_target_to_host_sockaddr(void *host_addr
,
2086 abi_ulong target_addr
,
2089 struct sockaddr
*addr
= host_addr
;
2090 struct target_sockaddr
*target_saddr
;
2092 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
2093 if (!target_saddr
) {
2094 return -TARGET_EFAULT
;
2097 memcpy(addr
, target_saddr
, len
);
2098 addr
->sa_family
= tswap16(target_saddr
->sa_family
);
2099 /* spkt_protocol is big-endian */
2101 unlock_user(target_saddr
, target_addr
, 0);
2105 static TargetFdTrans target_packet_trans
= {
2106 .target_to_host_addr
= packet_target_to_host_sockaddr
,
2109 /* do_socket() Must return target values and target errnos. */
2110 static abi_long
do_socket(int domain
, int type
, int protocol
)
2112 int target_type
= type
;
2115 ret
= target_to_host_sock_type(&type
);
2120 if (domain
== PF_NETLINK
)
2121 return -TARGET_EAFNOSUPPORT
;
2123 if (domain
== AF_PACKET
||
2124 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
2125 protocol
= tswap16(protocol
);
2128 ret
= get_errno(socket(domain
, type
, protocol
));
2130 ret
= sock_flags_fixup(ret
, target_type
);
2131 if (type
== SOCK_PACKET
) {
2132 /* Manage an obsolete case :
2133 * if socket type is SOCK_PACKET, bind by name
2135 fd_trans_register(ret
, &target_packet_trans
);
2141 /* do_bind() Must return target values and target errnos. */
2142 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
2148 if ((int)addrlen
< 0) {
2149 return -TARGET_EINVAL
;
2152 addr
= alloca(addrlen
+1);
2154 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
2158 return get_errno(bind(sockfd
, addr
, addrlen
));
2161 /* do_connect() Must return target values and target errnos. */
2162 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
2168 if ((int)addrlen
< 0) {
2169 return -TARGET_EINVAL
;
2172 addr
= alloca(addrlen
+1);
2174 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
2178 return get_errno(connect(sockfd
, addr
, addrlen
));
2181 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2182 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
2183 int flags
, int send
)
2189 abi_ulong target_vec
;
2191 if (msgp
->msg_name
) {
2192 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
2193 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
2194 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
2195 tswapal(msgp
->msg_name
),
2201 msg
.msg_name
= NULL
;
2202 msg
.msg_namelen
= 0;
2204 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
2205 msg
.msg_control
= alloca(msg
.msg_controllen
);
2206 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
2208 count
= tswapal(msgp
->msg_iovlen
);
2209 target_vec
= tswapal(msgp
->msg_iov
);
2210 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
2211 target_vec
, count
, send
);
2213 ret
= -host_to_target_errno(errno
);
2216 msg
.msg_iovlen
= count
;
2220 ret
= target_to_host_cmsg(&msg
, msgp
);
2222 ret
= get_errno(sendmsg(fd
, &msg
, flags
));
2224 ret
= get_errno(recvmsg(fd
, &msg
, flags
));
2225 if (!is_error(ret
)) {
2227 ret
= host_to_target_cmsg(msgp
, &msg
);
2228 if (!is_error(ret
)) {
2229 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
2230 if (msg
.msg_name
!= NULL
) {
2231 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
2232 msg
.msg_name
, msg
.msg_namelen
);
2244 unlock_iovec(vec
, target_vec
, count
, !send
);
2249 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
2250 int flags
, int send
)
2253 struct target_msghdr
*msgp
;
2255 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
2259 return -TARGET_EFAULT
;
2261 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
2262 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
2266 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2267 * so it might not have this *mmsg-specific flag either.
2269 #ifndef MSG_WAITFORONE
2270 #define MSG_WAITFORONE 0x10000
2273 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
2274 unsigned int vlen
, unsigned int flags
,
2277 struct target_mmsghdr
*mmsgp
;
2281 if (vlen
> UIO_MAXIOV
) {
2285 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
2287 return -TARGET_EFAULT
;
2290 for (i
= 0; i
< vlen
; i
++) {
2291 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
2292 if (is_error(ret
)) {
2295 mmsgp
[i
].msg_len
= tswap32(ret
);
2296 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2297 if (flags
& MSG_WAITFORONE
) {
2298 flags
|= MSG_DONTWAIT
;
2302 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
2304 /* Return number of datagrams sent if we sent any at all;
2305 * otherwise return the error.
/* If we don't have a system accept4() then just call accept.
 * The callsites to do_accept4() will ensure that they don't
 * pass a non-zero flags argument in this config.
 */
#ifndef CONFIG_ACCEPT4
static inline int accept4(int sockfd, struct sockaddr *addr,
                          socklen_t *addrlen, int flags)
{
    assert(flags == 0);
    return accept(sockfd, addr, addrlen);
}
#endif
2326 /* do_accept4() Must return target values and target errnos. */
2327 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
2328 abi_ulong target_addrlen_addr
, int flags
)
2335 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
2337 if (target_addr
== 0) {
2338 return get_errno(accept4(fd
, NULL
, NULL
, host_flags
));
2341 /* linux returns EINVAL if addrlen pointer is invalid */
2342 if (get_user_u32(addrlen
, target_addrlen_addr
))
2343 return -TARGET_EINVAL
;
2345 if ((int)addrlen
< 0) {
2346 return -TARGET_EINVAL
;
2349 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2350 return -TARGET_EINVAL
;
2352 addr
= alloca(addrlen
);
2354 ret
= get_errno(accept4(fd
, addr
, &addrlen
, host_flags
));
2355 if (!is_error(ret
)) {
2356 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2357 if (put_user_u32(addrlen
, target_addrlen_addr
))
2358 ret
= -TARGET_EFAULT
;
2363 /* do_getpeername() Must return target values and target errnos. */
2364 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
2365 abi_ulong target_addrlen_addr
)
2371 if (get_user_u32(addrlen
, target_addrlen_addr
))
2372 return -TARGET_EFAULT
;
2374 if ((int)addrlen
< 0) {
2375 return -TARGET_EINVAL
;
2378 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2379 return -TARGET_EFAULT
;
2381 addr
= alloca(addrlen
);
2383 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
2384 if (!is_error(ret
)) {
2385 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2386 if (put_user_u32(addrlen
, target_addrlen_addr
))
2387 ret
= -TARGET_EFAULT
;
2392 /* do_getsockname() Must return target values and target errnos. */
2393 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
2394 abi_ulong target_addrlen_addr
)
2400 if (get_user_u32(addrlen
, target_addrlen_addr
))
2401 return -TARGET_EFAULT
;
2403 if ((int)addrlen
< 0) {
2404 return -TARGET_EINVAL
;
2407 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2408 return -TARGET_EFAULT
;
2410 addr
= alloca(addrlen
);
2412 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
2413 if (!is_error(ret
)) {
2414 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2415 if (put_user_u32(addrlen
, target_addrlen_addr
))
2416 ret
= -TARGET_EFAULT
;
2421 /* do_socketpair() Must return target values and target errnos. */
2422 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
2423 abi_ulong target_tab_addr
)
2428 target_to_host_sock_type(&type
);
2430 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
2431 if (!is_error(ret
)) {
2432 if (put_user_s32(tab
[0], target_tab_addr
)
2433 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
2434 ret
= -TARGET_EFAULT
;
2439 /* do_sendto() Must return target values and target errnos. */
2440 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
2441 abi_ulong target_addr
, socklen_t addrlen
)
2447 if ((int)addrlen
< 0) {
2448 return -TARGET_EINVAL
;
2451 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
2453 return -TARGET_EFAULT
;
2455 addr
= alloca(addrlen
+1);
2456 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
2458 unlock_user(host_msg
, msg
, 0);
2461 ret
= get_errno(sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
2463 ret
= get_errno(send(fd
, host_msg
, len
, flags
));
2465 unlock_user(host_msg
, msg
, 0);
2469 /* do_recvfrom() Must return target values and target errnos. */
2470 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
2471 abi_ulong target_addr
,
2472 abi_ulong target_addrlen
)
2479 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
2481 return -TARGET_EFAULT
;
2483 if (get_user_u32(addrlen
, target_addrlen
)) {
2484 ret
= -TARGET_EFAULT
;
2487 if ((int)addrlen
< 0) {
2488 ret
= -TARGET_EINVAL
;
2491 addr
= alloca(addrlen
);
2492 ret
= get_errno(recvfrom(fd
, host_msg
, len
, flags
, addr
, &addrlen
));
2494 addr
= NULL
; /* To keep compiler quiet. */
2495 ret
= get_errno(qemu_recv(fd
, host_msg
, len
, flags
));
2497 if (!is_error(ret
)) {
2499 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2500 if (put_user_u32(addrlen
, target_addrlen
)) {
2501 ret
= -TARGET_EFAULT
;
2505 unlock_user(host_msg
, msg
, len
);
2508 unlock_user(host_msg
, msg
, 0);
#ifdef TARGET_NR_socketcall
/* do_socketcall() Must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned ac[] = { /* number of arguments per call */
        [SOCKOP_socket] = 3,      /* domain, type, protocol */
        [SOCKOP_bind] = 3,        /* sockfd, addr, addrlen */
        [SOCKOP_connect] = 3,     /* sockfd, addr, addrlen */
        [SOCKOP_listen] = 2,      /* sockfd, backlog */
        [SOCKOP_accept] = 3,      /* sockfd, addr, addrlen */
        [SOCKOP_accept4] = 4,     /* sockfd, addr, addrlen, flags */
        [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_socketpair] = 4,  /* domain, type, protocol, tab */
        [SOCKOP_send] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_recv] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_sendto] = 6,      /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_recvfrom] = 6,    /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_shutdown] = 2,    /* sockfd, how */
        [SOCKOP_sendmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_recvmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_sendmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_recvmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_setsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
        [SOCKOP_getsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
    };
    abi_long a[6]; /* max 6 args */

    /* first, collect the arguments in a[] according to ac[] */
    if (num >= 0 && num < ARRAY_SIZE(ac)) {
        unsigned i;
        assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
        for (i = 0; i < ac[num]; ++i) {
            if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
                return -TARGET_EFAULT;
            }
        }
    }

    /* now when we have the args, actually handle the call */
    switch (num) {
    case SOCKOP_socket: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case SOCKOP_bind: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case SOCKOP_connect: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case SOCKOP_listen: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case SOCKOP_accept: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case SOCKOP_getsockname: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case SOCKOP_getpeername: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case SOCKOP_socketpair: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case SOCKOP_send: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_recv: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_shutdown: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case SOCKOP_sendmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case SOCKOP_recvmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case SOCKOP_sendmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    case SOCKOP_recvmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_ENOSYS;
    }
}
#endif
2601 #define N_SHM_REGIONS 32
2603 static struct shm_region
{
2607 } shm_regions
[N_SHM_REGIONS
];
2609 struct target_semid_ds
2611 struct target_ipc_perm sem_perm
;
2612 abi_ulong sem_otime
;
2613 #if !defined(TARGET_PPC64)
2614 abi_ulong __unused1
;
2616 abi_ulong sem_ctime
;
2617 #if !defined(TARGET_PPC64)
2618 abi_ulong __unused2
;
2620 abi_ulong sem_nsems
;
2621 abi_ulong __unused3
;
2622 abi_ulong __unused4
;
2625 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
2626 abi_ulong target_addr
)
2628 struct target_ipc_perm
*target_ip
;
2629 struct target_semid_ds
*target_sd
;
2631 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2632 return -TARGET_EFAULT
;
2633 target_ip
= &(target_sd
->sem_perm
);
2634 host_ip
->__key
= tswap32(target_ip
->__key
);
2635 host_ip
->uid
= tswap32(target_ip
->uid
);
2636 host_ip
->gid
= tswap32(target_ip
->gid
);
2637 host_ip
->cuid
= tswap32(target_ip
->cuid
);
2638 host_ip
->cgid
= tswap32(target_ip
->cgid
);
2639 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2640 host_ip
->mode
= tswap32(target_ip
->mode
);
2642 host_ip
->mode
= tswap16(target_ip
->mode
);
2644 #if defined(TARGET_PPC)
2645 host_ip
->__seq
= tswap32(target_ip
->__seq
);
2647 host_ip
->__seq
= tswap16(target_ip
->__seq
);
2649 unlock_user_struct(target_sd
, target_addr
, 0);
2653 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
2654 struct ipc_perm
*host_ip
)
2656 struct target_ipc_perm
*target_ip
;
2657 struct target_semid_ds
*target_sd
;
2659 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2660 return -TARGET_EFAULT
;
2661 target_ip
= &(target_sd
->sem_perm
);
2662 target_ip
->__key
= tswap32(host_ip
->__key
);
2663 target_ip
->uid
= tswap32(host_ip
->uid
);
2664 target_ip
->gid
= tswap32(host_ip
->gid
);
2665 target_ip
->cuid
= tswap32(host_ip
->cuid
);
2666 target_ip
->cgid
= tswap32(host_ip
->cgid
);
2667 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2668 target_ip
->mode
= tswap32(host_ip
->mode
);
2670 target_ip
->mode
= tswap16(host_ip
->mode
);
2672 #if defined(TARGET_PPC)
2673 target_ip
->__seq
= tswap32(host_ip
->__seq
);
2675 target_ip
->__seq
= tswap16(host_ip
->__seq
);
2677 unlock_user_struct(target_sd
, target_addr
, 1);
2681 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
2682 abi_ulong target_addr
)
2684 struct target_semid_ds
*target_sd
;
2686 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2687 return -TARGET_EFAULT
;
2688 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
2689 return -TARGET_EFAULT
;
2690 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
2691 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
2692 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
2693 unlock_user_struct(target_sd
, target_addr
, 0);
2697 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
2698 struct semid_ds
*host_sd
)
2700 struct target_semid_ds
*target_sd
;
2702 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2703 return -TARGET_EFAULT
;
2704 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
2705 return -TARGET_EFAULT
;
2706 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
2707 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
2708 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
2709 unlock_user_struct(target_sd
, target_addr
, 1);
2713 struct target_seminfo
{
2726 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
2727 struct seminfo
*host_seminfo
)
2729 struct target_seminfo
*target_seminfo
;
2730 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
2731 return -TARGET_EFAULT
;
2732 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
2733 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
2734 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
2735 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
2736 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
2737 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
2738 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
2739 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
2740 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
2741 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
2742 unlock_user_struct(target_seminfo
, target_addr
, 1);
2748 struct semid_ds
*buf
;
2749 unsigned short *array
;
2750 struct seminfo
*__buf
;
2753 union target_semun
{
2760 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
2761 abi_ulong target_addr
)
2764 unsigned short *array
;
2766 struct semid_ds semid_ds
;
2769 semun
.buf
= &semid_ds
;
2771 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
2773 return get_errno(ret
);
2775 nsems
= semid_ds
.sem_nsems
;
2777 *host_array
= g_try_new(unsigned short, nsems
);
2779 return -TARGET_ENOMEM
;
2781 array
= lock_user(VERIFY_READ
, target_addr
,
2782 nsems
*sizeof(unsigned short), 1);
2784 g_free(*host_array
);
2785 return -TARGET_EFAULT
;
2788 for(i
=0; i
<nsems
; i
++) {
2789 __get_user((*host_array
)[i
], &array
[i
]);
2791 unlock_user(array
, target_addr
, 0);
2796 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
2797 unsigned short **host_array
)
2800 unsigned short *array
;
2802 struct semid_ds semid_ds
;
2805 semun
.buf
= &semid_ds
;
2807 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
2809 return get_errno(ret
);
2811 nsems
= semid_ds
.sem_nsems
;
2813 array
= lock_user(VERIFY_WRITE
, target_addr
,
2814 nsems
*sizeof(unsigned short), 0);
2816 return -TARGET_EFAULT
;
2818 for(i
=0; i
<nsems
; i
++) {
2819 __put_user((*host_array
)[i
], &array
[i
]);
2821 g_free(*host_array
);
2822 unlock_user(array
, target_addr
, 1);
2827 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
2828 abi_ulong target_arg
)
2830 union target_semun target_su
= { .buf
= target_arg
};
2832 struct semid_ds dsarg
;
2833 unsigned short *array
= NULL
;
2834 struct seminfo seminfo
;
2835 abi_long ret
= -TARGET_EINVAL
;
2842 /* In 64 bit cross-endian situations, we will erroneously pick up
2843 * the wrong half of the union for the "val" element. To rectify
2844 * this, the entire 8-byte structure is byteswapped, followed by
2845 * a swap of the 4 byte val field. In other cases, the data is
2846 * already in proper host byte order. */
2847 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
2848 target_su
.buf
= tswapal(target_su
.buf
);
2849 arg
.val
= tswap32(target_su
.val
);
2851 arg
.val
= target_su
.val
;
2853 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2857 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
2861 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2862 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
2869 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
2873 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2874 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
2880 arg
.__buf
= &seminfo
;
2881 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2882 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
2890 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
2897 struct target_sembuf
{
2898 unsigned short sem_num
;
2903 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
2904 abi_ulong target_addr
,
2907 struct target_sembuf
*target_sembuf
;
2910 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
2911 nsops
*sizeof(struct target_sembuf
), 1);
2913 return -TARGET_EFAULT
;
2915 for(i
=0; i
<nsops
; i
++) {
2916 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
2917 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
2918 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
2921 unlock_user(target_sembuf
, target_addr
, 0);
2926 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
2928 struct sembuf sops
[nsops
];
2930 if (target_to_host_sembuf(sops
, ptr
, nsops
))
2931 return -TARGET_EFAULT
;
2933 return get_errno(semop(semid
, sops
, nsops
));
2936 struct target_msqid_ds
2938 struct target_ipc_perm msg_perm
;
2939 abi_ulong msg_stime
;
2940 #if TARGET_ABI_BITS == 32
2941 abi_ulong __unused1
;
2943 abi_ulong msg_rtime
;
2944 #if TARGET_ABI_BITS == 32
2945 abi_ulong __unused2
;
2947 abi_ulong msg_ctime
;
2948 #if TARGET_ABI_BITS == 32
2949 abi_ulong __unused3
;
2951 abi_ulong __msg_cbytes
;
2953 abi_ulong msg_qbytes
;
2954 abi_ulong msg_lspid
;
2955 abi_ulong msg_lrpid
;
2956 abi_ulong __unused4
;
2957 abi_ulong __unused5
;
2960 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
2961 abi_ulong target_addr
)
2963 struct target_msqid_ds
*target_md
;
2965 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
2966 return -TARGET_EFAULT
;
2967 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
2968 return -TARGET_EFAULT
;
2969 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
2970 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
2971 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
2972 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
2973 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
2974 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
2975 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
2976 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
2977 unlock_user_struct(target_md
, target_addr
, 0);
2981 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
2982 struct msqid_ds
*host_md
)
2984 struct target_msqid_ds
*target_md
;
2986 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
2987 return -TARGET_EFAULT
;
2988 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
2989 return -TARGET_EFAULT
;
2990 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
2991 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
2992 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
2993 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
2994 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
2995 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
2996 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
2997 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
2998 unlock_user_struct(target_md
, target_addr
, 1);
3002 struct target_msginfo
{
3010 unsigned short int msgseg
;
3013 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
3014 struct msginfo
*host_msginfo
)
3016 struct target_msginfo
*target_msginfo
;
3017 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
3018 return -TARGET_EFAULT
;
3019 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
3020 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
3021 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
3022 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
3023 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
3024 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
3025 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
3026 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
3027 unlock_user_struct(target_msginfo
, target_addr
, 1);
3031 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
3033 struct msqid_ds dsarg
;
3034 struct msginfo msginfo
;
3035 abi_long ret
= -TARGET_EINVAL
;
3043 if (target_to_host_msqid_ds(&dsarg
,ptr
))
3044 return -TARGET_EFAULT
;
3045 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
3046 if (host_to_target_msqid_ds(ptr
,&dsarg
))
3047 return -TARGET_EFAULT
;
3050 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
3054 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
3055 if (host_to_target_msginfo(ptr
, &msginfo
))
3056 return -TARGET_EFAULT
;
3063 struct target_msgbuf
{
3068 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
3069 ssize_t msgsz
, int msgflg
)
3071 struct target_msgbuf
*target_mb
;
3072 struct msgbuf
*host_mb
;
3076 return -TARGET_EINVAL
;
3079 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
3080 return -TARGET_EFAULT
;
3081 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
3083 unlock_user_struct(target_mb
, msgp
, 0);
3084 return -TARGET_ENOMEM
;
3086 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
3087 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
3088 ret
= get_errno(msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
3090 unlock_user_struct(target_mb
, msgp
, 0);
3095 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
3096 unsigned int msgsz
, abi_long msgtyp
,
3099 struct target_msgbuf
*target_mb
;
3101 struct msgbuf
*host_mb
;
3104 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
3105 return -TARGET_EFAULT
;
3107 host_mb
= g_malloc(msgsz
+sizeof(long));
3108 ret
= get_errno(msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
3111 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
3112 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
3113 if (!target_mtext
) {
3114 ret
= -TARGET_EFAULT
;
3117 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
3118 unlock_user(target_mtext
, target_mtext_addr
, ret
);
3121 target_mb
->mtype
= tswapal(host_mb
->mtype
);
3125 unlock_user_struct(target_mb
, msgp
, 1);
3130 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
3131 abi_ulong target_addr
)
3133 struct target_shmid_ds
*target_sd
;
3135 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3136 return -TARGET_EFAULT
;
3137 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
3138 return -TARGET_EFAULT
;
3139 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
3140 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
3141 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
3142 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
3143 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
3144 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
3145 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
3146 unlock_user_struct(target_sd
, target_addr
, 0);
3150 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
3151 struct shmid_ds
*host_sd
)
3153 struct target_shmid_ds
*target_sd
;
3155 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3156 return -TARGET_EFAULT
;
3157 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
3158 return -TARGET_EFAULT
;
3159 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
3160 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
3161 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
3162 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
3163 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
3164 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
3165 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
3166 unlock_user_struct(target_sd
, target_addr
, 1);
3170 struct target_shminfo
{
3178 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
3179 struct shminfo
*host_shminfo
)
3181 struct target_shminfo
*target_shminfo
;
3182 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
3183 return -TARGET_EFAULT
;
3184 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
3185 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
3186 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
3187 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
3188 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
3189 unlock_user_struct(target_shminfo
, target_addr
, 1);
3193 struct target_shm_info
{
3198 abi_ulong swap_attempts
;
3199 abi_ulong swap_successes
;
3202 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
3203 struct shm_info
*host_shm_info
)
3205 struct target_shm_info
*target_shm_info
;
3206 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
3207 return -TARGET_EFAULT
;
3208 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
3209 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
3210 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
3211 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
3212 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
3213 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
3214 unlock_user_struct(target_shm_info
, target_addr
, 1);
3218 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
3220 struct shmid_ds dsarg
;
3221 struct shminfo shminfo
;
3222 struct shm_info shm_info
;
3223 abi_long ret
= -TARGET_EINVAL
;
3231 if (target_to_host_shmid_ds(&dsarg
, buf
))
3232 return -TARGET_EFAULT
;
3233 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
3234 if (host_to_target_shmid_ds(buf
, &dsarg
))
3235 return -TARGET_EFAULT
;
3238 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
3239 if (host_to_target_shminfo(buf
, &shminfo
))
3240 return -TARGET_EFAULT
;
3243 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
3244 if (host_to_target_shm_info(buf
, &shm_info
))
3245 return -TARGET_EFAULT
;
3250 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
3257 static inline abi_ulong
do_shmat(int shmid
, abi_ulong shmaddr
, int shmflg
)
3261 struct shmid_ds shm_info
;
3264 /* find out the length of the shared memory segment */
3265 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
3266 if (is_error(ret
)) {
3267 /* can't get length, bail out */
3274 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
3276 abi_ulong mmap_start
;
3278 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
3280 if (mmap_start
== -1) {
3282 host_raddr
= (void *)-1;
3284 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
3287 if (host_raddr
== (void *)-1) {
3289 return get_errno((long)host_raddr
);
3291 raddr
=h2g((unsigned long)host_raddr
);
3293 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
3294 PAGE_VALID
| PAGE_READ
|
3295 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
3297 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
3298 if (!shm_regions
[i
].in_use
) {
3299 shm_regions
[i
].in_use
= true;
3300 shm_regions
[i
].start
= raddr
;
3301 shm_regions
[i
].size
= shm_info
.shm_segsz
;
3311 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
3315 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
3316 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
3317 shm_regions
[i
].in_use
= false;
3318 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
3323 return get_errno(shmdt(g2h(shmaddr
)));
3326 #ifdef TARGET_NR_ipc
3327 /* ??? This only works with linear mappings. */
3328 /* do_ipc() must return target values and target errnos. */
3329 static abi_long
do_ipc(unsigned int call
, abi_long first
,
3330 abi_long second
, abi_long third
,
3331 abi_long ptr
, abi_long fifth
)
3336 version
= call
>> 16;
3341 ret
= do_semop(first
, ptr
, second
);
3345 ret
= get_errno(semget(first
, second
, third
));
3348 case IPCOP_semctl
: {
3349 /* The semun argument to semctl is passed by value, so dereference the
3352 get_user_ual(atptr
, ptr
);
3353 ret
= do_semctl(first
, second
, third
, atptr
);
3358 ret
= get_errno(msgget(first
, second
));
3362 ret
= do_msgsnd(first
, ptr
, second
, third
);
3366 ret
= do_msgctl(first
, second
, ptr
);
3373 struct target_ipc_kludge
{
3378 if (!lock_user_struct(VERIFY_READ
, tmp
, ptr
, 1)) {
3379 ret
= -TARGET_EFAULT
;
3383 ret
= do_msgrcv(first
, tswapal(tmp
->msgp
), second
, tswapal(tmp
->msgtyp
), third
);
3385 unlock_user_struct(tmp
, ptr
, 0);
3389 ret
= do_msgrcv(first
, ptr
, second
, fifth
, third
);
3398 raddr
= do_shmat(first
, ptr
, second
);
3399 if (is_error(raddr
))
3400 return get_errno(raddr
);
3401 if (put_user_ual(raddr
, third
))
3402 return -TARGET_EFAULT
;
3406 ret
= -TARGET_EINVAL
;
3411 ret
= do_shmdt(ptr
);
3415 /* IPC_* flag values are the same on all linux platforms */
3416 ret
= get_errno(shmget(first
, second
, third
));
3419 /* IPC_* and SHM_* command values are the same on all linux platforms */
3421 ret
= do_shmctl(first
, second
, ptr
);
3424 gemu_log("Unsupported ipc call: %d (version %d)\n", call
, version
);
3425 ret
= -TARGET_ENOSYS
;
3432 /* kernel structure types definitions */
3434 #define STRUCT(name, ...) STRUCT_ ## name,
3435 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3437 #include "syscall_types.h"
3441 #undef STRUCT_SPECIAL
3443 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3444 #define STRUCT_SPECIAL(name)
3445 #include "syscall_types.h"
3447 #undef STRUCT_SPECIAL
3449 typedef struct IOCTLEntry IOCTLEntry
;
3451 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3452 int fd
, int cmd
, abi_long arg
);
3456 unsigned int host_cmd
;
3459 do_ioctl_fn
*do_ioctl
;
3460 const argtype arg_type
[5];
3463 #define IOC_R 0x0001
3464 #define IOC_W 0x0002
3465 #define IOC_RW (IOC_R | IOC_W)
3467 #define MAX_STRUCT_SIZE 4096
3469 #ifdef CONFIG_FIEMAP
3470 /* So fiemap access checks don't overflow on 32 bit systems.
3471 * This is very slightly smaller than the limit imposed by
3472 * the underlying kernel.
3474 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3475 / sizeof(struct fiemap_extent))
3477 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3478 int fd
, int cmd
, abi_long arg
)
3480 /* The parameter for this ioctl is a struct fiemap followed
3481 * by an array of struct fiemap_extent whose size is set
3482 * in fiemap->fm_extent_count. The array is filled in by the
3485 int target_size_in
, target_size_out
;
3487 const argtype
*arg_type
= ie
->arg_type
;
3488 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
3491 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
3495 assert(arg_type
[0] == TYPE_PTR
);
3496 assert(ie
->access
== IOC_RW
);
3498 target_size_in
= thunk_type_size(arg_type
, 0);
3499 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
3501 return -TARGET_EFAULT
;
3503 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3504 unlock_user(argptr
, arg
, 0);
3505 fm
= (struct fiemap
*)buf_temp
;
3506 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
3507 return -TARGET_EINVAL
;
3510 outbufsz
= sizeof (*fm
) +
3511 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
3513 if (outbufsz
> MAX_STRUCT_SIZE
) {
3514 /* We can't fit all the extents into the fixed size buffer.
3515 * Allocate one that is large enough and use it instead.
3517 fm
= g_try_malloc(outbufsz
);
3519 return -TARGET_ENOMEM
;
3521 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
3524 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, fm
));
3525 if (!is_error(ret
)) {
3526 target_size_out
= target_size_in
;
3527 /* An extent_count of 0 means we were only counting the extents
3528 * so there are no structs to copy
3530 if (fm
->fm_extent_count
!= 0) {
3531 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
3533 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
3535 ret
= -TARGET_EFAULT
;
3537 /* Convert the struct fiemap */
3538 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
3539 if (fm
->fm_extent_count
!= 0) {
3540 p
= argptr
+ target_size_in
;
3541 /* ...and then all the struct fiemap_extents */
3542 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
3543 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
3548 unlock_user(argptr
, arg
, target_size_out
);
3558 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3559 int fd
, int cmd
, abi_long arg
)
3561 const argtype
*arg_type
= ie
->arg_type
;
3565 struct ifconf
*host_ifconf
;
3567 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
3568 int target_ifreq_size
;
3573 abi_long target_ifc_buf
;
3577 assert(arg_type
[0] == TYPE_PTR
);
3578 assert(ie
->access
== IOC_RW
);
3581 target_size
= thunk_type_size(arg_type
, 0);
3583 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3585 return -TARGET_EFAULT
;
3586 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3587 unlock_user(argptr
, arg
, 0);
3589 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
3590 target_ifc_len
= host_ifconf
->ifc_len
;
3591 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
3593 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
3594 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
3595 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
3597 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
3598 if (outbufsz
> MAX_STRUCT_SIZE
) {
3599 /* We can't fit all the extents into the fixed size buffer.
3600 * Allocate one that is large enough and use it instead.
3602 host_ifconf
= malloc(outbufsz
);
3604 return -TARGET_ENOMEM
;
3606 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
3609 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
3611 host_ifconf
->ifc_len
= host_ifc_len
;
3612 host_ifconf
->ifc_buf
= host_ifc_buf
;
3614 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, host_ifconf
));
3615 if (!is_error(ret
)) {
3616 /* convert host ifc_len to target ifc_len */
3618 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
3619 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
3620 host_ifconf
->ifc_len
= target_ifc_len
;
3622 /* restore target ifc_buf */
3624 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
3626 /* copy struct ifconf to target user */
3628 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3630 return -TARGET_EFAULT
;
3631 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
3632 unlock_user(argptr
, arg
, target_size
);
3634 /* copy ifreq[] to target user */
3636 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
3637 for (i
= 0; i
< nb_ifreq
; i
++) {
3638 thunk_convert(argptr
+ i
* target_ifreq_size
,
3639 host_ifc_buf
+ i
* sizeof(struct ifreq
),
3640 ifreq_arg_type
, THUNK_TARGET
);
3642 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
3652 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
3653 int cmd
, abi_long arg
)
3656 struct dm_ioctl
*host_dm
;
3657 abi_long guest_data
;
3658 uint32_t guest_data_size
;
3660 const argtype
*arg_type
= ie
->arg_type
;
3662 void *big_buf
= NULL
;
3666 target_size
= thunk_type_size(arg_type
, 0);
3667 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3669 ret
= -TARGET_EFAULT
;
3672 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3673 unlock_user(argptr
, arg
, 0);
3675 /* buf_temp is too small, so fetch things into a bigger buffer */
3676 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
3677 memcpy(big_buf
, buf_temp
, target_size
);
3681 guest_data
= arg
+ host_dm
->data_start
;
3682 if ((guest_data
- arg
) < 0) {
3686 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
3687 host_data
= (char*)host_dm
+ host_dm
->data_start
;
3689 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
3690 switch (ie
->host_cmd
) {
3692 case DM_LIST_DEVICES
:
3695 case DM_DEV_SUSPEND
:
3698 case DM_TABLE_STATUS
:
3699 case DM_TABLE_CLEAR
:
3701 case DM_LIST_VERSIONS
:
3705 case DM_DEV_SET_GEOMETRY
:
3706 /* data contains only strings */
3707 memcpy(host_data
, argptr
, guest_data_size
);
3710 memcpy(host_data
, argptr
, guest_data_size
);
3711 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
3715 void *gspec
= argptr
;
3716 void *cur_data
= host_data
;
3717 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
3718 int spec_size
= thunk_type_size(arg_type
, 0);
3721 for (i
= 0; i
< host_dm
->target_count
; i
++) {
3722 struct dm_target_spec
*spec
= cur_data
;
3726 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
3727 slen
= strlen((char*)gspec
+ spec_size
) + 1;
3729 spec
->next
= sizeof(*spec
) + slen
;
3730 strcpy((char*)&spec
[1], gspec
+ spec_size
);
3732 cur_data
+= spec
->next
;
3737 ret
= -TARGET_EINVAL
;
3738 unlock_user(argptr
, guest_data
, 0);
3741 unlock_user(argptr
, guest_data
, 0);
3743 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3744 if (!is_error(ret
)) {
3745 guest_data
= arg
+ host_dm
->data_start
;
3746 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
3747 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
3748 switch (ie
->host_cmd
) {
3753 case DM_DEV_SUSPEND
:
3756 case DM_TABLE_CLEAR
:
3758 case DM_DEV_SET_GEOMETRY
:
3759 /* no return data */
3761 case DM_LIST_DEVICES
:
3763 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
3764 uint32_t remaining_data
= guest_data_size
;
3765 void *cur_data
= argptr
;
3766 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
3767 int nl_size
= 12; /* can't use thunk_size due to alignment */
3770 uint32_t next
= nl
->next
;
3772 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
3774 if (remaining_data
< nl
->next
) {
3775 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3778 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
3779 strcpy(cur_data
+ nl_size
, nl
->name
);
3780 cur_data
+= nl
->next
;
3781 remaining_data
-= nl
->next
;
3785 nl
= (void*)nl
+ next
;
3790 case DM_TABLE_STATUS
:
3792 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
3793 void *cur_data
= argptr
;
3794 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
3795 int spec_size
= thunk_type_size(arg_type
, 0);
3798 for (i
= 0; i
< host_dm
->target_count
; i
++) {
3799 uint32_t next
= spec
->next
;
3800 int slen
= strlen((char*)&spec
[1]) + 1;
3801 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
3802 if (guest_data_size
< spec
->next
) {
3803 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3806 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
3807 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
3808 cur_data
= argptr
+ spec
->next
;
3809 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
3815 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
3816 int count
= *(uint32_t*)hdata
;
3817 uint64_t *hdev
= hdata
+ 8;
3818 uint64_t *gdev
= argptr
+ 8;
3821 *(uint32_t*)argptr
= tswap32(count
);
3822 for (i
= 0; i
< count
; i
++) {
3823 *gdev
= tswap64(*hdev
);
3829 case DM_LIST_VERSIONS
:
3831 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
3832 uint32_t remaining_data
= guest_data_size
;
3833 void *cur_data
= argptr
;
3834 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
3835 int vers_size
= thunk_type_size(arg_type
, 0);
3838 uint32_t next
= vers
->next
;
3840 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
3842 if (remaining_data
< vers
->next
) {
3843 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3846 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
3847 strcpy(cur_data
+ vers_size
, vers
->name
);
3848 cur_data
+= vers
->next
;
3849 remaining_data
-= vers
->next
;
3853 vers
= (void*)vers
+ next
;
3858 unlock_user(argptr
, guest_data
, 0);
3859 ret
= -TARGET_EINVAL
;
3862 unlock_user(argptr
, guest_data
, guest_data_size
);
3864 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3866 ret
= -TARGET_EFAULT
;
3869 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3870 unlock_user(argptr
, arg
, target_size
);
3877 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
3878 int cmd
, abi_long arg
)
3882 const argtype
*arg_type
= ie
->arg_type
;
3883 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
3886 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
3887 struct blkpg_partition host_part
;
3889 /* Read and convert blkpg */
3891 target_size
= thunk_type_size(arg_type
, 0);
3892 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3894 ret
= -TARGET_EFAULT
;
3897 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3898 unlock_user(argptr
, arg
, 0);
3900 switch (host_blkpg
->op
) {
3901 case BLKPG_ADD_PARTITION
:
3902 case BLKPG_DEL_PARTITION
:
3903 /* payload is struct blkpg_partition */
3906 /* Unknown opcode */
3907 ret
= -TARGET_EINVAL
;
3911 /* Read and convert blkpg->data */
3912 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
3913 target_size
= thunk_type_size(part_arg_type
, 0);
3914 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3916 ret
= -TARGET_EFAULT
;
3919 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
3920 unlock_user(argptr
, arg
, 0);
3922 /* Swizzle the data pointer to our local copy and call! */
3923 host_blkpg
->data
= &host_part
;
3924 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, host_blkpg
));
3930 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3931 int fd
, int cmd
, abi_long arg
)
3933 const argtype
*arg_type
= ie
->arg_type
;
3934 const StructEntry
*se
;
3935 const argtype
*field_types
;
3936 const int *dst_offsets
, *src_offsets
;
3939 abi_ulong
*target_rt_dev_ptr
;
3940 unsigned long *host_rt_dev_ptr
;
3944 assert(ie
->access
== IOC_W
);
3945 assert(*arg_type
== TYPE_PTR
);
3947 assert(*arg_type
== TYPE_STRUCT
);
3948 target_size
= thunk_type_size(arg_type
, 0);
3949 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3951 return -TARGET_EFAULT
;
3954 assert(*arg_type
== (int)STRUCT_rtentry
);
3955 se
= struct_entries
+ *arg_type
++;
3956 assert(se
->convert
[0] == NULL
);
3957 /* convert struct here to be able to catch rt_dev string */
3958 field_types
= se
->field_types
;
3959 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
3960 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
3961 for (i
= 0; i
< se
->nb_fields
; i
++) {
3962 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
3963 assert(*field_types
== TYPE_PTRVOID
);
3964 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
3965 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
3966 if (*target_rt_dev_ptr
!= 0) {
3967 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
3968 tswapal(*target_rt_dev_ptr
));
3969 if (!*host_rt_dev_ptr
) {
3970 unlock_user(argptr
, arg
, 0);
3971 return -TARGET_EFAULT
;
3974 *host_rt_dev_ptr
= 0;
3979 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
3980 argptr
+ src_offsets
[i
],
3981 field_types
, THUNK_HOST
);
3983 unlock_user(argptr
, arg
, 0);
3985 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3986 if (*host_rt_dev_ptr
!= 0) {
3987 unlock_user((void *)*host_rt_dev_ptr
,
3988 *target_rt_dev_ptr
, 0);
3993 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3994 int fd
, int cmd
, abi_long arg
)
3996 int sig
= target_to_host_signal(arg
);
3997 return get_errno(ioctl(fd
, ie
->host_cmd
, sig
));
4000 static IOCTLEntry ioctl_entries
[] = {
4001 #define IOCTL(cmd, access, ...) \
4002 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
4003 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
4004 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
4009 /* ??? Implement proper locking for ioctls. */
4010 /* do_ioctl() Must return target values and target errnos. */
4011 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
4013 const IOCTLEntry
*ie
;
4014 const argtype
*arg_type
;
4016 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
4022 if (ie
->target_cmd
== 0) {
4023 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
4024 return -TARGET_ENOSYS
;
4026 if (ie
->target_cmd
== cmd
)
4030 arg_type
= ie
->arg_type
;
4032 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
4035 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
4038 switch(arg_type
[0]) {
4041 ret
= get_errno(ioctl(fd
, ie
->host_cmd
));
4045 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, arg
));
4049 target_size
= thunk_type_size(arg_type
, 0);
4050 switch(ie
->access
) {
4052 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
4053 if (!is_error(ret
)) {
4054 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4056 return -TARGET_EFAULT
;
4057 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
4058 unlock_user(argptr
, arg
, target_size
);
4062 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4064 return -TARGET_EFAULT
;
4065 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4066 unlock_user(argptr
, arg
, 0);
4067 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
4071 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4073 return -TARGET_EFAULT
;
4074 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4075 unlock_user(argptr
, arg
, 0);
4076 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
4077 if (!is_error(ret
)) {
4078 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4080 return -TARGET_EFAULT
;
4081 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
4082 unlock_user(argptr
, arg
, target_size
);
4088 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
4089 (long)cmd
, arg_type
[0]);
4090 ret
= -TARGET_ENOSYS
;
4096 static const bitmask_transtbl iflag_tbl
[] = {
4097 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
4098 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
4099 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
4100 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
4101 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
4102 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
4103 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
4104 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
4105 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
4106 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
4107 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
4108 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
4109 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
4110 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
4114 static const bitmask_transtbl oflag_tbl
[] = {
4115 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
4116 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
4117 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
4118 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
4119 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
4120 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
4121 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
4122 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
4123 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
4124 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
4125 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
4126 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
4127 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
4128 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
4129 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
4130 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
4131 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
4132 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
4133 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
4134 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
4135 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
4136 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
4137 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
4138 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
4142 static const bitmask_transtbl cflag_tbl
[] = {
4143 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
4144 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
4145 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
4146 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
4147 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
4148 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
4149 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
4150 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
4151 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
4152 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
4153 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
4154 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
4155 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
4156 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
4157 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
4158 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
4159 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
4160 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
4161 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
4162 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
4163 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
4164 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
4165 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
4166 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
4167 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
4168 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
4169 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
4170 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
4171 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
4172 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
4173 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
4177 static const bitmask_transtbl lflag_tbl
[] = {
4178 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
4179 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
4180 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
4181 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
4182 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
4183 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
4184 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
4185 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
4186 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
4187 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
4188 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
4189 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
4190 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
4191 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
4192 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
4196 static void target_to_host_termios (void *dst
, const void *src
)
4198 struct host_termios
*host
= dst
;
4199 const struct target_termios
*target
= src
;
4202 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
4204 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
4206 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
4208 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
4209 host
->c_line
= target
->c_line
;
4211 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
4212 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
4213 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
4214 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
4215 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
4216 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
4217 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
4218 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
4219 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
4220 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
4221 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
4222 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
4223 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
4224 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
4225 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
4226 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
4227 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
4228 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
4231 static void host_to_target_termios (void *dst
, const void *src
)
4233 struct target_termios
*target
= dst
;
4234 const struct host_termios
*host
= src
;
4237 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
4239 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
4241 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
4243 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
4244 target
->c_line
= host
->c_line
;
4246 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
4247 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
4248 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
4249 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
4250 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
4251 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
4252 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
4253 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
4254 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
4255 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
4256 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
4257 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
4258 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
4259 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
4260 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
4261 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
4262 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
4263 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
4266 static const StructEntry struct_termios_def
= {
4267 .convert
= { host_to_target_termios
, target_to_host_termios
},
4268 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
4269 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
4272 static bitmask_transtbl mmap_flags_tbl
[] = {
4273 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
4274 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
4275 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
4276 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
, MAP_ANONYMOUS
, MAP_ANONYMOUS
},
4277 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
, MAP_GROWSDOWN
, MAP_GROWSDOWN
},
4278 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
, MAP_DENYWRITE
, MAP_DENYWRITE
},
4279 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
, MAP_EXECUTABLE
, MAP_EXECUTABLE
},
4280 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
4281 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
, MAP_NORESERVE
,
4286 #if defined(TARGET_I386)
4288 /* NOTE: there is really one LDT for all the threads */
4289 static uint8_t *ldt_table
;
4291 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
4298 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
4299 if (size
> bytecount
)
4301 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
4303 return -TARGET_EFAULT
;
4304 /* ??? Should this by byteswapped? */
4305 memcpy(p
, ldt_table
, size
);
4306 unlock_user(p
, ptr
, size
);
4310 /* XXX: add locking support */
4311 static abi_long
write_ldt(CPUX86State
*env
,
4312 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
4314 struct target_modify_ldt_ldt_s ldt_info
;
4315 struct target_modify_ldt_ldt_s
*target_ldt_info
;
4316 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
4317 int seg_not_present
, useable
, lm
;
4318 uint32_t *lp
, entry_1
, entry_2
;
4320 if (bytecount
!= sizeof(ldt_info
))
4321 return -TARGET_EINVAL
;
4322 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
4323 return -TARGET_EFAULT
;
4324 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
4325 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
4326 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
4327 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
4328 unlock_user_struct(target_ldt_info
, ptr
, 0);
4330 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
4331 return -TARGET_EINVAL
;
4332 seg_32bit
= ldt_info
.flags
& 1;
4333 contents
= (ldt_info
.flags
>> 1) & 3;
4334 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
4335 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
4336 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
4337 useable
= (ldt_info
.flags
>> 6) & 1;
4341 lm
= (ldt_info
.flags
>> 7) & 1;
4343 if (contents
== 3) {
4345 return -TARGET_EINVAL
;
4346 if (seg_not_present
== 0)
4347 return -TARGET_EINVAL
;
4349 /* allocate the LDT */
4351 env
->ldt
.base
= target_mmap(0,
4352 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
4353 PROT_READ
|PROT_WRITE
,
4354 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
4355 if (env
->ldt
.base
== -1)
4356 return -TARGET_ENOMEM
;
4357 memset(g2h(env
->ldt
.base
), 0,
4358 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
4359 env
->ldt
.limit
= 0xffff;
4360 ldt_table
= g2h(env
->ldt
.base
);
4363 /* NOTE: same code as Linux kernel */
4364 /* Allow LDTs to be cleared by the user. */
4365 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
4368 read_exec_only
== 1 &&
4370 limit_in_pages
== 0 &&
4371 seg_not_present
== 1 &&
4379 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
4380 (ldt_info
.limit
& 0x0ffff);
4381 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
4382 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
4383 (ldt_info
.limit
& 0xf0000) |
4384 ((read_exec_only
^ 1) << 9) |
4386 ((seg_not_present
^ 1) << 15) |
4388 (limit_in_pages
<< 23) |
4392 entry_2
|= (useable
<< 20);
4394 /* Install the new entry ... */
4396 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
4397 lp
[0] = tswap32(entry_1
);
4398 lp
[1] = tswap32(entry_2
);
4402 /* specific and weird i386 syscalls */
4403 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
4404 unsigned long bytecount
)
4410 ret
= read_ldt(ptr
, bytecount
);
4413 ret
= write_ldt(env
, ptr
, bytecount
, 1);
4416 ret
= write_ldt(env
, ptr
, bytecount
, 0);
4419 ret
= -TARGET_ENOSYS
;
4425 #if defined(TARGET_I386) && defined(TARGET_ABI32)
4426 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
4428 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
4429 struct target_modify_ldt_ldt_s ldt_info
;
4430 struct target_modify_ldt_ldt_s
*target_ldt_info
;
4431 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
4432 int seg_not_present
, useable
, lm
;
4433 uint32_t *lp
, entry_1
, entry_2
;
4436 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
4437 if (!target_ldt_info
)
4438 return -TARGET_EFAULT
;
4439 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
4440 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
4441 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
4442 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
4443 if (ldt_info
.entry_number
== -1) {
4444 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
4445 if (gdt_table
[i
] == 0) {
4446 ldt_info
.entry_number
= i
;
4447 target_ldt_info
->entry_number
= tswap32(i
);
4452 unlock_user_struct(target_ldt_info
, ptr
, 1);
4454 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
4455 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
4456 return -TARGET_EINVAL
;
4457 seg_32bit
= ldt_info
.flags
& 1;
4458 contents
= (ldt_info
.flags
>> 1) & 3;
4459 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
4460 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
4461 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
4462 useable
= (ldt_info
.flags
>> 6) & 1;
4466 lm
= (ldt_info
.flags
>> 7) & 1;
4469 if (contents
== 3) {
4470 if (seg_not_present
== 0)
4471 return -TARGET_EINVAL
;
4474 /* NOTE: same code as Linux kernel */
4475 /* Allow LDTs to be cleared by the user. */
4476 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
4477 if ((contents
== 0 &&
4478 read_exec_only
== 1 &&
4480 limit_in_pages
== 0 &&
4481 seg_not_present
== 1 &&
4489 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
4490 (ldt_info
.limit
& 0x0ffff);
4491 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
4492 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
4493 (ldt_info
.limit
& 0xf0000) |
4494 ((read_exec_only
^ 1) << 9) |
4496 ((seg_not_present
^ 1) << 15) |
4498 (limit_in_pages
<< 23) |
4503 /* Install the new entry ... */
4505 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
4506 lp
[0] = tswap32(entry_1
);
4507 lp
[1] = tswap32(entry_2
);
4511 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
4513 struct target_modify_ldt_ldt_s
*target_ldt_info
;
4514 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
4515 uint32_t base_addr
, limit
, flags
;
4516 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
4517 int seg_not_present
, useable
, lm
;
4518 uint32_t *lp
, entry_1
, entry_2
;
4520 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
4521 if (!target_ldt_info
)
4522 return -TARGET_EFAULT
;
4523 idx
= tswap32(target_ldt_info
->entry_number
);
4524 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
4525 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
4526 unlock_user_struct(target_ldt_info
, ptr
, 1);
4527 return -TARGET_EINVAL
;
4529 lp
= (uint32_t *)(gdt_table
+ idx
);
4530 entry_1
= tswap32(lp
[0]);
4531 entry_2
= tswap32(lp
[1]);
4533 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
4534 contents
= (entry_2
>> 10) & 3;
4535 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
4536 seg_32bit
= (entry_2
>> 22) & 1;
4537 limit_in_pages
= (entry_2
>> 23) & 1;
4538 useable
= (entry_2
>> 20) & 1;
4542 lm
= (entry_2
>> 21) & 1;
4544 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
4545 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
4546 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
4547 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
4548 base_addr
= (entry_1
>> 16) |
4549 (entry_2
& 0xff000000) |
4550 ((entry_2
& 0xff) << 16);
4551 target_ldt_info
->base_addr
= tswapal(base_addr
);
4552 target_ldt_info
->limit
= tswap32(limit
);
4553 target_ldt_info
->flags
= tswap32(flags
);
4554 unlock_user_struct(target_ldt_info
, ptr
, 1);
4557 #endif /* TARGET_I386 && TARGET_ABI32 */
4559 #ifndef TARGET_ABI32
4560 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
4567 case TARGET_ARCH_SET_GS
:
4568 case TARGET_ARCH_SET_FS
:
4569 if (code
== TARGET_ARCH_SET_GS
)
4573 cpu_x86_load_seg(env
, idx
, 0);
4574 env
->segs
[idx
].base
= addr
;
4576 case TARGET_ARCH_GET_GS
:
4577 case TARGET_ARCH_GET_FS
:
4578 if (code
== TARGET_ARCH_GET_GS
)
4582 val
= env
->segs
[idx
].base
;
4583 if (put_user(val
, addr
, abi_ulong
))
4584 ret
= -TARGET_EFAULT
;
4587 ret
= -TARGET_EINVAL
;
4594 #endif /* defined(TARGET_I386) */
4596 #define NEW_STACK_SIZE 0x40000
4599 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
4602 pthread_mutex_t mutex
;
4603 pthread_cond_t cond
;
4606 abi_ulong child_tidptr
;
4607 abi_ulong parent_tidptr
;
4611 static void *clone_func(void *arg
)
4613 new_thread_info
*info
= arg
;
4618 rcu_register_thread();
4620 cpu
= ENV_GET_CPU(env
);
4622 ts
= (TaskState
*)cpu
->opaque
;
4623 info
->tid
= gettid();
4624 cpu
->host_tid
= info
->tid
;
4626 if (info
->child_tidptr
)
4627 put_user_u32(info
->tid
, info
->child_tidptr
);
4628 if (info
->parent_tidptr
)
4629 put_user_u32(info
->tid
, info
->parent_tidptr
);
4630 /* Enable signals. */
4631 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
4632 /* Signal to the parent that we're ready. */
4633 pthread_mutex_lock(&info
->mutex
);
4634 pthread_cond_broadcast(&info
->cond
);
4635 pthread_mutex_unlock(&info
->mutex
);
4636 /* Wait until the parent has finshed initializing the tls state. */
4637 pthread_mutex_lock(&clone_lock
);
4638 pthread_mutex_unlock(&clone_lock
);
4644 /* do_fork() Must return host values and target errnos (unlike most
4645 do_*() functions). */
4646 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
4647 abi_ulong parent_tidptr
, target_ulong newtls
,
4648 abi_ulong child_tidptr
)
4650 CPUState
*cpu
= ENV_GET_CPU(env
);
4654 CPUArchState
*new_env
;
4655 unsigned int nptl_flags
;
4658 /* Emulate vfork() with fork() */
4659 if (flags
& CLONE_VFORK
)
4660 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
4662 if (flags
& CLONE_VM
) {
4663 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
4664 new_thread_info info
;
4665 pthread_attr_t attr
;
4667 ts
= g_new0(TaskState
, 1);
4668 init_task_state(ts
);
4669 /* we create a new CPU instance. */
4670 new_env
= cpu_copy(env
);
4671 /* Init regs that differ from the parent. */
4672 cpu_clone_regs(new_env
, newsp
);
4673 new_cpu
= ENV_GET_CPU(new_env
);
4674 new_cpu
->opaque
= ts
;
4675 ts
->bprm
= parent_ts
->bprm
;
4676 ts
->info
= parent_ts
->info
;
4678 flags
&= ~CLONE_NPTL_FLAGS2
;
4680 if (nptl_flags
& CLONE_CHILD_CLEARTID
) {
4681 ts
->child_tidptr
= child_tidptr
;
4684 if (nptl_flags
& CLONE_SETTLS
)
4685 cpu_set_tls (new_env
, newtls
);
4687 /* Grab a mutex so that thread setup appears atomic. */
4688 pthread_mutex_lock(&clone_lock
);
4690 memset(&info
, 0, sizeof(info
));
4691 pthread_mutex_init(&info
.mutex
, NULL
);
4692 pthread_mutex_lock(&info
.mutex
);
4693 pthread_cond_init(&info
.cond
, NULL
);
4695 if (nptl_flags
& CLONE_CHILD_SETTID
)
4696 info
.child_tidptr
= child_tidptr
;
4697 if (nptl_flags
& CLONE_PARENT_SETTID
)
4698 info
.parent_tidptr
= parent_tidptr
;
4700 ret
= pthread_attr_init(&attr
);
4701 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
4702 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
4703 /* It is not safe to deliver signals until the child has finished
4704 initializing, so temporarily block all signals. */
4705 sigfillset(&sigmask
);
4706 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
4708 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
4709 /* TODO: Free new CPU state if thread creation failed. */
4711 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
4712 pthread_attr_destroy(&attr
);
4714 /* Wait for the child to initialize. */
4715 pthread_cond_wait(&info
.cond
, &info
.mutex
);
4717 if (flags
& CLONE_PARENT_SETTID
)
4718 put_user_u32(ret
, parent_tidptr
);
4722 pthread_mutex_unlock(&info
.mutex
);
4723 pthread_cond_destroy(&info
.cond
);
4724 pthread_mutex_destroy(&info
.mutex
);
4725 pthread_mutex_unlock(&clone_lock
);
4727 /* if no CLONE_VM, we consider it is a fork */
4728 if ((flags
& ~(CSIGNAL
| CLONE_NPTL_FLAGS2
)) != 0) {
4729 return -TARGET_EINVAL
;
4734 /* Child Process. */
4736 cpu_clone_regs(env
, newsp
);
4738 /* There is a race condition here. The parent process could
4739 theoretically read the TID in the child process before the child
4740 tid is set. This would require using either ptrace
4741 (not implemented) or having *_tidptr to point at a shared memory
4742 mapping. We can't repeat the spinlock hack used above because
4743 the child process gets its own copy of the lock. */
4744 if (flags
& CLONE_CHILD_SETTID
)
4745 put_user_u32(gettid(), child_tidptr
);
4746 if (flags
& CLONE_PARENT_SETTID
)
4747 put_user_u32(gettid(), parent_tidptr
);
4748 ts
= (TaskState
*)cpu
->opaque
;
4749 if (flags
& CLONE_SETTLS
)
4750 cpu_set_tls (env
, newtls
);
4751 if (flags
& CLONE_CHILD_CLEARTID
)
4752 ts
->child_tidptr
= child_tidptr
;
4760 /* warning : doesn't handle linux specific flags... */
4761 static int target_to_host_fcntl_cmd(int cmd
)
4764 case TARGET_F_DUPFD
:
4765 case TARGET_F_GETFD
:
4766 case TARGET_F_SETFD
:
4767 case TARGET_F_GETFL
:
4768 case TARGET_F_SETFL
:
4770 case TARGET_F_GETLK
:
4772 case TARGET_F_SETLK
:
4774 case TARGET_F_SETLKW
:
4776 case TARGET_F_GETOWN
:
4778 case TARGET_F_SETOWN
:
4780 case TARGET_F_GETSIG
:
4782 case TARGET_F_SETSIG
:
4784 #if TARGET_ABI_BITS == 32
4785 case TARGET_F_GETLK64
:
4787 case TARGET_F_SETLK64
:
4789 case TARGET_F_SETLKW64
:
4792 case TARGET_F_SETLEASE
:
4794 case TARGET_F_GETLEASE
:
4796 #ifdef F_DUPFD_CLOEXEC
4797 case TARGET_F_DUPFD_CLOEXEC
:
4798 return F_DUPFD_CLOEXEC
;
4800 case TARGET_F_NOTIFY
:
4803 case TARGET_F_GETOWN_EX
:
4807 case TARGET_F_SETOWN_EX
:
4811 return -TARGET_EINVAL
;
4813 return -TARGET_EINVAL
;
4816 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
4817 static const bitmask_transtbl flock_tbl
[] = {
4818 TRANSTBL_CONVERT(F_RDLCK
),
4819 TRANSTBL_CONVERT(F_WRLCK
),
4820 TRANSTBL_CONVERT(F_UNLCK
),
4821 TRANSTBL_CONVERT(F_EXLCK
),
4822 TRANSTBL_CONVERT(F_SHLCK
),
4826 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
4829 struct target_flock
*target_fl
;
4830 struct flock64 fl64
;
4831 struct target_flock64
*target_fl64
;
4833 struct f_owner_ex fox
;
4834 struct target_f_owner_ex
*target_fox
;
4837 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
4839 if (host_cmd
== -TARGET_EINVAL
)
4843 case TARGET_F_GETLK
:
4844 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
4845 return -TARGET_EFAULT
;
4847 target_to_host_bitmask(tswap16(target_fl
->l_type
), flock_tbl
);
4848 fl
.l_whence
= tswap16(target_fl
->l_whence
);
4849 fl
.l_start
= tswapal(target_fl
->l_start
);
4850 fl
.l_len
= tswapal(target_fl
->l_len
);
4851 fl
.l_pid
= tswap32(target_fl
->l_pid
);
4852 unlock_user_struct(target_fl
, arg
, 0);
4853 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
4855 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg
, 0))
4856 return -TARGET_EFAULT
;
4858 host_to_target_bitmask(tswap16(fl
.l_type
), flock_tbl
);
4859 target_fl
->l_whence
= tswap16(fl
.l_whence
);
4860 target_fl
->l_start
= tswapal(fl
.l_start
);
4861 target_fl
->l_len
= tswapal(fl
.l_len
);
4862 target_fl
->l_pid
= tswap32(fl
.l_pid
);
4863 unlock_user_struct(target_fl
, arg
, 1);
4867 case TARGET_F_SETLK
:
4868 case TARGET_F_SETLKW
:
4869 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
4870 return -TARGET_EFAULT
;
4872 target_to_host_bitmask(tswap16(target_fl
->l_type
), flock_tbl
);
4873 fl
.l_whence
= tswap16(target_fl
->l_whence
);
4874 fl
.l_start
= tswapal(target_fl
->l_start
);
4875 fl
.l_len
= tswapal(target_fl
->l_len
);
4876 fl
.l_pid
= tswap32(target_fl
->l_pid
);
4877 unlock_user_struct(target_fl
, arg
, 0);
4878 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
4881 case TARGET_F_GETLK64
:
4882 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
4883 return -TARGET_EFAULT
;
4885 target_to_host_bitmask(tswap16(target_fl64
->l_type
), flock_tbl
) >> 1;
4886 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
4887 fl64
.l_start
= tswap64(target_fl64
->l_start
);
4888 fl64
.l_len
= tswap64(target_fl64
->l_len
);
4889 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
4890 unlock_user_struct(target_fl64
, arg
, 0);
4891 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
4893 if (!lock_user_struct(VERIFY_WRITE
, target_fl64
, arg
, 0))
4894 return -TARGET_EFAULT
;
4895 target_fl64
->l_type
=
4896 host_to_target_bitmask(tswap16(fl64
.l_type
), flock_tbl
) >> 1;
4897 target_fl64
->l_whence
= tswap16(fl64
.l_whence
);
4898 target_fl64
->l_start
= tswap64(fl64
.l_start
);
4899 target_fl64
->l_len
= tswap64(fl64
.l_len
);
4900 target_fl64
->l_pid
= tswap32(fl64
.l_pid
);
4901 unlock_user_struct(target_fl64
, arg
, 1);
4904 case TARGET_F_SETLK64
:
4905 case TARGET_F_SETLKW64
:
4906 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
4907 return -TARGET_EFAULT
;
4909 target_to_host_bitmask(tswap16(target_fl64
->l_type
), flock_tbl
) >> 1;
4910 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
4911 fl64
.l_start
= tswap64(target_fl64
->l_start
);
4912 fl64
.l_len
= tswap64(target_fl64
->l_len
);
4913 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
4914 unlock_user_struct(target_fl64
, arg
, 0);
4915 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
4918 case TARGET_F_GETFL
:
4919 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
4921 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
4925 case TARGET_F_SETFL
:
4926 ret
= get_errno(fcntl(fd
, host_cmd
, target_to_host_bitmask(arg
, fcntl_flags_tbl
)));
4930 case TARGET_F_GETOWN_EX
:
4931 ret
= get_errno(fcntl(fd
, host_cmd
, &fox
));
4933 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
4934 return -TARGET_EFAULT
;
4935 target_fox
->type
= tswap32(fox
.type
);
4936 target_fox
->pid
= tswap32(fox
.pid
);
4937 unlock_user_struct(target_fox
, arg
, 1);
4943 case TARGET_F_SETOWN_EX
:
4944 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
4945 return -TARGET_EFAULT
;
4946 fox
.type
= tswap32(target_fox
->type
);
4947 fox
.pid
= tswap32(target_fox
->pid
);
4948 unlock_user_struct(target_fox
, arg
, 0);
4949 ret
= get_errno(fcntl(fd
, host_cmd
, &fox
));
4953 case TARGET_F_SETOWN
:
4954 case TARGET_F_GETOWN
:
4955 case TARGET_F_SETSIG
:
4956 case TARGET_F_GETSIG
:
4957 case TARGET_F_SETLEASE
:
4958 case TARGET_F_GETLEASE
:
4959 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
4963 ret
= get_errno(fcntl(fd
, cmd
, arg
));
4971 static inline int high2lowuid(int uid
)
4979 static inline int high2lowgid(int gid
)
4987 static inline int low2highuid(int uid
)
4989 if ((int16_t)uid
== -1)
4995 static inline int low2highgid(int gid
)
4997 if ((int16_t)gid
== -1)
5002 static inline int tswapid(int id
)
5007 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
5009 #else /* !USE_UID16 */
5010 static inline int high2lowuid(int uid
)
5014 static inline int high2lowgid(int gid
)
5018 static inline int low2highuid(int uid
)
5022 static inline int low2highgid(int gid
)
5026 static inline int tswapid(int id
)
5031 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
5033 #endif /* USE_UID16 */
/*
 * One-time initialisation for the syscall layer: register the thunk
 * struct descriptors, build the target->host errno table, and patch
 * the size field of ioctl numbers that cannot be computed statically.
 *
 * NOTE(review): source extraction dropped several lines here (local
 * declarations, loop framing); reconstructed from context — confirm
 * against upstream before relying on exact statement order.
 */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

    thunk_init(STRUCT_MAX);
#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            /* Size marked as "all ones": derive it from the thunk type
             * description, which must be a pointer type. */
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
5087 #if TARGET_ABI_BITS == 32
/*
 * Combine a guest register pair (word0, word1) into a 64-bit offset,
 * honouring the guest's endianness: the high half lives in word0 on
 * big-endian targets and in word1 on little-endian ones.
 */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
    uint64_t hi, lo;

#ifdef TARGET_WORDS_BIGENDIAN
    hi = word0;
    lo = word1;
#else
    hi = word1;
    lo = word0;
#endif
    return (hi << 32) | lo;
}
5096 #else /* TARGET_ABI_BITS == 32 */
5097 static inline uint64_t target_offset64(uint64_t word0
, uint64_t word1
)
5101 #endif /* TARGET_ABI_BITS != 32 */
5103 #ifdef TARGET_NR_truncate64
/*
 * TARGET_NR_truncate64 helper: assemble the 64-bit length from the
 * guest register pair (arg2, arg3).  On ABIs that require 64-bit
 * arguments in aligned (even/odd) register pairs, the real pair is
 * shifted up one slot, so shuffle the arguments first.
 *
 * NOTE(review): the register-shuffle lines were dropped by extraction
 * and are reconstructed — confirm against upstream.
 */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
5117 #ifdef TARGET_NR_ftruncate64
/*
 * TARGET_NR_ftruncate64 helper: same register-pair handling as
 * target_truncate64, but operating on a file descriptor (arg1).
 *
 * NOTE(review): the register-shuffle lines were dropped by extraction
 * and are reconstructed — confirm against upstream.
 */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
/*
 * Copy a timespec from guest memory at target_addr into *host_ts,
 * byte-swapping each field for the host.
 *
 * Returns 0 on success, -TARGET_EFAULT if the guest address range
 * is not accessible.
 */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
        return -TARGET_EFAULT;
    host_ts->tv_sec = tswapal(target_ts->tv_sec);
    host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
    /* Read-only lock: unlock without copying back (copy = 0). */
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
/*
 * Copy *host_ts into guest memory at target_addr, byte-swapping each
 * field for the guest.
 *
 * Returns 0 on success, -TARGET_EFAULT if the guest address range
 * is not accessible.
 */
static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
        return -TARGET_EFAULT;
    target_ts->tv_sec = tswapal(host_ts->tv_sec);
    target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
    /* Write lock: copy the converted struct back to the guest (copy = 1). */
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
5157 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
5158 abi_ulong target_addr
)
5160 struct target_itimerspec
*target_itspec
;
5162 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
5163 return -TARGET_EFAULT
;
5166 host_itspec
->it_interval
.tv_sec
=
5167 tswapal(target_itspec
->it_interval
.tv_sec
);
5168 host_itspec
->it_interval
.tv_nsec
=
5169 tswapal(target_itspec
->it_interval
.tv_nsec
);
5170 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
5171 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
5173 unlock_user_struct(target_itspec
, target_addr
, 1);
5177 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
5178 struct itimerspec
*host_its
)
5180 struct target_itimerspec
*target_itspec
;
5182 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
5183 return -TARGET_EFAULT
;
5186 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
5187 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
5189 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
5190 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
5192 unlock_user_struct(target_itspec
, target_addr
, 0);
5196 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
5197 abi_ulong target_addr
)
5199 struct target_sigevent
*target_sevp
;
5201 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
5202 return -TARGET_EFAULT
;
5205 /* This union is awkward on 64 bit systems because it has a 32 bit
5206 * integer and a pointer in it; we follow the conversion approach
5207 * used for handling sigval types in signal.c so the guest should get
5208 * the correct value back even if we did a 64 bit byteswap and it's
5209 * using the 32 bit integer.
5211 host_sevp
->sigev_value
.sival_ptr
=
5212 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
5213 host_sevp
->sigev_signo
=
5214 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
5215 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
5216 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
5218 unlock_user_struct(target_sevp
, target_addr
, 1);
5222 #if defined(TARGET_NR_mlockall)
5223 static inline int target_to_host_mlockall_arg(int arg
)
5227 if (arg
& TARGET_MLOCKALL_MCL_CURRENT
) {
5228 result
|= MCL_CURRENT
;
5230 if (arg
& TARGET_MLOCKALL_MCL_FUTURE
) {
5231 result
|= MCL_FUTURE
;
/*
 * Convert a host struct stat into the guest's stat64 layout at
 * target_addr.  On 32-bit ARM EABI guests a dedicated layout
 * (target_eabi_stat64) is used; otherwise target_stat64 or
 * target_stat depending on the target's configuration.
 *
 * Returns 0 on success, -TARGET_EFAULT if the guest address range
 * is not accessible.
 *
 * NOTE(review): extraction dropped the branch framing (#else/#endif,
 * else-block braces, memset and return lines); reconstructed from
 * context — confirm against upstream.
 */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        /* Zero first: the guest struct has padding/fields we never set. */
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  However they're probably useless because guest atomic
   operations won't work either.  */
/*
 * Emulate the futex(2) syscall for the guest.  Dispatches on the base
 * operation (flags masked off where FUTEX_CMD_MASK is available).
 *
 * NOTE(review): extraction dropped the switch framing and the
 * FUTEX_WAIT/timeout setup lines; reconstructed from context —
 * confirm against upstream.
 */
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            target_to_host_timespec(pts, timeout);
        } else {
            pts = NULL;
        }
        /* val is compared against guest memory and must be in guest order. */
        return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
                                   pts, NULL, val3));
    case FUTEX_WAKE:
        return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_FD:
        return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return get_errno(sys_futex(g2h(uaddr), op, val, pts,
                                   g2h(uaddr2),
                                   (base_op == FUTEX_CMP_REQUEUE
                                    ? tswap32(val3)
                                    : val3)));
    default:
        return -TARGET_ENOSYS;
    }
}
5351 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate name_to_handle_at(2): translate the guest file_handle buffer
 * (whose handle_bytes field the guest supplies) to a host buffer, call
 * the host syscall, then copy the opaque handle and the mount id back
 * to the guest.
 *
 * NOTE(review): several error-check/cleanup lines were dropped by
 * extraction and are reconstructed — confirm against upstream.
 * NOTE(review): `size` comes from the guest and is used unchecked in
 * `sizeof(struct file_handle) + size` — verify upstream added a bound.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
5405 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate open_by_handle_at(2): duplicate the guest's file_handle
 * buffer, fix up byte order of the header fields, and invoke the host
 * syscall with the open flags translated to host bits.
 *
 * NOTE(review): error-check and cleanup lines were dropped by
 * extraction and are reconstructed — confirm against upstream.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
5439 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
/* signalfd siginfo conversion */

/*
 * Convert a host signalfd_siginfo to the guest's byte order and signal
 * numbering.  The only caller converts in place (tinfo == info); all
 * reads below go through INFO so the function is also correct when the
 * buffers are distinct.
 */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */

#ifdef BUS_MCEERR_AO
    /* Consistency fix: test the source (info) fields.  The old code
     * read tinfo, which is uninitialized when the buffers are distinct;
     * it only worked because the sole caller aliases the two. */
    if (info->ssi_signo == SIGBUS &&
        (info->ssi_code == BUS_MCEERR_AR ||
         info->ssi_code == BUS_MCEERR_AO)) {
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    /* Consistency fix: read ssi_errno from info, not tinfo. */
    tinfo->ssi_errno = tswap32(info->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
/*
 * fd_trans host_to_target_data hook for signalfd descriptors: convert
 * each signalfd_siginfo record in buf (len bytes) in place and return
 * the (unchanged) length.
 */
static abi_long host_to_target_data_signalfd(void *buf, size_t len)
{
    int i;

    for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
        /* In-place conversion: source and destination alias. */
        host_to_target_signalfd_siginfo(buf + i, buf + i);
    }

    return len;
}
/* fd_trans descriptor for signalfd fds: byte-swap siginfo records on read. */
static TargetFdTrans target_signalfd_trans = {
    .host_to_target_data = host_to_target_data_signalfd,
};
/*
 * Emulate signalfd4(2): validate the guest flags, convert the guest
 * sigset and flags to host form, create the signalfd, and register a
 * data-translation hook so records read from it are converted.
 *
 * NOTE(review): local declarations and the error/success framing were
 * dropped by extraction and are reconstructed — confirm upstream.
 */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    sigset_t host_mask;
    abi_long ret;
    target_sigset_t *target_mask;

    /* Only the guest O_NONBLOCK / O_CLOEXEC bits are valid here. */
    if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Replace the low 7 bits (termination signal) with the
         * guest's numbering; keep the core-dump and other bits. */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* Stop signal lives in bits 8-15; low byte (0x7f) unchanged. */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
/*
 * Back /proc/self/cmdline for the guest: stream the host file into fd,
 * dropping the first NUL-terminated string (the qemu binary path) so
 * the guest sees its own argv[0] first.
 *
 * NOTE(review): buffer declarations and error paths were dropped by
 * extraction and are reconstructed — confirm against upstream.
 */
static int open_self_cmdline(void *cpu_env, int fd)
{
    int fd_orig = -1;
    bool word_skipped = false;

    fd_orig = open("/proc/self/cmdline", O_RDONLY);
    if (fd_orig < 0) {
        return fd_orig;
    }

    while (true) {
        ssize_t nb_read;
        char buf[128];
        char *cp_buf = buf;

        nb_read = read(fd_orig, buf, sizeof(buf));
        if (nb_read < 0) {
            int e = errno;
            fd_orig = close(fd_orig);
            errno = e;
            return -1;
        } else if (nb_read == 0) {
            break;
        }

        if (!word_skipped) {
            /* Skip the first string, which is the path to qemu-*-static
               instead of the actual command. */
            cp_buf = memchr(buf, 0, sizeof(buf));
            if (cp_buf) {
                /* Null byte found, skip one string */
                cp_buf++;
                nb_read -= cp_buf - buf;
                word_skipped = true;
            }
        }

        if (word_skipped) {
            if (write(fd, cp_buf, nb_read) != nb_read) {
                int e = errno;
                close(fd_orig);
                errno = e;
                return -1;
            }
        }
    }

    return close(fd_orig);
}
/*
 * Back /proc/self/maps for the guest: parse the host's maps, keep only
 * ranges that correspond to valid guest addresses, translate them with
 * h2g(), label the guest stack, and write the result to fd.
 *
 * NOTE(review): loop/cleanup framing reconstructed; the " [stack]" and
 * separator literals may have had column-alignment padding that the
 * extraction collapsed — confirm against upstream before relying on
 * exact output formatting.
 */
static int open_self_maps(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/self/maps", "r");
    if (fp == NULL) {
        return -1;
    }

    while ((read = getline(&line, &len, fp)) != -1) {
        int fields, dev_maj, dev_min, inode;
        uint64_t min, max, offset;
        char flag_r, flag_w, flag_x, flag_p;
        char path[512] = "";
        fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
                        " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
                        &flag_p, &offset, &dev_maj, &dev_min, &inode, path);

        /* 10 fields when the path is empty, 11 otherwise. */
        if ((fields < 10) || (fields > 11)) {
            continue;
        }
        if (h2g_valid(min)) {
            int flags = page_get_flags(h2g(min));
            /* Clamp ranges that extend past the guest address space. */
            max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
            if (page_check_range(h2g(min), max - min, flags) == -1) {
                continue;
            }
            if (h2g(min) == ts->info->stack_limit) {
                pstrcpy(path, sizeof(path), " [stack]");
            }
            dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
                    " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
                    h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
                    flag_x, flag_p, offset, dev_maj, dev_min, inode,
                    path[0] ? " " : "", path);
        }
    }

    free(line);
    fclose(fp);

    return 0;
}
/*
 * Back /proc/self/stat for the guest: emit the 44 space-separated
 * fields, faking everything except the pid (field 0), the command name
 * (field 1) and the stack start (field 27).
 *
 * NOTE(review): per-iteration declarations and the field selection
 * framing were dropped by extraction and are reconstructed — confirm
 * against upstream.
 */
static int open_self_stat(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong start_stack = ts->info->start_stack;
    int i;

    for (i = 0; i < 44; i++) {
        char buf[128];
        int len;
        uint64_t val = 0;

        if (i == 0) {
            /* pid */
            val = getpid();
            snprintf(buf, sizeof(buf), "%"PRId64" ", val);
        } else if (i == 1) {
            /* app name */
            snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
        } else if (i == 27) {
            /* stack bottom */
            val = start_stack;
            snprintf(buf, sizeof(buf), "%"PRId64" ", val);
        } else {
            /* for the rest, there is MasterCard */
            snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
        }

        len = strlen(buf);
        if (write(fd, buf, len) != len) {
            return -1;
        }
    }

    return 0;
}
/*
 * Back /proc/self/auxv for the guest: copy the auxiliary vector saved
 * on the guest stack at load time into fd, then rewind the fd so the
 * guest reads from offset 0.
 *
 * NOTE(review): the write-loop framing was dropped by extraction and
 * is reconstructed — confirm against upstream.
 */
static int open_self_auxv(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
/*
 * Return nonzero when FILENAME names ENTRY inside this process's /proc
 * directory, i.e. "/proc/self/<entry>" or "/proc/<getpid()>/<entry>".
 * Any other path returns 0.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    static const char proc_prefix[] = "/proc/";
    static const char self_prefix[] = "self/";

    if (strncmp(filename, proc_prefix, strlen(proc_prefix)) != 0) {
        return 0;
    }
    filename += strlen(proc_prefix);

    if (strncmp(filename, self_prefix, strlen(self_prefix)) == 0) {
        filename += strlen(self_prefix);
    } else if (*filename >= '1' && *filename <= '9') {
        /* A numeric directory only matches when it is our own pid. */
        char myself[80];

        snprintf(myself, sizeof(myself), "%d/", getpid());
        if (strncmp(filename, myself, strlen(myself)) != 0) {
            return 0;
        }
        filename += strlen(myself);
    } else {
        return 0;
    }

    return strcmp(filename, entry) == 0;
}
5725 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Whole-path comparator used by the fake-/proc table for absolute entries. */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
/*
 * Back /proc/net/route for cross-endian guests: copy the host file to
 * fd, byte-swapping the destination, gateway and netmask columns so
 * the guest sees addresses in its own byte order.  (Compiled only when
 * host and target endianness differ.)
 *
 * NOTE(review): the iface buffer declaration and cleanup lines were
 * dropped by extraction and are reconstructed — confirm upstream.
 */
static int open_net_route(void *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -1;
    }

    /* read header */

    read = getline(&line, &len, fp);
    dprintf(fd, "%s", line);

    /* read routes */

    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
               iface, &dest, &gw, &flags, &refcnt, &use, &metric,
               &mask, &mtu, &window, &irtt);
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
/*
 * openat(2) front-end used by TARGET_NR_open/openat: intercepts paths
 * that must be faked for the guest ("/proc/self/exe" and the entries
 * in the fakes[] table); everything else falls through to the host
 * sys_openat.  Faked entries are materialised into an unlinked
 * temporary file whose descriptor is returned.
 *
 * NOTE(review): the fake_open struct definition, loop framing and the
 * mkstemp error paths were dropped by extraction and are
 * reconstructed — confirm against upstream.
 */
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;
        int (*fill)(void *cpu_env, int fd);
        int (*cmp)(const char *s1, const char *s2);
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    if (is_proc_myself(pathname, "exe")) {
        /* Hand back the execfd saved at startup when available, so the
         * guest reopens its own binary rather than the qemu loader. */
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : get_errno(sys_openat(dirfd, exec_path, flags, mode));
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir) {
            tmpdir = "/tmp";
        }
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        /* Unlink immediately: the fd keeps the file alive. */
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return get_errno(sys_openat(dirfd, path(pathname), flags, mode));
}
/* Guest-visible timer ids carry this magic in the upper half so stale
 * or forged ids can be rejected before indexing g_posix_timers. */
#define TIMER_MAGIC 0x0caf0000
#define TIMER_MAGIC_MASK 0xffff0000

/* Convert QEMU provided timer ID back to internal 16bit index format */
static target_timer_t get_timer_id(abi_long arg)
{
    target_timer_t timerid = arg;

    if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
        return -TARGET_EINVAL;
    }

    /* Strip the magic, leaving the 16-bit table index. */
    timerid &= 0xffff;

    if (timerid >= ARRAY_SIZE(g_posix_timers)) {
        return -TARGET_EINVAL;
    }

    return timerid;
}
5848 /* do_syscall() should always have a single exit point at the end so
5849 that actions, such as logging of syscall results, can be performed.
5850 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5851 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
5852 abi_long arg2
, abi_long arg3
, abi_long arg4
,
5853 abi_long arg5
, abi_long arg6
, abi_long arg7
,
5856 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
5863 gemu_log("syscall %d", num
);
5866 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
5869 case TARGET_NR_exit
:
5870 /* In old applications this may be used to implement _exit(2).
5871 However in threaded applictions it is used for thread termination,
5872 and _exit_group is used for application termination.
5873 Do thread termination if we have more then one thread. */
5874 /* FIXME: This probably breaks if a signal arrives. We should probably
5875 be disabling signals. */
5876 if (CPU_NEXT(first_cpu
)) {
5880 /* Remove the CPU from the list. */
5881 QTAILQ_REMOVE(&cpus
, cpu
, node
);
5884 if (ts
->child_tidptr
) {
5885 put_user_u32(0, ts
->child_tidptr
);
5886 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
5890 object_unref(OBJECT(cpu
));
5892 rcu_unregister_thread();
5898 gdb_exit(cpu_env
, arg1
);
5900 ret
= 0; /* avoid warning */
5902 case TARGET_NR_read
:
5906 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
5908 ret
= get_errno(read(arg1
, p
, arg3
));
5910 fd_trans_host_to_target_data(arg1
)) {
5911 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
5913 unlock_user(p
, arg2
, ret
);
5916 case TARGET_NR_write
:
5917 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
5919 ret
= get_errno(write(arg1
, p
, arg3
));
5920 unlock_user(p
, arg2
, 0);
5922 #ifdef TARGET_NR_open
5923 case TARGET_NR_open
:
5924 if (!(p
= lock_user_string(arg1
)))
5926 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
5927 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
5929 fd_trans_unregister(ret
);
5930 unlock_user(p
, arg1
, 0);
5933 case TARGET_NR_openat
:
5934 if (!(p
= lock_user_string(arg2
)))
5936 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
5937 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
5939 fd_trans_unregister(ret
);
5940 unlock_user(p
, arg2
, 0);
5942 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
5943 case TARGET_NR_name_to_handle_at
:
5944 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
5947 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
5948 case TARGET_NR_open_by_handle_at
:
5949 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
5950 fd_trans_unregister(ret
);
5953 case TARGET_NR_close
:
5954 fd_trans_unregister(arg1
);
5955 ret
= get_errno(close(arg1
));
5960 #ifdef TARGET_NR_fork
5961 case TARGET_NR_fork
:
5962 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
5965 #ifdef TARGET_NR_waitpid
5966 case TARGET_NR_waitpid
:
5969 ret
= get_errno(waitpid(arg1
, &status
, arg3
));
5970 if (!is_error(ret
) && arg2
&& ret
5971 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
5976 #ifdef TARGET_NR_waitid
5977 case TARGET_NR_waitid
:
5981 ret
= get_errno(waitid(arg1
, arg2
, &info
, arg4
));
5982 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
5983 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
5985 host_to_target_siginfo(p
, &info
);
5986 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
5991 #ifdef TARGET_NR_creat /* not on alpha */
5992 case TARGET_NR_creat
:
5993 if (!(p
= lock_user_string(arg1
)))
5995 ret
= get_errno(creat(p
, arg2
));
5996 fd_trans_unregister(ret
);
5997 unlock_user(p
, arg1
, 0);
6000 #ifdef TARGET_NR_link
6001 case TARGET_NR_link
:
6004 p
= lock_user_string(arg1
);
6005 p2
= lock_user_string(arg2
);
6007 ret
= -TARGET_EFAULT
;
6009 ret
= get_errno(link(p
, p2
));
6010 unlock_user(p2
, arg2
, 0);
6011 unlock_user(p
, arg1
, 0);
6015 #if defined(TARGET_NR_linkat)
6016 case TARGET_NR_linkat
:
6021 p
= lock_user_string(arg2
);
6022 p2
= lock_user_string(arg4
);
6024 ret
= -TARGET_EFAULT
;
6026 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
6027 unlock_user(p
, arg2
, 0);
6028 unlock_user(p2
, arg4
, 0);
6032 #ifdef TARGET_NR_unlink
6033 case TARGET_NR_unlink
:
6034 if (!(p
= lock_user_string(arg1
)))
6036 ret
= get_errno(unlink(p
));
6037 unlock_user(p
, arg1
, 0);
6040 #if defined(TARGET_NR_unlinkat)
6041 case TARGET_NR_unlinkat
:
6042 if (!(p
= lock_user_string(arg2
)))
6044 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
6045 unlock_user(p
, arg2
, 0);
6048 case TARGET_NR_execve
:
6050 char **argp
, **envp
;
6053 abi_ulong guest_argp
;
6054 abi_ulong guest_envp
;
6061 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
6062 if (get_user_ual(addr
, gp
))
6070 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
6071 if (get_user_ual(addr
, gp
))
6078 argp
= alloca((argc
+ 1) * sizeof(void *));
6079 envp
= alloca((envc
+ 1) * sizeof(void *));
6081 for (gp
= guest_argp
, q
= argp
; gp
;
6082 gp
+= sizeof(abi_ulong
), q
++) {
6083 if (get_user_ual(addr
, gp
))
6087 if (!(*q
= lock_user_string(addr
)))
6089 total_size
+= strlen(*q
) + 1;
6093 for (gp
= guest_envp
, q
= envp
; gp
;
6094 gp
+= sizeof(abi_ulong
), q
++) {
6095 if (get_user_ual(addr
, gp
))
6099 if (!(*q
= lock_user_string(addr
)))
6101 total_size
+= strlen(*q
) + 1;
6105 if (!(p
= lock_user_string(arg1
)))
6107 ret
= get_errno(execve(p
, argp
, envp
));
6108 unlock_user(p
, arg1
, 0);
6113 ret
= -TARGET_EFAULT
;
6116 for (gp
= guest_argp
, q
= argp
; *q
;
6117 gp
+= sizeof(abi_ulong
), q
++) {
6118 if (get_user_ual(addr
, gp
)
6121 unlock_user(*q
, addr
, 0);
6123 for (gp
= guest_envp
, q
= envp
; *q
;
6124 gp
+= sizeof(abi_ulong
), q
++) {
6125 if (get_user_ual(addr
, gp
)
6128 unlock_user(*q
, addr
, 0);
6132 case TARGET_NR_chdir
:
6133 if (!(p
= lock_user_string(arg1
)))
6135 ret
= get_errno(chdir(p
));
6136 unlock_user(p
, arg1
, 0);
6138 #ifdef TARGET_NR_time
6139 case TARGET_NR_time
:
6142 ret
= get_errno(time(&host_time
));
6145 && put_user_sal(host_time
, arg1
))
6150 #ifdef TARGET_NR_mknod
6151 case TARGET_NR_mknod
:
6152 if (!(p
= lock_user_string(arg1
)))
6154 ret
= get_errno(mknod(p
, arg2
, arg3
));
6155 unlock_user(p
, arg1
, 0);
6158 #if defined(TARGET_NR_mknodat)
6159 case TARGET_NR_mknodat
:
6160 if (!(p
= lock_user_string(arg2
)))
6162 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
6163 unlock_user(p
, arg2
, 0);
6166 #ifdef TARGET_NR_chmod
6167 case TARGET_NR_chmod
:
6168 if (!(p
= lock_user_string(arg1
)))
6170 ret
= get_errno(chmod(p
, arg2
));
6171 unlock_user(p
, arg1
, 0);
6174 #ifdef TARGET_NR_break
6175 case TARGET_NR_break
:
6178 #ifdef TARGET_NR_oldstat
6179 case TARGET_NR_oldstat
:
6182 case TARGET_NR_lseek
:
6183 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
6185 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
6186 /* Alpha specific */
6187 case TARGET_NR_getxpid
:
6188 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
6189 ret
= get_errno(getpid());
6192 #ifdef TARGET_NR_getpid
6193 case TARGET_NR_getpid
:
6194 ret
= get_errno(getpid());
6197 case TARGET_NR_mount
:
6199 /* need to look at the data field */
6203 p
= lock_user_string(arg1
);
6211 p2
= lock_user_string(arg2
);
6214 unlock_user(p
, arg1
, 0);
6220 p3
= lock_user_string(arg3
);
6223 unlock_user(p
, arg1
, 0);
6225 unlock_user(p2
, arg2
, 0);
6232 /* FIXME - arg5 should be locked, but it isn't clear how to
6233 * do that since it's not guaranteed to be a NULL-terminated
6237 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
6239 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
6241 ret
= get_errno(ret
);
6244 unlock_user(p
, arg1
, 0);
6246 unlock_user(p2
, arg2
, 0);
6248 unlock_user(p3
, arg3
, 0);
6252 #ifdef TARGET_NR_umount
6253 case TARGET_NR_umount
:
6254 if (!(p
= lock_user_string(arg1
)))
6256 ret
= get_errno(umount(p
));
6257 unlock_user(p
, arg1
, 0);
6260 #ifdef TARGET_NR_stime /* not on alpha */
6261 case TARGET_NR_stime
:
6264 if (get_user_sal(host_time
, arg1
))
6266 ret
= get_errno(stime(&host_time
));
6270 case TARGET_NR_ptrace
:
6272 #ifdef TARGET_NR_alarm /* not on alpha */
6273 case TARGET_NR_alarm
:
6277 #ifdef TARGET_NR_oldfstat
6278 case TARGET_NR_oldfstat
:
6281 #ifdef TARGET_NR_pause /* not on alpha */
6282 case TARGET_NR_pause
:
6283 ret
= get_errno(pause());
6286 #ifdef TARGET_NR_utime
6287 case TARGET_NR_utime
:
6289 struct utimbuf tbuf
, *host_tbuf
;
6290 struct target_utimbuf
*target_tbuf
;
6292 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
6294 tbuf
.actime
= tswapal(target_tbuf
->actime
);
6295 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
6296 unlock_user_struct(target_tbuf
, arg2
, 0);
6301 if (!(p
= lock_user_string(arg1
)))
6303 ret
= get_errno(utime(p
, host_tbuf
));
6304 unlock_user(p
, arg1
, 0);
6308 #ifdef TARGET_NR_utimes
6309 case TARGET_NR_utimes
:
6311 struct timeval
*tvp
, tv
[2];
6313 if (copy_from_user_timeval(&tv
[0], arg2
)
6314 || copy_from_user_timeval(&tv
[1],
6315 arg2
+ sizeof(struct target_timeval
)))
6321 if (!(p
= lock_user_string(arg1
)))
6323 ret
= get_errno(utimes(p
, tvp
));
6324 unlock_user(p
, arg1
, 0);
6328 #if defined(TARGET_NR_futimesat)
6329 case TARGET_NR_futimesat
:
6331 struct timeval
*tvp
, tv
[2];
6333 if (copy_from_user_timeval(&tv
[0], arg3
)
6334 || copy_from_user_timeval(&tv
[1],
6335 arg3
+ sizeof(struct target_timeval
)))
6341 if (!(p
= lock_user_string(arg2
)))
6343 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
6344 unlock_user(p
, arg2
, 0);
6348 #ifdef TARGET_NR_stty
6349 case TARGET_NR_stty
:
6352 #ifdef TARGET_NR_gtty
6353 case TARGET_NR_gtty
:
6356 #ifdef TARGET_NR_access
6357 case TARGET_NR_access
:
6358 if (!(p
= lock_user_string(arg1
)))
6360 ret
= get_errno(access(path(p
), arg2
));
6361 unlock_user(p
, arg1
, 0);
6364 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
6365 case TARGET_NR_faccessat
:
6366 if (!(p
= lock_user_string(arg2
)))
6368 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
6369 unlock_user(p
, arg2
, 0);
6372 #ifdef TARGET_NR_nice /* not on alpha */
6373 case TARGET_NR_nice
:
6374 ret
= get_errno(nice(arg1
));
6377 #ifdef TARGET_NR_ftime
6378 case TARGET_NR_ftime
:
6381 case TARGET_NR_sync
:
6385 case TARGET_NR_kill
:
6386 ret
= get_errno(kill(arg1
, target_to_host_signal(arg2
)));
6388 #ifdef TARGET_NR_rename
6389 case TARGET_NR_rename
:
6392 p
= lock_user_string(arg1
);
6393 p2
= lock_user_string(arg2
);
6395 ret
= -TARGET_EFAULT
;
6397 ret
= get_errno(rename(p
, p2
));
6398 unlock_user(p2
, arg2
, 0);
6399 unlock_user(p
, arg1
, 0);
6403 #if defined(TARGET_NR_renameat)
6404 case TARGET_NR_renameat
:
6407 p
= lock_user_string(arg2
);
6408 p2
= lock_user_string(arg4
);
6410 ret
= -TARGET_EFAULT
;
6412 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
6413 unlock_user(p2
, arg4
, 0);
6414 unlock_user(p
, arg2
, 0);
6418 #ifdef TARGET_NR_mkdir
6419 case TARGET_NR_mkdir
:
6420 if (!(p
= lock_user_string(arg1
)))
6422 ret
= get_errno(mkdir(p
, arg2
));
6423 unlock_user(p
, arg1
, 0);
6426 #if defined(TARGET_NR_mkdirat)
6427 case TARGET_NR_mkdirat
:
6428 if (!(p
= lock_user_string(arg2
)))
6430 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
6431 unlock_user(p
, arg2
, 0);
6434 #ifdef TARGET_NR_rmdir
6435 case TARGET_NR_rmdir
:
6436 if (!(p
= lock_user_string(arg1
)))
6438 ret
= get_errno(rmdir(p
));
6439 unlock_user(p
, arg1
, 0);
6443 ret
= get_errno(dup(arg1
));
6445 fd_trans_dup(arg1
, ret
);
6448 #ifdef TARGET_NR_pipe
6449 case TARGET_NR_pipe
:
6450 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
6453 #ifdef TARGET_NR_pipe2
6454 case TARGET_NR_pipe2
:
6455 ret
= do_pipe(cpu_env
, arg1
,
6456 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
6459 case TARGET_NR_times
:
6461 struct target_tms
*tmsp
;
6463 ret
= get_errno(times(&tms
));
6465 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
6468 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
6469 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
6470 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
6471 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
6474 ret
= host_to_target_clock_t(ret
);
6477 #ifdef TARGET_NR_prof
6478 case TARGET_NR_prof
:
6481 #ifdef TARGET_NR_signal
6482 case TARGET_NR_signal
:
6485 case TARGET_NR_acct
:
6487 ret
= get_errno(acct(NULL
));
6489 if (!(p
= lock_user_string(arg1
)))
6491 ret
= get_errno(acct(path(p
)));
6492 unlock_user(p
, arg1
, 0);
6495 #ifdef TARGET_NR_umount2
6496 case TARGET_NR_umount2
:
6497 if (!(p
= lock_user_string(arg1
)))
6499 ret
= get_errno(umount2(p
, arg2
));
6500 unlock_user(p
, arg1
, 0);
6503 #ifdef TARGET_NR_lock
6504 case TARGET_NR_lock
:
6507 case TARGET_NR_ioctl
:
6508 ret
= do_ioctl(arg1
, arg2
, arg3
);
6510 case TARGET_NR_fcntl
:
6511 ret
= do_fcntl(arg1
, arg2
, arg3
);
6513 #ifdef TARGET_NR_mpx
6517 case TARGET_NR_setpgid
:
6518 ret
= get_errno(setpgid(arg1
, arg2
));
6520 #ifdef TARGET_NR_ulimit
6521 case TARGET_NR_ulimit
:
6524 #ifdef TARGET_NR_oldolduname
6525 case TARGET_NR_oldolduname
:
6528 case TARGET_NR_umask
:
6529 ret
= get_errno(umask(arg1
));
6531 case TARGET_NR_chroot
:
6532 if (!(p
= lock_user_string(arg1
)))
6534 ret
= get_errno(chroot(p
));
6535 unlock_user(p
, arg1
, 0);
6537 #ifdef TARGET_NR_ustat
6538 case TARGET_NR_ustat
:
6541 #ifdef TARGET_NR_dup2
6542 case TARGET_NR_dup2
:
6543 ret
= get_errno(dup2(arg1
, arg2
));
6545 fd_trans_dup(arg1
, arg2
);
6549 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
6550 case TARGET_NR_dup3
:
6551 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
6553 fd_trans_dup(arg1
, arg2
);
6557 #ifdef TARGET_NR_getppid /* not on alpha */
6558 case TARGET_NR_getppid
:
6559 ret
= get_errno(getppid());
6562 #ifdef TARGET_NR_getpgrp
6563 case TARGET_NR_getpgrp
:
6564 ret
= get_errno(getpgrp());
6567 case TARGET_NR_setsid
:
6568 ret
= get_errno(setsid());
6570 #ifdef TARGET_NR_sigaction
6571 case TARGET_NR_sigaction
:
6573 #if defined(TARGET_ALPHA)
6574 struct target_sigaction act
, oact
, *pact
= 0;
6575 struct target_old_sigaction
*old_act
;
6577 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
6579 act
._sa_handler
= old_act
->_sa_handler
;
6580 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
6581 act
.sa_flags
= old_act
->sa_flags
;
6582 act
.sa_restorer
= 0;
6583 unlock_user_struct(old_act
, arg2
, 0);
6586 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
6587 if (!is_error(ret
) && arg3
) {
6588 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
6590 old_act
->_sa_handler
= oact
._sa_handler
;
6591 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
6592 old_act
->sa_flags
= oact
.sa_flags
;
6593 unlock_user_struct(old_act
, arg3
, 1);
6595 #elif defined(TARGET_MIPS)
6596 struct target_sigaction act
, oact
, *pact
, *old_act
;
6599 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
6601 act
._sa_handler
= old_act
->_sa_handler
;
6602 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
6603 act
.sa_flags
= old_act
->sa_flags
;
6604 unlock_user_struct(old_act
, arg2
, 0);
6610 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
6612 if (!is_error(ret
) && arg3
) {
6613 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
6615 old_act
->_sa_handler
= oact
._sa_handler
;
6616 old_act
->sa_flags
= oact
.sa_flags
;
6617 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
6618 old_act
->sa_mask
.sig
[1] = 0;
6619 old_act
->sa_mask
.sig
[2] = 0;
6620 old_act
->sa_mask
.sig
[3] = 0;
6621 unlock_user_struct(old_act
, arg3
, 1);
6624 struct target_old_sigaction
*old_act
;
6625 struct target_sigaction act
, oact
, *pact
;
6627 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
6629 act
._sa_handler
= old_act
->_sa_handler
;
6630 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
6631 act
.sa_flags
= old_act
->sa_flags
;
6632 act
.sa_restorer
= old_act
->sa_restorer
;
6633 unlock_user_struct(old_act
, arg2
, 0);
6638 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
6639 if (!is_error(ret
) && arg3
) {
6640 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
6642 old_act
->_sa_handler
= oact
._sa_handler
;
6643 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
6644 old_act
->sa_flags
= oact
.sa_flags
;
6645 old_act
->sa_restorer
= oact
.sa_restorer
;
6646 unlock_user_struct(old_act
, arg3
, 1);
6652 case TARGET_NR_rt_sigaction
:
6654 #if defined(TARGET_ALPHA)
6655 struct target_sigaction act
, oact
, *pact
= 0;
6656 struct target_rt_sigaction
*rt_act
;
6657 /* ??? arg4 == sizeof(sigset_t). */
6659 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
6661 act
._sa_handler
= rt_act
->_sa_handler
;
6662 act
.sa_mask
= rt_act
->sa_mask
;
6663 act
.sa_flags
= rt_act
->sa_flags
;
6664 act
.sa_restorer
= arg5
;
6665 unlock_user_struct(rt_act
, arg2
, 0);
6668 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
6669 if (!is_error(ret
) && arg3
) {
6670 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
6672 rt_act
->_sa_handler
= oact
._sa_handler
;
6673 rt_act
->sa_mask
= oact
.sa_mask
;
6674 rt_act
->sa_flags
= oact
.sa_flags
;
6675 unlock_user_struct(rt_act
, arg3
, 1);
6678 struct target_sigaction
*act
;
6679 struct target_sigaction
*oact
;
6682 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
6687 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
6688 ret
= -TARGET_EFAULT
;
6689 goto rt_sigaction_fail
;
6693 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
6696 unlock_user_struct(act
, arg2
, 0);
6698 unlock_user_struct(oact
, arg3
, 1);
6702 #ifdef TARGET_NR_sgetmask /* not on alpha */
6703 case TARGET_NR_sgetmask
:
6706 abi_ulong target_set
;
6707 do_sigprocmask(0, NULL
, &cur_set
);
6708 host_to_target_old_sigset(&target_set
, &cur_set
);
6713 #ifdef TARGET_NR_ssetmask /* not on alpha */
6714 case TARGET_NR_ssetmask
:
6716 sigset_t set
, oset
, cur_set
;
6717 abi_ulong target_set
= arg1
;
6718 do_sigprocmask(0, NULL
, &cur_set
);
6719 target_to_host_old_sigset(&set
, &target_set
);
6720 sigorset(&set
, &set
, &cur_set
);
6721 do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
6722 host_to_target_old_sigset(&target_set
, &oset
);
6727 #ifdef TARGET_NR_sigprocmask
6728 case TARGET_NR_sigprocmask
:
6730 #if defined(TARGET_ALPHA)
6731 sigset_t set
, oldset
;
6736 case TARGET_SIG_BLOCK
:
6739 case TARGET_SIG_UNBLOCK
:
6742 case TARGET_SIG_SETMASK
:
6746 ret
= -TARGET_EINVAL
;
6750 target_to_host_old_sigset(&set
, &mask
);
6752 ret
= get_errno(do_sigprocmask(how
, &set
, &oldset
));
6753 if (!is_error(ret
)) {
6754 host_to_target_old_sigset(&mask
, &oldset
);
6756 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
6759 sigset_t set
, oldset
, *set_ptr
;
6764 case TARGET_SIG_BLOCK
:
6767 case TARGET_SIG_UNBLOCK
:
6770 case TARGET_SIG_SETMASK
:
6774 ret
= -TARGET_EINVAL
;
6777 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
6779 target_to_host_old_sigset(&set
, p
);
6780 unlock_user(p
, arg2
, 0);
6786 ret
= get_errno(do_sigprocmask(how
, set_ptr
, &oldset
));
6787 if (!is_error(ret
) && arg3
) {
6788 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
6790 host_to_target_old_sigset(p
, &oldset
);
6791 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
6797 case TARGET_NR_rt_sigprocmask
:
6800 sigset_t set
, oldset
, *set_ptr
;
6804 case TARGET_SIG_BLOCK
:
6807 case TARGET_SIG_UNBLOCK
:
6810 case TARGET_SIG_SETMASK
:
6814 ret
= -TARGET_EINVAL
;
6817 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
6819 target_to_host_sigset(&set
, p
);
6820 unlock_user(p
, arg2
, 0);
6826 ret
= get_errno(do_sigprocmask(how
, set_ptr
, &oldset
));
6827 if (!is_error(ret
) && arg3
) {
6828 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
6830 host_to_target_sigset(p
, &oldset
);
6831 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
6835 #ifdef TARGET_NR_sigpending
6836 case TARGET_NR_sigpending
:
6839 ret
= get_errno(sigpending(&set
));
6840 if (!is_error(ret
)) {
6841 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
6843 host_to_target_old_sigset(p
, &set
);
6844 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
6849 case TARGET_NR_rt_sigpending
:
6852 ret
= get_errno(sigpending(&set
));
6853 if (!is_error(ret
)) {
6854 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
6856 host_to_target_sigset(p
, &set
);
6857 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
6861 #ifdef TARGET_NR_sigsuspend
6862 case TARGET_NR_sigsuspend
:
6865 #if defined(TARGET_ALPHA)
6866 abi_ulong mask
= arg1
;
6867 target_to_host_old_sigset(&set
, &mask
);
6869 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
6871 target_to_host_old_sigset(&set
, p
);
6872 unlock_user(p
, arg1
, 0);
6874 ret
= get_errno(sigsuspend(&set
));
6878 case TARGET_NR_rt_sigsuspend
:
6881 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
6883 target_to_host_sigset(&set
, p
);
6884 unlock_user(p
, arg1
, 0);
6885 ret
= get_errno(sigsuspend(&set
));
6888 case TARGET_NR_rt_sigtimedwait
:
6891 struct timespec uts
, *puts
;
6894 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
6896 target_to_host_sigset(&set
, p
);
6897 unlock_user(p
, arg1
, 0);
6900 target_to_host_timespec(puts
, arg3
);
6904 ret
= get_errno(sigtimedwait(&set
, &uinfo
, puts
));
6905 if (!is_error(ret
)) {
6907 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
6912 host_to_target_siginfo(p
, &uinfo
);
6913 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
6915 ret
= host_to_target_signal(ret
);
6919 case TARGET_NR_rt_sigqueueinfo
:
6922 if (!(p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_sigset_t
), 1)))
6924 target_to_host_siginfo(&uinfo
, p
);
6925 unlock_user(p
, arg1
, 0);
6926 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
6929 #ifdef TARGET_NR_sigreturn
6930 case TARGET_NR_sigreturn
:
6931 /* NOTE: ret is eax, so not transcoding must be done */
6932 ret
= do_sigreturn(cpu_env
);
6935 case TARGET_NR_rt_sigreturn
:
6936 /* NOTE: ret is eax, so not transcoding must be done */
6937 ret
= do_rt_sigreturn(cpu_env
);
6939 case TARGET_NR_sethostname
:
6940 if (!(p
= lock_user_string(arg1
)))
6942 ret
= get_errno(sethostname(p
, arg2
));
6943 unlock_user(p
, arg1
, 0);
6945 case TARGET_NR_setrlimit
:
6947 int resource
= target_to_host_resource(arg1
);
6948 struct target_rlimit
*target_rlim
;
6950 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
6952 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
6953 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
6954 unlock_user_struct(target_rlim
, arg2
, 0);
6955 ret
= get_errno(setrlimit(resource
, &rlim
));
6958 case TARGET_NR_getrlimit
:
6960 int resource
= target_to_host_resource(arg1
);
6961 struct target_rlimit
*target_rlim
;
6964 ret
= get_errno(getrlimit(resource
, &rlim
));
6965 if (!is_error(ret
)) {
6966 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
6968 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
6969 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
6970 unlock_user_struct(target_rlim
, arg2
, 1);
6974 case TARGET_NR_getrusage
:
6976 struct rusage rusage
;
6977 ret
= get_errno(getrusage(arg1
, &rusage
));
6978 if (!is_error(ret
)) {
6979 ret
= host_to_target_rusage(arg2
, &rusage
);
6983 case TARGET_NR_gettimeofday
:
6986 ret
= get_errno(gettimeofday(&tv
, NULL
));
6987 if (!is_error(ret
)) {
6988 if (copy_to_user_timeval(arg1
, &tv
))
6993 case TARGET_NR_settimeofday
:
6995 struct timeval tv
, *ptv
= NULL
;
6996 struct timezone tz
, *ptz
= NULL
;
6999 if (copy_from_user_timeval(&tv
, arg1
)) {
7006 if (copy_from_user_timezone(&tz
, arg2
)) {
7012 ret
= get_errno(settimeofday(ptv
, ptz
));
7015 #if defined(TARGET_NR_select)
7016 case TARGET_NR_select
:
7017 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
7018 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
7021 struct target_sel_arg_struct
*sel
;
7022 abi_ulong inp
, outp
, exp
, tvp
;
7025 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1))
7027 nsel
= tswapal(sel
->n
);
7028 inp
= tswapal(sel
->inp
);
7029 outp
= tswapal(sel
->outp
);
7030 exp
= tswapal(sel
->exp
);
7031 tvp
= tswapal(sel
->tvp
);
7032 unlock_user_struct(sel
, arg1
, 0);
7033 ret
= do_select(nsel
, inp
, outp
, exp
, tvp
);
7038 #ifdef TARGET_NR_pselect6
7039 case TARGET_NR_pselect6
:
7041 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
7042 fd_set rfds
, wfds
, efds
;
7043 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
7044 struct timespec ts
, *ts_ptr
;
7047 * The 6th arg is actually two args smashed together,
7048 * so we cannot use the C library.
7056 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
7057 target_sigset_t
*target_sigset
;
7065 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
7069 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
7073 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
7079 * This takes a timespec, and not a timeval, so we cannot
7080 * use the do_select() helper ...
7083 if (target_to_host_timespec(&ts
, ts_addr
)) {
7091 /* Extract the two packed args for the sigset */
7094 sig
.size
= _NSIG
/ 8;
7096 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
7100 arg_sigset
= tswapal(arg7
[0]);
7101 arg_sigsize
= tswapal(arg7
[1]);
7102 unlock_user(arg7
, arg6
, 0);
7106 if (arg_sigsize
!= sizeof(*target_sigset
)) {
7107 /* Like the kernel, we enforce correct size sigsets */
7108 ret
= -TARGET_EINVAL
;
7111 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
7112 sizeof(*target_sigset
), 1);
7113 if (!target_sigset
) {
7116 target_to_host_sigset(&set
, target_sigset
);
7117 unlock_user(target_sigset
, arg_sigset
, 0);
7125 ret
= get_errno(sys_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
7128 if (!is_error(ret
)) {
7129 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
7131 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
7133 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
7136 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
7142 #ifdef TARGET_NR_symlink
7143 case TARGET_NR_symlink
:
7146 p
= lock_user_string(arg1
);
7147 p2
= lock_user_string(arg2
);
7149 ret
= -TARGET_EFAULT
;
7151 ret
= get_errno(symlink(p
, p2
));
7152 unlock_user(p2
, arg2
, 0);
7153 unlock_user(p
, arg1
, 0);
7157 #if defined(TARGET_NR_symlinkat)
7158 case TARGET_NR_symlinkat
:
7161 p
= lock_user_string(arg1
);
7162 p2
= lock_user_string(arg3
);
7164 ret
= -TARGET_EFAULT
;
7166 ret
= get_errno(symlinkat(p
, arg2
, p2
));
7167 unlock_user(p2
, arg3
, 0);
7168 unlock_user(p
, arg1
, 0);
7172 #ifdef TARGET_NR_oldlstat
7173 case TARGET_NR_oldlstat
:
7176 #ifdef TARGET_NR_readlink
7177 case TARGET_NR_readlink
:
7180 p
= lock_user_string(arg1
);
7181 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
7183 ret
= -TARGET_EFAULT
;
7185 /* Short circuit this for the magic exe check. */
7186 ret
= -TARGET_EINVAL
;
7187 } else if (is_proc_myself((const char *)p
, "exe")) {
7188 char real
[PATH_MAX
], *temp
;
7189 temp
= realpath(exec_path
, real
);
7190 /* Return value is # of bytes that we wrote to the buffer. */
7192 ret
= get_errno(-1);
7194 /* Don't worry about sign mismatch as earlier mapping
7195 * logic would have thrown a bad address error. */
7196 ret
= MIN(strlen(real
), arg3
);
7197 /* We cannot NUL terminate the string. */
7198 memcpy(p2
, real
, ret
);
7201 ret
= get_errno(readlink(path(p
), p2
, arg3
));
7203 unlock_user(p2
, arg2
, ret
);
7204 unlock_user(p
, arg1
, 0);
7208 #if defined(TARGET_NR_readlinkat)
7209 case TARGET_NR_readlinkat
:
7212 p
= lock_user_string(arg2
);
7213 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
7215 ret
= -TARGET_EFAULT
;
7216 } else if (is_proc_myself((const char *)p
, "exe")) {
7217 char real
[PATH_MAX
], *temp
;
7218 temp
= realpath(exec_path
, real
);
7219 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
7220 snprintf((char *)p2
, arg4
, "%s", real
);
7222 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
7224 unlock_user(p2
, arg3
, ret
);
7225 unlock_user(p
, arg2
, 0);
7229 #ifdef TARGET_NR_uselib
7230 case TARGET_NR_uselib
:
7233 #ifdef TARGET_NR_swapon
7234 case TARGET_NR_swapon
:
7235 if (!(p
= lock_user_string(arg1
)))
7237 ret
= get_errno(swapon(p
, arg2
));
7238 unlock_user(p
, arg1
, 0);
7241 case TARGET_NR_reboot
:
7242 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
7243 /* arg4 must be ignored in all other cases */
7244 p
= lock_user_string(arg4
);
7248 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
7249 unlock_user(p
, arg4
, 0);
7251 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
7254 #ifdef TARGET_NR_readdir
7255 case TARGET_NR_readdir
:
7258 #ifdef TARGET_NR_mmap
7259 case TARGET_NR_mmap
:
7260 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7261 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
7262 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
7263 || defined(TARGET_S390X)
7266 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
7267 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
7275 unlock_user(v
, arg1
, 0);
7276 ret
= get_errno(target_mmap(v1
, v2
, v3
,
7277 target_to_host_bitmask(v4
, mmap_flags_tbl
),
7281 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
7282 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
7288 #ifdef TARGET_NR_mmap2
7289 case TARGET_NR_mmap2
:
7291 #define MMAP_SHIFT 12
7293 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
7294 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
7296 arg6
<< MMAP_SHIFT
));
7299 case TARGET_NR_munmap
:
7300 ret
= get_errno(target_munmap(arg1
, arg2
));
7302 case TARGET_NR_mprotect
:
7304 TaskState
*ts
= cpu
->opaque
;
7305 /* Special hack to detect libc making the stack executable. */
7306 if ((arg3
& PROT_GROWSDOWN
)
7307 && arg1
>= ts
->info
->stack_limit
7308 && arg1
<= ts
->info
->start_stack
) {
7309 arg3
&= ~PROT_GROWSDOWN
;
7310 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
7311 arg1
= ts
->info
->stack_limit
;
7314 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
7316 #ifdef TARGET_NR_mremap
7317 case TARGET_NR_mremap
:
7318 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
7321 /* ??? msync/mlock/munlock are broken for softmmu. */
7322 #ifdef TARGET_NR_msync
7323 case TARGET_NR_msync
:
7324 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
7327 #ifdef TARGET_NR_mlock
7328 case TARGET_NR_mlock
:
7329 ret
= get_errno(mlock(g2h(arg1
), arg2
));
7332 #ifdef TARGET_NR_munlock
7333 case TARGET_NR_munlock
:
7334 ret
= get_errno(munlock(g2h(arg1
), arg2
));
7337 #ifdef TARGET_NR_mlockall
7338 case TARGET_NR_mlockall
:
7339 ret
= get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
7342 #ifdef TARGET_NR_munlockall
7343 case TARGET_NR_munlockall
:
7344 ret
= get_errno(munlockall());
7347 case TARGET_NR_truncate
:
7348 if (!(p
= lock_user_string(arg1
)))
7350 ret
= get_errno(truncate(p
, arg2
));
7351 unlock_user(p
, arg1
, 0);
7353 case TARGET_NR_ftruncate
:
7354 ret
= get_errno(ftruncate(arg1
, arg2
));
7356 case TARGET_NR_fchmod
:
7357 ret
= get_errno(fchmod(arg1
, arg2
));
7359 #if defined(TARGET_NR_fchmodat)
7360 case TARGET_NR_fchmodat
:
7361 if (!(p
= lock_user_string(arg2
)))
7363 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
7364 unlock_user(p
, arg2
, 0);
7367 case TARGET_NR_getpriority
:
7368 /* Note that negative values are valid for getpriority, so we must
7369 differentiate based on errno settings. */
7371 ret
= getpriority(arg1
, arg2
);
7372 if (ret
== -1 && errno
!= 0) {
7373 ret
= -host_to_target_errno(errno
);
7377 /* Return value is the unbiased priority. Signal no error. */
7378 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
7380 /* Return value is a biased priority to avoid negative numbers. */
7384 case TARGET_NR_setpriority
:
7385 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
7387 #ifdef TARGET_NR_profil
7388 case TARGET_NR_profil
:
7391 case TARGET_NR_statfs
:
7392 if (!(p
= lock_user_string(arg1
)))
7394 ret
= get_errno(statfs(path(p
), &stfs
));
7395 unlock_user(p
, arg1
, 0);
7397 if (!is_error(ret
)) {
7398 struct target_statfs
*target_stfs
;
7400 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
7402 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
7403 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
7404 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
7405 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
7406 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
7407 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
7408 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
7409 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
7410 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
7411 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
7412 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
7413 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
7414 unlock_user_struct(target_stfs
, arg2
, 1);
7417 case TARGET_NR_fstatfs
:
7418 ret
= get_errno(fstatfs(arg1
, &stfs
));
7419 goto convert_statfs
;
7420 #ifdef TARGET_NR_statfs64
7421 case TARGET_NR_statfs64
:
7422 if (!(p
= lock_user_string(arg1
)))
7424 ret
= get_errno(statfs(path(p
), &stfs
));
7425 unlock_user(p
, arg1
, 0);
7427 if (!is_error(ret
)) {
7428 struct target_statfs64
*target_stfs
;
7430 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
7432 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
7433 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
7434 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
7435 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
7436 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
7437 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
7438 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
7439 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
7440 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
7441 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
7442 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
7443 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
7444 unlock_user_struct(target_stfs
, arg3
, 1);
7447 case TARGET_NR_fstatfs64
:
7448 ret
= get_errno(fstatfs(arg1
, &stfs
));
7449 goto convert_statfs64
;
7451 #ifdef TARGET_NR_ioperm
7452 case TARGET_NR_ioperm
:
7455 #ifdef TARGET_NR_socketcall
7456 case TARGET_NR_socketcall
:
7457 ret
= do_socketcall(arg1
, arg2
);
7460 #ifdef TARGET_NR_accept
7461 case TARGET_NR_accept
:
7462 ret
= do_accept4(arg1
, arg2
, arg3
, 0);
7465 #ifdef TARGET_NR_accept4
7466 case TARGET_NR_accept4
:
7467 #ifdef CONFIG_ACCEPT4
7468 ret
= do_accept4(arg1
, arg2
, arg3
, arg4
);
7474 #ifdef TARGET_NR_bind
7475 case TARGET_NR_bind
:
7476 ret
= do_bind(arg1
, arg2
, arg3
);
7479 #ifdef TARGET_NR_connect
7480 case TARGET_NR_connect
:
7481 ret
= do_connect(arg1
, arg2
, arg3
);
7484 #ifdef TARGET_NR_getpeername
7485 case TARGET_NR_getpeername
:
7486 ret
= do_getpeername(arg1
, arg2
, arg3
);
7489 #ifdef TARGET_NR_getsockname
7490 case TARGET_NR_getsockname
:
7491 ret
= do_getsockname(arg1
, arg2
, arg3
);
7494 #ifdef TARGET_NR_getsockopt
7495 case TARGET_NR_getsockopt
:
7496 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
7499 #ifdef TARGET_NR_listen
7500 case TARGET_NR_listen
:
7501 ret
= get_errno(listen(arg1
, arg2
));
7504 #ifdef TARGET_NR_recv
7505 case TARGET_NR_recv
:
7506 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
7509 #ifdef TARGET_NR_recvfrom
7510 case TARGET_NR_recvfrom
:
7511 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7514 #ifdef TARGET_NR_recvmsg
7515 case TARGET_NR_recvmsg
:
7516 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
7519 #ifdef TARGET_NR_send
7520 case TARGET_NR_send
:
7521 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
7524 #ifdef TARGET_NR_sendmsg
7525 case TARGET_NR_sendmsg
:
7526 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
7529 #ifdef TARGET_NR_sendmmsg
7530 case TARGET_NR_sendmmsg
:
7531 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
7533 case TARGET_NR_recvmmsg
:
7534 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
7537 #ifdef TARGET_NR_sendto
7538 case TARGET_NR_sendto
:
7539 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7542 #ifdef TARGET_NR_shutdown
7543 case TARGET_NR_shutdown
:
7544 ret
= get_errno(shutdown(arg1
, arg2
));
7547 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
7548 case TARGET_NR_getrandom
:
7549 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
7553 ret
= get_errno(getrandom(p
, arg2
, arg3
));
7554 unlock_user(p
, arg1
, ret
);
7557 #ifdef TARGET_NR_socket
7558 case TARGET_NR_socket
:
7559 ret
= do_socket(arg1
, arg2
, arg3
);
7560 fd_trans_unregister(ret
);
7563 #ifdef TARGET_NR_socketpair
7564 case TARGET_NR_socketpair
:
7565 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
7568 #ifdef TARGET_NR_setsockopt
7569 case TARGET_NR_setsockopt
:
7570 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
7574 case TARGET_NR_syslog
:
7575 if (!(p
= lock_user_string(arg2
)))
7577 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
7578 unlock_user(p
, arg2
, 0);
7581 case TARGET_NR_setitimer
:
7583 struct itimerval value
, ovalue
, *pvalue
;
7587 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
7588 || copy_from_user_timeval(&pvalue
->it_value
,
7589 arg2
+ sizeof(struct target_timeval
)))
7594 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
7595 if (!is_error(ret
) && arg3
) {
7596 if (copy_to_user_timeval(arg3
,
7597 &ovalue
.it_interval
)
7598 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
7604 case TARGET_NR_getitimer
:
7606 struct itimerval value
;
7608 ret
= get_errno(getitimer(arg1
, &value
));
7609 if (!is_error(ret
) && arg2
) {
7610 if (copy_to_user_timeval(arg2
,
7612 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
7618 #ifdef TARGET_NR_stat
7619 case TARGET_NR_stat
:
7620 if (!(p
= lock_user_string(arg1
)))
7622 ret
= get_errno(stat(path(p
), &st
));
7623 unlock_user(p
, arg1
, 0);
7626 #ifdef TARGET_NR_lstat
7627 case TARGET_NR_lstat
:
7628 if (!(p
= lock_user_string(arg1
)))
7630 ret
= get_errno(lstat(path(p
), &st
));
7631 unlock_user(p
, arg1
, 0);
7634 case TARGET_NR_fstat
:
7636 ret
= get_errno(fstat(arg1
, &st
));
7637 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
7640 if (!is_error(ret
)) {
7641 struct target_stat
*target_st
;
7643 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
7645 memset(target_st
, 0, sizeof(*target_st
));
7646 __put_user(st
.st_dev
, &target_st
->st_dev
);
7647 __put_user(st
.st_ino
, &target_st
->st_ino
);
7648 __put_user(st
.st_mode
, &target_st
->st_mode
);
7649 __put_user(st
.st_uid
, &target_st
->st_uid
);
7650 __put_user(st
.st_gid
, &target_st
->st_gid
);
7651 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
7652 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
7653 __put_user(st
.st_size
, &target_st
->st_size
);
7654 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
7655 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
7656 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
7657 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
7658 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
7659 unlock_user_struct(target_st
, arg2
, 1);
7663 #ifdef TARGET_NR_olduname
7664 case TARGET_NR_olduname
:
7667 #ifdef TARGET_NR_iopl
7668 case TARGET_NR_iopl
:
7671 case TARGET_NR_vhangup
:
7672 ret
= get_errno(vhangup());
7674 #ifdef TARGET_NR_idle
7675 case TARGET_NR_idle
:
7678 #ifdef TARGET_NR_syscall
7679 case TARGET_NR_syscall
:
7680 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
7681 arg6
, arg7
, arg8
, 0);
7684 case TARGET_NR_wait4
:
7687 abi_long status_ptr
= arg2
;
7688 struct rusage rusage
, *rusage_ptr
;
7689 abi_ulong target_rusage
= arg4
;
7690 abi_long rusage_err
;
7692 rusage_ptr
= &rusage
;
7695 ret
= get_errno(wait4(arg1
, &status
, arg3
, rusage_ptr
));
7696 if (!is_error(ret
)) {
7697 if (status_ptr
&& ret
) {
7698 status
= host_to_target_waitstatus(status
);
7699 if (put_user_s32(status
, status_ptr
))
7702 if (target_rusage
) {
7703 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
7711 #ifdef TARGET_NR_swapoff
7712 case TARGET_NR_swapoff
:
7713 if (!(p
= lock_user_string(arg1
)))
7715 ret
= get_errno(swapoff(p
));
7716 unlock_user(p
, arg1
, 0);
7719 case TARGET_NR_sysinfo
:
7721 struct target_sysinfo
*target_value
;
7722 struct sysinfo value
;
7723 ret
= get_errno(sysinfo(&value
));
7724 if (!is_error(ret
) && arg1
)
7726 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
7728 __put_user(value
.uptime
, &target_value
->uptime
);
7729 __put_user(value
.loads
[0], &target_value
->loads
[0]);
7730 __put_user(value
.loads
[1], &target_value
->loads
[1]);
7731 __put_user(value
.loads
[2], &target_value
->loads
[2]);
7732 __put_user(value
.totalram
, &target_value
->totalram
);
7733 __put_user(value
.freeram
, &target_value
->freeram
);
7734 __put_user(value
.sharedram
, &target_value
->sharedram
);
7735 __put_user(value
.bufferram
, &target_value
->bufferram
);
7736 __put_user(value
.totalswap
, &target_value
->totalswap
);
7737 __put_user(value
.freeswap
, &target_value
->freeswap
);
7738 __put_user(value
.procs
, &target_value
->procs
);
7739 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
7740 __put_user(value
.freehigh
, &target_value
->freehigh
);
7741 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
7742 unlock_user_struct(target_value
, arg1
, 1);
7746 #ifdef TARGET_NR_ipc
7748 ret
= do_ipc(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7751 #ifdef TARGET_NR_semget
7752 case TARGET_NR_semget
:
7753 ret
= get_errno(semget(arg1
, arg2
, arg3
));
7756 #ifdef TARGET_NR_semop
7757 case TARGET_NR_semop
:
7758 ret
= do_semop(arg1
, arg2
, arg3
);
7761 #ifdef TARGET_NR_semctl
7762 case TARGET_NR_semctl
:
7763 ret
= do_semctl(arg1
, arg2
, arg3
, arg4
);
7766 #ifdef TARGET_NR_msgctl
7767 case TARGET_NR_msgctl
:
7768 ret
= do_msgctl(arg1
, arg2
, arg3
);
7771 #ifdef TARGET_NR_msgget
7772 case TARGET_NR_msgget
:
7773 ret
= get_errno(msgget(arg1
, arg2
));
7776 #ifdef TARGET_NR_msgrcv
7777 case TARGET_NR_msgrcv
:
7778 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
7781 #ifdef TARGET_NR_msgsnd
7782 case TARGET_NR_msgsnd
:
7783 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
7786 #ifdef TARGET_NR_shmget
7787 case TARGET_NR_shmget
:
7788 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
7791 #ifdef TARGET_NR_shmctl
7792 case TARGET_NR_shmctl
:
7793 ret
= do_shmctl(arg1
, arg2
, arg3
);
7796 #ifdef TARGET_NR_shmat
7797 case TARGET_NR_shmat
:
7798 ret
= do_shmat(arg1
, arg2
, arg3
);
7801 #ifdef TARGET_NR_shmdt
7802 case TARGET_NR_shmdt
:
7803 ret
= do_shmdt(arg1
);
7806 case TARGET_NR_fsync
:
7807 ret
= get_errno(fsync(arg1
));
7809 case TARGET_NR_clone
:
7810 /* Linux manages to have three different orderings for its
7811 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
7812 * match the kernel's CONFIG_CLONE_* settings.
7813 * Microblaze is further special in that it uses a sixth
7814 * implicit argument to clone for the TLS pointer.
7816 #if defined(TARGET_MICROBLAZE)
7817 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
7818 #elif defined(TARGET_CLONE_BACKWARDS)
7819 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
7820 #elif defined(TARGET_CLONE_BACKWARDS2)
7821 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
7823 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
7826 #ifdef __NR_exit_group
7827 /* new thread calls */
7828 case TARGET_NR_exit_group
:
7832 gdb_exit(cpu_env
, arg1
);
7833 ret
= get_errno(exit_group(arg1
));
7836 case TARGET_NR_setdomainname
:
7837 if (!(p
= lock_user_string(arg1
)))
7839 ret
= get_errno(setdomainname(p
, arg2
));
7840 unlock_user(p
, arg1
, 0);
7842 case TARGET_NR_uname
:
7843 /* no need to transcode because we use the linux syscall */
7845 struct new_utsname
* buf
;
7847 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
7849 ret
= get_errno(sys_uname(buf
));
7850 if (!is_error(ret
)) {
7851 /* Overwrite the native machine name with whatever is being
7853 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
7854 /* Allow the user to override the reported release. */
7855 if (qemu_uname_release
&& *qemu_uname_release
)
7856 strcpy (buf
->release
, qemu_uname_release
);
7858 unlock_user_struct(buf
, arg1
, 1);
7862 case TARGET_NR_modify_ldt
:
7863 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
7865 #if !defined(TARGET_X86_64)
7866 case TARGET_NR_vm86old
:
7868 case TARGET_NR_vm86
:
7869 ret
= do_vm86(cpu_env
, arg1
, arg2
);
7873 case TARGET_NR_adjtimex
:
7875 #ifdef TARGET_NR_create_module
7876 case TARGET_NR_create_module
:
7878 case TARGET_NR_init_module
:
7879 case TARGET_NR_delete_module
:
7880 #ifdef TARGET_NR_get_kernel_syms
7881 case TARGET_NR_get_kernel_syms
:
7884 case TARGET_NR_quotactl
:
7886 case TARGET_NR_getpgid
:
7887 ret
= get_errno(getpgid(arg1
));
7889 case TARGET_NR_fchdir
:
7890 ret
= get_errno(fchdir(arg1
));
7892 #ifdef TARGET_NR_bdflush /* not on x86_64 */
7893 case TARGET_NR_bdflush
:
7896 #ifdef TARGET_NR_sysfs
7897 case TARGET_NR_sysfs
:
7900 case TARGET_NR_personality
:
7901 ret
= get_errno(personality(arg1
));
7903 #ifdef TARGET_NR_afs_syscall
7904 case TARGET_NR_afs_syscall
:
7907 #ifdef TARGET_NR__llseek /* Not on alpha */
7908 case TARGET_NR__llseek
:
7911 #if !defined(__NR_llseek)
7912 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | arg3
, arg5
);
7914 ret
= get_errno(res
);
7919 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
7921 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
7927 #ifdef TARGET_NR_getdents
7928 case TARGET_NR_getdents
:
7929 #ifdef __NR_getdents
7930 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
7932 struct target_dirent
*target_dirp
;
7933 struct linux_dirent
*dirp
;
7934 abi_long count
= arg3
;
7936 dirp
= g_try_malloc(count
);
7938 ret
= -TARGET_ENOMEM
;
7942 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
7943 if (!is_error(ret
)) {
7944 struct linux_dirent
*de
;
7945 struct target_dirent
*tde
;
7947 int reclen
, treclen
;
7948 int count1
, tnamelen
;
7952 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
7956 reclen
= de
->d_reclen
;
7957 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
7958 assert(tnamelen
>= 0);
7959 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
7960 assert(count1
+ treclen
<= count
);
7961 tde
->d_reclen
= tswap16(treclen
);
7962 tde
->d_ino
= tswapal(de
->d_ino
);
7963 tde
->d_off
= tswapal(de
->d_off
);
7964 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
7965 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
7967 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
7971 unlock_user(target_dirp
, arg2
, ret
);
7977 struct linux_dirent
*dirp
;
7978 abi_long count
= arg3
;
7980 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
7982 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
7983 if (!is_error(ret
)) {
7984 struct linux_dirent
*de
;
7989 reclen
= de
->d_reclen
;
7992 de
->d_reclen
= tswap16(reclen
);
7993 tswapls(&de
->d_ino
);
7994 tswapls(&de
->d_off
);
7995 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
7999 unlock_user(dirp
, arg2
, ret
);
8003 /* Implement getdents in terms of getdents64 */
8005 struct linux_dirent64
*dirp
;
8006 abi_long count
= arg3
;
8008 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
8012 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
8013 if (!is_error(ret
)) {
8014 /* Convert the dirent64 structs to target dirent. We do this
8015 * in-place, since we can guarantee that a target_dirent is no
8016 * larger than a dirent64; however this means we have to be
8017 * careful to read everything before writing in the new format.
8019 struct linux_dirent64
*de
;
8020 struct target_dirent
*tde
;
8025 tde
= (struct target_dirent
*)dirp
;
8027 int namelen
, treclen
;
8028 int reclen
= de
->d_reclen
;
8029 uint64_t ino
= de
->d_ino
;
8030 int64_t off
= de
->d_off
;
8031 uint8_t type
= de
->d_type
;
8033 namelen
= strlen(de
->d_name
);
8034 treclen
= offsetof(struct target_dirent
, d_name
)
8036 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
8038 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
8039 tde
->d_ino
= tswapal(ino
);
8040 tde
->d_off
= tswapal(off
);
8041 tde
->d_reclen
= tswap16(treclen
);
8042 /* The target_dirent type is in what was formerly a padding
8043 * byte at the end of the structure:
8045 *(((char *)tde
) + treclen
- 1) = type
;
8047 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
8048 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
8054 unlock_user(dirp
, arg2
, ret
);
8058 #endif /* TARGET_NR_getdents */
8059 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8060 case TARGET_NR_getdents64
:
8062 struct linux_dirent64
*dirp
;
8063 abi_long count
= arg3
;
8064 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
8066 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
8067 if (!is_error(ret
)) {
8068 struct linux_dirent64
*de
;
8073 reclen
= de
->d_reclen
;
8076 de
->d_reclen
= tswap16(reclen
);
8077 tswap64s((uint64_t *)&de
->d_ino
);
8078 tswap64s((uint64_t *)&de
->d_off
);
8079 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
8083 unlock_user(dirp
, arg2
, ret
);
8086 #endif /* TARGET_NR_getdents64 */
8087 #if defined(TARGET_NR__newselect)
8088 case TARGET_NR__newselect
:
8089 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
8092 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
8093 # ifdef TARGET_NR_poll
8094 case TARGET_NR_poll
:
8096 # ifdef TARGET_NR_ppoll
8097 case TARGET_NR_ppoll
:
8100 struct target_pollfd
*target_pfd
;
8101 unsigned int nfds
= arg2
;
8109 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
8110 sizeof(struct target_pollfd
) * nfds
, 1);
8115 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
8116 for (i
= 0; i
< nfds
; i
++) {
8117 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
8118 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
8122 # ifdef TARGET_NR_ppoll
8123 if (num
== TARGET_NR_ppoll
) {
8124 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
8125 target_sigset_t
*target_set
;
8126 sigset_t _set
, *set
= &_set
;
8129 if (target_to_host_timespec(timeout_ts
, arg3
)) {
8130 unlock_user(target_pfd
, arg1
, 0);
8138 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
8140 unlock_user(target_pfd
, arg1
, 0);
8143 target_to_host_sigset(set
, target_set
);
8148 ret
= get_errno(sys_ppoll(pfd
, nfds
, timeout_ts
, set
, _NSIG
/8));
8150 if (!is_error(ret
) && arg3
) {
8151 host_to_target_timespec(arg3
, timeout_ts
);
8154 unlock_user(target_set
, arg4
, 0);
8158 ret
= get_errno(poll(pfd
, nfds
, timeout
));
8160 if (!is_error(ret
)) {
8161 for(i
= 0; i
< nfds
; i
++) {
8162 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
8165 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
8169 case TARGET_NR_flock
:
8170 /* NOTE: the flock constant seems to be the same for every
8172 ret
= get_errno(flock(arg1
, arg2
));
8174 case TARGET_NR_readv
:
8176 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
8178 ret
= get_errno(readv(arg1
, vec
, arg3
));
8179 unlock_iovec(vec
, arg2
, arg3
, 1);
8181 ret
= -host_to_target_errno(errno
);
8185 case TARGET_NR_writev
:
8187 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
8189 ret
= get_errno(writev(arg1
, vec
, arg3
));
8190 unlock_iovec(vec
, arg2
, arg3
, 0);
8192 ret
= -host_to_target_errno(errno
);
8196 case TARGET_NR_getsid
:
8197 ret
= get_errno(getsid(arg1
));
8199 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
8200 case TARGET_NR_fdatasync
:
8201 ret
= get_errno(fdatasync(arg1
));
8204 #ifdef TARGET_NR__sysctl
8205 case TARGET_NR__sysctl
:
8206 /* We don't implement this, but ENOTDIR is always a safe
8208 ret
= -TARGET_ENOTDIR
;
8211 case TARGET_NR_sched_getaffinity
:
8213 unsigned int mask_size
;
8214 unsigned long *mask
;
8217 * sched_getaffinity needs multiples of ulong, so need to take
8218 * care of mismatches between target ulong and host ulong sizes.
8220 if (arg2
& (sizeof(abi_ulong
) - 1)) {
8221 ret
= -TARGET_EINVAL
;
8224 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
8226 mask
= alloca(mask_size
);
8227 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
8229 if (!is_error(ret
)) {
8231 /* More data returned than the caller's buffer will fit.
8232 * This only happens if sizeof(abi_long) < sizeof(long)
8233 * and the caller passed us a buffer holding an odd number
8234 * of abi_longs. If the host kernel is actually using the
8235 * extra 4 bytes then fail EINVAL; otherwise we can just
8236 * ignore them and only copy the interesting part.
8238 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
8239 if (numcpus
> arg2
* 8) {
8240 ret
= -TARGET_EINVAL
;
8246 if (copy_to_user(arg3
, mask
, ret
)) {
8252 case TARGET_NR_sched_setaffinity
:
8254 unsigned int mask_size
;
8255 unsigned long *mask
;
8258 * sched_setaffinity needs multiples of ulong, so need to take
8259 * care of mismatches between target ulong and host ulong sizes.
8261 if (arg2
& (sizeof(abi_ulong
) - 1)) {
8262 ret
= -TARGET_EINVAL
;
8265 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
8267 mask
= alloca(mask_size
);
8268 if (!lock_user_struct(VERIFY_READ
, p
, arg3
, 1)) {
8271 memcpy(mask
, p
, arg2
);
8272 unlock_user_struct(p
, arg2
, 0);
8274 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
8277 case TARGET_NR_sched_setparam
:
8279 struct sched_param
*target_schp
;
8280 struct sched_param schp
;
8283 return -TARGET_EINVAL
;
8285 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
8287 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
8288 unlock_user_struct(target_schp
, arg2
, 0);
8289 ret
= get_errno(sched_setparam(arg1
, &schp
));
8292 case TARGET_NR_sched_getparam
:
8294 struct sched_param
*target_schp
;
8295 struct sched_param schp
;
8298 return -TARGET_EINVAL
;
8300 ret
= get_errno(sched_getparam(arg1
, &schp
));
8301 if (!is_error(ret
)) {
8302 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
8304 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
8305 unlock_user_struct(target_schp
, arg2
, 1);
8309 case TARGET_NR_sched_setscheduler
:
8311 struct sched_param
*target_schp
;
8312 struct sched_param schp
;
8314 return -TARGET_EINVAL
;
8316 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
8318 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
8319 unlock_user_struct(target_schp
, arg3
, 0);
8320 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
8323 case TARGET_NR_sched_getscheduler
:
8324 ret
= get_errno(sched_getscheduler(arg1
));
8326 case TARGET_NR_sched_yield
:
8327 ret
= get_errno(sched_yield());
8329 case TARGET_NR_sched_get_priority_max
:
8330 ret
= get_errno(sched_get_priority_max(arg1
));
8332 case TARGET_NR_sched_get_priority_min
:
8333 ret
= get_errno(sched_get_priority_min(arg1
));
8335 case TARGET_NR_sched_rr_get_interval
:
8338 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
8339 if (!is_error(ret
)) {
8340 ret
= host_to_target_timespec(arg2
, &ts
);
8344 case TARGET_NR_nanosleep
:
8346 struct timespec req
, rem
;
8347 target_to_host_timespec(&req
, arg1
);
8348 ret
= get_errno(nanosleep(&req
, &rem
));
8349 if (is_error(ret
) && arg2
) {
8350 host_to_target_timespec(arg2
, &rem
);
8354 #ifdef TARGET_NR_query_module
8355 case TARGET_NR_query_module
:
8358 #ifdef TARGET_NR_nfsservctl
8359 case TARGET_NR_nfsservctl
:
8362 case TARGET_NR_prctl
:
8364 case PR_GET_PDEATHSIG
:
8367 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
8368 if (!is_error(ret
) && arg2
8369 && put_user_ual(deathsig
, arg2
)) {
8377 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
8381 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
8383 unlock_user(name
, arg2
, 16);
8388 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
8392 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
8394 unlock_user(name
, arg2
, 0);
8399 /* Most prctl options have no pointer arguments */
8400 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
8404 #ifdef TARGET_NR_arch_prctl
8405 case TARGET_NR_arch_prctl
:
8406 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
8407 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
8413 #ifdef TARGET_NR_pread64
8414 case TARGET_NR_pread64
:
8415 if (regpairs_aligned(cpu_env
)) {
8419 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
8421 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
8422 unlock_user(p
, arg2
, ret
);
8424 case TARGET_NR_pwrite64
:
8425 if (regpairs_aligned(cpu_env
)) {
8429 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
8431 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
8432 unlock_user(p
, arg2
, 0);
8435 case TARGET_NR_getcwd
:
8436 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
8438 ret
= get_errno(sys_getcwd1(p
, arg2
));
8439 unlock_user(p
, arg1
, ret
);
8441 case TARGET_NR_capget
:
8442 case TARGET_NR_capset
:
8444 struct target_user_cap_header
*target_header
;
8445 struct target_user_cap_data
*target_data
= NULL
;
8446 struct __user_cap_header_struct header
;
8447 struct __user_cap_data_struct data
[2];
8448 struct __user_cap_data_struct
*dataptr
= NULL
;
8449 int i
, target_datalen
;
8452 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
8455 header
.version
= tswap32(target_header
->version
);
8456 header
.pid
= tswap32(target_header
->pid
);
8458 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
8459 /* Version 2 and up takes pointer to two user_data structs */
8463 target_datalen
= sizeof(*target_data
) * data_items
;
8466 if (num
== TARGET_NR_capget
) {
8467 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
8469 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
8472 unlock_user_struct(target_header
, arg1
, 0);
8476 if (num
== TARGET_NR_capset
) {
8477 for (i
= 0; i
< data_items
; i
++) {
8478 data
[i
].effective
= tswap32(target_data
[i
].effective
);
8479 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
8480 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
8487 if (num
== TARGET_NR_capget
) {
8488 ret
= get_errno(capget(&header
, dataptr
));
8490 ret
= get_errno(capset(&header
, dataptr
));
8493 /* The kernel always updates version for both capget and capset */
8494 target_header
->version
= tswap32(header
.version
);
8495 unlock_user_struct(target_header
, arg1
, 1);
8498 if (num
== TARGET_NR_capget
) {
8499 for (i
= 0; i
< data_items
; i
++) {
8500 target_data
[i
].effective
= tswap32(data
[i
].effective
);
8501 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
8502 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
8504 unlock_user(target_data
, arg2
, target_datalen
);
8506 unlock_user(target_data
, arg2
, 0);
8511 case TARGET_NR_sigaltstack
:
8512 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUArchState
*)cpu_env
));
8515 #ifdef CONFIG_SENDFILE
8516 case TARGET_NR_sendfile
:
8521 ret
= get_user_sal(off
, arg3
);
8522 if (is_error(ret
)) {
8527 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
8528 if (!is_error(ret
) && arg3
) {
8529 abi_long ret2
= put_user_sal(off
, arg3
);
8530 if (is_error(ret2
)) {
8536 #ifdef TARGET_NR_sendfile64
8537 case TARGET_NR_sendfile64
:
8542 ret
= get_user_s64(off
, arg3
);
8543 if (is_error(ret
)) {
8548 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
8549 if (!is_error(ret
) && arg3
) {
8550 abi_long ret2
= put_user_s64(off
, arg3
);
8551 if (is_error(ret2
)) {
8559 case TARGET_NR_sendfile
:
8560 #ifdef TARGET_NR_sendfile64
8561 case TARGET_NR_sendfile64
:
8566 #ifdef TARGET_NR_getpmsg
8567 case TARGET_NR_getpmsg
:
8570 #ifdef TARGET_NR_putpmsg
8571 case TARGET_NR_putpmsg
:
8574 #ifdef TARGET_NR_vfork
8575 case TARGET_NR_vfork
:
8576 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
8580 #ifdef TARGET_NR_ugetrlimit
8581 case TARGET_NR_ugetrlimit
:
8584 int resource
= target_to_host_resource(arg1
);
8585 ret
= get_errno(getrlimit(resource
, &rlim
));
8586 if (!is_error(ret
)) {
8587 struct target_rlimit
*target_rlim
;
8588 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
8590 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
8591 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
8592 unlock_user_struct(target_rlim
, arg2
, 1);
8597 #ifdef TARGET_NR_truncate64
8598 case TARGET_NR_truncate64
:
8599 if (!(p
= lock_user_string(arg1
)))
8601 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
8602 unlock_user(p
, arg1
, 0);
8605 #ifdef TARGET_NR_ftruncate64
8606 case TARGET_NR_ftruncate64
:
8607 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
8610 #ifdef TARGET_NR_stat64
8611 case TARGET_NR_stat64
:
8612 if (!(p
= lock_user_string(arg1
)))
8614 ret
= get_errno(stat(path(p
), &st
));
8615 unlock_user(p
, arg1
, 0);
8617 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
8620 #ifdef TARGET_NR_lstat64
8621 case TARGET_NR_lstat64
:
8622 if (!(p
= lock_user_string(arg1
)))
8624 ret
= get_errno(lstat(path(p
), &st
));
8625 unlock_user(p
, arg1
, 0);
8627 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
8630 #ifdef TARGET_NR_fstat64
8631 case TARGET_NR_fstat64
:
8632 ret
= get_errno(fstat(arg1
, &st
));
8634 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
8637 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
8638 #ifdef TARGET_NR_fstatat64
8639 case TARGET_NR_fstatat64
:
8641 #ifdef TARGET_NR_newfstatat
8642 case TARGET_NR_newfstatat
:
8644 if (!(p
= lock_user_string(arg2
)))
8646 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
8648 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
8651 #ifdef TARGET_NR_lchown
8652 case TARGET_NR_lchown
:
8653 if (!(p
= lock_user_string(arg1
)))
8655 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
8656 unlock_user(p
, arg1
, 0);
8659 #ifdef TARGET_NR_getuid
8660 case TARGET_NR_getuid
:
8661 ret
= get_errno(high2lowuid(getuid()));
8664 #ifdef TARGET_NR_getgid
8665 case TARGET_NR_getgid
:
8666 ret
= get_errno(high2lowgid(getgid()));
8669 #ifdef TARGET_NR_geteuid
8670 case TARGET_NR_geteuid
:
8671 ret
= get_errno(high2lowuid(geteuid()));
8674 #ifdef TARGET_NR_getegid
8675 case TARGET_NR_getegid
:
8676 ret
= get_errno(high2lowgid(getegid()));
8679 case TARGET_NR_setreuid
:
8680 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
8682 case TARGET_NR_setregid
:
8683 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
8685 case TARGET_NR_getgroups
:
8687 int gidsetsize
= arg1
;
8688 target_id
*target_grouplist
;
8692 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
8693 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
8694 if (gidsetsize
== 0)
8696 if (!is_error(ret
)) {
8697 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
8698 if (!target_grouplist
)
8700 for(i
= 0;i
< ret
; i
++)
8701 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
8702 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
8706 case TARGET_NR_setgroups
:
8708 int gidsetsize
= arg1
;
8709 target_id
*target_grouplist
;
8710 gid_t
*grouplist
= NULL
;
8713 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
8714 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
8715 if (!target_grouplist
) {
8716 ret
= -TARGET_EFAULT
;
8719 for (i
= 0; i
< gidsetsize
; i
++) {
8720 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
8722 unlock_user(target_grouplist
, arg2
, 0);
8724 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
8727 case TARGET_NR_fchown
:
8728 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
8730 #if defined(TARGET_NR_fchownat)
8731 case TARGET_NR_fchownat
:
8732 if (!(p
= lock_user_string(arg2
)))
8734 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
8735 low2highgid(arg4
), arg5
));
8736 unlock_user(p
, arg2
, 0);
8739 #ifdef TARGET_NR_setresuid
8740 case TARGET_NR_setresuid
:
8741 ret
= get_errno(setresuid(low2highuid(arg1
),
8743 low2highuid(arg3
)));
8746 #ifdef TARGET_NR_getresuid
8747 case TARGET_NR_getresuid
:
8749 uid_t ruid
, euid
, suid
;
8750 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
8751 if (!is_error(ret
)) {
8752 if (put_user_id(high2lowuid(ruid
), arg1
)
8753 || put_user_id(high2lowuid(euid
), arg2
)
8754 || put_user_id(high2lowuid(suid
), arg3
))
8760 #ifdef TARGET_NR_getresgid
8761 case TARGET_NR_setresgid
:
8762 ret
= get_errno(setresgid(low2highgid(arg1
),
8764 low2highgid(arg3
)));
8767 #ifdef TARGET_NR_getresgid
8768 case TARGET_NR_getresgid
:
8770 gid_t rgid
, egid
, sgid
;
8771 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
8772 if (!is_error(ret
)) {
8773 if (put_user_id(high2lowgid(rgid
), arg1
)
8774 || put_user_id(high2lowgid(egid
), arg2
)
8775 || put_user_id(high2lowgid(sgid
), arg3
))
8781 #ifdef TARGET_NR_chown
8782 case TARGET_NR_chown
:
8783 if (!(p
= lock_user_string(arg1
)))
8785 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
8786 unlock_user(p
, arg1
, 0);
8789 case TARGET_NR_setuid
:
8790 ret
= get_errno(setuid(low2highuid(arg1
)));
8792 case TARGET_NR_setgid
:
8793 ret
= get_errno(setgid(low2highgid(arg1
)));
8795 case TARGET_NR_setfsuid
:
8796 ret
= get_errno(setfsuid(arg1
));
8798 case TARGET_NR_setfsgid
:
8799 ret
= get_errno(setfsgid(arg1
));
8802 #ifdef TARGET_NR_lchown32
8803 case TARGET_NR_lchown32
:
8804 if (!(p
= lock_user_string(arg1
)))
8806 ret
= get_errno(lchown(p
, arg2
, arg3
));
8807 unlock_user(p
, arg1
, 0);
8810 #ifdef TARGET_NR_getuid32
8811 case TARGET_NR_getuid32
:
8812 ret
= get_errno(getuid());
8816 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
8817 /* Alpha specific */
8818 case TARGET_NR_getxuid
:
8822 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
8824 ret
= get_errno(getuid());
8827 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
8828 /* Alpha specific */
8829 case TARGET_NR_getxgid
:
8833 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
8835 ret
= get_errno(getgid());
8838 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
8839 /* Alpha specific */
8840 case TARGET_NR_osf_getsysinfo
:
8841 ret
= -TARGET_EOPNOTSUPP
;
8843 case TARGET_GSI_IEEE_FP_CONTROL
:
8845 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
8847 /* Copied from linux ieee_fpcr_to_swcr. */
8848 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
8849 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
8850 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
8851 | SWCR_TRAP_ENABLE_DZE
8852 | SWCR_TRAP_ENABLE_OVF
);
8853 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
8854 | SWCR_TRAP_ENABLE_INE
);
8855 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
8856 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
8858 if (put_user_u64 (swcr
, arg2
))
8864 /* case GSI_IEEE_STATE_AT_SIGNAL:
8865 -- Not implemented in linux kernel.
8867 -- Retrieves current unaligned access state; not much used.
8869 -- Retrieves implver information; surely not used.
8871 -- Grabs a copy of the HWRPB; surely not used.
8876 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
8877 /* Alpha specific */
8878 case TARGET_NR_osf_setsysinfo
:
8879 ret
= -TARGET_EOPNOTSUPP
;
8881 case TARGET_SSI_IEEE_FP_CONTROL
:
8883 uint64_t swcr
, fpcr
, orig_fpcr
;
8885 if (get_user_u64 (swcr
, arg2
)) {
8888 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
8889 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
8891 /* Copied from linux ieee_swcr_to_fpcr. */
8892 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
8893 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
8894 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
8895 | SWCR_TRAP_ENABLE_DZE
8896 | SWCR_TRAP_ENABLE_OVF
)) << 48;
8897 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
8898 | SWCR_TRAP_ENABLE_INE
)) << 57;
8899 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
8900 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
8902 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
8907 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
8909 uint64_t exc
, fpcr
, orig_fpcr
;
8912 if (get_user_u64(exc
, arg2
)) {
8916 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
8918 /* We only add to the exception status here. */
8919 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
8921 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
8924 /* Old exceptions are not signaled. */
8925 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
8927 /* If any exceptions set by this call,
8928 and are unmasked, send a signal. */
8930 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
8931 si_code
= TARGET_FPE_FLTRES
;
8933 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
8934 si_code
= TARGET_FPE_FLTUND
;
8936 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
8937 si_code
= TARGET_FPE_FLTOVF
;
8939 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
8940 si_code
= TARGET_FPE_FLTDIV
;
8942 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
8943 si_code
= TARGET_FPE_FLTINV
;
8946 target_siginfo_t info
;
8947 info
.si_signo
= SIGFPE
;
8949 info
.si_code
= si_code
;
8950 info
._sifields
._sigfault
._addr
8951 = ((CPUArchState
*)cpu_env
)->pc
;
8952 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
8957 /* case SSI_NVPAIRS:
8958 -- Used with SSIN_UACPROC to enable unaligned accesses.
8959 case SSI_IEEE_STATE_AT_SIGNAL:
8960 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
8961 -- Not implemented in linux kernel
8966 #ifdef TARGET_NR_osf_sigprocmask
8967 /* Alpha specific. */
8968 case TARGET_NR_osf_sigprocmask
:
8972 sigset_t set
, oldset
;
8975 case TARGET_SIG_BLOCK
:
8978 case TARGET_SIG_UNBLOCK
:
8981 case TARGET_SIG_SETMASK
:
8985 ret
= -TARGET_EINVAL
;
8989 target_to_host_old_sigset(&set
, &mask
);
8990 do_sigprocmask(how
, &set
, &oldset
);
8991 host_to_target_old_sigset(&mask
, &oldset
);
8997 #ifdef TARGET_NR_getgid32
8998 case TARGET_NR_getgid32
:
8999 ret
= get_errno(getgid());
9002 #ifdef TARGET_NR_geteuid32
9003 case TARGET_NR_geteuid32
:
9004 ret
= get_errno(geteuid());
9007 #ifdef TARGET_NR_getegid32
9008 case TARGET_NR_getegid32
:
9009 ret
= get_errno(getegid());
9012 #ifdef TARGET_NR_setreuid32
9013 case TARGET_NR_setreuid32
:
9014 ret
= get_errno(setreuid(arg1
, arg2
));
9017 #ifdef TARGET_NR_setregid32
9018 case TARGET_NR_setregid32
:
9019 ret
= get_errno(setregid(arg1
, arg2
));
9022 #ifdef TARGET_NR_getgroups32
9023 case TARGET_NR_getgroups32
:
9025 int gidsetsize
= arg1
;
9026 uint32_t *target_grouplist
;
9030 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
9031 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
9032 if (gidsetsize
== 0)
9034 if (!is_error(ret
)) {
9035 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
9036 if (!target_grouplist
) {
9037 ret
= -TARGET_EFAULT
;
9040 for(i
= 0;i
< ret
; i
++)
9041 target_grouplist
[i
] = tswap32(grouplist
[i
]);
9042 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
9047 #ifdef TARGET_NR_setgroups32
9048 case TARGET_NR_setgroups32
:
9050 int gidsetsize
= arg1
;
9051 uint32_t *target_grouplist
;
9055 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
9056 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
9057 if (!target_grouplist
) {
9058 ret
= -TARGET_EFAULT
;
9061 for(i
= 0;i
< gidsetsize
; i
++)
9062 grouplist
[i
] = tswap32(target_grouplist
[i
]);
9063 unlock_user(target_grouplist
, arg2
, 0);
9064 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
9068 #ifdef TARGET_NR_fchown32
9069 case TARGET_NR_fchown32
:
9070 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
9073 #ifdef TARGET_NR_setresuid32
9074 case TARGET_NR_setresuid32
:
9075 ret
= get_errno(setresuid(arg1
, arg2
, arg3
));
9078 #ifdef TARGET_NR_getresuid32
9079 case TARGET_NR_getresuid32
:
9081 uid_t ruid
, euid
, suid
;
9082 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
9083 if (!is_error(ret
)) {
9084 if (put_user_u32(ruid
, arg1
)
9085 || put_user_u32(euid
, arg2
)
9086 || put_user_u32(suid
, arg3
))
9092 #ifdef TARGET_NR_setresgid32
9093 case TARGET_NR_setresgid32
:
9094 ret
= get_errno(setresgid(arg1
, arg2
, arg3
));
9097 #ifdef TARGET_NR_getresgid32
9098 case TARGET_NR_getresgid32
:
9100 gid_t rgid
, egid
, sgid
;
9101 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
9102 if (!is_error(ret
)) {
9103 if (put_user_u32(rgid
, arg1
)
9104 || put_user_u32(egid
, arg2
)
9105 || put_user_u32(sgid
, arg3
))
9111 #ifdef TARGET_NR_chown32
9112 case TARGET_NR_chown32
:
9113 if (!(p
= lock_user_string(arg1
)))
9115 ret
= get_errno(chown(p
, arg2
, arg3
));
9116 unlock_user(p
, arg1
, 0);
9119 #ifdef TARGET_NR_setuid32
9120 case TARGET_NR_setuid32
:
9121 ret
= get_errno(setuid(arg1
));
9124 #ifdef TARGET_NR_setgid32
9125 case TARGET_NR_setgid32
:
9126 ret
= get_errno(setgid(arg1
));
9129 #ifdef TARGET_NR_setfsuid32
9130 case TARGET_NR_setfsuid32
:
9131 ret
= get_errno(setfsuid(arg1
));
9134 #ifdef TARGET_NR_setfsgid32
9135 case TARGET_NR_setfsgid32
:
9136 ret
= get_errno(setfsgid(arg1
));
9140 case TARGET_NR_pivot_root
:
9142 #ifdef TARGET_NR_mincore
9143 case TARGET_NR_mincore
:
9146 ret
= -TARGET_EFAULT
;
9147 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
9149 if (!(p
= lock_user_string(arg3
)))
9151 ret
= get_errno(mincore(a
, arg2
, p
));
9152 unlock_user(p
, arg3
, ret
);
9154 unlock_user(a
, arg1
, 0);
9158 #ifdef TARGET_NR_arm_fadvise64_64
9159 case TARGET_NR_arm_fadvise64_64
:
9162 * arm_fadvise64_64 looks like fadvise64_64 but
9163 * with different argument order
9171 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
9172 #ifdef TARGET_NR_fadvise64_64
9173 case TARGET_NR_fadvise64_64
:
9175 #ifdef TARGET_NR_fadvise64
9176 case TARGET_NR_fadvise64
:
9180 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
9181 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
9182 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
9183 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
9187 ret
= -posix_fadvise(arg1
, arg2
, arg3
, arg4
);
9190 #ifdef TARGET_NR_madvise
9191 case TARGET_NR_madvise
:
9192 /* A straight passthrough may not be safe because qemu sometimes
9193 turns private file-backed mappings into anonymous mappings.
9194 This will break MADV_DONTNEED.
9195 This is a hint, so ignoring and returning success is ok. */
9199 #if TARGET_ABI_BITS == 32
9200 case TARGET_NR_fcntl64
:
9204 struct target_flock64
*target_fl
;
9206 struct target_eabi_flock64
*target_efl
;
9209 cmd
= target_to_host_fcntl_cmd(arg2
);
9210 if (cmd
== -TARGET_EINVAL
) {
9216 case TARGET_F_GETLK64
:
9218 if (((CPUARMState
*)cpu_env
)->eabi
) {
9219 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
9221 fl
.l_type
= tswap16(target_efl
->l_type
);
9222 fl
.l_whence
= tswap16(target_efl
->l_whence
);
9223 fl
.l_start
= tswap64(target_efl
->l_start
);
9224 fl
.l_len
= tswap64(target_efl
->l_len
);
9225 fl
.l_pid
= tswap32(target_efl
->l_pid
);
9226 unlock_user_struct(target_efl
, arg3
, 0);
9230 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
9232 fl
.l_type
= tswap16(target_fl
->l_type
);
9233 fl
.l_whence
= tswap16(target_fl
->l_whence
);
9234 fl
.l_start
= tswap64(target_fl
->l_start
);
9235 fl
.l_len
= tswap64(target_fl
->l_len
);
9236 fl
.l_pid
= tswap32(target_fl
->l_pid
);
9237 unlock_user_struct(target_fl
, arg3
, 0);
9239 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
9242 if (((CPUARMState
*)cpu_env
)->eabi
) {
9243 if (!lock_user_struct(VERIFY_WRITE
, target_efl
, arg3
, 0))
9245 target_efl
->l_type
= tswap16(fl
.l_type
);
9246 target_efl
->l_whence
= tswap16(fl
.l_whence
);
9247 target_efl
->l_start
= tswap64(fl
.l_start
);
9248 target_efl
->l_len
= tswap64(fl
.l_len
);
9249 target_efl
->l_pid
= tswap32(fl
.l_pid
);
9250 unlock_user_struct(target_efl
, arg3
, 1);
9254 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg3
, 0))
9256 target_fl
->l_type
= tswap16(fl
.l_type
);
9257 target_fl
->l_whence
= tswap16(fl
.l_whence
);
9258 target_fl
->l_start
= tswap64(fl
.l_start
);
9259 target_fl
->l_len
= tswap64(fl
.l_len
);
9260 target_fl
->l_pid
= tswap32(fl
.l_pid
);
9261 unlock_user_struct(target_fl
, arg3
, 1);
9266 case TARGET_F_SETLK64
:
9267 case TARGET_F_SETLKW64
:
9269 if (((CPUARMState
*)cpu_env
)->eabi
) {
9270 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
9272 fl
.l_type
= tswap16(target_efl
->l_type
);
9273 fl
.l_whence
= tswap16(target_efl
->l_whence
);
9274 fl
.l_start
= tswap64(target_efl
->l_start
);
9275 fl
.l_len
= tswap64(target_efl
->l_len
);
9276 fl
.l_pid
= tswap32(target_efl
->l_pid
);
9277 unlock_user_struct(target_efl
, arg3
, 0);
9281 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
9283 fl
.l_type
= tswap16(target_fl
->l_type
);
9284 fl
.l_whence
= tswap16(target_fl
->l_whence
);
9285 fl
.l_start
= tswap64(target_fl
->l_start
);
9286 fl
.l_len
= tswap64(target_fl
->l_len
);
9287 fl
.l_pid
= tswap32(target_fl
->l_pid
);
9288 unlock_user_struct(target_fl
, arg3
, 0);
9290 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
9293 ret
= do_fcntl(arg1
, arg2
, arg3
);
9299 #ifdef TARGET_NR_cacheflush
9300 case TARGET_NR_cacheflush
:
9301 /* self-modifying code is handled automatically, so nothing needed */
9305 #ifdef TARGET_NR_security
9306 case TARGET_NR_security
:
9309 #ifdef TARGET_NR_getpagesize
9310 case TARGET_NR_getpagesize
:
9311 ret
= TARGET_PAGE_SIZE
;
9314 case TARGET_NR_gettid
:
9315 ret
= get_errno(gettid());
9317 #ifdef TARGET_NR_readahead
9318 case TARGET_NR_readahead
:
9319 #if TARGET_ABI_BITS == 32
9320 if (regpairs_aligned(cpu_env
)) {
9325 ret
= get_errno(readahead(arg1
, ((off64_t
)arg3
<< 32) | arg2
, arg4
));
9327 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
9332 #ifdef TARGET_NR_setxattr
9333 case TARGET_NR_listxattr
:
9334 case TARGET_NR_llistxattr
:
9338 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9340 ret
= -TARGET_EFAULT
;
9344 p
= lock_user_string(arg1
);
9346 if (num
== TARGET_NR_listxattr
) {
9347 ret
= get_errno(listxattr(p
, b
, arg3
));
9349 ret
= get_errno(llistxattr(p
, b
, arg3
));
9352 ret
= -TARGET_EFAULT
;
9354 unlock_user(p
, arg1
, 0);
9355 unlock_user(b
, arg2
, arg3
);
9358 case TARGET_NR_flistxattr
:
9362 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9364 ret
= -TARGET_EFAULT
;
9368 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
9369 unlock_user(b
, arg2
, arg3
);
9372 case TARGET_NR_setxattr
:
9373 case TARGET_NR_lsetxattr
:
9375 void *p
, *n
, *v
= 0;
9377 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
9379 ret
= -TARGET_EFAULT
;
9383 p
= lock_user_string(arg1
);
9384 n
= lock_user_string(arg2
);
9386 if (num
== TARGET_NR_setxattr
) {
9387 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
9389 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
9392 ret
= -TARGET_EFAULT
;
9394 unlock_user(p
, arg1
, 0);
9395 unlock_user(n
, arg2
, 0);
9396 unlock_user(v
, arg3
, 0);
9399 case TARGET_NR_fsetxattr
:
9403 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
9405 ret
= -TARGET_EFAULT
;
9409 n
= lock_user_string(arg2
);
9411 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
9413 ret
= -TARGET_EFAULT
;
9415 unlock_user(n
, arg2
, 0);
9416 unlock_user(v
, arg3
, 0);
9419 case TARGET_NR_getxattr
:
9420 case TARGET_NR_lgetxattr
:
9422 void *p
, *n
, *v
= 0;
9424 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
9426 ret
= -TARGET_EFAULT
;
9430 p
= lock_user_string(arg1
);
9431 n
= lock_user_string(arg2
);
9433 if (num
== TARGET_NR_getxattr
) {
9434 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
9436 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
9439 ret
= -TARGET_EFAULT
;
9441 unlock_user(p
, arg1
, 0);
9442 unlock_user(n
, arg2
, 0);
9443 unlock_user(v
, arg3
, arg4
);
9446 case TARGET_NR_fgetxattr
:
9450 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
9452 ret
= -TARGET_EFAULT
;
9456 n
= lock_user_string(arg2
);
9458 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
9460 ret
= -TARGET_EFAULT
;
9462 unlock_user(n
, arg2
, 0);
9463 unlock_user(v
, arg3
, arg4
);
9466 case TARGET_NR_removexattr
:
9467 case TARGET_NR_lremovexattr
:
9470 p
= lock_user_string(arg1
);
9471 n
= lock_user_string(arg2
);
9473 if (num
== TARGET_NR_removexattr
) {
9474 ret
= get_errno(removexattr(p
, n
));
9476 ret
= get_errno(lremovexattr(p
, n
));
9479 ret
= -TARGET_EFAULT
;
9481 unlock_user(p
, arg1
, 0);
9482 unlock_user(n
, arg2
, 0);
9485 case TARGET_NR_fremovexattr
:
9488 n
= lock_user_string(arg2
);
9490 ret
= get_errno(fremovexattr(arg1
, n
));
9492 ret
= -TARGET_EFAULT
;
9494 unlock_user(n
, arg2
, 0);
9498 #endif /* CONFIG_ATTR */
9499 #ifdef TARGET_NR_set_thread_area
9500 case TARGET_NR_set_thread_area
:
9501 #if defined(TARGET_MIPS)
9502 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
9505 #elif defined(TARGET_CRIS)
9507 ret
= -TARGET_EINVAL
;
9509 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
9513 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
9514 ret
= do_set_thread_area(cpu_env
, arg1
);
9516 #elif defined(TARGET_M68K)
9518 TaskState
*ts
= cpu
->opaque
;
9519 ts
->tp_value
= arg1
;
9524 goto unimplemented_nowarn
;
9527 #ifdef TARGET_NR_get_thread_area
9528 case TARGET_NR_get_thread_area
:
9529 #if defined(TARGET_I386) && defined(TARGET_ABI32)
9530 ret
= do_get_thread_area(cpu_env
, arg1
);
9532 #elif defined(TARGET_M68K)
9534 TaskState
*ts
= cpu
->opaque
;
9539 goto unimplemented_nowarn
;
9542 #ifdef TARGET_NR_getdomainname
9543 case TARGET_NR_getdomainname
:
9544 goto unimplemented_nowarn
;
9547 #ifdef TARGET_NR_clock_gettime
9548 case TARGET_NR_clock_gettime
:
9551 ret
= get_errno(clock_gettime(arg1
, &ts
));
9552 if (!is_error(ret
)) {
9553 host_to_target_timespec(arg2
, &ts
);
9558 #ifdef TARGET_NR_clock_getres
9559 case TARGET_NR_clock_getres
:
9562 ret
= get_errno(clock_getres(arg1
, &ts
));
9563 if (!is_error(ret
)) {
9564 host_to_target_timespec(arg2
, &ts
);
9569 #ifdef TARGET_NR_clock_nanosleep
9570 case TARGET_NR_clock_nanosleep
:
9573 target_to_host_timespec(&ts
, arg3
);
9574 ret
= get_errno(clock_nanosleep(arg1
, arg2
, &ts
, arg4
? &ts
: NULL
));
9576 host_to_target_timespec(arg4
, &ts
);
9578 #if defined(TARGET_PPC)
9579 /* clock_nanosleep is odd in that it returns positive errno values.
9580 * On PPC, CR0 bit 3 should be set in such a situation. */
9582 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
9589 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
9590 case TARGET_NR_set_tid_address
:
9591 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
9595 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
9596 case TARGET_NR_tkill
:
9597 ret
= get_errno(sys_tkill((int)arg1
, target_to_host_signal(arg2
)));
9601 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
9602 case TARGET_NR_tgkill
:
9603 ret
= get_errno(sys_tgkill((int)arg1
, (int)arg2
,
9604 target_to_host_signal(arg3
)));
9608 #ifdef TARGET_NR_set_robust_list
9609 case TARGET_NR_set_robust_list
:
9610 case TARGET_NR_get_robust_list
:
9611 /* The ABI for supporting robust futexes has userspace pass
9612 * the kernel a pointer to a linked list which is updated by
9613 * userspace after the syscall; the list is walked by the kernel
9614 * when the thread exits. Since the linked list in QEMU guest
9615 * memory isn't a valid linked list for the host and we have
9616 * no way to reliably intercept the thread-death event, we can't
9617 * support these. Silently return ENOSYS so that guest userspace
9618 * falls back to a non-robust futex implementation (which should
9619 * be OK except in the corner case of the guest crashing while
9620 * holding a mutex that is shared with another process via
9623 goto unimplemented_nowarn
;
9626 #if defined(TARGET_NR_utimensat)
9627 case TARGET_NR_utimensat
:
9629 struct timespec
*tsp
, ts
[2];
9633 target_to_host_timespec(ts
, arg3
);
9634 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
9638 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
9640 if (!(p
= lock_user_string(arg2
))) {
9641 ret
= -TARGET_EFAULT
;
9644 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
9645 unlock_user(p
, arg2
, 0);
9650 case TARGET_NR_futex
:
9651 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9653 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
9654 case TARGET_NR_inotify_init
:
9655 ret
= get_errno(sys_inotify_init());
9658 #ifdef CONFIG_INOTIFY1
9659 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
9660 case TARGET_NR_inotify_init1
:
9661 ret
= get_errno(sys_inotify_init1(arg1
));
9665 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
9666 case TARGET_NR_inotify_add_watch
:
9667 p
= lock_user_string(arg2
);
9668 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
9669 unlock_user(p
, arg2
, 0);
9672 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
9673 case TARGET_NR_inotify_rm_watch
:
9674 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
9678 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
9679 case TARGET_NR_mq_open
:
9681 struct mq_attr posix_mq_attr
, *attrp
;
9683 p
= lock_user_string(arg1
- 1);
9685 copy_from_user_mq_attr (&posix_mq_attr
, arg4
);
9686 attrp
= &posix_mq_attr
;
9690 ret
= get_errno(mq_open(p
, arg2
, arg3
, attrp
));
9691 unlock_user (p
, arg1
, 0);
9695 case TARGET_NR_mq_unlink
:
9696 p
= lock_user_string(arg1
- 1);
9697 ret
= get_errno(mq_unlink(p
));
9698 unlock_user (p
, arg1
, 0);
9701 case TARGET_NR_mq_timedsend
:
9705 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
9707 target_to_host_timespec(&ts
, arg5
);
9708 ret
= get_errno(mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
9709 host_to_target_timespec(arg5
, &ts
);
9712 ret
= get_errno(mq_send(arg1
, p
, arg3
, arg4
));
9713 unlock_user (p
, arg2
, arg3
);
9717 case TARGET_NR_mq_timedreceive
:
9722 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
9724 target_to_host_timespec(&ts
, arg5
);
9725 ret
= get_errno(mq_timedreceive(arg1
, p
, arg3
, &prio
, &ts
));
9726 host_to_target_timespec(arg5
, &ts
);
9729 ret
= get_errno(mq_receive(arg1
, p
, arg3
, &prio
));
9730 unlock_user (p
, arg2
, arg3
);
9732 put_user_u32(prio
, arg4
);
9736 /* Not implemented for now... */
9737 /* case TARGET_NR_mq_notify: */
9740 case TARGET_NR_mq_getsetattr
:
9742 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
9745 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
9746 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
9749 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
9750 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
9757 #ifdef CONFIG_SPLICE
9758 #ifdef TARGET_NR_tee
9761 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
9765 #ifdef TARGET_NR_splice
9766 case TARGET_NR_splice
:
9768 loff_t loff_in
, loff_out
;
9769 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
9771 if (get_user_u64(loff_in
, arg2
)) {
9774 ploff_in
= &loff_in
;
9777 if (get_user_u64(loff_out
, arg4
)) {
9780 ploff_out
= &loff_out
;
9782 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
9784 if (put_user_u64(loff_in
, arg2
)) {
9789 if (put_user_u64(loff_out
, arg4
)) {
9796 #ifdef TARGET_NR_vmsplice
9797 case TARGET_NR_vmsplice
:
9799 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
9801 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
9802 unlock_iovec(vec
, arg2
, arg3
, 0);
9804 ret
= -host_to_target_errno(errno
);
9809 #endif /* CONFIG_SPLICE */
9810 #ifdef CONFIG_EVENTFD
9811 #if defined(TARGET_NR_eventfd)
9812 case TARGET_NR_eventfd
:
9813 ret
= get_errno(eventfd(arg1
, 0));
9814 fd_trans_unregister(ret
);
9817 #if defined(TARGET_NR_eventfd2)
9818 case TARGET_NR_eventfd2
:
9820 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
9821 if (arg2
& TARGET_O_NONBLOCK
) {
9822 host_flags
|= O_NONBLOCK
;
9824 if (arg2
& TARGET_O_CLOEXEC
) {
9825 host_flags
|= O_CLOEXEC
;
9827 ret
= get_errno(eventfd(arg1
, host_flags
));
9828 fd_trans_unregister(ret
);
9832 #endif /* CONFIG_EVENTFD */
9833 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
9834 case TARGET_NR_fallocate
:
9835 #if TARGET_ABI_BITS == 32
9836 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
9837 target_offset64(arg5
, arg6
)));
9839 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
9843 #if defined(CONFIG_SYNC_FILE_RANGE)
9844 #if defined(TARGET_NR_sync_file_range)
9845 case TARGET_NR_sync_file_range
:
9846 #if TARGET_ABI_BITS == 32
9847 #if defined(TARGET_MIPS)
9848 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
9849 target_offset64(arg5
, arg6
), arg7
));
9851 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
9852 target_offset64(arg4
, arg5
), arg6
));
9853 #endif /* !TARGET_MIPS */
9855 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
9859 #if defined(TARGET_NR_sync_file_range2)
9860 case TARGET_NR_sync_file_range2
:
9861 /* This is like sync_file_range but the arguments are reordered */
9862 #if TARGET_ABI_BITS == 32
9863 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
9864 target_offset64(arg5
, arg6
), arg2
));
9866 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
9871 #if defined(TARGET_NR_signalfd4)
9872 case TARGET_NR_signalfd4
:
9873 ret
= do_signalfd4(arg1
, arg2
, arg4
);
9876 #if defined(TARGET_NR_signalfd)
9877 case TARGET_NR_signalfd
:
9878 ret
= do_signalfd4(arg1
, arg2
, 0);
9881 #if defined(CONFIG_EPOLL)
9882 #if defined(TARGET_NR_epoll_create)
9883 case TARGET_NR_epoll_create
:
9884 ret
= get_errno(epoll_create(arg1
));
9887 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
9888 case TARGET_NR_epoll_create1
:
9889 ret
= get_errno(epoll_create1(arg1
));
9892 #if defined(TARGET_NR_epoll_ctl)
9893 case TARGET_NR_epoll_ctl
:
9895 struct epoll_event ep
;
9896 struct epoll_event
*epp
= 0;
9898 struct target_epoll_event
*target_ep
;
9899 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
9902 ep
.events
= tswap32(target_ep
->events
);
9903 /* The epoll_data_t union is just opaque data to the kernel,
9904 * so we transfer all 64 bits across and need not worry what
9905 * actual data type it is.
9907 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
9908 unlock_user_struct(target_ep
, arg4
, 0);
9911 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
9916 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
9917 #define IMPLEMENT_EPOLL_PWAIT
9919 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
9920 #if defined(TARGET_NR_epoll_wait)
9921 case TARGET_NR_epoll_wait
:
9923 #if defined(IMPLEMENT_EPOLL_PWAIT)
9924 case TARGET_NR_epoll_pwait
:
9927 struct target_epoll_event
*target_ep
;
9928 struct epoll_event
*ep
;
9930 int maxevents
= arg3
;
9933 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
9934 maxevents
* sizeof(struct target_epoll_event
), 1);
9939 ep
= alloca(maxevents
* sizeof(struct epoll_event
));
9942 #if defined(IMPLEMENT_EPOLL_PWAIT)
9943 case TARGET_NR_epoll_pwait
:
9945 target_sigset_t
*target_set
;
9946 sigset_t _set
, *set
= &_set
;
9949 target_set
= lock_user(VERIFY_READ
, arg5
,
9950 sizeof(target_sigset_t
), 1);
9952 unlock_user(target_ep
, arg2
, 0);
9955 target_to_host_sigset(set
, target_set
);
9956 unlock_user(target_set
, arg5
, 0);
9961 ret
= get_errno(epoll_pwait(epfd
, ep
, maxevents
, timeout
, set
));
9965 #if defined(TARGET_NR_epoll_wait)
9966 case TARGET_NR_epoll_wait
:
9967 ret
= get_errno(epoll_wait(epfd
, ep
, maxevents
, timeout
));
9971 ret
= -TARGET_ENOSYS
;
9973 if (!is_error(ret
)) {
9975 for (i
= 0; i
< ret
; i
++) {
9976 target_ep
[i
].events
= tswap32(ep
[i
].events
);
9977 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
9980 unlock_user(target_ep
, arg2
, ret
* sizeof(struct target_epoll_event
));
9985 #ifdef TARGET_NR_prlimit64
9986 case TARGET_NR_prlimit64
:
9988 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
9989 struct target_rlimit64
*target_rnew
, *target_rold
;
9990 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
9991 int resource
= target_to_host_resource(arg2
);
9993 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
9996 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
9997 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
9998 unlock_user_struct(target_rnew
, arg3
, 0);
10002 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
10003 if (!is_error(ret
) && arg4
) {
10004 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
10007 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
10008 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
10009 unlock_user_struct(target_rold
, arg4
, 1);
10014 #ifdef TARGET_NR_gethostname
10015 case TARGET_NR_gethostname
:
10017 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
10019 ret
= get_errno(gethostname(name
, arg2
));
10020 unlock_user(name
, arg1
, arg2
);
10022 ret
= -TARGET_EFAULT
;
10027 #ifdef TARGET_NR_atomic_cmpxchg_32
10028 case TARGET_NR_atomic_cmpxchg_32
:
10030 /* should use start_exclusive from main.c */
10031 abi_ulong mem_value
;
10032 if (get_user_u32(mem_value
, arg6
)) {
10033 target_siginfo_t info
;
10034 info
.si_signo
= SIGSEGV
;
10036 info
.si_code
= TARGET_SEGV_MAPERR
;
10037 info
._sifields
._sigfault
._addr
= arg6
;
10038 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
10042 if (mem_value
== arg2
)
10043 put_user_u32(arg1
, arg6
);
10048 #ifdef TARGET_NR_atomic_barrier
10049 case TARGET_NR_atomic_barrier
:
10051 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
10057 #ifdef TARGET_NR_timer_create
10058 case TARGET_NR_timer_create
:
10060 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
10062 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
10065 int timer_index
= next_free_host_timer();
10067 if (timer_index
< 0) {
10068 ret
= -TARGET_EAGAIN
;
10070 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
10073 phost_sevp
= &host_sevp
;
10074 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
10080 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
10084 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
10093 #ifdef TARGET_NR_timer_settime
10094 case TARGET_NR_timer_settime
:
10096 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
10097 * struct itimerspec * old_value */
10098 target_timer_t timerid
= get_timer_id(arg1
);
10102 } else if (arg3
== 0) {
10103 ret
= -TARGET_EINVAL
;
10105 timer_t htimer
= g_posix_timers
[timerid
];
10106 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
10108 target_to_host_itimerspec(&hspec_new
, arg3
);
10110 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
10111 host_to_target_itimerspec(arg2
, &hspec_old
);
10117 #ifdef TARGET_NR_timer_gettime
10118 case TARGET_NR_timer_gettime
:
10120 /* args: timer_t timerid, struct itimerspec *curr_value */
10121 target_timer_t timerid
= get_timer_id(arg1
);
10125 } else if (!arg2
) {
10126 ret
= -TARGET_EFAULT
;
10128 timer_t htimer
= g_posix_timers
[timerid
];
10129 struct itimerspec hspec
;
10130 ret
= get_errno(timer_gettime(htimer
, &hspec
));
10132 if (host_to_target_itimerspec(arg2
, &hspec
)) {
10133 ret
= -TARGET_EFAULT
;
10140 #ifdef TARGET_NR_timer_getoverrun
10141 case TARGET_NR_timer_getoverrun
:
10143 /* args: timer_t timerid */
10144 target_timer_t timerid
= get_timer_id(arg1
);
10149 timer_t htimer
= g_posix_timers
[timerid
];
10150 ret
= get_errno(timer_getoverrun(htimer
));
10152 fd_trans_unregister(ret
);
10157 #ifdef TARGET_NR_timer_delete
10158 case TARGET_NR_timer_delete
:
10160 /* args: timer_t timerid */
10161 target_timer_t timerid
= get_timer_id(arg1
);
10166 timer_t htimer
= g_posix_timers
[timerid
];
10167 ret
= get_errno(timer_delete(htimer
));
10168 g_posix_timers
[timerid
] = 0;
10174 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
10175 case TARGET_NR_timerfd_create
:
10176 ret
= get_errno(timerfd_create(arg1
,
10177 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
10181 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
10182 case TARGET_NR_timerfd_gettime
:
10184 struct itimerspec its_curr
;
10186 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
10188 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
10195 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
10196 case TARGET_NR_timerfd_settime
:
10198 struct itimerspec its_new
, its_old
, *p_new
;
10201 if (target_to_host_itimerspec(&its_new
, arg3
)) {
10209 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
10211 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
10218 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
10219 case TARGET_NR_ioprio_get
:
10220 ret
= get_errno(ioprio_get(arg1
, arg2
));
10224 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
10225 case TARGET_NR_ioprio_set
:
10226 ret
= get_errno(ioprio_set(arg1
, arg2
, arg3
));
10230 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
10231 case TARGET_NR_setns
:
10232 ret
= get_errno(setns(arg1
, arg2
));
10235 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
10236 case TARGET_NR_unshare
:
10237 ret
= get_errno(unshare(arg1
));
10243 gemu_log("qemu: Unsupported syscall: %d\n", num
);
10244 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
10245 unimplemented_nowarn
:
10247 ret
= -TARGET_ENOSYS
;
10252 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
10255 print_syscall_ret(num
, ret
);
10258 ret
= -TARGET_EFAULT
;