4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
27 #include <sys/mount.h>
29 #include <sys/fsuid.h>
30 #include <sys/personality.h>
31 #include <sys/prctl.h>
32 #include <sys/resource.h>
35 #include <linux/capability.h>
38 int __clone2(int (*fn
)(void *), void *child_stack_base
,
39 size_t stack_size
, int flags
, void *arg
, ...);
41 #include <sys/socket.h>
45 #include <sys/times.h>
48 #include <sys/statfs.h>
50 #include <sys/sysinfo.h>
51 #include <sys/signalfd.h>
52 //#include <sys/user.h>
53 #include <netinet/ip.h>
54 #include <netinet/tcp.h>
55 #include <linux/wireless.h>
56 #include <linux/icmp.h>
57 #include "qemu-common.h"
59 #include <sys/timerfd.h>
65 #include <sys/eventfd.h>
68 #include <sys/epoll.h>
71 #include "qemu/xattr.h"
73 #ifdef CONFIG_SENDFILE
74 #include <sys/sendfile.h>
77 #define termios host_termios
78 #define winsize host_winsize
79 #define termio host_termio
80 #define sgttyb host_sgttyb /* same as target */
81 #define tchars host_tchars /* same as target */
82 #define ltchars host_ltchars /* same as target */
84 #include <linux/termios.h>
85 #include <linux/unistd.h>
86 #include <linux/cdrom.h>
87 #include <linux/hdreg.h>
88 #include <linux/soundcard.h>
90 #include <linux/mtio.h>
92 #if defined(CONFIG_FIEMAP)
93 #include <linux/fiemap.h>
97 #include <linux/dm-ioctl.h>
98 #include <linux/reboot.h>
99 #include <linux/route.h>
100 #include <linux/filter.h>
101 #include <linux/blkpg.h>
102 #include "linux_loop.h"
107 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
108 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
112 //#include <linux/msdos_fs.h>
113 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
114 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
125 #define _syscall0(type,name) \
126 static type name (void) \
128 return syscall(__NR_##name); \
131 #define _syscall1(type,name,type1,arg1) \
132 static type name (type1 arg1) \
134 return syscall(__NR_##name, arg1); \
137 #define _syscall2(type,name,type1,arg1,type2,arg2) \
138 static type name (type1 arg1,type2 arg2) \
140 return syscall(__NR_##name, arg1, arg2); \
143 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
144 static type name (type1 arg1,type2 arg2,type3 arg3) \
146 return syscall(__NR_##name, arg1, arg2, arg3); \
149 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
150 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
152 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
155 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
157 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
159 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
163 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
164 type5,arg5,type6,arg6) \
165 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
168 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
172 #define __NR_sys_uname __NR_uname
173 #define __NR_sys_getcwd1 __NR_getcwd
174 #define __NR_sys_getdents __NR_getdents
175 #define __NR_sys_getdents64 __NR_getdents64
176 #define __NR_sys_getpriority __NR_getpriority
177 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
178 #define __NR_sys_syslog __NR_syslog
179 #define __NR_sys_tgkill __NR_tgkill
180 #define __NR_sys_tkill __NR_tkill
181 #define __NR_sys_futex __NR_futex
182 #define __NR_sys_inotify_init __NR_inotify_init
183 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
184 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
186 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
188 #define __NR__llseek __NR_lseek
191 /* Newer kernel ports have llseek() instead of _llseek() */
192 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
193 #define TARGET_NR__llseek TARGET_NR_llseek
197 _syscall0(int, gettid
)
199 /* This is a replacement for the host gettid() and must return a host
201 static int gettid(void) {
205 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
206 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
208 #if !defined(__NR_getdents) || \
209 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
210 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
212 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
213 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
214 loff_t
*, res
, uint
, wh
);
216 _syscall3(int,sys_rt_sigqueueinfo
,int,pid
,int,sig
,siginfo_t
*,uinfo
)
217 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
218 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
219 _syscall3(int,sys_tgkill
,int,tgid
,int,pid
,int,sig
)
221 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
222 _syscall2(int,sys_tkill
,int,tid
,int,sig
)
224 #ifdef __NR_exit_group
225 _syscall1(int,exit_group
,int,error_code
)
227 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
228 _syscall1(int,set_tid_address
,int *,tidptr
)
230 #if defined(TARGET_NR_futex) && defined(__NR_futex)
231 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
232 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
234 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
235 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
236 unsigned long *, user_mask_ptr
);
237 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
238 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
239 unsigned long *, user_mask_ptr
);
240 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
242 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
243 struct __user_cap_data_struct
*, data
);
244 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
245 struct __user_cap_data_struct
*, data
);
246 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
247 _syscall2(int, ioprio_get
, int, which
, int, who
)
249 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
250 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
253 static bitmask_transtbl fcntl_flags_tbl
[] = {
254 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
255 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
256 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
257 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
258 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
259 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
260 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
261 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
262 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
263 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
264 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
265 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
266 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
267 #if defined(O_DIRECT)
268 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
270 #if defined(O_NOATIME)
271 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
273 #if defined(O_CLOEXEC)
274 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
277 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
279 /* Don't terminate the list prematurely on 64-bit host+guest. */
280 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
281 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
286 typedef abi_long (*TargetFdDataFunc
)(void *, size_t);
287 typedef abi_long (*TargetFdAddrFunc
)(void *, abi_ulong
, socklen_t
);
288 typedef struct TargetFdTrans
{
289 TargetFdDataFunc host_to_target_data
;
290 TargetFdDataFunc target_to_host_data
;
291 TargetFdAddrFunc target_to_host_addr
;
294 static TargetFdTrans
**target_fd_trans
;
296 static unsigned int target_fd_max
;
298 static TargetFdDataFunc
fd_trans_host_to_target_data(int fd
)
300 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
301 return target_fd_trans
[fd
]->host_to_target_data
;
306 static TargetFdAddrFunc
fd_trans_target_to_host_addr(int fd
)
308 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
309 return target_fd_trans
[fd
]->target_to_host_addr
;
314 static void fd_trans_register(int fd
, TargetFdTrans
*trans
)
318 if (fd
>= target_fd_max
) {
319 oldmax
= target_fd_max
;
320 target_fd_max
= ((fd
>> 6) + 1) << 6; /* by slice of 64 entries */
321 target_fd_trans
= g_realloc(target_fd_trans
,
322 target_fd_max
* sizeof(TargetFdTrans
));
323 memset((void *)(target_fd_trans
+ oldmax
), 0,
324 (target_fd_max
- oldmax
) * sizeof(TargetFdTrans
*));
326 target_fd_trans
[fd
] = trans
;
329 static void fd_trans_unregister(int fd
)
331 if (fd
>= 0 && fd
< target_fd_max
) {
332 target_fd_trans
[fd
] = NULL
;
336 static void fd_trans_dup(int oldfd
, int newfd
)
338 fd_trans_unregister(newfd
);
339 if (oldfd
< target_fd_max
&& target_fd_trans
[oldfd
]) {
340 fd_trans_register(newfd
, target_fd_trans
[oldfd
]);
/*
 * getcwd(2)-style helper: on success return the length of the path
 * INCLUDING the trailing NUL (the value the raw syscall reports);
 * on failure return -1 with errno already set by getcwd().
 */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return -1;
    }
    return strlen(buf) + 1;
}
353 static int sys_openat(int dirfd
, const char *pathname
, int flags
, mode_t mode
)
356 * open(2) has extra parameter 'mode' when called with
359 if ((flags
& O_CREAT
) != 0) {
360 return (openat(dirfd
, pathname
, flags
, mode
));
362 return (openat(dirfd
, pathname
, flags
));
365 #ifdef TARGET_NR_utimensat
366 #ifdef CONFIG_UTIMENSAT
367 static int sys_utimensat(int dirfd
, const char *pathname
,
368 const struct timespec times
[2], int flags
)
370 if (pathname
== NULL
)
371 return futimens(dirfd
, times
);
373 return utimensat(dirfd
, pathname
, times
, flags
);
375 #elif defined(__NR_utimensat)
376 #define __NR_sys_utimensat __NR_utimensat
377 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
378 const struct timespec
*,tsp
,int,flags
)
380 static int sys_utimensat(int dirfd
, const char *pathname
,
381 const struct timespec times
[2], int flags
)
387 #endif /* TARGET_NR_utimensat */
389 #ifdef CONFIG_INOTIFY
390 #include <sys/inotify.h>
392 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
393 static int sys_inotify_init(void)
395 return (inotify_init());
398 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
399 static int sys_inotify_add_watch(int fd
,const char *pathname
, int32_t mask
)
401 return (inotify_add_watch(fd
, pathname
, mask
));
404 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
405 static int sys_inotify_rm_watch(int fd
, int32_t wd
)
407 return (inotify_rm_watch(fd
, wd
));
410 #ifdef CONFIG_INOTIFY1
411 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
412 static int sys_inotify_init1(int flags
)
414 return (inotify_init1(flags
));
419 /* Userspace can usually survive runtime without inotify */
420 #undef TARGET_NR_inotify_init
421 #undef TARGET_NR_inotify_init1
422 #undef TARGET_NR_inotify_add_watch
423 #undef TARGET_NR_inotify_rm_watch
424 #endif /* CONFIG_INOTIFY */
426 #if defined(TARGET_NR_ppoll)
428 # define __NR_ppoll -1
430 #define __NR_sys_ppoll __NR_ppoll
431 _syscall5(int, sys_ppoll
, struct pollfd
*, fds
, nfds_t
, nfds
,
432 struct timespec
*, timeout
, const sigset_t
*, sigmask
,
436 #if defined(TARGET_NR_pselect6)
437 #ifndef __NR_pselect6
438 # define __NR_pselect6 -1
440 #define __NR_sys_pselect6 __NR_pselect6
441 _syscall6(int, sys_pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
,
442 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
);
445 #if defined(TARGET_NR_prlimit64)
446 #ifndef __NR_prlimit64
447 # define __NR_prlimit64 -1
449 #define __NR_sys_prlimit64 __NR_prlimit64
450 /* The glibc rlimit structure may not be that used by the underlying syscall */
451 struct host_rlimit64
{
455 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
456 const struct host_rlimit64
*, new_limit
,
457 struct host_rlimit64
*, old_limit
)
461 #if defined(TARGET_NR_timer_create)
462 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
463 static timer_t g_posix_timers
[32] = { 0, } ;
465 static inline int next_free_host_timer(void)
468 /* FIXME: Does finding the next free slot require a lock? */
469 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
470 if (g_posix_timers
[k
] == 0) {
471 g_posix_timers
[k
] = (timer_t
) 1;
479 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
481 static inline int regpairs_aligned(void *cpu_env
) {
482 return ((((CPUARMState
*)cpu_env
)->eabi
) == 1) ;
484 #elif defined(TARGET_MIPS)
485 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
486 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
487 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
488 * of registers which translates to the same as ARM/MIPS, because we start with
490 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
492 static inline int regpairs_aligned(void *cpu_env
) { return 0; }
495 #define ERRNO_TABLE_SIZE 1200
497 /* target_to_host_errno_table[] is initialized from
498 * host_to_target_errno_table[] in syscall_init(). */
499 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
503 * This list is the union of errno values overridden in asm-<arch>/errno.h
504 * minus the errnos that are not actually generic to all archs.
506 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
507 [EAGAIN
] = TARGET_EAGAIN
,
508 [EIDRM
] = TARGET_EIDRM
,
509 [ECHRNG
] = TARGET_ECHRNG
,
510 [EL2NSYNC
] = TARGET_EL2NSYNC
,
511 [EL3HLT
] = TARGET_EL3HLT
,
512 [EL3RST
] = TARGET_EL3RST
,
513 [ELNRNG
] = TARGET_ELNRNG
,
514 [EUNATCH
] = TARGET_EUNATCH
,
515 [ENOCSI
] = TARGET_ENOCSI
,
516 [EL2HLT
] = TARGET_EL2HLT
,
517 [EDEADLK
] = TARGET_EDEADLK
,
518 [ENOLCK
] = TARGET_ENOLCK
,
519 [EBADE
] = TARGET_EBADE
,
520 [EBADR
] = TARGET_EBADR
,
521 [EXFULL
] = TARGET_EXFULL
,
522 [ENOANO
] = TARGET_ENOANO
,
523 [EBADRQC
] = TARGET_EBADRQC
,
524 [EBADSLT
] = TARGET_EBADSLT
,
525 [EBFONT
] = TARGET_EBFONT
,
526 [ENOSTR
] = TARGET_ENOSTR
,
527 [ENODATA
] = TARGET_ENODATA
,
528 [ETIME
] = TARGET_ETIME
,
529 [ENOSR
] = TARGET_ENOSR
,
530 [ENONET
] = TARGET_ENONET
,
531 [ENOPKG
] = TARGET_ENOPKG
,
532 [EREMOTE
] = TARGET_EREMOTE
,
533 [ENOLINK
] = TARGET_ENOLINK
,
534 [EADV
] = TARGET_EADV
,
535 [ESRMNT
] = TARGET_ESRMNT
,
536 [ECOMM
] = TARGET_ECOMM
,
537 [EPROTO
] = TARGET_EPROTO
,
538 [EDOTDOT
] = TARGET_EDOTDOT
,
539 [EMULTIHOP
] = TARGET_EMULTIHOP
,
540 [EBADMSG
] = TARGET_EBADMSG
,
541 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
542 [EOVERFLOW
] = TARGET_EOVERFLOW
,
543 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
544 [EBADFD
] = TARGET_EBADFD
,
545 [EREMCHG
] = TARGET_EREMCHG
,
546 [ELIBACC
] = TARGET_ELIBACC
,
547 [ELIBBAD
] = TARGET_ELIBBAD
,
548 [ELIBSCN
] = TARGET_ELIBSCN
,
549 [ELIBMAX
] = TARGET_ELIBMAX
,
550 [ELIBEXEC
] = TARGET_ELIBEXEC
,
551 [EILSEQ
] = TARGET_EILSEQ
,
552 [ENOSYS
] = TARGET_ENOSYS
,
553 [ELOOP
] = TARGET_ELOOP
,
554 [ERESTART
] = TARGET_ERESTART
,
555 [ESTRPIPE
] = TARGET_ESTRPIPE
,
556 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
557 [EUSERS
] = TARGET_EUSERS
,
558 [ENOTSOCK
] = TARGET_ENOTSOCK
,
559 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
560 [EMSGSIZE
] = TARGET_EMSGSIZE
,
561 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
562 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
563 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
564 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
565 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
566 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
567 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
568 [EADDRINUSE
] = TARGET_EADDRINUSE
,
569 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
570 [ENETDOWN
] = TARGET_ENETDOWN
,
571 [ENETUNREACH
] = TARGET_ENETUNREACH
,
572 [ENETRESET
] = TARGET_ENETRESET
,
573 [ECONNABORTED
] = TARGET_ECONNABORTED
,
574 [ECONNRESET
] = TARGET_ECONNRESET
,
575 [ENOBUFS
] = TARGET_ENOBUFS
,
576 [EISCONN
] = TARGET_EISCONN
,
577 [ENOTCONN
] = TARGET_ENOTCONN
,
578 [EUCLEAN
] = TARGET_EUCLEAN
,
579 [ENOTNAM
] = TARGET_ENOTNAM
,
580 [ENAVAIL
] = TARGET_ENAVAIL
,
581 [EISNAM
] = TARGET_EISNAM
,
582 [EREMOTEIO
] = TARGET_EREMOTEIO
,
583 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
584 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
585 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
586 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
587 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
588 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
589 [EALREADY
] = TARGET_EALREADY
,
590 [EINPROGRESS
] = TARGET_EINPROGRESS
,
591 [ESTALE
] = TARGET_ESTALE
,
592 [ECANCELED
] = TARGET_ECANCELED
,
593 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
594 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
596 [ENOKEY
] = TARGET_ENOKEY
,
599 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
602 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
605 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
608 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
610 #ifdef ENOTRECOVERABLE
611 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
615 static inline int host_to_target_errno(int err
)
617 if(host_to_target_errno_table
[err
])
618 return host_to_target_errno_table
[err
];
622 static inline int target_to_host_errno(int err
)
624 if (target_to_host_errno_table
[err
])
625 return target_to_host_errno_table
[err
];
629 static inline abi_long
get_errno(abi_long ret
)
632 return -host_to_target_errno(errno
);
637 static inline int is_error(abi_long ret
)
639 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
642 char *target_strerror(int err
)
644 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
647 return strerror(target_to_host_errno(err
));
650 static inline int host_to_target_sock_type(int host_type
)
654 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
656 target_type
= TARGET_SOCK_DGRAM
;
659 target_type
= TARGET_SOCK_STREAM
;
662 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
666 #if defined(SOCK_CLOEXEC)
667 if (host_type
& SOCK_CLOEXEC
) {
668 target_type
|= TARGET_SOCK_CLOEXEC
;
672 #if defined(SOCK_NONBLOCK)
673 if (host_type
& SOCK_NONBLOCK
) {
674 target_type
|= TARGET_SOCK_NONBLOCK
;
681 static abi_ulong target_brk
;
682 static abi_ulong target_original_brk
;
683 static abi_ulong brk_page
;
685 void target_set_brk(abi_ulong new_brk
)
687 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
688 brk_page
= HOST_PAGE_ALIGN(target_brk
);
691 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
692 #define DEBUGF_BRK(message, args...)
694 /* do_brk() must return target values and target errnos. */
695 abi_long
do_brk(abi_ulong new_brk
)
697 abi_long mapped_addr
;
700 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
703 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
706 if (new_brk
< target_original_brk
) {
707 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
712 /* If the new brk is less than the highest page reserved to the
713 * target heap allocation, set it and we're almost done... */
714 if (new_brk
<= brk_page
) {
715 /* Heap contents are initialized to zero, as for anonymous
717 if (new_brk
> target_brk
) {
718 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
720 target_brk
= new_brk
;
721 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
725 /* We need to allocate more memory after the brk... Note that
726 * we don't use MAP_FIXED because that will map over the top of
727 * any existing mapping (like the one with the host libc or qemu
728 * itself); instead we treat "mapped but at wrong address" as
729 * a failure and unmap again.
731 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
732 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
733 PROT_READ
|PROT_WRITE
,
734 MAP_ANON
|MAP_PRIVATE
, 0, 0));
736 if (mapped_addr
== brk_page
) {
737 /* Heap contents are initialized to zero, as for anonymous
738 * mapped pages. Technically the new pages are already
739 * initialized to zero since they *are* anonymous mapped
740 * pages, however we have to take care with the contents that
741 * come from the remaining part of the previous page: it may
742 * contains garbage data due to a previous heap usage (grown
744 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
746 target_brk
= new_brk
;
747 brk_page
= HOST_PAGE_ALIGN(target_brk
);
748 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
751 } else if (mapped_addr
!= -1) {
752 /* Mapped but at wrong address, meaning there wasn't actually
753 * enough space for this brk.
755 target_munmap(mapped_addr
, new_alloc_size
);
757 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
760 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
763 #if defined(TARGET_ALPHA)
764 /* We (partially) emulate OSF/1 on Alpha, which requires we
765 return a proper errno, not an unchanged brk value. */
766 return -TARGET_ENOMEM
;
768 /* For everything else, return the previous break. */
772 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
773 abi_ulong target_fds_addr
,
777 abi_ulong b
, *target_fds
;
779 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
780 if (!(target_fds
= lock_user(VERIFY_READ
,
782 sizeof(abi_ulong
) * nw
,
784 return -TARGET_EFAULT
;
788 for (i
= 0; i
< nw
; i
++) {
789 /* grab the abi_ulong */
790 __get_user(b
, &target_fds
[i
]);
791 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
792 /* check the bit inside the abi_ulong */
799 unlock_user(target_fds
, target_fds_addr
, 0);
804 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
805 abi_ulong target_fds_addr
,
808 if (target_fds_addr
) {
809 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
810 return -TARGET_EFAULT
;
818 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
824 abi_ulong
*target_fds
;
826 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
827 if (!(target_fds
= lock_user(VERIFY_WRITE
,
829 sizeof(abi_ulong
) * nw
,
831 return -TARGET_EFAULT
;
834 for (i
= 0; i
< nw
; i
++) {
836 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
837 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
840 __put_user(v
, &target_fds
[i
]);
843 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
848 #if defined(__alpha__)
854 static inline abi_long
host_to_target_clock_t(long ticks
)
856 #if HOST_HZ == TARGET_HZ
859 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
863 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
864 const struct rusage
*rusage
)
866 struct target_rusage
*target_rusage
;
868 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
869 return -TARGET_EFAULT
;
870 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
871 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
872 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
873 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
874 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
875 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
876 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
877 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
878 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
879 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
880 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
881 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
882 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
883 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
884 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
885 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
886 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
887 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
888 unlock_user_struct(target_rusage
, target_addr
, 1);
893 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
895 abi_ulong target_rlim_swap
;
898 target_rlim_swap
= tswapal(target_rlim
);
899 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
900 return RLIM_INFINITY
;
902 result
= target_rlim_swap
;
903 if (target_rlim_swap
!= (rlim_t
)result
)
904 return RLIM_INFINITY
;
909 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
911 abi_ulong target_rlim_swap
;
914 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
915 target_rlim_swap
= TARGET_RLIM_INFINITY
;
917 target_rlim_swap
= rlim
;
918 result
= tswapal(target_rlim_swap
);
923 static inline int target_to_host_resource(int code
)
926 case TARGET_RLIMIT_AS
:
928 case TARGET_RLIMIT_CORE
:
930 case TARGET_RLIMIT_CPU
:
932 case TARGET_RLIMIT_DATA
:
934 case TARGET_RLIMIT_FSIZE
:
936 case TARGET_RLIMIT_LOCKS
:
938 case TARGET_RLIMIT_MEMLOCK
:
939 return RLIMIT_MEMLOCK
;
940 case TARGET_RLIMIT_MSGQUEUE
:
941 return RLIMIT_MSGQUEUE
;
942 case TARGET_RLIMIT_NICE
:
944 case TARGET_RLIMIT_NOFILE
:
945 return RLIMIT_NOFILE
;
946 case TARGET_RLIMIT_NPROC
:
948 case TARGET_RLIMIT_RSS
:
950 case TARGET_RLIMIT_RTPRIO
:
951 return RLIMIT_RTPRIO
;
952 case TARGET_RLIMIT_SIGPENDING
:
953 return RLIMIT_SIGPENDING
;
954 case TARGET_RLIMIT_STACK
:
961 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
962 abi_ulong target_tv_addr
)
964 struct target_timeval
*target_tv
;
966 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
967 return -TARGET_EFAULT
;
969 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
970 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
972 unlock_user_struct(target_tv
, target_tv_addr
, 0);
977 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
978 const struct timeval
*tv
)
980 struct target_timeval
*target_tv
;
982 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
983 return -TARGET_EFAULT
;
985 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
986 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
988 unlock_user_struct(target_tv
, target_tv_addr
, 1);
993 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
994 abi_ulong target_tz_addr
)
996 struct target_timezone
*target_tz
;
998 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
999 return -TARGET_EFAULT
;
1002 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1003 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1005 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1010 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1013 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1014 abi_ulong target_mq_attr_addr
)
1016 struct target_mq_attr
*target_mq_attr
;
1018 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1019 target_mq_attr_addr
, 1))
1020 return -TARGET_EFAULT
;
1022 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1023 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1024 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1025 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1027 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1032 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1033 const struct mq_attr
*attr
)
1035 struct target_mq_attr
*target_mq_attr
;
1037 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1038 target_mq_attr_addr
, 0))
1039 return -TARGET_EFAULT
;
1041 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1042 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1043 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1044 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1046 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1052 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1053 /* do_select() must return target values and target errnos. */
1054 static abi_long
do_select(int n
,
1055 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1056 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1058 fd_set rfds
, wfds
, efds
;
1059 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1060 struct timeval tv
, *tv_ptr
;
1063 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1067 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1071 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1076 if (target_tv_addr
) {
1077 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1078 return -TARGET_EFAULT
;
1084 ret
= get_errno(select(n
, rfds_ptr
, wfds_ptr
, efds_ptr
, tv_ptr
));
1086 if (!is_error(ret
)) {
1087 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1088 return -TARGET_EFAULT
;
1089 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1090 return -TARGET_EFAULT
;
1091 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1092 return -TARGET_EFAULT
;
1094 if (target_tv_addr
&& copy_to_user_timeval(target_tv_addr
, &tv
))
1095 return -TARGET_EFAULT
;
1102 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1105 return pipe2(host_pipe
, flags
);
1111 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1112 int flags
, int is_pipe2
)
1116 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1119 return get_errno(ret
);
1121 /* Several targets have special calling conventions for the original
1122 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1124 #if defined(TARGET_ALPHA)
1125 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1126 return host_pipe
[0];
1127 #elif defined(TARGET_MIPS)
1128 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1129 return host_pipe
[0];
1130 #elif defined(TARGET_SH4)
1131 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1132 return host_pipe
[0];
1133 #elif defined(TARGET_SPARC)
1134 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1135 return host_pipe
[0];
1139 if (put_user_s32(host_pipe
[0], pipedes
)
1140 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1141 return -TARGET_EFAULT
;
1142 return get_errno(ret
);
1145 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1146 abi_ulong target_addr
,
1149 struct target_ip_mreqn
*target_smreqn
;
1151 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1153 return -TARGET_EFAULT
;
1154 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1155 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1156 if (len
== sizeof(struct target_ip_mreqn
))
1157 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1158 unlock_user(target_smreqn
, target_addr
, 0);
1163 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1164 abi_ulong target_addr
,
1167 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1168 sa_family_t sa_family
;
1169 struct target_sockaddr
*target_saddr
;
1171 if (fd_trans_target_to_host_addr(fd
)) {
1172 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1175 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1177 return -TARGET_EFAULT
;
1179 sa_family
= tswap16(target_saddr
->sa_family
);
1181 /* Oops. The caller might send a incomplete sun_path; sun_path
1182 * must be terminated by \0 (see the manual page), but
1183 * unfortunately it is quite common to specify sockaddr_un
1184 * length as "strlen(x->sun_path)" while it should be
1185 * "strlen(...) + 1". We'll fix that here if needed.
1186 * Linux kernel has a similar feature.
1189 if (sa_family
== AF_UNIX
) {
1190 if (len
< unix_maxlen
&& len
> 0) {
1191 char *cp
= (char*)target_saddr
;
1193 if ( cp
[len
-1] && !cp
[len
] )
1196 if (len
> unix_maxlen
)
1200 memcpy(addr
, target_saddr
, len
);
1201 addr
->sa_family
= sa_family
;
1202 if (sa_family
== AF_PACKET
) {
1203 struct target_sockaddr_ll
*lladdr
;
1205 lladdr
= (struct target_sockaddr_ll
*)addr
;
1206 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1207 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1209 unlock_user(target_saddr
, target_addr
, 0);
1214 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1215 struct sockaddr
*addr
,
1218 struct target_sockaddr
*target_saddr
;
1220 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1222 return -TARGET_EFAULT
;
1223 memcpy(target_saddr
, addr
, len
);
1224 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1225 unlock_user(target_saddr
, target_addr
, len
);
1230 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1231 struct target_msghdr
*target_msgh
)
1233 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1234 abi_long msg_controllen
;
1235 abi_ulong target_cmsg_addr
;
1236 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1237 socklen_t space
= 0;
1239 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1240 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1242 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1243 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1244 target_cmsg_start
= target_cmsg
;
1246 return -TARGET_EFAULT
;
1248 while (cmsg
&& target_cmsg
) {
1249 void *data
= CMSG_DATA(cmsg
);
1250 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1252 int len
= tswapal(target_cmsg
->cmsg_len
)
1253 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
1255 space
+= CMSG_SPACE(len
);
1256 if (space
> msgh
->msg_controllen
) {
1257 space
-= CMSG_SPACE(len
);
1258 /* This is a QEMU bug, since we allocated the payload
1259 * area ourselves (unlike overflow in host-to-target
1260 * conversion, which is just the guest giving us a buffer
1261 * that's too small). It can't happen for the payload types
1262 * we currently support; if it becomes an issue in future
1263 * we would need to improve our allocation strategy to
1264 * something more intelligent than "twice the size of the
1265 * target buffer we're reading from".
1267 gemu_log("Host cmsg overflow\n");
1271 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1272 cmsg
->cmsg_level
= SOL_SOCKET
;
1274 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1276 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1277 cmsg
->cmsg_len
= CMSG_LEN(len
);
1279 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1280 int *fd
= (int *)data
;
1281 int *target_fd
= (int *)target_data
;
1282 int i
, numfds
= len
/ sizeof(int);
1284 for (i
= 0; i
< numfds
; i
++) {
1285 __get_user(fd
[i
], target_fd
+ i
);
1287 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1288 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1289 struct ucred
*cred
= (struct ucred
*)data
;
1290 struct target_ucred
*target_cred
=
1291 (struct target_ucred
*)target_data
;
1293 __get_user(cred
->pid
, &target_cred
->pid
);
1294 __get_user(cred
->uid
, &target_cred
->uid
);
1295 __get_user(cred
->gid
, &target_cred
->gid
);
1297 gemu_log("Unsupported ancillary data: %d/%d\n",
1298 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1299 memcpy(data
, target_data
, len
);
1302 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1303 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1306 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1308 msgh
->msg_controllen
= space
;
1312 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1313 struct msghdr
*msgh
)
1315 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1316 abi_long msg_controllen
;
1317 abi_ulong target_cmsg_addr
;
1318 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1319 socklen_t space
= 0;
1321 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1322 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1324 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1325 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1326 target_cmsg_start
= target_cmsg
;
1328 return -TARGET_EFAULT
;
1330 while (cmsg
&& target_cmsg
) {
1331 void *data
= CMSG_DATA(cmsg
);
1332 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1334 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
1335 int tgt_len
, tgt_space
;
1337 /* We never copy a half-header but may copy half-data;
1338 * this is Linux's behaviour in put_cmsg(). Note that
1339 * truncation here is a guest problem (which we report
1340 * to the guest via the CTRUNC bit), unlike truncation
1341 * in target_to_host_cmsg, which is a QEMU bug.
1343 if (msg_controllen
< sizeof(struct cmsghdr
)) {
1344 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1348 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1349 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1351 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1353 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1355 tgt_len
= TARGET_CMSG_LEN(len
);
1357 /* Payload types which need a different size of payload on
1358 * the target must adjust tgt_len here.
1360 switch (cmsg
->cmsg_level
) {
1362 switch (cmsg
->cmsg_type
) {
1364 tgt_len
= sizeof(struct target_timeval
);
1373 if (msg_controllen
< tgt_len
) {
1374 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1375 tgt_len
= msg_controllen
;
1378 /* We must now copy-and-convert len bytes of payload
1379 * into tgt_len bytes of destination space. Bear in mind
1380 * that in both source and destination we may be dealing
1381 * with a truncated value!
1383 switch (cmsg
->cmsg_level
) {
1385 switch (cmsg
->cmsg_type
) {
1388 int *fd
= (int *)data
;
1389 int *target_fd
= (int *)target_data
;
1390 int i
, numfds
= tgt_len
/ sizeof(int);
1392 for (i
= 0; i
< numfds
; i
++) {
1393 __put_user(fd
[i
], target_fd
+ i
);
1399 struct timeval
*tv
= (struct timeval
*)data
;
1400 struct target_timeval
*target_tv
=
1401 (struct target_timeval
*)target_data
;
1403 if (len
!= sizeof(struct timeval
) ||
1404 tgt_len
!= sizeof(struct target_timeval
)) {
1408 /* copy struct timeval to target */
1409 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1410 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1413 case SCM_CREDENTIALS
:
1415 struct ucred
*cred
= (struct ucred
*)data
;
1416 struct target_ucred
*target_cred
=
1417 (struct target_ucred
*)target_data
;
1419 __put_user(cred
->pid
, &target_cred
->pid
);
1420 __put_user(cred
->uid
, &target_cred
->uid
);
1421 __put_user(cred
->gid
, &target_cred
->gid
);
1431 gemu_log("Unsupported ancillary data: %d/%d\n",
1432 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1433 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1434 if (tgt_len
> len
) {
1435 memset(target_data
+ len
, 0, tgt_len
- len
);
1439 target_cmsg
->cmsg_len
= tswapal(tgt_len
);
1440 tgt_space
= TARGET_CMSG_SPACE(len
);
1441 if (msg_controllen
< tgt_space
) {
1442 tgt_space
= msg_controllen
;
1444 msg_controllen
-= tgt_space
;
1446 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1447 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1450 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1452 target_msgh
->msg_controllen
= tswapal(space
);
1456 /* do_setsockopt() Must return target values and target errnos. */
1457 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
1458 abi_ulong optval_addr
, socklen_t optlen
)
1462 struct ip_mreqn
*ip_mreq
;
1463 struct ip_mreq_source
*ip_mreq_source
;
1467 /* TCP options all take an 'int' value. */
1468 if (optlen
< sizeof(uint32_t))
1469 return -TARGET_EINVAL
;
1471 if (get_user_u32(val
, optval_addr
))
1472 return -TARGET_EFAULT
;
1473 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1480 case IP_ROUTER_ALERT
:
1484 case IP_MTU_DISCOVER
:
1490 case IP_MULTICAST_TTL
:
1491 case IP_MULTICAST_LOOP
:
1493 if (optlen
>= sizeof(uint32_t)) {
1494 if (get_user_u32(val
, optval_addr
))
1495 return -TARGET_EFAULT
;
1496 } else if (optlen
>= 1) {
1497 if (get_user_u8(val
, optval_addr
))
1498 return -TARGET_EFAULT
;
1500 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1502 case IP_ADD_MEMBERSHIP
:
1503 case IP_DROP_MEMBERSHIP
:
1504 if (optlen
< sizeof (struct target_ip_mreq
) ||
1505 optlen
> sizeof (struct target_ip_mreqn
))
1506 return -TARGET_EINVAL
;
1508 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
1509 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
1510 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
1513 case IP_BLOCK_SOURCE
:
1514 case IP_UNBLOCK_SOURCE
:
1515 case IP_ADD_SOURCE_MEMBERSHIP
:
1516 case IP_DROP_SOURCE_MEMBERSHIP
:
1517 if (optlen
!= sizeof (struct target_ip_mreq_source
))
1518 return -TARGET_EINVAL
;
1520 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
1521 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
1522 unlock_user (ip_mreq_source
, optval_addr
, 0);
1531 case IPV6_MTU_DISCOVER
:
1534 case IPV6_RECVPKTINFO
:
1536 if (optlen
< sizeof(uint32_t)) {
1537 return -TARGET_EINVAL
;
1539 if (get_user_u32(val
, optval_addr
)) {
1540 return -TARGET_EFAULT
;
1542 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1543 &val
, sizeof(val
)));
1552 /* struct icmp_filter takes an u32 value */
1553 if (optlen
< sizeof(uint32_t)) {
1554 return -TARGET_EINVAL
;
1557 if (get_user_u32(val
, optval_addr
)) {
1558 return -TARGET_EFAULT
;
1560 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1561 &val
, sizeof(val
)));
1568 case TARGET_SOL_SOCKET
:
1570 case TARGET_SO_RCVTIMEO
:
1574 optname
= SO_RCVTIMEO
;
1577 if (optlen
!= sizeof(struct target_timeval
)) {
1578 return -TARGET_EINVAL
;
1581 if (copy_from_user_timeval(&tv
, optval_addr
)) {
1582 return -TARGET_EFAULT
;
1585 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
1589 case TARGET_SO_SNDTIMEO
:
1590 optname
= SO_SNDTIMEO
;
1592 case TARGET_SO_ATTACH_FILTER
:
1594 struct target_sock_fprog
*tfprog
;
1595 struct target_sock_filter
*tfilter
;
1596 struct sock_fprog fprog
;
1597 struct sock_filter
*filter
;
1600 if (optlen
!= sizeof(*tfprog
)) {
1601 return -TARGET_EINVAL
;
1603 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
1604 return -TARGET_EFAULT
;
1606 if (!lock_user_struct(VERIFY_READ
, tfilter
,
1607 tswapal(tfprog
->filter
), 0)) {
1608 unlock_user_struct(tfprog
, optval_addr
, 1);
1609 return -TARGET_EFAULT
;
1612 fprog
.len
= tswap16(tfprog
->len
);
1613 filter
= g_try_new(struct sock_filter
, fprog
.len
);
1614 if (filter
== NULL
) {
1615 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
1616 unlock_user_struct(tfprog
, optval_addr
, 1);
1617 return -TARGET_ENOMEM
;
1619 for (i
= 0; i
< fprog
.len
; i
++) {
1620 filter
[i
].code
= tswap16(tfilter
[i
].code
);
1621 filter
[i
].jt
= tfilter
[i
].jt
;
1622 filter
[i
].jf
= tfilter
[i
].jf
;
1623 filter
[i
].k
= tswap32(tfilter
[i
].k
);
1625 fprog
.filter
= filter
;
1627 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
1628 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
1631 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
1632 unlock_user_struct(tfprog
, optval_addr
, 1);
1635 case TARGET_SO_BINDTODEVICE
:
1637 char *dev_ifname
, *addr_ifname
;
1639 if (optlen
> IFNAMSIZ
- 1) {
1640 optlen
= IFNAMSIZ
- 1;
1642 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
1644 return -TARGET_EFAULT
;
1646 optname
= SO_BINDTODEVICE
;
1647 addr_ifname
= alloca(IFNAMSIZ
);
1648 memcpy(addr_ifname
, dev_ifname
, optlen
);
1649 addr_ifname
[optlen
] = 0;
1650 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
1651 addr_ifname
, optlen
));
1652 unlock_user (dev_ifname
, optval_addr
, 0);
1655 /* Options with 'int' argument. */
1656 case TARGET_SO_DEBUG
:
1659 case TARGET_SO_REUSEADDR
:
1660 optname
= SO_REUSEADDR
;
1662 case TARGET_SO_TYPE
:
1665 case TARGET_SO_ERROR
:
1668 case TARGET_SO_DONTROUTE
:
1669 optname
= SO_DONTROUTE
;
1671 case TARGET_SO_BROADCAST
:
1672 optname
= SO_BROADCAST
;
1674 case TARGET_SO_SNDBUF
:
1675 optname
= SO_SNDBUF
;
1677 case TARGET_SO_SNDBUFFORCE
:
1678 optname
= SO_SNDBUFFORCE
;
1680 case TARGET_SO_RCVBUF
:
1681 optname
= SO_RCVBUF
;
1683 case TARGET_SO_RCVBUFFORCE
:
1684 optname
= SO_RCVBUFFORCE
;
1686 case TARGET_SO_KEEPALIVE
:
1687 optname
= SO_KEEPALIVE
;
1689 case TARGET_SO_OOBINLINE
:
1690 optname
= SO_OOBINLINE
;
1692 case TARGET_SO_NO_CHECK
:
1693 optname
= SO_NO_CHECK
;
1695 case TARGET_SO_PRIORITY
:
1696 optname
= SO_PRIORITY
;
1699 case TARGET_SO_BSDCOMPAT
:
1700 optname
= SO_BSDCOMPAT
;
1703 case TARGET_SO_PASSCRED
:
1704 optname
= SO_PASSCRED
;
1706 case TARGET_SO_PASSSEC
:
1707 optname
= SO_PASSSEC
;
1709 case TARGET_SO_TIMESTAMP
:
1710 optname
= SO_TIMESTAMP
;
1712 case TARGET_SO_RCVLOWAT
:
1713 optname
= SO_RCVLOWAT
;
1719 if (optlen
< sizeof(uint32_t))
1720 return -TARGET_EINVAL
;
1722 if (get_user_u32(val
, optval_addr
))
1723 return -TARGET_EFAULT
;
1724 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
1728 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
1729 ret
= -TARGET_ENOPROTOOPT
;
1734 /* do_getsockopt() Must return target values and target errnos. */
1735 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
1736 abi_ulong optval_addr
, abi_ulong optlen
)
1743 case TARGET_SOL_SOCKET
:
1746 /* These don't just return a single integer */
1747 case TARGET_SO_LINGER
:
1748 case TARGET_SO_RCVTIMEO
:
1749 case TARGET_SO_SNDTIMEO
:
1750 case TARGET_SO_PEERNAME
:
1752 case TARGET_SO_PEERCRED
: {
1755 struct target_ucred
*tcr
;
1757 if (get_user_u32(len
, optlen
)) {
1758 return -TARGET_EFAULT
;
1761 return -TARGET_EINVAL
;
1765 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
1773 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
1774 return -TARGET_EFAULT
;
1776 __put_user(cr
.pid
, &tcr
->pid
);
1777 __put_user(cr
.uid
, &tcr
->uid
);
1778 __put_user(cr
.gid
, &tcr
->gid
);
1779 unlock_user_struct(tcr
, optval_addr
, 1);
1780 if (put_user_u32(len
, optlen
)) {
1781 return -TARGET_EFAULT
;
1785 /* Options with 'int' argument. */
1786 case TARGET_SO_DEBUG
:
1789 case TARGET_SO_REUSEADDR
:
1790 optname
= SO_REUSEADDR
;
1792 case TARGET_SO_TYPE
:
1795 case TARGET_SO_ERROR
:
1798 case TARGET_SO_DONTROUTE
:
1799 optname
= SO_DONTROUTE
;
1801 case TARGET_SO_BROADCAST
:
1802 optname
= SO_BROADCAST
;
1804 case TARGET_SO_SNDBUF
:
1805 optname
= SO_SNDBUF
;
1807 case TARGET_SO_RCVBUF
:
1808 optname
= SO_RCVBUF
;
1810 case TARGET_SO_KEEPALIVE
:
1811 optname
= SO_KEEPALIVE
;
1813 case TARGET_SO_OOBINLINE
:
1814 optname
= SO_OOBINLINE
;
1816 case TARGET_SO_NO_CHECK
:
1817 optname
= SO_NO_CHECK
;
1819 case TARGET_SO_PRIORITY
:
1820 optname
= SO_PRIORITY
;
1823 case TARGET_SO_BSDCOMPAT
:
1824 optname
= SO_BSDCOMPAT
;
1827 case TARGET_SO_PASSCRED
:
1828 optname
= SO_PASSCRED
;
1830 case TARGET_SO_TIMESTAMP
:
1831 optname
= SO_TIMESTAMP
;
1833 case TARGET_SO_RCVLOWAT
:
1834 optname
= SO_RCVLOWAT
;
1836 case TARGET_SO_ACCEPTCONN
:
1837 optname
= SO_ACCEPTCONN
;
1844 /* TCP options all take an 'int' value. */
1846 if (get_user_u32(len
, optlen
))
1847 return -TARGET_EFAULT
;
1849 return -TARGET_EINVAL
;
1851 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1854 if (optname
== SO_TYPE
) {
1855 val
= host_to_target_sock_type(val
);
1860 if (put_user_u32(val
, optval_addr
))
1861 return -TARGET_EFAULT
;
1863 if (put_user_u8(val
, optval_addr
))
1864 return -TARGET_EFAULT
;
1866 if (put_user_u32(len
, optlen
))
1867 return -TARGET_EFAULT
;
1874 case IP_ROUTER_ALERT
:
1878 case IP_MTU_DISCOVER
:
1884 case IP_MULTICAST_TTL
:
1885 case IP_MULTICAST_LOOP
:
1886 if (get_user_u32(len
, optlen
))
1887 return -TARGET_EFAULT
;
1889 return -TARGET_EINVAL
;
1891 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1894 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
1896 if (put_user_u32(len
, optlen
)
1897 || put_user_u8(val
, optval_addr
))
1898 return -TARGET_EFAULT
;
1900 if (len
> sizeof(int))
1902 if (put_user_u32(len
, optlen
)
1903 || put_user_u32(val
, optval_addr
))
1904 return -TARGET_EFAULT
;
1908 ret
= -TARGET_ENOPROTOOPT
;
1914 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1916 ret
= -TARGET_EOPNOTSUPP
;
1922 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
1923 int count
, int copy
)
1925 struct target_iovec
*target_vec
;
1927 abi_ulong total_len
, max_len
;
1930 bool bad_address
= false;
1936 if (count
< 0 || count
> IOV_MAX
) {
1941 vec
= g_try_new0(struct iovec
, count
);
1947 target_vec
= lock_user(VERIFY_READ
, target_addr
,
1948 count
* sizeof(struct target_iovec
), 1);
1949 if (target_vec
== NULL
) {
1954 /* ??? If host page size > target page size, this will result in a
1955 value larger than what we can actually support. */
1956 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
1959 for (i
= 0; i
< count
; i
++) {
1960 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
1961 abi_long len
= tswapal(target_vec
[i
].iov_len
);
1966 } else if (len
== 0) {
1967 /* Zero length pointer is ignored. */
1968 vec
[i
].iov_base
= 0;
1970 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
1971 /* If the first buffer pointer is bad, this is a fault. But
1972 * subsequent bad buffers will result in a partial write; this
1973 * is realized by filling the vector with null pointers and
1975 if (!vec
[i
].iov_base
) {
1986 if (len
> max_len
- total_len
) {
1987 len
= max_len
- total_len
;
1990 vec
[i
].iov_len
= len
;
1994 unlock_user(target_vec
, target_addr
, 0);
1999 if (tswapal(target_vec
[i
].iov_len
) > 0) {
2000 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
2003 unlock_user(target_vec
, target_addr
, 0);
2010 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
2011 int count
, int copy
)
2013 struct target_iovec
*target_vec
;
2016 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2017 count
* sizeof(struct target_iovec
), 1);
2019 for (i
= 0; i
< count
; i
++) {
2020 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2021 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2025 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
2027 unlock_user(target_vec
, target_addr
, 0);
2033 static inline int target_to_host_sock_type(int *type
)
2036 int target_type
= *type
;
2038 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
2039 case TARGET_SOCK_DGRAM
:
2040 host_type
= SOCK_DGRAM
;
2042 case TARGET_SOCK_STREAM
:
2043 host_type
= SOCK_STREAM
;
2046 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
2049 if (target_type
& TARGET_SOCK_CLOEXEC
) {
2050 #if defined(SOCK_CLOEXEC)
2051 host_type
|= SOCK_CLOEXEC
;
2053 return -TARGET_EINVAL
;
2056 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2057 #if defined(SOCK_NONBLOCK)
2058 host_type
|= SOCK_NONBLOCK
;
2059 #elif !defined(O_NONBLOCK)
2060 return -TARGET_EINVAL
;
2067 /* Try to emulate socket type flags after socket creation. */
2068 static int sock_flags_fixup(int fd
, int target_type
)
2070 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2071 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2072 int flags
= fcntl(fd
, F_GETFL
);
2073 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
2075 return -TARGET_EINVAL
;
2082 static abi_long
packet_target_to_host_sockaddr(void *host_addr
,
2083 abi_ulong target_addr
,
2086 struct sockaddr
*addr
= host_addr
;
2087 struct target_sockaddr
*target_saddr
;
2089 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
2090 if (!target_saddr
) {
2091 return -TARGET_EFAULT
;
2094 memcpy(addr
, target_saddr
, len
);
2095 addr
->sa_family
= tswap16(target_saddr
->sa_family
);
2096 /* spkt_protocol is big-endian */
2098 unlock_user(target_saddr
, target_addr
, 0);
2102 static TargetFdTrans target_packet_trans
= {
2103 .target_to_host_addr
= packet_target_to_host_sockaddr
,
2106 /* do_socket() Must return target values and target errnos. */
2107 static abi_long
do_socket(int domain
, int type
, int protocol
)
2109 int target_type
= type
;
2112 ret
= target_to_host_sock_type(&type
);
2117 if (domain
== PF_NETLINK
)
2118 return -TARGET_EAFNOSUPPORT
;
2120 if (domain
== AF_PACKET
||
2121 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
2122 protocol
= tswap16(protocol
);
2125 ret
= get_errno(socket(domain
, type
, protocol
));
2127 ret
= sock_flags_fixup(ret
, target_type
);
2128 if (type
== SOCK_PACKET
) {
2129 /* Manage an obsolete case :
2130 * if socket type is SOCK_PACKET, bind by name
2132 fd_trans_register(ret
, &target_packet_trans
);
2138 /* do_bind() Must return target values and target errnos. */
2139 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
2145 if ((int)addrlen
< 0) {
2146 return -TARGET_EINVAL
;
2149 addr
= alloca(addrlen
+1);
2151 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
2155 return get_errno(bind(sockfd
, addr
, addrlen
));
2158 /* do_connect() Must return target values and target errnos. */
2159 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
2165 if ((int)addrlen
< 0) {
2166 return -TARGET_EINVAL
;
2169 addr
= alloca(addrlen
+1);
2171 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
2175 return get_errno(connect(sockfd
, addr
, addrlen
));
2178 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2179 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
2180 int flags
, int send
)
2186 abi_ulong target_vec
;
2188 if (msgp
->msg_name
) {
2189 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
2190 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
2191 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
2192 tswapal(msgp
->msg_name
),
2198 msg
.msg_name
= NULL
;
2199 msg
.msg_namelen
= 0;
2201 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
2202 msg
.msg_control
= alloca(msg
.msg_controllen
);
2203 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
2205 count
= tswapal(msgp
->msg_iovlen
);
2206 target_vec
= tswapal(msgp
->msg_iov
);
2207 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
2208 target_vec
, count
, send
);
2210 ret
= -host_to_target_errno(errno
);
2213 msg
.msg_iovlen
= count
;
2217 ret
= target_to_host_cmsg(&msg
, msgp
);
2219 ret
= get_errno(sendmsg(fd
, &msg
, flags
));
2221 ret
= get_errno(recvmsg(fd
, &msg
, flags
));
2222 if (!is_error(ret
)) {
2224 ret
= host_to_target_cmsg(msgp
, &msg
);
2225 if (!is_error(ret
)) {
2226 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
2227 if (msg
.msg_name
!= NULL
) {
2228 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
2229 msg
.msg_name
, msg
.msg_namelen
);
2241 unlock_iovec(vec
, target_vec
, count
, !send
);
2246 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
2247 int flags
, int send
)
2250 struct target_msghdr
*msgp
;
2252 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
2256 return -TARGET_EFAULT
;
2258 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
2259 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
2263 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2264 * so it might not have this *mmsg-specific flag either.
2266 #ifndef MSG_WAITFORONE
2267 #define MSG_WAITFORONE 0x10000
2270 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
2271 unsigned int vlen
, unsigned int flags
,
2274 struct target_mmsghdr
*mmsgp
;
2278 if (vlen
> UIO_MAXIOV
) {
2282 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
2284 return -TARGET_EFAULT
;
2287 for (i
= 0; i
< vlen
; i
++) {
2288 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
2289 if (is_error(ret
)) {
2292 mmsgp
[i
].msg_len
= tswap32(ret
);
2293 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2294 if (flags
& MSG_WAITFORONE
) {
2295 flags
|= MSG_DONTWAIT
;
2299 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
2301 /* Return number of datagrams sent if we sent any at all;
2302 * otherwise return the error.
2310 /* If we don't have a system accept4() then just call accept.
2311 * The callsites to do_accept4() will ensure that they don't
2312 * pass a non-zero flags argument in this config.
2314 #ifndef CONFIG_ACCEPT4
2315 static inline int accept4(int sockfd
, struct sockaddr
*addr
,
2316 socklen_t
*addrlen
, int flags
)
2319 return accept(sockfd
, addr
, addrlen
);
2323 /* do_accept4() Must return target values and target errnos. */
2324 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
2325 abi_ulong target_addrlen_addr
, int flags
)
2332 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
2334 if (target_addr
== 0) {
2335 return get_errno(accept4(fd
, NULL
, NULL
, host_flags
));
2338 /* linux returns EINVAL if addrlen pointer is invalid */
2339 if (get_user_u32(addrlen
, target_addrlen_addr
))
2340 return -TARGET_EINVAL
;
2342 if ((int)addrlen
< 0) {
2343 return -TARGET_EINVAL
;
2346 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2347 return -TARGET_EINVAL
;
2349 addr
= alloca(addrlen
);
2351 ret
= get_errno(accept4(fd
, addr
, &addrlen
, host_flags
));
2352 if (!is_error(ret
)) {
2353 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2354 if (put_user_u32(addrlen
, target_addrlen_addr
))
2355 ret
= -TARGET_EFAULT
;
2360 /* do_getpeername() Must return target values and target errnos. */
2361 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
2362 abi_ulong target_addrlen_addr
)
2368 if (get_user_u32(addrlen
, target_addrlen_addr
))
2369 return -TARGET_EFAULT
;
2371 if ((int)addrlen
< 0) {
2372 return -TARGET_EINVAL
;
2375 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2376 return -TARGET_EFAULT
;
2378 addr
= alloca(addrlen
);
2380 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
2381 if (!is_error(ret
)) {
2382 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2383 if (put_user_u32(addrlen
, target_addrlen_addr
))
2384 ret
= -TARGET_EFAULT
;
2389 /* do_getsockname() Must return target values and target errnos. */
2390 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
2391 abi_ulong target_addrlen_addr
)
2397 if (get_user_u32(addrlen
, target_addrlen_addr
))
2398 return -TARGET_EFAULT
;
2400 if ((int)addrlen
< 0) {
2401 return -TARGET_EINVAL
;
2404 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2405 return -TARGET_EFAULT
;
2407 addr
= alloca(addrlen
);
2409 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
2410 if (!is_error(ret
)) {
2411 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2412 if (put_user_u32(addrlen
, target_addrlen_addr
))
2413 ret
= -TARGET_EFAULT
;
2418 /* do_socketpair() Must return target values and target errnos. */
2419 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
2420 abi_ulong target_tab_addr
)
2425 target_to_host_sock_type(&type
);
2427 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
2428 if (!is_error(ret
)) {
2429 if (put_user_s32(tab
[0], target_tab_addr
)
2430 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
2431 ret
= -TARGET_EFAULT
;
2436 /* do_sendto() Must return target values and target errnos. */
2437 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
2438 abi_ulong target_addr
, socklen_t addrlen
)
2444 if ((int)addrlen
< 0) {
2445 return -TARGET_EINVAL
;
2448 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
2450 return -TARGET_EFAULT
;
2452 addr
= alloca(addrlen
+1);
2453 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
2455 unlock_user(host_msg
, msg
, 0);
2458 ret
= get_errno(sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
2460 ret
= get_errno(send(fd
, host_msg
, len
, flags
));
2462 unlock_user(host_msg
, msg
, 0);
2466 /* do_recvfrom() Must return target values and target errnos. */
2467 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
2468 abi_ulong target_addr
,
2469 abi_ulong target_addrlen
)
2476 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
2478 return -TARGET_EFAULT
;
2480 if (get_user_u32(addrlen
, target_addrlen
)) {
2481 ret
= -TARGET_EFAULT
;
2484 if ((int)addrlen
< 0) {
2485 ret
= -TARGET_EINVAL
;
2488 addr
= alloca(addrlen
);
2489 ret
= get_errno(recvfrom(fd
, host_msg
, len
, flags
, addr
, &addrlen
));
2491 addr
= NULL
; /* To keep compiler quiet. */
2492 ret
= get_errno(qemu_recv(fd
, host_msg
, len
, flags
));
2494 if (!is_error(ret
)) {
2496 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2497 if (put_user_u32(addrlen
, target_addrlen
)) {
2498 ret
= -TARGET_EFAULT
;
2502 unlock_user(host_msg
, msg
, len
);
2505 unlock_user(host_msg
, msg
, 0);
2510 #ifdef TARGET_NR_socketcall
2511 /* do_socketcall() Must return target values and target errnos. */
2512 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
2514 static const unsigned ac
[] = { /* number of arguments per call */
2515 [SOCKOP_socket
] = 3, /* domain, type, protocol */
2516 [SOCKOP_bind
] = 3, /* sockfd, addr, addrlen */
2517 [SOCKOP_connect
] = 3, /* sockfd, addr, addrlen */
2518 [SOCKOP_listen
] = 2, /* sockfd, backlog */
2519 [SOCKOP_accept
] = 3, /* sockfd, addr, addrlen */
2520 [SOCKOP_accept4
] = 4, /* sockfd, addr, addrlen, flags */
2521 [SOCKOP_getsockname
] = 3, /* sockfd, addr, addrlen */
2522 [SOCKOP_getpeername
] = 3, /* sockfd, addr, addrlen */
2523 [SOCKOP_socketpair
] = 4, /* domain, type, protocol, tab */
2524 [SOCKOP_send
] = 4, /* sockfd, msg, len, flags */
2525 [SOCKOP_recv
] = 4, /* sockfd, msg, len, flags */
2526 [SOCKOP_sendto
] = 6, /* sockfd, msg, len, flags, addr, addrlen */
2527 [SOCKOP_recvfrom
] = 6, /* sockfd, msg, len, flags, addr, addrlen */
2528 [SOCKOP_shutdown
] = 2, /* sockfd, how */
2529 [SOCKOP_sendmsg
] = 3, /* sockfd, msg, flags */
2530 [SOCKOP_recvmsg
] = 3, /* sockfd, msg, flags */
2531 [SOCKOP_sendmmsg
] = 4, /* sockfd, msgvec, vlen, flags */
2532 [SOCKOP_recvmmsg
] = 4, /* sockfd, msgvec, vlen, flags */
2533 [SOCKOP_setsockopt
] = 5, /* sockfd, level, optname, optval, optlen */
2534 [SOCKOP_getsockopt
] = 5, /* sockfd, level, optname, optval, optlen */
2536 abi_long a
[6]; /* max 6 args */
2538 /* first, collect the arguments in a[] according to ac[] */
2539 if (num
>= 0 && num
< ARRAY_SIZE(ac
)) {
2541 assert(ARRAY_SIZE(a
) >= ac
[num
]); /* ensure we have space for args */
2542 for (i
= 0; i
< ac
[num
]; ++i
) {
2543 if (get_user_ual(a
[i
], vptr
+ i
* sizeof(abi_long
)) != 0) {
2544 return -TARGET_EFAULT
;
2549 /* now when we have the args, actually handle the call */
2551 case SOCKOP_socket
: /* domain, type, protocol */
2552 return do_socket(a
[0], a
[1], a
[2]);
2553 case SOCKOP_bind
: /* sockfd, addr, addrlen */
2554 return do_bind(a
[0], a
[1], a
[2]);
2555 case SOCKOP_connect
: /* sockfd, addr, addrlen */
2556 return do_connect(a
[0], a
[1], a
[2]);
2557 case SOCKOP_listen
: /* sockfd, backlog */
2558 return get_errno(listen(a
[0], a
[1]));
2559 case SOCKOP_accept
: /* sockfd, addr, addrlen */
2560 return do_accept4(a
[0], a
[1], a
[2], 0);
2561 case SOCKOP_accept4
: /* sockfd, addr, addrlen, flags */
2562 return do_accept4(a
[0], a
[1], a
[2], a
[3]);
2563 case SOCKOP_getsockname
: /* sockfd, addr, addrlen */
2564 return do_getsockname(a
[0], a
[1], a
[2]);
2565 case SOCKOP_getpeername
: /* sockfd, addr, addrlen */
2566 return do_getpeername(a
[0], a
[1], a
[2]);
2567 case SOCKOP_socketpair
: /* domain, type, protocol, tab */
2568 return do_socketpair(a
[0], a
[1], a
[2], a
[3]);
2569 case SOCKOP_send
: /* sockfd, msg, len, flags */
2570 return do_sendto(a
[0], a
[1], a
[2], a
[3], 0, 0);
2571 case SOCKOP_recv
: /* sockfd, msg, len, flags */
2572 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], 0, 0);
2573 case SOCKOP_sendto
: /* sockfd, msg, len, flags, addr, addrlen */
2574 return do_sendto(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
2575 case SOCKOP_recvfrom
: /* sockfd, msg, len, flags, addr, addrlen */
2576 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
2577 case SOCKOP_shutdown
: /* sockfd, how */
2578 return get_errno(shutdown(a
[0], a
[1]));
2579 case SOCKOP_sendmsg
: /* sockfd, msg, flags */
2580 return do_sendrecvmsg(a
[0], a
[1], a
[2], 1);
2581 case SOCKOP_recvmsg
: /* sockfd, msg, flags */
2582 return do_sendrecvmsg(a
[0], a
[1], a
[2], 0);
2583 case SOCKOP_sendmmsg
: /* sockfd, msgvec, vlen, flags */
2584 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 1);
2585 case SOCKOP_recvmmsg
: /* sockfd, msgvec, vlen, flags */
2586 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 0);
2587 case SOCKOP_setsockopt
: /* sockfd, level, optname, optval, optlen */
2588 return do_setsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
2589 case SOCKOP_getsockopt
: /* sockfd, level, optname, optval, optlen */
2590 return do_getsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
2592 gemu_log("Unsupported socketcall: %d\n", num
);
2593 return -TARGET_ENOSYS
;
2598 #define N_SHM_REGIONS 32
2600 static struct shm_region
{
2603 } shm_regions
[N_SHM_REGIONS
];
2605 struct target_semid_ds
2607 struct target_ipc_perm sem_perm
;
2608 abi_ulong sem_otime
;
2609 #if !defined(TARGET_PPC64)
2610 abi_ulong __unused1
;
2612 abi_ulong sem_ctime
;
2613 #if !defined(TARGET_PPC64)
2614 abi_ulong __unused2
;
2616 abi_ulong sem_nsems
;
2617 abi_ulong __unused3
;
2618 abi_ulong __unused4
;
2621 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
2622 abi_ulong target_addr
)
2624 struct target_ipc_perm
*target_ip
;
2625 struct target_semid_ds
*target_sd
;
2627 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2628 return -TARGET_EFAULT
;
2629 target_ip
= &(target_sd
->sem_perm
);
2630 host_ip
->__key
= tswap32(target_ip
->__key
);
2631 host_ip
->uid
= tswap32(target_ip
->uid
);
2632 host_ip
->gid
= tswap32(target_ip
->gid
);
2633 host_ip
->cuid
= tswap32(target_ip
->cuid
);
2634 host_ip
->cgid
= tswap32(target_ip
->cgid
);
2635 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2636 host_ip
->mode
= tswap32(target_ip
->mode
);
2638 host_ip
->mode
= tswap16(target_ip
->mode
);
2640 #if defined(TARGET_PPC)
2641 host_ip
->__seq
= tswap32(target_ip
->__seq
);
2643 host_ip
->__seq
= tswap16(target_ip
->__seq
);
2645 unlock_user_struct(target_sd
, target_addr
, 0);
2649 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
2650 struct ipc_perm
*host_ip
)
2652 struct target_ipc_perm
*target_ip
;
2653 struct target_semid_ds
*target_sd
;
2655 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2656 return -TARGET_EFAULT
;
2657 target_ip
= &(target_sd
->sem_perm
);
2658 target_ip
->__key
= tswap32(host_ip
->__key
);
2659 target_ip
->uid
= tswap32(host_ip
->uid
);
2660 target_ip
->gid
= tswap32(host_ip
->gid
);
2661 target_ip
->cuid
= tswap32(host_ip
->cuid
);
2662 target_ip
->cgid
= tswap32(host_ip
->cgid
);
2663 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2664 target_ip
->mode
= tswap32(host_ip
->mode
);
2666 target_ip
->mode
= tswap16(host_ip
->mode
);
2668 #if defined(TARGET_PPC)
2669 target_ip
->__seq
= tswap32(host_ip
->__seq
);
2671 target_ip
->__seq
= tswap16(host_ip
->__seq
);
2673 unlock_user_struct(target_sd
, target_addr
, 1);
2677 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
2678 abi_ulong target_addr
)
2680 struct target_semid_ds
*target_sd
;
2682 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2683 return -TARGET_EFAULT
;
2684 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
2685 return -TARGET_EFAULT
;
2686 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
2687 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
2688 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
2689 unlock_user_struct(target_sd
, target_addr
, 0);
2693 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
2694 struct semid_ds
*host_sd
)
2696 struct target_semid_ds
*target_sd
;
2698 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2699 return -TARGET_EFAULT
;
2700 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
2701 return -TARGET_EFAULT
;
2702 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
2703 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
2704 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
2705 unlock_user_struct(target_sd
, target_addr
, 1);
/* Guest layout of struct seminfo (IPC_INFO/SEM_INFO result).
 * Field set matches the ten members copied by host_to_target_seminfo(). */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
2722 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
2723 struct seminfo
*host_seminfo
)
2725 struct target_seminfo
*target_seminfo
;
2726 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
2727 return -TARGET_EFAULT
;
2728 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
2729 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
2730 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
2731 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
2732 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
2733 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
2734 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
2735 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
2736 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
2737 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
2738 unlock_user_struct(target_seminfo
, target_addr
, 1);
2744 struct semid_ds
*buf
;
2745 unsigned short *array
;
2746 struct seminfo
*__buf
;
2749 union target_semun
{
2756 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
2757 abi_ulong target_addr
)
2760 unsigned short *array
;
2762 struct semid_ds semid_ds
;
2765 semun
.buf
= &semid_ds
;
2767 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
2769 return get_errno(ret
);
2771 nsems
= semid_ds
.sem_nsems
;
2773 *host_array
= g_try_new(unsigned short, nsems
);
2775 return -TARGET_ENOMEM
;
2777 array
= lock_user(VERIFY_READ
, target_addr
,
2778 nsems
*sizeof(unsigned short), 1);
2780 g_free(*host_array
);
2781 return -TARGET_EFAULT
;
2784 for(i
=0; i
<nsems
; i
++) {
2785 __get_user((*host_array
)[i
], &array
[i
]);
2787 unlock_user(array
, target_addr
, 0);
2792 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
2793 unsigned short **host_array
)
2796 unsigned short *array
;
2798 struct semid_ds semid_ds
;
2801 semun
.buf
= &semid_ds
;
2803 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
2805 return get_errno(ret
);
2807 nsems
= semid_ds
.sem_nsems
;
2809 array
= lock_user(VERIFY_WRITE
, target_addr
,
2810 nsems
*sizeof(unsigned short), 0);
2812 return -TARGET_EFAULT
;
2814 for(i
=0; i
<nsems
; i
++) {
2815 __put_user((*host_array
)[i
], &array
[i
]);
2817 g_free(*host_array
);
2818 unlock_user(array
, target_addr
, 1);
2823 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
2824 abi_ulong target_arg
)
2826 union target_semun target_su
= { .buf
= target_arg
};
2828 struct semid_ds dsarg
;
2829 unsigned short *array
= NULL
;
2830 struct seminfo seminfo
;
2831 abi_long ret
= -TARGET_EINVAL
;
2838 /* In 64 bit cross-endian situations, we will erroneously pick up
2839 * the wrong half of the union for the "val" element. To rectify
2840 * this, the entire 8-byte structure is byteswapped, followed by
2841 * a swap of the 4 byte val field. In other cases, the data is
2842 * already in proper host byte order. */
2843 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
2844 target_su
.buf
= tswapal(target_su
.buf
);
2845 arg
.val
= tswap32(target_su
.val
);
2847 arg
.val
= target_su
.val
;
2849 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2853 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
2857 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2858 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
2865 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
2869 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2870 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
2876 arg
.__buf
= &seminfo
;
2877 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2878 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
2886 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
/* Guest layout of struct sembuf (semop operation descriptor).
 * Members grounded by target_to_host_sembuf(). */
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};
2899 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
2900 abi_ulong target_addr
,
2903 struct target_sembuf
*target_sembuf
;
2906 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
2907 nsops
*sizeof(struct target_sembuf
), 1);
2909 return -TARGET_EFAULT
;
2911 for(i
=0; i
<nsops
; i
++) {
2912 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
2913 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
2914 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
2917 unlock_user(target_sembuf
, target_addr
, 0);
2922 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
2924 struct sembuf sops
[nsops
];
2926 if (target_to_host_sembuf(sops
, ptr
, nsops
))
2927 return -TARGET_EFAULT
;
2929 return get_errno(semop(semid
, sops
, nsops
));
2932 struct target_msqid_ds
2934 struct target_ipc_perm msg_perm
;
2935 abi_ulong msg_stime
;
2936 #if TARGET_ABI_BITS == 32
2937 abi_ulong __unused1
;
2939 abi_ulong msg_rtime
;
2940 #if TARGET_ABI_BITS == 32
2941 abi_ulong __unused2
;
2943 abi_ulong msg_ctime
;
2944 #if TARGET_ABI_BITS == 32
2945 abi_ulong __unused3
;
2947 abi_ulong __msg_cbytes
;
2949 abi_ulong msg_qbytes
;
2950 abi_ulong msg_lspid
;
2951 abi_ulong msg_lrpid
;
2952 abi_ulong __unused4
;
2953 abi_ulong __unused5
;
2956 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
2957 abi_ulong target_addr
)
2959 struct target_msqid_ds
*target_md
;
2961 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
2962 return -TARGET_EFAULT
;
2963 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
2964 return -TARGET_EFAULT
;
2965 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
2966 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
2967 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
2968 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
2969 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
2970 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
2971 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
2972 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
2973 unlock_user_struct(target_md
, target_addr
, 0);
2977 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
2978 struct msqid_ds
*host_md
)
2980 struct target_msqid_ds
*target_md
;
2982 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
2983 return -TARGET_EFAULT
;
2984 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
2985 return -TARGET_EFAULT
;
2986 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
2987 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
2988 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
2989 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
2990 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
2991 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
2992 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
2993 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
2994 unlock_user_struct(target_md
, target_addr
, 1);
/* Guest layout of struct msginfo (IPC_INFO/MSG_INFO result).
 * Field set grounded by host_to_target_msginfo(). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
3009 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
3010 struct msginfo
*host_msginfo
)
3012 struct target_msginfo
*target_msginfo
;
3013 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
3014 return -TARGET_EFAULT
;
3015 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
3016 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
3017 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
3018 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
3019 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
3020 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
3021 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
3022 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
3023 unlock_user_struct(target_msginfo
, target_addr
, 1);
3027 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
3029 struct msqid_ds dsarg
;
3030 struct msginfo msginfo
;
3031 abi_long ret
= -TARGET_EINVAL
;
3039 if (target_to_host_msqid_ds(&dsarg
,ptr
))
3040 return -TARGET_EFAULT
;
3041 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
3042 if (host_to_target_msqid_ds(ptr
,&dsarg
))
3043 return -TARGET_EFAULT
;
3046 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
3050 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
3051 if (host_to_target_msginfo(ptr
, &msginfo
))
3052 return -TARGET_EFAULT
;
3059 struct target_msgbuf
{
3064 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
3065 ssize_t msgsz
, int msgflg
)
3067 struct target_msgbuf
*target_mb
;
3068 struct msgbuf
*host_mb
;
3072 return -TARGET_EINVAL
;
3075 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
3076 return -TARGET_EFAULT
;
3077 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
3079 unlock_user_struct(target_mb
, msgp
, 0);
3080 return -TARGET_ENOMEM
;
3082 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
3083 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
3084 ret
= get_errno(msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
3086 unlock_user_struct(target_mb
, msgp
, 0);
3091 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
3092 unsigned int msgsz
, abi_long msgtyp
,
3095 struct target_msgbuf
*target_mb
;
3097 struct msgbuf
*host_mb
;
3100 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
3101 return -TARGET_EFAULT
;
3103 host_mb
= g_malloc(msgsz
+sizeof(long));
3104 ret
= get_errno(msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
3107 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
3108 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
3109 if (!target_mtext
) {
3110 ret
= -TARGET_EFAULT
;
3113 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
3114 unlock_user(target_mtext
, target_mtext_addr
, ret
);
3117 target_mb
->mtype
= tswapal(host_mb
->mtype
);
3121 unlock_user_struct(target_mb
, msgp
, 1);
3126 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
3127 abi_ulong target_addr
)
3129 struct target_shmid_ds
*target_sd
;
3131 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3132 return -TARGET_EFAULT
;
3133 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
3134 return -TARGET_EFAULT
;
3135 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
3136 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
3137 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
3138 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
3139 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
3140 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
3141 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
3142 unlock_user_struct(target_sd
, target_addr
, 0);
3146 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
3147 struct shmid_ds
*host_sd
)
3149 struct target_shmid_ds
*target_sd
;
3151 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3152 return -TARGET_EFAULT
;
3153 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
3154 return -TARGET_EFAULT
;
3155 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
3156 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
3157 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
3158 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
3159 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
3160 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
3161 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
3162 unlock_user_struct(target_sd
, target_addr
, 1);
3166 struct target_shminfo
{
3174 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
3175 struct shminfo
*host_shminfo
)
3177 struct target_shminfo
*target_shminfo
;
3178 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
3179 return -TARGET_EFAULT
;
3180 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
3181 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
3182 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
3183 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
3184 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
3185 unlock_user_struct(target_shminfo
, target_addr
, 1);
3189 struct target_shm_info
{
3194 abi_ulong swap_attempts
;
3195 abi_ulong swap_successes
;
3198 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
3199 struct shm_info
*host_shm_info
)
3201 struct target_shm_info
*target_shm_info
;
3202 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
3203 return -TARGET_EFAULT
;
3204 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
3205 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
3206 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
3207 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
3208 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
3209 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
3210 unlock_user_struct(target_shm_info
, target_addr
, 1);
3214 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
3216 struct shmid_ds dsarg
;
3217 struct shminfo shminfo
;
3218 struct shm_info shm_info
;
3219 abi_long ret
= -TARGET_EINVAL
;
3227 if (target_to_host_shmid_ds(&dsarg
, buf
))
3228 return -TARGET_EFAULT
;
3229 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
3230 if (host_to_target_shmid_ds(buf
, &dsarg
))
3231 return -TARGET_EFAULT
;
3234 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
3235 if (host_to_target_shminfo(buf
, &shminfo
))
3236 return -TARGET_EFAULT
;
3239 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
3240 if (host_to_target_shm_info(buf
, &shm_info
))
3241 return -TARGET_EFAULT
;
3246 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
3253 static inline abi_ulong
do_shmat(int shmid
, abi_ulong shmaddr
, int shmflg
)
3257 struct shmid_ds shm_info
;
3260 /* find out the length of the shared memory segment */
3261 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
3262 if (is_error(ret
)) {
3263 /* can't get length, bail out */
3270 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
3272 abi_ulong mmap_start
;
3274 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
3276 if (mmap_start
== -1) {
3278 host_raddr
= (void *)-1;
3280 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
3283 if (host_raddr
== (void *)-1) {
3285 return get_errno((long)host_raddr
);
3287 raddr
=h2g((unsigned long)host_raddr
);
3289 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
3290 PAGE_VALID
| PAGE_READ
|
3291 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
3293 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
3294 if (shm_regions
[i
].start
== 0) {
3295 shm_regions
[i
].start
= raddr
;
3296 shm_regions
[i
].size
= shm_info
.shm_segsz
;
3306 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
3310 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
3311 if (shm_regions
[i
].start
== shmaddr
) {
3312 shm_regions
[i
].start
= 0;
3313 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
3318 return get_errno(shmdt(g2h(shmaddr
)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos.
 * Demultiplexer for the legacy ipc(2) syscall: the low 16 bits of `call`
 * select the operation, the high 16 bits carry the interface version. */
static abi_long do_ipc(unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old-style msgrcv passes a kludge struct holding the
                 * buffer pointer and the message type. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                                tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
3427 /* kernel structure types definitions */
3429 #define STRUCT(name, ...) STRUCT_ ## name,
3430 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3432 #include "syscall_types.h"
3436 #undef STRUCT_SPECIAL
3438 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3439 #define STRUCT_SPECIAL(name)
3440 #include "syscall_types.h"
3442 #undef STRUCT_SPECIAL
3444 typedef struct IOCTLEntry IOCTLEntry
;
3446 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3447 int fd
, int cmd
, abi_long arg
);
3451 unsigned int host_cmd
;
3454 do_ioctl_fn
*do_ioctl
;
3455 const argtype arg_type
[5];
3458 #define IOC_R 0x0001
3459 #define IOC_W 0x0002
3460 #define IOC_RW (IOC_R | IOC_W)
3462 #define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof(*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
3553 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3554 int fd
, int cmd
, abi_long arg
)
3556 const argtype
*arg_type
= ie
->arg_type
;
3560 struct ifconf
*host_ifconf
;
3562 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
3563 int target_ifreq_size
;
3568 abi_long target_ifc_buf
;
3572 assert(arg_type
[0] == TYPE_PTR
);
3573 assert(ie
->access
== IOC_RW
);
3576 target_size
= thunk_type_size(arg_type
, 0);
3578 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3580 return -TARGET_EFAULT
;
3581 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3582 unlock_user(argptr
, arg
, 0);
3584 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
3585 target_ifc_len
= host_ifconf
->ifc_len
;
3586 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
3588 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
3589 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
3590 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
3592 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
3593 if (outbufsz
> MAX_STRUCT_SIZE
) {
3594 /* We can't fit all the extents into the fixed size buffer.
3595 * Allocate one that is large enough and use it instead.
3597 host_ifconf
= malloc(outbufsz
);
3599 return -TARGET_ENOMEM
;
3601 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
3604 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
3606 host_ifconf
->ifc_len
= host_ifc_len
;
3607 host_ifconf
->ifc_buf
= host_ifc_buf
;
3609 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, host_ifconf
));
3610 if (!is_error(ret
)) {
3611 /* convert host ifc_len to target ifc_len */
3613 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
3614 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
3615 host_ifconf
->ifc_len
= target_ifc_len
;
3617 /* restore target ifc_buf */
3619 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
3621 /* copy struct ifconf to target user */
3623 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3625 return -TARGET_EFAULT
;
3626 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
3627 unlock_user(argptr
, arg
, target_size
);
3629 /* copy ifreq[] to target user */
3631 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
3632 for (i
= 0; i
< nb_ifreq
; i
++) {
3633 thunk_convert(argptr
+ i
* target_ifreq_size
,
3634 host_ifc_buf
+ i
* sizeof(struct ifreq
),
3635 ifreq_arg_type
, THUNK_TARGET
);
3637 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
3647 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
3648 int cmd
, abi_long arg
)
3651 struct dm_ioctl
*host_dm
;
3652 abi_long guest_data
;
3653 uint32_t guest_data_size
;
3655 const argtype
*arg_type
= ie
->arg_type
;
3657 void *big_buf
= NULL
;
3661 target_size
= thunk_type_size(arg_type
, 0);
3662 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3664 ret
= -TARGET_EFAULT
;
3667 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3668 unlock_user(argptr
, arg
, 0);
3670 /* buf_temp is too small, so fetch things into a bigger buffer */
3671 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
3672 memcpy(big_buf
, buf_temp
, target_size
);
3676 guest_data
= arg
+ host_dm
->data_start
;
3677 if ((guest_data
- arg
) < 0) {
3681 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
3682 host_data
= (char*)host_dm
+ host_dm
->data_start
;
3684 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
3685 switch (ie
->host_cmd
) {
3687 case DM_LIST_DEVICES
:
3690 case DM_DEV_SUSPEND
:
3693 case DM_TABLE_STATUS
:
3694 case DM_TABLE_CLEAR
:
3696 case DM_LIST_VERSIONS
:
3700 case DM_DEV_SET_GEOMETRY
:
3701 /* data contains only strings */
3702 memcpy(host_data
, argptr
, guest_data_size
);
3705 memcpy(host_data
, argptr
, guest_data_size
);
3706 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
3710 void *gspec
= argptr
;
3711 void *cur_data
= host_data
;
3712 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
3713 int spec_size
= thunk_type_size(arg_type
, 0);
3716 for (i
= 0; i
< host_dm
->target_count
; i
++) {
3717 struct dm_target_spec
*spec
= cur_data
;
3721 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
3722 slen
= strlen((char*)gspec
+ spec_size
) + 1;
3724 spec
->next
= sizeof(*spec
) + slen
;
3725 strcpy((char*)&spec
[1], gspec
+ spec_size
);
3727 cur_data
+= spec
->next
;
3732 ret
= -TARGET_EINVAL
;
3733 unlock_user(argptr
, guest_data
, 0);
3736 unlock_user(argptr
, guest_data
, 0);
3738 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3739 if (!is_error(ret
)) {
3740 guest_data
= arg
+ host_dm
->data_start
;
3741 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
3742 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
3743 switch (ie
->host_cmd
) {
3748 case DM_DEV_SUSPEND
:
3751 case DM_TABLE_CLEAR
:
3753 case DM_DEV_SET_GEOMETRY
:
3754 /* no return data */
3756 case DM_LIST_DEVICES
:
3758 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
3759 uint32_t remaining_data
= guest_data_size
;
3760 void *cur_data
= argptr
;
3761 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
3762 int nl_size
= 12; /* can't use thunk_size due to alignment */
3765 uint32_t next
= nl
->next
;
3767 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
3769 if (remaining_data
< nl
->next
) {
3770 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3773 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
3774 strcpy(cur_data
+ nl_size
, nl
->name
);
3775 cur_data
+= nl
->next
;
3776 remaining_data
-= nl
->next
;
3780 nl
= (void*)nl
+ next
;
3785 case DM_TABLE_STATUS
:
3787 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
3788 void *cur_data
= argptr
;
3789 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
3790 int spec_size
= thunk_type_size(arg_type
, 0);
3793 for (i
= 0; i
< host_dm
->target_count
; i
++) {
3794 uint32_t next
= spec
->next
;
3795 int slen
= strlen((char*)&spec
[1]) + 1;
3796 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
3797 if (guest_data_size
< spec
->next
) {
3798 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3801 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
3802 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
3803 cur_data
= argptr
+ spec
->next
;
3804 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
3810 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
3811 int count
= *(uint32_t*)hdata
;
3812 uint64_t *hdev
= hdata
+ 8;
3813 uint64_t *gdev
= argptr
+ 8;
3816 *(uint32_t*)argptr
= tswap32(count
);
3817 for (i
= 0; i
< count
; i
++) {
3818 *gdev
= tswap64(*hdev
);
3824 case DM_LIST_VERSIONS
:
3826 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
3827 uint32_t remaining_data
= guest_data_size
;
3828 void *cur_data
= argptr
;
3829 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
3830 int vers_size
= thunk_type_size(arg_type
, 0);
3833 uint32_t next
= vers
->next
;
3835 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
3837 if (remaining_data
< vers
->next
) {
3838 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3841 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
3842 strcpy(cur_data
+ vers_size
, vers
->name
);
3843 cur_data
+= vers
->next
;
3844 remaining_data
-= vers
->next
;
3848 vers
= (void*)vers
+ next
;
3853 unlock_user(argptr
, guest_data
, 0);
3854 ret
= -TARGET_EINVAL
;
3857 unlock_user(argptr
, guest_data
, guest_data_size
);
3859 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3861 ret
= -TARGET_EFAULT
;
3864 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3865 unlock_user(argptr
, arg
, target_size
);
3872 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
3873 int cmd
, abi_long arg
)
3877 const argtype
*arg_type
= ie
->arg_type
;
3878 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
3881 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
3882 struct blkpg_partition host_part
;
3884 /* Read and convert blkpg */
3886 target_size
= thunk_type_size(arg_type
, 0);
3887 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3889 ret
= -TARGET_EFAULT
;
3892 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3893 unlock_user(argptr
, arg
, 0);
3895 switch (host_blkpg
->op
) {
3896 case BLKPG_ADD_PARTITION
:
3897 case BLKPG_DEL_PARTITION
:
3898 /* payload is struct blkpg_partition */
3901 /* Unknown opcode */
3902 ret
= -TARGET_EINVAL
;
3906 /* Read and convert blkpg->data */
3907 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
3908 target_size
= thunk_type_size(part_arg_type
, 0);
3909 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3911 ret
= -TARGET_EFAULT
;
3914 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
3915 unlock_user(argptr
, arg
, 0);
3917 /* Swizzle the data pointer to our local copy and call! */
3918 host_blkpg
->data
= &host_part
;
3919 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, host_blkpg
));
3925 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3926 int fd
, int cmd
, abi_long arg
)
3928 const argtype
*arg_type
= ie
->arg_type
;
3929 const StructEntry
*se
;
3930 const argtype
*field_types
;
3931 const int *dst_offsets
, *src_offsets
;
3934 abi_ulong
*target_rt_dev_ptr
;
3935 unsigned long *host_rt_dev_ptr
;
3939 assert(ie
->access
== IOC_W
);
3940 assert(*arg_type
== TYPE_PTR
);
3942 assert(*arg_type
== TYPE_STRUCT
);
3943 target_size
= thunk_type_size(arg_type
, 0);
3944 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3946 return -TARGET_EFAULT
;
3949 assert(*arg_type
== (int)STRUCT_rtentry
);
3950 se
= struct_entries
+ *arg_type
++;
3951 assert(se
->convert
[0] == NULL
);
3952 /* convert struct here to be able to catch rt_dev string */
3953 field_types
= se
->field_types
;
3954 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
3955 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
3956 for (i
= 0; i
< se
->nb_fields
; i
++) {
3957 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
3958 assert(*field_types
== TYPE_PTRVOID
);
3959 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
3960 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
3961 if (*target_rt_dev_ptr
!= 0) {
3962 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
3963 tswapal(*target_rt_dev_ptr
));
3964 if (!*host_rt_dev_ptr
) {
3965 unlock_user(argptr
, arg
, 0);
3966 return -TARGET_EFAULT
;
3969 *host_rt_dev_ptr
= 0;
3974 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
3975 argptr
+ src_offsets
[i
],
3976 field_types
, THUNK_HOST
);
3978 unlock_user(argptr
, arg
, 0);
3980 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3981 if (*host_rt_dev_ptr
!= 0) {
3982 unlock_user((void *)*host_rt_dev_ptr
,
3983 *target_rt_dev_ptr
, 0);
3988 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3989 int fd
, int cmd
, abi_long arg
)
3991 int sig
= target_to_host_signal(arg
);
3992 return get_errno(ioctl(fd
, ie
->host_cmd
, sig
));
/* Master ioctl translation table.  Each entry pairs a target ioctl number
 * with its host equivalent plus an access mode and argument type
 * description; the 'dofn' slot of IOCTL_SPECIAL names a custom conversion
 * handler (one of the do_ioctl_* helpers above).  The entries themselves
 * are generated by expanding these macros — presumably from an included
 * list file; not visible in this chunk, TODO confirm. */
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
4004 /* ??? Implement proper locking for ioctls. */
4005 /* do_ioctl() Must return target values and target errnos. */
4006 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
4008 const IOCTLEntry
*ie
;
4009 const argtype
*arg_type
;
4011 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
4017 if (ie
->target_cmd
== 0) {
4018 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
4019 return -TARGET_ENOSYS
;
4021 if (ie
->target_cmd
== cmd
)
4025 arg_type
= ie
->arg_type
;
4027 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
4030 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
4033 switch(arg_type
[0]) {
4036 ret
= get_errno(ioctl(fd
, ie
->host_cmd
));
4040 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, arg
));
4044 target_size
= thunk_type_size(arg_type
, 0);
4045 switch(ie
->access
) {
4047 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
4048 if (!is_error(ret
)) {
4049 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4051 return -TARGET_EFAULT
;
4052 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
4053 unlock_user(argptr
, arg
, target_size
);
4057 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4059 return -TARGET_EFAULT
;
4060 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4061 unlock_user(argptr
, arg
, 0);
4062 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
4066 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4068 return -TARGET_EFAULT
;
4069 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4070 unlock_user(argptr
, arg
, 0);
4071 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
4072 if (!is_error(ret
)) {
4073 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4075 return -TARGET_EFAULT
;
4076 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
4077 unlock_user(argptr
, arg
, target_size
);
4083 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
4084 (long)cmd
, arg_type
[0]);
4085 ret
= -TARGET_ENOSYS
;
4091 static const bitmask_transtbl iflag_tbl
[] = {
4092 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
4093 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
4094 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
4095 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
4096 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
4097 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
4098 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
4099 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
4100 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
4101 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
4102 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
4103 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
4104 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
4105 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
4109 static const bitmask_transtbl oflag_tbl
[] = {
4110 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
4111 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
4112 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
4113 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
4114 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
4115 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
4116 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
4117 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
4118 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
4119 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
4120 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
4121 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
4122 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
4123 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
4124 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
4125 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
4126 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
4127 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
4128 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
4129 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
4130 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
4131 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
4132 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
4133 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
4137 static const bitmask_transtbl cflag_tbl
[] = {
4138 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
4139 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
4140 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
4141 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
4142 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
4143 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
4144 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
4145 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
4146 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
4147 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
4148 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
4149 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
4150 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
4151 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
4152 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
4153 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
4154 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
4155 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
4156 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
4157 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
4158 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
4159 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
4160 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
4161 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
4162 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
4163 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
4164 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
4165 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
4166 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
4167 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
4168 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
4172 static const bitmask_transtbl lflag_tbl
[] = {
4173 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
4174 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
4175 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
4176 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
4177 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
4178 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
4179 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
4180 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
4181 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
4182 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
4183 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
4184 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
4185 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
4186 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
4187 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
4191 static void target_to_host_termios (void *dst
, const void *src
)
4193 struct host_termios
*host
= dst
;
4194 const struct target_termios
*target
= src
;
4197 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
4199 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
4201 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
4203 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
4204 host
->c_line
= target
->c_line
;
4206 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
4207 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
4208 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
4209 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
4210 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
4211 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
4212 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
4213 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
4214 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
4215 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
4216 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
4217 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
4218 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
4219 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
4220 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
4221 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
4222 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
4223 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
4226 static void host_to_target_termios (void *dst
, const void *src
)
4228 struct target_termios
*target
= dst
;
4229 const struct host_termios
*host
= src
;
4232 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
4234 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
4236 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
4238 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
4239 target
->c_line
= host
->c_line
;
4241 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
4242 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
4243 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
4244 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
4245 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
4246 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
4247 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
4248 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
4249 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
4250 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
4251 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
4252 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
4253 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
4254 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
4255 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
4256 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
4257 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
4258 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
4261 static const StructEntry struct_termios_def
= {
4262 .convert
= { host_to_target_termios
, target_to_host_termios
},
4263 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
4264 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
4267 static bitmask_transtbl mmap_flags_tbl
[] = {
4268 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
4269 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
4270 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
4271 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
, MAP_ANONYMOUS
, MAP_ANONYMOUS
},
4272 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
, MAP_GROWSDOWN
, MAP_GROWSDOWN
},
4273 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
, MAP_DENYWRITE
, MAP_DENYWRITE
},
4274 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
, MAP_EXECUTABLE
, MAP_EXECUTABLE
},
4275 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
4276 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
, MAP_NORESERVE
,
4281 #if defined(TARGET_I386)
4283 /* NOTE: there is really one LDT for all the threads */
4284 static uint8_t *ldt_table
;
4286 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
4293 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
4294 if (size
> bytecount
)
4296 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
4298 return -TARGET_EFAULT
;
4299 /* ??? Should this by byteswapped? */
4300 memcpy(p
, ldt_table
, size
);
4301 unlock_user(p
, ptr
, size
);
4305 /* XXX: add locking support */
4306 static abi_long
write_ldt(CPUX86State
*env
,
4307 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
4309 struct target_modify_ldt_ldt_s ldt_info
;
4310 struct target_modify_ldt_ldt_s
*target_ldt_info
;
4311 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
4312 int seg_not_present
, useable
, lm
;
4313 uint32_t *lp
, entry_1
, entry_2
;
4315 if (bytecount
!= sizeof(ldt_info
))
4316 return -TARGET_EINVAL
;
4317 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
4318 return -TARGET_EFAULT
;
4319 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
4320 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
4321 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
4322 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
4323 unlock_user_struct(target_ldt_info
, ptr
, 0);
4325 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
4326 return -TARGET_EINVAL
;
4327 seg_32bit
= ldt_info
.flags
& 1;
4328 contents
= (ldt_info
.flags
>> 1) & 3;
4329 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
4330 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
4331 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
4332 useable
= (ldt_info
.flags
>> 6) & 1;
4336 lm
= (ldt_info
.flags
>> 7) & 1;
4338 if (contents
== 3) {
4340 return -TARGET_EINVAL
;
4341 if (seg_not_present
== 0)
4342 return -TARGET_EINVAL
;
4344 /* allocate the LDT */
4346 env
->ldt
.base
= target_mmap(0,
4347 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
4348 PROT_READ
|PROT_WRITE
,
4349 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
4350 if (env
->ldt
.base
== -1)
4351 return -TARGET_ENOMEM
;
4352 memset(g2h(env
->ldt
.base
), 0,
4353 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
4354 env
->ldt
.limit
= 0xffff;
4355 ldt_table
= g2h(env
->ldt
.base
);
4358 /* NOTE: same code as Linux kernel */
4359 /* Allow LDTs to be cleared by the user. */
4360 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
4363 read_exec_only
== 1 &&
4365 limit_in_pages
== 0 &&
4366 seg_not_present
== 1 &&
4374 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
4375 (ldt_info
.limit
& 0x0ffff);
4376 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
4377 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
4378 (ldt_info
.limit
& 0xf0000) |
4379 ((read_exec_only
^ 1) << 9) |
4381 ((seg_not_present
^ 1) << 15) |
4383 (limit_in_pages
<< 23) |
4387 entry_2
|= (useable
<< 20);
4389 /* Install the new entry ... */
4391 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
4392 lp
[0] = tswap32(entry_1
);
4393 lp
[1] = tswap32(entry_2
);
4397 /* specific and weird i386 syscalls */
4398 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
4399 unsigned long bytecount
)
4405 ret
= read_ldt(ptr
, bytecount
);
4408 ret
= write_ldt(env
, ptr
, bytecount
, 1);
4411 ret
= write_ldt(env
, ptr
, bytecount
, 0);
4414 ret
= -TARGET_ENOSYS
;
4420 #if defined(TARGET_I386) && defined(TARGET_ABI32)
4421 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
4423 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
4424 struct target_modify_ldt_ldt_s ldt_info
;
4425 struct target_modify_ldt_ldt_s
*target_ldt_info
;
4426 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
4427 int seg_not_present
, useable
, lm
;
4428 uint32_t *lp
, entry_1
, entry_2
;
4431 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
4432 if (!target_ldt_info
)
4433 return -TARGET_EFAULT
;
4434 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
4435 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
4436 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
4437 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
4438 if (ldt_info
.entry_number
== -1) {
4439 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
4440 if (gdt_table
[i
] == 0) {
4441 ldt_info
.entry_number
= i
;
4442 target_ldt_info
->entry_number
= tswap32(i
);
4447 unlock_user_struct(target_ldt_info
, ptr
, 1);
4449 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
4450 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
4451 return -TARGET_EINVAL
;
4452 seg_32bit
= ldt_info
.flags
& 1;
4453 contents
= (ldt_info
.flags
>> 1) & 3;
4454 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
4455 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
4456 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
4457 useable
= (ldt_info
.flags
>> 6) & 1;
4461 lm
= (ldt_info
.flags
>> 7) & 1;
4464 if (contents
== 3) {
4465 if (seg_not_present
== 0)
4466 return -TARGET_EINVAL
;
4469 /* NOTE: same code as Linux kernel */
4470 /* Allow LDTs to be cleared by the user. */
4471 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
4472 if ((contents
== 0 &&
4473 read_exec_only
== 1 &&
4475 limit_in_pages
== 0 &&
4476 seg_not_present
== 1 &&
4484 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
4485 (ldt_info
.limit
& 0x0ffff);
4486 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
4487 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
4488 (ldt_info
.limit
& 0xf0000) |
4489 ((read_exec_only
^ 1) << 9) |
4491 ((seg_not_present
^ 1) << 15) |
4493 (limit_in_pages
<< 23) |
4498 /* Install the new entry ... */
4500 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
4501 lp
[0] = tswap32(entry_1
);
4502 lp
[1] = tswap32(entry_2
);
4506 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
4508 struct target_modify_ldt_ldt_s
*target_ldt_info
;
4509 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
4510 uint32_t base_addr
, limit
, flags
;
4511 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
4512 int seg_not_present
, useable
, lm
;
4513 uint32_t *lp
, entry_1
, entry_2
;
4515 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
4516 if (!target_ldt_info
)
4517 return -TARGET_EFAULT
;
4518 idx
= tswap32(target_ldt_info
->entry_number
);
4519 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
4520 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
4521 unlock_user_struct(target_ldt_info
, ptr
, 1);
4522 return -TARGET_EINVAL
;
4524 lp
= (uint32_t *)(gdt_table
+ idx
);
4525 entry_1
= tswap32(lp
[0]);
4526 entry_2
= tswap32(lp
[1]);
4528 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
4529 contents
= (entry_2
>> 10) & 3;
4530 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
4531 seg_32bit
= (entry_2
>> 22) & 1;
4532 limit_in_pages
= (entry_2
>> 23) & 1;
4533 useable
= (entry_2
>> 20) & 1;
4537 lm
= (entry_2
>> 21) & 1;
4539 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
4540 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
4541 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
4542 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
4543 base_addr
= (entry_1
>> 16) |
4544 (entry_2
& 0xff000000) |
4545 ((entry_2
& 0xff) << 16);
4546 target_ldt_info
->base_addr
= tswapal(base_addr
);
4547 target_ldt_info
->limit
= tswap32(limit
);
4548 target_ldt_info
->flags
= tswap32(flags
);
4549 unlock_user_struct(target_ldt_info
, ptr
, 1);
4552 #endif /* TARGET_I386 && TARGET_ABI32 */
4554 #ifndef TARGET_ABI32
4555 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
4562 case TARGET_ARCH_SET_GS
:
4563 case TARGET_ARCH_SET_FS
:
4564 if (code
== TARGET_ARCH_SET_GS
)
4568 cpu_x86_load_seg(env
, idx
, 0);
4569 env
->segs
[idx
].base
= addr
;
4571 case TARGET_ARCH_GET_GS
:
4572 case TARGET_ARCH_GET_FS
:
4573 if (code
== TARGET_ARCH_GET_GS
)
4577 val
= env
->segs
[idx
].base
;
4578 if (put_user(val
, addr
, abi_ulong
))
4579 ret
= -TARGET_EFAULT
;
4582 ret
= -TARGET_EINVAL
;
4589 #endif /* defined(TARGET_I386) */
4591 #define NEW_STACK_SIZE 0x40000
4594 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
4597 pthread_mutex_t mutex
;
4598 pthread_cond_t cond
;
4601 abi_ulong child_tidptr
;
4602 abi_ulong parent_tidptr
;
4606 static void *clone_func(void *arg
)
4608 new_thread_info
*info
= arg
;
4613 rcu_register_thread();
4615 cpu
= ENV_GET_CPU(env
);
4617 ts
= (TaskState
*)cpu
->opaque
;
4618 info
->tid
= gettid();
4619 cpu
->host_tid
= info
->tid
;
4621 if (info
->child_tidptr
)
4622 put_user_u32(info
->tid
, info
->child_tidptr
);
4623 if (info
->parent_tidptr
)
4624 put_user_u32(info
->tid
, info
->parent_tidptr
);
4625 /* Enable signals. */
4626 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
4627 /* Signal to the parent that we're ready. */
4628 pthread_mutex_lock(&info
->mutex
);
4629 pthread_cond_broadcast(&info
->cond
);
4630 pthread_mutex_unlock(&info
->mutex
);
4631 /* Wait until the parent has finshed initializing the tls state. */
4632 pthread_mutex_lock(&clone_lock
);
4633 pthread_mutex_unlock(&clone_lock
);
4639 /* do_fork() Must return host values and target errnos (unlike most
4640 do_*() functions). */
4641 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
4642 abi_ulong parent_tidptr
, target_ulong newtls
,
4643 abi_ulong child_tidptr
)
4645 CPUState
*cpu
= ENV_GET_CPU(env
);
4649 CPUArchState
*new_env
;
4650 unsigned int nptl_flags
;
4653 /* Emulate vfork() with fork() */
4654 if (flags
& CLONE_VFORK
)
4655 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
4657 if (flags
& CLONE_VM
) {
4658 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
4659 new_thread_info info
;
4660 pthread_attr_t attr
;
4662 ts
= g_new0(TaskState
, 1);
4663 init_task_state(ts
);
4664 /* we create a new CPU instance. */
4665 new_env
= cpu_copy(env
);
4666 /* Init regs that differ from the parent. */
4667 cpu_clone_regs(new_env
, newsp
);
4668 new_cpu
= ENV_GET_CPU(new_env
);
4669 new_cpu
->opaque
= ts
;
4670 ts
->bprm
= parent_ts
->bprm
;
4671 ts
->info
= parent_ts
->info
;
4673 flags
&= ~CLONE_NPTL_FLAGS2
;
4675 if (nptl_flags
& CLONE_CHILD_CLEARTID
) {
4676 ts
->child_tidptr
= child_tidptr
;
4679 if (nptl_flags
& CLONE_SETTLS
)
4680 cpu_set_tls (new_env
, newtls
);
4682 /* Grab a mutex so that thread setup appears atomic. */
4683 pthread_mutex_lock(&clone_lock
);
4685 memset(&info
, 0, sizeof(info
));
4686 pthread_mutex_init(&info
.mutex
, NULL
);
4687 pthread_mutex_lock(&info
.mutex
);
4688 pthread_cond_init(&info
.cond
, NULL
);
4690 if (nptl_flags
& CLONE_CHILD_SETTID
)
4691 info
.child_tidptr
= child_tidptr
;
4692 if (nptl_flags
& CLONE_PARENT_SETTID
)
4693 info
.parent_tidptr
= parent_tidptr
;
4695 ret
= pthread_attr_init(&attr
);
4696 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
4697 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
4698 /* It is not safe to deliver signals until the child has finished
4699 initializing, so temporarily block all signals. */
4700 sigfillset(&sigmask
);
4701 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
4703 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
4704 /* TODO: Free new CPU state if thread creation failed. */
4706 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
4707 pthread_attr_destroy(&attr
);
4709 /* Wait for the child to initialize. */
4710 pthread_cond_wait(&info
.cond
, &info
.mutex
);
4712 if (flags
& CLONE_PARENT_SETTID
)
4713 put_user_u32(ret
, parent_tidptr
);
4717 pthread_mutex_unlock(&info
.mutex
);
4718 pthread_cond_destroy(&info
.cond
);
4719 pthread_mutex_destroy(&info
.mutex
);
4720 pthread_mutex_unlock(&clone_lock
);
4722 /* if no CLONE_VM, we consider it is a fork */
4723 if ((flags
& ~(CSIGNAL
| CLONE_NPTL_FLAGS2
)) != 0) {
4724 return -TARGET_EINVAL
;
4729 /* Child Process. */
4731 cpu_clone_regs(env
, newsp
);
4733 /* There is a race condition here. The parent process could
4734 theoretically read the TID in the child process before the child
4735 tid is set. This would require using either ptrace
4736 (not implemented) or having *_tidptr to point at a shared memory
4737 mapping. We can't repeat the spinlock hack used above because
4738 the child process gets its own copy of the lock. */
4739 if (flags
& CLONE_CHILD_SETTID
)
4740 put_user_u32(gettid(), child_tidptr
);
4741 if (flags
& CLONE_PARENT_SETTID
)
4742 put_user_u32(gettid(), parent_tidptr
);
4743 ts
= (TaskState
*)cpu
->opaque
;
4744 if (flags
& CLONE_SETTLS
)
4745 cpu_set_tls (env
, newtls
);
4746 if (flags
& CLONE_CHILD_CLEARTID
)
4747 ts
->child_tidptr
= child_tidptr
;
4755 /* warning : doesn't handle linux specific flags... */
4756 static int target_to_host_fcntl_cmd(int cmd
)
4759 case TARGET_F_DUPFD
:
4760 case TARGET_F_GETFD
:
4761 case TARGET_F_SETFD
:
4762 case TARGET_F_GETFL
:
4763 case TARGET_F_SETFL
:
4765 case TARGET_F_GETLK
:
4767 case TARGET_F_SETLK
:
4769 case TARGET_F_SETLKW
:
4771 case TARGET_F_GETOWN
:
4773 case TARGET_F_SETOWN
:
4775 case TARGET_F_GETSIG
:
4777 case TARGET_F_SETSIG
:
4779 #if TARGET_ABI_BITS == 32
4780 case TARGET_F_GETLK64
:
4782 case TARGET_F_SETLK64
:
4784 case TARGET_F_SETLKW64
:
4787 case TARGET_F_SETLEASE
:
4789 case TARGET_F_GETLEASE
:
4791 #ifdef F_DUPFD_CLOEXEC
4792 case TARGET_F_DUPFD_CLOEXEC
:
4793 return F_DUPFD_CLOEXEC
;
4795 case TARGET_F_NOTIFY
:
4798 case TARGET_F_GETOWN_EX
:
4802 case TARGET_F_SETOWN_EX
:
4806 return -TARGET_EINVAL
;
4808 return -TARGET_EINVAL
;
4811 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
4812 static const bitmask_transtbl flock_tbl
[] = {
4813 TRANSTBL_CONVERT(F_RDLCK
),
4814 TRANSTBL_CONVERT(F_WRLCK
),
4815 TRANSTBL_CONVERT(F_UNLCK
),
4816 TRANSTBL_CONVERT(F_EXLCK
),
4817 TRANSTBL_CONVERT(F_SHLCK
),
4821 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
4824 struct target_flock
*target_fl
;
4825 struct flock64 fl64
;
4826 struct target_flock64
*target_fl64
;
4828 struct f_owner_ex fox
;
4829 struct target_f_owner_ex
*target_fox
;
4832 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
4834 if (host_cmd
== -TARGET_EINVAL
)
4838 case TARGET_F_GETLK
:
4839 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
4840 return -TARGET_EFAULT
;
4842 target_to_host_bitmask(tswap16(target_fl
->l_type
), flock_tbl
);
4843 fl
.l_whence
= tswap16(target_fl
->l_whence
);
4844 fl
.l_start
= tswapal(target_fl
->l_start
);
4845 fl
.l_len
= tswapal(target_fl
->l_len
);
4846 fl
.l_pid
= tswap32(target_fl
->l_pid
);
4847 unlock_user_struct(target_fl
, arg
, 0);
4848 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
4850 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg
, 0))
4851 return -TARGET_EFAULT
;
4853 host_to_target_bitmask(tswap16(fl
.l_type
), flock_tbl
);
4854 target_fl
->l_whence
= tswap16(fl
.l_whence
);
4855 target_fl
->l_start
= tswapal(fl
.l_start
);
4856 target_fl
->l_len
= tswapal(fl
.l_len
);
4857 target_fl
->l_pid
= tswap32(fl
.l_pid
);
4858 unlock_user_struct(target_fl
, arg
, 1);
4862 case TARGET_F_SETLK
:
4863 case TARGET_F_SETLKW
:
4864 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
4865 return -TARGET_EFAULT
;
4867 target_to_host_bitmask(tswap16(target_fl
->l_type
), flock_tbl
);
4868 fl
.l_whence
= tswap16(target_fl
->l_whence
);
4869 fl
.l_start
= tswapal(target_fl
->l_start
);
4870 fl
.l_len
= tswapal(target_fl
->l_len
);
4871 fl
.l_pid
= tswap32(target_fl
->l_pid
);
4872 unlock_user_struct(target_fl
, arg
, 0);
4873 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
4876 case TARGET_F_GETLK64
:
4877 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
4878 return -TARGET_EFAULT
;
4880 target_to_host_bitmask(tswap16(target_fl64
->l_type
), flock_tbl
) >> 1;
4881 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
4882 fl64
.l_start
= tswap64(target_fl64
->l_start
);
4883 fl64
.l_len
= tswap64(target_fl64
->l_len
);
4884 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
4885 unlock_user_struct(target_fl64
, arg
, 0);
4886 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
4888 if (!lock_user_struct(VERIFY_WRITE
, target_fl64
, arg
, 0))
4889 return -TARGET_EFAULT
;
4890 target_fl64
->l_type
=
4891 host_to_target_bitmask(tswap16(fl64
.l_type
), flock_tbl
) >> 1;
4892 target_fl64
->l_whence
= tswap16(fl64
.l_whence
);
4893 target_fl64
->l_start
= tswap64(fl64
.l_start
);
4894 target_fl64
->l_len
= tswap64(fl64
.l_len
);
4895 target_fl64
->l_pid
= tswap32(fl64
.l_pid
);
4896 unlock_user_struct(target_fl64
, arg
, 1);
4899 case TARGET_F_SETLK64
:
4900 case TARGET_F_SETLKW64
:
4901 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
4902 return -TARGET_EFAULT
;
4904 target_to_host_bitmask(tswap16(target_fl64
->l_type
), flock_tbl
) >> 1;
4905 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
4906 fl64
.l_start
= tswap64(target_fl64
->l_start
);
4907 fl64
.l_len
= tswap64(target_fl64
->l_len
);
4908 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
4909 unlock_user_struct(target_fl64
, arg
, 0);
4910 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
4913 case TARGET_F_GETFL
:
4914 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
4916 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
4920 case TARGET_F_SETFL
:
4921 ret
= get_errno(fcntl(fd
, host_cmd
, target_to_host_bitmask(arg
, fcntl_flags_tbl
)));
4925 case TARGET_F_GETOWN_EX
:
4926 ret
= get_errno(fcntl(fd
, host_cmd
, &fox
));
4928 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
4929 return -TARGET_EFAULT
;
4930 target_fox
->type
= tswap32(fox
.type
);
4931 target_fox
->pid
= tswap32(fox
.pid
);
4932 unlock_user_struct(target_fox
, arg
, 1);
4938 case TARGET_F_SETOWN_EX
:
4939 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
4940 return -TARGET_EFAULT
;
4941 fox
.type
= tswap32(target_fox
->type
);
4942 fox
.pid
= tswap32(target_fox
->pid
);
4943 unlock_user_struct(target_fox
, arg
, 0);
4944 ret
= get_errno(fcntl(fd
, host_cmd
, &fox
));
4948 case TARGET_F_SETOWN
:
4949 case TARGET_F_GETOWN
:
4950 case TARGET_F_SETSIG
:
4951 case TARGET_F_GETSIG
:
4952 case TARGET_F_SETLEASE
:
4953 case TARGET_F_GETLEASE
:
4954 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
4958 ret
= get_errno(fcntl(fd
, cmd
, arg
));
4966 static inline int high2lowuid(int uid
)
4974 static inline int high2lowgid(int gid
)
4982 static inline int low2highuid(int uid
)
4984 if ((int16_t)uid
== -1)
4990 static inline int low2highgid(int gid
)
4992 if ((int16_t)gid
== -1)
4997 static inline int tswapid(int id
)
5002 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
5004 #else /* !USE_UID16 */
5005 static inline int high2lowuid(int uid
)
5009 static inline int high2lowgid(int gid
)
5013 static inline int low2highuid(int uid
)
5017 static inline int low2highgid(int gid
)
5021 static inline int tswapid(int id
)
5026 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
5028 #endif /* USE_UID16 */
5030 void syscall_init(void)
5033 const argtype
*arg_type
;
5037 thunk_init(STRUCT_MAX
);
5039 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
5040 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
5041 #include "syscall_types.h"
5043 #undef STRUCT_SPECIAL
5045 /* Build target_to_host_errno_table[] table from
5046 * host_to_target_errno_table[]. */
5047 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
5048 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
5051 /* we patch the ioctl size if necessary. We rely on the fact that
5052 no ioctl has all the bits at '1' in the size field */
5054 while (ie
->target_cmd
!= 0) {
5055 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
5056 TARGET_IOC_SIZEMASK
) {
5057 arg_type
= ie
->arg_type
;
5058 if (arg_type
[0] != TYPE_PTR
) {
5059 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
5064 size
= thunk_type_size(arg_type
, 0);
5065 ie
->target_cmd
= (ie
->target_cmd
&
5066 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
5067 (size
<< TARGET_IOC_SIZESHIFT
);
5070 /* automatic consistency check if same arch */
5071 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
5072 (defined(__x86_64__) && defined(TARGET_X86_64))
5073 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
5074 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
5075 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
5082 #if TARGET_ABI_BITS == 32
/*
 * Combine the two 32-bit syscall register words of a 64-bit file offset.
 * Which word carries the high half depends on the guest's endianness.
 */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    /* big-endian guests pass the high word first */
    return ((uint64_t)word0 << 32) | word1;
#else
    /* little-endian guests pass the low word first */
    return ((uint64_t)word1 << 32) | word0;
#endif
}
5091 #else /* TARGET_ABI_BITS == 32 */
5092 static inline uint64_t target_offset64(uint64_t word0
, uint64_t word1
)
5096 #endif /* TARGET_ABI_BITS != 32 */
5098 #ifdef TARGET_NR_truncate64
5099 static inline abi_long
target_truncate64(void *cpu_env
, const char *arg1
,
5104 if (regpairs_aligned(cpu_env
)) {
5108 return get_errno(truncate64(arg1
, target_offset64(arg2
, arg3
)));
5112 #ifdef TARGET_NR_ftruncate64
5113 static inline abi_long
target_ftruncate64(void *cpu_env
, abi_long arg1
,
5118 if (regpairs_aligned(cpu_env
)) {
5122 return get_errno(ftruncate64(arg1
, target_offset64(arg2
, arg3
)));
5126 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
5127 abi_ulong target_addr
)
5129 struct target_timespec
*target_ts
;
5131 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
5132 return -TARGET_EFAULT
;
5133 host_ts
->tv_sec
= tswapal(target_ts
->tv_sec
);
5134 host_ts
->tv_nsec
= tswapal(target_ts
->tv_nsec
);
5135 unlock_user_struct(target_ts
, target_addr
, 0);
5139 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
5140 struct timespec
*host_ts
)
5142 struct target_timespec
*target_ts
;
5144 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
5145 return -TARGET_EFAULT
;
5146 target_ts
->tv_sec
= tswapal(host_ts
->tv_sec
);
5147 target_ts
->tv_nsec
= tswapal(host_ts
->tv_nsec
);
5148 unlock_user_struct(target_ts
, target_addr
, 1);
5152 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
5153 abi_ulong target_addr
)
5155 struct target_itimerspec
*target_itspec
;
5157 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
5158 return -TARGET_EFAULT
;
5161 host_itspec
->it_interval
.tv_sec
=
5162 tswapal(target_itspec
->it_interval
.tv_sec
);
5163 host_itspec
->it_interval
.tv_nsec
=
5164 tswapal(target_itspec
->it_interval
.tv_nsec
);
5165 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
5166 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
5168 unlock_user_struct(target_itspec
, target_addr
, 1);
5172 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
5173 struct itimerspec
*host_its
)
5175 struct target_itimerspec
*target_itspec
;
5177 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
5178 return -TARGET_EFAULT
;
5181 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
5182 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
5184 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
5185 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
5187 unlock_user_struct(target_itspec
, target_addr
, 0);
5191 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
5192 abi_ulong target_addr
)
5194 struct target_sigevent
*target_sevp
;
5196 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
5197 return -TARGET_EFAULT
;
5200 /* This union is awkward on 64 bit systems because it has a 32 bit
5201 * integer and a pointer in it; we follow the conversion approach
5202 * used for handling sigval types in signal.c so the guest should get
5203 * the correct value back even if we did a 64 bit byteswap and it's
5204 * using the 32 bit integer.
5206 host_sevp
->sigev_value
.sival_ptr
=
5207 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
5208 host_sevp
->sigev_signo
=
5209 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
5210 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
5211 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
5213 unlock_user_struct(target_sevp
, target_addr
, 1);
5217 #if defined(TARGET_NR_mlockall)
5218 static inline int target_to_host_mlockall_arg(int arg
)
5222 if (arg
& TARGET_MLOCKALL_MCL_CURRENT
) {
5223 result
|= MCL_CURRENT
;
5225 if (arg
& TARGET_MLOCKALL_MCL_FUTURE
) {
5226 result
|= MCL_FUTURE
;
5232 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
5233 static inline abi_long
host_to_target_stat64(void *cpu_env
,
5234 abi_ulong target_addr
,
5235 struct stat
*host_st
)
5237 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
5238 if (((CPUARMState
*)cpu_env
)->eabi
) {
5239 struct target_eabi_stat64
*target_st
;
5241 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
5242 return -TARGET_EFAULT
;
5243 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
5244 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
5245 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
5246 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
5247 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
5249 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
5250 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
5251 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
5252 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
5253 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
5254 __put_user(host_st
->st_size
, &target_st
->st_size
);
5255 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
5256 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
5257 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
5258 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
5259 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
5260 unlock_user_struct(target_st
, target_addr
, 1);
5264 #if defined(TARGET_HAS_STRUCT_STAT64)
5265 struct target_stat64
*target_st
;
5267 struct target_stat
*target_st
;
5270 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
5271 return -TARGET_EFAULT
;
5272 memset(target_st
, 0, sizeof(*target_st
));
5273 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
5274 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
5275 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
5276 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
5278 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
5279 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
5280 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
5281 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
5282 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
5283 /* XXX: better use of kernel struct */
5284 __put_user(host_st
->st_size
, &target_st
->st_size
);
5285 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
5286 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
5287 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
5288 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
5289 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
5290 unlock_user_struct(target_st
, target_addr
, 1);
5297 /* ??? Using host futex calls even when target atomic operations
5298 are not really atomic probably breaks things. However implementing
5299 futexes locally would make futexes shared between multiple processes
5300 tricky. However they're probably useless because guest atomic
5301 operations won't work either. */
5302 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
5303 target_ulong uaddr2
, int val3
)
5305 struct timespec ts
, *pts
;
5308 /* ??? We assume FUTEX_* constants are the same on both host
5310 #ifdef FUTEX_CMD_MASK
5311 base_op
= op
& FUTEX_CMD_MASK
;
5317 case FUTEX_WAIT_BITSET
:
5320 target_to_host_timespec(pts
, timeout
);
5324 return get_errno(sys_futex(g2h(uaddr
), op
, tswap32(val
),
5327 return get_errno(sys_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
5329 return get_errno(sys_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
5331 case FUTEX_CMP_REQUEUE
:
5333 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
5334 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
5335 But the prototype takes a `struct timespec *'; insert casts
5336 to satisfy the compiler. We do not need to tswap TIMEOUT
5337 since it's not compared to guest memory. */
5338 pts
= (struct timespec
*)(uintptr_t) timeout
;
5339 return get_errno(sys_futex(g2h(uaddr
), op
, val
, pts
,
5341 (base_op
== FUTEX_CMP_REQUEUE
5345 return -TARGET_ENOSYS
;
5348 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
5349 static abi_long
do_name_to_handle_at(abi_long dirfd
, abi_long pathname
,
5350 abi_long handle
, abi_long mount_id
,
5353 struct file_handle
*target_fh
;
5354 struct file_handle
*fh
;
5358 unsigned int size
, total_size
;
5360 if (get_user_s32(size
, handle
)) {
5361 return -TARGET_EFAULT
;
5364 name
= lock_user_string(pathname
);
5366 return -TARGET_EFAULT
;
5369 total_size
= sizeof(struct file_handle
) + size
;
5370 target_fh
= lock_user(VERIFY_WRITE
, handle
, total_size
, 0);
5372 unlock_user(name
, pathname
, 0);
5373 return -TARGET_EFAULT
;
5376 fh
= g_malloc0(total_size
);
5377 fh
->handle_bytes
= size
;
5379 ret
= get_errno(name_to_handle_at(dirfd
, path(name
), fh
, &mid
, flags
));
5380 unlock_user(name
, pathname
, 0);
5382 /* man name_to_handle_at(2):
5383 * Other than the use of the handle_bytes field, the caller should treat
5384 * the file_handle structure as an opaque data type
5387 memcpy(target_fh
, fh
, total_size
);
5388 target_fh
->handle_bytes
= tswap32(fh
->handle_bytes
);
5389 target_fh
->handle_type
= tswap32(fh
->handle_type
);
5391 unlock_user(target_fh
, handle
, total_size
);
5393 if (put_user_s32(mid
, mount_id
)) {
5394 return -TARGET_EFAULT
;
5402 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
5403 static abi_long
do_open_by_handle_at(abi_long mount_fd
, abi_long handle
,
5406 struct file_handle
*target_fh
;
5407 struct file_handle
*fh
;
5408 unsigned int size
, total_size
;
5411 if (get_user_s32(size
, handle
)) {
5412 return -TARGET_EFAULT
;
5415 total_size
= sizeof(struct file_handle
) + size
;
5416 target_fh
= lock_user(VERIFY_READ
, handle
, total_size
, 1);
5418 return -TARGET_EFAULT
;
5421 fh
= g_memdup(target_fh
, total_size
);
5422 fh
->handle_bytes
= size
;
5423 fh
->handle_type
= tswap32(target_fh
->handle_type
);
5425 ret
= get_errno(open_by_handle_at(mount_fd
, fh
,
5426 target_to_host_bitmask(flags
, fcntl_flags_tbl
)));
5430 unlock_user(target_fh
, handle
, total_size
);
5436 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
5438 /* signalfd siginfo conversion */
5441 host_to_target_signalfd_siginfo(struct signalfd_siginfo
*tinfo
,
5442 const struct signalfd_siginfo
*info
)
5444 int sig
= host_to_target_signal(info
->ssi_signo
);
5446 /* linux/signalfd.h defines a ssi_addr_lsb
5447 * not defined in sys/signalfd.h but used by some kernels
5450 #ifdef BUS_MCEERR_AO
5451 if (tinfo
->ssi_signo
== SIGBUS
&&
5452 (tinfo
->ssi_code
== BUS_MCEERR_AR
||
5453 tinfo
->ssi_code
== BUS_MCEERR_AO
)) {
5454 uint16_t *ssi_addr_lsb
= (uint16_t *)(&info
->ssi_addr
+ 1);
5455 uint16_t *tssi_addr_lsb
= (uint16_t *)(&tinfo
->ssi_addr
+ 1);
5456 *tssi_addr_lsb
= tswap16(*ssi_addr_lsb
);
5460 tinfo
->ssi_signo
= tswap32(sig
);
5461 tinfo
->ssi_errno
= tswap32(tinfo
->ssi_errno
);
5462 tinfo
->ssi_code
= tswap32(info
->ssi_code
);
5463 tinfo
->ssi_pid
= tswap32(info
->ssi_pid
);
5464 tinfo
->ssi_uid
= tswap32(info
->ssi_uid
);
5465 tinfo
->ssi_fd
= tswap32(info
->ssi_fd
);
5466 tinfo
->ssi_tid
= tswap32(info
->ssi_tid
);
5467 tinfo
->ssi_band
= tswap32(info
->ssi_band
);
5468 tinfo
->ssi_overrun
= tswap32(info
->ssi_overrun
);
5469 tinfo
->ssi_trapno
= tswap32(info
->ssi_trapno
);
5470 tinfo
->ssi_status
= tswap32(info
->ssi_status
);
5471 tinfo
->ssi_int
= tswap32(info
->ssi_int
);
5472 tinfo
->ssi_ptr
= tswap64(info
->ssi_ptr
);
5473 tinfo
->ssi_utime
= tswap64(info
->ssi_utime
);
5474 tinfo
->ssi_stime
= tswap64(info
->ssi_stime
);
5475 tinfo
->ssi_addr
= tswap64(info
->ssi_addr
);
5478 static abi_long
host_to_target_data_signalfd(void *buf
, size_t len
)
5482 for (i
= 0; i
< len
; i
+= sizeof(struct signalfd_siginfo
)) {
5483 host_to_target_signalfd_siginfo(buf
+ i
, buf
+ i
);
5489 static TargetFdTrans target_signalfd_trans
= {
5490 .host_to_target_data
= host_to_target_data_signalfd
,
5493 static abi_long
do_signalfd4(int fd
, abi_long mask
, int flags
)
5496 target_sigset_t
*target_mask
;
5500 if (flags
& ~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
)) {
5501 return -TARGET_EINVAL
;
5503 if (!lock_user_struct(VERIFY_READ
, target_mask
, mask
, 1)) {
5504 return -TARGET_EFAULT
;
5507 target_to_host_sigset(&host_mask
, target_mask
);
5509 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
5511 ret
= get_errno(signalfd(fd
, &host_mask
, host_flags
));
5513 fd_trans_register(ret
, &target_signalfd_trans
);
5516 unlock_user_struct(target_mask
, mask
, 0);
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* terminating signal lives in the low 7 bits */
        int sig = host_to_target_signal(WTERMSIG(status));
        return sig | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* stop signal lives in bits 8..15 */
        int sig = host_to_target_signal(WSTOPSIG(status));
        return (sig << 8) | (status & 0xff);
    }
    /* exited/continued statuses carry no signal number to translate */
    return status;
}
5536 static int open_self_cmdline(void *cpu_env
, int fd
)
5539 bool word_skipped
= false;
5541 fd_orig
= open("/proc/self/cmdline", O_RDONLY
);
5551 nb_read
= read(fd_orig
, buf
, sizeof(buf
));
5553 fd_orig
= close(fd_orig
);
5555 } else if (nb_read
== 0) {
5559 if (!word_skipped
) {
5560 /* Skip the first string, which is the path to qemu-*-static
5561 instead of the actual command. */
5562 cp_buf
= memchr(buf
, 0, sizeof(buf
));
5564 /* Null byte found, skip one string */
5566 nb_read
-= cp_buf
- buf
;
5567 word_skipped
= true;
5572 if (write(fd
, cp_buf
, nb_read
) != nb_read
) {
5579 return close(fd_orig
);
5582 static int open_self_maps(void *cpu_env
, int fd
)
5584 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
5585 TaskState
*ts
= cpu
->opaque
;
5591 fp
= fopen("/proc/self/maps", "r");
5596 while ((read
= getline(&line
, &len
, fp
)) != -1) {
5597 int fields
, dev_maj
, dev_min
, inode
;
5598 uint64_t min
, max
, offset
;
5599 char flag_r
, flag_w
, flag_x
, flag_p
;
5600 char path
[512] = "";
5601 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
5602 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
5603 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
5605 if ((fields
< 10) || (fields
> 11)) {
5608 if (h2g_valid(min
)) {
5609 int flags
= page_get_flags(h2g(min
));
5610 max
= h2g_valid(max
- 1) ? max
: (uintptr_t)g2h(GUEST_ADDR_MAX
);
5611 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
5614 if (h2g(min
) == ts
->info
->stack_limit
) {
5615 pstrcpy(path
, sizeof(path
), " [stack]");
5617 dprintf(fd
, TARGET_ABI_FMT_lx
"-" TARGET_ABI_FMT_lx
5618 " %c%c%c%c %08" PRIx64
" %02x:%02x %d %s%s\n",
5619 h2g(min
), h2g(max
- 1) + 1, flag_r
, flag_w
,
5620 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
5621 path
[0] ? " " : "", path
);
5631 static int open_self_stat(void *cpu_env
, int fd
)
5633 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
5634 TaskState
*ts
= cpu
->opaque
;
5635 abi_ulong start_stack
= ts
->info
->start_stack
;
5638 for (i
= 0; i
< 44; i
++) {
5646 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
5647 } else if (i
== 1) {
5649 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
5650 } else if (i
== 27) {
5653 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
5655 /* for the rest, there is MasterCard */
5656 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
5660 if (write(fd
, buf
, len
) != len
) {
5668 static int open_self_auxv(void *cpu_env
, int fd
)
5670 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
5671 TaskState
*ts
= cpu
->opaque
;
5672 abi_ulong auxv
= ts
->info
->saved_auxv
;
5673 abi_ulong len
= ts
->info
->auxv_len
;
5677 * Auxiliary vector is stored in target process stack.
5678 * read in whole auxv vector and copy it to file
5680 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
5684 r
= write(fd
, ptr
, len
);
5691 lseek(fd
, 0, SEEK_SET
);
5692 unlock_user(ptr
, auxv
, len
);
/*
 * Return 1 if @filename names the /proc entry @entry for the current
 * process — either "/proc/self/<entry>" or "/proc/<our-pid>/<entry>" —
 * and 0 otherwise.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (strncmp(filename, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    filename += strlen("/proc/");

    if (strncmp(filename, "self/", strlen("self/")) == 0) {
        filename += strlen("self/");
    } else if (*filename >= '1' && *filename <= '9') {
        char myself[80];

        snprintf(myself, sizeof(myself), "%d/", getpid());
        if (strncmp(filename, myself, strlen(myself)) != 0) {
            return 0;
        }
        filename += strlen(myself);
    } else {
        return 0;
    }

    return strcmp(filename, entry) == 0;
}
5722 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-path match for emulated /proc entries that are not per-process. */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
5728 static int open_net_route(void *cpu_env
, int fd
)
5735 fp
= fopen("/proc/net/route", "r");
5742 read
= getline(&line
, &len
, fp
);
5743 dprintf(fd
, "%s", line
);
5747 while ((read
= getline(&line
, &len
, fp
)) != -1) {
5749 uint32_t dest
, gw
, mask
;
5750 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
5751 sscanf(line
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5752 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
5753 &mask
, &mtu
, &window
, &irtt
);
5754 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5755 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
5756 metric
, tswap32(mask
), mtu
, window
, irtt
);
5766 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
5769 const char *filename
;
5770 int (*fill
)(void *cpu_env
, int fd
);
5771 int (*cmp
)(const char *s1
, const char *s2
);
5773 const struct fake_open
*fake_open
;
5774 static const struct fake_open fakes
[] = {
5775 { "maps", open_self_maps
, is_proc_myself
},
5776 { "stat", open_self_stat
, is_proc_myself
},
5777 { "auxv", open_self_auxv
, is_proc_myself
},
5778 { "cmdline", open_self_cmdline
, is_proc_myself
},
5779 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
5780 { "/proc/net/route", open_net_route
, is_proc
},
5782 { NULL
, NULL
, NULL
}
5785 if (is_proc_myself(pathname
, "exe")) {
5786 int execfd
= qemu_getauxval(AT_EXECFD
);
5787 return execfd
? execfd
: get_errno(sys_openat(dirfd
, exec_path
, flags
, mode
));
5790 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
5791 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
5796 if (fake_open
->filename
) {
5798 char filename
[PATH_MAX
];
5801 /* create temporary file to map stat to */
5802 tmpdir
= getenv("TMPDIR");
5805 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
5806 fd
= mkstemp(filename
);
5812 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
5816 lseek(fd
, 0, SEEK_SET
);
5821 return get_errno(sys_openat(dirfd
, path(pathname
), flags
, mode
));
5824 #define TIMER_MAGIC 0x0caf0000
5825 #define TIMER_MAGIC_MASK 0xffff0000
5827 /* Convert QEMU provided timer ID back to internal 16bit index format */
5828 static target_timer_t
get_timer_id(abi_long arg
)
5830 target_timer_t timerid
= arg
;
5832 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
5833 return -TARGET_EINVAL
;
5838 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
5839 return -TARGET_EINVAL
;
5845 /* do_syscall() should always have a single exit point at the end so
5846 that actions, such as logging of syscall results, can be performed.
5847 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5848 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
5849 abi_long arg2
, abi_long arg3
, abi_long arg4
,
5850 abi_long arg5
, abi_long arg6
, abi_long arg7
,
5853 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
5860 gemu_log("syscall %d", num
);
5863 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
5866 case TARGET_NR_exit
:
5867 /* In old applications this may be used to implement _exit(2).
5868 However in threaded applictions it is used for thread termination,
5869 and _exit_group is used for application termination.
5870 Do thread termination if we have more then one thread. */
5871 /* FIXME: This probably breaks if a signal arrives. We should probably
5872 be disabling signals. */
5873 if (CPU_NEXT(first_cpu
)) {
5877 /* Remove the CPU from the list. */
5878 QTAILQ_REMOVE(&cpus
, cpu
, node
);
5881 if (ts
->child_tidptr
) {
5882 put_user_u32(0, ts
->child_tidptr
);
5883 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
5887 object_unref(OBJECT(cpu
));
5889 rcu_unregister_thread();
5895 gdb_exit(cpu_env
, arg1
);
5897 ret
= 0; /* avoid warning */
5899 case TARGET_NR_read
:
5903 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
5905 ret
= get_errno(read(arg1
, p
, arg3
));
5907 fd_trans_host_to_target_data(arg1
)) {
5908 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
5910 unlock_user(p
, arg2
, ret
);
5913 case TARGET_NR_write
:
5914 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
5916 ret
= get_errno(write(arg1
, p
, arg3
));
5917 unlock_user(p
, arg2
, 0);
5919 #ifdef TARGET_NR_open
5920 case TARGET_NR_open
:
5921 if (!(p
= lock_user_string(arg1
)))
5923 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
5924 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
5926 fd_trans_unregister(ret
);
5927 unlock_user(p
, arg1
, 0);
5930 case TARGET_NR_openat
:
5931 if (!(p
= lock_user_string(arg2
)))
5933 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
5934 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
5936 fd_trans_unregister(ret
);
5937 unlock_user(p
, arg2
, 0);
5939 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
5940 case TARGET_NR_name_to_handle_at
:
5941 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
5944 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
5945 case TARGET_NR_open_by_handle_at
:
5946 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
5947 fd_trans_unregister(ret
);
5950 case TARGET_NR_close
:
5951 fd_trans_unregister(arg1
);
5952 ret
= get_errno(close(arg1
));
5957 #ifdef TARGET_NR_fork
5958 case TARGET_NR_fork
:
5959 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
5962 #ifdef TARGET_NR_waitpid
5963 case TARGET_NR_waitpid
:
5966 ret
= get_errno(waitpid(arg1
, &status
, arg3
));
5967 if (!is_error(ret
) && arg2
&& ret
5968 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
5973 #ifdef TARGET_NR_waitid
5974 case TARGET_NR_waitid
:
5978 ret
= get_errno(waitid(arg1
, arg2
, &info
, arg4
));
5979 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
5980 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
5982 host_to_target_siginfo(p
, &info
);
5983 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
5988 #ifdef TARGET_NR_creat /* not on alpha */
5989 case TARGET_NR_creat
:
5990 if (!(p
= lock_user_string(arg1
)))
5992 ret
= get_errno(creat(p
, arg2
));
5993 fd_trans_unregister(ret
);
5994 unlock_user(p
, arg1
, 0);
5997 #ifdef TARGET_NR_link
5998 case TARGET_NR_link
:
6001 p
= lock_user_string(arg1
);
6002 p2
= lock_user_string(arg2
);
6004 ret
= -TARGET_EFAULT
;
6006 ret
= get_errno(link(p
, p2
));
6007 unlock_user(p2
, arg2
, 0);
6008 unlock_user(p
, arg1
, 0);
6012 #if defined(TARGET_NR_linkat)
6013 case TARGET_NR_linkat
:
6018 p
= lock_user_string(arg2
);
6019 p2
= lock_user_string(arg4
);
6021 ret
= -TARGET_EFAULT
;
6023 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
6024 unlock_user(p
, arg2
, 0);
6025 unlock_user(p2
, arg4
, 0);
6029 #ifdef TARGET_NR_unlink
6030 case TARGET_NR_unlink
:
6031 if (!(p
= lock_user_string(arg1
)))
6033 ret
= get_errno(unlink(p
));
6034 unlock_user(p
, arg1
, 0);
6037 #if defined(TARGET_NR_unlinkat)
6038 case TARGET_NR_unlinkat
:
6039 if (!(p
= lock_user_string(arg2
)))
6041 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
6042 unlock_user(p
, arg2
, 0);
6045 case TARGET_NR_execve
:
6047 char **argp
, **envp
;
6050 abi_ulong guest_argp
;
6051 abi_ulong guest_envp
;
6058 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
6059 if (get_user_ual(addr
, gp
))
6067 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
6068 if (get_user_ual(addr
, gp
))
6075 argp
= alloca((argc
+ 1) * sizeof(void *));
6076 envp
= alloca((envc
+ 1) * sizeof(void *));
6078 for (gp
= guest_argp
, q
= argp
; gp
;
6079 gp
+= sizeof(abi_ulong
), q
++) {
6080 if (get_user_ual(addr
, gp
))
6084 if (!(*q
= lock_user_string(addr
)))
6086 total_size
+= strlen(*q
) + 1;
6090 for (gp
= guest_envp
, q
= envp
; gp
;
6091 gp
+= sizeof(abi_ulong
), q
++) {
6092 if (get_user_ual(addr
, gp
))
6096 if (!(*q
= lock_user_string(addr
)))
6098 total_size
+= strlen(*q
) + 1;
6102 if (!(p
= lock_user_string(arg1
)))
6104 ret
= get_errno(execve(p
, argp
, envp
));
6105 unlock_user(p
, arg1
, 0);
6110 ret
= -TARGET_EFAULT
;
6113 for (gp
= guest_argp
, q
= argp
; *q
;
6114 gp
+= sizeof(abi_ulong
), q
++) {
6115 if (get_user_ual(addr
, gp
)
6118 unlock_user(*q
, addr
, 0);
6120 for (gp
= guest_envp
, q
= envp
; *q
;
6121 gp
+= sizeof(abi_ulong
), q
++) {
6122 if (get_user_ual(addr
, gp
)
6125 unlock_user(*q
, addr
, 0);
6129 case TARGET_NR_chdir
:
6130 if (!(p
= lock_user_string(arg1
)))
6132 ret
= get_errno(chdir(p
));
6133 unlock_user(p
, arg1
, 0);
6135 #ifdef TARGET_NR_time
6136 case TARGET_NR_time
:
6139 ret
= get_errno(time(&host_time
));
6142 && put_user_sal(host_time
, arg1
))
6147 #ifdef TARGET_NR_mknod
6148 case TARGET_NR_mknod
:
6149 if (!(p
= lock_user_string(arg1
)))
6151 ret
= get_errno(mknod(p
, arg2
, arg3
));
6152 unlock_user(p
, arg1
, 0);
6155 #if defined(TARGET_NR_mknodat)
6156 case TARGET_NR_mknodat
:
6157 if (!(p
= lock_user_string(arg2
)))
6159 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
6160 unlock_user(p
, arg2
, 0);
6163 #ifdef TARGET_NR_chmod
6164 case TARGET_NR_chmod
:
6165 if (!(p
= lock_user_string(arg1
)))
6167 ret
= get_errno(chmod(p
, arg2
));
6168 unlock_user(p
, arg1
, 0);
6171 #ifdef TARGET_NR_break
6172 case TARGET_NR_break
:
6175 #ifdef TARGET_NR_oldstat
6176 case TARGET_NR_oldstat
:
6179 case TARGET_NR_lseek
:
6180 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
6182 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
6183 /* Alpha specific */
6184 case TARGET_NR_getxpid
:
6185 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
6186 ret
= get_errno(getpid());
6189 #ifdef TARGET_NR_getpid
6190 case TARGET_NR_getpid
:
6191 ret
= get_errno(getpid());
6194 case TARGET_NR_mount
:
6196 /* need to look at the data field */
6200 p
= lock_user_string(arg1
);
6208 p2
= lock_user_string(arg2
);
6211 unlock_user(p
, arg1
, 0);
6217 p3
= lock_user_string(arg3
);
6220 unlock_user(p
, arg1
, 0);
6222 unlock_user(p2
, arg2
, 0);
6229 /* FIXME - arg5 should be locked, but it isn't clear how to
6230 * do that since it's not guaranteed to be a NULL-terminated
6234 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
6236 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
6238 ret
= get_errno(ret
);
6241 unlock_user(p
, arg1
, 0);
6243 unlock_user(p2
, arg2
, 0);
6245 unlock_user(p3
, arg3
, 0);
6249 #ifdef TARGET_NR_umount
6250 case TARGET_NR_umount
:
6251 if (!(p
= lock_user_string(arg1
)))
6253 ret
= get_errno(umount(p
));
6254 unlock_user(p
, arg1
, 0);
6257 #ifdef TARGET_NR_stime /* not on alpha */
6258 case TARGET_NR_stime
:
6261 if (get_user_sal(host_time
, arg1
))
6263 ret
= get_errno(stime(&host_time
));
6267 case TARGET_NR_ptrace
:
6269 #ifdef TARGET_NR_alarm /* not on alpha */
6270 case TARGET_NR_alarm
:
6274 #ifdef TARGET_NR_oldfstat
6275 case TARGET_NR_oldfstat
:
6278 #ifdef TARGET_NR_pause /* not on alpha */
6279 case TARGET_NR_pause
:
6280 ret
= get_errno(pause());
6283 #ifdef TARGET_NR_utime
6284 case TARGET_NR_utime
:
6286 struct utimbuf tbuf
, *host_tbuf
;
6287 struct target_utimbuf
*target_tbuf
;
6289 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
6291 tbuf
.actime
= tswapal(target_tbuf
->actime
);
6292 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
6293 unlock_user_struct(target_tbuf
, arg2
, 0);
6298 if (!(p
= lock_user_string(arg1
)))
6300 ret
= get_errno(utime(p
, host_tbuf
));
6301 unlock_user(p
, arg1
, 0);
6305 #ifdef TARGET_NR_utimes
6306 case TARGET_NR_utimes
:
6308 struct timeval
*tvp
, tv
[2];
6310 if (copy_from_user_timeval(&tv
[0], arg2
)
6311 || copy_from_user_timeval(&tv
[1],
6312 arg2
+ sizeof(struct target_timeval
)))
6318 if (!(p
= lock_user_string(arg1
)))
6320 ret
= get_errno(utimes(p
, tvp
));
6321 unlock_user(p
, arg1
, 0);
6325 #if defined(TARGET_NR_futimesat)
6326 case TARGET_NR_futimesat
:
6328 struct timeval
*tvp
, tv
[2];
6330 if (copy_from_user_timeval(&tv
[0], arg3
)
6331 || copy_from_user_timeval(&tv
[1],
6332 arg3
+ sizeof(struct target_timeval
)))
6338 if (!(p
= lock_user_string(arg2
)))
6340 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
6341 unlock_user(p
, arg2
, 0);
6345 #ifdef TARGET_NR_stty
6346 case TARGET_NR_stty
:
6349 #ifdef TARGET_NR_gtty
6350 case TARGET_NR_gtty
:
6353 #ifdef TARGET_NR_access
6354 case TARGET_NR_access
:
6355 if (!(p
= lock_user_string(arg1
)))
6357 ret
= get_errno(access(path(p
), arg2
));
6358 unlock_user(p
, arg1
, 0);
6361 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
6362 case TARGET_NR_faccessat
:
6363 if (!(p
= lock_user_string(arg2
)))
6365 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
6366 unlock_user(p
, arg2
, 0);
6369 #ifdef TARGET_NR_nice /* not on alpha */
6370 case TARGET_NR_nice
:
6371 ret
= get_errno(nice(arg1
));
6374 #ifdef TARGET_NR_ftime
6375 case TARGET_NR_ftime
:
6378 case TARGET_NR_sync
:
6382 case TARGET_NR_kill
:
6383 ret
= get_errno(kill(arg1
, target_to_host_signal(arg2
)));
6385 #ifdef TARGET_NR_rename
6386 case TARGET_NR_rename
:
6389 p
= lock_user_string(arg1
);
6390 p2
= lock_user_string(arg2
);
6392 ret
= -TARGET_EFAULT
;
6394 ret
= get_errno(rename(p
, p2
));
6395 unlock_user(p2
, arg2
, 0);
6396 unlock_user(p
, arg1
, 0);
6400 #if defined(TARGET_NR_renameat)
6401 case TARGET_NR_renameat
:
6404 p
= lock_user_string(arg2
);
6405 p2
= lock_user_string(arg4
);
6407 ret
= -TARGET_EFAULT
;
6409 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
6410 unlock_user(p2
, arg4
, 0);
6411 unlock_user(p
, arg2
, 0);
6415 #ifdef TARGET_NR_mkdir
6416 case TARGET_NR_mkdir
:
6417 if (!(p
= lock_user_string(arg1
)))
6419 ret
= get_errno(mkdir(p
, arg2
));
6420 unlock_user(p
, arg1
, 0);
6423 #if defined(TARGET_NR_mkdirat)
6424 case TARGET_NR_mkdirat
:
6425 if (!(p
= lock_user_string(arg2
)))
6427 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
6428 unlock_user(p
, arg2
, 0);
6431 #ifdef TARGET_NR_rmdir
6432 case TARGET_NR_rmdir
:
6433 if (!(p
= lock_user_string(arg1
)))
6435 ret
= get_errno(rmdir(p
));
6436 unlock_user(p
, arg1
, 0);
6440 ret
= get_errno(dup(arg1
));
6442 fd_trans_dup(arg1
, ret
);
6445 #ifdef TARGET_NR_pipe
6446 case TARGET_NR_pipe
:
6447 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
6450 #ifdef TARGET_NR_pipe2
6451 case TARGET_NR_pipe2
:
6452 ret
= do_pipe(cpu_env
, arg1
,
6453 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
6456 case TARGET_NR_times
:
6458 struct target_tms
*tmsp
;
6460 ret
= get_errno(times(&tms
));
6462 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
6465 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
6466 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
6467 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
6468 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
6471 ret
= host_to_target_clock_t(ret
);
6474 #ifdef TARGET_NR_prof
6475 case TARGET_NR_prof
:
6478 #ifdef TARGET_NR_signal
6479 case TARGET_NR_signal
:
6482 case TARGET_NR_acct
:
6484 ret
= get_errno(acct(NULL
));
6486 if (!(p
= lock_user_string(arg1
)))
6488 ret
= get_errno(acct(path(p
)));
6489 unlock_user(p
, arg1
, 0);
6492 #ifdef TARGET_NR_umount2
6493 case TARGET_NR_umount2
:
6494 if (!(p
= lock_user_string(arg1
)))
6496 ret
= get_errno(umount2(p
, arg2
));
6497 unlock_user(p
, arg1
, 0);
6500 #ifdef TARGET_NR_lock
6501 case TARGET_NR_lock
:
6504 case TARGET_NR_ioctl
:
6505 ret
= do_ioctl(arg1
, arg2
, arg3
);
6507 case TARGET_NR_fcntl
:
6508 ret
= do_fcntl(arg1
, arg2
, arg3
);
6510 #ifdef TARGET_NR_mpx
6514 case TARGET_NR_setpgid
:
6515 ret
= get_errno(setpgid(arg1
, arg2
));
6517 #ifdef TARGET_NR_ulimit
6518 case TARGET_NR_ulimit
:
6521 #ifdef TARGET_NR_oldolduname
6522 case TARGET_NR_oldolduname
:
6525 case TARGET_NR_umask
:
6526 ret
= get_errno(umask(arg1
));
6528 case TARGET_NR_chroot
:
6529 if (!(p
= lock_user_string(arg1
)))
6531 ret
= get_errno(chroot(p
));
6532 unlock_user(p
, arg1
, 0);
6534 #ifdef TARGET_NR_ustat
6535 case TARGET_NR_ustat
:
6538 #ifdef TARGET_NR_dup2
6539 case TARGET_NR_dup2
:
6540 ret
= get_errno(dup2(arg1
, arg2
));
6542 fd_trans_dup(arg1
, arg2
);
6546 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
6547 case TARGET_NR_dup3
:
6548 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
6550 fd_trans_dup(arg1
, arg2
);
6554 #ifdef TARGET_NR_getppid /* not on alpha */
6555 case TARGET_NR_getppid
:
6556 ret
= get_errno(getppid());
6559 #ifdef TARGET_NR_getpgrp
6560 case TARGET_NR_getpgrp
:
6561 ret
= get_errno(getpgrp());
6564 case TARGET_NR_setsid
:
6565 ret
= get_errno(setsid());
6567 #ifdef TARGET_NR_sigaction
6568 case TARGET_NR_sigaction
:
6570 #if defined(TARGET_ALPHA)
6571 struct target_sigaction act
, oact
, *pact
= 0;
6572 struct target_old_sigaction
*old_act
;
6574 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
6576 act
._sa_handler
= old_act
->_sa_handler
;
6577 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
6578 act
.sa_flags
= old_act
->sa_flags
;
6579 act
.sa_restorer
= 0;
6580 unlock_user_struct(old_act
, arg2
, 0);
6583 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
6584 if (!is_error(ret
) && arg3
) {
6585 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
6587 old_act
->_sa_handler
= oact
._sa_handler
;
6588 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
6589 old_act
->sa_flags
= oact
.sa_flags
;
6590 unlock_user_struct(old_act
, arg3
, 1);
6592 #elif defined(TARGET_MIPS)
6593 struct target_sigaction act
, oact
, *pact
, *old_act
;
6596 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
6598 act
._sa_handler
= old_act
->_sa_handler
;
6599 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
6600 act
.sa_flags
= old_act
->sa_flags
;
6601 unlock_user_struct(old_act
, arg2
, 0);
6607 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
6609 if (!is_error(ret
) && arg3
) {
6610 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
6612 old_act
->_sa_handler
= oact
._sa_handler
;
6613 old_act
->sa_flags
= oact
.sa_flags
;
6614 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
6615 old_act
->sa_mask
.sig
[1] = 0;
6616 old_act
->sa_mask
.sig
[2] = 0;
6617 old_act
->sa_mask
.sig
[3] = 0;
6618 unlock_user_struct(old_act
, arg3
, 1);
6621 struct target_old_sigaction
*old_act
;
6622 struct target_sigaction act
, oact
, *pact
;
6624 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
6626 act
._sa_handler
= old_act
->_sa_handler
;
6627 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
6628 act
.sa_flags
= old_act
->sa_flags
;
6629 act
.sa_restorer
= old_act
->sa_restorer
;
6630 unlock_user_struct(old_act
, arg2
, 0);
6635 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
6636 if (!is_error(ret
) && arg3
) {
6637 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
6639 old_act
->_sa_handler
= oact
._sa_handler
;
6640 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
6641 old_act
->sa_flags
= oact
.sa_flags
;
6642 old_act
->sa_restorer
= oact
.sa_restorer
;
6643 unlock_user_struct(old_act
, arg3
, 1);
6649 case TARGET_NR_rt_sigaction
:
6651 #if defined(TARGET_ALPHA)
6652 struct target_sigaction act
, oact
, *pact
= 0;
6653 struct target_rt_sigaction
*rt_act
;
6654 /* ??? arg4 == sizeof(sigset_t). */
6656 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
6658 act
._sa_handler
= rt_act
->_sa_handler
;
6659 act
.sa_mask
= rt_act
->sa_mask
;
6660 act
.sa_flags
= rt_act
->sa_flags
;
6661 act
.sa_restorer
= arg5
;
6662 unlock_user_struct(rt_act
, arg2
, 0);
6665 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
6666 if (!is_error(ret
) && arg3
) {
6667 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
6669 rt_act
->_sa_handler
= oact
._sa_handler
;
6670 rt_act
->sa_mask
= oact
.sa_mask
;
6671 rt_act
->sa_flags
= oact
.sa_flags
;
6672 unlock_user_struct(rt_act
, arg3
, 1);
6675 struct target_sigaction
*act
;
6676 struct target_sigaction
*oact
;
6679 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
6684 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
6685 ret
= -TARGET_EFAULT
;
6686 goto rt_sigaction_fail
;
6690 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
6693 unlock_user_struct(act
, arg2
, 0);
6695 unlock_user_struct(oact
, arg3
, 1);
6699 #ifdef TARGET_NR_sgetmask /* not on alpha */
6700 case TARGET_NR_sgetmask
:
6703 abi_ulong target_set
;
6704 do_sigprocmask(0, NULL
, &cur_set
);
6705 host_to_target_old_sigset(&target_set
, &cur_set
);
6710 #ifdef TARGET_NR_ssetmask /* not on alpha */
6711 case TARGET_NR_ssetmask
:
6713 sigset_t set
, oset
, cur_set
;
6714 abi_ulong target_set
= arg1
;
6715 do_sigprocmask(0, NULL
, &cur_set
);
6716 target_to_host_old_sigset(&set
, &target_set
);
6717 sigorset(&set
, &set
, &cur_set
);
6718 do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
6719 host_to_target_old_sigset(&target_set
, &oset
);
6724 #ifdef TARGET_NR_sigprocmask
6725 case TARGET_NR_sigprocmask
:
6727 #if defined(TARGET_ALPHA)
6728 sigset_t set
, oldset
;
6733 case TARGET_SIG_BLOCK
:
6736 case TARGET_SIG_UNBLOCK
:
6739 case TARGET_SIG_SETMASK
:
6743 ret
= -TARGET_EINVAL
;
6747 target_to_host_old_sigset(&set
, &mask
);
6749 ret
= get_errno(do_sigprocmask(how
, &set
, &oldset
));
6750 if (!is_error(ret
)) {
6751 host_to_target_old_sigset(&mask
, &oldset
);
6753 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
6756 sigset_t set
, oldset
, *set_ptr
;
6761 case TARGET_SIG_BLOCK
:
6764 case TARGET_SIG_UNBLOCK
:
6767 case TARGET_SIG_SETMASK
:
6771 ret
= -TARGET_EINVAL
;
6774 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
6776 target_to_host_old_sigset(&set
, p
);
6777 unlock_user(p
, arg2
, 0);
6783 ret
= get_errno(do_sigprocmask(how
, set_ptr
, &oldset
));
6784 if (!is_error(ret
) && arg3
) {
6785 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
6787 host_to_target_old_sigset(p
, &oldset
);
6788 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
6794 case TARGET_NR_rt_sigprocmask
:
6797 sigset_t set
, oldset
, *set_ptr
;
6801 case TARGET_SIG_BLOCK
:
6804 case TARGET_SIG_UNBLOCK
:
6807 case TARGET_SIG_SETMASK
:
6811 ret
= -TARGET_EINVAL
;
6814 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
6816 target_to_host_sigset(&set
, p
);
6817 unlock_user(p
, arg2
, 0);
6823 ret
= get_errno(do_sigprocmask(how
, set_ptr
, &oldset
));
6824 if (!is_error(ret
) && arg3
) {
6825 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
6827 host_to_target_sigset(p
, &oldset
);
6828 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
6832 #ifdef TARGET_NR_sigpending
6833 case TARGET_NR_sigpending
:
6836 ret
= get_errno(sigpending(&set
));
6837 if (!is_error(ret
)) {
6838 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
6840 host_to_target_old_sigset(p
, &set
);
6841 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
6846 case TARGET_NR_rt_sigpending
:
6849 ret
= get_errno(sigpending(&set
));
6850 if (!is_error(ret
)) {
6851 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
6853 host_to_target_sigset(p
, &set
);
6854 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
6858 #ifdef TARGET_NR_sigsuspend
6859 case TARGET_NR_sigsuspend
:
6862 #if defined(TARGET_ALPHA)
6863 abi_ulong mask
= arg1
;
6864 target_to_host_old_sigset(&set
, &mask
);
6866 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
6868 target_to_host_old_sigset(&set
, p
);
6869 unlock_user(p
, arg1
, 0);
6871 ret
= get_errno(sigsuspend(&set
));
6875 case TARGET_NR_rt_sigsuspend
:
6878 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
6880 target_to_host_sigset(&set
, p
);
6881 unlock_user(p
, arg1
, 0);
6882 ret
= get_errno(sigsuspend(&set
));
6885 case TARGET_NR_rt_sigtimedwait
:
6888 struct timespec uts
, *puts
;
6891 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
6893 target_to_host_sigset(&set
, p
);
6894 unlock_user(p
, arg1
, 0);
6897 target_to_host_timespec(puts
, arg3
);
6901 ret
= get_errno(sigtimedwait(&set
, &uinfo
, puts
));
6902 if (!is_error(ret
)) {
6904 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
6909 host_to_target_siginfo(p
, &uinfo
);
6910 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
6912 ret
= host_to_target_signal(ret
);
6916 case TARGET_NR_rt_sigqueueinfo
:
6919 if (!(p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_sigset_t
), 1)))
6921 target_to_host_siginfo(&uinfo
, p
);
6922 unlock_user(p
, arg1
, 0);
6923 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
6926 #ifdef TARGET_NR_sigreturn
6927 case TARGET_NR_sigreturn
:
6928 /* NOTE: ret is eax, so not transcoding must be done */
6929 ret
= do_sigreturn(cpu_env
);
6932 case TARGET_NR_rt_sigreturn
:
6933 /* NOTE: ret is eax, so not transcoding must be done */
6934 ret
= do_rt_sigreturn(cpu_env
);
6936 case TARGET_NR_sethostname
:
6937 if (!(p
= lock_user_string(arg1
)))
6939 ret
= get_errno(sethostname(p
, arg2
));
6940 unlock_user(p
, arg1
, 0);
6942 case TARGET_NR_setrlimit
:
6944 int resource
= target_to_host_resource(arg1
);
6945 struct target_rlimit
*target_rlim
;
6947 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
6949 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
6950 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
6951 unlock_user_struct(target_rlim
, arg2
, 0);
6952 ret
= get_errno(setrlimit(resource
, &rlim
));
6955 case TARGET_NR_getrlimit
:
6957 int resource
= target_to_host_resource(arg1
);
6958 struct target_rlimit
*target_rlim
;
6961 ret
= get_errno(getrlimit(resource
, &rlim
));
6962 if (!is_error(ret
)) {
6963 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
6965 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
6966 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
6967 unlock_user_struct(target_rlim
, arg2
, 1);
6971 case TARGET_NR_getrusage
:
6973 struct rusage rusage
;
6974 ret
= get_errno(getrusage(arg1
, &rusage
));
6975 if (!is_error(ret
)) {
6976 ret
= host_to_target_rusage(arg2
, &rusage
);
6980 case TARGET_NR_gettimeofday
:
6983 ret
= get_errno(gettimeofday(&tv
, NULL
));
6984 if (!is_error(ret
)) {
6985 if (copy_to_user_timeval(arg1
, &tv
))
6990 case TARGET_NR_settimeofday
:
6992 struct timeval tv
, *ptv
= NULL
;
6993 struct timezone tz
, *ptz
= NULL
;
6996 if (copy_from_user_timeval(&tv
, arg1
)) {
7003 if (copy_from_user_timezone(&tz
, arg2
)) {
7009 ret
= get_errno(settimeofday(ptv
, ptz
));
7012 #if defined(TARGET_NR_select)
7013 case TARGET_NR_select
:
7014 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
7015 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
7018 struct target_sel_arg_struct
*sel
;
7019 abi_ulong inp
, outp
, exp
, tvp
;
7022 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1))
7024 nsel
= tswapal(sel
->n
);
7025 inp
= tswapal(sel
->inp
);
7026 outp
= tswapal(sel
->outp
);
7027 exp
= tswapal(sel
->exp
);
7028 tvp
= tswapal(sel
->tvp
);
7029 unlock_user_struct(sel
, arg1
, 0);
7030 ret
= do_select(nsel
, inp
, outp
, exp
, tvp
);
7035 #ifdef TARGET_NR_pselect6
7036 case TARGET_NR_pselect6
:
7038 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
7039 fd_set rfds
, wfds
, efds
;
7040 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
7041 struct timespec ts
, *ts_ptr
;
7044 * The 6th arg is actually two args smashed together,
7045 * so we cannot use the C library.
7053 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
7054 target_sigset_t
*target_sigset
;
7062 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
7066 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
7070 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
7076 * This takes a timespec, and not a timeval, so we cannot
7077 * use the do_select() helper ...
7080 if (target_to_host_timespec(&ts
, ts_addr
)) {
7088 /* Extract the two packed args for the sigset */
7091 sig
.size
= _NSIG
/ 8;
7093 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
7097 arg_sigset
= tswapal(arg7
[0]);
7098 arg_sigsize
= tswapal(arg7
[1]);
7099 unlock_user(arg7
, arg6
, 0);
7103 if (arg_sigsize
!= sizeof(*target_sigset
)) {
7104 /* Like the kernel, we enforce correct size sigsets */
7105 ret
= -TARGET_EINVAL
;
7108 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
7109 sizeof(*target_sigset
), 1);
7110 if (!target_sigset
) {
7113 target_to_host_sigset(&set
, target_sigset
);
7114 unlock_user(target_sigset
, arg_sigset
, 0);
7122 ret
= get_errno(sys_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
7125 if (!is_error(ret
)) {
7126 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
7128 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
7130 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
7133 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
7139 #ifdef TARGET_NR_symlink
7140 case TARGET_NR_symlink
:
7143 p
= lock_user_string(arg1
);
7144 p2
= lock_user_string(arg2
);
7146 ret
= -TARGET_EFAULT
;
7148 ret
= get_errno(symlink(p
, p2
));
7149 unlock_user(p2
, arg2
, 0);
7150 unlock_user(p
, arg1
, 0);
7154 #if defined(TARGET_NR_symlinkat)
7155 case TARGET_NR_symlinkat
:
7158 p
= lock_user_string(arg1
);
7159 p2
= lock_user_string(arg3
);
7161 ret
= -TARGET_EFAULT
;
7163 ret
= get_errno(symlinkat(p
, arg2
, p2
));
7164 unlock_user(p2
, arg3
, 0);
7165 unlock_user(p
, arg1
, 0);
7169 #ifdef TARGET_NR_oldlstat
7170 case TARGET_NR_oldlstat
:
7173 #ifdef TARGET_NR_readlink
7174 case TARGET_NR_readlink
:
7177 p
= lock_user_string(arg1
);
7178 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
7180 ret
= -TARGET_EFAULT
;
7182 /* Short circuit this for the magic exe check. */
7183 ret
= -TARGET_EINVAL
;
7184 } else if (is_proc_myself((const char *)p
, "exe")) {
7185 char real
[PATH_MAX
], *temp
;
7186 temp
= realpath(exec_path
, real
);
7187 /* Return value is # of bytes that we wrote to the buffer. */
7189 ret
= get_errno(-1);
7191 /* Don't worry about sign mismatch as earlier mapping
7192 * logic would have thrown a bad address error. */
7193 ret
= MIN(strlen(real
), arg3
);
7194 /* We cannot NUL terminate the string. */
7195 memcpy(p2
, real
, ret
);
7198 ret
= get_errno(readlink(path(p
), p2
, arg3
));
7200 unlock_user(p2
, arg2
, ret
);
7201 unlock_user(p
, arg1
, 0);
7205 #if defined(TARGET_NR_readlinkat)
7206 case TARGET_NR_readlinkat
:
7209 p
= lock_user_string(arg2
);
7210 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
7212 ret
= -TARGET_EFAULT
;
7213 } else if (is_proc_myself((const char *)p
, "exe")) {
7214 char real
[PATH_MAX
], *temp
;
7215 temp
= realpath(exec_path
, real
);
7216 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
7217 snprintf((char *)p2
, arg4
, "%s", real
);
7219 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
7221 unlock_user(p2
, arg3
, ret
);
7222 unlock_user(p
, arg2
, 0);
7226 #ifdef TARGET_NR_uselib
7227 case TARGET_NR_uselib
:
7230 #ifdef TARGET_NR_swapon
7231 case TARGET_NR_swapon
:
7232 if (!(p
= lock_user_string(arg1
)))
7234 ret
= get_errno(swapon(p
, arg2
));
7235 unlock_user(p
, arg1
, 0);
7238 case TARGET_NR_reboot
:
7239 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
7240 /* arg4 must be ignored in all other cases */
7241 p
= lock_user_string(arg4
);
7245 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
7246 unlock_user(p
, arg4
, 0);
7248 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
7251 #ifdef TARGET_NR_readdir
7252 case TARGET_NR_readdir
:
7255 #ifdef TARGET_NR_mmap
7256 case TARGET_NR_mmap
:
7257 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7258 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
7259 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
7260 || defined(TARGET_S390X)
7263 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
7264 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
7272 unlock_user(v
, arg1
, 0);
7273 ret
= get_errno(target_mmap(v1
, v2
, v3
,
7274 target_to_host_bitmask(v4
, mmap_flags_tbl
),
7278 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
7279 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
7285 #ifdef TARGET_NR_mmap2
7286 case TARGET_NR_mmap2
:
7288 #define MMAP_SHIFT 12
7290 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
7291 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
7293 arg6
<< MMAP_SHIFT
));
7296 case TARGET_NR_munmap
:
7297 ret
= get_errno(target_munmap(arg1
, arg2
));
7299 case TARGET_NR_mprotect
:
7301 TaskState
*ts
= cpu
->opaque
;
7302 /* Special hack to detect libc making the stack executable. */
7303 if ((arg3
& PROT_GROWSDOWN
)
7304 && arg1
>= ts
->info
->stack_limit
7305 && arg1
<= ts
->info
->start_stack
) {
7306 arg3
&= ~PROT_GROWSDOWN
;
7307 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
7308 arg1
= ts
->info
->stack_limit
;
7311 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
7313 #ifdef TARGET_NR_mremap
7314 case TARGET_NR_mremap
:
7315 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
7318 /* ??? msync/mlock/munlock are broken for softmmu. */
7319 #ifdef TARGET_NR_msync
7320 case TARGET_NR_msync
:
7321 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
7324 #ifdef TARGET_NR_mlock
7325 case TARGET_NR_mlock
:
7326 ret
= get_errno(mlock(g2h(arg1
), arg2
));
7329 #ifdef TARGET_NR_munlock
7330 case TARGET_NR_munlock
:
7331 ret
= get_errno(munlock(g2h(arg1
), arg2
));
7334 #ifdef TARGET_NR_mlockall
7335 case TARGET_NR_mlockall
:
7336 ret
= get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
7339 #ifdef TARGET_NR_munlockall
7340 case TARGET_NR_munlockall
:
7341 ret
= get_errno(munlockall());
7344 case TARGET_NR_truncate
:
7345 if (!(p
= lock_user_string(arg1
)))
7347 ret
= get_errno(truncate(p
, arg2
));
7348 unlock_user(p
, arg1
, 0);
7350 case TARGET_NR_ftruncate
:
7351 ret
= get_errno(ftruncate(arg1
, arg2
));
7353 case TARGET_NR_fchmod
:
7354 ret
= get_errno(fchmod(arg1
, arg2
));
7356 #if defined(TARGET_NR_fchmodat)
7357 case TARGET_NR_fchmodat
:
7358 if (!(p
= lock_user_string(arg2
)))
7360 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
7361 unlock_user(p
, arg2
, 0);
7364 case TARGET_NR_getpriority
:
7365 /* Note that negative values are valid for getpriority, so we must
7366 differentiate based on errno settings. */
7368 ret
= getpriority(arg1
, arg2
);
7369 if (ret
== -1 && errno
!= 0) {
7370 ret
= -host_to_target_errno(errno
);
7374 /* Return value is the unbiased priority. Signal no error. */
7375 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
7377 /* Return value is a biased priority to avoid negative numbers. */
7381 case TARGET_NR_setpriority
:
7382 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
7384 #ifdef TARGET_NR_profil
7385 case TARGET_NR_profil
:
7388 case TARGET_NR_statfs
:
7389 if (!(p
= lock_user_string(arg1
)))
7391 ret
= get_errno(statfs(path(p
), &stfs
));
7392 unlock_user(p
, arg1
, 0);
7394 if (!is_error(ret
)) {
7395 struct target_statfs
*target_stfs
;
7397 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
7399 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
7400 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
7401 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
7402 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
7403 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
7404 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
7405 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
7406 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
7407 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
7408 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
7409 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
7410 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
7411 unlock_user_struct(target_stfs
, arg2
, 1);
7414 case TARGET_NR_fstatfs
:
7415 ret
= get_errno(fstatfs(arg1
, &stfs
));
7416 goto convert_statfs
;
7417 #ifdef TARGET_NR_statfs64
7418 case TARGET_NR_statfs64
:
7419 if (!(p
= lock_user_string(arg1
)))
7421 ret
= get_errno(statfs(path(p
), &stfs
));
7422 unlock_user(p
, arg1
, 0);
7424 if (!is_error(ret
)) {
7425 struct target_statfs64
*target_stfs
;
7427 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
7429 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
7430 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
7431 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
7432 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
7433 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
7434 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
7435 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
7436 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
7437 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
7438 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
7439 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
7440 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
7441 unlock_user_struct(target_stfs
, arg3
, 1);
7444 case TARGET_NR_fstatfs64
:
7445 ret
= get_errno(fstatfs(arg1
, &stfs
));
7446 goto convert_statfs64
;
7448 #ifdef TARGET_NR_ioperm
7449 case TARGET_NR_ioperm
:
7452 #ifdef TARGET_NR_socketcall
7453 case TARGET_NR_socketcall
:
7454 ret
= do_socketcall(arg1
, arg2
);
7457 #ifdef TARGET_NR_accept
7458 case TARGET_NR_accept
:
7459 ret
= do_accept4(arg1
, arg2
, arg3
, 0);
7462 #ifdef TARGET_NR_accept4
7463 case TARGET_NR_accept4
:
7464 #ifdef CONFIG_ACCEPT4
7465 ret
= do_accept4(arg1
, arg2
, arg3
, arg4
);
7471 #ifdef TARGET_NR_bind
7472 case TARGET_NR_bind
:
7473 ret
= do_bind(arg1
, arg2
, arg3
);
7476 #ifdef TARGET_NR_connect
7477 case TARGET_NR_connect
:
7478 ret
= do_connect(arg1
, arg2
, arg3
);
7481 #ifdef TARGET_NR_getpeername
7482 case TARGET_NR_getpeername
:
7483 ret
= do_getpeername(arg1
, arg2
, arg3
);
7486 #ifdef TARGET_NR_getsockname
7487 case TARGET_NR_getsockname
:
7488 ret
= do_getsockname(arg1
, arg2
, arg3
);
7491 #ifdef TARGET_NR_getsockopt
7492 case TARGET_NR_getsockopt
:
7493 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
7496 #ifdef TARGET_NR_listen
7497 case TARGET_NR_listen
:
7498 ret
= get_errno(listen(arg1
, arg2
));
7501 #ifdef TARGET_NR_recv
7502 case TARGET_NR_recv
:
7503 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
7506 #ifdef TARGET_NR_recvfrom
7507 case TARGET_NR_recvfrom
:
7508 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7511 #ifdef TARGET_NR_recvmsg
7512 case TARGET_NR_recvmsg
:
7513 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
7516 #ifdef TARGET_NR_send
7517 case TARGET_NR_send
:
7518 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
7521 #ifdef TARGET_NR_sendmsg
7522 case TARGET_NR_sendmsg
:
7523 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
7526 #ifdef TARGET_NR_sendmmsg
7527 case TARGET_NR_sendmmsg
:
7528 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
7530 case TARGET_NR_recvmmsg
:
7531 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
7534 #ifdef TARGET_NR_sendto
7535 case TARGET_NR_sendto
:
7536 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7539 #ifdef TARGET_NR_shutdown
7540 case TARGET_NR_shutdown
:
7541 ret
= get_errno(shutdown(arg1
, arg2
));
7544 #ifdef TARGET_NR_socket
7545 case TARGET_NR_socket
:
7546 ret
= do_socket(arg1
, arg2
, arg3
);
7547 fd_trans_unregister(ret
);
7550 #ifdef TARGET_NR_socketpair
7551 case TARGET_NR_socketpair
:
7552 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
7555 #ifdef TARGET_NR_setsockopt
7556 case TARGET_NR_setsockopt
:
7557 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
7561 case TARGET_NR_syslog
:
7562 if (!(p
= lock_user_string(arg2
)))
7564 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
7565 unlock_user(p
, arg2
, 0);
7568 case TARGET_NR_setitimer
:
7570 struct itimerval value
, ovalue
, *pvalue
;
7574 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
7575 || copy_from_user_timeval(&pvalue
->it_value
,
7576 arg2
+ sizeof(struct target_timeval
)))
7581 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
7582 if (!is_error(ret
) && arg3
) {
7583 if (copy_to_user_timeval(arg3
,
7584 &ovalue
.it_interval
)
7585 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
7591 case TARGET_NR_getitimer
:
7593 struct itimerval value
;
7595 ret
= get_errno(getitimer(arg1
, &value
));
7596 if (!is_error(ret
) && arg2
) {
7597 if (copy_to_user_timeval(arg2
,
7599 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
7605 #ifdef TARGET_NR_stat
7606 case TARGET_NR_stat
:
7607 if (!(p
= lock_user_string(arg1
)))
7609 ret
= get_errno(stat(path(p
), &st
));
7610 unlock_user(p
, arg1
, 0);
7613 #ifdef TARGET_NR_lstat
7614 case TARGET_NR_lstat
:
7615 if (!(p
= lock_user_string(arg1
)))
7617 ret
= get_errno(lstat(path(p
), &st
));
7618 unlock_user(p
, arg1
, 0);
7621 case TARGET_NR_fstat
:
7623 ret
= get_errno(fstat(arg1
, &st
));
7624 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
7627 if (!is_error(ret
)) {
7628 struct target_stat
*target_st
;
7630 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
7632 memset(target_st
, 0, sizeof(*target_st
));
7633 __put_user(st
.st_dev
, &target_st
->st_dev
);
7634 __put_user(st
.st_ino
, &target_st
->st_ino
);
7635 __put_user(st
.st_mode
, &target_st
->st_mode
);
7636 __put_user(st
.st_uid
, &target_st
->st_uid
);
7637 __put_user(st
.st_gid
, &target_st
->st_gid
);
7638 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
7639 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
7640 __put_user(st
.st_size
, &target_st
->st_size
);
7641 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
7642 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
7643 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
7644 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
7645 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
7646 unlock_user_struct(target_st
, arg2
, 1);
7650 #ifdef TARGET_NR_olduname
7651 case TARGET_NR_olduname
:
7654 #ifdef TARGET_NR_iopl
7655 case TARGET_NR_iopl
:
7658 case TARGET_NR_vhangup
:
7659 ret
= get_errno(vhangup());
7661 #ifdef TARGET_NR_idle
7662 case TARGET_NR_idle
:
7665 #ifdef TARGET_NR_syscall
7666 case TARGET_NR_syscall
:
7667 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
7668 arg6
, arg7
, arg8
, 0);
7671 case TARGET_NR_wait4
:
7674 abi_long status_ptr
= arg2
;
7675 struct rusage rusage
, *rusage_ptr
;
7676 abi_ulong target_rusage
= arg4
;
7677 abi_long rusage_err
;
7679 rusage_ptr
= &rusage
;
7682 ret
= get_errno(wait4(arg1
, &status
, arg3
, rusage_ptr
));
7683 if (!is_error(ret
)) {
7684 if (status_ptr
&& ret
) {
7685 status
= host_to_target_waitstatus(status
);
7686 if (put_user_s32(status
, status_ptr
))
7689 if (target_rusage
) {
7690 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
7698 #ifdef TARGET_NR_swapoff
7699 case TARGET_NR_swapoff
:
7700 if (!(p
= lock_user_string(arg1
)))
7702 ret
= get_errno(swapoff(p
));
7703 unlock_user(p
, arg1
, 0);
7706 case TARGET_NR_sysinfo
:
7708 struct target_sysinfo
*target_value
;
7709 struct sysinfo value
;
7710 ret
= get_errno(sysinfo(&value
));
7711 if (!is_error(ret
) && arg1
)
7713 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
7715 __put_user(value
.uptime
, &target_value
->uptime
);
7716 __put_user(value
.loads
[0], &target_value
->loads
[0]);
7717 __put_user(value
.loads
[1], &target_value
->loads
[1]);
7718 __put_user(value
.loads
[2], &target_value
->loads
[2]);
7719 __put_user(value
.totalram
, &target_value
->totalram
);
7720 __put_user(value
.freeram
, &target_value
->freeram
);
7721 __put_user(value
.sharedram
, &target_value
->sharedram
);
7722 __put_user(value
.bufferram
, &target_value
->bufferram
);
7723 __put_user(value
.totalswap
, &target_value
->totalswap
);
7724 __put_user(value
.freeswap
, &target_value
->freeswap
);
7725 __put_user(value
.procs
, &target_value
->procs
);
7726 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
7727 __put_user(value
.freehigh
, &target_value
->freehigh
);
7728 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
7729 unlock_user_struct(target_value
, arg1
, 1);
7733 #ifdef TARGET_NR_ipc
7735 ret
= do_ipc(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7738 #ifdef TARGET_NR_semget
7739 case TARGET_NR_semget
:
7740 ret
= get_errno(semget(arg1
, arg2
, arg3
));
7743 #ifdef TARGET_NR_semop
7744 case TARGET_NR_semop
:
7745 ret
= do_semop(arg1
, arg2
, arg3
);
7748 #ifdef TARGET_NR_semctl
7749 case TARGET_NR_semctl
:
7750 ret
= do_semctl(arg1
, arg2
, arg3
, arg4
);
7753 #ifdef TARGET_NR_msgctl
7754 case TARGET_NR_msgctl
:
7755 ret
= do_msgctl(arg1
, arg2
, arg3
);
7758 #ifdef TARGET_NR_msgget
7759 case TARGET_NR_msgget
:
7760 ret
= get_errno(msgget(arg1
, arg2
));
7763 #ifdef TARGET_NR_msgrcv
7764 case TARGET_NR_msgrcv
:
7765 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
7768 #ifdef TARGET_NR_msgsnd
7769 case TARGET_NR_msgsnd
:
7770 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
7773 #ifdef TARGET_NR_shmget
7774 case TARGET_NR_shmget
:
7775 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
7778 #ifdef TARGET_NR_shmctl
7779 case TARGET_NR_shmctl
:
7780 ret
= do_shmctl(arg1
, arg2
, arg3
);
7783 #ifdef TARGET_NR_shmat
7784 case TARGET_NR_shmat
:
7785 ret
= do_shmat(arg1
, arg2
, arg3
);
7788 #ifdef TARGET_NR_shmdt
7789 case TARGET_NR_shmdt
:
7790 ret
= do_shmdt(arg1
);
7793 case TARGET_NR_fsync
:
7794 ret
= get_errno(fsync(arg1
));
7796 case TARGET_NR_clone
:
7797 /* Linux manages to have three different orderings for its
7798 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
7799 * match the kernel's CONFIG_CLONE_* settings.
7800 * Microblaze is further special in that it uses a sixth
7801 * implicit argument to clone for the TLS pointer.
7803 #if defined(TARGET_MICROBLAZE)
7804 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
7805 #elif defined(TARGET_CLONE_BACKWARDS)
7806 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
7807 #elif defined(TARGET_CLONE_BACKWARDS2)
7808 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
7810 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
7813 #ifdef __NR_exit_group
7814 /* new thread calls */
7815 case TARGET_NR_exit_group
:
7819 gdb_exit(cpu_env
, arg1
);
7820 ret
= get_errno(exit_group(arg1
));
7823 case TARGET_NR_setdomainname
:
7824 if (!(p
= lock_user_string(arg1
)))
7826 ret
= get_errno(setdomainname(p
, arg2
));
7827 unlock_user(p
, arg1
, 0);
7829 case TARGET_NR_uname
:
7830 /* no need to transcode because we use the linux syscall */
7832 struct new_utsname
* buf
;
7834 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
7836 ret
= get_errno(sys_uname(buf
));
7837 if (!is_error(ret
)) {
7838 /* Overrite the native machine name with whatever is being
7840 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
7841 /* Allow the user to override the reported release. */
7842 if (qemu_uname_release
&& *qemu_uname_release
)
7843 strcpy (buf
->release
, qemu_uname_release
);
7845 unlock_user_struct(buf
, arg1
, 1);
7849 case TARGET_NR_modify_ldt
:
7850 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
7852 #if !defined(TARGET_X86_64)
7853 case TARGET_NR_vm86old
:
7855 case TARGET_NR_vm86
:
7856 ret
= do_vm86(cpu_env
, arg1
, arg2
);
7860 case TARGET_NR_adjtimex
:
7862 #ifdef TARGET_NR_create_module
7863 case TARGET_NR_create_module
:
7865 case TARGET_NR_init_module
:
7866 case TARGET_NR_delete_module
:
7867 #ifdef TARGET_NR_get_kernel_syms
7868 case TARGET_NR_get_kernel_syms
:
7871 case TARGET_NR_quotactl
:
7873 case TARGET_NR_getpgid
:
7874 ret
= get_errno(getpgid(arg1
));
7876 case TARGET_NR_fchdir
:
7877 ret
= get_errno(fchdir(arg1
));
7879 #ifdef TARGET_NR_bdflush /* not on x86_64 */
7880 case TARGET_NR_bdflush
:
7883 #ifdef TARGET_NR_sysfs
7884 case TARGET_NR_sysfs
:
7887 case TARGET_NR_personality
:
7888 ret
= get_errno(personality(arg1
));
7890 #ifdef TARGET_NR_afs_syscall
7891 case TARGET_NR_afs_syscall
:
7894 #ifdef TARGET_NR__llseek /* Not on alpha */
7895 case TARGET_NR__llseek
:
7898 #if !defined(__NR_llseek)
7899 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | arg3
, arg5
);
7901 ret
= get_errno(res
);
7906 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
7908 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
7914 #ifdef TARGET_NR_getdents
7915 case TARGET_NR_getdents
:
7916 #ifdef __NR_getdents
7917 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
7919 struct target_dirent
*target_dirp
;
7920 struct linux_dirent
*dirp
;
7921 abi_long count
= arg3
;
7923 dirp
= g_try_malloc(count
);
7925 ret
= -TARGET_ENOMEM
;
7929 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
7930 if (!is_error(ret
)) {
7931 struct linux_dirent
*de
;
7932 struct target_dirent
*tde
;
7934 int reclen
, treclen
;
7935 int count1
, tnamelen
;
7939 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
7943 reclen
= de
->d_reclen
;
7944 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
7945 assert(tnamelen
>= 0);
7946 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
7947 assert(count1
+ treclen
<= count
);
7948 tde
->d_reclen
= tswap16(treclen
);
7949 tde
->d_ino
= tswapal(de
->d_ino
);
7950 tde
->d_off
= tswapal(de
->d_off
);
7951 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
7952 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
7954 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
7958 unlock_user(target_dirp
, arg2
, ret
);
7964 struct linux_dirent
*dirp
;
7965 abi_long count
= arg3
;
7967 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
7969 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
7970 if (!is_error(ret
)) {
7971 struct linux_dirent
*de
;
7976 reclen
= de
->d_reclen
;
7979 de
->d_reclen
= tswap16(reclen
);
7980 tswapls(&de
->d_ino
);
7981 tswapls(&de
->d_off
);
7982 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
7986 unlock_user(dirp
, arg2
, ret
);
7990 /* Implement getdents in terms of getdents64 */
7992 struct linux_dirent64
*dirp
;
7993 abi_long count
= arg3
;
7995 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
7999 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
8000 if (!is_error(ret
)) {
8001 /* Convert the dirent64 structs to target dirent. We do this
8002 * in-place, since we can guarantee that a target_dirent is no
8003 * larger than a dirent64; however this means we have to be
8004 * careful to read everything before writing in the new format.
8006 struct linux_dirent64
*de
;
8007 struct target_dirent
*tde
;
8012 tde
= (struct target_dirent
*)dirp
;
8014 int namelen
, treclen
;
8015 int reclen
= de
->d_reclen
;
8016 uint64_t ino
= de
->d_ino
;
8017 int64_t off
= de
->d_off
;
8018 uint8_t type
= de
->d_type
;
8020 namelen
= strlen(de
->d_name
);
8021 treclen
= offsetof(struct target_dirent
, d_name
)
8023 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
8025 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
8026 tde
->d_ino
= tswapal(ino
);
8027 tde
->d_off
= tswapal(off
);
8028 tde
->d_reclen
= tswap16(treclen
);
8029 /* The target_dirent type is in what was formerly a padding
8030 * byte at the end of the structure:
8032 *(((char *)tde
) + treclen
- 1) = type
;
8034 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
8035 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
8041 unlock_user(dirp
, arg2
, ret
);
8045 #endif /* TARGET_NR_getdents */
8046 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8047 case TARGET_NR_getdents64
:
8049 struct linux_dirent64
*dirp
;
8050 abi_long count
= arg3
;
8051 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
8053 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
8054 if (!is_error(ret
)) {
8055 struct linux_dirent64
*de
;
8060 reclen
= de
->d_reclen
;
8063 de
->d_reclen
= tswap16(reclen
);
8064 tswap64s((uint64_t *)&de
->d_ino
);
8065 tswap64s((uint64_t *)&de
->d_off
);
8066 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
8070 unlock_user(dirp
, arg2
, ret
);
8073 #endif /* TARGET_NR_getdents64 */
8074 #if defined(TARGET_NR__newselect)
8075 case TARGET_NR__newselect
:
8076 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
8079 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
8080 # ifdef TARGET_NR_poll
8081 case TARGET_NR_poll
:
8083 # ifdef TARGET_NR_ppoll
8084 case TARGET_NR_ppoll
:
8087 struct target_pollfd
*target_pfd
;
8088 unsigned int nfds
= arg2
;
8096 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
8097 sizeof(struct target_pollfd
) * nfds
, 1);
8102 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
8103 for (i
= 0; i
< nfds
; i
++) {
8104 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
8105 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
8109 # ifdef TARGET_NR_ppoll
8110 if (num
== TARGET_NR_ppoll
) {
8111 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
8112 target_sigset_t
*target_set
;
8113 sigset_t _set
, *set
= &_set
;
8116 if (target_to_host_timespec(timeout_ts
, arg3
)) {
8117 unlock_user(target_pfd
, arg1
, 0);
8125 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
8127 unlock_user(target_pfd
, arg1
, 0);
8130 target_to_host_sigset(set
, target_set
);
8135 ret
= get_errno(sys_ppoll(pfd
, nfds
, timeout_ts
, set
, _NSIG
/8));
8137 if (!is_error(ret
) && arg3
) {
8138 host_to_target_timespec(arg3
, timeout_ts
);
8141 unlock_user(target_set
, arg4
, 0);
8145 ret
= get_errno(poll(pfd
, nfds
, timeout
));
8147 if (!is_error(ret
)) {
8148 for(i
= 0; i
< nfds
; i
++) {
8149 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
8152 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
8156 case TARGET_NR_flock
:
8157 /* NOTE: the flock constant seems to be the same for every
8159 ret
= get_errno(flock(arg1
, arg2
));
8161 case TARGET_NR_readv
:
8163 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
8165 ret
= get_errno(readv(arg1
, vec
, arg3
));
8166 unlock_iovec(vec
, arg2
, arg3
, 1);
8168 ret
= -host_to_target_errno(errno
);
8172 case TARGET_NR_writev
:
8174 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
8176 ret
= get_errno(writev(arg1
, vec
, arg3
));
8177 unlock_iovec(vec
, arg2
, arg3
, 0);
8179 ret
= -host_to_target_errno(errno
);
8183 case TARGET_NR_getsid
:
8184 ret
= get_errno(getsid(arg1
));
8186 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
8187 case TARGET_NR_fdatasync
:
8188 ret
= get_errno(fdatasync(arg1
));
8191 #ifdef TARGET_NR__sysctl
8192 case TARGET_NR__sysctl
:
8193 /* We don't implement this, but ENOTDIR is always a safe
8195 ret
= -TARGET_ENOTDIR
;
8198 case TARGET_NR_sched_getaffinity
:
8200 unsigned int mask_size
;
8201 unsigned long *mask
;
8204 * sched_getaffinity needs multiples of ulong, so need to take
8205 * care of mismatches between target ulong and host ulong sizes.
8207 if (arg2
& (sizeof(abi_ulong
) - 1)) {
8208 ret
= -TARGET_EINVAL
;
8211 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
8213 mask
= alloca(mask_size
);
8214 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
8216 if (!is_error(ret
)) {
8218 /* More data returned than the caller's buffer will fit.
8219 * This only happens if sizeof(abi_long) < sizeof(long)
8220 * and the caller passed us a buffer holding an odd number
8221 * of abi_longs. If the host kernel is actually using the
8222 * extra 4 bytes then fail EINVAL; otherwise we can just
8223 * ignore them and only copy the interesting part.
8225 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
8226 if (numcpus
> arg2
* 8) {
8227 ret
= -TARGET_EINVAL
;
8233 if (copy_to_user(arg3
, mask
, ret
)) {
8239 case TARGET_NR_sched_setaffinity
:
8241 unsigned int mask_size
;
8242 unsigned long *mask
;
8245 * sched_setaffinity needs multiples of ulong, so need to take
8246 * care of mismatches between target ulong and host ulong sizes.
8248 if (arg2
& (sizeof(abi_ulong
) - 1)) {
8249 ret
= -TARGET_EINVAL
;
8252 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
8254 mask
= alloca(mask_size
);
8255 if (!lock_user_struct(VERIFY_READ
, p
, arg3
, 1)) {
8258 memcpy(mask
, p
, arg2
);
8259 unlock_user_struct(p
, arg2
, 0);
8261 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
8264 case TARGET_NR_sched_setparam
:
8266 struct sched_param
*target_schp
;
8267 struct sched_param schp
;
8270 return -TARGET_EINVAL
;
8272 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
8274 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
8275 unlock_user_struct(target_schp
, arg2
, 0);
8276 ret
= get_errno(sched_setparam(arg1
, &schp
));
8279 case TARGET_NR_sched_getparam
:
8281 struct sched_param
*target_schp
;
8282 struct sched_param schp
;
8285 return -TARGET_EINVAL
;
8287 ret
= get_errno(sched_getparam(arg1
, &schp
));
8288 if (!is_error(ret
)) {
8289 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
8291 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
8292 unlock_user_struct(target_schp
, arg2
, 1);
8296 case TARGET_NR_sched_setscheduler
:
8298 struct sched_param
*target_schp
;
8299 struct sched_param schp
;
8301 return -TARGET_EINVAL
;
8303 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
8305 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
8306 unlock_user_struct(target_schp
, arg3
, 0);
8307 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
8310 case TARGET_NR_sched_getscheduler
:
8311 ret
= get_errno(sched_getscheduler(arg1
));
8313 case TARGET_NR_sched_yield
:
8314 ret
= get_errno(sched_yield());
8316 case TARGET_NR_sched_get_priority_max
:
8317 ret
= get_errno(sched_get_priority_max(arg1
));
8319 case TARGET_NR_sched_get_priority_min
:
8320 ret
= get_errno(sched_get_priority_min(arg1
));
8322 case TARGET_NR_sched_rr_get_interval
:
8325 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
8326 if (!is_error(ret
)) {
8327 ret
= host_to_target_timespec(arg2
, &ts
);
8331 case TARGET_NR_nanosleep
:
8333 struct timespec req
, rem
;
8334 target_to_host_timespec(&req
, arg1
);
8335 ret
= get_errno(nanosleep(&req
, &rem
));
8336 if (is_error(ret
) && arg2
) {
8337 host_to_target_timespec(arg2
, &rem
);
8341 #ifdef TARGET_NR_query_module
8342 case TARGET_NR_query_module
:
8345 #ifdef TARGET_NR_nfsservctl
8346 case TARGET_NR_nfsservctl
:
8349 case TARGET_NR_prctl
:
8351 case PR_GET_PDEATHSIG
:
8354 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
8355 if (!is_error(ret
) && arg2
8356 && put_user_ual(deathsig
, arg2
)) {
8364 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
8368 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
8370 unlock_user(name
, arg2
, 16);
8375 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
8379 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
8381 unlock_user(name
, arg2
, 0);
8386 /* Most prctl options have no pointer arguments */
8387 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
8391 #ifdef TARGET_NR_arch_prctl
8392 case TARGET_NR_arch_prctl
:
8393 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
8394 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
8400 #ifdef TARGET_NR_pread64
8401 case TARGET_NR_pread64
:
8402 if (regpairs_aligned(cpu_env
)) {
8406 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
8408 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
8409 unlock_user(p
, arg2
, ret
);
8411 case TARGET_NR_pwrite64
:
8412 if (regpairs_aligned(cpu_env
)) {
8416 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
8418 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
8419 unlock_user(p
, arg2
, 0);
8422 case TARGET_NR_getcwd
:
8423 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
8425 ret
= get_errno(sys_getcwd1(p
, arg2
));
8426 unlock_user(p
, arg1
, ret
);
8428 case TARGET_NR_capget
:
8429 case TARGET_NR_capset
:
8431 struct target_user_cap_header
*target_header
;
8432 struct target_user_cap_data
*target_data
= NULL
;
8433 struct __user_cap_header_struct header
;
8434 struct __user_cap_data_struct data
[2];
8435 struct __user_cap_data_struct
*dataptr
= NULL
;
8436 int i
, target_datalen
;
8439 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
8442 header
.version
= tswap32(target_header
->version
);
8443 header
.pid
= tswap32(target_header
->pid
);
8445 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
8446 /* Version 2 and up takes pointer to two user_data structs */
8450 target_datalen
= sizeof(*target_data
) * data_items
;
8453 if (num
== TARGET_NR_capget
) {
8454 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
8456 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
8459 unlock_user_struct(target_header
, arg1
, 0);
8463 if (num
== TARGET_NR_capset
) {
8464 for (i
= 0; i
< data_items
; i
++) {
8465 data
[i
].effective
= tswap32(target_data
[i
].effective
);
8466 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
8467 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
8474 if (num
== TARGET_NR_capget
) {
8475 ret
= get_errno(capget(&header
, dataptr
));
8477 ret
= get_errno(capset(&header
, dataptr
));
8480 /* The kernel always updates version for both capget and capset */
8481 target_header
->version
= tswap32(header
.version
);
8482 unlock_user_struct(target_header
, arg1
, 1);
8485 if (num
== TARGET_NR_capget
) {
8486 for (i
= 0; i
< data_items
; i
++) {
8487 target_data
[i
].effective
= tswap32(data
[i
].effective
);
8488 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
8489 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
8491 unlock_user(target_data
, arg2
, target_datalen
);
8493 unlock_user(target_data
, arg2
, 0);
8498 case TARGET_NR_sigaltstack
:
8499 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUArchState
*)cpu_env
));
8502 #ifdef CONFIG_SENDFILE
8503 case TARGET_NR_sendfile
:
8508 ret
= get_user_sal(off
, arg3
);
8509 if (is_error(ret
)) {
8514 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
8515 if (!is_error(ret
) && arg3
) {
8516 abi_long ret2
= put_user_sal(off
, arg3
);
8517 if (is_error(ret2
)) {
8523 #ifdef TARGET_NR_sendfile64
8524 case TARGET_NR_sendfile64
:
8529 ret
= get_user_s64(off
, arg3
);
8530 if (is_error(ret
)) {
8535 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
8536 if (!is_error(ret
) && arg3
) {
8537 abi_long ret2
= put_user_s64(off
, arg3
);
8538 if (is_error(ret2
)) {
8546 case TARGET_NR_sendfile
:
8547 #ifdef TARGET_NR_sendfile64
8548 case TARGET_NR_sendfile64
:
8553 #ifdef TARGET_NR_getpmsg
8554 case TARGET_NR_getpmsg
:
8557 #ifdef TARGET_NR_putpmsg
8558 case TARGET_NR_putpmsg
:
8561 #ifdef TARGET_NR_vfork
8562 case TARGET_NR_vfork
:
8563 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
8567 #ifdef TARGET_NR_ugetrlimit
8568 case TARGET_NR_ugetrlimit
:
8571 int resource
= target_to_host_resource(arg1
);
8572 ret
= get_errno(getrlimit(resource
, &rlim
));
8573 if (!is_error(ret
)) {
8574 struct target_rlimit
*target_rlim
;
8575 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
8577 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
8578 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
8579 unlock_user_struct(target_rlim
, arg2
, 1);
8584 #ifdef TARGET_NR_truncate64
8585 case TARGET_NR_truncate64
:
8586 if (!(p
= lock_user_string(arg1
)))
8588 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
8589 unlock_user(p
, arg1
, 0);
8592 #ifdef TARGET_NR_ftruncate64
8593 case TARGET_NR_ftruncate64
:
8594 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
8597 #ifdef TARGET_NR_stat64
8598 case TARGET_NR_stat64
:
8599 if (!(p
= lock_user_string(arg1
)))
8601 ret
= get_errno(stat(path(p
), &st
));
8602 unlock_user(p
, arg1
, 0);
8604 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
8607 #ifdef TARGET_NR_lstat64
8608 case TARGET_NR_lstat64
:
8609 if (!(p
= lock_user_string(arg1
)))
8611 ret
= get_errno(lstat(path(p
), &st
));
8612 unlock_user(p
, arg1
, 0);
8614 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
8617 #ifdef TARGET_NR_fstat64
8618 case TARGET_NR_fstat64
:
8619 ret
= get_errno(fstat(arg1
, &st
));
8621 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
8624 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
8625 #ifdef TARGET_NR_fstatat64
8626 case TARGET_NR_fstatat64
:
8628 #ifdef TARGET_NR_newfstatat
8629 case TARGET_NR_newfstatat
:
8631 if (!(p
= lock_user_string(arg2
)))
8633 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
8635 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
8638 #ifdef TARGET_NR_lchown
8639 case TARGET_NR_lchown
:
8640 if (!(p
= lock_user_string(arg1
)))
8642 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
8643 unlock_user(p
, arg1
, 0);
8646 #ifdef TARGET_NR_getuid
8647 case TARGET_NR_getuid
:
8648 ret
= get_errno(high2lowuid(getuid()));
8651 #ifdef TARGET_NR_getgid
8652 case TARGET_NR_getgid
:
8653 ret
= get_errno(high2lowgid(getgid()));
8656 #ifdef TARGET_NR_geteuid
8657 case TARGET_NR_geteuid
:
8658 ret
= get_errno(high2lowuid(geteuid()));
8661 #ifdef TARGET_NR_getegid
8662 case TARGET_NR_getegid
:
8663 ret
= get_errno(high2lowgid(getegid()));
8666 case TARGET_NR_setreuid
:
8667 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
8669 case TARGET_NR_setregid
:
8670 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
8672 case TARGET_NR_getgroups
:
8674 int gidsetsize
= arg1
;
8675 target_id
*target_grouplist
;
8679 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
8680 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
8681 if (gidsetsize
== 0)
8683 if (!is_error(ret
)) {
8684 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
8685 if (!target_grouplist
)
8687 for(i
= 0;i
< ret
; i
++)
8688 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
8689 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
8693 case TARGET_NR_setgroups
:
8695 int gidsetsize
= arg1
;
8696 target_id
*target_grouplist
;
8697 gid_t
*grouplist
= NULL
;
8700 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
8701 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
8702 if (!target_grouplist
) {
8703 ret
= -TARGET_EFAULT
;
8706 for (i
= 0; i
< gidsetsize
; i
++) {
8707 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
8709 unlock_user(target_grouplist
, arg2
, 0);
8711 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
8714 case TARGET_NR_fchown
:
8715 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
8717 #if defined(TARGET_NR_fchownat)
8718 case TARGET_NR_fchownat
:
8719 if (!(p
= lock_user_string(arg2
)))
8721 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
8722 low2highgid(arg4
), arg5
));
8723 unlock_user(p
, arg2
, 0);
8726 #ifdef TARGET_NR_setresuid
8727 case TARGET_NR_setresuid
:
8728 ret
= get_errno(setresuid(low2highuid(arg1
),
8730 low2highuid(arg3
)));
8733 #ifdef TARGET_NR_getresuid
8734 case TARGET_NR_getresuid
:
8736 uid_t ruid
, euid
, suid
;
8737 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
8738 if (!is_error(ret
)) {
8739 if (put_user_id(high2lowuid(ruid
), arg1
)
8740 || put_user_id(high2lowuid(euid
), arg2
)
8741 || put_user_id(high2lowuid(suid
), arg3
))
8747 #ifdef TARGET_NR_getresgid
8748 case TARGET_NR_setresgid
:
8749 ret
= get_errno(setresgid(low2highgid(arg1
),
8751 low2highgid(arg3
)));
8754 #ifdef TARGET_NR_getresgid
8755 case TARGET_NR_getresgid
:
8757 gid_t rgid
, egid
, sgid
;
8758 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
8759 if (!is_error(ret
)) {
8760 if (put_user_id(high2lowgid(rgid
), arg1
)
8761 || put_user_id(high2lowgid(egid
), arg2
)
8762 || put_user_id(high2lowgid(sgid
), arg3
))
8768 #ifdef TARGET_NR_chown
8769 case TARGET_NR_chown
:
8770 if (!(p
= lock_user_string(arg1
)))
8772 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
8773 unlock_user(p
, arg1
, 0);
8776 case TARGET_NR_setuid
:
8777 ret
= get_errno(setuid(low2highuid(arg1
)));
8779 case TARGET_NR_setgid
:
8780 ret
= get_errno(setgid(low2highgid(arg1
)));
8782 case TARGET_NR_setfsuid
:
8783 ret
= get_errno(setfsuid(arg1
));
8785 case TARGET_NR_setfsgid
:
8786 ret
= get_errno(setfsgid(arg1
));
8789 #ifdef TARGET_NR_lchown32
8790 case TARGET_NR_lchown32
:
8791 if (!(p
= lock_user_string(arg1
)))
8793 ret
= get_errno(lchown(p
, arg2
, arg3
));
8794 unlock_user(p
, arg1
, 0);
8797 #ifdef TARGET_NR_getuid32
8798 case TARGET_NR_getuid32
:
8799 ret
= get_errno(getuid());
8803 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
8804 /* Alpha specific */
8805 case TARGET_NR_getxuid
:
8809 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
8811 ret
= get_errno(getuid());
8814 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
8815 /* Alpha specific */
8816 case TARGET_NR_getxgid
:
8820 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
8822 ret
= get_errno(getgid());
8825 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
8826 /* Alpha specific */
8827 case TARGET_NR_osf_getsysinfo
:
8828 ret
= -TARGET_EOPNOTSUPP
;
8830 case TARGET_GSI_IEEE_FP_CONTROL
:
8832 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
8834 /* Copied from linux ieee_fpcr_to_swcr. */
8835 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
8836 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
8837 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
8838 | SWCR_TRAP_ENABLE_DZE
8839 | SWCR_TRAP_ENABLE_OVF
);
8840 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
8841 | SWCR_TRAP_ENABLE_INE
);
8842 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
8843 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
8845 if (put_user_u64 (swcr
, arg2
))
8851 /* case GSI_IEEE_STATE_AT_SIGNAL:
8852 -- Not implemented in linux kernel.
8854 -- Retrieves current unaligned access state; not much used.
8856 -- Retrieves implver information; surely not used.
8858 -- Grabs a copy of the HWRPB; surely not used.
8863 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
8864 /* Alpha specific */
8865 case TARGET_NR_osf_setsysinfo
:
8866 ret
= -TARGET_EOPNOTSUPP
;
8868 case TARGET_SSI_IEEE_FP_CONTROL
:
8870 uint64_t swcr
, fpcr
, orig_fpcr
;
8872 if (get_user_u64 (swcr
, arg2
)) {
8875 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
8876 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
8878 /* Copied from linux ieee_swcr_to_fpcr. */
8879 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
8880 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
8881 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
8882 | SWCR_TRAP_ENABLE_DZE
8883 | SWCR_TRAP_ENABLE_OVF
)) << 48;
8884 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
8885 | SWCR_TRAP_ENABLE_INE
)) << 57;
8886 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
8887 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
8889 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
8894 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
8896 uint64_t exc
, fpcr
, orig_fpcr
;
8899 if (get_user_u64(exc
, arg2
)) {
8903 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
8905 /* We only add to the exception status here. */
8906 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
8908 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
8911 /* Old exceptions are not signaled. */
8912 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
8914 /* If any exceptions set by this call,
8915 and are unmasked, send a signal. */
8917 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
8918 si_code
= TARGET_FPE_FLTRES
;
8920 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
8921 si_code
= TARGET_FPE_FLTUND
;
8923 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
8924 si_code
= TARGET_FPE_FLTOVF
;
8926 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
8927 si_code
= TARGET_FPE_FLTDIV
;
8929 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
8930 si_code
= TARGET_FPE_FLTINV
;
8933 target_siginfo_t info
;
8934 info
.si_signo
= SIGFPE
;
8936 info
.si_code
= si_code
;
8937 info
._sifields
._sigfault
._addr
8938 = ((CPUArchState
*)cpu_env
)->pc
;
8939 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
8944 /* case SSI_NVPAIRS:
8945 -- Used with SSIN_UACPROC to enable unaligned accesses.
8946 case SSI_IEEE_STATE_AT_SIGNAL:
8947 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
8948 -- Not implemented in linux kernel
8953 #ifdef TARGET_NR_osf_sigprocmask
8954 /* Alpha specific. */
8955 case TARGET_NR_osf_sigprocmask
:
8959 sigset_t set
, oldset
;
8962 case TARGET_SIG_BLOCK
:
8965 case TARGET_SIG_UNBLOCK
:
8968 case TARGET_SIG_SETMASK
:
8972 ret
= -TARGET_EINVAL
;
8976 target_to_host_old_sigset(&set
, &mask
);
8977 do_sigprocmask(how
, &set
, &oldset
);
8978 host_to_target_old_sigset(&mask
, &oldset
);
8984 #ifdef TARGET_NR_getgid32
8985 case TARGET_NR_getgid32
:
8986 ret
= get_errno(getgid());
8989 #ifdef TARGET_NR_geteuid32
8990 case TARGET_NR_geteuid32
:
8991 ret
= get_errno(geteuid());
8994 #ifdef TARGET_NR_getegid32
8995 case TARGET_NR_getegid32
:
8996 ret
= get_errno(getegid());
8999 #ifdef TARGET_NR_setreuid32
9000 case TARGET_NR_setreuid32
:
9001 ret
= get_errno(setreuid(arg1
, arg2
));
9004 #ifdef TARGET_NR_setregid32
9005 case TARGET_NR_setregid32
:
9006 ret
= get_errno(setregid(arg1
, arg2
));
9009 #ifdef TARGET_NR_getgroups32
9010 case TARGET_NR_getgroups32
:
9012 int gidsetsize
= arg1
;
9013 uint32_t *target_grouplist
;
9017 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
9018 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
9019 if (gidsetsize
== 0)
9021 if (!is_error(ret
)) {
9022 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
9023 if (!target_grouplist
) {
9024 ret
= -TARGET_EFAULT
;
9027 for(i
= 0;i
< ret
; i
++)
9028 target_grouplist
[i
] = tswap32(grouplist
[i
]);
9029 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
9034 #ifdef TARGET_NR_setgroups32
9035 case TARGET_NR_setgroups32
:
9037 int gidsetsize
= arg1
;
9038 uint32_t *target_grouplist
;
9042 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
9043 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
9044 if (!target_grouplist
) {
9045 ret
= -TARGET_EFAULT
;
9048 for(i
= 0;i
< gidsetsize
; i
++)
9049 grouplist
[i
] = tswap32(target_grouplist
[i
]);
9050 unlock_user(target_grouplist
, arg2
, 0);
9051 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
9055 #ifdef TARGET_NR_fchown32
9056 case TARGET_NR_fchown32
:
9057 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
9060 #ifdef TARGET_NR_setresuid32
9061 case TARGET_NR_setresuid32
:
9062 ret
= get_errno(setresuid(arg1
, arg2
, arg3
));
9065 #ifdef TARGET_NR_getresuid32
9066 case TARGET_NR_getresuid32
:
9068 uid_t ruid
, euid
, suid
;
9069 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
9070 if (!is_error(ret
)) {
9071 if (put_user_u32(ruid
, arg1
)
9072 || put_user_u32(euid
, arg2
)
9073 || put_user_u32(suid
, arg3
))
9079 #ifdef TARGET_NR_setresgid32
9080 case TARGET_NR_setresgid32
:
9081 ret
= get_errno(setresgid(arg1
, arg2
, arg3
));
9084 #ifdef TARGET_NR_getresgid32
9085 case TARGET_NR_getresgid32
:
9087 gid_t rgid
, egid
, sgid
;
9088 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
9089 if (!is_error(ret
)) {
9090 if (put_user_u32(rgid
, arg1
)
9091 || put_user_u32(egid
, arg2
)
9092 || put_user_u32(sgid
, arg3
))
9098 #ifdef TARGET_NR_chown32
9099 case TARGET_NR_chown32
:
9100 if (!(p
= lock_user_string(arg1
)))
9102 ret
= get_errno(chown(p
, arg2
, arg3
));
9103 unlock_user(p
, arg1
, 0);
9106 #ifdef TARGET_NR_setuid32
9107 case TARGET_NR_setuid32
:
9108 ret
= get_errno(setuid(arg1
));
9111 #ifdef TARGET_NR_setgid32
9112 case TARGET_NR_setgid32
:
9113 ret
= get_errno(setgid(arg1
));
9116 #ifdef TARGET_NR_setfsuid32
9117 case TARGET_NR_setfsuid32
:
9118 ret
= get_errno(setfsuid(arg1
));
9121 #ifdef TARGET_NR_setfsgid32
9122 case TARGET_NR_setfsgid32
:
9123 ret
= get_errno(setfsgid(arg1
));
9127 case TARGET_NR_pivot_root
:
9129 #ifdef TARGET_NR_mincore
9130 case TARGET_NR_mincore
:
9133 ret
= -TARGET_EFAULT
;
9134 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
9136 if (!(p
= lock_user_string(arg3
)))
9138 ret
= get_errno(mincore(a
, arg2
, p
));
9139 unlock_user(p
, arg3
, ret
);
9141 unlock_user(a
, arg1
, 0);
9145 #ifdef TARGET_NR_arm_fadvise64_64
9146 case TARGET_NR_arm_fadvise64_64
:
9149 * arm_fadvise64_64 looks like fadvise64_64 but
9150 * with different argument order
9158 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
9159 #ifdef TARGET_NR_fadvise64_64
9160 case TARGET_NR_fadvise64_64
:
9162 #ifdef TARGET_NR_fadvise64
9163 case TARGET_NR_fadvise64
:
9167 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
9168 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
9169 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
9170 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
9174 ret
= -posix_fadvise(arg1
, arg2
, arg3
, arg4
);
9177 #ifdef TARGET_NR_madvise
9178 case TARGET_NR_madvise
:
9179 /* A straight passthrough may not be safe because qemu sometimes
9180 turns private file-backed mappings into anonymous mappings.
9181 This will break MADV_DONTNEED.
9182 This is a hint, so ignoring and returning success is ok. */
9186 #if TARGET_ABI_BITS == 32
9187 case TARGET_NR_fcntl64
:
9191 struct target_flock64
*target_fl
;
9193 struct target_eabi_flock64
*target_efl
;
9196 cmd
= target_to_host_fcntl_cmd(arg2
);
9197 if (cmd
== -TARGET_EINVAL
) {
9203 case TARGET_F_GETLK64
:
9205 if (((CPUARMState
*)cpu_env
)->eabi
) {
9206 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
9208 fl
.l_type
= tswap16(target_efl
->l_type
);
9209 fl
.l_whence
= tswap16(target_efl
->l_whence
);
9210 fl
.l_start
= tswap64(target_efl
->l_start
);
9211 fl
.l_len
= tswap64(target_efl
->l_len
);
9212 fl
.l_pid
= tswap32(target_efl
->l_pid
);
9213 unlock_user_struct(target_efl
, arg3
, 0);
9217 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
9219 fl
.l_type
= tswap16(target_fl
->l_type
);
9220 fl
.l_whence
= tswap16(target_fl
->l_whence
);
9221 fl
.l_start
= tswap64(target_fl
->l_start
);
9222 fl
.l_len
= tswap64(target_fl
->l_len
);
9223 fl
.l_pid
= tswap32(target_fl
->l_pid
);
9224 unlock_user_struct(target_fl
, arg3
, 0);
9226 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
9229 if (((CPUARMState
*)cpu_env
)->eabi
) {
9230 if (!lock_user_struct(VERIFY_WRITE
, target_efl
, arg3
, 0))
9232 target_efl
->l_type
= tswap16(fl
.l_type
);
9233 target_efl
->l_whence
= tswap16(fl
.l_whence
);
9234 target_efl
->l_start
= tswap64(fl
.l_start
);
9235 target_efl
->l_len
= tswap64(fl
.l_len
);
9236 target_efl
->l_pid
= tswap32(fl
.l_pid
);
9237 unlock_user_struct(target_efl
, arg3
, 1);
9241 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg3
, 0))
9243 target_fl
->l_type
= tswap16(fl
.l_type
);
9244 target_fl
->l_whence
= tswap16(fl
.l_whence
);
9245 target_fl
->l_start
= tswap64(fl
.l_start
);
9246 target_fl
->l_len
= tswap64(fl
.l_len
);
9247 target_fl
->l_pid
= tswap32(fl
.l_pid
);
9248 unlock_user_struct(target_fl
, arg3
, 1);
9253 case TARGET_F_SETLK64
:
9254 case TARGET_F_SETLKW64
:
9256 if (((CPUARMState
*)cpu_env
)->eabi
) {
9257 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
9259 fl
.l_type
= tswap16(target_efl
->l_type
);
9260 fl
.l_whence
= tswap16(target_efl
->l_whence
);
9261 fl
.l_start
= tswap64(target_efl
->l_start
);
9262 fl
.l_len
= tswap64(target_efl
->l_len
);
9263 fl
.l_pid
= tswap32(target_efl
->l_pid
);
9264 unlock_user_struct(target_efl
, arg3
, 0);
9268 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
9270 fl
.l_type
= tswap16(target_fl
->l_type
);
9271 fl
.l_whence
= tswap16(target_fl
->l_whence
);
9272 fl
.l_start
= tswap64(target_fl
->l_start
);
9273 fl
.l_len
= tswap64(target_fl
->l_len
);
9274 fl
.l_pid
= tswap32(target_fl
->l_pid
);
9275 unlock_user_struct(target_fl
, arg3
, 0);
9277 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
9280 ret
= do_fcntl(arg1
, arg2
, arg3
);
9286 #ifdef TARGET_NR_cacheflush
9287 case TARGET_NR_cacheflush
:
9288 /* self-modifying code is handled automatically, so nothing needed */
9292 #ifdef TARGET_NR_security
9293 case TARGET_NR_security
:
9296 #ifdef TARGET_NR_getpagesize
9297 case TARGET_NR_getpagesize
:
9298 ret
= TARGET_PAGE_SIZE
;
9301 case TARGET_NR_gettid
:
9302 ret
= get_errno(gettid());
9304 #ifdef TARGET_NR_readahead
9305 case TARGET_NR_readahead
:
9306 #if TARGET_ABI_BITS == 32
9307 if (regpairs_aligned(cpu_env
)) {
9312 ret
= get_errno(readahead(arg1
, ((off64_t
)arg3
<< 32) | arg2
, arg4
));
9314 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
9319 #ifdef TARGET_NR_setxattr
9320 case TARGET_NR_listxattr
:
9321 case TARGET_NR_llistxattr
:
9325 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9327 ret
= -TARGET_EFAULT
;
9331 p
= lock_user_string(arg1
);
9333 if (num
== TARGET_NR_listxattr
) {
9334 ret
= get_errno(listxattr(p
, b
, arg3
));
9336 ret
= get_errno(llistxattr(p
, b
, arg3
));
9339 ret
= -TARGET_EFAULT
;
9341 unlock_user(p
, arg1
, 0);
9342 unlock_user(b
, arg2
, arg3
);
9345 case TARGET_NR_flistxattr
:
9349 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9351 ret
= -TARGET_EFAULT
;
9355 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
9356 unlock_user(b
, arg2
, arg3
);
9359 case TARGET_NR_setxattr
:
9360 case TARGET_NR_lsetxattr
:
9362 void *p
, *n
, *v
= 0;
9364 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
9366 ret
= -TARGET_EFAULT
;
9370 p
= lock_user_string(arg1
);
9371 n
= lock_user_string(arg2
);
9373 if (num
== TARGET_NR_setxattr
) {
9374 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
9376 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
9379 ret
= -TARGET_EFAULT
;
9381 unlock_user(p
, arg1
, 0);
9382 unlock_user(n
, arg2
, 0);
9383 unlock_user(v
, arg3
, 0);
9386 case TARGET_NR_fsetxattr
:
9390 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
9392 ret
= -TARGET_EFAULT
;
9396 n
= lock_user_string(arg2
);
9398 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
9400 ret
= -TARGET_EFAULT
;
9402 unlock_user(n
, arg2
, 0);
9403 unlock_user(v
, arg3
, 0);
9406 case TARGET_NR_getxattr
:
9407 case TARGET_NR_lgetxattr
:
9409 void *p
, *n
, *v
= 0;
9411 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
9413 ret
= -TARGET_EFAULT
;
9417 p
= lock_user_string(arg1
);
9418 n
= lock_user_string(arg2
);
9420 if (num
== TARGET_NR_getxattr
) {
9421 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
9423 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
9426 ret
= -TARGET_EFAULT
;
9428 unlock_user(p
, arg1
, 0);
9429 unlock_user(n
, arg2
, 0);
9430 unlock_user(v
, arg3
, arg4
);
9433 case TARGET_NR_fgetxattr
:
9437 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
9439 ret
= -TARGET_EFAULT
;
9443 n
= lock_user_string(arg2
);
9445 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
9447 ret
= -TARGET_EFAULT
;
9449 unlock_user(n
, arg2
, 0);
9450 unlock_user(v
, arg3
, arg4
);
9453 case TARGET_NR_removexattr
:
9454 case TARGET_NR_lremovexattr
:
9457 p
= lock_user_string(arg1
);
9458 n
= lock_user_string(arg2
);
9460 if (num
== TARGET_NR_removexattr
) {
9461 ret
= get_errno(removexattr(p
, n
));
9463 ret
= get_errno(lremovexattr(p
, n
));
9466 ret
= -TARGET_EFAULT
;
9468 unlock_user(p
, arg1
, 0);
9469 unlock_user(n
, arg2
, 0);
9472 case TARGET_NR_fremovexattr
:
9475 n
= lock_user_string(arg2
);
9477 ret
= get_errno(fremovexattr(arg1
, n
));
9479 ret
= -TARGET_EFAULT
;
9481 unlock_user(n
, arg2
, 0);
9485 #endif /* CONFIG_ATTR */
9486 #ifdef TARGET_NR_set_thread_area
9487 case TARGET_NR_set_thread_area
:
9488 #if defined(TARGET_MIPS)
9489 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
9492 #elif defined(TARGET_CRIS)
9494 ret
= -TARGET_EINVAL
;
9496 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
9500 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
9501 ret
= do_set_thread_area(cpu_env
, arg1
);
9503 #elif defined(TARGET_M68K)
9505 TaskState
*ts
= cpu
->opaque
;
9506 ts
->tp_value
= arg1
;
9511 goto unimplemented_nowarn
;
9514 #ifdef TARGET_NR_get_thread_area
9515 case TARGET_NR_get_thread_area
:
9516 #if defined(TARGET_I386) && defined(TARGET_ABI32)
9517 ret
= do_get_thread_area(cpu_env
, arg1
);
9519 #elif defined(TARGET_M68K)
9521 TaskState
*ts
= cpu
->opaque
;
9526 goto unimplemented_nowarn
;
9529 #ifdef TARGET_NR_getdomainname
9530 case TARGET_NR_getdomainname
:
9531 goto unimplemented_nowarn
;
9534 #ifdef TARGET_NR_clock_gettime
9535 case TARGET_NR_clock_gettime
:
9538 ret
= get_errno(clock_gettime(arg1
, &ts
));
9539 if (!is_error(ret
)) {
9540 host_to_target_timespec(arg2
, &ts
);
9545 #ifdef TARGET_NR_clock_getres
9546 case TARGET_NR_clock_getres
:
9549 ret
= get_errno(clock_getres(arg1
, &ts
));
9550 if (!is_error(ret
)) {
9551 host_to_target_timespec(arg2
, &ts
);
9556 #ifdef TARGET_NR_clock_nanosleep
9557 case TARGET_NR_clock_nanosleep
:
9560 target_to_host_timespec(&ts
, arg3
);
9561 ret
= get_errno(clock_nanosleep(arg1
, arg2
, &ts
, arg4
? &ts
: NULL
));
9563 host_to_target_timespec(arg4
, &ts
);
9565 #if defined(TARGET_PPC)
9566 /* clock_nanosleep is odd in that it returns positive errno values.
9567 * On PPC, CR0 bit 3 should be set in such a situation. */
9569 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
9576 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
9577 case TARGET_NR_set_tid_address
:
9578 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
9582 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
9583 case TARGET_NR_tkill
:
9584 ret
= get_errno(sys_tkill((int)arg1
, target_to_host_signal(arg2
)));
9588 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
9589 case TARGET_NR_tgkill
:
9590 ret
= get_errno(sys_tgkill((int)arg1
, (int)arg2
,
9591 target_to_host_signal(arg3
)));
9595 #ifdef TARGET_NR_set_robust_list
9596 case TARGET_NR_set_robust_list
:
9597 case TARGET_NR_get_robust_list
:
9598 /* The ABI for supporting robust futexes has userspace pass
9599 * the kernel a pointer to a linked list which is updated by
9600 * userspace after the syscall; the list is walked by the kernel
9601 * when the thread exits. Since the linked list in QEMU guest
9602 * memory isn't a valid linked list for the host and we have
9603 * no way to reliably intercept the thread-death event, we can't
9604 * support these. Silently return ENOSYS so that guest userspace
9605 * falls back to a non-robust futex implementation (which should
9606 * be OK except in the corner case of the guest crashing while
9607 * holding a mutex that is shared with another process via
9610 goto unimplemented_nowarn
;
9613 #if defined(TARGET_NR_utimensat)
9614 case TARGET_NR_utimensat
:
9616 struct timespec
*tsp
, ts
[2];
9620 target_to_host_timespec(ts
, arg3
);
9621 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
9625 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
9627 if (!(p
= lock_user_string(arg2
))) {
9628 ret
= -TARGET_EFAULT
;
9631 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
9632 unlock_user(p
, arg2
, 0);
9637 case TARGET_NR_futex
:
9638 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9640 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
9641 case TARGET_NR_inotify_init
:
9642 ret
= get_errno(sys_inotify_init());
9645 #ifdef CONFIG_INOTIFY1
9646 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
9647 case TARGET_NR_inotify_init1
:
9648 ret
= get_errno(sys_inotify_init1(arg1
));
9652 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
9653 case TARGET_NR_inotify_add_watch
:
9654 p
= lock_user_string(arg2
);
9655 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
9656 unlock_user(p
, arg2
, 0);
9659 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
9660 case TARGET_NR_inotify_rm_watch
:
9661 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
9665 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
9666 case TARGET_NR_mq_open
:
9668 struct mq_attr posix_mq_attr
, *attrp
;
9670 p
= lock_user_string(arg1
- 1);
9672 copy_from_user_mq_attr (&posix_mq_attr
, arg4
);
9673 attrp
= &posix_mq_attr
;
9677 ret
= get_errno(mq_open(p
, arg2
, arg3
, attrp
));
9678 unlock_user (p
, arg1
, 0);
9682 case TARGET_NR_mq_unlink
:
9683 p
= lock_user_string(arg1
- 1);
9684 ret
= get_errno(mq_unlink(p
));
9685 unlock_user (p
, arg1
, 0);
9688 case TARGET_NR_mq_timedsend
:
9692 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
9694 target_to_host_timespec(&ts
, arg5
);
9695 ret
= get_errno(mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
9696 host_to_target_timespec(arg5
, &ts
);
9699 ret
= get_errno(mq_send(arg1
, p
, arg3
, arg4
));
9700 unlock_user (p
, arg2
, arg3
);
9704 case TARGET_NR_mq_timedreceive
:
9709 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
9711 target_to_host_timespec(&ts
, arg5
);
9712 ret
= get_errno(mq_timedreceive(arg1
, p
, arg3
, &prio
, &ts
));
9713 host_to_target_timespec(arg5
, &ts
);
9716 ret
= get_errno(mq_receive(arg1
, p
, arg3
, &prio
));
9717 unlock_user (p
, arg2
, arg3
);
9719 put_user_u32(prio
, arg4
);
9723 /* Not implemented for now... */
9724 /* case TARGET_NR_mq_notify: */
9727 case TARGET_NR_mq_getsetattr
:
9729 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
9732 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
9733 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
9736 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
9737 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
9744 #ifdef CONFIG_SPLICE
9745 #ifdef TARGET_NR_tee
9748 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
9752 #ifdef TARGET_NR_splice
9753 case TARGET_NR_splice
:
9755 loff_t loff_in
, loff_out
;
9756 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
9758 if (get_user_u64(loff_in
, arg2
)) {
9761 ploff_in
= &loff_in
;
9764 if (get_user_u64(loff_out
, arg4
)) {
9767 ploff_out
= &loff_out
;
9769 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
9771 if (put_user_u64(loff_in
, arg2
)) {
9776 if (put_user_u64(loff_out
, arg4
)) {
9783 #ifdef TARGET_NR_vmsplice
9784 case TARGET_NR_vmsplice
:
9786 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
9788 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
9789 unlock_iovec(vec
, arg2
, arg3
, 0);
9791 ret
= -host_to_target_errno(errno
);
9796 #endif /* CONFIG_SPLICE */
9797 #ifdef CONFIG_EVENTFD
9798 #if defined(TARGET_NR_eventfd)
9799 case TARGET_NR_eventfd
:
9800 ret
= get_errno(eventfd(arg1
, 0));
9801 fd_trans_unregister(ret
);
9804 #if defined(TARGET_NR_eventfd2)
9805 case TARGET_NR_eventfd2
:
9807 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
9808 if (arg2
& TARGET_O_NONBLOCK
) {
9809 host_flags
|= O_NONBLOCK
;
9811 if (arg2
& TARGET_O_CLOEXEC
) {
9812 host_flags
|= O_CLOEXEC
;
9814 ret
= get_errno(eventfd(arg1
, host_flags
));
9815 fd_trans_unregister(ret
);
9819 #endif /* CONFIG_EVENTFD */
9820 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
9821 case TARGET_NR_fallocate
:
9822 #if TARGET_ABI_BITS == 32
9823 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
9824 target_offset64(arg5
, arg6
)));
9826 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
9830 #if defined(CONFIG_SYNC_FILE_RANGE)
9831 #if defined(TARGET_NR_sync_file_range)
9832 case TARGET_NR_sync_file_range
:
9833 #if TARGET_ABI_BITS == 32
9834 #if defined(TARGET_MIPS)
9835 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
9836 target_offset64(arg5
, arg6
), arg7
));
9838 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
9839 target_offset64(arg4
, arg5
), arg6
));
9840 #endif /* !TARGET_MIPS */
9842 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
9846 #if defined(TARGET_NR_sync_file_range2)
9847 case TARGET_NR_sync_file_range2
:
9848 /* This is like sync_file_range but the arguments are reordered */
9849 #if TARGET_ABI_BITS == 32
9850 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
9851 target_offset64(arg5
, arg6
), arg2
));
9853 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
9858 #if defined(TARGET_NR_signalfd4)
9859 case TARGET_NR_signalfd4
:
9860 ret
= do_signalfd4(arg1
, arg2
, arg4
);
9863 #if defined(TARGET_NR_signalfd)
9864 case TARGET_NR_signalfd
:
9865 ret
= do_signalfd4(arg1
, arg2
, 0);
9868 #if defined(CONFIG_EPOLL)
9869 #if defined(TARGET_NR_epoll_create)
9870 case TARGET_NR_epoll_create
:
9871 ret
= get_errno(epoll_create(arg1
));
9874 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
9875 case TARGET_NR_epoll_create1
:
9876 ret
= get_errno(epoll_create1(arg1
));
9879 #if defined(TARGET_NR_epoll_ctl)
9880 case TARGET_NR_epoll_ctl
:
9882 struct epoll_event ep
;
9883 struct epoll_event
*epp
= 0;
9885 struct target_epoll_event
*target_ep
;
9886 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
9889 ep
.events
= tswap32(target_ep
->events
);
9890 /* The epoll_data_t union is just opaque data to the kernel,
9891 * so we transfer all 64 bits across and need not worry what
9892 * actual data type it is.
9894 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
9895 unlock_user_struct(target_ep
, arg4
, 0);
9898 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
9903 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
9904 #define IMPLEMENT_EPOLL_PWAIT
9906 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
9907 #if defined(TARGET_NR_epoll_wait)
9908 case TARGET_NR_epoll_wait
:
9910 #if defined(IMPLEMENT_EPOLL_PWAIT)
9911 case TARGET_NR_epoll_pwait
:
9914 struct target_epoll_event
*target_ep
;
9915 struct epoll_event
*ep
;
9917 int maxevents
= arg3
;
9920 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
9921 maxevents
* sizeof(struct target_epoll_event
), 1);
9926 ep
= alloca(maxevents
* sizeof(struct epoll_event
));
9929 #if defined(IMPLEMENT_EPOLL_PWAIT)
9930 case TARGET_NR_epoll_pwait
:
9932 target_sigset_t
*target_set
;
9933 sigset_t _set
, *set
= &_set
;
9936 target_set
= lock_user(VERIFY_READ
, arg5
,
9937 sizeof(target_sigset_t
), 1);
9939 unlock_user(target_ep
, arg2
, 0);
9942 target_to_host_sigset(set
, target_set
);
9943 unlock_user(target_set
, arg5
, 0);
9948 ret
= get_errno(epoll_pwait(epfd
, ep
, maxevents
, timeout
, set
));
9952 #if defined(TARGET_NR_epoll_wait)
9953 case TARGET_NR_epoll_wait
:
9954 ret
= get_errno(epoll_wait(epfd
, ep
, maxevents
, timeout
));
9958 ret
= -TARGET_ENOSYS
;
9960 if (!is_error(ret
)) {
9962 for (i
= 0; i
< ret
; i
++) {
9963 target_ep
[i
].events
= tswap32(ep
[i
].events
);
9964 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
9967 unlock_user(target_ep
, arg2
, ret
* sizeof(struct target_epoll_event
));
9972 #ifdef TARGET_NR_prlimit64
9973 case TARGET_NR_prlimit64
:
9975 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
9976 struct target_rlimit64
*target_rnew
, *target_rold
;
9977 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
9978 int resource
= target_to_host_resource(arg2
);
9980 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
9983 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
9984 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
9985 unlock_user_struct(target_rnew
, arg3
, 0);
9989 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
9990 if (!is_error(ret
) && arg4
) {
9991 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
9994 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
9995 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
9996 unlock_user_struct(target_rold
, arg4
, 1);
10001 #ifdef TARGET_NR_gethostname
10002 case TARGET_NR_gethostname
:
10004 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
10006 ret
= get_errno(gethostname(name
, arg2
));
10007 unlock_user(name
, arg1
, arg2
);
10009 ret
= -TARGET_EFAULT
;
10014 #ifdef TARGET_NR_atomic_cmpxchg_32
10015 case TARGET_NR_atomic_cmpxchg_32
:
10017 /* should use start_exclusive from main.c */
10018 abi_ulong mem_value
;
10019 if (get_user_u32(mem_value
, arg6
)) {
10020 target_siginfo_t info
;
10021 info
.si_signo
= SIGSEGV
;
10023 info
.si_code
= TARGET_SEGV_MAPERR
;
10024 info
._sifields
._sigfault
._addr
= arg6
;
10025 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
10029 if (mem_value
== arg2
)
10030 put_user_u32(arg1
, arg6
);
10035 #ifdef TARGET_NR_atomic_barrier
10036 case TARGET_NR_atomic_barrier
:
10038 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
10044 #ifdef TARGET_NR_timer_create
10045 case TARGET_NR_timer_create
:
10047 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
10049 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
10052 int timer_index
= next_free_host_timer();
10054 if (timer_index
< 0) {
10055 ret
= -TARGET_EAGAIN
;
10057 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
10060 phost_sevp
= &host_sevp
;
10061 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
10067 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
10071 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
10080 #ifdef TARGET_NR_timer_settime
10081 case TARGET_NR_timer_settime
:
10083 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
10084 * struct itimerspec * old_value */
10085 target_timer_t timerid
= get_timer_id(arg1
);
10089 } else if (arg3
== 0) {
10090 ret
= -TARGET_EINVAL
;
10092 timer_t htimer
= g_posix_timers
[timerid
];
10093 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
10095 target_to_host_itimerspec(&hspec_new
, arg3
);
10097 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
10098 host_to_target_itimerspec(arg2
, &hspec_old
);
10104 #ifdef TARGET_NR_timer_gettime
10105 case TARGET_NR_timer_gettime
:
10107 /* args: timer_t timerid, struct itimerspec *curr_value */
10108 target_timer_t timerid
= get_timer_id(arg1
);
10112 } else if (!arg2
) {
10113 ret
= -TARGET_EFAULT
;
10115 timer_t htimer
= g_posix_timers
[timerid
];
10116 struct itimerspec hspec
;
10117 ret
= get_errno(timer_gettime(htimer
, &hspec
));
10119 if (host_to_target_itimerspec(arg2
, &hspec
)) {
10120 ret
= -TARGET_EFAULT
;
10127 #ifdef TARGET_NR_timer_getoverrun
10128 case TARGET_NR_timer_getoverrun
:
10130 /* args: timer_t timerid */
10131 target_timer_t timerid
= get_timer_id(arg1
);
10136 timer_t htimer
= g_posix_timers
[timerid
];
10137 ret
= get_errno(timer_getoverrun(htimer
));
10139 fd_trans_unregister(ret
);
10144 #ifdef TARGET_NR_timer_delete
10145 case TARGET_NR_timer_delete
:
10147 /* args: timer_t timerid */
10148 target_timer_t timerid
= get_timer_id(arg1
);
10153 timer_t htimer
= g_posix_timers
[timerid
];
10154 ret
= get_errno(timer_delete(htimer
));
10155 g_posix_timers
[timerid
] = 0;
10161 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
10162 case TARGET_NR_timerfd_create
:
10163 ret
= get_errno(timerfd_create(arg1
,
10164 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
10168 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
10169 case TARGET_NR_timerfd_gettime
:
10171 struct itimerspec its_curr
;
10173 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
10175 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
10182 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
10183 case TARGET_NR_timerfd_settime
:
10185 struct itimerspec its_new
, its_old
, *p_new
;
10188 if (target_to_host_itimerspec(&its_new
, arg3
)) {
10196 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
10198 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
10205 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
10206 case TARGET_NR_ioprio_get
:
10207 ret
= get_errno(ioprio_get(arg1
, arg2
));
10211 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
10212 case TARGET_NR_ioprio_set
:
10213 ret
= get_errno(ioprio_set(arg1
, arg2
, arg3
));
10217 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
10218 case TARGET_NR_setns
:
10219 ret
= get_errno(setns(arg1
, arg2
));
10222 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
10223 case TARGET_NR_unshare
:
10224 ret
= get_errno(unshare(arg1
));
10230 gemu_log("qemu: Unsupported syscall: %d\n", num
);
10231 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
10232 unimplemented_nowarn
:
10234 ret
= -TARGET_ENOSYS
;
10239 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
10242 print_syscall_ret(num
, ret
);
10245 ret
= -TARGET_EFAULT
;