4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
32 #include <sys/types.h>
38 #include <sys/mount.h>
40 #include <sys/fsuid.h>
41 #include <sys/personality.h>
42 #include <sys/prctl.h>
43 #include <sys/resource.h>
46 #include <linux/capability.h>
50 int __clone2(int (*fn
)(void *), void *child_stack_base
,
51 size_t stack_size
, int flags
, void *arg
, ...);
53 #include <sys/socket.h>
57 #include <sys/times.h>
60 #include <sys/statfs.h>
62 #include <sys/sysinfo.h>
63 //#include <sys/user.h>
64 #include <netinet/ip.h>
65 #include <netinet/tcp.h>
66 #include <linux/wireless.h>
67 #include <linux/icmp.h>
68 #include "qemu-common.h"
70 #include <sys/timerfd.h>
76 #include <sys/eventfd.h>
79 #include <sys/epoll.h>
82 #include "qemu/xattr.h"
84 #ifdef CONFIG_SENDFILE
85 #include <sys/sendfile.h>
88 #define termios host_termios
89 #define winsize host_winsize
90 #define termio host_termio
91 #define sgttyb host_sgttyb /* same as target */
92 #define tchars host_tchars /* same as target */
93 #define ltchars host_ltchars /* same as target */
95 #include <linux/termios.h>
96 #include <linux/unistd.h>
97 #include <linux/cdrom.h>
98 #include <linux/hdreg.h>
99 #include <linux/soundcard.h>
100 #include <linux/kd.h>
101 #include <linux/mtio.h>
102 #include <linux/fs.h>
103 #if defined(CONFIG_FIEMAP)
104 #include <linux/fiemap.h>
106 #include <linux/fb.h>
107 #include <linux/vt.h>
108 #include <linux/dm-ioctl.h>
109 #include <linux/reboot.h>
110 #include <linux/route.h>
111 #include <linux/filter.h>
112 #include <linux/blkpg.h>
113 #include "linux_loop.h"
118 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
119 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
123 //#include <linux/msdos_fs.h>
124 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
125 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
136 #define _syscall0(type,name) \
137 static type name (void) \
139 return syscall(__NR_##name); \
142 #define _syscall1(type,name,type1,arg1) \
143 static type name (type1 arg1) \
145 return syscall(__NR_##name, arg1); \
148 #define _syscall2(type,name,type1,arg1,type2,arg2) \
149 static type name (type1 arg1,type2 arg2) \
151 return syscall(__NR_##name, arg1, arg2); \
154 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
155 static type name (type1 arg1,type2 arg2,type3 arg3) \
157 return syscall(__NR_##name, arg1, arg2, arg3); \
160 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
161 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
163 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
166 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
168 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
170 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
174 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
175 type5,arg5,type6,arg6) \
176 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
179 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
183 #define __NR_sys_uname __NR_uname
184 #define __NR_sys_getcwd1 __NR_getcwd
185 #define __NR_sys_getdents __NR_getdents
186 #define __NR_sys_getdents64 __NR_getdents64
187 #define __NR_sys_getpriority __NR_getpriority
188 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
189 #define __NR_sys_syslog __NR_syslog
190 #define __NR_sys_tgkill __NR_tgkill
191 #define __NR_sys_tkill __NR_tkill
192 #define __NR_sys_futex __NR_futex
193 #define __NR_sys_inotify_init __NR_inotify_init
194 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
195 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
197 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
199 #define __NR__llseek __NR_lseek
202 /* Newer kernel ports have llseek() instead of _llseek() */
203 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
204 #define TARGET_NR__llseek TARGET_NR_llseek
208 _syscall0(int, gettid
)
210 /* This is a replacement for the host gettid() and must return a host
212 static int gettid(void) {
217 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
219 #if !defined(__NR_getdents) || \
220 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
221 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
223 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
224 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
225 loff_t
*, res
, uint
, wh
);
227 _syscall3(int,sys_rt_sigqueueinfo
,int,pid
,int,sig
,siginfo_t
*,uinfo
)
228 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
229 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
230 _syscall3(int,sys_tgkill
,int,tgid
,int,pid
,int,sig
)
232 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
233 _syscall2(int,sys_tkill
,int,tid
,int,sig
)
235 #ifdef __NR_exit_group
236 _syscall1(int,exit_group
,int,error_code
)
238 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
239 _syscall1(int,set_tid_address
,int *,tidptr
)
241 #if defined(TARGET_NR_futex) && defined(__NR_futex)
242 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
243 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
245 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
246 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
247 unsigned long *, user_mask_ptr
);
248 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
249 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
250 unsigned long *, user_mask_ptr
);
251 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
253 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
254 struct __user_cap_data_struct
*, data
);
255 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
256 struct __user_cap_data_struct
*, data
);
257 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
258 _syscall2(int, ioprio_get
, int, which
, int, who
)
260 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
261 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
264 static bitmask_transtbl fcntl_flags_tbl
[] = {
265 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
266 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
267 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
268 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
269 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
270 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
271 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
272 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
273 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
274 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
275 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
276 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
277 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
278 #if defined(O_DIRECT)
279 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
281 #if defined(O_NOATIME)
282 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
284 #if defined(O_CLOEXEC)
285 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
288 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
290 /* Don't terminate the list prematurely on 64-bit host+guest. */
291 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
292 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
/* getcwd() wrapper matching the kernel getcwd syscall convention:
 * on success return the path length INCLUDING the trailing NUL,
 * on failure return -1 (getcwd() has already set errno). */
static int sys_getcwd1(char *buf, size_t size)
{
    char *res = getcwd(buf, size);

    if (res == NULL) {
        /* getcwd() sets errno */
        return -1;
    }
    return strlen(res) + 1;
}
/* openat() wrapper.  open(2) only consumes its 'mode' argument when
 * O_CREAT is present in 'flags'; mirror that by choosing between the
 * three- and four-argument forms of openat(). */
static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
{
    if (flags & O_CREAT) {
        return openat(dirfd, pathname, flags, mode);
    }
    return openat(dirfd, pathname, flags);
}
318 #ifdef TARGET_NR_utimensat
319 #ifdef CONFIG_UTIMENSAT
/* utimensat() wrapper.  A NULL pathname means "operate on the file
 * referred to by dirfd itself", which the C library expresses as
 * futimens(); otherwise resolve pathname relative to dirfd. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    if (pathname == NULL) {
        return futimens(dirfd, times);
    }
    return utimensat(dirfd, pathname, times, flags);
}
328 #elif defined(__NR_utimensat)
329 #define __NR_sys_utimensat __NR_utimensat
330 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
331 const struct timespec
*,tsp
,int,flags
)
333 static int sys_utimensat(int dirfd
, const char *pathname
,
334 const struct timespec times
[2], int flags
)
340 #endif /* TARGET_NR_utimensat */
342 #ifdef CONFIG_INOTIFY
343 #include <sys/inotify.h>
345 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin wrapper so the syscall dispatch code has a uniform sys_* name. */
static int sys_inotify_init(void)
{
    return inotify_init();
}
351 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Thin wrapper around inotify_add_watch() for the syscall dispatcher. */
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return inotify_add_watch(fd, pathname, mask);
}
357 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Thin wrapper around inotify_rm_watch() for the syscall dispatcher. */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return inotify_rm_watch(fd, wd);
}
363 #ifdef CONFIG_INOTIFY1
364 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Thin wrapper around inotify_init1() for the syscall dispatcher. */
static int sys_inotify_init1(int flags)
{
    return inotify_init1(flags);
}
372 /* Userspace can usually survive runtime without inotify */
373 #undef TARGET_NR_inotify_init
374 #undef TARGET_NR_inotify_init1
375 #undef TARGET_NR_inotify_add_watch
376 #undef TARGET_NR_inotify_rm_watch
377 #endif /* CONFIG_INOTIFY */
379 #if defined(TARGET_NR_ppoll)
381 # define __NR_ppoll -1
383 #define __NR_sys_ppoll __NR_ppoll
384 _syscall5(int, sys_ppoll
, struct pollfd
*, fds
, nfds_t
, nfds
,
385 struct timespec
*, timeout
, const sigset_t
*, sigmask
,
389 #if defined(TARGET_NR_pselect6)
390 #ifndef __NR_pselect6
391 # define __NR_pselect6 -1
393 #define __NR_sys_pselect6 __NR_pselect6
394 _syscall6(int, sys_pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
,
395 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
);
398 #if defined(TARGET_NR_prlimit64)
399 #ifndef __NR_prlimit64
400 # define __NR_prlimit64 -1
402 #define __NR_sys_prlimit64 __NR_prlimit64
403 /* The glibc rlimit structure may not be that used by the underlying syscall */
404 struct host_rlimit64
{
408 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
409 const struct host_rlimit64
*, new_limit
,
410 struct host_rlimit64
*, old_limit
)
414 #if defined(TARGET_NR_timer_create)
415 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
416 static timer_t g_posix_timers
[32] = { 0, } ;
418 static inline int next_free_host_timer(void)
421 /* FIXME: Does finding the next free slot require a lock? */
422 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
423 if (g_posix_timers
[k
] == 0) {
424 g_posix_timers
[k
] = (timer_t
) 1;
432 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
434 static inline int regpairs_aligned(void *cpu_env
) {
435 return ((((CPUARMState
*)cpu_env
)->eabi
) == 1) ;
437 #elif defined(TARGET_MIPS)
438 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
439 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
440 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
441 * of registers which translates to the same as ARM/MIPS, because we start with
443 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
445 static inline int regpairs_aligned(void *cpu_env
) { return 0; }
448 #define ERRNO_TABLE_SIZE 1200
450 /* target_to_host_errno_table[] is initialized from
451 * host_to_target_errno_table[] in syscall_init(). */
452 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
456 * This list is the union of errno values overridden in asm-<arch>/errno.h
457 * minus the errnos that are not actually generic to all archs.
459 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
460 [EIDRM
] = TARGET_EIDRM
,
461 [ECHRNG
] = TARGET_ECHRNG
,
462 [EL2NSYNC
] = TARGET_EL2NSYNC
,
463 [EL3HLT
] = TARGET_EL3HLT
,
464 [EL3RST
] = TARGET_EL3RST
,
465 [ELNRNG
] = TARGET_ELNRNG
,
466 [EUNATCH
] = TARGET_EUNATCH
,
467 [ENOCSI
] = TARGET_ENOCSI
,
468 [EL2HLT
] = TARGET_EL2HLT
,
469 [EDEADLK
] = TARGET_EDEADLK
,
470 [ENOLCK
] = TARGET_ENOLCK
,
471 [EBADE
] = TARGET_EBADE
,
472 [EBADR
] = TARGET_EBADR
,
473 [EXFULL
] = TARGET_EXFULL
,
474 [ENOANO
] = TARGET_ENOANO
,
475 [EBADRQC
] = TARGET_EBADRQC
,
476 [EBADSLT
] = TARGET_EBADSLT
,
477 [EBFONT
] = TARGET_EBFONT
,
478 [ENOSTR
] = TARGET_ENOSTR
,
479 [ENODATA
] = TARGET_ENODATA
,
480 [ETIME
] = TARGET_ETIME
,
481 [ENOSR
] = TARGET_ENOSR
,
482 [ENONET
] = TARGET_ENONET
,
483 [ENOPKG
] = TARGET_ENOPKG
,
484 [EREMOTE
] = TARGET_EREMOTE
,
485 [ENOLINK
] = TARGET_ENOLINK
,
486 [EADV
] = TARGET_EADV
,
487 [ESRMNT
] = TARGET_ESRMNT
,
488 [ECOMM
] = TARGET_ECOMM
,
489 [EPROTO
] = TARGET_EPROTO
,
490 [EDOTDOT
] = TARGET_EDOTDOT
,
491 [EMULTIHOP
] = TARGET_EMULTIHOP
,
492 [EBADMSG
] = TARGET_EBADMSG
,
493 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
494 [EOVERFLOW
] = TARGET_EOVERFLOW
,
495 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
496 [EBADFD
] = TARGET_EBADFD
,
497 [EREMCHG
] = TARGET_EREMCHG
,
498 [ELIBACC
] = TARGET_ELIBACC
,
499 [ELIBBAD
] = TARGET_ELIBBAD
,
500 [ELIBSCN
] = TARGET_ELIBSCN
,
501 [ELIBMAX
] = TARGET_ELIBMAX
,
502 [ELIBEXEC
] = TARGET_ELIBEXEC
,
503 [EILSEQ
] = TARGET_EILSEQ
,
504 [ENOSYS
] = TARGET_ENOSYS
,
505 [ELOOP
] = TARGET_ELOOP
,
506 [ERESTART
] = TARGET_ERESTART
,
507 [ESTRPIPE
] = TARGET_ESTRPIPE
,
508 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
509 [EUSERS
] = TARGET_EUSERS
,
510 [ENOTSOCK
] = TARGET_ENOTSOCK
,
511 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
512 [EMSGSIZE
] = TARGET_EMSGSIZE
,
513 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
514 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
515 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
516 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
517 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
518 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
519 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
520 [EADDRINUSE
] = TARGET_EADDRINUSE
,
521 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
522 [ENETDOWN
] = TARGET_ENETDOWN
,
523 [ENETUNREACH
] = TARGET_ENETUNREACH
,
524 [ENETRESET
] = TARGET_ENETRESET
,
525 [ECONNABORTED
] = TARGET_ECONNABORTED
,
526 [ECONNRESET
] = TARGET_ECONNRESET
,
527 [ENOBUFS
] = TARGET_ENOBUFS
,
528 [EISCONN
] = TARGET_EISCONN
,
529 [ENOTCONN
] = TARGET_ENOTCONN
,
530 [EUCLEAN
] = TARGET_EUCLEAN
,
531 [ENOTNAM
] = TARGET_ENOTNAM
,
532 [ENAVAIL
] = TARGET_ENAVAIL
,
533 [EISNAM
] = TARGET_EISNAM
,
534 [EREMOTEIO
] = TARGET_EREMOTEIO
,
535 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
536 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
537 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
538 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
539 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
540 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
541 [EALREADY
] = TARGET_EALREADY
,
542 [EINPROGRESS
] = TARGET_EINPROGRESS
,
543 [ESTALE
] = TARGET_ESTALE
,
544 [ECANCELED
] = TARGET_ECANCELED
,
545 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
546 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
548 [ENOKEY
] = TARGET_ENOKEY
,
551 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
554 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
557 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
560 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
562 #ifdef ENOTRECOVERABLE
563 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
567 static inline int host_to_target_errno(int err
)
569 if(host_to_target_errno_table
[err
])
570 return host_to_target_errno_table
[err
];
574 static inline int target_to_host_errno(int err
)
576 if (target_to_host_errno_table
[err
])
577 return target_to_host_errno_table
[err
];
581 static inline abi_long
get_errno(abi_long ret
)
584 return -host_to_target_errno(errno
);
589 static inline int is_error(abi_long ret
)
591 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
594 char *target_strerror(int err
)
596 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
599 return strerror(target_to_host_errno(err
));
602 static inline int host_to_target_sock_type(int host_type
)
606 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
608 target_type
= TARGET_SOCK_DGRAM
;
611 target_type
= TARGET_SOCK_STREAM
;
614 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
618 #if defined(SOCK_CLOEXEC)
619 if (host_type
& SOCK_CLOEXEC
) {
620 target_type
|= TARGET_SOCK_CLOEXEC
;
624 #if defined(SOCK_NONBLOCK)
625 if (host_type
& SOCK_NONBLOCK
) {
626 target_type
|= TARGET_SOCK_NONBLOCK
;
633 static abi_ulong target_brk
;
634 static abi_ulong target_original_brk
;
635 static abi_ulong brk_page
;
637 void target_set_brk(abi_ulong new_brk
)
639 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
640 brk_page
= HOST_PAGE_ALIGN(target_brk
);
643 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
644 #define DEBUGF_BRK(message, args...)
646 /* do_brk() must return target values and target errnos. */
647 abi_long
do_brk(abi_ulong new_brk
)
649 abi_long mapped_addr
;
652 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
655 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
658 if (new_brk
< target_original_brk
) {
659 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
664 /* If the new brk is less than the highest page reserved to the
665 * target heap allocation, set it and we're almost done... */
666 if (new_brk
<= brk_page
) {
667 /* Heap contents are initialized to zero, as for anonymous
669 if (new_brk
> target_brk
) {
670 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
672 target_brk
= new_brk
;
673 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
677 /* We need to allocate more memory after the brk... Note that
678 * we don't use MAP_FIXED because that will map over the top of
679 * any existing mapping (like the one with the host libc or qemu
680 * itself); instead we treat "mapped but at wrong address" as
681 * a failure and unmap again.
683 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
684 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
685 PROT_READ
|PROT_WRITE
,
686 MAP_ANON
|MAP_PRIVATE
, 0, 0));
688 if (mapped_addr
== brk_page
) {
689 /* Heap contents are initialized to zero, as for anonymous
690 * mapped pages. Technically the new pages are already
691 * initialized to zero since they *are* anonymous mapped
692 * pages, however we have to take care with the contents that
693 * come from the remaining part of the previous page: it may
694 * contains garbage data due to a previous heap usage (grown
696 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
698 target_brk
= new_brk
;
699 brk_page
= HOST_PAGE_ALIGN(target_brk
);
700 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
703 } else if (mapped_addr
!= -1) {
704 /* Mapped but at wrong address, meaning there wasn't actually
705 * enough space for this brk.
707 target_munmap(mapped_addr
, new_alloc_size
);
709 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
712 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
715 #if defined(TARGET_ALPHA)
716 /* We (partially) emulate OSF/1 on Alpha, which requires we
717 return a proper errno, not an unchanged brk value. */
718 return -TARGET_ENOMEM
;
720 /* For everything else, return the previous break. */
724 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
725 abi_ulong target_fds_addr
,
729 abi_ulong b
, *target_fds
;
731 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
732 if (!(target_fds
= lock_user(VERIFY_READ
,
734 sizeof(abi_ulong
) * nw
,
736 return -TARGET_EFAULT
;
740 for (i
= 0; i
< nw
; i
++) {
741 /* grab the abi_ulong */
742 __get_user(b
, &target_fds
[i
]);
743 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
744 /* check the bit inside the abi_ulong */
751 unlock_user(target_fds
, target_fds_addr
, 0);
756 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
757 abi_ulong target_fds_addr
,
760 if (target_fds_addr
) {
761 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
762 return -TARGET_EFAULT
;
770 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
776 abi_ulong
*target_fds
;
778 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
779 if (!(target_fds
= lock_user(VERIFY_WRITE
,
781 sizeof(abi_ulong
) * nw
,
783 return -TARGET_EFAULT
;
786 for (i
= 0; i
< nw
; i
++) {
788 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
789 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
792 __put_user(v
, &target_fds
[i
]);
795 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
800 #if defined(__alpha__)
806 static inline abi_long
host_to_target_clock_t(long ticks
)
808 #if HOST_HZ == TARGET_HZ
811 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
815 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
816 const struct rusage
*rusage
)
818 struct target_rusage
*target_rusage
;
820 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
821 return -TARGET_EFAULT
;
822 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
823 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
824 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
825 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
826 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
827 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
828 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
829 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
830 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
831 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
832 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
833 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
834 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
835 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
836 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
837 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
838 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
839 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
840 unlock_user_struct(target_rusage
, target_addr
, 1);
845 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
847 abi_ulong target_rlim_swap
;
850 target_rlim_swap
= tswapal(target_rlim
);
851 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
852 return RLIM_INFINITY
;
854 result
= target_rlim_swap
;
855 if (target_rlim_swap
!= (rlim_t
)result
)
856 return RLIM_INFINITY
;
861 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
863 abi_ulong target_rlim_swap
;
866 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
867 target_rlim_swap
= TARGET_RLIM_INFINITY
;
869 target_rlim_swap
= rlim
;
870 result
= tswapal(target_rlim_swap
);
875 static inline int target_to_host_resource(int code
)
878 case TARGET_RLIMIT_AS
:
880 case TARGET_RLIMIT_CORE
:
882 case TARGET_RLIMIT_CPU
:
884 case TARGET_RLIMIT_DATA
:
886 case TARGET_RLIMIT_FSIZE
:
888 case TARGET_RLIMIT_LOCKS
:
890 case TARGET_RLIMIT_MEMLOCK
:
891 return RLIMIT_MEMLOCK
;
892 case TARGET_RLIMIT_MSGQUEUE
:
893 return RLIMIT_MSGQUEUE
;
894 case TARGET_RLIMIT_NICE
:
896 case TARGET_RLIMIT_NOFILE
:
897 return RLIMIT_NOFILE
;
898 case TARGET_RLIMIT_NPROC
:
900 case TARGET_RLIMIT_RSS
:
902 case TARGET_RLIMIT_RTPRIO
:
903 return RLIMIT_RTPRIO
;
904 case TARGET_RLIMIT_SIGPENDING
:
905 return RLIMIT_SIGPENDING
;
906 case TARGET_RLIMIT_STACK
:
913 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
914 abi_ulong target_tv_addr
)
916 struct target_timeval
*target_tv
;
918 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
919 return -TARGET_EFAULT
;
921 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
922 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
924 unlock_user_struct(target_tv
, target_tv_addr
, 0);
929 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
930 const struct timeval
*tv
)
932 struct target_timeval
*target_tv
;
934 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
935 return -TARGET_EFAULT
;
937 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
938 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
940 unlock_user_struct(target_tv
, target_tv_addr
, 1);
945 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
946 abi_ulong target_tz_addr
)
948 struct target_timezone
*target_tz
;
950 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
951 return -TARGET_EFAULT
;
954 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
955 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
957 unlock_user_struct(target_tz
, target_tz_addr
, 0);
962 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
965 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
966 abi_ulong target_mq_attr_addr
)
968 struct target_mq_attr
*target_mq_attr
;
970 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
971 target_mq_attr_addr
, 1))
972 return -TARGET_EFAULT
;
974 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
975 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
976 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
977 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
979 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
984 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
985 const struct mq_attr
*attr
)
987 struct target_mq_attr
*target_mq_attr
;
989 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
990 target_mq_attr_addr
, 0))
991 return -TARGET_EFAULT
;
993 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
994 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
995 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
996 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
998 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1004 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1005 /* do_select() must return target values and target errnos. */
1006 static abi_long
do_select(int n
,
1007 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1008 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1010 fd_set rfds
, wfds
, efds
;
1011 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1012 struct timeval tv
, *tv_ptr
;
1015 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1019 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1023 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1028 if (target_tv_addr
) {
1029 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1030 return -TARGET_EFAULT
;
1036 ret
= get_errno(select(n
, rfds_ptr
, wfds_ptr
, efds_ptr
, tv_ptr
));
1038 if (!is_error(ret
)) {
1039 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1040 return -TARGET_EFAULT
;
1041 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1042 return -TARGET_EFAULT
;
1043 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1044 return -TARGET_EFAULT
;
1046 if (target_tv_addr
&& copy_to_user_timeval(target_tv_addr
, &tv
))
1047 return -TARGET_EFAULT
;
1054 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1057 return pipe2(host_pipe
, flags
);
1063 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1064 int flags
, int is_pipe2
)
1068 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1071 return get_errno(ret
);
1073 /* Several targets have special calling conventions for the original
1074 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1076 #if defined(TARGET_ALPHA)
1077 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1078 return host_pipe
[0];
1079 #elif defined(TARGET_MIPS)
1080 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1081 return host_pipe
[0];
1082 #elif defined(TARGET_SH4)
1083 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1084 return host_pipe
[0];
1085 #elif defined(TARGET_SPARC)
1086 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1087 return host_pipe
[0];
1091 if (put_user_s32(host_pipe
[0], pipedes
)
1092 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1093 return -TARGET_EFAULT
;
1094 return get_errno(ret
);
1097 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1098 abi_ulong target_addr
,
1101 struct target_ip_mreqn
*target_smreqn
;
1103 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1105 return -TARGET_EFAULT
;
1106 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1107 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1108 if (len
== sizeof(struct target_ip_mreqn
))
1109 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1110 unlock_user(target_smreqn
, target_addr
, 0);
1115 static inline abi_long
target_to_host_sockaddr(struct sockaddr
*addr
,
1116 abi_ulong target_addr
,
1119 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1120 sa_family_t sa_family
;
1121 struct target_sockaddr
*target_saddr
;
1123 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1125 return -TARGET_EFAULT
;
1127 sa_family
= tswap16(target_saddr
->sa_family
);
1129 /* Oops. The caller might send a incomplete sun_path; sun_path
1130 * must be terminated by \0 (see the manual page), but
1131 * unfortunately it is quite common to specify sockaddr_un
1132 * length as "strlen(x->sun_path)" while it should be
1133 * "strlen(...) + 1". We'll fix that here if needed.
1134 * Linux kernel has a similar feature.
1137 if (sa_family
== AF_UNIX
) {
1138 if (len
< unix_maxlen
&& len
> 0) {
1139 char *cp
= (char*)target_saddr
;
1141 if ( cp
[len
-1] && !cp
[len
] )
1144 if (len
> unix_maxlen
)
1148 memcpy(addr
, target_saddr
, len
);
1149 addr
->sa_family
= sa_family
;
1150 if (sa_family
== AF_PACKET
) {
1151 struct target_sockaddr_ll
*lladdr
;
1153 lladdr
= (struct target_sockaddr_ll
*)addr
;
1154 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1155 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1157 unlock_user(target_saddr
, target_addr
, 0);
1162 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1163 struct sockaddr
*addr
,
1166 struct target_sockaddr
*target_saddr
;
1168 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1170 return -TARGET_EFAULT
;
1171 memcpy(target_saddr
, addr
, len
);
1172 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1173 unlock_user(target_saddr
, target_addr
, len
);
1178 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1179 struct target_msghdr
*target_msgh
)
1181 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1182 abi_long msg_controllen
;
1183 abi_ulong target_cmsg_addr
;
1184 struct target_cmsghdr
*target_cmsg
;
1185 socklen_t space
= 0;
1187 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1188 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1190 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1191 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1193 return -TARGET_EFAULT
;
1195 while (cmsg
&& target_cmsg
) {
1196 void *data
= CMSG_DATA(cmsg
);
1197 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1199 int len
= tswapal(target_cmsg
->cmsg_len
)
1200 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
1202 space
+= CMSG_SPACE(len
);
1203 if (space
> msgh
->msg_controllen
) {
1204 space
-= CMSG_SPACE(len
);
1205 gemu_log("Host cmsg overflow\n");
1209 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1210 cmsg
->cmsg_level
= SOL_SOCKET
;
1212 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1214 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1215 cmsg
->cmsg_len
= CMSG_LEN(len
);
1217 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1218 int *fd
= (int *)data
;
1219 int *target_fd
= (int *)target_data
;
1220 int i
, numfds
= len
/ sizeof(int);
1222 for (i
= 0; i
< numfds
; i
++)
1223 fd
[i
] = tswap32(target_fd
[i
]);
1224 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1225 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1226 struct ucred
*cred
= (struct ucred
*)data
;
1227 struct target_ucred
*target_cred
=
1228 (struct target_ucred
*)target_data
;
1230 __put_user(target_cred
->pid
, &cred
->pid
);
1231 __put_user(target_cred
->uid
, &cred
->uid
);
1232 __put_user(target_cred
->gid
, &cred
->gid
);
1234 gemu_log("Unsupported ancillary data: %d/%d\n",
1235 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1236 memcpy(data
, target_data
, len
);
1239 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1240 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
);
1242 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1244 msgh
->msg_controllen
= space
;
1248 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1249 struct msghdr
*msgh
)
1251 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1252 abi_long msg_controllen
;
1253 abi_ulong target_cmsg_addr
;
1254 struct target_cmsghdr
*target_cmsg
;
1255 socklen_t space
= 0;
1257 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1258 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1260 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1261 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1263 return -TARGET_EFAULT
;
1265 while (cmsg
&& target_cmsg
) {
1266 void *data
= CMSG_DATA(cmsg
);
1267 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1269 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
1271 space
+= TARGET_CMSG_SPACE(len
);
1272 if (space
> msg_controllen
) {
1273 space
-= TARGET_CMSG_SPACE(len
);
1274 gemu_log("Target cmsg overflow\n");
1278 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1279 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1281 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1283 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1284 target_cmsg
->cmsg_len
= tswapal(TARGET_CMSG_LEN(len
));
1286 switch (cmsg
->cmsg_level
) {
1288 switch (cmsg
->cmsg_type
) {
1291 int *fd
= (int *)data
;
1292 int *target_fd
= (int *)target_data
;
1293 int i
, numfds
= len
/ sizeof(int);
1295 for (i
= 0; i
< numfds
; i
++)
1296 target_fd
[i
] = tswap32(fd
[i
]);
1301 struct timeval
*tv
= (struct timeval
*)data
;
1302 struct target_timeval
*target_tv
=
1303 (struct target_timeval
*)target_data
;
1305 if (len
!= sizeof(struct timeval
))
1308 /* copy struct timeval to target */
1309 target_tv
->tv_sec
= tswapal(tv
->tv_sec
);
1310 target_tv
->tv_usec
= tswapal(tv
->tv_usec
);
1313 case SCM_CREDENTIALS
:
1315 struct ucred
*cred
= (struct ucred
*)data
;
1316 struct target_ucred
*target_cred
=
1317 (struct target_ucred
*)target_data
;
1319 __put_user(cred
->pid
, &target_cred
->pid
);
1320 __put_user(cred
->uid
, &target_cred
->uid
);
1321 __put_user(cred
->gid
, &target_cred
->gid
);
1331 gemu_log("Unsupported ancillary data: %d/%d\n",
1332 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1333 memcpy(target_data
, data
, len
);
1336 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1337 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
);
1339 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1341 target_msgh
->msg_controllen
= tswapal(space
);
1345 /* do_setsockopt() Must return target values and target errnos. */
1346 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
1347 abi_ulong optval_addr
, socklen_t optlen
)
1351 struct ip_mreqn
*ip_mreq
;
1352 struct ip_mreq_source
*ip_mreq_source
;
1356 /* TCP options all take an 'int' value. */
1357 if (optlen
< sizeof(uint32_t))
1358 return -TARGET_EINVAL
;
1360 if (get_user_u32(val
, optval_addr
))
1361 return -TARGET_EFAULT
;
1362 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1369 case IP_ROUTER_ALERT
:
1373 case IP_MTU_DISCOVER
:
1379 case IP_MULTICAST_TTL
:
1380 case IP_MULTICAST_LOOP
:
1382 if (optlen
>= sizeof(uint32_t)) {
1383 if (get_user_u32(val
, optval_addr
))
1384 return -TARGET_EFAULT
;
1385 } else if (optlen
>= 1) {
1386 if (get_user_u8(val
, optval_addr
))
1387 return -TARGET_EFAULT
;
1389 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1391 case IP_ADD_MEMBERSHIP
:
1392 case IP_DROP_MEMBERSHIP
:
1393 if (optlen
< sizeof (struct target_ip_mreq
) ||
1394 optlen
> sizeof (struct target_ip_mreqn
))
1395 return -TARGET_EINVAL
;
1397 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
1398 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
1399 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
1402 case IP_BLOCK_SOURCE
:
1403 case IP_UNBLOCK_SOURCE
:
1404 case IP_ADD_SOURCE_MEMBERSHIP
:
1405 case IP_DROP_SOURCE_MEMBERSHIP
:
1406 if (optlen
!= sizeof (struct target_ip_mreq_source
))
1407 return -TARGET_EINVAL
;
1409 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
1410 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
1411 unlock_user (ip_mreq_source
, optval_addr
, 0);
1420 case IPV6_MTU_DISCOVER
:
1423 case IPV6_RECVPKTINFO
:
1425 if (optlen
< sizeof(uint32_t)) {
1426 return -TARGET_EINVAL
;
1428 if (get_user_u32(val
, optval_addr
)) {
1429 return -TARGET_EFAULT
;
1431 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1432 &val
, sizeof(val
)));
1441 /* struct icmp_filter takes an u32 value */
1442 if (optlen
< sizeof(uint32_t)) {
1443 return -TARGET_EINVAL
;
1446 if (get_user_u32(val
, optval_addr
)) {
1447 return -TARGET_EFAULT
;
1449 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1450 &val
, sizeof(val
)));
1457 case TARGET_SOL_SOCKET
:
1459 case TARGET_SO_RCVTIMEO
:
1463 optname
= SO_RCVTIMEO
;
1466 if (optlen
!= sizeof(struct target_timeval
)) {
1467 return -TARGET_EINVAL
;
1470 if (copy_from_user_timeval(&tv
, optval_addr
)) {
1471 return -TARGET_EFAULT
;
1474 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
1478 case TARGET_SO_SNDTIMEO
:
1479 optname
= SO_SNDTIMEO
;
1481 case TARGET_SO_ATTACH_FILTER
:
1483 struct target_sock_fprog
*tfprog
;
1484 struct target_sock_filter
*tfilter
;
1485 struct sock_fprog fprog
;
1486 struct sock_filter
*filter
;
1489 if (optlen
!= sizeof(*tfprog
)) {
1490 return -TARGET_EINVAL
;
1492 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
1493 return -TARGET_EFAULT
;
1495 if (!lock_user_struct(VERIFY_READ
, tfilter
,
1496 tswapal(tfprog
->filter
), 0)) {
1497 unlock_user_struct(tfprog
, optval_addr
, 1);
1498 return -TARGET_EFAULT
;
1501 fprog
.len
= tswap16(tfprog
->len
);
1502 filter
= malloc(fprog
.len
* sizeof(*filter
));
1503 if (filter
== NULL
) {
1504 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
1505 unlock_user_struct(tfprog
, optval_addr
, 1);
1506 return -TARGET_ENOMEM
;
1508 for (i
= 0; i
< fprog
.len
; i
++) {
1509 filter
[i
].code
= tswap16(tfilter
[i
].code
);
1510 filter
[i
].jt
= tfilter
[i
].jt
;
1511 filter
[i
].jf
= tfilter
[i
].jf
;
1512 filter
[i
].k
= tswap32(tfilter
[i
].k
);
1514 fprog
.filter
= filter
;
1516 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
1517 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
1520 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
1521 unlock_user_struct(tfprog
, optval_addr
, 1);
1524 case TARGET_SO_BINDTODEVICE
:
1526 char *dev_ifname
, *addr_ifname
;
1528 if (optlen
> IFNAMSIZ
- 1) {
1529 optlen
= IFNAMSIZ
- 1;
1531 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
1533 return -TARGET_EFAULT
;
1535 optname
= SO_BINDTODEVICE
;
1536 addr_ifname
= alloca(IFNAMSIZ
);
1537 memcpy(addr_ifname
, dev_ifname
, optlen
);
1538 addr_ifname
[optlen
] = 0;
1539 ret
= get_errno(setsockopt(sockfd
, level
, optname
, addr_ifname
, optlen
));
1540 unlock_user (dev_ifname
, optval_addr
, 0);
1543 /* Options with 'int' argument. */
1544 case TARGET_SO_DEBUG
:
1547 case TARGET_SO_REUSEADDR
:
1548 optname
= SO_REUSEADDR
;
1550 case TARGET_SO_TYPE
:
1553 case TARGET_SO_ERROR
:
1556 case TARGET_SO_DONTROUTE
:
1557 optname
= SO_DONTROUTE
;
1559 case TARGET_SO_BROADCAST
:
1560 optname
= SO_BROADCAST
;
1562 case TARGET_SO_SNDBUF
:
1563 optname
= SO_SNDBUF
;
1565 case TARGET_SO_SNDBUFFORCE
:
1566 optname
= SO_SNDBUFFORCE
;
1568 case TARGET_SO_RCVBUF
:
1569 optname
= SO_RCVBUF
;
1571 case TARGET_SO_RCVBUFFORCE
:
1572 optname
= SO_RCVBUFFORCE
;
1574 case TARGET_SO_KEEPALIVE
:
1575 optname
= SO_KEEPALIVE
;
1577 case TARGET_SO_OOBINLINE
:
1578 optname
= SO_OOBINLINE
;
1580 case TARGET_SO_NO_CHECK
:
1581 optname
= SO_NO_CHECK
;
1583 case TARGET_SO_PRIORITY
:
1584 optname
= SO_PRIORITY
;
1587 case TARGET_SO_BSDCOMPAT
:
1588 optname
= SO_BSDCOMPAT
;
1591 case TARGET_SO_PASSCRED
:
1592 optname
= SO_PASSCRED
;
1594 case TARGET_SO_PASSSEC
:
1595 optname
= SO_PASSSEC
;
1597 case TARGET_SO_TIMESTAMP
:
1598 optname
= SO_TIMESTAMP
;
1600 case TARGET_SO_RCVLOWAT
:
1601 optname
= SO_RCVLOWAT
;
1607 if (optlen
< sizeof(uint32_t))
1608 return -TARGET_EINVAL
;
1610 if (get_user_u32(val
, optval_addr
))
1611 return -TARGET_EFAULT
;
1612 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
1616 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
1617 ret
= -TARGET_ENOPROTOOPT
;
1622 /* do_getsockopt() Must return target values and target errnos. */
1623 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
1624 abi_ulong optval_addr
, abi_ulong optlen
)
1631 case TARGET_SOL_SOCKET
:
1634 /* These don't just return a single integer */
1635 case TARGET_SO_LINGER
:
1636 case TARGET_SO_RCVTIMEO
:
1637 case TARGET_SO_SNDTIMEO
:
1638 case TARGET_SO_PEERNAME
:
1640 case TARGET_SO_PEERCRED
: {
1643 struct target_ucred
*tcr
;
1645 if (get_user_u32(len
, optlen
)) {
1646 return -TARGET_EFAULT
;
1649 return -TARGET_EINVAL
;
1653 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
1661 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
1662 return -TARGET_EFAULT
;
1664 __put_user(cr
.pid
, &tcr
->pid
);
1665 __put_user(cr
.uid
, &tcr
->uid
);
1666 __put_user(cr
.gid
, &tcr
->gid
);
1667 unlock_user_struct(tcr
, optval_addr
, 1);
1668 if (put_user_u32(len
, optlen
)) {
1669 return -TARGET_EFAULT
;
1673 /* Options with 'int' argument. */
1674 case TARGET_SO_DEBUG
:
1677 case TARGET_SO_REUSEADDR
:
1678 optname
= SO_REUSEADDR
;
1680 case TARGET_SO_TYPE
:
1683 case TARGET_SO_ERROR
:
1686 case TARGET_SO_DONTROUTE
:
1687 optname
= SO_DONTROUTE
;
1689 case TARGET_SO_BROADCAST
:
1690 optname
= SO_BROADCAST
;
1692 case TARGET_SO_SNDBUF
:
1693 optname
= SO_SNDBUF
;
1695 case TARGET_SO_RCVBUF
:
1696 optname
= SO_RCVBUF
;
1698 case TARGET_SO_KEEPALIVE
:
1699 optname
= SO_KEEPALIVE
;
1701 case TARGET_SO_OOBINLINE
:
1702 optname
= SO_OOBINLINE
;
1704 case TARGET_SO_NO_CHECK
:
1705 optname
= SO_NO_CHECK
;
1707 case TARGET_SO_PRIORITY
:
1708 optname
= SO_PRIORITY
;
1711 case TARGET_SO_BSDCOMPAT
:
1712 optname
= SO_BSDCOMPAT
;
1715 case TARGET_SO_PASSCRED
:
1716 optname
= SO_PASSCRED
;
1718 case TARGET_SO_TIMESTAMP
:
1719 optname
= SO_TIMESTAMP
;
1721 case TARGET_SO_RCVLOWAT
:
1722 optname
= SO_RCVLOWAT
;
1724 case TARGET_SO_ACCEPTCONN
:
1725 optname
= SO_ACCEPTCONN
;
1732 /* TCP options all take an 'int' value. */
1734 if (get_user_u32(len
, optlen
))
1735 return -TARGET_EFAULT
;
1737 return -TARGET_EINVAL
;
1739 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1742 if (optname
== SO_TYPE
) {
1743 val
= host_to_target_sock_type(val
);
1748 if (put_user_u32(val
, optval_addr
))
1749 return -TARGET_EFAULT
;
1751 if (put_user_u8(val
, optval_addr
))
1752 return -TARGET_EFAULT
;
1754 if (put_user_u32(len
, optlen
))
1755 return -TARGET_EFAULT
;
1762 case IP_ROUTER_ALERT
:
1766 case IP_MTU_DISCOVER
:
1772 case IP_MULTICAST_TTL
:
1773 case IP_MULTICAST_LOOP
:
1774 if (get_user_u32(len
, optlen
))
1775 return -TARGET_EFAULT
;
1777 return -TARGET_EINVAL
;
1779 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1782 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
1784 if (put_user_u32(len
, optlen
)
1785 || put_user_u8(val
, optval_addr
))
1786 return -TARGET_EFAULT
;
1788 if (len
> sizeof(int))
1790 if (put_user_u32(len
, optlen
)
1791 || put_user_u32(val
, optval_addr
))
1792 return -TARGET_EFAULT
;
1796 ret
= -TARGET_ENOPROTOOPT
;
1802 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1804 ret
= -TARGET_EOPNOTSUPP
;
1810 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
1811 int count
, int copy
)
1813 struct target_iovec
*target_vec
;
1815 abi_ulong total_len
, max_len
;
1818 bool bad_address
= false;
1824 if (count
< 0 || count
> IOV_MAX
) {
1829 vec
= calloc(count
, sizeof(struct iovec
));
1835 target_vec
= lock_user(VERIFY_READ
, target_addr
,
1836 count
* sizeof(struct target_iovec
), 1);
1837 if (target_vec
== NULL
) {
1842 /* ??? If host page size > target page size, this will result in a
1843 value larger than what we can actually support. */
1844 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
1847 for (i
= 0; i
< count
; i
++) {
1848 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
1849 abi_long len
= tswapal(target_vec
[i
].iov_len
);
1854 } else if (len
== 0) {
1855 /* Zero length pointer is ignored. */
1856 vec
[i
].iov_base
= 0;
1858 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
1859 /* If the first buffer pointer is bad, this is a fault. But
1860 * subsequent bad buffers will result in a partial write; this
1861 * is realized by filling the vector with null pointers and
1863 if (!vec
[i
].iov_base
) {
1874 if (len
> max_len
- total_len
) {
1875 len
= max_len
- total_len
;
1878 vec
[i
].iov_len
= len
;
1882 unlock_user(target_vec
, target_addr
, 0);
1886 unlock_user(target_vec
, target_addr
, 0);
1893 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
1894 int count
, int copy
)
1896 struct target_iovec
*target_vec
;
1899 target_vec
= lock_user(VERIFY_READ
, target_addr
,
1900 count
* sizeof(struct target_iovec
), 1);
1902 for (i
= 0; i
< count
; i
++) {
1903 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
1904 abi_long len
= tswapal(target_vec
[i
].iov_base
);
1908 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
1910 unlock_user(target_vec
, target_addr
, 0);
1916 static inline int target_to_host_sock_type(int *type
)
1919 int target_type
= *type
;
1921 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
1922 case TARGET_SOCK_DGRAM
:
1923 host_type
= SOCK_DGRAM
;
1925 case TARGET_SOCK_STREAM
:
1926 host_type
= SOCK_STREAM
;
1929 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
1932 if (target_type
& TARGET_SOCK_CLOEXEC
) {
1933 #if defined(SOCK_CLOEXEC)
1934 host_type
|= SOCK_CLOEXEC
;
1936 return -TARGET_EINVAL
;
1939 if (target_type
& TARGET_SOCK_NONBLOCK
) {
1940 #if defined(SOCK_NONBLOCK)
1941 host_type
|= SOCK_NONBLOCK
;
1942 #elif !defined(O_NONBLOCK)
1943 return -TARGET_EINVAL
;
1950 /* Try to emulate socket type flags after socket creation. */
1951 static int sock_flags_fixup(int fd
, int target_type
)
1953 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
1954 if (target_type
& TARGET_SOCK_NONBLOCK
) {
1955 int flags
= fcntl(fd
, F_GETFL
);
1956 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
1958 return -TARGET_EINVAL
;
1965 /* do_socket() Must return target values and target errnos. */
1966 static abi_long
do_socket(int domain
, int type
, int protocol
)
1968 int target_type
= type
;
1971 ret
= target_to_host_sock_type(&type
);
1976 if (domain
== PF_NETLINK
)
1977 return -TARGET_EAFNOSUPPORT
;
1978 ret
= get_errno(socket(domain
, type
, protocol
));
1980 ret
= sock_flags_fixup(ret
, target_type
);
1985 /* do_bind() Must return target values and target errnos. */
1986 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
1992 if ((int)addrlen
< 0) {
1993 return -TARGET_EINVAL
;
1996 addr
= alloca(addrlen
+1);
1998 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
2002 return get_errno(bind(sockfd
, addr
, addrlen
));
2005 /* do_connect() Must return target values and target errnos. */
2006 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
2012 if ((int)addrlen
< 0) {
2013 return -TARGET_EINVAL
;
2016 addr
= alloca(addrlen
+1);
2018 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
2022 return get_errno(connect(sockfd
, addr
, addrlen
));
2025 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2026 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
2027 int flags
, int send
)
2033 abi_ulong target_vec
;
2035 if (msgp
->msg_name
) {
2036 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
2037 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
2038 ret
= target_to_host_sockaddr(msg
.msg_name
, tswapal(msgp
->msg_name
),
2044 msg
.msg_name
= NULL
;
2045 msg
.msg_namelen
= 0;
2047 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
2048 msg
.msg_control
= alloca(msg
.msg_controllen
);
2049 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
2051 count
= tswapal(msgp
->msg_iovlen
);
2052 target_vec
= tswapal(msgp
->msg_iov
);
2053 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
2054 target_vec
, count
, send
);
2056 ret
= -host_to_target_errno(errno
);
2059 msg
.msg_iovlen
= count
;
2063 ret
= target_to_host_cmsg(&msg
, msgp
);
2065 ret
= get_errno(sendmsg(fd
, &msg
, flags
));
2067 ret
= get_errno(recvmsg(fd
, &msg
, flags
));
2068 if (!is_error(ret
)) {
2070 ret
= host_to_target_cmsg(msgp
, &msg
);
2071 if (!is_error(ret
)) {
2072 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
2073 if (msg
.msg_name
!= NULL
) {
2074 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
2075 msg
.msg_name
, msg
.msg_namelen
);
2087 unlock_iovec(vec
, target_vec
, count
, !send
);
2092 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
2093 int flags
, int send
)
2096 struct target_msghdr
*msgp
;
2098 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
2102 return -TARGET_EFAULT
;
2104 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
2105 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
2109 #ifdef TARGET_NR_sendmmsg
2110 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2111 * so it might not have this *mmsg-specific flag either.
2113 #ifndef MSG_WAITFORONE
2114 #define MSG_WAITFORONE 0x10000
2117 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
2118 unsigned int vlen
, unsigned int flags
,
2121 struct target_mmsghdr
*mmsgp
;
2125 if (vlen
> UIO_MAXIOV
) {
2129 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
2131 return -TARGET_EFAULT
;
2134 for (i
= 0; i
< vlen
; i
++) {
2135 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
2136 if (is_error(ret
)) {
2139 mmsgp
[i
].msg_len
= tswap32(ret
);
2140 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2141 if (flags
& MSG_WAITFORONE
) {
2142 flags
|= MSG_DONTWAIT
;
2146 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
2148 /* Return number of datagrams sent if we sent any at all;
2149 * otherwise return the error.
2158 /* If we don't have a system accept4() then just call accept.
2159 * The callsites to do_accept4() will ensure that they don't
2160 * pass a non-zero flags argument in this config.
2162 #ifndef CONFIG_ACCEPT4
/* Fallback accept4() for hosts without the real syscall: callers in this
 * file guarantee flags is zero in that configuration, so plain accept()
 * is an exact substitute.
 */
static inline int accept4(int sockfd, struct sockaddr *addr,
                          socklen_t *addrlen, int flags)
{
    return accept(sockfd, addr, addrlen);
}
2171 /* do_accept4() Must return target values and target errnos. */
2172 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
2173 abi_ulong target_addrlen_addr
, int flags
)
2180 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
2182 if (target_addr
== 0) {
2183 return get_errno(accept4(fd
, NULL
, NULL
, host_flags
));
2186 /* linux returns EINVAL if addrlen pointer is invalid */
2187 if (get_user_u32(addrlen
, target_addrlen_addr
))
2188 return -TARGET_EINVAL
;
2190 if ((int)addrlen
< 0) {
2191 return -TARGET_EINVAL
;
2194 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2195 return -TARGET_EINVAL
;
2197 addr
= alloca(addrlen
);
2199 ret
= get_errno(accept4(fd
, addr
, &addrlen
, host_flags
));
2200 if (!is_error(ret
)) {
2201 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2202 if (put_user_u32(addrlen
, target_addrlen_addr
))
2203 ret
= -TARGET_EFAULT
;
2208 /* do_getpeername() Must return target values and target errnos. */
2209 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
2210 abi_ulong target_addrlen_addr
)
2216 if (get_user_u32(addrlen
, target_addrlen_addr
))
2217 return -TARGET_EFAULT
;
2219 if ((int)addrlen
< 0) {
2220 return -TARGET_EINVAL
;
2223 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2224 return -TARGET_EFAULT
;
2226 addr
= alloca(addrlen
);
2228 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
2229 if (!is_error(ret
)) {
2230 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2231 if (put_user_u32(addrlen
, target_addrlen_addr
))
2232 ret
= -TARGET_EFAULT
;
2237 /* do_getsockname() Must return target values and target errnos. */
2238 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
2239 abi_ulong target_addrlen_addr
)
2245 if (get_user_u32(addrlen
, target_addrlen_addr
))
2246 return -TARGET_EFAULT
;
2248 if ((int)addrlen
< 0) {
2249 return -TARGET_EINVAL
;
2252 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2253 return -TARGET_EFAULT
;
2255 addr
= alloca(addrlen
);
2257 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
2258 if (!is_error(ret
)) {
2259 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2260 if (put_user_u32(addrlen
, target_addrlen_addr
))
2261 ret
= -TARGET_EFAULT
;
2266 /* do_socketpair() Must return target values and target errnos. */
2267 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
2268 abi_ulong target_tab_addr
)
2273 target_to_host_sock_type(&type
);
2275 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
2276 if (!is_error(ret
)) {
2277 if (put_user_s32(tab
[0], target_tab_addr
)
2278 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
2279 ret
= -TARGET_EFAULT
;
2284 /* do_sendto() Must return target values and target errnos. */
2285 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
2286 abi_ulong target_addr
, socklen_t addrlen
)
2292 if ((int)addrlen
< 0) {
2293 return -TARGET_EINVAL
;
2296 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
2298 return -TARGET_EFAULT
;
2300 addr
= alloca(addrlen
+1);
2301 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
2303 unlock_user(host_msg
, msg
, 0);
2306 ret
= get_errno(sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
2308 ret
= get_errno(send(fd
, host_msg
, len
, flags
));
2310 unlock_user(host_msg
, msg
, 0);
2314 /* do_recvfrom() Must return target values and target errnos. */
2315 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
2316 abi_ulong target_addr
,
2317 abi_ulong target_addrlen
)
2324 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
2326 return -TARGET_EFAULT
;
2328 if (get_user_u32(addrlen
, target_addrlen
)) {
2329 ret
= -TARGET_EFAULT
;
2332 if ((int)addrlen
< 0) {
2333 ret
= -TARGET_EINVAL
;
2336 addr
= alloca(addrlen
);
2337 ret
= get_errno(recvfrom(fd
, host_msg
, len
, flags
, addr
, &addrlen
));
2339 addr
= NULL
; /* To keep compiler quiet. */
2340 ret
= get_errno(qemu_recv(fd
, host_msg
, len
, flags
));
2342 if (!is_error(ret
)) {
2344 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2345 if (put_user_u32(addrlen
, target_addrlen
)) {
2346 ret
= -TARGET_EFAULT
;
2350 unlock_user(host_msg
, msg
, len
);
2353 unlock_user(host_msg
, msg
, 0);
2358 #ifdef TARGET_NR_socketcall
2359 /* do_socketcall() Must return target values and target errnos. */
2360 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
2362 static const unsigned ac
[] = { /* number of arguments per call */
2363 [SOCKOP_socket
] = 3, /* domain, type, protocol */
2364 [SOCKOP_bind
] = 3, /* sockfd, addr, addrlen */
2365 [SOCKOP_connect
] = 3, /* sockfd, addr, addrlen */
2366 [SOCKOP_listen
] = 2, /* sockfd, backlog */
2367 [SOCKOP_accept
] = 3, /* sockfd, addr, addrlen */
2368 [SOCKOP_accept4
] = 4, /* sockfd, addr, addrlen, flags */
2369 [SOCKOP_getsockname
] = 3, /* sockfd, addr, addrlen */
2370 [SOCKOP_getpeername
] = 3, /* sockfd, addr, addrlen */
2371 [SOCKOP_socketpair
] = 4, /* domain, type, protocol, tab */
2372 [SOCKOP_send
] = 4, /* sockfd, msg, len, flags */
2373 [SOCKOP_recv
] = 4, /* sockfd, msg, len, flags */
2374 [SOCKOP_sendto
] = 6, /* sockfd, msg, len, flags, addr, addrlen */
2375 [SOCKOP_recvfrom
] = 6, /* sockfd, msg, len, flags, addr, addrlen */
2376 [SOCKOP_shutdown
] = 2, /* sockfd, how */
2377 [SOCKOP_sendmsg
] = 3, /* sockfd, msg, flags */
2378 [SOCKOP_recvmsg
] = 3, /* sockfd, msg, flags */
2379 [SOCKOP_setsockopt
] = 5, /* sockfd, level, optname, optval, optlen */
2380 [SOCKOP_getsockopt
] = 5, /* sockfd, level, optname, optval, optlen */
2382 abi_long a
[6]; /* max 6 args */
2384 /* first, collect the arguments in a[] according to ac[] */
2385 if (num
>= 0 && num
< ARRAY_SIZE(ac
)) {
2387 assert(ARRAY_SIZE(a
) >= ac
[num
]); /* ensure we have space for args */
2388 for (i
= 0; i
< ac
[num
]; ++i
) {
2389 if (get_user_ual(a
[i
], vptr
+ i
* sizeof(abi_long
)) != 0) {
2390 return -TARGET_EFAULT
;
2395 /* now when we have the args, actually handle the call */
2397 case SOCKOP_socket
: /* domain, type, protocol */
2398 return do_socket(a
[0], a
[1], a
[2]);
2399 case SOCKOP_bind
: /* sockfd, addr, addrlen */
2400 return do_bind(a
[0], a
[1], a
[2]);
2401 case SOCKOP_connect
: /* sockfd, addr, addrlen */
2402 return do_connect(a
[0], a
[1], a
[2]);
2403 case SOCKOP_listen
: /* sockfd, backlog */
2404 return get_errno(listen(a
[0], a
[1]));
2405 case SOCKOP_accept
: /* sockfd, addr, addrlen */
2406 return do_accept4(a
[0], a
[1], a
[2], 0);
2407 case SOCKOP_accept4
: /* sockfd, addr, addrlen, flags */
2408 return do_accept4(a
[0], a
[1], a
[2], a
[3]);
2409 case SOCKOP_getsockname
: /* sockfd, addr, addrlen */
2410 return do_getsockname(a
[0], a
[1], a
[2]);
2411 case SOCKOP_getpeername
: /* sockfd, addr, addrlen */
2412 return do_getpeername(a
[0], a
[1], a
[2]);
2413 case SOCKOP_socketpair
: /* domain, type, protocol, tab */
2414 return do_socketpair(a
[0], a
[1], a
[2], a
[3]);
2415 case SOCKOP_send
: /* sockfd, msg, len, flags */
2416 return do_sendto(a
[0], a
[1], a
[2], a
[3], 0, 0);
2417 case SOCKOP_recv
: /* sockfd, msg, len, flags */
2418 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], 0, 0);
2419 case SOCKOP_sendto
: /* sockfd, msg, len, flags, addr, addrlen */
2420 return do_sendto(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
2421 case SOCKOP_recvfrom
: /* sockfd, msg, len, flags, addr, addrlen */
2422 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
2423 case SOCKOP_shutdown
: /* sockfd, how */
2424 return get_errno(shutdown(a
[0], a
[1]));
2425 case SOCKOP_sendmsg
: /* sockfd, msg, flags */
2426 return do_sendrecvmsg(a
[0], a
[1], a
[2], 1);
2427 case SOCKOP_recvmsg
: /* sockfd, msg, flags */
2428 return do_sendrecvmsg(a
[0], a
[1], a
[2], 0);
2429 case SOCKOP_setsockopt
: /* sockfd, level, optname, optval, optlen */
2430 return do_setsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
2431 case SOCKOP_getsockopt
: /* sockfd, level, optname, optval, optlen */
2432 return do_getsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
2434 gemu_log("Unsupported socketcall: %d\n", num
);
2435 return -TARGET_ENOSYS
;
2440 #define N_SHM_REGIONS 32
2442 static struct shm_region
{
2445 } shm_regions
[N_SHM_REGIONS
];
2447 struct target_semid_ds
2449 struct target_ipc_perm sem_perm
;
2450 abi_ulong sem_otime
;
2451 #if !defined(TARGET_PPC64)
2452 abi_ulong __unused1
;
2454 abi_ulong sem_ctime
;
2455 #if !defined(TARGET_PPC64)
2456 abi_ulong __unused2
;
2458 abi_ulong sem_nsems
;
2459 abi_ulong __unused3
;
2460 abi_ulong __unused4
;
2463 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
2464 abi_ulong target_addr
)
2466 struct target_ipc_perm
*target_ip
;
2467 struct target_semid_ds
*target_sd
;
2469 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2470 return -TARGET_EFAULT
;
2471 target_ip
= &(target_sd
->sem_perm
);
2472 host_ip
->__key
= tswap32(target_ip
->__key
);
2473 host_ip
->uid
= tswap32(target_ip
->uid
);
2474 host_ip
->gid
= tswap32(target_ip
->gid
);
2475 host_ip
->cuid
= tswap32(target_ip
->cuid
);
2476 host_ip
->cgid
= tswap32(target_ip
->cgid
);
2477 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2478 host_ip
->mode
= tswap32(target_ip
->mode
);
2480 host_ip
->mode
= tswap16(target_ip
->mode
);
2482 #if defined(TARGET_PPC)
2483 host_ip
->__seq
= tswap32(target_ip
->__seq
);
2485 host_ip
->__seq
= tswap16(target_ip
->__seq
);
2487 unlock_user_struct(target_sd
, target_addr
, 0);
2491 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
2492 struct ipc_perm
*host_ip
)
2494 struct target_ipc_perm
*target_ip
;
2495 struct target_semid_ds
*target_sd
;
2497 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2498 return -TARGET_EFAULT
;
2499 target_ip
= &(target_sd
->sem_perm
);
2500 target_ip
->__key
= tswap32(host_ip
->__key
);
2501 target_ip
->uid
= tswap32(host_ip
->uid
);
2502 target_ip
->gid
= tswap32(host_ip
->gid
);
2503 target_ip
->cuid
= tswap32(host_ip
->cuid
);
2504 target_ip
->cgid
= tswap32(host_ip
->cgid
);
2505 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2506 target_ip
->mode
= tswap32(host_ip
->mode
);
2508 target_ip
->mode
= tswap16(host_ip
->mode
);
2510 #if defined(TARGET_PPC)
2511 target_ip
->__seq
= tswap32(host_ip
->__seq
);
2513 target_ip
->__seq
= tswap16(host_ip
->__seq
);
2515 unlock_user_struct(target_sd
, target_addr
, 1);
2519 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
2520 abi_ulong target_addr
)
2522 struct target_semid_ds
*target_sd
;
2524 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2525 return -TARGET_EFAULT
;
2526 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
2527 return -TARGET_EFAULT
;
2528 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
2529 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
2530 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
2531 unlock_user_struct(target_sd
, target_addr
, 0);
2535 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
2536 struct semid_ds
*host_sd
)
2538 struct target_semid_ds
*target_sd
;
2540 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2541 return -TARGET_EFAULT
;
2542 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
2543 return -TARGET_EFAULT
;
2544 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
2545 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
2546 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
2547 unlock_user_struct(target_sd
, target_addr
, 1);
2551 struct target_seminfo
{
2564 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
2565 struct seminfo
*host_seminfo
)
2567 struct target_seminfo
*target_seminfo
;
2568 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
2569 return -TARGET_EFAULT
;
2570 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
2571 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
2572 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
2573 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
2574 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
2575 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
2576 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
2577 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
2578 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
2579 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
2580 unlock_user_struct(target_seminfo
, target_addr
, 1);
2586 struct semid_ds
*buf
;
2587 unsigned short *array
;
2588 struct seminfo
*__buf
;
2591 union target_semun
{
2598 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
2599 abi_ulong target_addr
)
2602 unsigned short *array
;
2604 struct semid_ds semid_ds
;
2607 semun
.buf
= &semid_ds
;
2609 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
2611 return get_errno(ret
);
2613 nsems
= semid_ds
.sem_nsems
;
2615 *host_array
= malloc(nsems
*sizeof(unsigned short));
2617 return -TARGET_ENOMEM
;
2619 array
= lock_user(VERIFY_READ
, target_addr
,
2620 nsems
*sizeof(unsigned short), 1);
2623 return -TARGET_EFAULT
;
2626 for(i
=0; i
<nsems
; i
++) {
2627 __get_user((*host_array
)[i
], &array
[i
]);
2629 unlock_user(array
, target_addr
, 0);
2634 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
2635 unsigned short **host_array
)
2638 unsigned short *array
;
2640 struct semid_ds semid_ds
;
2643 semun
.buf
= &semid_ds
;
2645 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
2647 return get_errno(ret
);
2649 nsems
= semid_ds
.sem_nsems
;
2651 array
= lock_user(VERIFY_WRITE
, target_addr
,
2652 nsems
*sizeof(unsigned short), 0);
2654 return -TARGET_EFAULT
;
2656 for(i
=0; i
<nsems
; i
++) {
2657 __put_user((*host_array
)[i
], &array
[i
]);
2660 unlock_user(array
, target_addr
, 1);
2665 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
2666 union target_semun target_su
)
2669 struct semid_ds dsarg
;
2670 unsigned short *array
= NULL
;
2671 struct seminfo seminfo
;
2672 abi_long ret
= -TARGET_EINVAL
;
2679 /* In 64 bit cross-endian situations, we will erroneously pick up
2680 * the wrong half of the union for the "val" element. To rectify
2681 * this, the entire 8-byte structure is byteswapped, followed by
2682 * a swap of the 4 byte val field. In other cases, the data is
2683 * already in proper host byte order. */
2684 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
2685 target_su
.buf
= tswapal(target_su
.buf
);
2686 arg
.val
= tswap32(target_su
.val
);
2688 arg
.val
= target_su
.val
;
2690 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2694 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
2698 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2699 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
2706 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
2710 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2711 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
2717 arg
.__buf
= &seminfo
;
2718 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2719 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
2727 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
/* Guest-layout sembuf for semop(2).
 * NOTE(review): sem_op/sem_flg reconstructed from the kernel sembuf layout —
 * confirm against the surrounding copy of this file. */
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};
2740 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
2741 abi_ulong target_addr
,
2744 struct target_sembuf
*target_sembuf
;
2747 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
2748 nsops
*sizeof(struct target_sembuf
), 1);
2750 return -TARGET_EFAULT
;
2752 for(i
=0; i
<nsops
; i
++) {
2753 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
2754 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
2755 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
2758 unlock_user(target_sembuf
, target_addr
, 0);
2763 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
2765 struct sembuf sops
[nsops
];
2767 if (target_to_host_sembuf(sops
, ptr
, nsops
))
2768 return -TARGET_EFAULT
;
2770 return get_errno(semop(semid
, sops
, nsops
));
2773 struct target_msqid_ds
2775 struct target_ipc_perm msg_perm
;
2776 abi_ulong msg_stime
;
2777 #if TARGET_ABI_BITS == 32
2778 abi_ulong __unused1
;
2780 abi_ulong msg_rtime
;
2781 #if TARGET_ABI_BITS == 32
2782 abi_ulong __unused2
;
2784 abi_ulong msg_ctime
;
2785 #if TARGET_ABI_BITS == 32
2786 abi_ulong __unused3
;
2788 abi_ulong __msg_cbytes
;
2790 abi_ulong msg_qbytes
;
2791 abi_ulong msg_lspid
;
2792 abi_ulong msg_lrpid
;
2793 abi_ulong __unused4
;
2794 abi_ulong __unused5
;
2797 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
2798 abi_ulong target_addr
)
2800 struct target_msqid_ds
*target_md
;
2802 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
2803 return -TARGET_EFAULT
;
2804 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
2805 return -TARGET_EFAULT
;
2806 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
2807 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
2808 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
2809 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
2810 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
2811 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
2812 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
2813 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
2814 unlock_user_struct(target_md
, target_addr
, 0);
2818 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
2819 struct msqid_ds
*host_md
)
2821 struct target_msqid_ds
*target_md
;
2823 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
2824 return -TARGET_EFAULT
;
2825 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
2826 return -TARGET_EFAULT
;
2827 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
2828 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
2829 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
2830 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
2831 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
2832 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
2833 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
2834 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
2835 unlock_user_struct(target_md
, target_addr
, 1);
/* Guest-layout msginfo for msgctl IPC_INFO/MSG_INFO.
 * NOTE(review): int fields reconstructed from the kernel msginfo layout —
 * confirm against the surrounding copy of this file. */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
2850 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
2851 struct msginfo
*host_msginfo
)
2853 struct target_msginfo
*target_msginfo
;
2854 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
2855 return -TARGET_EFAULT
;
2856 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
2857 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
2858 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
2859 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
2860 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
2861 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
2862 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
2863 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
2864 unlock_user_struct(target_msginfo
, target_addr
, 1);
2868 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
2870 struct msqid_ds dsarg
;
2871 struct msginfo msginfo
;
2872 abi_long ret
= -TARGET_EINVAL
;
2880 if (target_to_host_msqid_ds(&dsarg
,ptr
))
2881 return -TARGET_EFAULT
;
2882 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
2883 if (host_to_target_msqid_ds(ptr
,&dsarg
))
2884 return -TARGET_EFAULT
;
2887 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
2891 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
2892 if (host_to_target_msginfo(ptr
, &msginfo
))
2893 return -TARGET_EFAULT
;
2900 struct target_msgbuf
{
2905 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
2906 ssize_t msgsz
, int msgflg
)
2908 struct target_msgbuf
*target_mb
;
2909 struct msgbuf
*host_mb
;
2913 return -TARGET_EINVAL
;
2916 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
2917 return -TARGET_EFAULT
;
2918 host_mb
= malloc(msgsz
+sizeof(long));
2920 unlock_user_struct(target_mb
, msgp
, 0);
2921 return -TARGET_ENOMEM
;
2923 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
2924 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
2925 ret
= get_errno(msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
2927 unlock_user_struct(target_mb
, msgp
, 0);
2932 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
2933 unsigned int msgsz
, abi_long msgtyp
,
2936 struct target_msgbuf
*target_mb
;
2938 struct msgbuf
*host_mb
;
2941 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
2942 return -TARGET_EFAULT
;
2944 host_mb
= g_malloc(msgsz
+sizeof(long));
2945 ret
= get_errno(msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
2948 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
2949 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
2950 if (!target_mtext
) {
2951 ret
= -TARGET_EFAULT
;
2954 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
2955 unlock_user(target_mtext
, target_mtext_addr
, ret
);
2958 target_mb
->mtype
= tswapal(host_mb
->mtype
);
2962 unlock_user_struct(target_mb
, msgp
, 1);
2967 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
2968 abi_ulong target_addr
)
2970 struct target_shmid_ds
*target_sd
;
2972 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2973 return -TARGET_EFAULT
;
2974 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
2975 return -TARGET_EFAULT
;
2976 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
2977 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
2978 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
2979 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
2980 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
2981 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
2982 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
2983 unlock_user_struct(target_sd
, target_addr
, 0);
2987 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
2988 struct shmid_ds
*host_sd
)
2990 struct target_shmid_ds
*target_sd
;
2992 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2993 return -TARGET_EFAULT
;
2994 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
2995 return -TARGET_EFAULT
;
2996 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
2997 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
2998 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
2999 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
3000 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
3001 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
3002 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
3003 unlock_user_struct(target_sd
, target_addr
, 1);
3007 struct target_shminfo
{
3015 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
3016 struct shminfo
*host_shminfo
)
3018 struct target_shminfo
*target_shminfo
;
3019 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
3020 return -TARGET_EFAULT
;
3021 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
3022 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
3023 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
3024 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
3025 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
3026 unlock_user_struct(target_shminfo
, target_addr
, 1);
3030 struct target_shm_info
{
3035 abi_ulong swap_attempts
;
3036 abi_ulong swap_successes
;
3039 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
3040 struct shm_info
*host_shm_info
)
3042 struct target_shm_info
*target_shm_info
;
3043 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
3044 return -TARGET_EFAULT
;
3045 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
3046 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
3047 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
3048 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
3049 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
3050 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
3051 unlock_user_struct(target_shm_info
, target_addr
, 1);
3055 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
3057 struct shmid_ds dsarg
;
3058 struct shminfo shminfo
;
3059 struct shm_info shm_info
;
3060 abi_long ret
= -TARGET_EINVAL
;
3068 if (target_to_host_shmid_ds(&dsarg
, buf
))
3069 return -TARGET_EFAULT
;
3070 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
3071 if (host_to_target_shmid_ds(buf
, &dsarg
))
3072 return -TARGET_EFAULT
;
3075 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
3076 if (host_to_target_shminfo(buf
, &shminfo
))
3077 return -TARGET_EFAULT
;
3080 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
3081 if (host_to_target_shm_info(buf
, &shm_info
))
3082 return -TARGET_EFAULT
;
3087 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
3094 static inline abi_ulong
do_shmat(int shmid
, abi_ulong shmaddr
, int shmflg
)
3098 struct shmid_ds shm_info
;
3101 /* find out the length of the shared memory segment */
3102 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
3103 if (is_error(ret
)) {
3104 /* can't get length, bail out */
3111 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
3113 abi_ulong mmap_start
;
3115 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
3117 if (mmap_start
== -1) {
3119 host_raddr
= (void *)-1;
3121 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
3124 if (host_raddr
== (void *)-1) {
3126 return get_errno((long)host_raddr
);
3128 raddr
=h2g((unsigned long)host_raddr
);
3130 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
3131 PAGE_VALID
| PAGE_READ
|
3132 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
3134 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
3135 if (shm_regions
[i
].start
== 0) {
3136 shm_regions
[i
].start
= raddr
;
3137 shm_regions
[i
].size
= shm_info
.shm_segsz
;
3147 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
3151 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
3152 if (shm_regions
[i
].start
== shmaddr
) {
3153 shm_regions
[i
].start
= 0;
3154 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
3159 return get_errno(shmdt(g2h(shmaddr
)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
static abi_long do_ipc(unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third,
                        (union target_semun) atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old-style call packs msgp/msgtyp in a kludge struct. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
3269 /* kernel structure types definitions */
3271 #define STRUCT(name, ...) STRUCT_ ## name,
3272 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3274 #include "syscall_types.h"
3277 #undef STRUCT_SPECIAL
3279 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3280 #define STRUCT_SPECIAL(name)
3281 #include "syscall_types.h"
3283 #undef STRUCT_SPECIAL
3285 typedef struct IOCTLEntry IOCTLEntry
;
3287 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3288 int fd
, abi_long cmd
, abi_long arg
);
3292 unsigned int host_cmd
;
3295 do_ioctl_fn
*do_ioctl
;
3296 const argtype arg_type
[5];
3299 #define IOC_R 0x0001
3300 #define IOC_W 0x0002
3301 #define IOC_RW (IOC_R | IOC_W)
3303 #define MAX_STRUCT_SIZE 4096
3305 #ifdef CONFIG_FIEMAP
3306 /* So fiemap access checks don't overflow on 32 bit systems.
3307 * This is very slightly smaller than the limit imposed by
3308 * the underlying kernel.
3310 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3311 / sizeof(struct fiemap_extent))
3313 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3314 int fd
, abi_long cmd
, abi_long arg
)
3316 /* The parameter for this ioctl is a struct fiemap followed
3317 * by an array of struct fiemap_extent whose size is set
3318 * in fiemap->fm_extent_count. The array is filled in by the
3321 int target_size_in
, target_size_out
;
3323 const argtype
*arg_type
= ie
->arg_type
;
3324 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
3327 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
3331 assert(arg_type
[0] == TYPE_PTR
);
3332 assert(ie
->access
== IOC_RW
);
3334 target_size_in
= thunk_type_size(arg_type
, 0);
3335 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
3337 return -TARGET_EFAULT
;
3339 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3340 unlock_user(argptr
, arg
, 0);
3341 fm
= (struct fiemap
*)buf_temp
;
3342 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
3343 return -TARGET_EINVAL
;
3346 outbufsz
= sizeof (*fm
) +
3347 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
3349 if (outbufsz
> MAX_STRUCT_SIZE
) {
3350 /* We can't fit all the extents into the fixed size buffer.
3351 * Allocate one that is large enough and use it instead.
3353 fm
= malloc(outbufsz
);
3355 return -TARGET_ENOMEM
;
3357 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
3360 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, fm
));
3361 if (!is_error(ret
)) {
3362 target_size_out
= target_size_in
;
3363 /* An extent_count of 0 means we were only counting the extents
3364 * so there are no structs to copy
3366 if (fm
->fm_extent_count
!= 0) {
3367 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
3369 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
3371 ret
= -TARGET_EFAULT
;
3373 /* Convert the struct fiemap */
3374 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
3375 if (fm
->fm_extent_count
!= 0) {
3376 p
= argptr
+ target_size_in
;
3377 /* ...and then all the struct fiemap_extents */
3378 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
3379 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
3384 unlock_user(argptr
, arg
, target_size_out
);
3394 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3395 int fd
, abi_long cmd
, abi_long arg
)
3397 const argtype
*arg_type
= ie
->arg_type
;
3401 struct ifconf
*host_ifconf
;
3403 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
3404 int target_ifreq_size
;
3409 abi_long target_ifc_buf
;
3413 assert(arg_type
[0] == TYPE_PTR
);
3414 assert(ie
->access
== IOC_RW
);
3417 target_size
= thunk_type_size(arg_type
, 0);
3419 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3421 return -TARGET_EFAULT
;
3422 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3423 unlock_user(argptr
, arg
, 0);
3425 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
3426 target_ifc_len
= host_ifconf
->ifc_len
;
3427 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
3429 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
3430 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
3431 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
3433 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
3434 if (outbufsz
> MAX_STRUCT_SIZE
) {
3435 /* We can't fit all the extents into the fixed size buffer.
3436 * Allocate one that is large enough and use it instead.
3438 host_ifconf
= malloc(outbufsz
);
3440 return -TARGET_ENOMEM
;
3442 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
3445 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
3447 host_ifconf
->ifc_len
= host_ifc_len
;
3448 host_ifconf
->ifc_buf
= host_ifc_buf
;
3450 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, host_ifconf
));
3451 if (!is_error(ret
)) {
3452 /* convert host ifc_len to target ifc_len */
3454 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
3455 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
3456 host_ifconf
->ifc_len
= target_ifc_len
;
3458 /* restore target ifc_buf */
3460 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
3462 /* copy struct ifconf to target user */
3464 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3466 return -TARGET_EFAULT
;
3467 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
3468 unlock_user(argptr
, arg
, target_size
);
3470 /* copy ifreq[] to target user */
3472 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
3473 for (i
= 0; i
< nb_ifreq
; i
++) {
3474 thunk_convert(argptr
+ i
* target_ifreq_size
,
3475 host_ifc_buf
+ i
* sizeof(struct ifreq
),
3476 ifreq_arg_type
, THUNK_TARGET
);
3478 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
3488 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
3489 abi_long cmd
, abi_long arg
)
3492 struct dm_ioctl
*host_dm
;
3493 abi_long guest_data
;
3494 uint32_t guest_data_size
;
3496 const argtype
*arg_type
= ie
->arg_type
;
3498 void *big_buf
= NULL
;
3502 target_size
= thunk_type_size(arg_type
, 0);
3503 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3505 ret
= -TARGET_EFAULT
;
3508 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3509 unlock_user(argptr
, arg
, 0);
3511 /* buf_temp is too small, so fetch things into a bigger buffer */
3512 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
3513 memcpy(big_buf
, buf_temp
, target_size
);
3517 guest_data
= arg
+ host_dm
->data_start
;
3518 if ((guest_data
- arg
) < 0) {
3522 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
3523 host_data
= (char*)host_dm
+ host_dm
->data_start
;
3525 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
3526 switch (ie
->host_cmd
) {
3528 case DM_LIST_DEVICES
:
3531 case DM_DEV_SUSPEND
:
3534 case DM_TABLE_STATUS
:
3535 case DM_TABLE_CLEAR
:
3537 case DM_LIST_VERSIONS
:
3541 case DM_DEV_SET_GEOMETRY
:
3542 /* data contains only strings */
3543 memcpy(host_data
, argptr
, guest_data_size
);
3546 memcpy(host_data
, argptr
, guest_data_size
);
3547 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
3551 void *gspec
= argptr
;
3552 void *cur_data
= host_data
;
3553 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
3554 int spec_size
= thunk_type_size(arg_type
, 0);
3557 for (i
= 0; i
< host_dm
->target_count
; i
++) {
3558 struct dm_target_spec
*spec
= cur_data
;
3562 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
3563 slen
= strlen((char*)gspec
+ spec_size
) + 1;
3565 spec
->next
= sizeof(*spec
) + slen
;
3566 strcpy((char*)&spec
[1], gspec
+ spec_size
);
3568 cur_data
+= spec
->next
;
3573 ret
= -TARGET_EINVAL
;
3576 unlock_user(argptr
, guest_data
, 0);
3578 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3579 if (!is_error(ret
)) {
3580 guest_data
= arg
+ host_dm
->data_start
;
3581 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
3582 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
3583 switch (ie
->host_cmd
) {
3588 case DM_DEV_SUSPEND
:
3591 case DM_TABLE_CLEAR
:
3593 case DM_DEV_SET_GEOMETRY
:
3594 /* no return data */
3596 case DM_LIST_DEVICES
:
3598 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
3599 uint32_t remaining_data
= guest_data_size
;
3600 void *cur_data
= argptr
;
3601 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
3602 int nl_size
= 12; /* can't use thunk_size due to alignment */
3605 uint32_t next
= nl
->next
;
3607 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
3609 if (remaining_data
< nl
->next
) {
3610 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3613 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
3614 strcpy(cur_data
+ nl_size
, nl
->name
);
3615 cur_data
+= nl
->next
;
3616 remaining_data
-= nl
->next
;
3620 nl
= (void*)nl
+ next
;
3625 case DM_TABLE_STATUS
:
3627 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
3628 void *cur_data
= argptr
;
3629 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
3630 int spec_size
= thunk_type_size(arg_type
, 0);
3633 for (i
= 0; i
< host_dm
->target_count
; i
++) {
3634 uint32_t next
= spec
->next
;
3635 int slen
= strlen((char*)&spec
[1]) + 1;
3636 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
3637 if (guest_data_size
< spec
->next
) {
3638 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3641 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
3642 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
3643 cur_data
= argptr
+ spec
->next
;
3644 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
3650 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
3651 int count
= *(uint32_t*)hdata
;
3652 uint64_t *hdev
= hdata
+ 8;
3653 uint64_t *gdev
= argptr
+ 8;
3656 *(uint32_t*)argptr
= tswap32(count
);
3657 for (i
= 0; i
< count
; i
++) {
3658 *gdev
= tswap64(*hdev
);
3664 case DM_LIST_VERSIONS
:
3666 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
3667 uint32_t remaining_data
= guest_data_size
;
3668 void *cur_data
= argptr
;
3669 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
3670 int vers_size
= thunk_type_size(arg_type
, 0);
3673 uint32_t next
= vers
->next
;
3675 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
3677 if (remaining_data
< vers
->next
) {
3678 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3681 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
3682 strcpy(cur_data
+ vers_size
, vers
->name
);
3683 cur_data
+= vers
->next
;
3684 remaining_data
-= vers
->next
;
3688 vers
= (void*)vers
+ next
;
3693 ret
= -TARGET_EINVAL
;
3696 unlock_user(argptr
, guest_data
, guest_data_size
);
3698 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3700 ret
= -TARGET_EFAULT
;
3703 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3704 unlock_user(argptr
, arg
, target_size
);
3711 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
3712 abi_long cmd
, abi_long arg
)
3716 const argtype
*arg_type
= ie
->arg_type
;
3717 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
3720 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
3721 struct blkpg_partition host_part
;
3723 /* Read and convert blkpg */
3725 target_size
= thunk_type_size(arg_type
, 0);
3726 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3728 ret
= -TARGET_EFAULT
;
3731 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3732 unlock_user(argptr
, arg
, 0);
3734 switch (host_blkpg
->op
) {
3735 case BLKPG_ADD_PARTITION
:
3736 case BLKPG_DEL_PARTITION
:
3737 /* payload is struct blkpg_partition */
3740 /* Unknown opcode */
3741 ret
= -TARGET_EINVAL
;
3745 /* Read and convert blkpg->data */
3746 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
3747 target_size
= thunk_type_size(part_arg_type
, 0);
3748 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3750 ret
= -TARGET_EFAULT
;
3753 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
3754 unlock_user(argptr
, arg
, 0);
3756 /* Swizzle the data pointer to our local copy and call! */
3757 host_blkpg
->data
= &host_part
;
3758 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, host_blkpg
));
3764 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3765 int fd
, abi_long cmd
, abi_long arg
)
3767 const argtype
*arg_type
= ie
->arg_type
;
3768 const StructEntry
*se
;
3769 const argtype
*field_types
;
3770 const int *dst_offsets
, *src_offsets
;
3773 abi_ulong
*target_rt_dev_ptr
;
3774 unsigned long *host_rt_dev_ptr
;
3778 assert(ie
->access
== IOC_W
);
3779 assert(*arg_type
== TYPE_PTR
);
3781 assert(*arg_type
== TYPE_STRUCT
);
3782 target_size
= thunk_type_size(arg_type
, 0);
3783 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3785 return -TARGET_EFAULT
;
3788 assert(*arg_type
== (int)STRUCT_rtentry
);
3789 se
= struct_entries
+ *arg_type
++;
3790 assert(se
->convert
[0] == NULL
);
3791 /* convert struct here to be able to catch rt_dev string */
3792 field_types
= se
->field_types
;
3793 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
3794 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
3795 for (i
= 0; i
< se
->nb_fields
; i
++) {
3796 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
3797 assert(*field_types
== TYPE_PTRVOID
);
3798 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
3799 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
3800 if (*target_rt_dev_ptr
!= 0) {
3801 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
3802 tswapal(*target_rt_dev_ptr
));
3803 if (!*host_rt_dev_ptr
) {
3804 unlock_user(argptr
, arg
, 0);
3805 return -TARGET_EFAULT
;
3808 *host_rt_dev_ptr
= 0;
3813 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
3814 argptr
+ src_offsets
[i
],
3815 field_types
, THUNK_HOST
);
3817 unlock_user(argptr
, arg
, 0);
3819 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3820 if (*host_rt_dev_ptr
!= 0) {
3821 unlock_user((void *)*host_rt_dev_ptr
,
3822 *target_rt_dev_ptr
, 0);
3827 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3828 int fd
, abi_long cmd
, abi_long arg
)
3830 int sig
= target_to_host_signal(arg
);
3831 return get_errno(ioctl(fd
, ie
->host_cmd
, sig
));
3834 static IOCTLEntry ioctl_entries
[] = {
3835 #define IOCTL(cmd, access, ...) \
3836 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3837 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3838 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3843 /* ??? Implement proper locking for ioctls. */
3844 /* do_ioctl() Must return target values and target errnos. */
3845 static abi_long
do_ioctl(int fd
, abi_long cmd
, abi_long arg
)
3847 const IOCTLEntry
*ie
;
3848 const argtype
*arg_type
;
3850 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
3856 if (ie
->target_cmd
== 0) {
3857 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
3858 return -TARGET_ENOSYS
;
3860 if (ie
->target_cmd
== cmd
)
3864 arg_type
= ie
->arg_type
;
3866 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
3869 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
3872 switch(arg_type
[0]) {
3875 ret
= get_errno(ioctl(fd
, ie
->host_cmd
));
3880 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, arg
));
3884 target_size
= thunk_type_size(arg_type
, 0);
3885 switch(ie
->access
) {
3887 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3888 if (!is_error(ret
)) {
3889 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3891 return -TARGET_EFAULT
;
3892 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3893 unlock_user(argptr
, arg
, target_size
);
3897 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3899 return -TARGET_EFAULT
;
3900 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3901 unlock_user(argptr
, arg
, 0);
3902 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3906 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3908 return -TARGET_EFAULT
;
3909 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3910 unlock_user(argptr
, arg
, 0);
3911 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3912 if (!is_error(ret
)) {
3913 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3915 return -TARGET_EFAULT
;
3916 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3917 unlock_user(argptr
, arg
, target_size
);
3923 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3924 (long)cmd
, arg_type
[0]);
3925 ret
= -TARGET_ENOSYS
;
3931 static const bitmask_transtbl iflag_tbl
[] = {
3932 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
3933 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
3934 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
3935 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
3936 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
3937 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
3938 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
3939 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
3940 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
3941 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
3942 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
3943 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
3944 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
3945 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
3949 static const bitmask_transtbl oflag_tbl
[] = {
3950 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
3951 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
3952 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
3953 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
3954 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
3955 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
3956 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
3957 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
3958 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
3959 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
3960 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
3961 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
3962 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
3963 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
3964 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
3965 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
3966 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
3967 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
3968 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
3969 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
3970 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
3971 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
3972 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
3973 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
3977 static const bitmask_transtbl cflag_tbl
[] = {
3978 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
3979 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
3980 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
3981 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
3982 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
3983 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
3984 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
3985 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
3986 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
3987 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
3988 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
3989 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
3990 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
3991 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
3992 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
3993 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
3994 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
3995 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
3996 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
3997 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
3998 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
3999 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
4000 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
4001 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
4002 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
4003 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
4004 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
4005 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
4006 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
4007 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
4008 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
4012 static const bitmask_transtbl lflag_tbl
[] = {
4013 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
4014 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
4015 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
4016 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
4017 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
4018 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
4019 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
4020 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
4021 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
4022 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
4023 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
4024 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
4025 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
4026 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
4027 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
4031 static void target_to_host_termios (void *dst
, const void *src
)
4033 struct host_termios
*host
= dst
;
4034 const struct target_termios
*target
= src
;
4037 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
4039 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
4041 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
4043 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
4044 host
->c_line
= target
->c_line
;
4046 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
4047 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
4048 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
4049 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
4050 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
4051 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
4052 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
4053 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
4054 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
4055 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
4056 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
4057 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
4058 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
4059 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
4060 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
4061 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
4062 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
4063 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
4066 static void host_to_target_termios (void *dst
, const void *src
)
4068 struct target_termios
*target
= dst
;
4069 const struct host_termios
*host
= src
;
4072 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
4074 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
4076 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
4078 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
4079 target
->c_line
= host
->c_line
;
4081 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
4082 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
4083 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
4084 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
4085 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
4086 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
4087 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
4088 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
4089 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
4090 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
4091 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
4092 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
4093 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
4094 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
4095 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
4096 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
4097 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
4098 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
4101 static const StructEntry struct_termios_def
= {
4102 .convert
= { host_to_target_termios
, target_to_host_termios
},
4103 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
4104 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
4107 static bitmask_transtbl mmap_flags_tbl
[] = {
4108 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
4109 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
4110 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
4111 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
, MAP_ANONYMOUS
, MAP_ANONYMOUS
},
4112 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
, MAP_GROWSDOWN
, MAP_GROWSDOWN
},
4113 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
, MAP_DENYWRITE
, MAP_DENYWRITE
},
4114 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
, MAP_EXECUTABLE
, MAP_EXECUTABLE
},
4115 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
4116 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
, MAP_NORESERVE
,
4121 #if defined(TARGET_I386)
4123 /* NOTE: there is really one LDT for all the threads */
4124 static uint8_t *ldt_table
;
4126 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
4133 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
4134 if (size
> bytecount
)
4136 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
4138 return -TARGET_EFAULT
;
4139 /* ??? Should this by byteswapped? */
4140 memcpy(p
, ldt_table
, size
);
4141 unlock_user(p
, ptr
, size
);
4145 /* XXX: add locking support */
4146 static abi_long
write_ldt(CPUX86State
*env
,
4147 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
4149 struct target_modify_ldt_ldt_s ldt_info
;
4150 struct target_modify_ldt_ldt_s
*target_ldt_info
;
4151 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
4152 int seg_not_present
, useable
, lm
;
4153 uint32_t *lp
, entry_1
, entry_2
;
4155 if (bytecount
!= sizeof(ldt_info
))
4156 return -TARGET_EINVAL
;
4157 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
4158 return -TARGET_EFAULT
;
4159 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
4160 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
4161 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
4162 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
4163 unlock_user_struct(target_ldt_info
, ptr
, 0);
4165 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
4166 return -TARGET_EINVAL
;
4167 seg_32bit
= ldt_info
.flags
& 1;
4168 contents
= (ldt_info
.flags
>> 1) & 3;
4169 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
4170 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
4171 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
4172 useable
= (ldt_info
.flags
>> 6) & 1;
4176 lm
= (ldt_info
.flags
>> 7) & 1;
4178 if (contents
== 3) {
4180 return -TARGET_EINVAL
;
4181 if (seg_not_present
== 0)
4182 return -TARGET_EINVAL
;
4184 /* allocate the LDT */
4186 env
->ldt
.base
= target_mmap(0,
4187 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
4188 PROT_READ
|PROT_WRITE
,
4189 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
4190 if (env
->ldt
.base
== -1)
4191 return -TARGET_ENOMEM
;
4192 memset(g2h(env
->ldt
.base
), 0,
4193 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
4194 env
->ldt
.limit
= 0xffff;
4195 ldt_table
= g2h(env
->ldt
.base
);
4198 /* NOTE: same code as Linux kernel */
4199 /* Allow LDTs to be cleared by the user. */
4200 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
4203 read_exec_only
== 1 &&
4205 limit_in_pages
== 0 &&
4206 seg_not_present
== 1 &&
4214 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
4215 (ldt_info
.limit
& 0x0ffff);
4216 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
4217 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
4218 (ldt_info
.limit
& 0xf0000) |
4219 ((read_exec_only
^ 1) << 9) |
4221 ((seg_not_present
^ 1) << 15) |
4223 (limit_in_pages
<< 23) |
4227 entry_2
|= (useable
<< 20);
4229 /* Install the new entry ... */
4231 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
4232 lp
[0] = tswap32(entry_1
);
4233 lp
[1] = tswap32(entry_2
);
4237 /* specific and weird i386 syscalls */
4238 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
4239 unsigned long bytecount
)
4245 ret
= read_ldt(ptr
, bytecount
);
4248 ret
= write_ldt(env
, ptr
, bytecount
, 1);
4251 ret
= write_ldt(env
, ptr
, bytecount
, 0);
4254 ret
= -TARGET_ENOSYS
;
4260 #if defined(TARGET_I386) && defined(TARGET_ABI32)
4261 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
4263 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
4264 struct target_modify_ldt_ldt_s ldt_info
;
4265 struct target_modify_ldt_ldt_s
*target_ldt_info
;
4266 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
4267 int seg_not_present
, useable
, lm
;
4268 uint32_t *lp
, entry_1
, entry_2
;
4271 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
4272 if (!target_ldt_info
)
4273 return -TARGET_EFAULT
;
4274 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
4275 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
4276 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
4277 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
4278 if (ldt_info
.entry_number
== -1) {
4279 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
4280 if (gdt_table
[i
] == 0) {
4281 ldt_info
.entry_number
= i
;
4282 target_ldt_info
->entry_number
= tswap32(i
);
4287 unlock_user_struct(target_ldt_info
, ptr
, 1);
4289 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
4290 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
4291 return -TARGET_EINVAL
;
4292 seg_32bit
= ldt_info
.flags
& 1;
4293 contents
= (ldt_info
.flags
>> 1) & 3;
4294 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
4295 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
4296 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
4297 useable
= (ldt_info
.flags
>> 6) & 1;
4301 lm
= (ldt_info
.flags
>> 7) & 1;
4304 if (contents
== 3) {
4305 if (seg_not_present
== 0)
4306 return -TARGET_EINVAL
;
4309 /* NOTE: same code as Linux kernel */
4310 /* Allow LDTs to be cleared by the user. */
4311 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
4312 if ((contents
== 0 &&
4313 read_exec_only
== 1 &&
4315 limit_in_pages
== 0 &&
4316 seg_not_present
== 1 &&
4324 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
4325 (ldt_info
.limit
& 0x0ffff);
4326 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
4327 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
4328 (ldt_info
.limit
& 0xf0000) |
4329 ((read_exec_only
^ 1) << 9) |
4331 ((seg_not_present
^ 1) << 15) |
4333 (limit_in_pages
<< 23) |
4338 /* Install the new entry ... */
4340 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
4341 lp
[0] = tswap32(entry_1
);
4342 lp
[1] = tswap32(entry_2
);
4346 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
4348 struct target_modify_ldt_ldt_s
*target_ldt_info
;
4349 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
4350 uint32_t base_addr
, limit
, flags
;
4351 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
4352 int seg_not_present
, useable
, lm
;
4353 uint32_t *lp
, entry_1
, entry_2
;
4355 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
4356 if (!target_ldt_info
)
4357 return -TARGET_EFAULT
;
4358 idx
= tswap32(target_ldt_info
->entry_number
);
4359 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
4360 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
4361 unlock_user_struct(target_ldt_info
, ptr
, 1);
4362 return -TARGET_EINVAL
;
4364 lp
= (uint32_t *)(gdt_table
+ idx
);
4365 entry_1
= tswap32(lp
[0]);
4366 entry_2
= tswap32(lp
[1]);
4368 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
4369 contents
= (entry_2
>> 10) & 3;
4370 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
4371 seg_32bit
= (entry_2
>> 22) & 1;
4372 limit_in_pages
= (entry_2
>> 23) & 1;
4373 useable
= (entry_2
>> 20) & 1;
4377 lm
= (entry_2
>> 21) & 1;
4379 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
4380 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
4381 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
4382 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
4383 base_addr
= (entry_1
>> 16) |
4384 (entry_2
& 0xff000000) |
4385 ((entry_2
& 0xff) << 16);
4386 target_ldt_info
->base_addr
= tswapal(base_addr
);
4387 target_ldt_info
->limit
= tswap32(limit
);
4388 target_ldt_info
->flags
= tswap32(flags
);
4389 unlock_user_struct(target_ldt_info
, ptr
, 1);
4392 #endif /* TARGET_I386 && TARGET_ABI32 */
4394 #ifndef TARGET_ABI32
4395 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
4402 case TARGET_ARCH_SET_GS
:
4403 case TARGET_ARCH_SET_FS
:
4404 if (code
== TARGET_ARCH_SET_GS
)
4408 cpu_x86_load_seg(env
, idx
, 0);
4409 env
->segs
[idx
].base
= addr
;
4411 case TARGET_ARCH_GET_GS
:
4412 case TARGET_ARCH_GET_FS
:
4413 if (code
== TARGET_ARCH_GET_GS
)
4417 val
= env
->segs
[idx
].base
;
4418 if (put_user(val
, addr
, abi_ulong
))
4419 ret
= -TARGET_EFAULT
;
4422 ret
= -TARGET_EINVAL
;
4429 #endif /* defined(TARGET_I386) */
4431 #define NEW_STACK_SIZE 0x40000
4434 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
4437 pthread_mutex_t mutex
;
4438 pthread_cond_t cond
;
4441 abi_ulong child_tidptr
;
4442 abi_ulong parent_tidptr
;
4446 static void *clone_func(void *arg
)
4448 new_thread_info
*info
= arg
;
4454 cpu
= ENV_GET_CPU(env
);
4456 ts
= (TaskState
*)cpu
->opaque
;
4457 info
->tid
= gettid();
4458 cpu
->host_tid
= info
->tid
;
4460 if (info
->child_tidptr
)
4461 put_user_u32(info
->tid
, info
->child_tidptr
);
4462 if (info
->parent_tidptr
)
4463 put_user_u32(info
->tid
, info
->parent_tidptr
);
4464 /* Enable signals. */
4465 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
4466 /* Signal to the parent that we're ready. */
4467 pthread_mutex_lock(&info
->mutex
);
4468 pthread_cond_broadcast(&info
->cond
);
4469 pthread_mutex_unlock(&info
->mutex
);
4470 /* Wait until the parent has finshed initializing the tls state. */
4471 pthread_mutex_lock(&clone_lock
);
4472 pthread_mutex_unlock(&clone_lock
);
4478 /* do_fork() Must return host values and target errnos (unlike most
4479 do_*() functions). */
4480 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
4481 abi_ulong parent_tidptr
, target_ulong newtls
,
4482 abi_ulong child_tidptr
)
4484 CPUState
*cpu
= ENV_GET_CPU(env
);
4488 CPUArchState
*new_env
;
4489 unsigned int nptl_flags
;
4492 /* Emulate vfork() with fork() */
4493 if (flags
& CLONE_VFORK
)
4494 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
4496 if (flags
& CLONE_VM
) {
4497 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
4498 new_thread_info info
;
4499 pthread_attr_t attr
;
4501 ts
= g_malloc0(sizeof(TaskState
));
4502 init_task_state(ts
);
4503 /* we create a new CPU instance. */
4504 new_env
= cpu_copy(env
);
4505 /* Init regs that differ from the parent. */
4506 cpu_clone_regs(new_env
, newsp
);
4507 new_cpu
= ENV_GET_CPU(new_env
);
4508 new_cpu
->opaque
= ts
;
4509 ts
->bprm
= parent_ts
->bprm
;
4510 ts
->info
= parent_ts
->info
;
4512 flags
&= ~CLONE_NPTL_FLAGS2
;
4514 if (nptl_flags
& CLONE_CHILD_CLEARTID
) {
4515 ts
->child_tidptr
= child_tidptr
;
4518 if (nptl_flags
& CLONE_SETTLS
)
4519 cpu_set_tls (new_env
, newtls
);
4521 /* Grab a mutex so that thread setup appears atomic. */
4522 pthread_mutex_lock(&clone_lock
);
4524 memset(&info
, 0, sizeof(info
));
4525 pthread_mutex_init(&info
.mutex
, NULL
);
4526 pthread_mutex_lock(&info
.mutex
);
4527 pthread_cond_init(&info
.cond
, NULL
);
4529 if (nptl_flags
& CLONE_CHILD_SETTID
)
4530 info
.child_tidptr
= child_tidptr
;
4531 if (nptl_flags
& CLONE_PARENT_SETTID
)
4532 info
.parent_tidptr
= parent_tidptr
;
4534 ret
= pthread_attr_init(&attr
);
4535 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
4536 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
4537 /* It is not safe to deliver signals until the child has finished
4538 initializing, so temporarily block all signals. */
4539 sigfillset(&sigmask
);
4540 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
4542 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
4543 /* TODO: Free new CPU state if thread creation failed. */
4545 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
4546 pthread_attr_destroy(&attr
);
4548 /* Wait for the child to initialize. */
4549 pthread_cond_wait(&info
.cond
, &info
.mutex
);
4551 if (flags
& CLONE_PARENT_SETTID
)
4552 put_user_u32(ret
, parent_tidptr
);
4556 pthread_mutex_unlock(&info
.mutex
);
4557 pthread_cond_destroy(&info
.cond
);
4558 pthread_mutex_destroy(&info
.mutex
);
4559 pthread_mutex_unlock(&clone_lock
);
4561 /* if no CLONE_VM, we consider it is a fork */
4562 if ((flags
& ~(CSIGNAL
| CLONE_NPTL_FLAGS2
)) != 0)
4567 /* Child Process. */
4568 cpu_clone_regs(env
, newsp
);
4570 /* There is a race condition here. The parent process could
4571 theoretically read the TID in the child process before the child
4572 tid is set. This would require using either ptrace
4573 (not implemented) or having *_tidptr to point at a shared memory
4574 mapping. We can't repeat the spinlock hack used above because
4575 the child process gets its own copy of the lock. */
4576 if (flags
& CLONE_CHILD_SETTID
)
4577 put_user_u32(gettid(), child_tidptr
);
4578 if (flags
& CLONE_PARENT_SETTID
)
4579 put_user_u32(gettid(), parent_tidptr
);
4580 ts
= (TaskState
*)cpu
->opaque
;
4581 if (flags
& CLONE_SETTLS
)
4582 cpu_set_tls (env
, newtls
);
4583 if (flags
& CLONE_CHILD_CLEARTID
)
4584 ts
->child_tidptr
= child_tidptr
;
4592 /* warning : doesn't handle linux specific flags... */
4593 static int target_to_host_fcntl_cmd(int cmd
)
4596 case TARGET_F_DUPFD
:
4597 case TARGET_F_GETFD
:
4598 case TARGET_F_SETFD
:
4599 case TARGET_F_GETFL
:
4600 case TARGET_F_SETFL
:
4602 case TARGET_F_GETLK
:
4604 case TARGET_F_SETLK
:
4606 case TARGET_F_SETLKW
:
4608 case TARGET_F_GETOWN
:
4610 case TARGET_F_SETOWN
:
4612 case TARGET_F_GETSIG
:
4614 case TARGET_F_SETSIG
:
4616 #if TARGET_ABI_BITS == 32
4617 case TARGET_F_GETLK64
:
4619 case TARGET_F_SETLK64
:
4621 case TARGET_F_SETLKW64
:
4624 case TARGET_F_SETLEASE
:
4626 case TARGET_F_GETLEASE
:
4628 #ifdef F_DUPFD_CLOEXEC
4629 case TARGET_F_DUPFD_CLOEXEC
:
4630 return F_DUPFD_CLOEXEC
;
4632 case TARGET_F_NOTIFY
:
4635 case TARGET_F_GETOWN_EX
:
4639 case TARGET_F_SETOWN_EX
:
4643 return -TARGET_EINVAL
;
4645 return -TARGET_EINVAL
;
4648 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
4649 static const bitmask_transtbl flock_tbl
[] = {
4650 TRANSTBL_CONVERT(F_RDLCK
),
4651 TRANSTBL_CONVERT(F_WRLCK
),
4652 TRANSTBL_CONVERT(F_UNLCK
),
4653 TRANSTBL_CONVERT(F_EXLCK
),
4654 TRANSTBL_CONVERT(F_SHLCK
),
4658 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
4661 struct target_flock
*target_fl
;
4662 struct flock64 fl64
;
4663 struct target_flock64
*target_fl64
;
4665 struct f_owner_ex fox
;
4666 struct target_f_owner_ex
*target_fox
;
4669 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
4671 if (host_cmd
== -TARGET_EINVAL
)
4675 case TARGET_F_GETLK
:
4676 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
4677 return -TARGET_EFAULT
;
4679 target_to_host_bitmask(tswap16(target_fl
->l_type
), flock_tbl
);
4680 fl
.l_whence
= tswap16(target_fl
->l_whence
);
4681 fl
.l_start
= tswapal(target_fl
->l_start
);
4682 fl
.l_len
= tswapal(target_fl
->l_len
);
4683 fl
.l_pid
= tswap32(target_fl
->l_pid
);
4684 unlock_user_struct(target_fl
, arg
, 0);
4685 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
4687 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg
, 0))
4688 return -TARGET_EFAULT
;
4690 host_to_target_bitmask(tswap16(fl
.l_type
), flock_tbl
);
4691 target_fl
->l_whence
= tswap16(fl
.l_whence
);
4692 target_fl
->l_start
= tswapal(fl
.l_start
);
4693 target_fl
->l_len
= tswapal(fl
.l_len
);
4694 target_fl
->l_pid
= tswap32(fl
.l_pid
);
4695 unlock_user_struct(target_fl
, arg
, 1);
4699 case TARGET_F_SETLK
:
4700 case TARGET_F_SETLKW
:
4701 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
4702 return -TARGET_EFAULT
;
4704 target_to_host_bitmask(tswap16(target_fl
->l_type
), flock_tbl
);
4705 fl
.l_whence
= tswap16(target_fl
->l_whence
);
4706 fl
.l_start
= tswapal(target_fl
->l_start
);
4707 fl
.l_len
= tswapal(target_fl
->l_len
);
4708 fl
.l_pid
= tswap32(target_fl
->l_pid
);
4709 unlock_user_struct(target_fl
, arg
, 0);
4710 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
4713 case TARGET_F_GETLK64
:
4714 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
4715 return -TARGET_EFAULT
;
4717 target_to_host_bitmask(tswap16(target_fl64
->l_type
), flock_tbl
) >> 1;
4718 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
4719 fl64
.l_start
= tswap64(target_fl64
->l_start
);
4720 fl64
.l_len
= tswap64(target_fl64
->l_len
);
4721 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
4722 unlock_user_struct(target_fl64
, arg
, 0);
4723 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
4725 if (!lock_user_struct(VERIFY_WRITE
, target_fl64
, arg
, 0))
4726 return -TARGET_EFAULT
;
4727 target_fl64
->l_type
=
4728 host_to_target_bitmask(tswap16(fl64
.l_type
), flock_tbl
) >> 1;
4729 target_fl64
->l_whence
= tswap16(fl64
.l_whence
);
4730 target_fl64
->l_start
= tswap64(fl64
.l_start
);
4731 target_fl64
->l_len
= tswap64(fl64
.l_len
);
4732 target_fl64
->l_pid
= tswap32(fl64
.l_pid
);
4733 unlock_user_struct(target_fl64
, arg
, 1);
4736 case TARGET_F_SETLK64
:
4737 case TARGET_F_SETLKW64
:
4738 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
4739 return -TARGET_EFAULT
;
4741 target_to_host_bitmask(tswap16(target_fl64
->l_type
), flock_tbl
) >> 1;
4742 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
4743 fl64
.l_start
= tswap64(target_fl64
->l_start
);
4744 fl64
.l_len
= tswap64(target_fl64
->l_len
);
4745 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
4746 unlock_user_struct(target_fl64
, arg
, 0);
4747 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
4750 case TARGET_F_GETFL
:
4751 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
4753 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
4757 case TARGET_F_SETFL
:
4758 ret
= get_errno(fcntl(fd
, host_cmd
, target_to_host_bitmask(arg
, fcntl_flags_tbl
)));
4762 case TARGET_F_GETOWN_EX
:
4763 ret
= get_errno(fcntl(fd
, host_cmd
, &fox
));
4765 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
4766 return -TARGET_EFAULT
;
4767 target_fox
->type
= tswap32(fox
.type
);
4768 target_fox
->pid
= tswap32(fox
.pid
);
4769 unlock_user_struct(target_fox
, arg
, 1);
4775 case TARGET_F_SETOWN_EX
:
4776 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
4777 return -TARGET_EFAULT
;
4778 fox
.type
= tswap32(target_fox
->type
);
4779 fox
.pid
= tswap32(target_fox
->pid
);
4780 unlock_user_struct(target_fox
, arg
, 0);
4781 ret
= get_errno(fcntl(fd
, host_cmd
, &fox
));
4785 case TARGET_F_SETOWN
:
4786 case TARGET_F_GETOWN
:
4787 case TARGET_F_SETSIG
:
4788 case TARGET_F_GETSIG
:
4789 case TARGET_F_SETLEASE
:
4790 case TARGET_F_GETLEASE
:
4791 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
4795 ret
= get_errno(fcntl(fd
, cmd
, arg
));
4803 static inline int high2lowuid(int uid
)
4811 static inline int high2lowgid(int gid
)
4819 static inline int low2highuid(int uid
)
4821 if ((int16_t)uid
== -1)
4827 static inline int low2highgid(int gid
)
4829 if ((int16_t)gid
== -1)
4834 static inline int tswapid(int id
)
4839 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
4841 #else /* !USE_UID16 */
4842 static inline int high2lowuid(int uid
)
4846 static inline int high2lowgid(int gid
)
4850 static inline int low2highuid(int uid
)
4854 static inline int low2highgid(int gid
)
4858 static inline int tswapid(int id
)
4863 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
4865 #endif /* USE_UID16 */
4867 void syscall_init(void)
4870 const argtype
*arg_type
;
4874 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4875 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4876 #include "syscall_types.h"
4878 #undef STRUCT_SPECIAL
4880 /* Build target_to_host_errno_table[] table from
4881 * host_to_target_errno_table[]. */
4882 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
4883 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
4886 /* we patch the ioctl size if necessary. We rely on the fact that
4887 no ioctl has all the bits at '1' in the size field */
4889 while (ie
->target_cmd
!= 0) {
4890 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
4891 TARGET_IOC_SIZEMASK
) {
4892 arg_type
= ie
->arg_type
;
4893 if (arg_type
[0] != TYPE_PTR
) {
4894 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
4899 size
= thunk_type_size(arg_type
, 0);
4900 ie
->target_cmd
= (ie
->target_cmd
&
4901 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
4902 (size
<< TARGET_IOC_SIZESHIFT
);
4905 /* automatic consistency check if same arch */
4906 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4907 (defined(__x86_64__) && defined(TARGET_X86_64))
4908 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
4909 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4910 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
4917 #if TARGET_ABI_BITS == 32
4918 static inline uint64_t target_offset64(uint32_t word0
, uint32_t word1
)
4920 #ifdef TARGET_WORDS_BIGENDIAN
4921 return ((uint64_t)word0
<< 32) | word1
;
4923 return ((uint64_t)word1
<< 32) | word0
;
4926 #else /* TARGET_ABI_BITS == 32 */
4927 static inline uint64_t target_offset64(uint64_t word0
, uint64_t word1
)
4931 #endif /* TARGET_ABI_BITS != 32 */
4933 #ifdef TARGET_NR_truncate64
4934 static inline abi_long
target_truncate64(void *cpu_env
, const char *arg1
,
4939 if (regpairs_aligned(cpu_env
)) {
4943 return get_errno(truncate64(arg1
, target_offset64(arg2
, arg3
)));
4947 #ifdef TARGET_NR_ftruncate64
4948 static inline abi_long
target_ftruncate64(void *cpu_env
, abi_long arg1
,
4953 if (regpairs_aligned(cpu_env
)) {
4957 return get_errno(ftruncate64(arg1
, target_offset64(arg2
, arg3
)));
4961 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
4962 abi_ulong target_addr
)
4964 struct target_timespec
*target_ts
;
4966 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
4967 return -TARGET_EFAULT
;
4968 host_ts
->tv_sec
= tswapal(target_ts
->tv_sec
);
4969 host_ts
->tv_nsec
= tswapal(target_ts
->tv_nsec
);
4970 unlock_user_struct(target_ts
, target_addr
, 0);
4974 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
4975 struct timespec
*host_ts
)
4977 struct target_timespec
*target_ts
;
4979 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
4980 return -TARGET_EFAULT
;
4981 target_ts
->tv_sec
= tswapal(host_ts
->tv_sec
);
4982 target_ts
->tv_nsec
= tswapal(host_ts
->tv_nsec
);
4983 unlock_user_struct(target_ts
, target_addr
, 1);
4987 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
4988 abi_ulong target_addr
)
4990 struct target_itimerspec
*target_itspec
;
4992 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
4993 return -TARGET_EFAULT
;
4996 host_itspec
->it_interval
.tv_sec
=
4997 tswapal(target_itspec
->it_interval
.tv_sec
);
4998 host_itspec
->it_interval
.tv_nsec
=
4999 tswapal(target_itspec
->it_interval
.tv_nsec
);
5000 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
5001 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
5003 unlock_user_struct(target_itspec
, target_addr
, 1);
5007 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
5008 struct itimerspec
*host_its
)
5010 struct target_itimerspec
*target_itspec
;
5012 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
5013 return -TARGET_EFAULT
;
5016 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
5017 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
5019 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
5020 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
5022 unlock_user_struct(target_itspec
, target_addr
, 0);
5026 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
5027 abi_ulong target_addr
)
5029 struct target_sigevent
*target_sevp
;
5031 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
5032 return -TARGET_EFAULT
;
5035 /* This union is awkward on 64 bit systems because it has a 32 bit
5036 * integer and a pointer in it; we follow the conversion approach
5037 * used for handling sigval types in signal.c so the guest should get
5038 * the correct value back even if we did a 64 bit byteswap and it's
5039 * using the 32 bit integer.
5041 host_sevp
->sigev_value
.sival_ptr
=
5042 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
5043 host_sevp
->sigev_signo
=
5044 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
5045 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
5046 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
5048 unlock_user_struct(target_sevp
, target_addr
, 1);
#if defined(TARGET_NR_mlockall)
/* Translate the guest's mlockall() flag bits into the host's MCL_* bits. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }

    return result;
}
#endif
#if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
/* Copy *host_st out to the guest's 64-bit stat structure at target_addr,
 * selecting the proper guest layout (ARM EABI, target_stat64, or plain
 * target_stat) and byte-swapping every field via __put_user.
 * Returns 0 on success, -TARGET_EFAULT if the guest address is bad. */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        /* ARM EABI guests use a distinct, padded stat64 layout. */
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        /* Some ABIs carry the inode in a second, differently-sized field. */
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif
5132 /* ??? Using host futex calls even when target atomic operations
5133 are not really atomic probably breaks things. However implementing
5134 futexes locally would make futexes shared between multiple processes
5135 tricky. However they're probably useless because guest atomic
5136 operations won't work either. */
5137 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
5138 target_ulong uaddr2
, int val3
)
5140 struct timespec ts
, *pts
;
5143 /* ??? We assume FUTEX_* constants are the same on both host
5145 #ifdef FUTEX_CMD_MASK
5146 base_op
= op
& FUTEX_CMD_MASK
;
5152 case FUTEX_WAIT_BITSET
:
5155 target_to_host_timespec(pts
, timeout
);
5159 return get_errno(sys_futex(g2h(uaddr
), op
, tswap32(val
),
5162 return get_errno(sys_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
5164 return get_errno(sys_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
5166 case FUTEX_CMP_REQUEUE
:
5168 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
5169 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
5170 But the prototype takes a `struct timespec *'; insert casts
5171 to satisfy the compiler. We do not need to tswap TIMEOUT
5172 since it's not compared to guest memory. */
5173 pts
= (struct timespec
*)(uintptr_t) timeout
;
5174 return get_errno(sys_futex(g2h(uaddr
), op
, val
, pts
,
5176 (base_op
== FUTEX_CMP_REQUEUE
5180 return -TARGET_ENOSYS
;
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Replace the low 7 terminating-signal bits with the guest number. */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* Stop signal lives in bits 8..15. */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
/* Fake /proc/self/cmdline for the guest: stream the host's cmdline into fd,
 * but drop the first NUL-terminated string (the qemu interpreter path) so
 * the guest sees its own argv[0] first.
 * Returns 0 on success, -1 on read/write failure.
 *
 * Fix: the NUL scan previously used memchr(buf, 0, sizeof(buf)), which
 * inspects bytes beyond the nb_read bytes actually read — uninitialized
 * stack data.  A stale NUL past nb_read would make cp_buf point beyond the
 * valid data and drive nb_read negative.  Bound the scan by nb_read.
 */
static int open_self_cmdline(void *cpu_env, int fd)
{
    int fd_orig = -1;
    bool word_skipped = false;

    fd_orig = open("/proc/self/cmdline", O_RDONLY);
    if (fd_orig < 0) {
        return fd_orig;
    }

    while (true) {
        ssize_t nb_read;
        char buf[128];
        char *cp_buf = buf;

        nb_read = read(fd_orig, buf, sizeof(buf));
        if (nb_read < 0) {
            fd_orig = close(fd_orig);
            return -1;
        } else if (nb_read == 0) {
            break;
        }

        if (!word_skipped) {
            /* Skip the first string, which is the path to qemu-*-static
               instead of the actual command. */
            cp_buf = memchr(buf, 0, nb_read);
            if (cp_buf) {
                /* Null byte found, skip one string */
                cp_buf++;
                nb_read -= cp_buf - buf;
                word_skipped = true;
            }
        }

        if (word_skipped) {
            if (write(fd, cp_buf, nb_read) != nb_read) {
                close(fd_orig);
                return -1;
            }
        }
    }

    return close(fd_orig);
}
5244 static int open_self_maps(void *cpu_env
, int fd
)
5246 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
5247 TaskState
*ts
= cpu
->opaque
;
5253 fp
= fopen("/proc/self/maps", "r");
5258 while ((read
= getline(&line
, &len
, fp
)) != -1) {
5259 int fields
, dev_maj
, dev_min
, inode
;
5260 uint64_t min
, max
, offset
;
5261 char flag_r
, flag_w
, flag_x
, flag_p
;
5262 char path
[512] = "";
5263 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
5264 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
5265 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
5267 if ((fields
< 10) || (fields
> 11)) {
5270 if (h2g_valid(min
)) {
5271 int flags
= page_get_flags(h2g(min
));
5272 max
= h2g_valid(max
- 1) ? max
: (uintptr_t)g2h(GUEST_ADDR_MAX
);
5273 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
5276 if (h2g(min
) == ts
->info
->stack_limit
) {
5277 pstrcpy(path
, sizeof(path
), " [stack]");
5279 dprintf(fd
, TARGET_ABI_FMT_lx
"-" TARGET_ABI_FMT_lx
5280 " %c%c%c%c %08" PRIx64
" %02x:%02x %d %s%s\n",
5281 h2g(min
), h2g(max
- 1) + 1, flag_r
, flag_w
,
5282 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
5283 path
[0] ? " " : "", path
);
5293 static int open_self_stat(void *cpu_env
, int fd
)
5295 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
5296 TaskState
*ts
= cpu
->opaque
;
5297 abi_ulong start_stack
= ts
->info
->start_stack
;
5300 for (i
= 0; i
< 44; i
++) {
5308 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
5309 } else if (i
== 1) {
5311 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
5312 } else if (i
== 27) {
5315 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
5317 /* for the rest, there is MasterCard */
5318 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
5322 if (write(fd
, buf
, len
) != len
) {
5330 static int open_self_auxv(void *cpu_env
, int fd
)
5332 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
5333 TaskState
*ts
= cpu
->opaque
;
5334 abi_ulong auxv
= ts
->info
->saved_auxv
;
5335 abi_ulong len
= ts
->info
->auxv_len
;
5339 * Auxiliary vector is stored in target process stack.
5340 * read in whole auxv vector and copy it to file
5342 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
5346 r
= write(fd
, ptr
, len
);
5353 lseek(fd
, 0, SEEK_SET
);
5354 unlock_user(ptr
, auxv
, len
);
/* Return 1 when filename names this process's own proc entry — i.e. it is
 * "/proc/self/<entry>" or "/proc/<our pid>/<entry>" — else 0. */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-match predicate used by the fake-open table for absolute paths. */
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}

/* Fake /proc/net/route for a guest whose endianness differs from the
 * host's: re-emit the host file into fd with the address fields
 * (dest, gateway, mask) byte-swapped to guest order.
 * Returns 0 on success, -1 if the host file cannot be opened. */
static int open_net_route(void *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -1;
    }

    /* read header */
    read = getline(&line, &len, fp);
    dprintf(fd, "%s", line);

    /* read routes */
    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
               iface, &dest, &gw, &flags, &refcnt, &use, &metric,
               &mask, &mtu, &window, &irtt);
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
#endif
5428 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
5431 const char *filename
;
5432 int (*fill
)(void *cpu_env
, int fd
);
5433 int (*cmp
)(const char *s1
, const char *s2
);
5435 const struct fake_open
*fake_open
;
5436 static const struct fake_open fakes
[] = {
5437 { "maps", open_self_maps
, is_proc_myself
},
5438 { "stat", open_self_stat
, is_proc_myself
},
5439 { "auxv", open_self_auxv
, is_proc_myself
},
5440 { "cmdline", open_self_cmdline
, is_proc_myself
},
5441 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
5442 { "/proc/net/route", open_net_route
, is_proc
},
5444 { NULL
, NULL
, NULL
}
5447 if (is_proc_myself(pathname
, "exe")) {
5448 int execfd
= qemu_getauxval(AT_EXECFD
);
5449 return execfd
? execfd
: get_errno(sys_openat(dirfd
, exec_path
, flags
, mode
));
5452 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
5453 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
5458 if (fake_open
->filename
) {
5460 char filename
[PATH_MAX
];
5463 /* create temporary file to map stat to */
5464 tmpdir
= getenv("TMPDIR");
5467 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
5468 fd
= mkstemp(filename
);
5474 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
5478 lseek(fd
, 0, SEEK_SET
);
5483 return get_errno(sys_openat(dirfd
, path(pathname
), flags
, mode
));
5486 #define TIMER_MAGIC 0x0caf0000
5487 #define TIMER_MAGIC_MASK 0xffff0000
5489 /* Convert QEMU provided timer ID back to internal 16bit index format */
5490 static target_timer_t
get_timer_id(abi_long arg
)
5492 target_timer_t timerid
= arg
;
5494 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
5495 return -TARGET_EINVAL
;
5500 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
5501 return -TARGET_EINVAL
;
5507 /* do_syscall() should always have a single exit point at the end so
5508 that actions, such as logging of syscall results, can be performed.
5509 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5510 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
5511 abi_long arg2
, abi_long arg3
, abi_long arg4
,
5512 abi_long arg5
, abi_long arg6
, abi_long arg7
,
5515 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
5522 gemu_log("syscall %d", num
);
5525 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
5528 case TARGET_NR_exit
:
5529 /* In old applications this may be used to implement _exit(2).
5530 However in threaded applictions it is used for thread termination,
5531 and _exit_group is used for application termination.
5532 Do thread termination if we have more then one thread. */
5533 /* FIXME: This probably breaks if a signal arrives. We should probably
5534 be disabling signals. */
5535 if (CPU_NEXT(first_cpu
)) {
5539 /* Remove the CPU from the list. */
5540 QTAILQ_REMOVE(&cpus
, cpu
, node
);
5543 if (ts
->child_tidptr
) {
5544 put_user_u32(0, ts
->child_tidptr
);
5545 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
5549 object_unref(OBJECT(cpu
));
5556 gdb_exit(cpu_env
, arg1
);
5558 ret
= 0; /* avoid warning */
5560 case TARGET_NR_read
:
5564 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
5566 ret
= get_errno(read(arg1
, p
, arg3
));
5567 unlock_user(p
, arg2
, ret
);
5570 case TARGET_NR_write
:
5571 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
5573 ret
= get_errno(write(arg1
, p
, arg3
));
5574 unlock_user(p
, arg2
, 0);
5576 case TARGET_NR_open
:
5577 if (!(p
= lock_user_string(arg1
)))
5579 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
5580 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
5582 unlock_user(p
, arg1
, 0);
5584 case TARGET_NR_openat
:
5585 if (!(p
= lock_user_string(arg2
)))
5587 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
5588 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
5590 unlock_user(p
, arg2
, 0);
5592 case TARGET_NR_close
:
5593 ret
= get_errno(close(arg1
));
5598 case TARGET_NR_fork
:
5599 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
5601 #ifdef TARGET_NR_waitpid
5602 case TARGET_NR_waitpid
:
5605 ret
= get_errno(waitpid(arg1
, &status
, arg3
));
5606 if (!is_error(ret
) && arg2
&& ret
5607 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
5612 #ifdef TARGET_NR_waitid
5613 case TARGET_NR_waitid
:
5617 ret
= get_errno(waitid(arg1
, arg2
, &info
, arg4
));
5618 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
5619 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
5621 host_to_target_siginfo(p
, &info
);
5622 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
5627 #ifdef TARGET_NR_creat /* not on alpha */
5628 case TARGET_NR_creat
:
5629 if (!(p
= lock_user_string(arg1
)))
5631 ret
= get_errno(creat(p
, arg2
));
5632 unlock_user(p
, arg1
, 0);
5635 case TARGET_NR_link
:
5638 p
= lock_user_string(arg1
);
5639 p2
= lock_user_string(arg2
);
5641 ret
= -TARGET_EFAULT
;
5643 ret
= get_errno(link(p
, p2
));
5644 unlock_user(p2
, arg2
, 0);
5645 unlock_user(p
, arg1
, 0);
5648 #if defined(TARGET_NR_linkat)
5649 case TARGET_NR_linkat
:
5654 p
= lock_user_string(arg2
);
5655 p2
= lock_user_string(arg4
);
5657 ret
= -TARGET_EFAULT
;
5659 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
5660 unlock_user(p
, arg2
, 0);
5661 unlock_user(p2
, arg4
, 0);
5665 case TARGET_NR_unlink
:
5666 if (!(p
= lock_user_string(arg1
)))
5668 ret
= get_errno(unlink(p
));
5669 unlock_user(p
, arg1
, 0);
5671 #if defined(TARGET_NR_unlinkat)
5672 case TARGET_NR_unlinkat
:
5673 if (!(p
= lock_user_string(arg2
)))
5675 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
5676 unlock_user(p
, arg2
, 0);
5679 case TARGET_NR_execve
:
5681 char **argp
, **envp
;
5684 abi_ulong guest_argp
;
5685 abi_ulong guest_envp
;
5692 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
5693 if (get_user_ual(addr
, gp
))
5701 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
5702 if (get_user_ual(addr
, gp
))
5709 argp
= alloca((argc
+ 1) * sizeof(void *));
5710 envp
= alloca((envc
+ 1) * sizeof(void *));
5712 for (gp
= guest_argp
, q
= argp
; gp
;
5713 gp
+= sizeof(abi_ulong
), q
++) {
5714 if (get_user_ual(addr
, gp
))
5718 if (!(*q
= lock_user_string(addr
)))
5720 total_size
+= strlen(*q
) + 1;
5724 for (gp
= guest_envp
, q
= envp
; gp
;
5725 gp
+= sizeof(abi_ulong
), q
++) {
5726 if (get_user_ual(addr
, gp
))
5730 if (!(*q
= lock_user_string(addr
)))
5732 total_size
+= strlen(*q
) + 1;
5736 /* This case will not be caught by the host's execve() if its
5737 page size is bigger than the target's. */
5738 if (total_size
> MAX_ARG_PAGES
* TARGET_PAGE_SIZE
) {
5739 ret
= -TARGET_E2BIG
;
5742 if (!(p
= lock_user_string(arg1
)))
5744 ret
= get_errno(execve(p
, argp
, envp
));
5745 unlock_user(p
, arg1
, 0);
5750 ret
= -TARGET_EFAULT
;
5753 for (gp
= guest_argp
, q
= argp
; *q
;
5754 gp
+= sizeof(abi_ulong
), q
++) {
5755 if (get_user_ual(addr
, gp
)
5758 unlock_user(*q
, addr
, 0);
5760 for (gp
= guest_envp
, q
= envp
; *q
;
5761 gp
+= sizeof(abi_ulong
), q
++) {
5762 if (get_user_ual(addr
, gp
)
5765 unlock_user(*q
, addr
, 0);
5769 case TARGET_NR_chdir
:
5770 if (!(p
= lock_user_string(arg1
)))
5772 ret
= get_errno(chdir(p
));
5773 unlock_user(p
, arg1
, 0);
5775 #ifdef TARGET_NR_time
5776 case TARGET_NR_time
:
5779 ret
= get_errno(time(&host_time
));
5782 && put_user_sal(host_time
, arg1
))
5787 case TARGET_NR_mknod
:
5788 if (!(p
= lock_user_string(arg1
)))
5790 ret
= get_errno(mknod(p
, arg2
, arg3
));
5791 unlock_user(p
, arg1
, 0);
5793 #if defined(TARGET_NR_mknodat)
5794 case TARGET_NR_mknodat
:
5795 if (!(p
= lock_user_string(arg2
)))
5797 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
5798 unlock_user(p
, arg2
, 0);
5801 case TARGET_NR_chmod
:
5802 if (!(p
= lock_user_string(arg1
)))
5804 ret
= get_errno(chmod(p
, arg2
));
5805 unlock_user(p
, arg1
, 0);
5807 #ifdef TARGET_NR_break
5808 case TARGET_NR_break
:
5811 #ifdef TARGET_NR_oldstat
5812 case TARGET_NR_oldstat
:
5815 case TARGET_NR_lseek
:
5816 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
5818 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5819 /* Alpha specific */
5820 case TARGET_NR_getxpid
:
5821 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
5822 ret
= get_errno(getpid());
5825 #ifdef TARGET_NR_getpid
5826 case TARGET_NR_getpid
:
5827 ret
= get_errno(getpid());
5830 case TARGET_NR_mount
:
5832 /* need to look at the data field */
5836 p
= lock_user_string(arg1
);
5844 p2
= lock_user_string(arg2
);
5847 unlock_user(p
, arg1
, 0);
5853 p3
= lock_user_string(arg3
);
5856 unlock_user(p
, arg1
, 0);
5858 unlock_user(p2
, arg2
, 0);
5865 /* FIXME - arg5 should be locked, but it isn't clear how to
5866 * do that since it's not guaranteed to be a NULL-terminated
5870 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
5872 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
5874 ret
= get_errno(ret
);
5877 unlock_user(p
, arg1
, 0);
5879 unlock_user(p2
, arg2
, 0);
5881 unlock_user(p3
, arg3
, 0);
5885 #ifdef TARGET_NR_umount
5886 case TARGET_NR_umount
:
5887 if (!(p
= lock_user_string(arg1
)))
5889 ret
= get_errno(umount(p
));
5890 unlock_user(p
, arg1
, 0);
5893 #ifdef TARGET_NR_stime /* not on alpha */
5894 case TARGET_NR_stime
:
5897 if (get_user_sal(host_time
, arg1
))
5899 ret
= get_errno(stime(&host_time
));
5903 case TARGET_NR_ptrace
:
5905 #ifdef TARGET_NR_alarm /* not on alpha */
5906 case TARGET_NR_alarm
:
5910 #ifdef TARGET_NR_oldfstat
5911 case TARGET_NR_oldfstat
:
5914 #ifdef TARGET_NR_pause /* not on alpha */
5915 case TARGET_NR_pause
:
5916 ret
= get_errno(pause());
5919 #ifdef TARGET_NR_utime
5920 case TARGET_NR_utime
:
5922 struct utimbuf tbuf
, *host_tbuf
;
5923 struct target_utimbuf
*target_tbuf
;
5925 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
5927 tbuf
.actime
= tswapal(target_tbuf
->actime
);
5928 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
5929 unlock_user_struct(target_tbuf
, arg2
, 0);
5934 if (!(p
= lock_user_string(arg1
)))
5936 ret
= get_errno(utime(p
, host_tbuf
));
5937 unlock_user(p
, arg1
, 0);
5941 case TARGET_NR_utimes
:
5943 struct timeval
*tvp
, tv
[2];
5945 if (copy_from_user_timeval(&tv
[0], arg2
)
5946 || copy_from_user_timeval(&tv
[1],
5947 arg2
+ sizeof(struct target_timeval
)))
5953 if (!(p
= lock_user_string(arg1
)))
5955 ret
= get_errno(utimes(p
, tvp
));
5956 unlock_user(p
, arg1
, 0);
5959 #if defined(TARGET_NR_futimesat)
5960 case TARGET_NR_futimesat
:
5962 struct timeval
*tvp
, tv
[2];
5964 if (copy_from_user_timeval(&tv
[0], arg3
)
5965 || copy_from_user_timeval(&tv
[1],
5966 arg3
+ sizeof(struct target_timeval
)))
5972 if (!(p
= lock_user_string(arg2
)))
5974 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
5975 unlock_user(p
, arg2
, 0);
5979 #ifdef TARGET_NR_stty
5980 case TARGET_NR_stty
:
5983 #ifdef TARGET_NR_gtty
5984 case TARGET_NR_gtty
:
5987 case TARGET_NR_access
:
5988 if (!(p
= lock_user_string(arg1
)))
5990 ret
= get_errno(access(path(p
), arg2
));
5991 unlock_user(p
, arg1
, 0);
5993 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5994 case TARGET_NR_faccessat
:
5995 if (!(p
= lock_user_string(arg2
)))
5997 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
5998 unlock_user(p
, arg2
, 0);
6001 #ifdef TARGET_NR_nice /* not on alpha */
6002 case TARGET_NR_nice
:
6003 ret
= get_errno(nice(arg1
));
6006 #ifdef TARGET_NR_ftime
6007 case TARGET_NR_ftime
:
6010 case TARGET_NR_sync
:
6014 case TARGET_NR_kill
:
6015 ret
= get_errno(kill(arg1
, target_to_host_signal(arg2
)));
6017 case TARGET_NR_rename
:
6020 p
= lock_user_string(arg1
);
6021 p2
= lock_user_string(arg2
);
6023 ret
= -TARGET_EFAULT
;
6025 ret
= get_errno(rename(p
, p2
));
6026 unlock_user(p2
, arg2
, 0);
6027 unlock_user(p
, arg1
, 0);
6030 #if defined(TARGET_NR_renameat)
6031 case TARGET_NR_renameat
:
6034 p
= lock_user_string(arg2
);
6035 p2
= lock_user_string(arg4
);
6037 ret
= -TARGET_EFAULT
;
6039 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
6040 unlock_user(p2
, arg4
, 0);
6041 unlock_user(p
, arg2
, 0);
6045 case TARGET_NR_mkdir
:
6046 if (!(p
= lock_user_string(arg1
)))
6048 ret
= get_errno(mkdir(p
, arg2
));
6049 unlock_user(p
, arg1
, 0);
6051 #if defined(TARGET_NR_mkdirat)
6052 case TARGET_NR_mkdirat
:
6053 if (!(p
= lock_user_string(arg2
)))
6055 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
6056 unlock_user(p
, arg2
, 0);
6059 case TARGET_NR_rmdir
:
6060 if (!(p
= lock_user_string(arg1
)))
6062 ret
= get_errno(rmdir(p
));
6063 unlock_user(p
, arg1
, 0);
6066 ret
= get_errno(dup(arg1
));
6068 case TARGET_NR_pipe
:
6069 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
6071 #ifdef TARGET_NR_pipe2
6072 case TARGET_NR_pipe2
:
6073 ret
= do_pipe(cpu_env
, arg1
,
6074 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
6077 case TARGET_NR_times
:
6079 struct target_tms
*tmsp
;
6081 ret
= get_errno(times(&tms
));
6083 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
6086 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
6087 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
6088 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
6089 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
6092 ret
= host_to_target_clock_t(ret
);
6095 #ifdef TARGET_NR_prof
6096 case TARGET_NR_prof
:
6099 #ifdef TARGET_NR_signal
6100 case TARGET_NR_signal
:
6103 case TARGET_NR_acct
:
6105 ret
= get_errno(acct(NULL
));
6107 if (!(p
= lock_user_string(arg1
)))
6109 ret
= get_errno(acct(path(p
)));
6110 unlock_user(p
, arg1
, 0);
6113 #ifdef TARGET_NR_umount2
6114 case TARGET_NR_umount2
:
6115 if (!(p
= lock_user_string(arg1
)))
6117 ret
= get_errno(umount2(p
, arg2
));
6118 unlock_user(p
, arg1
, 0);
6121 #ifdef TARGET_NR_lock
6122 case TARGET_NR_lock
:
6125 case TARGET_NR_ioctl
:
6126 ret
= do_ioctl(arg1
, arg2
, arg3
);
6128 case TARGET_NR_fcntl
:
6129 ret
= do_fcntl(arg1
, arg2
, arg3
);
6131 #ifdef TARGET_NR_mpx
6135 case TARGET_NR_setpgid
:
6136 ret
= get_errno(setpgid(arg1
, arg2
));
6138 #ifdef TARGET_NR_ulimit
6139 case TARGET_NR_ulimit
:
6142 #ifdef TARGET_NR_oldolduname
6143 case TARGET_NR_oldolduname
:
6146 case TARGET_NR_umask
:
6147 ret
= get_errno(umask(arg1
));
6149 case TARGET_NR_chroot
:
6150 if (!(p
= lock_user_string(arg1
)))
6152 ret
= get_errno(chroot(p
));
6153 unlock_user(p
, arg1
, 0);
6155 case TARGET_NR_ustat
:
6157 case TARGET_NR_dup2
:
6158 ret
= get_errno(dup2(arg1
, arg2
));
6160 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
6161 case TARGET_NR_dup3
:
6162 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
6165 #ifdef TARGET_NR_getppid /* not on alpha */
6166 case TARGET_NR_getppid
:
6167 ret
= get_errno(getppid());
6170 case TARGET_NR_getpgrp
:
6171 ret
= get_errno(getpgrp());
6173 case TARGET_NR_setsid
:
6174 ret
= get_errno(setsid());
6176 #ifdef TARGET_NR_sigaction
6177 case TARGET_NR_sigaction
:
6179 #if defined(TARGET_ALPHA)
6180 struct target_sigaction act
, oact
, *pact
= 0;
6181 struct target_old_sigaction
*old_act
;
6183 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
6185 act
._sa_handler
= old_act
->_sa_handler
;
6186 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
6187 act
.sa_flags
= old_act
->sa_flags
;
6188 act
.sa_restorer
= 0;
6189 unlock_user_struct(old_act
, arg2
, 0);
6192 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
6193 if (!is_error(ret
) && arg3
) {
6194 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
6196 old_act
->_sa_handler
= oact
._sa_handler
;
6197 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
6198 old_act
->sa_flags
= oact
.sa_flags
;
6199 unlock_user_struct(old_act
, arg3
, 1);
6201 #elif defined(TARGET_MIPS)
6202 struct target_sigaction act
, oact
, *pact
, *old_act
;
6205 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
6207 act
._sa_handler
= old_act
->_sa_handler
;
6208 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
6209 act
.sa_flags
= old_act
->sa_flags
;
6210 unlock_user_struct(old_act
, arg2
, 0);
6216 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
6218 if (!is_error(ret
) && arg3
) {
6219 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
6221 old_act
->_sa_handler
= oact
._sa_handler
;
6222 old_act
->sa_flags
= oact
.sa_flags
;
6223 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
6224 old_act
->sa_mask
.sig
[1] = 0;
6225 old_act
->sa_mask
.sig
[2] = 0;
6226 old_act
->sa_mask
.sig
[3] = 0;
6227 unlock_user_struct(old_act
, arg3
, 1);
6230 struct target_old_sigaction
*old_act
;
6231 struct target_sigaction act
, oact
, *pact
;
6233 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
6235 act
._sa_handler
= old_act
->_sa_handler
;
6236 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
6237 act
.sa_flags
= old_act
->sa_flags
;
6238 act
.sa_restorer
= old_act
->sa_restorer
;
6239 unlock_user_struct(old_act
, arg2
, 0);
6244 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
6245 if (!is_error(ret
) && arg3
) {
6246 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
6248 old_act
->_sa_handler
= oact
._sa_handler
;
6249 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
6250 old_act
->sa_flags
= oact
.sa_flags
;
6251 old_act
->sa_restorer
= oact
.sa_restorer
;
6252 unlock_user_struct(old_act
, arg3
, 1);
6258 case TARGET_NR_rt_sigaction
:
6260 #if defined(TARGET_ALPHA)
6261 struct target_sigaction act
, oact
, *pact
= 0;
6262 struct target_rt_sigaction
*rt_act
;
6263 /* ??? arg4 == sizeof(sigset_t). */
6265 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
6267 act
._sa_handler
= rt_act
->_sa_handler
;
6268 act
.sa_mask
= rt_act
->sa_mask
;
6269 act
.sa_flags
= rt_act
->sa_flags
;
6270 act
.sa_restorer
= arg5
;
6271 unlock_user_struct(rt_act
, arg2
, 0);
6274 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
6275 if (!is_error(ret
) && arg3
) {
6276 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
6278 rt_act
->_sa_handler
= oact
._sa_handler
;
6279 rt_act
->sa_mask
= oact
.sa_mask
;
6280 rt_act
->sa_flags
= oact
.sa_flags
;
6281 unlock_user_struct(rt_act
, arg3
, 1);
6284 struct target_sigaction
*act
;
6285 struct target_sigaction
*oact
;
6288 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
6293 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
6294 ret
= -TARGET_EFAULT
;
6295 goto rt_sigaction_fail
;
6299 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
6302 unlock_user_struct(act
, arg2
, 0);
6304 unlock_user_struct(oact
, arg3
, 1);
6308 #ifdef TARGET_NR_sgetmask /* not on alpha */
6309 case TARGET_NR_sgetmask
:
6312 abi_ulong target_set
;
6313 do_sigprocmask(0, NULL
, &cur_set
);
6314 host_to_target_old_sigset(&target_set
, &cur_set
);
6319 #ifdef TARGET_NR_ssetmask /* not on alpha */
6320 case TARGET_NR_ssetmask
:
6322 sigset_t set
, oset
, cur_set
;
6323 abi_ulong target_set
= arg1
;
6324 do_sigprocmask(0, NULL
, &cur_set
);
6325 target_to_host_old_sigset(&set
, &target_set
);
6326 sigorset(&set
, &set
, &cur_set
);
6327 do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
6328 host_to_target_old_sigset(&target_set
, &oset
);
6333 #ifdef TARGET_NR_sigprocmask
6334 case TARGET_NR_sigprocmask
:
6336 #if defined(TARGET_ALPHA)
6337 sigset_t set
, oldset
;
6342 case TARGET_SIG_BLOCK
:
6345 case TARGET_SIG_UNBLOCK
:
6348 case TARGET_SIG_SETMASK
:
6352 ret
= -TARGET_EINVAL
;
6356 target_to_host_old_sigset(&set
, &mask
);
6358 ret
= get_errno(do_sigprocmask(how
, &set
, &oldset
));
6359 if (!is_error(ret
)) {
6360 host_to_target_old_sigset(&mask
, &oldset
);
6362 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
6365 sigset_t set
, oldset
, *set_ptr
;
6370 case TARGET_SIG_BLOCK
:
6373 case TARGET_SIG_UNBLOCK
:
6376 case TARGET_SIG_SETMASK
:
6380 ret
= -TARGET_EINVAL
;
6383 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
6385 target_to_host_old_sigset(&set
, p
);
6386 unlock_user(p
, arg2
, 0);
6392 ret
= get_errno(do_sigprocmask(how
, set_ptr
, &oldset
));
6393 if (!is_error(ret
) && arg3
) {
6394 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
6396 host_to_target_old_sigset(p
, &oldset
);
6397 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
6403 case TARGET_NR_rt_sigprocmask
:
6406 sigset_t set
, oldset
, *set_ptr
;
6410 case TARGET_SIG_BLOCK
:
6413 case TARGET_SIG_UNBLOCK
:
6416 case TARGET_SIG_SETMASK
:
6420 ret
= -TARGET_EINVAL
;
6423 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
6425 target_to_host_sigset(&set
, p
);
6426 unlock_user(p
, arg2
, 0);
6432 ret
= get_errno(do_sigprocmask(how
, set_ptr
, &oldset
));
6433 if (!is_error(ret
) && arg3
) {
6434 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
6436 host_to_target_sigset(p
, &oldset
);
6437 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
6441 #ifdef TARGET_NR_sigpending
6442 case TARGET_NR_sigpending
:
6445 ret
= get_errno(sigpending(&set
));
6446 if (!is_error(ret
)) {
6447 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
6449 host_to_target_old_sigset(p
, &set
);
6450 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
6455 case TARGET_NR_rt_sigpending
:
6458 ret
= get_errno(sigpending(&set
));
6459 if (!is_error(ret
)) {
6460 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
6462 host_to_target_sigset(p
, &set
);
6463 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
6467 #ifdef TARGET_NR_sigsuspend
6468 case TARGET_NR_sigsuspend
:
6471 #if defined(TARGET_ALPHA)
6472 abi_ulong mask
= arg1
;
6473 target_to_host_old_sigset(&set
, &mask
);
6475 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
6477 target_to_host_old_sigset(&set
, p
);
6478 unlock_user(p
, arg1
, 0);
6480 ret
= get_errno(sigsuspend(&set
));
6484 case TARGET_NR_rt_sigsuspend
:
6487 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
6489 target_to_host_sigset(&set
, p
);
6490 unlock_user(p
, arg1
, 0);
6491 ret
= get_errno(sigsuspend(&set
));
6494 case TARGET_NR_rt_sigtimedwait
:
6497 struct timespec uts
, *puts
;
6500 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
6502 target_to_host_sigset(&set
, p
);
6503 unlock_user(p
, arg1
, 0);
6506 target_to_host_timespec(puts
, arg3
);
6510 ret
= get_errno(sigtimedwait(&set
, &uinfo
, puts
));
6511 if (!is_error(ret
)) {
6513 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
6518 host_to_target_siginfo(p
, &uinfo
);
6519 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
6521 ret
= host_to_target_signal(ret
);
6525 case TARGET_NR_rt_sigqueueinfo
:
6528 if (!(p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_sigset_t
), 1)))
6530 target_to_host_siginfo(&uinfo
, p
);
6531 unlock_user(p
, arg1
, 0);
6532 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
6535 #ifdef TARGET_NR_sigreturn
6536 case TARGET_NR_sigreturn
:
6537 /* NOTE: ret is eax, so not transcoding must be done */
6538 ret
= do_sigreturn(cpu_env
);
6541 case TARGET_NR_rt_sigreturn
:
6542 /* NOTE: ret is eax, so not transcoding must be done */
6543 ret
= do_rt_sigreturn(cpu_env
);
6545 case TARGET_NR_sethostname
:
6546 if (!(p
= lock_user_string(arg1
)))
6548 ret
= get_errno(sethostname(p
, arg2
));
6549 unlock_user(p
, arg1
, 0);
6551 case TARGET_NR_setrlimit
:
6553 int resource
= target_to_host_resource(arg1
);
6554 struct target_rlimit
*target_rlim
;
6556 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
6558 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
6559 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
6560 unlock_user_struct(target_rlim
, arg2
, 0);
6561 ret
= get_errno(setrlimit(resource
, &rlim
));
6564 case TARGET_NR_getrlimit
:
6566 int resource
= target_to_host_resource(arg1
);
6567 struct target_rlimit
*target_rlim
;
6570 ret
= get_errno(getrlimit(resource
, &rlim
));
6571 if (!is_error(ret
)) {
6572 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
6574 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
6575 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
6576 unlock_user_struct(target_rlim
, arg2
, 1);
6580 case TARGET_NR_getrusage
:
6582 struct rusage rusage
;
6583 ret
= get_errno(getrusage(arg1
, &rusage
));
6584 if (!is_error(ret
)) {
6585 ret
= host_to_target_rusage(arg2
, &rusage
);
6589 case TARGET_NR_gettimeofday
:
6592 ret
= get_errno(gettimeofday(&tv
, NULL
));
6593 if (!is_error(ret
)) {
6594 if (copy_to_user_timeval(arg1
, &tv
))
6599 case TARGET_NR_settimeofday
:
6601 struct timeval tv
, *ptv
= NULL
;
6602 struct timezone tz
, *ptz
= NULL
;
6605 if (copy_from_user_timeval(&tv
, arg1
)) {
6612 if (copy_from_user_timezone(&tz
, arg2
)) {
6618 ret
= get_errno(settimeofday(ptv
, ptz
));
6621 #if defined(TARGET_NR_select)
6622 case TARGET_NR_select
:
6623 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
6624 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
6627 struct target_sel_arg_struct
*sel
;
6628 abi_ulong inp
, outp
, exp
, tvp
;
6631 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1))
6633 nsel
= tswapal(sel
->n
);
6634 inp
= tswapal(sel
->inp
);
6635 outp
= tswapal(sel
->outp
);
6636 exp
= tswapal(sel
->exp
);
6637 tvp
= tswapal(sel
->tvp
);
6638 unlock_user_struct(sel
, arg1
, 0);
6639 ret
= do_select(nsel
, inp
, outp
, exp
, tvp
);
6644 #ifdef TARGET_NR_pselect6
6645 case TARGET_NR_pselect6
:
6647 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
6648 fd_set rfds
, wfds
, efds
;
6649 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
6650 struct timespec ts
, *ts_ptr
;
6653 * The 6th arg is actually two args smashed together,
6654 * so we cannot use the C library.
6662 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
6663 target_sigset_t
*target_sigset
;
6671 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
6675 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
6679 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
6685 * This takes a timespec, and not a timeval, so we cannot
6686 * use the do_select() helper ...
6689 if (target_to_host_timespec(&ts
, ts_addr
)) {
6697 /* Extract the two packed args for the sigset */
6700 sig
.size
= _NSIG
/ 8;
6702 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
6706 arg_sigset
= tswapal(arg7
[0]);
6707 arg_sigsize
= tswapal(arg7
[1]);
6708 unlock_user(arg7
, arg6
, 0);
6712 if (arg_sigsize
!= sizeof(*target_sigset
)) {
6713 /* Like the kernel, we enforce correct size sigsets */
6714 ret
= -TARGET_EINVAL
;
6717 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
6718 sizeof(*target_sigset
), 1);
6719 if (!target_sigset
) {
6722 target_to_host_sigset(&set
, target_sigset
);
6723 unlock_user(target_sigset
, arg_sigset
, 0);
6731 ret
= get_errno(sys_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
6734 if (!is_error(ret
)) {
6735 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
6737 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
6739 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
6742 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
6748 case TARGET_NR_symlink
:
6751 p
= lock_user_string(arg1
);
6752 p2
= lock_user_string(arg2
);
6754 ret
= -TARGET_EFAULT
;
6756 ret
= get_errno(symlink(p
, p2
));
6757 unlock_user(p2
, arg2
, 0);
6758 unlock_user(p
, arg1
, 0);
6761 #if defined(TARGET_NR_symlinkat)
6762 case TARGET_NR_symlinkat
:
6765 p
= lock_user_string(arg1
);
6766 p2
= lock_user_string(arg3
);
6768 ret
= -TARGET_EFAULT
;
6770 ret
= get_errno(symlinkat(p
, arg2
, p2
));
6771 unlock_user(p2
, arg3
, 0);
6772 unlock_user(p
, arg1
, 0);
6776 #ifdef TARGET_NR_oldlstat
6777 case TARGET_NR_oldlstat
:
6780 case TARGET_NR_readlink
:
6783 p
= lock_user_string(arg1
);
6784 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
6786 ret
= -TARGET_EFAULT
;
6788 /* Short circuit this for the magic exe check. */
6789 ret
= -TARGET_EINVAL
;
6790 } else if (is_proc_myself((const char *)p
, "exe")) {
6791 char real
[PATH_MAX
], *temp
;
6792 temp
= realpath(exec_path
, real
);
6793 /* Return value is # of bytes that we wrote to the buffer. */
6795 ret
= get_errno(-1);
6797 /* Don't worry about sign mismatch as earlier mapping
6798 * logic would have thrown a bad address error. */
6799 ret
= MIN(strlen(real
), arg3
);
6800 /* We cannot NUL terminate the string. */
6801 memcpy(p2
, real
, ret
);
6804 ret
= get_errno(readlink(path(p
), p2
, arg3
));
6806 unlock_user(p2
, arg2
, ret
);
6807 unlock_user(p
, arg1
, 0);
6810 #if defined(TARGET_NR_readlinkat)
6811 case TARGET_NR_readlinkat
:
6814 p
= lock_user_string(arg2
);
6815 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
6817 ret
= -TARGET_EFAULT
;
6818 } else if (is_proc_myself((const char *)p
, "exe")) {
6819 char real
[PATH_MAX
], *temp
;
6820 temp
= realpath(exec_path
, real
);
6821 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
6822 snprintf((char *)p2
, arg4
, "%s", real
);
6824 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
6826 unlock_user(p2
, arg3
, ret
);
6827 unlock_user(p
, arg2
, 0);
6831 #ifdef TARGET_NR_uselib
6832 case TARGET_NR_uselib
:
6835 #ifdef TARGET_NR_swapon
6836 case TARGET_NR_swapon
:
6837 if (!(p
= lock_user_string(arg1
)))
6839 ret
= get_errno(swapon(p
, arg2
));
6840 unlock_user(p
, arg1
, 0);
6843 case TARGET_NR_reboot
:
6844 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
6845 /* arg4 must be ignored in all other cases */
6846 p
= lock_user_string(arg4
);
6850 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
6851 unlock_user(p
, arg4
, 0);
6853 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
6856 #ifdef TARGET_NR_readdir
6857 case TARGET_NR_readdir
:
6860 #ifdef TARGET_NR_mmap
6861 case TARGET_NR_mmap
:
6862 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6863 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
6864 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
6865 || defined(TARGET_S390X)
6868 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
6869 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
6877 unlock_user(v
, arg1
, 0);
6878 ret
= get_errno(target_mmap(v1
, v2
, v3
,
6879 target_to_host_bitmask(v4
, mmap_flags_tbl
),
6883 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
6884 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
6890 #ifdef TARGET_NR_mmap2
6891 case TARGET_NR_mmap2
:
6893 #define MMAP_SHIFT 12
6895 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
6896 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
6898 arg6
<< MMAP_SHIFT
));
6901 case TARGET_NR_munmap
:
6902 ret
= get_errno(target_munmap(arg1
, arg2
));
6904 case TARGET_NR_mprotect
:
6906 TaskState
*ts
= cpu
->opaque
;
6907 /* Special hack to detect libc making the stack executable. */
6908 if ((arg3
& PROT_GROWSDOWN
)
6909 && arg1
>= ts
->info
->stack_limit
6910 && arg1
<= ts
->info
->start_stack
) {
6911 arg3
&= ~PROT_GROWSDOWN
;
6912 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
6913 arg1
= ts
->info
->stack_limit
;
6916 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
6918 #ifdef TARGET_NR_mremap
6919 case TARGET_NR_mremap
:
6920 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
6923 /* ??? msync/mlock/munlock are broken for softmmu. */
6924 #ifdef TARGET_NR_msync
6925 case TARGET_NR_msync
:
6926 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
6929 #ifdef TARGET_NR_mlock
6930 case TARGET_NR_mlock
:
6931 ret
= get_errno(mlock(g2h(arg1
), arg2
));
6934 #ifdef TARGET_NR_munlock
6935 case TARGET_NR_munlock
:
6936 ret
= get_errno(munlock(g2h(arg1
), arg2
));
6939 #ifdef TARGET_NR_mlockall
6940 case TARGET_NR_mlockall
:
6941 ret
= get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
6944 #ifdef TARGET_NR_munlockall
6945 case TARGET_NR_munlockall
:
6946 ret
= get_errno(munlockall());
6949 case TARGET_NR_truncate
:
6950 if (!(p
= lock_user_string(arg1
)))
6952 ret
= get_errno(truncate(p
, arg2
));
6953 unlock_user(p
, arg1
, 0);
6955 case TARGET_NR_ftruncate
:
6956 ret
= get_errno(ftruncate(arg1
, arg2
));
6958 case TARGET_NR_fchmod
:
6959 ret
= get_errno(fchmod(arg1
, arg2
));
6961 #if defined(TARGET_NR_fchmodat)
6962 case TARGET_NR_fchmodat
:
6963 if (!(p
= lock_user_string(arg2
)))
6965 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
6966 unlock_user(p
, arg2
, 0);
6969 case TARGET_NR_getpriority
:
6970 /* Note that negative values are valid for getpriority, so we must
6971 differentiate based on errno settings. */
6973 ret
= getpriority(arg1
, arg2
);
6974 if (ret
== -1 && errno
!= 0) {
6975 ret
= -host_to_target_errno(errno
);
6979 /* Return value is the unbiased priority. Signal no error. */
6980 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
6982 /* Return value is a biased priority to avoid negative numbers. */
6986 case TARGET_NR_setpriority
:
6987 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
6989 #ifdef TARGET_NR_profil
6990 case TARGET_NR_profil
:
6993 case TARGET_NR_statfs
:
6994 if (!(p
= lock_user_string(arg1
)))
6996 ret
= get_errno(statfs(path(p
), &stfs
));
6997 unlock_user(p
, arg1
, 0);
6999 if (!is_error(ret
)) {
7000 struct target_statfs
*target_stfs
;
7002 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
7004 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
7005 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
7006 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
7007 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
7008 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
7009 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
7010 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
7011 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
7012 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
7013 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
7014 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
7015 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
7016 unlock_user_struct(target_stfs
, arg2
, 1);
7019 case TARGET_NR_fstatfs
:
7020 ret
= get_errno(fstatfs(arg1
, &stfs
));
7021 goto convert_statfs
;
7022 #ifdef TARGET_NR_statfs64
7023 case TARGET_NR_statfs64
:
7024 if (!(p
= lock_user_string(arg1
)))
7026 ret
= get_errno(statfs(path(p
), &stfs
));
7027 unlock_user(p
, arg1
, 0);
7029 if (!is_error(ret
)) {
7030 struct target_statfs64
*target_stfs
;
7032 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
7034 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
7035 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
7036 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
7037 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
7038 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
7039 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
7040 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
7041 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
7042 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
7043 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
7044 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
7045 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
7046 unlock_user_struct(target_stfs
, arg3
, 1);
7049 case TARGET_NR_fstatfs64
:
7050 ret
= get_errno(fstatfs(arg1
, &stfs
));
7051 goto convert_statfs64
;
7053 #ifdef TARGET_NR_ioperm
7054 case TARGET_NR_ioperm
:
7057 #ifdef TARGET_NR_socketcall
7058 case TARGET_NR_socketcall
:
7059 ret
= do_socketcall(arg1
, arg2
);
7062 #ifdef TARGET_NR_accept
7063 case TARGET_NR_accept
:
7064 ret
= do_accept4(arg1
, arg2
, arg3
, 0);
7067 #ifdef TARGET_NR_accept4
7068 case TARGET_NR_accept4
:
7069 #ifdef CONFIG_ACCEPT4
7070 ret
= do_accept4(arg1
, arg2
, arg3
, arg4
);
7076 #ifdef TARGET_NR_bind
7077 case TARGET_NR_bind
:
7078 ret
= do_bind(arg1
, arg2
, arg3
);
7081 #ifdef TARGET_NR_connect
7082 case TARGET_NR_connect
:
7083 ret
= do_connect(arg1
, arg2
, arg3
);
7086 #ifdef TARGET_NR_getpeername
7087 case TARGET_NR_getpeername
:
7088 ret
= do_getpeername(arg1
, arg2
, arg3
);
7091 #ifdef TARGET_NR_getsockname
7092 case TARGET_NR_getsockname
:
7093 ret
= do_getsockname(arg1
, arg2
, arg3
);
7096 #ifdef TARGET_NR_getsockopt
7097 case TARGET_NR_getsockopt
:
7098 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
7101 #ifdef TARGET_NR_listen
7102 case TARGET_NR_listen
:
7103 ret
= get_errno(listen(arg1
, arg2
));
7106 #ifdef TARGET_NR_recv
7107 case TARGET_NR_recv
:
7108 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
7111 #ifdef TARGET_NR_recvfrom
7112 case TARGET_NR_recvfrom
:
7113 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7116 #ifdef TARGET_NR_recvmsg
7117 case TARGET_NR_recvmsg
:
7118 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
7121 #ifdef TARGET_NR_send
7122 case TARGET_NR_send
:
7123 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
7126 #ifdef TARGET_NR_sendmsg
7127 case TARGET_NR_sendmsg
:
7128 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
7131 #ifdef TARGET_NR_sendmmsg
7132 case TARGET_NR_sendmmsg
:
7133 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
7135 case TARGET_NR_recvmmsg
:
7136 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
7139 #ifdef TARGET_NR_sendto
7140 case TARGET_NR_sendto
:
7141 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7144 #ifdef TARGET_NR_shutdown
7145 case TARGET_NR_shutdown
:
7146 ret
= get_errno(shutdown(arg1
, arg2
));
7149 #ifdef TARGET_NR_socket
7150 case TARGET_NR_socket
:
7151 ret
= do_socket(arg1
, arg2
, arg3
);
7154 #ifdef TARGET_NR_socketpair
7155 case TARGET_NR_socketpair
:
7156 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
7159 #ifdef TARGET_NR_setsockopt
7160 case TARGET_NR_setsockopt
:
7161 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
7165 case TARGET_NR_syslog
:
7166 if (!(p
= lock_user_string(arg2
)))
7168 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
7169 unlock_user(p
, arg2
, 0);
7172 case TARGET_NR_setitimer
:
7174 struct itimerval value
, ovalue
, *pvalue
;
7178 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
7179 || copy_from_user_timeval(&pvalue
->it_value
,
7180 arg2
+ sizeof(struct target_timeval
)))
7185 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
7186 if (!is_error(ret
) && arg3
) {
7187 if (copy_to_user_timeval(arg3
,
7188 &ovalue
.it_interval
)
7189 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
7195 case TARGET_NR_getitimer
:
7197 struct itimerval value
;
7199 ret
= get_errno(getitimer(arg1
, &value
));
7200 if (!is_error(ret
) && arg2
) {
7201 if (copy_to_user_timeval(arg2
,
7203 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
7209 case TARGET_NR_stat
:
7210 if (!(p
= lock_user_string(arg1
)))
7212 ret
= get_errno(stat(path(p
), &st
));
7213 unlock_user(p
, arg1
, 0);
7215 case TARGET_NR_lstat
:
7216 if (!(p
= lock_user_string(arg1
)))
7218 ret
= get_errno(lstat(path(p
), &st
));
7219 unlock_user(p
, arg1
, 0);
7221 case TARGET_NR_fstat
:
7223 ret
= get_errno(fstat(arg1
, &st
));
7225 if (!is_error(ret
)) {
7226 struct target_stat
*target_st
;
7228 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
7230 memset(target_st
, 0, sizeof(*target_st
));
7231 __put_user(st
.st_dev
, &target_st
->st_dev
);
7232 __put_user(st
.st_ino
, &target_st
->st_ino
);
7233 __put_user(st
.st_mode
, &target_st
->st_mode
);
7234 __put_user(st
.st_uid
, &target_st
->st_uid
);
7235 __put_user(st
.st_gid
, &target_st
->st_gid
);
7236 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
7237 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
7238 __put_user(st
.st_size
, &target_st
->st_size
);
7239 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
7240 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
7241 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
7242 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
7243 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
7244 unlock_user_struct(target_st
, arg2
, 1);
7248 #ifdef TARGET_NR_olduname
7249 case TARGET_NR_olduname
:
7252 #ifdef TARGET_NR_iopl
7253 case TARGET_NR_iopl
:
7256 case TARGET_NR_vhangup
:
7257 ret
= get_errno(vhangup());
7259 #ifdef TARGET_NR_idle
7260 case TARGET_NR_idle
:
7263 #ifdef TARGET_NR_syscall
7264 case TARGET_NR_syscall
:
7265 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
7266 arg6
, arg7
, arg8
, 0);
7269 case TARGET_NR_wait4
:
7272 abi_long status_ptr
= arg2
;
7273 struct rusage rusage
, *rusage_ptr
;
7274 abi_ulong target_rusage
= arg4
;
7275 abi_long rusage_err
;
7277 rusage_ptr
= &rusage
;
7280 ret
= get_errno(wait4(arg1
, &status
, arg3
, rusage_ptr
));
7281 if (!is_error(ret
)) {
7282 if (status_ptr
&& ret
) {
7283 status
= host_to_target_waitstatus(status
);
7284 if (put_user_s32(status
, status_ptr
))
7287 if (target_rusage
) {
7288 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
7296 #ifdef TARGET_NR_swapoff
7297 case TARGET_NR_swapoff
:
7298 if (!(p
= lock_user_string(arg1
)))
7300 ret
= get_errno(swapoff(p
));
7301 unlock_user(p
, arg1
, 0);
7304 case TARGET_NR_sysinfo
:
7306 struct target_sysinfo
*target_value
;
7307 struct sysinfo value
;
7308 ret
= get_errno(sysinfo(&value
));
7309 if (!is_error(ret
) && arg1
)
7311 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
7313 __put_user(value
.uptime
, &target_value
->uptime
);
7314 __put_user(value
.loads
[0], &target_value
->loads
[0]);
7315 __put_user(value
.loads
[1], &target_value
->loads
[1]);
7316 __put_user(value
.loads
[2], &target_value
->loads
[2]);
7317 __put_user(value
.totalram
, &target_value
->totalram
);
7318 __put_user(value
.freeram
, &target_value
->freeram
);
7319 __put_user(value
.sharedram
, &target_value
->sharedram
);
7320 __put_user(value
.bufferram
, &target_value
->bufferram
);
7321 __put_user(value
.totalswap
, &target_value
->totalswap
);
7322 __put_user(value
.freeswap
, &target_value
->freeswap
);
7323 __put_user(value
.procs
, &target_value
->procs
);
7324 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
7325 __put_user(value
.freehigh
, &target_value
->freehigh
);
7326 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
7327 unlock_user_struct(target_value
, arg1
, 1);
7331 #ifdef TARGET_NR_ipc
7333 ret
= do_ipc(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7336 #ifdef TARGET_NR_semget
7337 case TARGET_NR_semget
:
7338 ret
= get_errno(semget(arg1
, arg2
, arg3
));
7341 #ifdef TARGET_NR_semop
7342 case TARGET_NR_semop
:
7343 ret
= do_semop(arg1
, arg2
, arg3
);
7346 #ifdef TARGET_NR_semctl
7347 case TARGET_NR_semctl
:
7348 ret
= do_semctl(arg1
, arg2
, arg3
, (union target_semun
)(abi_ulong
)arg4
);
7351 #ifdef TARGET_NR_msgctl
7352 case TARGET_NR_msgctl
:
7353 ret
= do_msgctl(arg1
, arg2
, arg3
);
7356 #ifdef TARGET_NR_msgget
7357 case TARGET_NR_msgget
:
7358 ret
= get_errno(msgget(arg1
, arg2
));
7361 #ifdef TARGET_NR_msgrcv
7362 case TARGET_NR_msgrcv
:
7363 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
7366 #ifdef TARGET_NR_msgsnd
7367 case TARGET_NR_msgsnd
:
7368 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
7371 #ifdef TARGET_NR_shmget
7372 case TARGET_NR_shmget
:
7373 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
7376 #ifdef TARGET_NR_shmctl
7377 case TARGET_NR_shmctl
:
7378 ret
= do_shmctl(arg1
, arg2
, arg3
);
7381 #ifdef TARGET_NR_shmat
7382 case TARGET_NR_shmat
:
7383 ret
= do_shmat(arg1
, arg2
, arg3
);
7386 #ifdef TARGET_NR_shmdt
7387 case TARGET_NR_shmdt
:
7388 ret
= do_shmdt(arg1
);
7391 case TARGET_NR_fsync
:
7392 ret
= get_errno(fsync(arg1
));
7394 case TARGET_NR_clone
:
7395 /* Linux manages to have three different orderings for its
7396 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
7397 * match the kernel's CONFIG_CLONE_* settings.
7398 * Microblaze is further special in that it uses a sixth
7399 * implicit argument to clone for the TLS pointer.
7401 #if defined(TARGET_MICROBLAZE)
7402 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
7403 #elif defined(TARGET_CLONE_BACKWARDS)
7404 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
7405 #elif defined(TARGET_CLONE_BACKWARDS2)
7406 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
7408 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
7411 #ifdef __NR_exit_group
7412 /* new thread calls */
7413 case TARGET_NR_exit_group
:
7417 gdb_exit(cpu_env
, arg1
);
7418 ret
= get_errno(exit_group(arg1
));
7421 case TARGET_NR_setdomainname
:
7422 if (!(p
= lock_user_string(arg1
)))
7424 ret
= get_errno(setdomainname(p
, arg2
));
7425 unlock_user(p
, arg1
, 0);
7427 case TARGET_NR_uname
:
7428 /* no need to transcode because we use the linux syscall */
7430 struct new_utsname
* buf
;
7432 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
7434 ret
= get_errno(sys_uname(buf
));
7435 if (!is_error(ret
)) {
7436 /* Overrite the native machine name with whatever is being
7438 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
7439 /* Allow the user to override the reported release. */
7440 if (qemu_uname_release
&& *qemu_uname_release
)
7441 strcpy (buf
->release
, qemu_uname_release
);
7443 unlock_user_struct(buf
, arg1
, 1);
7447 case TARGET_NR_modify_ldt
:
7448 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
7450 #if !defined(TARGET_X86_64)
7451 case TARGET_NR_vm86old
:
7453 case TARGET_NR_vm86
:
7454 ret
= do_vm86(cpu_env
, arg1
, arg2
);
7458 case TARGET_NR_adjtimex
:
7460 #ifdef TARGET_NR_create_module
7461 case TARGET_NR_create_module
:
7463 case TARGET_NR_init_module
:
7464 case TARGET_NR_delete_module
:
7465 #ifdef TARGET_NR_get_kernel_syms
7466 case TARGET_NR_get_kernel_syms
:
7469 case TARGET_NR_quotactl
:
7471 case TARGET_NR_getpgid
:
7472 ret
= get_errno(getpgid(arg1
));
7474 case TARGET_NR_fchdir
:
7475 ret
= get_errno(fchdir(arg1
));
7477 #ifdef TARGET_NR_bdflush /* not on x86_64 */
7478 case TARGET_NR_bdflush
:
7481 #ifdef TARGET_NR_sysfs
7482 case TARGET_NR_sysfs
:
7485 case TARGET_NR_personality
:
7486 ret
= get_errno(personality(arg1
));
7488 #ifdef TARGET_NR_afs_syscall
7489 case TARGET_NR_afs_syscall
:
7492 #ifdef TARGET_NR__llseek /* Not on alpha */
7493 case TARGET_NR__llseek
:
7496 #if !defined(__NR_llseek)
7497 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | arg3
, arg5
);
7499 ret
= get_errno(res
);
7504 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
7506 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
7512 case TARGET_NR_getdents
:
7513 #ifdef __NR_getdents
7514 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
7516 struct target_dirent
*target_dirp
;
7517 struct linux_dirent
*dirp
;
7518 abi_long count
= arg3
;
7520 dirp
= malloc(count
);
7522 ret
= -TARGET_ENOMEM
;
7526 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
7527 if (!is_error(ret
)) {
7528 struct linux_dirent
*de
;
7529 struct target_dirent
*tde
;
7531 int reclen
, treclen
;
7532 int count1
, tnamelen
;
7536 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
7540 reclen
= de
->d_reclen
;
7541 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
7542 assert(tnamelen
>= 0);
7543 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
7544 assert(count1
+ treclen
<= count
);
7545 tde
->d_reclen
= tswap16(treclen
);
7546 tde
->d_ino
= tswapal(de
->d_ino
);
7547 tde
->d_off
= tswapal(de
->d_off
);
7548 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
7549 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
7551 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
7555 unlock_user(target_dirp
, arg2
, ret
);
7561 struct linux_dirent
*dirp
;
7562 abi_long count
= arg3
;
7564 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
7566 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
7567 if (!is_error(ret
)) {
7568 struct linux_dirent
*de
;
7573 reclen
= de
->d_reclen
;
7576 de
->d_reclen
= tswap16(reclen
);
7577 tswapls(&de
->d_ino
);
7578 tswapls(&de
->d_off
);
7579 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
7583 unlock_user(dirp
, arg2
, ret
);
7587 /* Implement getdents in terms of getdents64 */
7589 struct linux_dirent64
*dirp
;
7590 abi_long count
= arg3
;
7592 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
7596 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
7597 if (!is_error(ret
)) {
7598 /* Convert the dirent64 structs to target dirent. We do this
7599 * in-place, since we can guarantee that a target_dirent is no
7600 * larger than a dirent64; however this means we have to be
7601 * careful to read everything before writing in the new format.
7603 struct linux_dirent64
*de
;
7604 struct target_dirent
*tde
;
7609 tde
= (struct target_dirent
*)dirp
;
7611 int namelen
, treclen
;
7612 int reclen
= de
->d_reclen
;
7613 uint64_t ino
= de
->d_ino
;
7614 int64_t off
= de
->d_off
;
7615 uint8_t type
= de
->d_type
;
7617 namelen
= strlen(de
->d_name
);
7618 treclen
= offsetof(struct target_dirent
, d_name
)
7620 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
7622 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
7623 tde
->d_ino
= tswapal(ino
);
7624 tde
->d_off
= tswapal(off
);
7625 tde
->d_reclen
= tswap16(treclen
);
7626 /* The target_dirent type is in what was formerly a padding
7627 * byte at the end of the structure:
7629 *(((char *)tde
) + treclen
- 1) = type
;
7631 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
7632 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
7638 unlock_user(dirp
, arg2
, ret
);
7642 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
7643 case TARGET_NR_getdents64
:
7645 struct linux_dirent64
*dirp
;
7646 abi_long count
= arg3
;
7647 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
7649 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
7650 if (!is_error(ret
)) {
7651 struct linux_dirent64
*de
;
7656 reclen
= de
->d_reclen
;
7659 de
->d_reclen
= tswap16(reclen
);
7660 tswap64s((uint64_t *)&de
->d_ino
);
7661 tswap64s((uint64_t *)&de
->d_off
);
7662 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
7666 unlock_user(dirp
, arg2
, ret
);
7669 #endif /* TARGET_NR_getdents64 */
7670 #if defined(TARGET_NR__newselect)
7671 case TARGET_NR__newselect
:
7672 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
7675 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
7676 # ifdef TARGET_NR_poll
7677 case TARGET_NR_poll
:
7679 # ifdef TARGET_NR_ppoll
7680 case TARGET_NR_ppoll
:
7683 struct target_pollfd
*target_pfd
;
7684 unsigned int nfds
= arg2
;
7689 target_pfd
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_pollfd
) * nfds
, 1);
7693 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
7694 for(i
= 0; i
< nfds
; i
++) {
7695 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
7696 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
7699 # ifdef TARGET_NR_ppoll
7700 if (num
== TARGET_NR_ppoll
) {
7701 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
7702 target_sigset_t
*target_set
;
7703 sigset_t _set
, *set
= &_set
;
7706 if (target_to_host_timespec(timeout_ts
, arg3
)) {
7707 unlock_user(target_pfd
, arg1
, 0);
7715 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
7717 unlock_user(target_pfd
, arg1
, 0);
7720 target_to_host_sigset(set
, target_set
);
7725 ret
= get_errno(sys_ppoll(pfd
, nfds
, timeout_ts
, set
, _NSIG
/8));
7727 if (!is_error(ret
) && arg3
) {
7728 host_to_target_timespec(arg3
, timeout_ts
);
7731 unlock_user(target_set
, arg4
, 0);
7735 ret
= get_errno(poll(pfd
, nfds
, timeout
));
7737 if (!is_error(ret
)) {
7738 for(i
= 0; i
< nfds
; i
++) {
7739 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
7742 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
7746 case TARGET_NR_flock
:
7747 /* NOTE: the flock constant seems to be the same for every
7749 ret
= get_errno(flock(arg1
, arg2
));
7751 case TARGET_NR_readv
:
7753 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
7755 ret
= get_errno(readv(arg1
, vec
, arg3
));
7756 unlock_iovec(vec
, arg2
, arg3
, 1);
7758 ret
= -host_to_target_errno(errno
);
7762 case TARGET_NR_writev
:
7764 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
7766 ret
= get_errno(writev(arg1
, vec
, arg3
));
7767 unlock_iovec(vec
, arg2
, arg3
, 0);
7769 ret
= -host_to_target_errno(errno
);
7773 case TARGET_NR_getsid
:
7774 ret
= get_errno(getsid(arg1
));
7776 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
7777 case TARGET_NR_fdatasync
:
7778 ret
= get_errno(fdatasync(arg1
));
7781 case TARGET_NR__sysctl
:
7782 /* We don't implement this, but ENOTDIR is always a safe
7784 ret
= -TARGET_ENOTDIR
;
7786 case TARGET_NR_sched_getaffinity
:
7788 unsigned int mask_size
;
7789 unsigned long *mask
;
7792 * sched_getaffinity needs multiples of ulong, so need to take
7793 * care of mismatches between target ulong and host ulong sizes.
7795 if (arg2
& (sizeof(abi_ulong
) - 1)) {
7796 ret
= -TARGET_EINVAL
;
7799 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
7801 mask
= alloca(mask_size
);
7802 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
7804 if (!is_error(ret
)) {
7806 /* More data returned than the caller's buffer will fit.
7807 * This only happens if sizeof(abi_long) < sizeof(long)
7808 * and the caller passed us a buffer holding an odd number
7809 * of abi_longs. If the host kernel is actually using the
7810 * extra 4 bytes then fail EINVAL; otherwise we can just
7811 * ignore them and only copy the interesting part.
7813 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
7814 if (numcpus
> arg2
* 8) {
7815 ret
= -TARGET_EINVAL
;
7821 if (copy_to_user(arg3
, mask
, ret
)) {
7827 case TARGET_NR_sched_setaffinity
:
7829 unsigned int mask_size
;
7830 unsigned long *mask
;
7833 * sched_setaffinity needs multiples of ulong, so need to take
7834 * care of mismatches between target ulong and host ulong sizes.
7836 if (arg2
& (sizeof(abi_ulong
) - 1)) {
7837 ret
= -TARGET_EINVAL
;
7840 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
7842 mask
= alloca(mask_size
);
7843 if (!lock_user_struct(VERIFY_READ
, p
, arg3
, 1)) {
7846 memcpy(mask
, p
, arg2
);
7847 unlock_user_struct(p
, arg2
, 0);
7849 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
7852 case TARGET_NR_sched_setparam
:
7854 struct sched_param
*target_schp
;
7855 struct sched_param schp
;
7858 return -TARGET_EINVAL
;
7860 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
7862 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
7863 unlock_user_struct(target_schp
, arg2
, 0);
7864 ret
= get_errno(sched_setparam(arg1
, &schp
));
7867 case TARGET_NR_sched_getparam
:
7869 struct sched_param
*target_schp
;
7870 struct sched_param schp
;
7873 return -TARGET_EINVAL
;
7875 ret
= get_errno(sched_getparam(arg1
, &schp
));
7876 if (!is_error(ret
)) {
7877 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
7879 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
7880 unlock_user_struct(target_schp
, arg2
, 1);
7884 case TARGET_NR_sched_setscheduler
:
7886 struct sched_param
*target_schp
;
7887 struct sched_param schp
;
7889 return -TARGET_EINVAL
;
7891 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
7893 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
7894 unlock_user_struct(target_schp
, arg3
, 0);
7895 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
7898 case TARGET_NR_sched_getscheduler
:
7899 ret
= get_errno(sched_getscheduler(arg1
));
7901 case TARGET_NR_sched_yield
:
7902 ret
= get_errno(sched_yield());
7904 case TARGET_NR_sched_get_priority_max
:
7905 ret
= get_errno(sched_get_priority_max(arg1
));
7907 case TARGET_NR_sched_get_priority_min
:
7908 ret
= get_errno(sched_get_priority_min(arg1
));
7910 case TARGET_NR_sched_rr_get_interval
:
7913 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
7914 if (!is_error(ret
)) {
7915 ret
= host_to_target_timespec(arg2
, &ts
);
7919 case TARGET_NR_nanosleep
:
7921 struct timespec req
, rem
;
7922 target_to_host_timespec(&req
, arg1
);
7923 ret
= get_errno(nanosleep(&req
, &rem
));
7924 if (is_error(ret
) && arg2
) {
7925 host_to_target_timespec(arg2
, &rem
);
7929 #ifdef TARGET_NR_query_module
7930 case TARGET_NR_query_module
:
7933 #ifdef TARGET_NR_nfsservctl
7934 case TARGET_NR_nfsservctl
:
7937 case TARGET_NR_prctl
:
7939 case PR_GET_PDEATHSIG
:
7942 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
7943 if (!is_error(ret
) && arg2
7944 && put_user_ual(deathsig
, arg2
)) {
7952 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
7956 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
7958 unlock_user(name
, arg2
, 16);
7963 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
7967 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
7969 unlock_user(name
, arg2
, 0);
7974 /* Most prctl options have no pointer arguments */
7975 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
7979 #ifdef TARGET_NR_arch_prctl
7980 case TARGET_NR_arch_prctl
:
7981 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
7982 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
7988 #ifdef TARGET_NR_pread64
7989 case TARGET_NR_pread64
:
7990 if (regpairs_aligned(cpu_env
)) {
7994 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
7996 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
7997 unlock_user(p
, arg2
, ret
);
7999 case TARGET_NR_pwrite64
:
8000 if (regpairs_aligned(cpu_env
)) {
8004 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
8006 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
8007 unlock_user(p
, arg2
, 0);
8010 case TARGET_NR_getcwd
:
8011 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
8013 ret
= get_errno(sys_getcwd1(p
, arg2
));
8014 unlock_user(p
, arg1
, ret
);
8016 case TARGET_NR_capget
:
8017 case TARGET_NR_capset
:
8019 struct target_user_cap_header
*target_header
;
8020 struct target_user_cap_data
*target_data
= NULL
;
8021 struct __user_cap_header_struct header
;
8022 struct __user_cap_data_struct data
[2];
8023 struct __user_cap_data_struct
*dataptr
= NULL
;
8024 int i
, target_datalen
;
8027 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
8030 header
.version
= tswap32(target_header
->version
);
8031 header
.pid
= tswap32(target_header
->pid
);
8033 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
8034 /* Version 2 and up takes pointer to two user_data structs */
8038 target_datalen
= sizeof(*target_data
) * data_items
;
8041 if (num
== TARGET_NR_capget
) {
8042 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
8044 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
8047 unlock_user_struct(target_header
, arg1
, 0);
8051 if (num
== TARGET_NR_capset
) {
8052 for (i
= 0; i
< data_items
; i
++) {
8053 data
[i
].effective
= tswap32(target_data
[i
].effective
);
8054 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
8055 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
8062 if (num
== TARGET_NR_capget
) {
8063 ret
= get_errno(capget(&header
, dataptr
));
8065 ret
= get_errno(capset(&header
, dataptr
));
8068 /* The kernel always updates version for both capget and capset */
8069 target_header
->version
= tswap32(header
.version
);
8070 unlock_user_struct(target_header
, arg1
, 1);
8073 if (num
== TARGET_NR_capget
) {
8074 for (i
= 0; i
< data_items
; i
++) {
8075 target_data
[i
].effective
= tswap32(data
[i
].effective
);
8076 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
8077 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
8079 unlock_user(target_data
, arg2
, target_datalen
);
8081 unlock_user(target_data
, arg2
, 0);
8086 case TARGET_NR_sigaltstack
:
8087 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
8088 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
8089 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
8090 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUArchState
*)cpu_env
));
8096 #ifdef CONFIG_SENDFILE
8097 case TARGET_NR_sendfile
:
8102 ret
= get_user_sal(off
, arg3
);
8103 if (is_error(ret
)) {
8108 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
8109 if (!is_error(ret
) && arg3
) {
8110 abi_long ret2
= put_user_sal(off
, arg3
);
8111 if (is_error(ret2
)) {
8117 #ifdef TARGET_NR_sendfile64
8118 case TARGET_NR_sendfile64
:
8123 ret
= get_user_s64(off
, arg3
);
8124 if (is_error(ret
)) {
8129 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
8130 if (!is_error(ret
) && arg3
) {
8131 abi_long ret2
= put_user_s64(off
, arg3
);
8132 if (is_error(ret2
)) {
8140 case TARGET_NR_sendfile
:
8141 #ifdef TARGET_NR_sendfile64
8142 case TARGET_NR_sendfile64
:
8147 #ifdef TARGET_NR_getpmsg
8148 case TARGET_NR_getpmsg
:
8151 #ifdef TARGET_NR_putpmsg
8152 case TARGET_NR_putpmsg
:
8155 #ifdef TARGET_NR_vfork
8156 case TARGET_NR_vfork
:
8157 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
8161 #ifdef TARGET_NR_ugetrlimit
8162 case TARGET_NR_ugetrlimit
:
8165 int resource
= target_to_host_resource(arg1
);
8166 ret
= get_errno(getrlimit(resource
, &rlim
));
8167 if (!is_error(ret
)) {
8168 struct target_rlimit
*target_rlim
;
8169 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
8171 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
8172 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
8173 unlock_user_struct(target_rlim
, arg2
, 1);
8178 #ifdef TARGET_NR_truncate64
8179 case TARGET_NR_truncate64
:
8180 if (!(p
= lock_user_string(arg1
)))
8182 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
8183 unlock_user(p
, arg1
, 0);
8186 #ifdef TARGET_NR_ftruncate64
8187 case TARGET_NR_ftruncate64
:
8188 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
8191 #ifdef TARGET_NR_stat64
8192 case TARGET_NR_stat64
:
8193 if (!(p
= lock_user_string(arg1
)))
8195 ret
= get_errno(stat(path(p
), &st
));
8196 unlock_user(p
, arg1
, 0);
8198 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
8201 #ifdef TARGET_NR_lstat64
8202 case TARGET_NR_lstat64
:
8203 if (!(p
= lock_user_string(arg1
)))
8205 ret
= get_errno(lstat(path(p
), &st
));
8206 unlock_user(p
, arg1
, 0);
8208 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
8211 #ifdef TARGET_NR_fstat64
8212 case TARGET_NR_fstat64
:
8213 ret
= get_errno(fstat(arg1
, &st
));
8215 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
8218 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
8219 #ifdef TARGET_NR_fstatat64
8220 case TARGET_NR_fstatat64
:
8222 #ifdef TARGET_NR_newfstatat
8223 case TARGET_NR_newfstatat
:
8225 if (!(p
= lock_user_string(arg2
)))
8227 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
8229 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
8232 case TARGET_NR_lchown
:
8233 if (!(p
= lock_user_string(arg1
)))
8235 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
8236 unlock_user(p
, arg1
, 0);
8238 #ifdef TARGET_NR_getuid
8239 case TARGET_NR_getuid
:
8240 ret
= get_errno(high2lowuid(getuid()));
8243 #ifdef TARGET_NR_getgid
8244 case TARGET_NR_getgid
:
8245 ret
= get_errno(high2lowgid(getgid()));
8248 #ifdef TARGET_NR_geteuid
8249 case TARGET_NR_geteuid
:
8250 ret
= get_errno(high2lowuid(geteuid()));
8253 #ifdef TARGET_NR_getegid
8254 case TARGET_NR_getegid
:
8255 ret
= get_errno(high2lowgid(getegid()));
8258 case TARGET_NR_setreuid
:
8259 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
8261 case TARGET_NR_setregid
:
8262 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
8264 case TARGET_NR_getgroups
:
8266 int gidsetsize
= arg1
;
8267 target_id
*target_grouplist
;
8271 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
8272 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
8273 if (gidsetsize
== 0)
8275 if (!is_error(ret
)) {
8276 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
8277 if (!target_grouplist
)
8279 for(i
= 0;i
< ret
; i
++)
8280 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
8281 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
8285 case TARGET_NR_setgroups
:
8287 int gidsetsize
= arg1
;
8288 target_id
*target_grouplist
;
8289 gid_t
*grouplist
= NULL
;
8292 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
8293 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
8294 if (!target_grouplist
) {
8295 ret
= -TARGET_EFAULT
;
8298 for (i
= 0; i
< gidsetsize
; i
++) {
8299 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
8301 unlock_user(target_grouplist
, arg2
, 0);
8303 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
8306 case TARGET_NR_fchown
:
8307 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
8309 #if defined(TARGET_NR_fchownat)
8310 case TARGET_NR_fchownat
:
8311 if (!(p
= lock_user_string(arg2
)))
8313 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
8314 low2highgid(arg4
), arg5
));
8315 unlock_user(p
, arg2
, 0);
8318 #ifdef TARGET_NR_setresuid
8319 case TARGET_NR_setresuid
:
8320 ret
= get_errno(setresuid(low2highuid(arg1
),
8322 low2highuid(arg3
)));
8325 #ifdef TARGET_NR_getresuid
8326 case TARGET_NR_getresuid
:
8328 uid_t ruid
, euid
, suid
;
8329 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
8330 if (!is_error(ret
)) {
8331 if (put_user_id(high2lowuid(ruid
), arg1
)
8332 || put_user_id(high2lowuid(euid
), arg2
)
8333 || put_user_id(high2lowuid(suid
), arg3
))
8339 #ifdef TARGET_NR_getresgid
8340 case TARGET_NR_setresgid
:
8341 ret
= get_errno(setresgid(low2highgid(arg1
),
8343 low2highgid(arg3
)));
8346 #ifdef TARGET_NR_getresgid
8347 case TARGET_NR_getresgid
:
8349 gid_t rgid
, egid
, sgid
;
8350 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
8351 if (!is_error(ret
)) {
8352 if (put_user_id(high2lowgid(rgid
), arg1
)
8353 || put_user_id(high2lowgid(egid
), arg2
)
8354 || put_user_id(high2lowgid(sgid
), arg3
))
8360 case TARGET_NR_chown
:
8361 if (!(p
= lock_user_string(arg1
)))
8363 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
8364 unlock_user(p
, arg1
, 0);
8366 case TARGET_NR_setuid
:
8367 ret
= get_errno(setuid(low2highuid(arg1
)));
8369 case TARGET_NR_setgid
:
8370 ret
= get_errno(setgid(low2highgid(arg1
)));
8372 case TARGET_NR_setfsuid
:
8373 ret
= get_errno(setfsuid(arg1
));
8375 case TARGET_NR_setfsgid
:
8376 ret
= get_errno(setfsgid(arg1
));
8379 #ifdef TARGET_NR_lchown32
8380 case TARGET_NR_lchown32
:
8381 if (!(p
= lock_user_string(arg1
)))
8383 ret
= get_errno(lchown(p
, arg2
, arg3
));
8384 unlock_user(p
, arg1
, 0);
8387 #ifdef TARGET_NR_getuid32
8388 case TARGET_NR_getuid32
:
8389 ret
= get_errno(getuid());
8393 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
8394 /* Alpha specific */
8395 case TARGET_NR_getxuid
:
8399 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
8401 ret
= get_errno(getuid());
8404 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
8405 /* Alpha specific */
8406 case TARGET_NR_getxgid
:
8410 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
8412 ret
= get_errno(getgid());
8415 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
8416 /* Alpha specific */
8417 case TARGET_NR_osf_getsysinfo
:
8418 ret
= -TARGET_EOPNOTSUPP
;
8420 case TARGET_GSI_IEEE_FP_CONTROL
:
8422 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
8424 /* Copied from linux ieee_fpcr_to_swcr. */
8425 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
8426 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
8427 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
8428 | SWCR_TRAP_ENABLE_DZE
8429 | SWCR_TRAP_ENABLE_OVF
);
8430 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
8431 | SWCR_TRAP_ENABLE_INE
);
8432 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
8433 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
8435 if (put_user_u64 (swcr
, arg2
))
8441 /* case GSI_IEEE_STATE_AT_SIGNAL:
8442 -- Not implemented in linux kernel.
8444 -- Retrieves current unaligned access state; not much used.
8446 -- Retrieves implver information; surely not used.
8448 -- Grabs a copy of the HWRPB; surely not used.
8453 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
8454 /* Alpha specific */
8455 case TARGET_NR_osf_setsysinfo
:
8456 ret
= -TARGET_EOPNOTSUPP
;
8458 case TARGET_SSI_IEEE_FP_CONTROL
:
8460 uint64_t swcr
, fpcr
, orig_fpcr
;
8462 if (get_user_u64 (swcr
, arg2
)) {
8465 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
8466 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
8468 /* Copied from linux ieee_swcr_to_fpcr. */
8469 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
8470 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
8471 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
8472 | SWCR_TRAP_ENABLE_DZE
8473 | SWCR_TRAP_ENABLE_OVF
)) << 48;
8474 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
8475 | SWCR_TRAP_ENABLE_INE
)) << 57;
8476 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
8477 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
8479 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
8484 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
8486 uint64_t exc
, fpcr
, orig_fpcr
;
8489 if (get_user_u64(exc
, arg2
)) {
8493 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
8495 /* We only add to the exception status here. */
8496 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
8498 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
8501 /* Old exceptions are not signaled. */
8502 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
8504 /* If any exceptions set by this call,
8505 and are unmasked, send a signal. */
8507 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
8508 si_code
= TARGET_FPE_FLTRES
;
8510 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
8511 si_code
= TARGET_FPE_FLTUND
;
8513 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
8514 si_code
= TARGET_FPE_FLTOVF
;
8516 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
8517 si_code
= TARGET_FPE_FLTDIV
;
8519 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
8520 si_code
= TARGET_FPE_FLTINV
;
8523 target_siginfo_t info
;
8524 info
.si_signo
= SIGFPE
;
8526 info
.si_code
= si_code
;
8527 info
._sifields
._sigfault
._addr
8528 = ((CPUArchState
*)cpu_env
)->pc
;
8529 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
8534 /* case SSI_NVPAIRS:
8535 -- Used with SSIN_UACPROC to enable unaligned accesses.
8536 case SSI_IEEE_STATE_AT_SIGNAL:
8537 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
8538 -- Not implemented in linux kernel
8543 #ifdef TARGET_NR_osf_sigprocmask
8544 /* Alpha specific. */
8545 case TARGET_NR_osf_sigprocmask
:
8549 sigset_t set
, oldset
;
8552 case TARGET_SIG_BLOCK
:
8555 case TARGET_SIG_UNBLOCK
:
8558 case TARGET_SIG_SETMASK
:
8562 ret
= -TARGET_EINVAL
;
8566 target_to_host_old_sigset(&set
, &mask
);
8567 do_sigprocmask(how
, &set
, &oldset
);
8568 host_to_target_old_sigset(&mask
, &oldset
);
8574 #ifdef TARGET_NR_getgid32
8575 case TARGET_NR_getgid32
:
8576 ret
= get_errno(getgid());
8579 #ifdef TARGET_NR_geteuid32
8580 case TARGET_NR_geteuid32
:
8581 ret
= get_errno(geteuid());
8584 #ifdef TARGET_NR_getegid32
8585 case TARGET_NR_getegid32
:
8586 ret
= get_errno(getegid());
8589 #ifdef TARGET_NR_setreuid32
8590 case TARGET_NR_setreuid32
:
8591 ret
= get_errno(setreuid(arg1
, arg2
));
8594 #ifdef TARGET_NR_setregid32
8595 case TARGET_NR_setregid32
:
8596 ret
= get_errno(setregid(arg1
, arg2
));
8599 #ifdef TARGET_NR_getgroups32
8600 case TARGET_NR_getgroups32
:
8602 int gidsetsize
= arg1
;
8603 uint32_t *target_grouplist
;
8607 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
8608 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
8609 if (gidsetsize
== 0)
8611 if (!is_error(ret
)) {
8612 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
8613 if (!target_grouplist
) {
8614 ret
= -TARGET_EFAULT
;
8617 for(i
= 0;i
< ret
; i
++)
8618 target_grouplist
[i
] = tswap32(grouplist
[i
]);
8619 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
8624 #ifdef TARGET_NR_setgroups32
8625 case TARGET_NR_setgroups32
:
8627 int gidsetsize
= arg1
;
8628 uint32_t *target_grouplist
;
8632 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
8633 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
8634 if (!target_grouplist
) {
8635 ret
= -TARGET_EFAULT
;
8638 for(i
= 0;i
< gidsetsize
; i
++)
8639 grouplist
[i
] = tswap32(target_grouplist
[i
]);
8640 unlock_user(target_grouplist
, arg2
, 0);
8641 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
8645 #ifdef TARGET_NR_fchown32
8646 case TARGET_NR_fchown32
:
8647 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
8650 #ifdef TARGET_NR_setresuid32
8651 case TARGET_NR_setresuid32
:
8652 ret
= get_errno(setresuid(arg1
, arg2
, arg3
));
8655 #ifdef TARGET_NR_getresuid32
8656 case TARGET_NR_getresuid32
:
8658 uid_t ruid
, euid
, suid
;
8659 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
8660 if (!is_error(ret
)) {
8661 if (put_user_u32(ruid
, arg1
)
8662 || put_user_u32(euid
, arg2
)
8663 || put_user_u32(suid
, arg3
))
8669 #ifdef TARGET_NR_setresgid32
8670 case TARGET_NR_setresgid32
:
8671 ret
= get_errno(setresgid(arg1
, arg2
, arg3
));
8674 #ifdef TARGET_NR_getresgid32
8675 case TARGET_NR_getresgid32
:
8677 gid_t rgid
, egid
, sgid
;
8678 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
8679 if (!is_error(ret
)) {
8680 if (put_user_u32(rgid
, arg1
)
8681 || put_user_u32(egid
, arg2
)
8682 || put_user_u32(sgid
, arg3
))
8688 #ifdef TARGET_NR_chown32
8689 case TARGET_NR_chown32
:
8690 if (!(p
= lock_user_string(arg1
)))
8692 ret
= get_errno(chown(p
, arg2
, arg3
));
8693 unlock_user(p
, arg1
, 0);
8696 #ifdef TARGET_NR_setuid32
8697 case TARGET_NR_setuid32
:
8698 ret
= get_errno(setuid(arg1
));
8701 #ifdef TARGET_NR_setgid32
8702 case TARGET_NR_setgid32
:
8703 ret
= get_errno(setgid(arg1
));
8706 #ifdef TARGET_NR_setfsuid32
8707 case TARGET_NR_setfsuid32
:
8708 ret
= get_errno(setfsuid(arg1
));
8711 #ifdef TARGET_NR_setfsgid32
8712 case TARGET_NR_setfsgid32
:
8713 ret
= get_errno(setfsgid(arg1
));
8717 case TARGET_NR_pivot_root
:
8719 #ifdef TARGET_NR_mincore
8720 case TARGET_NR_mincore
:
8723 ret
= -TARGET_EFAULT
;
8724 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
8726 if (!(p
= lock_user_string(arg3
)))
8728 ret
= get_errno(mincore(a
, arg2
, p
));
8729 unlock_user(p
, arg3
, ret
);
8731 unlock_user(a
, arg1
, 0);
8735 #ifdef TARGET_NR_arm_fadvise64_64
8736 case TARGET_NR_arm_fadvise64_64
:
8739 * arm_fadvise64_64 looks like fadvise64_64 but
8740 * with different argument order
8748 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
8749 #ifdef TARGET_NR_fadvise64_64
8750 case TARGET_NR_fadvise64_64
:
8752 #ifdef TARGET_NR_fadvise64
8753 case TARGET_NR_fadvise64
:
8757 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
8758 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
8759 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
8760 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
8764 ret
= -posix_fadvise(arg1
, arg2
, arg3
, arg4
);
8767 #ifdef TARGET_NR_madvise
8768 case TARGET_NR_madvise
:
8769 /* A straight passthrough may not be safe because qemu sometimes
8770 turns private file-backed mappings into anonymous mappings.
8771 This will break MADV_DONTNEED.
8772 This is a hint, so ignoring and returning success is ok. */
8776 #if TARGET_ABI_BITS == 32
8777 case TARGET_NR_fcntl64
:
8781 struct target_flock64
*target_fl
;
8783 struct target_eabi_flock64
*target_efl
;
8786 cmd
= target_to_host_fcntl_cmd(arg2
);
8787 if (cmd
== -TARGET_EINVAL
) {
8793 case TARGET_F_GETLK64
:
8795 if (((CPUARMState
*)cpu_env
)->eabi
) {
8796 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
8798 fl
.l_type
= tswap16(target_efl
->l_type
);
8799 fl
.l_whence
= tswap16(target_efl
->l_whence
);
8800 fl
.l_start
= tswap64(target_efl
->l_start
);
8801 fl
.l_len
= tswap64(target_efl
->l_len
);
8802 fl
.l_pid
= tswap32(target_efl
->l_pid
);
8803 unlock_user_struct(target_efl
, arg3
, 0);
8807 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
8809 fl
.l_type
= tswap16(target_fl
->l_type
);
8810 fl
.l_whence
= tswap16(target_fl
->l_whence
);
8811 fl
.l_start
= tswap64(target_fl
->l_start
);
8812 fl
.l_len
= tswap64(target_fl
->l_len
);
8813 fl
.l_pid
= tswap32(target_fl
->l_pid
);
8814 unlock_user_struct(target_fl
, arg3
, 0);
8816 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
8819 if (((CPUARMState
*)cpu_env
)->eabi
) {
8820 if (!lock_user_struct(VERIFY_WRITE
, target_efl
, arg3
, 0))
8822 target_efl
->l_type
= tswap16(fl
.l_type
);
8823 target_efl
->l_whence
= tswap16(fl
.l_whence
);
8824 target_efl
->l_start
= tswap64(fl
.l_start
);
8825 target_efl
->l_len
= tswap64(fl
.l_len
);
8826 target_efl
->l_pid
= tswap32(fl
.l_pid
);
8827 unlock_user_struct(target_efl
, arg3
, 1);
8831 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg3
, 0))
8833 target_fl
->l_type
= tswap16(fl
.l_type
);
8834 target_fl
->l_whence
= tswap16(fl
.l_whence
);
8835 target_fl
->l_start
= tswap64(fl
.l_start
);
8836 target_fl
->l_len
= tswap64(fl
.l_len
);
8837 target_fl
->l_pid
= tswap32(fl
.l_pid
);
8838 unlock_user_struct(target_fl
, arg3
, 1);
8843 case TARGET_F_SETLK64
:
8844 case TARGET_F_SETLKW64
:
8846 if (((CPUARMState
*)cpu_env
)->eabi
) {
8847 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
8849 fl
.l_type
= tswap16(target_efl
->l_type
);
8850 fl
.l_whence
= tswap16(target_efl
->l_whence
);
8851 fl
.l_start
= tswap64(target_efl
->l_start
);
8852 fl
.l_len
= tswap64(target_efl
->l_len
);
8853 fl
.l_pid
= tswap32(target_efl
->l_pid
);
8854 unlock_user_struct(target_efl
, arg3
, 0);
8858 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
8860 fl
.l_type
= tswap16(target_fl
->l_type
);
8861 fl
.l_whence
= tswap16(target_fl
->l_whence
);
8862 fl
.l_start
= tswap64(target_fl
->l_start
);
8863 fl
.l_len
= tswap64(target_fl
->l_len
);
8864 fl
.l_pid
= tswap32(target_fl
->l_pid
);
8865 unlock_user_struct(target_fl
, arg3
, 0);
8867 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
8870 ret
= do_fcntl(arg1
, arg2
, arg3
);
8876 #ifdef TARGET_NR_cacheflush
8877 case TARGET_NR_cacheflush
:
8878 /* self-modifying code is handled automatically, so nothing needed */
8882 #ifdef TARGET_NR_security
8883 case TARGET_NR_security
:
8886 #ifdef TARGET_NR_getpagesize
8887 case TARGET_NR_getpagesize
:
8888 ret
= TARGET_PAGE_SIZE
;
8891 case TARGET_NR_gettid
:
8892 ret
= get_errno(gettid());
8894 #ifdef TARGET_NR_readahead
8895 case TARGET_NR_readahead
:
8896 #if TARGET_ABI_BITS == 32
8897 if (regpairs_aligned(cpu_env
)) {
8902 ret
= get_errno(readahead(arg1
, ((off64_t
)arg3
<< 32) | arg2
, arg4
));
8904 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
8909 #ifdef TARGET_NR_setxattr
8910 case TARGET_NR_listxattr
:
8911 case TARGET_NR_llistxattr
:
8915 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8917 ret
= -TARGET_EFAULT
;
8921 p
= lock_user_string(arg1
);
8923 if (num
== TARGET_NR_listxattr
) {
8924 ret
= get_errno(listxattr(p
, b
, arg3
));
8926 ret
= get_errno(llistxattr(p
, b
, arg3
));
8929 ret
= -TARGET_EFAULT
;
8931 unlock_user(p
, arg1
, 0);
8932 unlock_user(b
, arg2
, arg3
);
8935 case TARGET_NR_flistxattr
:
8939 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8941 ret
= -TARGET_EFAULT
;
8945 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
8946 unlock_user(b
, arg2
, arg3
);
8949 case TARGET_NR_setxattr
:
8950 case TARGET_NR_lsetxattr
:
8952 void *p
, *n
, *v
= 0;
8954 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
8956 ret
= -TARGET_EFAULT
;
8960 p
= lock_user_string(arg1
);
8961 n
= lock_user_string(arg2
);
8963 if (num
== TARGET_NR_setxattr
) {
8964 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
8966 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
8969 ret
= -TARGET_EFAULT
;
8971 unlock_user(p
, arg1
, 0);
8972 unlock_user(n
, arg2
, 0);
8973 unlock_user(v
, arg3
, 0);
8976 case TARGET_NR_fsetxattr
:
8980 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
8982 ret
= -TARGET_EFAULT
;
8986 n
= lock_user_string(arg2
);
8988 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
8990 ret
= -TARGET_EFAULT
;
8992 unlock_user(n
, arg2
, 0);
8993 unlock_user(v
, arg3
, 0);
8996 case TARGET_NR_getxattr
:
8997 case TARGET_NR_lgetxattr
:
8999 void *p
, *n
, *v
= 0;
9001 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
9003 ret
= -TARGET_EFAULT
;
9007 p
= lock_user_string(arg1
);
9008 n
= lock_user_string(arg2
);
9010 if (num
== TARGET_NR_getxattr
) {
9011 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
9013 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
9016 ret
= -TARGET_EFAULT
;
9018 unlock_user(p
, arg1
, 0);
9019 unlock_user(n
, arg2
, 0);
9020 unlock_user(v
, arg3
, arg4
);
9023 case TARGET_NR_fgetxattr
:
9027 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
9029 ret
= -TARGET_EFAULT
;
9033 n
= lock_user_string(arg2
);
9035 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
9037 ret
= -TARGET_EFAULT
;
9039 unlock_user(n
, arg2
, 0);
9040 unlock_user(v
, arg3
, arg4
);
9043 case TARGET_NR_removexattr
:
9044 case TARGET_NR_lremovexattr
:
9047 p
= lock_user_string(arg1
);
9048 n
= lock_user_string(arg2
);
9050 if (num
== TARGET_NR_removexattr
) {
9051 ret
= get_errno(removexattr(p
, n
));
9053 ret
= get_errno(lremovexattr(p
, n
));
9056 ret
= -TARGET_EFAULT
;
9058 unlock_user(p
, arg1
, 0);
9059 unlock_user(n
, arg2
, 0);
9062 case TARGET_NR_fremovexattr
:
9065 n
= lock_user_string(arg2
);
9067 ret
= get_errno(fremovexattr(arg1
, n
));
9069 ret
= -TARGET_EFAULT
;
9071 unlock_user(n
, arg2
, 0);
9075 #endif /* CONFIG_ATTR */
9076 #ifdef TARGET_NR_set_thread_area
9077 case TARGET_NR_set_thread_area
:
9078 #if defined(TARGET_MIPS)
9079 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
9082 #elif defined(TARGET_CRIS)
9084 ret
= -TARGET_EINVAL
;
9086 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
9090 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
9091 ret
= do_set_thread_area(cpu_env
, arg1
);
9093 #elif defined(TARGET_M68K)
9095 TaskState
*ts
= cpu
->opaque
;
9096 ts
->tp_value
= arg1
;
9101 goto unimplemented_nowarn
;
9104 #ifdef TARGET_NR_get_thread_area
9105 case TARGET_NR_get_thread_area
:
9106 #if defined(TARGET_I386) && defined(TARGET_ABI32)
9107 ret
= do_get_thread_area(cpu_env
, arg1
);
9109 #elif defined(TARGET_M68K)
9111 TaskState
*ts
= cpu
->opaque
;
9116 goto unimplemented_nowarn
;
9119 #ifdef TARGET_NR_getdomainname
9120 case TARGET_NR_getdomainname
:
9121 goto unimplemented_nowarn
;
9124 #ifdef TARGET_NR_clock_gettime
9125 case TARGET_NR_clock_gettime
:
9128 ret
= get_errno(clock_gettime(arg1
, &ts
));
9129 if (!is_error(ret
)) {
9130 host_to_target_timespec(arg2
, &ts
);
9135 #ifdef TARGET_NR_clock_getres
9136 case TARGET_NR_clock_getres
:
9139 ret
= get_errno(clock_getres(arg1
, &ts
));
9140 if (!is_error(ret
)) {
9141 host_to_target_timespec(arg2
, &ts
);
9146 #ifdef TARGET_NR_clock_nanosleep
9147 case TARGET_NR_clock_nanosleep
:
9150 target_to_host_timespec(&ts
, arg3
);
9151 ret
= get_errno(clock_nanosleep(arg1
, arg2
, &ts
, arg4
? &ts
: NULL
));
9153 host_to_target_timespec(arg4
, &ts
);
9155 #if defined(TARGET_PPC)
9156 /* clock_nanosleep is odd in that it returns positive errno values.
9157 * On PPC, CR0 bit 3 should be set in such a situation. */
9159 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
9166 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
9167 case TARGET_NR_set_tid_address
:
9168 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
9172 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
9173 case TARGET_NR_tkill
:
9174 ret
= get_errno(sys_tkill((int)arg1
, target_to_host_signal(arg2
)));
9178 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
9179 case TARGET_NR_tgkill
:
9180 ret
= get_errno(sys_tgkill((int)arg1
, (int)arg2
,
9181 target_to_host_signal(arg3
)));
9185 #ifdef TARGET_NR_set_robust_list
9186 case TARGET_NR_set_robust_list
:
9187 case TARGET_NR_get_robust_list
:
9188 /* The ABI for supporting robust futexes has userspace pass
9189 * the kernel a pointer to a linked list which is updated by
9190 * userspace after the syscall; the list is walked by the kernel
9191 * when the thread exits. Since the linked list in QEMU guest
9192 * memory isn't a valid linked list for the host and we have
9193 * no way to reliably intercept the thread-death event, we can't
9194 * support these. Silently return ENOSYS so that guest userspace
9195 * falls back to a non-robust futex implementation (which should
9196 * be OK except in the corner case of the guest crashing while
9197 * holding a mutex that is shared with another process via
9200 goto unimplemented_nowarn
;
9203 #if defined(TARGET_NR_utimensat)
9204 case TARGET_NR_utimensat
:
9206 struct timespec
*tsp
, ts
[2];
9210 target_to_host_timespec(ts
, arg3
);
9211 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
9215 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
9217 if (!(p
= lock_user_string(arg2
))) {
9218 ret
= -TARGET_EFAULT
;
9221 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
9222 unlock_user(p
, arg2
, 0);
9227 case TARGET_NR_futex
:
9228 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9230 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
9231 case TARGET_NR_inotify_init
:
9232 ret
= get_errno(sys_inotify_init());
9235 #ifdef CONFIG_INOTIFY1
9236 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
9237 case TARGET_NR_inotify_init1
:
9238 ret
= get_errno(sys_inotify_init1(arg1
));
9242 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
9243 case TARGET_NR_inotify_add_watch
:
9244 p
= lock_user_string(arg2
);
9245 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
9246 unlock_user(p
, arg2
, 0);
9249 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
9250 case TARGET_NR_inotify_rm_watch
:
9251 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
9255 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
9256 case TARGET_NR_mq_open
:
9258 struct mq_attr posix_mq_attr
, *attrp
;
9260 p
= lock_user_string(arg1
- 1);
9262 copy_from_user_mq_attr (&posix_mq_attr
, arg4
);
9263 attrp
= &posix_mq_attr
;
9267 ret
= get_errno(mq_open(p
, arg2
, arg3
, attrp
));
9268 unlock_user (p
, arg1
, 0);
9272 case TARGET_NR_mq_unlink
:
9273 p
= lock_user_string(arg1
- 1);
9274 ret
= get_errno(mq_unlink(p
));
9275 unlock_user (p
, arg1
, 0);
9278 case TARGET_NR_mq_timedsend
:
9282 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
9284 target_to_host_timespec(&ts
, arg5
);
9285 ret
= get_errno(mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
9286 host_to_target_timespec(arg5
, &ts
);
9289 ret
= get_errno(mq_send(arg1
, p
, arg3
, arg4
));
9290 unlock_user (p
, arg2
, arg3
);
9294 case TARGET_NR_mq_timedreceive
:
9299 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
9301 target_to_host_timespec(&ts
, arg5
);
9302 ret
= get_errno(mq_timedreceive(arg1
, p
, arg3
, &prio
, &ts
));
9303 host_to_target_timespec(arg5
, &ts
);
9306 ret
= get_errno(mq_receive(arg1
, p
, arg3
, &prio
));
9307 unlock_user (p
, arg2
, arg3
);
9309 put_user_u32(prio
, arg4
);
9313 /* Not implemented for now... */
9314 /* case TARGET_NR_mq_notify: */
9317 case TARGET_NR_mq_getsetattr
:
9319 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
9322 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
9323 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
9326 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
9327 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
9334 #ifdef CONFIG_SPLICE
9335 #ifdef TARGET_NR_tee
9338 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
9342 #ifdef TARGET_NR_splice
9343 case TARGET_NR_splice
:
9345 loff_t loff_in
, loff_out
;
9346 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
9348 get_user_u64(loff_in
, arg2
);
9349 ploff_in
= &loff_in
;
9352 get_user_u64(loff_out
, arg2
);
9353 ploff_out
= &loff_out
;
9355 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
9359 #ifdef TARGET_NR_vmsplice
9360 case TARGET_NR_vmsplice
:
9362 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
9364 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
9365 unlock_iovec(vec
, arg2
, arg3
, 0);
9367 ret
= -host_to_target_errno(errno
);
9372 #endif /* CONFIG_SPLICE */
9373 #ifdef CONFIG_EVENTFD
9374 #if defined(TARGET_NR_eventfd)
9375 case TARGET_NR_eventfd
:
9376 ret
= get_errno(eventfd(arg1
, 0));
9379 #if defined(TARGET_NR_eventfd2)
9380 case TARGET_NR_eventfd2
:
9382 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
9383 if (arg2
& TARGET_O_NONBLOCK
) {
9384 host_flags
|= O_NONBLOCK
;
9386 if (arg2
& TARGET_O_CLOEXEC
) {
9387 host_flags
|= O_CLOEXEC
;
9389 ret
= get_errno(eventfd(arg1
, host_flags
));
9393 #endif /* CONFIG_EVENTFD */
9394 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
9395 case TARGET_NR_fallocate
:
9396 #if TARGET_ABI_BITS == 32
9397 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
9398 target_offset64(arg5
, arg6
)));
9400 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
9404 #if defined(CONFIG_SYNC_FILE_RANGE)
9405 #if defined(TARGET_NR_sync_file_range)
9406 case TARGET_NR_sync_file_range
:
9407 #if TARGET_ABI_BITS == 32
9408 #if defined(TARGET_MIPS)
9409 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
9410 target_offset64(arg5
, arg6
), arg7
));
9412 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
9413 target_offset64(arg4
, arg5
), arg6
));
9414 #endif /* !TARGET_MIPS */
9416 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
9420 #if defined(TARGET_NR_sync_file_range2)
9421 case TARGET_NR_sync_file_range2
:
9422 /* This is like sync_file_range but the arguments are reordered */
9423 #if TARGET_ABI_BITS == 32
9424 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
9425 target_offset64(arg5
, arg6
), arg2
));
9427 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
9432 #if defined(CONFIG_EPOLL)
9433 #if defined(TARGET_NR_epoll_create)
9434 case TARGET_NR_epoll_create
:
9435 ret
= get_errno(epoll_create(arg1
));
9438 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
9439 case TARGET_NR_epoll_create1
:
9440 ret
= get_errno(epoll_create1(arg1
));
9443 #if defined(TARGET_NR_epoll_ctl)
9444 case TARGET_NR_epoll_ctl
:
9446 struct epoll_event ep
;
9447 struct epoll_event
*epp
= 0;
9449 struct target_epoll_event
*target_ep
;
9450 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
9453 ep
.events
= tswap32(target_ep
->events
);
9454 /* The epoll_data_t union is just opaque data to the kernel,
9455 * so we transfer all 64 bits across and need not worry what
9456 * actual data type it is.
9458 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
9459 unlock_user_struct(target_ep
, arg4
, 0);
9462 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
9467 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
9468 #define IMPLEMENT_EPOLL_PWAIT
9470 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
9471 #if defined(TARGET_NR_epoll_wait)
9472 case TARGET_NR_epoll_wait
:
9474 #if defined(IMPLEMENT_EPOLL_PWAIT)
9475 case TARGET_NR_epoll_pwait
:
9478 struct target_epoll_event
*target_ep
;
9479 struct epoll_event
*ep
;
9481 int maxevents
= arg3
;
9484 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
9485 maxevents
* sizeof(struct target_epoll_event
), 1);
9490 ep
= alloca(maxevents
* sizeof(struct epoll_event
));
9493 #if defined(IMPLEMENT_EPOLL_PWAIT)
9494 case TARGET_NR_epoll_pwait
:
9496 target_sigset_t
*target_set
;
9497 sigset_t _set
, *set
= &_set
;
9500 target_set
= lock_user(VERIFY_READ
, arg5
,
9501 sizeof(target_sigset_t
), 1);
9503 unlock_user(target_ep
, arg2
, 0);
9506 target_to_host_sigset(set
, target_set
);
9507 unlock_user(target_set
, arg5
, 0);
9512 ret
= get_errno(epoll_pwait(epfd
, ep
, maxevents
, timeout
, set
));
9516 #if defined(TARGET_NR_epoll_wait)
9517 case TARGET_NR_epoll_wait
:
9518 ret
= get_errno(epoll_wait(epfd
, ep
, maxevents
, timeout
));
9522 ret
= -TARGET_ENOSYS
;
9524 if (!is_error(ret
)) {
9526 for (i
= 0; i
< ret
; i
++) {
9527 target_ep
[i
].events
= tswap32(ep
[i
].events
);
9528 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
9531 unlock_user(target_ep
, arg2
, ret
* sizeof(struct target_epoll_event
));
9536 #ifdef TARGET_NR_prlimit64
9537 case TARGET_NR_prlimit64
:
9539 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
9540 struct target_rlimit64
*target_rnew
, *target_rold
;
9541 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
9542 int resource
= target_to_host_resource(arg2
);
9544 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
9547 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
9548 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
9549 unlock_user_struct(target_rnew
, arg3
, 0);
9553 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
9554 if (!is_error(ret
) && arg4
) {
9555 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
9558 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
9559 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
9560 unlock_user_struct(target_rold
, arg4
, 1);
9565 #ifdef TARGET_NR_gethostname
9566 case TARGET_NR_gethostname
:
9568 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
9570 ret
= get_errno(gethostname(name
, arg2
));
9571 unlock_user(name
, arg1
, arg2
);
9573 ret
= -TARGET_EFAULT
;
9578 #ifdef TARGET_NR_atomic_cmpxchg_32
9579 case TARGET_NR_atomic_cmpxchg_32
:
9581 /* should use start_exclusive from main.c */
9582 abi_ulong mem_value
;
9583 if (get_user_u32(mem_value
, arg6
)) {
9584 target_siginfo_t info
;
9585 info
.si_signo
= SIGSEGV
;
9587 info
.si_code
= TARGET_SEGV_MAPERR
;
9588 info
._sifields
._sigfault
._addr
= arg6
;
9589 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
9593 if (mem_value
== arg2
)
9594 put_user_u32(arg1
, arg6
);
9599 #ifdef TARGET_NR_atomic_barrier
9600 case TARGET_NR_atomic_barrier
:
9602 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
9608 #ifdef TARGET_NR_timer_create
9609 case TARGET_NR_timer_create
:
9611 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
9613 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
9616 int timer_index
= next_free_host_timer();
9618 if (timer_index
< 0) {
9619 ret
= -TARGET_EAGAIN
;
9621 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
9624 phost_sevp
= &host_sevp
;
9625 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
9631 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
9635 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
9644 #ifdef TARGET_NR_timer_settime
9645 case TARGET_NR_timer_settime
:
9647 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
9648 * struct itimerspec * old_value */
9649 target_timer_t timerid
= get_timer_id(arg1
);
9653 } else if (arg3
== 0) {
9654 ret
= -TARGET_EINVAL
;
9656 timer_t htimer
= g_posix_timers
[timerid
];
9657 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
9659 target_to_host_itimerspec(&hspec_new
, arg3
);
9661 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
9662 host_to_target_itimerspec(arg2
, &hspec_old
);
9668 #ifdef TARGET_NR_timer_gettime
9669 case TARGET_NR_timer_gettime
:
9671 /* args: timer_t timerid, struct itimerspec *curr_value */
9672 target_timer_t timerid
= get_timer_id(arg1
);
9677 ret
= -TARGET_EFAULT
;
9679 timer_t htimer
= g_posix_timers
[timerid
];
9680 struct itimerspec hspec
;
9681 ret
= get_errno(timer_gettime(htimer
, &hspec
));
9683 if (host_to_target_itimerspec(arg2
, &hspec
)) {
9684 ret
= -TARGET_EFAULT
;
9691 #ifdef TARGET_NR_timer_getoverrun
9692 case TARGET_NR_timer_getoverrun
:
9694 /* args: timer_t timerid */
9695 target_timer_t timerid
= get_timer_id(arg1
);
9700 timer_t htimer
= g_posix_timers
[timerid
];
9701 ret
= get_errno(timer_getoverrun(htimer
));
9707 #ifdef TARGET_NR_timer_delete
9708 case TARGET_NR_timer_delete
:
9710 /* args: timer_t timerid */
9711 target_timer_t timerid
= get_timer_id(arg1
);
9716 timer_t htimer
= g_posix_timers
[timerid
];
9717 ret
= get_errno(timer_delete(htimer
));
9718 g_posix_timers
[timerid
] = 0;
9724 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
9725 case TARGET_NR_timerfd_create
:
9726 ret
= get_errno(timerfd_create(arg1
,
9727 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
9731 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
9732 case TARGET_NR_timerfd_gettime
:
9734 struct itimerspec its_curr
;
9736 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
9738 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
9745 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
9746 case TARGET_NR_timerfd_settime
:
9748 struct itimerspec its_new
, its_old
, *p_new
;
9751 if (target_to_host_itimerspec(&its_new
, arg3
)) {
9759 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
9761 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
9768 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
9769 case TARGET_NR_ioprio_get
:
9770 ret
= get_errno(ioprio_get(arg1
, arg2
));
9774 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
9775 case TARGET_NR_ioprio_set
:
9776 ret
= get_errno(ioprio_set(arg1
, arg2
, arg3
));
9780 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
9781 case TARGET_NR_setns
:
9782 ret
= get_errno(setns(arg1
, arg2
));
9785 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
9786 case TARGET_NR_unshare
:
9787 ret
= get_errno(unshare(arg1
));
9793 gemu_log("qemu: Unsupported syscall: %d\n", num
);
9794 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
9795 unimplemented_nowarn
:
9797 ret
= -TARGET_ENOSYS
;
9802 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
9805 print_syscall_ret(num
, ret
);
9808 ret
= -TARGET_EFAULT
;