4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
32 #include <sys/types.h>
38 #include <sys/mount.h>
40 #include <sys/fsuid.h>
41 #include <sys/personality.h>
42 #include <sys/prctl.h>
43 #include <sys/resource.h>
46 #include <linux/capability.h>
50 int __clone2(int (*fn
)(void *), void *child_stack_base
,
51 size_t stack_size
, int flags
, void *arg
, ...);
53 #include <sys/socket.h>
57 #include <sys/times.h>
60 #include <sys/statfs.h>
62 #include <sys/sysinfo.h>
63 //#include <sys/user.h>
64 #include <netinet/ip.h>
65 #include <netinet/tcp.h>
66 #include <linux/wireless.h>
67 #include <linux/icmp.h>
68 #include "qemu-common.h"
69 #include "qemu/sockets.h"
71 #include <sys/timerfd.h>
77 #include <sys/eventfd.h>
80 #include <sys/epoll.h>
83 #include "qemu/xattr.h"
85 #ifdef CONFIG_SENDFILE
86 #include <sys/sendfile.h>
89 #define termios host_termios
90 #define winsize host_winsize
91 #define termio host_termio
92 #define sgttyb host_sgttyb /* same as target */
93 #define tchars host_tchars /* same as target */
94 #define ltchars host_ltchars /* same as target */
96 #include <linux/termios.h>
97 #include <linux/unistd.h>
98 #include <linux/cdrom.h>
99 #include <linux/hdreg.h>
100 #include <linux/soundcard.h>
101 #include <linux/kd.h>
102 #include <linux/mtio.h>
103 #include <linux/fs.h>
104 #if defined(CONFIG_FIEMAP)
105 #include <linux/fiemap.h>
107 #include <linux/fb.h>
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include "linux_loop.h"
119 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
120 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
124 //#include <linux/msdos_fs.h>
125 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
126 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
137 #define _syscall0(type,name) \
138 static type name (void) \
140 return syscall(__NR_##name); \
143 #define _syscall1(type,name,type1,arg1) \
144 static type name (type1 arg1) \
146 return syscall(__NR_##name, arg1); \
149 #define _syscall2(type,name,type1,arg1,type2,arg2) \
150 static type name (type1 arg1,type2 arg2) \
152 return syscall(__NR_##name, arg1, arg2); \
155 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
156 static type name (type1 arg1,type2 arg2,type3 arg3) \
158 return syscall(__NR_##name, arg1, arg2, arg3); \
161 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
162 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
164 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
167 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
169 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
171 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
175 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
176 type5,arg5,type6,arg6) \
177 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
180 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
184 #define __NR_sys_uname __NR_uname
185 #define __NR_sys_getcwd1 __NR_getcwd
186 #define __NR_sys_getdents __NR_getdents
187 #define __NR_sys_getdents64 __NR_getdents64
188 #define __NR_sys_getpriority __NR_getpriority
189 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
190 #define __NR_sys_syslog __NR_syslog
191 #define __NR_sys_tgkill __NR_tgkill
192 #define __NR_sys_tkill __NR_tkill
193 #define __NR_sys_futex __NR_futex
194 #define __NR_sys_inotify_init __NR_inotify_init
195 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
196 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
198 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
200 #define __NR__llseek __NR_lseek
203 /* Newer kernel ports have llseek() instead of _llseek() */
204 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
205 #define TARGET_NR__llseek TARGET_NR_llseek
209 _syscall0(int, gettid
)
211 /* This is a replacement for the host gettid() and must return a host
213 static int gettid(void) {
217 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
218 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
220 #if !defined(__NR_getdents) || \
221 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
222 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
224 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
225 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
226 loff_t
*, res
, uint
, wh
);
228 _syscall3(int,sys_rt_sigqueueinfo
,int,pid
,int,sig
,siginfo_t
*,uinfo
)
229 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
230 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
231 _syscall3(int,sys_tgkill
,int,tgid
,int,pid
,int,sig
)
233 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
234 _syscall2(int,sys_tkill
,int,tid
,int,sig
)
236 #ifdef __NR_exit_group
237 _syscall1(int,exit_group
,int,error_code
)
239 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
240 _syscall1(int,set_tid_address
,int *,tidptr
)
242 #if defined(TARGET_NR_futex) && defined(__NR_futex)
243 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
244 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
246 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
247 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
248 unsigned long *, user_mask_ptr
);
249 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
250 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
251 unsigned long *, user_mask_ptr
);
252 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
254 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
255 struct __user_cap_data_struct
*, data
);
256 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
257 struct __user_cap_data_struct
*, data
);
258 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
259 _syscall2(int, ioprio_get
, int, which
, int, who
)
261 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
262 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
265 static bitmask_transtbl fcntl_flags_tbl
[] = {
266 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
267 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
268 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
269 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
270 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
271 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
272 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
273 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
274 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
275 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
276 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
277 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
278 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
279 #if defined(O_DIRECT)
280 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
282 #if defined(O_NOATIME)
283 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
285 #if defined(O_CLOEXEC)
286 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
289 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
291 /* Don't terminate the list prematurely on 64-bit host+guest. */
292 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
293 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
/* getcwd(2)-style wrapper over the libc getcwd(3): on success the
 * syscall convention is to return the path length *including* the
 * terminating NUL; on failure return -1 (getcwd has set errno). */
static int sys_getcwd1(char *buf, size_t size)
{
    char *cwd = getcwd(buf, size);

    /* getcwd() sets errno on failure */
    return cwd ? (int)strlen(cwd) + 1 : -1;
}
/*
 * Shim around openat(2) that mirrors open(2)'s variadic convention:
 * the 'mode' argument is only meaningful — and only forwarded — when
 * O_CREAT is present in 'flags'.
 */
static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
{
    if (flags & O_CREAT) {
        return openat(dirfd, pathname, flags, mode);
    }
    return openat(dirfd, pathname, flags);
}
319 #ifdef TARGET_NR_utimensat
320 #ifdef CONFIG_UTIMENSAT
/* Dispatch utimensat(2): a NULL pathname means "operate on the fd
 * itself", which libc exposes as futimens(3) rather than utimensat(3). */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    return pathname ? utimensat(dirfd, pathname, times, flags)
                    : futimens(dirfd, times);
}
329 #elif defined(__NR_utimensat)
330 #define __NR_sys_utimensat __NR_utimensat
331 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
332 const struct timespec
*,tsp
,int,flags
)
334 static int sys_utimensat(int dirfd
, const char *pathname
,
335 const struct timespec times
[2], int flags
)
341 #endif /* TARGET_NR_utimensat */
343 #ifdef CONFIG_INOTIFY
344 #include <sys/inotify.h>
346 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin wrapper so the syscall table can point at a uniform symbol;
 * forwards straight to the host inotify_init(2). */
static int sys_inotify_init(void)
{
    return inotify_init();
}
352 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Forward to the host inotify_add_watch(2); returns the new watch
 * descriptor, or -1 with errno set. */
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return inotify_add_watch(fd, pathname, mask);
}
358 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Forward to the host inotify_rm_watch(2); removes watch 'wd' from
 * the inotify instance 'fd'. */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return inotify_rm_watch(fd, wd);
}
364 #ifdef CONFIG_INOTIFY1
365 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Forward to the host inotify_init1(2), which accepts IN_NONBLOCK /
 * IN_CLOEXEC creation flags. */
static int sys_inotify_init1(int flags)
{
    return inotify_init1(flags);
}
373 /* Userspace can usually survive runtime without inotify */
374 #undef TARGET_NR_inotify_init
375 #undef TARGET_NR_inotify_init1
376 #undef TARGET_NR_inotify_add_watch
377 #undef TARGET_NR_inotify_rm_watch
378 #endif /* CONFIG_INOTIFY */
380 #if defined(TARGET_NR_ppoll)
382 # define __NR_ppoll -1
384 #define __NR_sys_ppoll __NR_ppoll
385 _syscall5(int, sys_ppoll
, struct pollfd
*, fds
, nfds_t
, nfds
,
386 struct timespec
*, timeout
, const sigset_t
*, sigmask
,
390 #if defined(TARGET_NR_pselect6)
391 #ifndef __NR_pselect6
392 # define __NR_pselect6 -1
394 #define __NR_sys_pselect6 __NR_pselect6
395 _syscall6(int, sys_pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
,
396 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
);
399 #if defined(TARGET_NR_prlimit64)
400 #ifndef __NR_prlimit64
401 # define __NR_prlimit64 -1
403 #define __NR_sys_prlimit64 __NR_prlimit64
404 /* The glibc rlimit structure may not be that used by the underlying syscall */
405 struct host_rlimit64
{
409 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
410 const struct host_rlimit64
*, new_limit
,
411 struct host_rlimit64
*, old_limit
)
415 #if defined(TARGET_NR_timer_create)
416 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
417 static timer_t g_posix_timers
[32] = { 0, } ;
419 static inline int next_free_host_timer(void)
422 /* FIXME: Does finding the next free slot require a lock? */
423 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
424 if (g_posix_timers
[k
] == 0) {
425 g_posix_timers
[k
] = (timer_t
) 1;
433 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
435 static inline int regpairs_aligned(void *cpu_env
) {
436 return ((((CPUARMState
*)cpu_env
)->eabi
) == 1) ;
438 #elif defined(TARGET_MIPS)
439 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
440 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
441 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
442 * of registers which translates to the same as ARM/MIPS, because we start with
444 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
446 static inline int regpairs_aligned(void *cpu_env
) { return 0; }
449 #define ERRNO_TABLE_SIZE 1200
451 /* target_to_host_errno_table[] is initialized from
452 * host_to_target_errno_table[] in syscall_init(). */
453 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
457 * This list is the union of errno values overridden in asm-<arch>/errno.h
458 * minus the errnos that are not actually generic to all archs.
460 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
461 [EIDRM
] = TARGET_EIDRM
,
462 [ECHRNG
] = TARGET_ECHRNG
,
463 [EL2NSYNC
] = TARGET_EL2NSYNC
,
464 [EL3HLT
] = TARGET_EL3HLT
,
465 [EL3RST
] = TARGET_EL3RST
,
466 [ELNRNG
] = TARGET_ELNRNG
,
467 [EUNATCH
] = TARGET_EUNATCH
,
468 [ENOCSI
] = TARGET_ENOCSI
,
469 [EL2HLT
] = TARGET_EL2HLT
,
470 [EDEADLK
] = TARGET_EDEADLK
,
471 [ENOLCK
] = TARGET_ENOLCK
,
472 [EBADE
] = TARGET_EBADE
,
473 [EBADR
] = TARGET_EBADR
,
474 [EXFULL
] = TARGET_EXFULL
,
475 [ENOANO
] = TARGET_ENOANO
,
476 [EBADRQC
] = TARGET_EBADRQC
,
477 [EBADSLT
] = TARGET_EBADSLT
,
478 [EBFONT
] = TARGET_EBFONT
,
479 [ENOSTR
] = TARGET_ENOSTR
,
480 [ENODATA
] = TARGET_ENODATA
,
481 [ETIME
] = TARGET_ETIME
,
482 [ENOSR
] = TARGET_ENOSR
,
483 [ENONET
] = TARGET_ENONET
,
484 [ENOPKG
] = TARGET_ENOPKG
,
485 [EREMOTE
] = TARGET_EREMOTE
,
486 [ENOLINK
] = TARGET_ENOLINK
,
487 [EADV
] = TARGET_EADV
,
488 [ESRMNT
] = TARGET_ESRMNT
,
489 [ECOMM
] = TARGET_ECOMM
,
490 [EPROTO
] = TARGET_EPROTO
,
491 [EDOTDOT
] = TARGET_EDOTDOT
,
492 [EMULTIHOP
] = TARGET_EMULTIHOP
,
493 [EBADMSG
] = TARGET_EBADMSG
,
494 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
495 [EOVERFLOW
] = TARGET_EOVERFLOW
,
496 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
497 [EBADFD
] = TARGET_EBADFD
,
498 [EREMCHG
] = TARGET_EREMCHG
,
499 [ELIBACC
] = TARGET_ELIBACC
,
500 [ELIBBAD
] = TARGET_ELIBBAD
,
501 [ELIBSCN
] = TARGET_ELIBSCN
,
502 [ELIBMAX
] = TARGET_ELIBMAX
,
503 [ELIBEXEC
] = TARGET_ELIBEXEC
,
504 [EILSEQ
] = TARGET_EILSEQ
,
505 [ENOSYS
] = TARGET_ENOSYS
,
506 [ELOOP
] = TARGET_ELOOP
,
507 [ERESTART
] = TARGET_ERESTART
,
508 [ESTRPIPE
] = TARGET_ESTRPIPE
,
509 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
510 [EUSERS
] = TARGET_EUSERS
,
511 [ENOTSOCK
] = TARGET_ENOTSOCK
,
512 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
513 [EMSGSIZE
] = TARGET_EMSGSIZE
,
514 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
515 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
516 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
517 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
518 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
519 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
520 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
521 [EADDRINUSE
] = TARGET_EADDRINUSE
,
522 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
523 [ENETDOWN
] = TARGET_ENETDOWN
,
524 [ENETUNREACH
] = TARGET_ENETUNREACH
,
525 [ENETRESET
] = TARGET_ENETRESET
,
526 [ECONNABORTED
] = TARGET_ECONNABORTED
,
527 [ECONNRESET
] = TARGET_ECONNRESET
,
528 [ENOBUFS
] = TARGET_ENOBUFS
,
529 [EISCONN
] = TARGET_EISCONN
,
530 [ENOTCONN
] = TARGET_ENOTCONN
,
531 [EUCLEAN
] = TARGET_EUCLEAN
,
532 [ENOTNAM
] = TARGET_ENOTNAM
,
533 [ENAVAIL
] = TARGET_ENAVAIL
,
534 [EISNAM
] = TARGET_EISNAM
,
535 [EREMOTEIO
] = TARGET_EREMOTEIO
,
536 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
537 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
538 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
539 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
540 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
541 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
542 [EALREADY
] = TARGET_EALREADY
,
543 [EINPROGRESS
] = TARGET_EINPROGRESS
,
544 [ESTALE
] = TARGET_ESTALE
,
545 [ECANCELED
] = TARGET_ECANCELED
,
546 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
547 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
549 [ENOKEY
] = TARGET_ENOKEY
,
552 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
555 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
558 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
561 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
563 #ifdef ENOTRECOVERABLE
564 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
568 static inline int host_to_target_errno(int err
)
570 if(host_to_target_errno_table
[err
])
571 return host_to_target_errno_table
[err
];
575 static inline int target_to_host_errno(int err
)
577 if (target_to_host_errno_table
[err
])
578 return target_to_host_errno_table
[err
];
582 static inline abi_long
get_errno(abi_long ret
)
585 return -host_to_target_errno(errno
);
590 static inline int is_error(abi_long ret
)
592 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
595 char *target_strerror(int err
)
597 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
600 return strerror(target_to_host_errno(err
));
603 static inline int host_to_target_sock_type(int host_type
)
607 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
609 target_type
= TARGET_SOCK_DGRAM
;
612 target_type
= TARGET_SOCK_STREAM
;
615 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
619 #if defined(SOCK_CLOEXEC)
620 if (host_type
& SOCK_CLOEXEC
) {
621 target_type
|= TARGET_SOCK_CLOEXEC
;
625 #if defined(SOCK_NONBLOCK)
626 if (host_type
& SOCK_NONBLOCK
) {
627 target_type
|= TARGET_SOCK_NONBLOCK
;
634 static abi_ulong target_brk
;
635 static abi_ulong target_original_brk
;
636 static abi_ulong brk_page
;
638 void target_set_brk(abi_ulong new_brk
)
640 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
641 brk_page
= HOST_PAGE_ALIGN(target_brk
);
644 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
645 #define DEBUGF_BRK(message, args...)
647 /* do_brk() must return target values and target errnos. */
648 abi_long
do_brk(abi_ulong new_brk
)
650 abi_long mapped_addr
;
653 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
656 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
659 if (new_brk
< target_original_brk
) {
660 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
665 /* If the new brk is less than the highest page reserved to the
666 * target heap allocation, set it and we're almost done... */
667 if (new_brk
<= brk_page
) {
668 /* Heap contents are initialized to zero, as for anonymous
670 if (new_brk
> target_brk
) {
671 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
673 target_brk
= new_brk
;
674 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
678 /* We need to allocate more memory after the brk... Note that
679 * we don't use MAP_FIXED because that will map over the top of
680 * any existing mapping (like the one with the host libc or qemu
681 * itself); instead we treat "mapped but at wrong address" as
682 * a failure and unmap again.
684 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
685 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
686 PROT_READ
|PROT_WRITE
,
687 MAP_ANON
|MAP_PRIVATE
, 0, 0));
689 if (mapped_addr
== brk_page
) {
690 /* Heap contents are initialized to zero, as for anonymous
691 * mapped pages. Technically the new pages are already
692 * initialized to zero since they *are* anonymous mapped
693 * pages, however we have to take care with the contents that
694 * come from the remaining part of the previous page: it may
695 * contains garbage data due to a previous heap usage (grown
697 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
699 target_brk
= new_brk
;
700 brk_page
= HOST_PAGE_ALIGN(target_brk
);
701 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
704 } else if (mapped_addr
!= -1) {
705 /* Mapped but at wrong address, meaning there wasn't actually
706 * enough space for this brk.
708 target_munmap(mapped_addr
, new_alloc_size
);
710 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
713 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
716 #if defined(TARGET_ALPHA)
717 /* We (partially) emulate OSF/1 on Alpha, which requires we
718 return a proper errno, not an unchanged brk value. */
719 return -TARGET_ENOMEM
;
721 /* For everything else, return the previous break. */
725 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
726 abi_ulong target_fds_addr
,
730 abi_ulong b
, *target_fds
;
732 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
733 if (!(target_fds
= lock_user(VERIFY_READ
,
735 sizeof(abi_ulong
) * nw
,
737 return -TARGET_EFAULT
;
741 for (i
= 0; i
< nw
; i
++) {
742 /* grab the abi_ulong */
743 __get_user(b
, &target_fds
[i
]);
744 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
745 /* check the bit inside the abi_ulong */
752 unlock_user(target_fds
, target_fds_addr
, 0);
757 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
758 abi_ulong target_fds_addr
,
761 if (target_fds_addr
) {
762 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
763 return -TARGET_EFAULT
;
771 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
777 abi_ulong
*target_fds
;
779 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
780 if (!(target_fds
= lock_user(VERIFY_WRITE
,
782 sizeof(abi_ulong
) * nw
,
784 return -TARGET_EFAULT
;
787 for (i
= 0; i
< nw
; i
++) {
789 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
790 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
793 __put_user(v
, &target_fds
[i
]);
796 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
801 #if defined(__alpha__)
807 static inline abi_long
host_to_target_clock_t(long ticks
)
809 #if HOST_HZ == TARGET_HZ
812 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
816 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
817 const struct rusage
*rusage
)
819 struct target_rusage
*target_rusage
;
821 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
822 return -TARGET_EFAULT
;
823 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
824 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
825 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
826 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
827 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
828 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
829 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
830 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
831 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
832 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
833 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
834 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
835 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
836 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
837 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
838 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
839 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
840 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
841 unlock_user_struct(target_rusage
, target_addr
, 1);
846 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
848 abi_ulong target_rlim_swap
;
851 target_rlim_swap
= tswapal(target_rlim
);
852 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
853 return RLIM_INFINITY
;
855 result
= target_rlim_swap
;
856 if (target_rlim_swap
!= (rlim_t
)result
)
857 return RLIM_INFINITY
;
862 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
864 abi_ulong target_rlim_swap
;
867 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
868 target_rlim_swap
= TARGET_RLIM_INFINITY
;
870 target_rlim_swap
= rlim
;
871 result
= tswapal(target_rlim_swap
);
876 static inline int target_to_host_resource(int code
)
879 case TARGET_RLIMIT_AS
:
881 case TARGET_RLIMIT_CORE
:
883 case TARGET_RLIMIT_CPU
:
885 case TARGET_RLIMIT_DATA
:
887 case TARGET_RLIMIT_FSIZE
:
889 case TARGET_RLIMIT_LOCKS
:
891 case TARGET_RLIMIT_MEMLOCK
:
892 return RLIMIT_MEMLOCK
;
893 case TARGET_RLIMIT_MSGQUEUE
:
894 return RLIMIT_MSGQUEUE
;
895 case TARGET_RLIMIT_NICE
:
897 case TARGET_RLIMIT_NOFILE
:
898 return RLIMIT_NOFILE
;
899 case TARGET_RLIMIT_NPROC
:
901 case TARGET_RLIMIT_RSS
:
903 case TARGET_RLIMIT_RTPRIO
:
904 return RLIMIT_RTPRIO
;
905 case TARGET_RLIMIT_SIGPENDING
:
906 return RLIMIT_SIGPENDING
;
907 case TARGET_RLIMIT_STACK
:
914 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
915 abi_ulong target_tv_addr
)
917 struct target_timeval
*target_tv
;
919 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
920 return -TARGET_EFAULT
;
922 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
923 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
925 unlock_user_struct(target_tv
, target_tv_addr
, 0);
930 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
931 const struct timeval
*tv
)
933 struct target_timeval
*target_tv
;
935 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
936 return -TARGET_EFAULT
;
938 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
939 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
941 unlock_user_struct(target_tv
, target_tv_addr
, 1);
946 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
947 abi_ulong target_tz_addr
)
949 struct target_timezone
*target_tz
;
951 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
952 return -TARGET_EFAULT
;
955 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
956 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
958 unlock_user_struct(target_tz
, target_tz_addr
, 0);
963 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
966 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
967 abi_ulong target_mq_attr_addr
)
969 struct target_mq_attr
*target_mq_attr
;
971 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
972 target_mq_attr_addr
, 1))
973 return -TARGET_EFAULT
;
975 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
976 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
977 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
978 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
980 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
985 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
986 const struct mq_attr
*attr
)
988 struct target_mq_attr
*target_mq_attr
;
990 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
991 target_mq_attr_addr
, 0))
992 return -TARGET_EFAULT
;
994 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
995 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
996 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
997 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
999 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1005 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1006 /* do_select() must return target values and target errnos. */
1007 static abi_long
do_select(int n
,
1008 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1009 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1011 fd_set rfds
, wfds
, efds
;
1012 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1013 struct timeval tv
, *tv_ptr
;
1016 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1020 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1024 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1029 if (target_tv_addr
) {
1030 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1031 return -TARGET_EFAULT
;
1037 ret
= get_errno(select(n
, rfds_ptr
, wfds_ptr
, efds_ptr
, tv_ptr
));
1039 if (!is_error(ret
)) {
1040 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1041 return -TARGET_EFAULT
;
1042 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1043 return -TARGET_EFAULT
;
1044 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1045 return -TARGET_EFAULT
;
1047 if (target_tv_addr
&& copy_to_user_timeval(target_tv_addr
, &tv
))
1048 return -TARGET_EFAULT
;
1055 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1058 return pipe2(host_pipe
, flags
);
1064 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1065 int flags
, int is_pipe2
)
1069 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1072 return get_errno(ret
);
1074 /* Several targets have special calling conventions for the original
1075 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1077 #if defined(TARGET_ALPHA)
1078 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1079 return host_pipe
[0];
1080 #elif defined(TARGET_MIPS)
1081 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1082 return host_pipe
[0];
1083 #elif defined(TARGET_SH4)
1084 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1085 return host_pipe
[0];
1086 #elif defined(TARGET_SPARC)
1087 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1088 return host_pipe
[0];
1092 if (put_user_s32(host_pipe
[0], pipedes
)
1093 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1094 return -TARGET_EFAULT
;
1095 return get_errno(ret
);
1098 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1099 abi_ulong target_addr
,
1102 struct target_ip_mreqn
*target_smreqn
;
1104 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1106 return -TARGET_EFAULT
;
1107 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1108 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1109 if (len
== sizeof(struct target_ip_mreqn
))
1110 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1111 unlock_user(target_smreqn
, target_addr
, 0);
1116 static inline abi_long
target_to_host_sockaddr(struct sockaddr
*addr
,
1117 abi_ulong target_addr
,
1120 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1121 sa_family_t sa_family
;
1122 struct target_sockaddr
*target_saddr
;
1124 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1126 return -TARGET_EFAULT
;
1128 sa_family
= tswap16(target_saddr
->sa_family
);
1130 /* Oops. The caller might send a incomplete sun_path; sun_path
1131 * must be terminated by \0 (see the manual page), but
1132 * unfortunately it is quite common to specify sockaddr_un
1133 * length as "strlen(x->sun_path)" while it should be
1134 * "strlen(...) + 1". We'll fix that here if needed.
1135 * Linux kernel has a similar feature.
1138 if (sa_family
== AF_UNIX
) {
1139 if (len
< unix_maxlen
&& len
> 0) {
1140 char *cp
= (char*)target_saddr
;
1142 if ( cp
[len
-1] && !cp
[len
] )
1145 if (len
> unix_maxlen
)
1149 memcpy(addr
, target_saddr
, len
);
1150 addr
->sa_family
= sa_family
;
1151 if (sa_family
== AF_PACKET
) {
1152 struct target_sockaddr_ll
*lladdr
;
1154 lladdr
= (struct target_sockaddr_ll
*)addr
;
1155 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1156 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1158 unlock_user(target_saddr
, target_addr
, 0);
1163 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1164 struct sockaddr
*addr
,
1167 struct target_sockaddr
*target_saddr
;
1169 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1171 return -TARGET_EFAULT
;
1172 memcpy(target_saddr
, addr
, len
);
1173 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1174 unlock_user(target_saddr
, target_addr
, len
);
1179 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1180 struct target_msghdr
*target_msgh
)
1182 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1183 abi_long msg_controllen
;
1184 abi_ulong target_cmsg_addr
;
1185 struct target_cmsghdr
*target_cmsg
;
1186 socklen_t space
= 0;
1188 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1189 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1191 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1192 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1194 return -TARGET_EFAULT
;
1196 while (cmsg
&& target_cmsg
) {
1197 void *data
= CMSG_DATA(cmsg
);
1198 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1200 int len
= tswapal(target_cmsg
->cmsg_len
)
1201 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
1203 space
+= CMSG_SPACE(len
);
1204 if (space
> msgh
->msg_controllen
) {
1205 space
-= CMSG_SPACE(len
);
1206 /* This is a QEMU bug, since we allocated the payload
1207 * area ourselves (unlike overflow in host-to-target
1208 * conversion, which is just the guest giving us a buffer
1209 * that's too small). It can't happen for the payload types
1210 * we currently support; if it becomes an issue in future
1211 * we would need to improve our allocation strategy to
1212 * something more intelligent than "twice the size of the
1213 * target buffer we're reading from".
1215 gemu_log("Host cmsg overflow\n");
1219 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1220 cmsg
->cmsg_level
= SOL_SOCKET
;
1222 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1224 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1225 cmsg
->cmsg_len
= CMSG_LEN(len
);
1227 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1228 int *fd
= (int *)data
;
1229 int *target_fd
= (int *)target_data
;
1230 int i
, numfds
= len
/ sizeof(int);
1232 for (i
= 0; i
< numfds
; i
++) {
1233 __get_user(fd
[i
], target_fd
+ i
);
1235 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1236 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1237 struct ucred
*cred
= (struct ucred
*)data
;
1238 struct target_ucred
*target_cred
=
1239 (struct target_ucred
*)target_data
;
1241 __get_user(cred
->pid
, &target_cred
->pid
);
1242 __get_user(cred
->uid
, &target_cred
->uid
);
1243 __get_user(cred
->gid
, &target_cred
->gid
);
1245 gemu_log("Unsupported ancillary data: %d/%d\n",
1246 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1247 memcpy(data
, target_data
, len
);
1250 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1251 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
);
1253 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1255 msgh
->msg_controllen
= space
;
1259 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1260 struct msghdr
*msgh
)
1262 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1263 abi_long msg_controllen
;
1264 abi_ulong target_cmsg_addr
;
1265 struct target_cmsghdr
*target_cmsg
;
1266 socklen_t space
= 0;
1268 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1269 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1271 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1272 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1274 return -TARGET_EFAULT
;
1276 while (cmsg
&& target_cmsg
) {
1277 void *data
= CMSG_DATA(cmsg
);
1278 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1280 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
1281 int tgt_len
, tgt_space
;
1283 /* We never copy a half-header but may copy half-data;
1284 * this is Linux's behaviour in put_cmsg(). Note that
1285 * truncation here is a guest problem (which we report
1286 * to the guest via the CTRUNC bit), unlike truncation
1287 * in target_to_host_cmsg, which is a QEMU bug.
1289 if (msg_controllen
< sizeof(struct cmsghdr
)) {
1290 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1294 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1295 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1297 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1299 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1301 tgt_len
= TARGET_CMSG_LEN(len
);
1303 /* Payload types which need a different size of payload on
1304 * the target must adjust tgt_len here.
1306 switch (cmsg
->cmsg_level
) {
1308 switch (cmsg
->cmsg_type
) {
1310 tgt_len
= sizeof(struct target_timeval
);
1319 if (msg_controllen
< tgt_len
) {
1320 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1321 tgt_len
= msg_controllen
;
1324 /* We must now copy-and-convert len bytes of payload
1325 * into tgt_len bytes of destination space. Bear in mind
1326 * that in both source and destination we may be dealing
1327 * with a truncated value!
1329 switch (cmsg
->cmsg_level
) {
1331 switch (cmsg
->cmsg_type
) {
1334 int *fd
= (int *)data
;
1335 int *target_fd
= (int *)target_data
;
1336 int i
, numfds
= tgt_len
/ sizeof(int);
1338 for (i
= 0; i
< numfds
; i
++) {
1339 __put_user(fd
[i
], target_fd
+ i
);
1345 struct timeval
*tv
= (struct timeval
*)data
;
1346 struct target_timeval
*target_tv
=
1347 (struct target_timeval
*)target_data
;
1349 if (len
!= sizeof(struct timeval
) ||
1350 tgt_len
!= sizeof(struct target_timeval
)) {
1354 /* copy struct timeval to target */
1355 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1356 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1359 case SCM_CREDENTIALS
:
1361 struct ucred
*cred
= (struct ucred
*)data
;
1362 struct target_ucred
*target_cred
=
1363 (struct target_ucred
*)target_data
;
1365 __put_user(cred
->pid
, &target_cred
->pid
);
1366 __put_user(cred
->uid
, &target_cred
->uid
);
1367 __put_user(cred
->gid
, &target_cred
->gid
);
1377 gemu_log("Unsupported ancillary data: %d/%d\n",
1378 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1379 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1380 if (tgt_len
> len
) {
1381 memset(target_data
+ len
, 0, tgt_len
- len
);
1385 target_cmsg
->cmsg_len
= tswapal(tgt_len
);
1386 tgt_space
= TARGET_CMSG_SPACE(tgt_len
);
1387 if (msg_controllen
< tgt_space
) {
1388 tgt_space
= msg_controllen
;
1390 msg_controllen
-= tgt_space
;
1392 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1393 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
);
1395 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1397 target_msgh
->msg_controllen
= tswapal(space
);
1401 /* do_setsockopt() Must return target values and target errnos. */
1402 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
1403 abi_ulong optval_addr
, socklen_t optlen
)
1407 struct ip_mreqn
*ip_mreq
;
1408 struct ip_mreq_source
*ip_mreq_source
;
1412 /* TCP options all take an 'int' value. */
1413 if (optlen
< sizeof(uint32_t))
1414 return -TARGET_EINVAL
;
1416 if (get_user_u32(val
, optval_addr
))
1417 return -TARGET_EFAULT
;
1418 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1425 case IP_ROUTER_ALERT
:
1429 case IP_MTU_DISCOVER
:
1435 case IP_MULTICAST_TTL
:
1436 case IP_MULTICAST_LOOP
:
1438 if (optlen
>= sizeof(uint32_t)) {
1439 if (get_user_u32(val
, optval_addr
))
1440 return -TARGET_EFAULT
;
1441 } else if (optlen
>= 1) {
1442 if (get_user_u8(val
, optval_addr
))
1443 return -TARGET_EFAULT
;
1445 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1447 case IP_ADD_MEMBERSHIP
:
1448 case IP_DROP_MEMBERSHIP
:
1449 if (optlen
< sizeof (struct target_ip_mreq
) ||
1450 optlen
> sizeof (struct target_ip_mreqn
))
1451 return -TARGET_EINVAL
;
1453 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
1454 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
1455 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
1458 case IP_BLOCK_SOURCE
:
1459 case IP_UNBLOCK_SOURCE
:
1460 case IP_ADD_SOURCE_MEMBERSHIP
:
1461 case IP_DROP_SOURCE_MEMBERSHIP
:
1462 if (optlen
!= sizeof (struct target_ip_mreq_source
))
1463 return -TARGET_EINVAL
;
1465 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
1466 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
1467 unlock_user (ip_mreq_source
, optval_addr
, 0);
1476 case IPV6_MTU_DISCOVER
:
1479 case IPV6_RECVPKTINFO
:
1481 if (optlen
< sizeof(uint32_t)) {
1482 return -TARGET_EINVAL
;
1484 if (get_user_u32(val
, optval_addr
)) {
1485 return -TARGET_EFAULT
;
1487 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1488 &val
, sizeof(val
)));
1497 /* struct icmp_filter takes an u32 value */
1498 if (optlen
< sizeof(uint32_t)) {
1499 return -TARGET_EINVAL
;
1502 if (get_user_u32(val
, optval_addr
)) {
1503 return -TARGET_EFAULT
;
1505 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1506 &val
, sizeof(val
)));
1513 case TARGET_SOL_SOCKET
:
1515 case TARGET_SO_RCVTIMEO
:
1519 optname
= SO_RCVTIMEO
;
1522 if (optlen
!= sizeof(struct target_timeval
)) {
1523 return -TARGET_EINVAL
;
1526 if (copy_from_user_timeval(&tv
, optval_addr
)) {
1527 return -TARGET_EFAULT
;
1530 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
1534 case TARGET_SO_SNDTIMEO
:
1535 optname
= SO_SNDTIMEO
;
1537 case TARGET_SO_ATTACH_FILTER
:
1539 struct target_sock_fprog
*tfprog
;
1540 struct target_sock_filter
*tfilter
;
1541 struct sock_fprog fprog
;
1542 struct sock_filter
*filter
;
1545 if (optlen
!= sizeof(*tfprog
)) {
1546 return -TARGET_EINVAL
;
1548 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
1549 return -TARGET_EFAULT
;
1551 if (!lock_user_struct(VERIFY_READ
, tfilter
,
1552 tswapal(tfprog
->filter
), 0)) {
1553 unlock_user_struct(tfprog
, optval_addr
, 1);
1554 return -TARGET_EFAULT
;
1557 fprog
.len
= tswap16(tfprog
->len
);
1558 filter
= malloc(fprog
.len
* sizeof(*filter
));
1559 if (filter
== NULL
) {
1560 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
1561 unlock_user_struct(tfprog
, optval_addr
, 1);
1562 return -TARGET_ENOMEM
;
1564 for (i
= 0; i
< fprog
.len
; i
++) {
1565 filter
[i
].code
= tswap16(tfilter
[i
].code
);
1566 filter
[i
].jt
= tfilter
[i
].jt
;
1567 filter
[i
].jf
= tfilter
[i
].jf
;
1568 filter
[i
].k
= tswap32(tfilter
[i
].k
);
1570 fprog
.filter
= filter
;
1572 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
1573 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
1576 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
1577 unlock_user_struct(tfprog
, optval_addr
, 1);
1580 case TARGET_SO_BINDTODEVICE
:
1582 char *dev_ifname
, *addr_ifname
;
1584 if (optlen
> IFNAMSIZ
- 1) {
1585 optlen
= IFNAMSIZ
- 1;
1587 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
1589 return -TARGET_EFAULT
;
1591 optname
= SO_BINDTODEVICE
;
1592 addr_ifname
= alloca(IFNAMSIZ
);
1593 memcpy(addr_ifname
, dev_ifname
, optlen
);
1594 addr_ifname
[optlen
] = 0;
1595 ret
= get_errno(setsockopt(sockfd
, level
, optname
, addr_ifname
, optlen
));
1596 unlock_user (dev_ifname
, optval_addr
, 0);
1599 /* Options with 'int' argument. */
1600 case TARGET_SO_DEBUG
:
1603 case TARGET_SO_REUSEADDR
:
1604 optname
= SO_REUSEADDR
;
1606 case TARGET_SO_TYPE
:
1609 case TARGET_SO_ERROR
:
1612 case TARGET_SO_DONTROUTE
:
1613 optname
= SO_DONTROUTE
;
1615 case TARGET_SO_BROADCAST
:
1616 optname
= SO_BROADCAST
;
1618 case TARGET_SO_SNDBUF
:
1619 optname
= SO_SNDBUF
;
1621 case TARGET_SO_SNDBUFFORCE
:
1622 optname
= SO_SNDBUFFORCE
;
1624 case TARGET_SO_RCVBUF
:
1625 optname
= SO_RCVBUF
;
1627 case TARGET_SO_RCVBUFFORCE
:
1628 optname
= SO_RCVBUFFORCE
;
1630 case TARGET_SO_KEEPALIVE
:
1631 optname
= SO_KEEPALIVE
;
1633 case TARGET_SO_OOBINLINE
:
1634 optname
= SO_OOBINLINE
;
1636 case TARGET_SO_NO_CHECK
:
1637 optname
= SO_NO_CHECK
;
1639 case TARGET_SO_PRIORITY
:
1640 optname
= SO_PRIORITY
;
1643 case TARGET_SO_BSDCOMPAT
:
1644 optname
= SO_BSDCOMPAT
;
1647 case TARGET_SO_PASSCRED
:
1648 optname
= SO_PASSCRED
;
1650 case TARGET_SO_PASSSEC
:
1651 optname
= SO_PASSSEC
;
1653 case TARGET_SO_TIMESTAMP
:
1654 optname
= SO_TIMESTAMP
;
1656 case TARGET_SO_RCVLOWAT
:
1657 optname
= SO_RCVLOWAT
;
1663 if (optlen
< sizeof(uint32_t))
1664 return -TARGET_EINVAL
;
1666 if (get_user_u32(val
, optval_addr
))
1667 return -TARGET_EFAULT
;
1668 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
1672 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
1673 ret
= -TARGET_ENOPROTOOPT
;
1678 /* do_getsockopt() Must return target values and target errnos. */
1679 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
1680 abi_ulong optval_addr
, abi_ulong optlen
)
1687 case TARGET_SOL_SOCKET
:
1690 /* These don't just return a single integer */
1691 case TARGET_SO_LINGER
:
1692 case TARGET_SO_RCVTIMEO
:
1693 case TARGET_SO_SNDTIMEO
:
1694 case TARGET_SO_PEERNAME
:
1696 case TARGET_SO_PEERCRED
: {
1699 struct target_ucred
*tcr
;
1701 if (get_user_u32(len
, optlen
)) {
1702 return -TARGET_EFAULT
;
1705 return -TARGET_EINVAL
;
1709 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
1717 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
1718 return -TARGET_EFAULT
;
1720 __put_user(cr
.pid
, &tcr
->pid
);
1721 __put_user(cr
.uid
, &tcr
->uid
);
1722 __put_user(cr
.gid
, &tcr
->gid
);
1723 unlock_user_struct(tcr
, optval_addr
, 1);
1724 if (put_user_u32(len
, optlen
)) {
1725 return -TARGET_EFAULT
;
1729 /* Options with 'int' argument. */
1730 case TARGET_SO_DEBUG
:
1733 case TARGET_SO_REUSEADDR
:
1734 optname
= SO_REUSEADDR
;
1736 case TARGET_SO_TYPE
:
1739 case TARGET_SO_ERROR
:
1742 case TARGET_SO_DONTROUTE
:
1743 optname
= SO_DONTROUTE
;
1745 case TARGET_SO_BROADCAST
:
1746 optname
= SO_BROADCAST
;
1748 case TARGET_SO_SNDBUF
:
1749 optname
= SO_SNDBUF
;
1751 case TARGET_SO_RCVBUF
:
1752 optname
= SO_RCVBUF
;
1754 case TARGET_SO_KEEPALIVE
:
1755 optname
= SO_KEEPALIVE
;
1757 case TARGET_SO_OOBINLINE
:
1758 optname
= SO_OOBINLINE
;
1760 case TARGET_SO_NO_CHECK
:
1761 optname
= SO_NO_CHECK
;
1763 case TARGET_SO_PRIORITY
:
1764 optname
= SO_PRIORITY
;
1767 case TARGET_SO_BSDCOMPAT
:
1768 optname
= SO_BSDCOMPAT
;
1771 case TARGET_SO_PASSCRED
:
1772 optname
= SO_PASSCRED
;
1774 case TARGET_SO_TIMESTAMP
:
1775 optname
= SO_TIMESTAMP
;
1777 case TARGET_SO_RCVLOWAT
:
1778 optname
= SO_RCVLOWAT
;
1780 case TARGET_SO_ACCEPTCONN
:
1781 optname
= SO_ACCEPTCONN
;
1788 /* TCP options all take an 'int' value. */
1790 if (get_user_u32(len
, optlen
))
1791 return -TARGET_EFAULT
;
1793 return -TARGET_EINVAL
;
1795 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1798 if (optname
== SO_TYPE
) {
1799 val
= host_to_target_sock_type(val
);
1804 if (put_user_u32(val
, optval_addr
))
1805 return -TARGET_EFAULT
;
1807 if (put_user_u8(val
, optval_addr
))
1808 return -TARGET_EFAULT
;
1810 if (put_user_u32(len
, optlen
))
1811 return -TARGET_EFAULT
;
1818 case IP_ROUTER_ALERT
:
1822 case IP_MTU_DISCOVER
:
1828 case IP_MULTICAST_TTL
:
1829 case IP_MULTICAST_LOOP
:
1830 if (get_user_u32(len
, optlen
))
1831 return -TARGET_EFAULT
;
1833 return -TARGET_EINVAL
;
1835 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1838 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
1840 if (put_user_u32(len
, optlen
)
1841 || put_user_u8(val
, optval_addr
))
1842 return -TARGET_EFAULT
;
1844 if (len
> sizeof(int))
1846 if (put_user_u32(len
, optlen
)
1847 || put_user_u32(val
, optval_addr
))
1848 return -TARGET_EFAULT
;
1852 ret
= -TARGET_ENOPROTOOPT
;
1858 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1860 ret
= -TARGET_EOPNOTSUPP
;
1866 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
1867 int count
, int copy
)
1869 struct target_iovec
*target_vec
;
1871 abi_ulong total_len
, max_len
;
1874 bool bad_address
= false;
1880 if (count
< 0 || count
> IOV_MAX
) {
1885 vec
= calloc(count
, sizeof(struct iovec
));
1891 target_vec
= lock_user(VERIFY_READ
, target_addr
,
1892 count
* sizeof(struct target_iovec
), 1);
1893 if (target_vec
== NULL
) {
1898 /* ??? If host page size > target page size, this will result in a
1899 value larger than what we can actually support. */
1900 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
1903 for (i
= 0; i
< count
; i
++) {
1904 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
1905 abi_long len
= tswapal(target_vec
[i
].iov_len
);
1910 } else if (len
== 0) {
1911 /* Zero length pointer is ignored. */
1912 vec
[i
].iov_base
= 0;
1914 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
1915 /* If the first buffer pointer is bad, this is a fault. But
1916 * subsequent bad buffers will result in a partial write; this
1917 * is realized by filling the vector with null pointers and
1919 if (!vec
[i
].iov_base
) {
1930 if (len
> max_len
- total_len
) {
1931 len
= max_len
- total_len
;
1934 vec
[i
].iov_len
= len
;
1938 unlock_user(target_vec
, target_addr
, 0);
1943 if (tswapal(target_vec
[i
].iov_len
) > 0) {
1944 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
1947 unlock_user(target_vec
, target_addr
, 0);
1954 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
1955 int count
, int copy
)
1957 struct target_iovec
*target_vec
;
1960 target_vec
= lock_user(VERIFY_READ
, target_addr
,
1961 count
* sizeof(struct target_iovec
), 1);
1963 for (i
= 0; i
< count
; i
++) {
1964 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
1965 abi_long len
= tswapal(target_vec
[i
].iov_len
);
1969 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
1971 unlock_user(target_vec
, target_addr
, 0);
1977 static inline int target_to_host_sock_type(int *type
)
1980 int target_type
= *type
;
1982 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
1983 case TARGET_SOCK_DGRAM
:
1984 host_type
= SOCK_DGRAM
;
1986 case TARGET_SOCK_STREAM
:
1987 host_type
= SOCK_STREAM
;
1990 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
1993 if (target_type
& TARGET_SOCK_CLOEXEC
) {
1994 #if defined(SOCK_CLOEXEC)
1995 host_type
|= SOCK_CLOEXEC
;
1997 return -TARGET_EINVAL
;
2000 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2001 #if defined(SOCK_NONBLOCK)
2002 host_type
|= SOCK_NONBLOCK
;
2003 #elif !defined(O_NONBLOCK)
2004 return -TARGET_EINVAL
;
2011 /* Try to emulate socket type flags after socket creation. */
2012 static int sock_flags_fixup(int fd
, int target_type
)
2014 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2015 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2016 int flags
= fcntl(fd
, F_GETFL
);
2017 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
2019 return -TARGET_EINVAL
;
2026 /* do_socket() Must return target values and target errnos. */
2027 static abi_long
do_socket(int domain
, int type
, int protocol
)
2029 int target_type
= type
;
2032 ret
= target_to_host_sock_type(&type
);
2037 if (domain
== PF_NETLINK
)
2038 return -TARGET_EAFNOSUPPORT
;
2039 ret
= get_errno(socket(domain
, type
, protocol
));
2041 ret
= sock_flags_fixup(ret
, target_type
);
2046 /* do_bind() Must return target values and target errnos. */
2047 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
2053 if ((int)addrlen
< 0) {
2054 return -TARGET_EINVAL
;
2057 addr
= alloca(addrlen
+1);
2059 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
2063 return get_errno(bind(sockfd
, addr
, addrlen
));
2066 /* do_connect() Must return target values and target errnos. */
2067 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
2073 if ((int)addrlen
< 0) {
2074 return -TARGET_EINVAL
;
2077 addr
= alloca(addrlen
+1);
2079 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
2083 return get_errno(connect(sockfd
, addr
, addrlen
));
2086 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2087 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
2088 int flags
, int send
)
2094 abi_ulong target_vec
;
2096 if (msgp
->msg_name
) {
2097 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
2098 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
2099 ret
= target_to_host_sockaddr(msg
.msg_name
, tswapal(msgp
->msg_name
),
2105 msg
.msg_name
= NULL
;
2106 msg
.msg_namelen
= 0;
2108 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
2109 msg
.msg_control
= alloca(msg
.msg_controllen
);
2110 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
2112 count
= tswapal(msgp
->msg_iovlen
);
2113 target_vec
= tswapal(msgp
->msg_iov
);
2114 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
2115 target_vec
, count
, send
);
2117 ret
= -host_to_target_errno(errno
);
2120 msg
.msg_iovlen
= count
;
2124 ret
= target_to_host_cmsg(&msg
, msgp
);
2126 ret
= get_errno(sendmsg(fd
, &msg
, flags
));
2128 ret
= get_errno(recvmsg(fd
, &msg
, flags
));
2129 if (!is_error(ret
)) {
2131 ret
= host_to_target_cmsg(msgp
, &msg
);
2132 if (!is_error(ret
)) {
2133 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
2134 if (msg
.msg_name
!= NULL
) {
2135 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
2136 msg
.msg_name
, msg
.msg_namelen
);
2148 unlock_iovec(vec
, target_vec
, count
, !send
);
2153 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
2154 int flags
, int send
)
2157 struct target_msghdr
*msgp
;
2159 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
2163 return -TARGET_EFAULT
;
2165 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
2166 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
2170 #ifdef TARGET_NR_sendmmsg
2171 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2172 * so it might not have this *mmsg-specific flag either.
2174 #ifndef MSG_WAITFORONE
2175 #define MSG_WAITFORONE 0x10000
2178 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
2179 unsigned int vlen
, unsigned int flags
,
2182 struct target_mmsghdr
*mmsgp
;
2186 if (vlen
> UIO_MAXIOV
) {
2190 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
2192 return -TARGET_EFAULT
;
2195 for (i
= 0; i
< vlen
; i
++) {
2196 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
2197 if (is_error(ret
)) {
2200 mmsgp
[i
].msg_len
= tswap32(ret
);
2201 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2202 if (flags
& MSG_WAITFORONE
) {
2203 flags
|= MSG_DONTWAIT
;
2207 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
2209 /* Return number of datagrams sent if we sent any at all;
2210 * otherwise return the error.
2219 /* If we don't have a system accept4() then just call accept.
2220 * The callsites to do_accept4() will ensure that they don't
2221 * pass a non-zero flags argument in this config.
2223 #ifndef CONFIG_ACCEPT4
2224 static inline int accept4(int sockfd
, struct sockaddr
*addr
,
2225 socklen_t
*addrlen
, int flags
)
2228 return accept(sockfd
, addr
, addrlen
);
2232 /* do_accept4() Must return target values and target errnos. */
2233 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
2234 abi_ulong target_addrlen_addr
, int flags
)
2241 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
2243 if (target_addr
== 0) {
2244 return get_errno(accept4(fd
, NULL
, NULL
, host_flags
));
2247 /* linux returns EINVAL if addrlen pointer is invalid */
2248 if (get_user_u32(addrlen
, target_addrlen_addr
))
2249 return -TARGET_EINVAL
;
2251 if ((int)addrlen
< 0) {
2252 return -TARGET_EINVAL
;
2255 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2256 return -TARGET_EINVAL
;
2258 addr
= alloca(addrlen
);
2260 ret
= get_errno(accept4(fd
, addr
, &addrlen
, host_flags
));
2261 if (!is_error(ret
)) {
2262 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2263 if (put_user_u32(addrlen
, target_addrlen_addr
))
2264 ret
= -TARGET_EFAULT
;
2269 /* do_getpeername() Must return target values and target errnos. */
2270 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
2271 abi_ulong target_addrlen_addr
)
2277 if (get_user_u32(addrlen
, target_addrlen_addr
))
2278 return -TARGET_EFAULT
;
2280 if ((int)addrlen
< 0) {
2281 return -TARGET_EINVAL
;
2284 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2285 return -TARGET_EFAULT
;
2287 addr
= alloca(addrlen
);
2289 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
2290 if (!is_error(ret
)) {
2291 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2292 if (put_user_u32(addrlen
, target_addrlen_addr
))
2293 ret
= -TARGET_EFAULT
;
2298 /* do_getsockname() Must return target values and target errnos. */
2299 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
2300 abi_ulong target_addrlen_addr
)
2306 if (get_user_u32(addrlen
, target_addrlen_addr
))
2307 return -TARGET_EFAULT
;
2309 if ((int)addrlen
< 0) {
2310 return -TARGET_EINVAL
;
2313 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2314 return -TARGET_EFAULT
;
2316 addr
= alloca(addrlen
);
2318 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
2319 if (!is_error(ret
)) {
2320 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2321 if (put_user_u32(addrlen
, target_addrlen_addr
))
2322 ret
= -TARGET_EFAULT
;
2327 /* do_socketpair() Must return target values and target errnos. */
2328 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
2329 abi_ulong target_tab_addr
)
2334 target_to_host_sock_type(&type
);
2336 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
2337 if (!is_error(ret
)) {
2338 if (put_user_s32(tab
[0], target_tab_addr
)
2339 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
2340 ret
= -TARGET_EFAULT
;
2345 /* do_sendto() Must return target values and target errnos. */
2346 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
2347 abi_ulong target_addr
, socklen_t addrlen
)
2353 if ((int)addrlen
< 0) {
2354 return -TARGET_EINVAL
;
2357 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
2359 return -TARGET_EFAULT
;
2361 addr
= alloca(addrlen
+1);
2362 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
2364 unlock_user(host_msg
, msg
, 0);
2367 ret
= get_errno(sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
2369 ret
= get_errno(send(fd
, host_msg
, len
, flags
));
2371 unlock_user(host_msg
, msg
, 0);
2375 /* do_recvfrom() Must return target values and target errnos. */
2376 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
2377 abi_ulong target_addr
,
2378 abi_ulong target_addrlen
)
2385 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
2387 return -TARGET_EFAULT
;
2389 if (get_user_u32(addrlen
, target_addrlen
)) {
2390 ret
= -TARGET_EFAULT
;
2393 if ((int)addrlen
< 0) {
2394 ret
= -TARGET_EINVAL
;
2397 addr
= alloca(addrlen
);
2398 ret
= get_errno(recvfrom(fd
, host_msg
, len
, flags
, addr
, &addrlen
));
2400 addr
= NULL
; /* To keep compiler quiet. */
2401 ret
= get_errno(qemu_recv(fd
, host_msg
, len
, flags
));
2403 if (!is_error(ret
)) {
2405 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2406 if (put_user_u32(addrlen
, target_addrlen
)) {
2407 ret
= -TARGET_EFAULT
;
2411 unlock_user(host_msg
, msg
, len
);
2414 unlock_user(host_msg
, msg
, 0);
2419 #ifdef TARGET_NR_socketcall
2420 /* do_socketcall() Must return target values and target errnos. */
2421 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
2423 static const unsigned ac
[] = { /* number of arguments per call */
2424 [SOCKOP_socket
] = 3, /* domain, type, protocol */
2425 [SOCKOP_bind
] = 3, /* sockfd, addr, addrlen */
2426 [SOCKOP_connect
] = 3, /* sockfd, addr, addrlen */
2427 [SOCKOP_listen
] = 2, /* sockfd, backlog */
2428 [SOCKOP_accept
] = 3, /* sockfd, addr, addrlen */
2429 [SOCKOP_accept4
] = 4, /* sockfd, addr, addrlen, flags */
2430 [SOCKOP_getsockname
] = 3, /* sockfd, addr, addrlen */
2431 [SOCKOP_getpeername
] = 3, /* sockfd, addr, addrlen */
2432 [SOCKOP_socketpair
] = 4, /* domain, type, protocol, tab */
2433 [SOCKOP_send
] = 4, /* sockfd, msg, len, flags */
2434 [SOCKOP_recv
] = 4, /* sockfd, msg, len, flags */
2435 [SOCKOP_sendto
] = 6, /* sockfd, msg, len, flags, addr, addrlen */
2436 [SOCKOP_recvfrom
] = 6, /* sockfd, msg, len, flags, addr, addrlen */
2437 [SOCKOP_shutdown
] = 2, /* sockfd, how */
2438 [SOCKOP_sendmsg
] = 3, /* sockfd, msg, flags */
2439 [SOCKOP_recvmsg
] = 3, /* sockfd, msg, flags */
2440 [SOCKOP_setsockopt
] = 5, /* sockfd, level, optname, optval, optlen */
2441 [SOCKOP_getsockopt
] = 5, /* sockfd, level, optname, optval, optlen */
2443 abi_long a
[6]; /* max 6 args */
2445 /* first, collect the arguments in a[] according to ac[] */
2446 if (num
>= 0 && num
< ARRAY_SIZE(ac
)) {
2448 assert(ARRAY_SIZE(a
) >= ac
[num
]); /* ensure we have space for args */
2449 for (i
= 0; i
< ac
[num
]; ++i
) {
2450 if (get_user_ual(a
[i
], vptr
+ i
* sizeof(abi_long
)) != 0) {
2451 return -TARGET_EFAULT
;
2456 /* now when we have the args, actually handle the call */
2458 case SOCKOP_socket
: /* domain, type, protocol */
2459 return do_socket(a
[0], a
[1], a
[2]);
2460 case SOCKOP_bind
: /* sockfd, addr, addrlen */
2461 return do_bind(a
[0], a
[1], a
[2]);
2462 case SOCKOP_connect
: /* sockfd, addr, addrlen */
2463 return do_connect(a
[0], a
[1], a
[2]);
2464 case SOCKOP_listen
: /* sockfd, backlog */
2465 return get_errno(listen(a
[0], a
[1]));
2466 case SOCKOP_accept
: /* sockfd, addr, addrlen */
2467 return do_accept4(a
[0], a
[1], a
[2], 0);
2468 case SOCKOP_accept4
: /* sockfd, addr, addrlen, flags */
2469 return do_accept4(a
[0], a
[1], a
[2], a
[3]);
2470 case SOCKOP_getsockname
: /* sockfd, addr, addrlen */
2471 return do_getsockname(a
[0], a
[1], a
[2]);
2472 case SOCKOP_getpeername
: /* sockfd, addr, addrlen */
2473 return do_getpeername(a
[0], a
[1], a
[2]);
2474 case SOCKOP_socketpair
: /* domain, type, protocol, tab */
2475 return do_socketpair(a
[0], a
[1], a
[2], a
[3]);
2476 case SOCKOP_send
: /* sockfd, msg, len, flags */
2477 return do_sendto(a
[0], a
[1], a
[2], a
[3], 0, 0);
2478 case SOCKOP_recv
: /* sockfd, msg, len, flags */
2479 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], 0, 0);
2480 case SOCKOP_sendto
: /* sockfd, msg, len, flags, addr, addrlen */
2481 return do_sendto(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
2482 case SOCKOP_recvfrom
: /* sockfd, msg, len, flags, addr, addrlen */
2483 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
2484 case SOCKOP_shutdown
: /* sockfd, how */
2485 return get_errno(shutdown(a
[0], a
[1]));
2486 case SOCKOP_sendmsg
: /* sockfd, msg, flags */
2487 return do_sendrecvmsg(a
[0], a
[1], a
[2], 1);
2488 case SOCKOP_recvmsg
: /* sockfd, msg, flags */
2489 return do_sendrecvmsg(a
[0], a
[1], a
[2], 0);
2490 case SOCKOP_setsockopt
: /* sockfd, level, optname, optval, optlen */
2491 return do_setsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
2492 case SOCKOP_getsockopt
: /* sockfd, level, optname, optval, optlen */
2493 return do_getsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
2495 gemu_log("Unsupported socketcall: %d\n", num
);
2496 return -TARGET_ENOSYS
;
2501 #define N_SHM_REGIONS 32
2503 static struct shm_region
{
2506 } shm_regions
[N_SHM_REGIONS
];
2508 struct target_semid_ds
2510 struct target_ipc_perm sem_perm
;
2511 abi_ulong sem_otime
;
2512 #if !defined(TARGET_PPC64)
2513 abi_ulong __unused1
;
2515 abi_ulong sem_ctime
;
2516 #if !defined(TARGET_PPC64)
2517 abi_ulong __unused2
;
2519 abi_ulong sem_nsems
;
2520 abi_ulong __unused3
;
2521 abi_ulong __unused4
;
2524 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
2525 abi_ulong target_addr
)
2527 struct target_ipc_perm
*target_ip
;
2528 struct target_semid_ds
*target_sd
;
2530 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2531 return -TARGET_EFAULT
;
2532 target_ip
= &(target_sd
->sem_perm
);
2533 host_ip
->__key
= tswap32(target_ip
->__key
);
2534 host_ip
->uid
= tswap32(target_ip
->uid
);
2535 host_ip
->gid
= tswap32(target_ip
->gid
);
2536 host_ip
->cuid
= tswap32(target_ip
->cuid
);
2537 host_ip
->cgid
= tswap32(target_ip
->cgid
);
2538 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2539 host_ip
->mode
= tswap32(target_ip
->mode
);
2541 host_ip
->mode
= tswap16(target_ip
->mode
);
2543 #if defined(TARGET_PPC)
2544 host_ip
->__seq
= tswap32(target_ip
->__seq
);
2546 host_ip
->__seq
= tswap16(target_ip
->__seq
);
2548 unlock_user_struct(target_sd
, target_addr
, 0);
2552 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
2553 struct ipc_perm
*host_ip
)
2555 struct target_ipc_perm
*target_ip
;
2556 struct target_semid_ds
*target_sd
;
2558 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2559 return -TARGET_EFAULT
;
2560 target_ip
= &(target_sd
->sem_perm
);
2561 target_ip
->__key
= tswap32(host_ip
->__key
);
2562 target_ip
->uid
= tswap32(host_ip
->uid
);
2563 target_ip
->gid
= tswap32(host_ip
->gid
);
2564 target_ip
->cuid
= tswap32(host_ip
->cuid
);
2565 target_ip
->cgid
= tswap32(host_ip
->cgid
);
2566 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2567 target_ip
->mode
= tswap32(host_ip
->mode
);
2569 target_ip
->mode
= tswap16(host_ip
->mode
);
2571 #if defined(TARGET_PPC)
2572 target_ip
->__seq
= tswap32(host_ip
->__seq
);
2574 target_ip
->__seq
= tswap16(host_ip
->__seq
);
2576 unlock_user_struct(target_sd
, target_addr
, 1);
2580 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
2581 abi_ulong target_addr
)
2583 struct target_semid_ds
*target_sd
;
2585 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2586 return -TARGET_EFAULT
;
2587 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
2588 return -TARGET_EFAULT
;
2589 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
2590 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
2591 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
2592 unlock_user_struct(target_sd
, target_addr
, 0);
2596 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
2597 struct semid_ds
*host_sd
)
2599 struct target_semid_ds
*target_sd
;
2601 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2602 return -TARGET_EFAULT
;
2603 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
2604 return -TARGET_EFAULT
;
2605 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
2606 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
2607 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
2608 unlock_user_struct(target_sd
, target_addr
, 1);
/* Target-ABI mirror of struct seminfo (IPC_INFO/SEM_INFO result).
 * NOTE(review): field list reconstructed from host_to_target_seminfo below. */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
2625 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
2626 struct seminfo
*host_seminfo
)
2628 struct target_seminfo
*target_seminfo
;
2629 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
2630 return -TARGET_EFAULT
;
2631 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
2632 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
2633 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
2634 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
2635 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
2636 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
2637 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
2638 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
2639 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
2640 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
2641 unlock_user_struct(target_seminfo
, target_addr
, 1);
2647 struct semid_ds
*buf
;
2648 unsigned short *array
;
2649 struct seminfo
*__buf
;
2652 union target_semun
{
2659 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
2660 abi_ulong target_addr
)
2663 unsigned short *array
;
2665 struct semid_ds semid_ds
;
2668 semun
.buf
= &semid_ds
;
2670 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
2672 return get_errno(ret
);
2674 nsems
= semid_ds
.sem_nsems
;
2676 *host_array
= malloc(nsems
*sizeof(unsigned short));
2678 return -TARGET_ENOMEM
;
2680 array
= lock_user(VERIFY_READ
, target_addr
,
2681 nsems
*sizeof(unsigned short), 1);
2684 return -TARGET_EFAULT
;
2687 for(i
=0; i
<nsems
; i
++) {
2688 __get_user((*host_array
)[i
], &array
[i
]);
2690 unlock_user(array
, target_addr
, 0);
2695 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
2696 unsigned short **host_array
)
2699 unsigned short *array
;
2701 struct semid_ds semid_ds
;
2704 semun
.buf
= &semid_ds
;
2706 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
2708 return get_errno(ret
);
2710 nsems
= semid_ds
.sem_nsems
;
2712 array
= lock_user(VERIFY_WRITE
, target_addr
,
2713 nsems
*sizeof(unsigned short), 0);
2715 return -TARGET_EFAULT
;
2717 for(i
=0; i
<nsems
; i
++) {
2718 __put_user((*host_array
)[i
], &array
[i
]);
2721 unlock_user(array
, target_addr
, 1);
2726 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
2727 union target_semun target_su
)
2730 struct semid_ds dsarg
;
2731 unsigned short *array
= NULL
;
2732 struct seminfo seminfo
;
2733 abi_long ret
= -TARGET_EINVAL
;
2740 /* In 64 bit cross-endian situations, we will erroneously pick up
2741 * the wrong half of the union for the "val" element. To rectify
2742 * this, the entire 8-byte structure is byteswapped, followed by
2743 * a swap of the 4 byte val field. In other cases, the data is
2744 * already in proper host byte order. */
2745 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
2746 target_su
.buf
= tswapal(target_su
.buf
);
2747 arg
.val
= tswap32(target_su
.val
);
2749 arg
.val
= target_su
.val
;
2751 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2755 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
2759 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2760 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
2767 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
2771 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2772 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
2778 arg
.__buf
= &seminfo
;
2779 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2780 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
2788 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
/* Target-ABI layout of struct sembuf (one semop operation). */
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};
2801 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
2802 abi_ulong target_addr
,
2805 struct target_sembuf
*target_sembuf
;
2808 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
2809 nsops
*sizeof(struct target_sembuf
), 1);
2811 return -TARGET_EFAULT
;
2813 for(i
=0; i
<nsops
; i
++) {
2814 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
2815 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
2816 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
2819 unlock_user(target_sembuf
, target_addr
, 0);
2824 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
2826 struct sembuf sops
[nsops
];
2828 if (target_to_host_sembuf(sops
, ptr
, nsops
))
2829 return -TARGET_EFAULT
;
2831 return get_errno(semop(semid
, sops
, nsops
));
2834 struct target_msqid_ds
2836 struct target_ipc_perm msg_perm
;
2837 abi_ulong msg_stime
;
2838 #if TARGET_ABI_BITS == 32
2839 abi_ulong __unused1
;
2841 abi_ulong msg_rtime
;
2842 #if TARGET_ABI_BITS == 32
2843 abi_ulong __unused2
;
2845 abi_ulong msg_ctime
;
2846 #if TARGET_ABI_BITS == 32
2847 abi_ulong __unused3
;
2849 abi_ulong __msg_cbytes
;
2851 abi_ulong msg_qbytes
;
2852 abi_ulong msg_lspid
;
2853 abi_ulong msg_lrpid
;
2854 abi_ulong __unused4
;
2855 abi_ulong __unused5
;
2858 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
2859 abi_ulong target_addr
)
2861 struct target_msqid_ds
*target_md
;
2863 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
2864 return -TARGET_EFAULT
;
2865 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
2866 return -TARGET_EFAULT
;
2867 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
2868 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
2869 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
2870 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
2871 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
2872 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
2873 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
2874 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
2875 unlock_user_struct(target_md
, target_addr
, 0);
2879 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
2880 struct msqid_ds
*host_md
)
2882 struct target_msqid_ds
*target_md
;
2884 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
2885 return -TARGET_EFAULT
;
2886 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
2887 return -TARGET_EFAULT
;
2888 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
2889 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
2890 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
2891 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
2892 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
2893 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
2894 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
2895 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
2896 unlock_user_struct(target_md
, target_addr
, 1);
/* Target-ABI mirror of struct msginfo (IPC_INFO/MSG_INFO result).
 * NOTE(review): field list reconstructed from host_to_target_msginfo below. */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
2911 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
2912 struct msginfo
*host_msginfo
)
2914 struct target_msginfo
*target_msginfo
;
2915 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
2916 return -TARGET_EFAULT
;
2917 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
2918 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
2919 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
2920 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
2921 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
2922 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
2923 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
2924 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
2925 unlock_user_struct(target_msginfo
, target_addr
, 1);
2929 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
2931 struct msqid_ds dsarg
;
2932 struct msginfo msginfo
;
2933 abi_long ret
= -TARGET_EINVAL
;
2941 if (target_to_host_msqid_ds(&dsarg
,ptr
))
2942 return -TARGET_EFAULT
;
2943 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
2944 if (host_to_target_msqid_ds(ptr
,&dsarg
))
2945 return -TARGET_EFAULT
;
2948 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
2952 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
2953 if (host_to_target_msginfo(ptr
, &msginfo
))
2954 return -TARGET_EFAULT
;
2961 struct target_msgbuf
{
2966 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
2967 ssize_t msgsz
, int msgflg
)
2969 struct target_msgbuf
*target_mb
;
2970 struct msgbuf
*host_mb
;
2974 return -TARGET_EINVAL
;
2977 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
2978 return -TARGET_EFAULT
;
2979 host_mb
= malloc(msgsz
+sizeof(long));
2981 unlock_user_struct(target_mb
, msgp
, 0);
2982 return -TARGET_ENOMEM
;
2984 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
2985 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
2986 ret
= get_errno(msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
2988 unlock_user_struct(target_mb
, msgp
, 0);
2993 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
2994 unsigned int msgsz
, abi_long msgtyp
,
2997 struct target_msgbuf
*target_mb
;
2999 struct msgbuf
*host_mb
;
3002 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
3003 return -TARGET_EFAULT
;
3005 host_mb
= g_malloc(msgsz
+sizeof(long));
3006 ret
= get_errno(msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
3009 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
3010 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
3011 if (!target_mtext
) {
3012 ret
= -TARGET_EFAULT
;
3015 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
3016 unlock_user(target_mtext
, target_mtext_addr
, ret
);
3019 target_mb
->mtype
= tswapal(host_mb
->mtype
);
3023 unlock_user_struct(target_mb
, msgp
, 1);
3028 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
3029 abi_ulong target_addr
)
3031 struct target_shmid_ds
*target_sd
;
3033 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3034 return -TARGET_EFAULT
;
3035 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
3036 return -TARGET_EFAULT
;
3037 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
3038 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
3039 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
3040 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
3041 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
3042 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
3043 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
3044 unlock_user_struct(target_sd
, target_addr
, 0);
3048 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
3049 struct shmid_ds
*host_sd
)
3051 struct target_shmid_ds
*target_sd
;
3053 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3054 return -TARGET_EFAULT
;
3055 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
3056 return -TARGET_EFAULT
;
3057 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
3058 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
3059 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
3060 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
3061 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
3062 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
3063 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
3064 unlock_user_struct(target_sd
, target_addr
, 1);
3068 struct target_shminfo
{
3076 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
3077 struct shminfo
*host_shminfo
)
3079 struct target_shminfo
*target_shminfo
;
3080 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
3081 return -TARGET_EFAULT
;
3082 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
3083 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
3084 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
3085 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
3086 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
3087 unlock_user_struct(target_shminfo
, target_addr
, 1);
3091 struct target_shm_info
{
3096 abi_ulong swap_attempts
;
3097 abi_ulong swap_successes
;
3100 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
3101 struct shm_info
*host_shm_info
)
3103 struct target_shm_info
*target_shm_info
;
3104 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
3105 return -TARGET_EFAULT
;
3106 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
3107 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
3108 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
3109 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
3110 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
3111 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
3112 unlock_user_struct(target_shm_info
, target_addr
, 1);
3116 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
3118 struct shmid_ds dsarg
;
3119 struct shminfo shminfo
;
3120 struct shm_info shm_info
;
3121 abi_long ret
= -TARGET_EINVAL
;
3129 if (target_to_host_shmid_ds(&dsarg
, buf
))
3130 return -TARGET_EFAULT
;
3131 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
3132 if (host_to_target_shmid_ds(buf
, &dsarg
))
3133 return -TARGET_EFAULT
;
3136 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
3137 if (host_to_target_shminfo(buf
, &shminfo
))
3138 return -TARGET_EFAULT
;
3141 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
3142 if (host_to_target_shm_info(buf
, &shm_info
))
3143 return -TARGET_EFAULT
;
3148 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
3155 static inline abi_ulong
do_shmat(int shmid
, abi_ulong shmaddr
, int shmflg
)
3159 struct shmid_ds shm_info
;
3162 /* find out the length of the shared memory segment */
3163 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
3164 if (is_error(ret
)) {
3165 /* can't get length, bail out */
3172 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
3174 abi_ulong mmap_start
;
3176 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
3178 if (mmap_start
== -1) {
3180 host_raddr
= (void *)-1;
3182 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
3185 if (host_raddr
== (void *)-1) {
3187 return get_errno((long)host_raddr
);
3189 raddr
=h2g((unsigned long)host_raddr
);
3191 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
3192 PAGE_VALID
| PAGE_READ
|
3193 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
3195 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
3196 if (shm_regions
[i
].start
== 0) {
3197 shm_regions
[i
].start
= raddr
;
3198 shm_regions
[i
].size
= shm_info
.shm_segsz
;
3208 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
3212 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
3213 if (shm_regions
[i
].start
== shmaddr
) {
3214 shm_regions
[i
].start
= 0;
3215 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
3220 return get_errno(shmdt(g2h(shmaddr
)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
static abi_long do_ipc(unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third,
                        (union target_semun) atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* old ABI packs msgp/msgtyp into a kludge struct */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                                tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
3330 /* kernel structure types definitions */
3332 #define STRUCT(name, ...) STRUCT_ ## name,
3333 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3335 #include "syscall_types.h"
3339 #undef STRUCT_SPECIAL
3341 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3342 #define STRUCT_SPECIAL(name)
3343 #include "syscall_types.h"
3345 #undef STRUCT_SPECIAL
3347 typedef struct IOCTLEntry IOCTLEntry
;
3349 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3350 int fd
, int cmd
, abi_long arg
);
3354 unsigned int host_cmd
;
3357 do_ioctl_fn
*do_ioctl
;
3358 const argtype arg_type
[5];
3361 #define IOC_R 0x0001
3362 #define IOC_W 0x0002
3363 #define IOC_RW (IOC_R | IOC_W)
3365 #define MAX_STRUCT_SIZE 4096
3367 #ifdef CONFIG_FIEMAP
3368 /* So fiemap access checks don't overflow on 32 bit systems.
3369 * This is very slightly smaller than the limit imposed by
3370 * the underlying kernel.
3372 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3373 / sizeof(struct fiemap_extent))
3375 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3376 int fd
, int cmd
, abi_long arg
)
3378 /* The parameter for this ioctl is a struct fiemap followed
3379 * by an array of struct fiemap_extent whose size is set
3380 * in fiemap->fm_extent_count. The array is filled in by the
3383 int target_size_in
, target_size_out
;
3385 const argtype
*arg_type
= ie
->arg_type
;
3386 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
3389 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
3393 assert(arg_type
[0] == TYPE_PTR
);
3394 assert(ie
->access
== IOC_RW
);
3396 target_size_in
= thunk_type_size(arg_type
, 0);
3397 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
3399 return -TARGET_EFAULT
;
3401 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3402 unlock_user(argptr
, arg
, 0);
3403 fm
= (struct fiemap
*)buf_temp
;
3404 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
3405 return -TARGET_EINVAL
;
3408 outbufsz
= sizeof (*fm
) +
3409 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
3411 if (outbufsz
> MAX_STRUCT_SIZE
) {
3412 /* We can't fit all the extents into the fixed size buffer.
3413 * Allocate one that is large enough and use it instead.
3415 fm
= malloc(outbufsz
);
3417 return -TARGET_ENOMEM
;
3419 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
3422 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, fm
));
3423 if (!is_error(ret
)) {
3424 target_size_out
= target_size_in
;
3425 /* An extent_count of 0 means we were only counting the extents
3426 * so there are no structs to copy
3428 if (fm
->fm_extent_count
!= 0) {
3429 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
3431 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
3433 ret
= -TARGET_EFAULT
;
3435 /* Convert the struct fiemap */
3436 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
3437 if (fm
->fm_extent_count
!= 0) {
3438 p
= argptr
+ target_size_in
;
3439 /* ...and then all the struct fiemap_extents */
3440 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
3441 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
3446 unlock_user(argptr
, arg
, target_size_out
);
3456 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3457 int fd
, int cmd
, abi_long arg
)
3459 const argtype
*arg_type
= ie
->arg_type
;
3463 struct ifconf
*host_ifconf
;
3465 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
3466 int target_ifreq_size
;
3471 abi_long target_ifc_buf
;
3475 assert(arg_type
[0] == TYPE_PTR
);
3476 assert(ie
->access
== IOC_RW
);
3479 target_size
= thunk_type_size(arg_type
, 0);
3481 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3483 return -TARGET_EFAULT
;
3484 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3485 unlock_user(argptr
, arg
, 0);
3487 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
3488 target_ifc_len
= host_ifconf
->ifc_len
;
3489 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
3491 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
3492 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
3493 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
3495 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
3496 if (outbufsz
> MAX_STRUCT_SIZE
) {
3497 /* We can't fit all the extents into the fixed size buffer.
3498 * Allocate one that is large enough and use it instead.
3500 host_ifconf
= malloc(outbufsz
);
3502 return -TARGET_ENOMEM
;
3504 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
3507 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
3509 host_ifconf
->ifc_len
= host_ifc_len
;
3510 host_ifconf
->ifc_buf
= host_ifc_buf
;
3512 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, host_ifconf
));
3513 if (!is_error(ret
)) {
3514 /* convert host ifc_len to target ifc_len */
3516 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
3517 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
3518 host_ifconf
->ifc_len
= target_ifc_len
;
3520 /* restore target ifc_buf */
3522 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
3524 /* copy struct ifconf to target user */
3526 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3528 return -TARGET_EFAULT
;
3529 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
3530 unlock_user(argptr
, arg
, target_size
);
3532 /* copy ifreq[] to target user */
3534 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
3535 for (i
= 0; i
< nb_ifreq
; i
++) {
3536 thunk_convert(argptr
+ i
* target_ifreq_size
,
3537 host_ifc_buf
+ i
* sizeof(struct ifreq
),
3538 ifreq_arg_type
, THUNK_TARGET
);
3540 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
3550 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
3551 int cmd
, abi_long arg
)
3554 struct dm_ioctl
*host_dm
;
3555 abi_long guest_data
;
3556 uint32_t guest_data_size
;
3558 const argtype
*arg_type
= ie
->arg_type
;
3560 void *big_buf
= NULL
;
3564 target_size
= thunk_type_size(arg_type
, 0);
3565 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3567 ret
= -TARGET_EFAULT
;
3570 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3571 unlock_user(argptr
, arg
, 0);
3573 /* buf_temp is too small, so fetch things into a bigger buffer */
3574 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
3575 memcpy(big_buf
, buf_temp
, target_size
);
3579 guest_data
= arg
+ host_dm
->data_start
;
3580 if ((guest_data
- arg
) < 0) {
3584 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
3585 host_data
= (char*)host_dm
+ host_dm
->data_start
;
3587 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
3588 switch (ie
->host_cmd
) {
3590 case DM_LIST_DEVICES
:
3593 case DM_DEV_SUSPEND
:
3596 case DM_TABLE_STATUS
:
3597 case DM_TABLE_CLEAR
:
3599 case DM_LIST_VERSIONS
:
3603 case DM_DEV_SET_GEOMETRY
:
3604 /* data contains only strings */
3605 memcpy(host_data
, argptr
, guest_data_size
);
3608 memcpy(host_data
, argptr
, guest_data_size
);
3609 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
3613 void *gspec
= argptr
;
3614 void *cur_data
= host_data
;
3615 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
3616 int spec_size
= thunk_type_size(arg_type
, 0);
3619 for (i
= 0; i
< host_dm
->target_count
; i
++) {
3620 struct dm_target_spec
*spec
= cur_data
;
3624 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
3625 slen
= strlen((char*)gspec
+ spec_size
) + 1;
3627 spec
->next
= sizeof(*spec
) + slen
;
3628 strcpy((char*)&spec
[1], gspec
+ spec_size
);
3630 cur_data
+= spec
->next
;
3635 ret
= -TARGET_EINVAL
;
3636 unlock_user(argptr
, guest_data
, 0);
3639 unlock_user(argptr
, guest_data
, 0);
3641 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3642 if (!is_error(ret
)) {
3643 guest_data
= arg
+ host_dm
->data_start
;
3644 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
3645 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
3646 switch (ie
->host_cmd
) {
3651 case DM_DEV_SUSPEND
:
3654 case DM_TABLE_CLEAR
:
3656 case DM_DEV_SET_GEOMETRY
:
3657 /* no return data */
3659 case DM_LIST_DEVICES
:
3661 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
3662 uint32_t remaining_data
= guest_data_size
;
3663 void *cur_data
= argptr
;
3664 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
3665 int nl_size
= 12; /* can't use thunk_size due to alignment */
3668 uint32_t next
= nl
->next
;
3670 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
3672 if (remaining_data
< nl
->next
) {
3673 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3676 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
3677 strcpy(cur_data
+ nl_size
, nl
->name
);
3678 cur_data
+= nl
->next
;
3679 remaining_data
-= nl
->next
;
3683 nl
= (void*)nl
+ next
;
3688 case DM_TABLE_STATUS
:
3690 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
3691 void *cur_data
= argptr
;
3692 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
3693 int spec_size
= thunk_type_size(arg_type
, 0);
3696 for (i
= 0; i
< host_dm
->target_count
; i
++) {
3697 uint32_t next
= spec
->next
;
3698 int slen
= strlen((char*)&spec
[1]) + 1;
3699 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
3700 if (guest_data_size
< spec
->next
) {
3701 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3704 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
3705 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
3706 cur_data
= argptr
+ spec
->next
;
3707 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
3713 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
3714 int count
= *(uint32_t*)hdata
;
3715 uint64_t *hdev
= hdata
+ 8;
3716 uint64_t *gdev
= argptr
+ 8;
3719 *(uint32_t*)argptr
= tswap32(count
);
3720 for (i
= 0; i
< count
; i
++) {
3721 *gdev
= tswap64(*hdev
);
3727 case DM_LIST_VERSIONS
:
3729 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
3730 uint32_t remaining_data
= guest_data_size
;
3731 void *cur_data
= argptr
;
3732 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
3733 int vers_size
= thunk_type_size(arg_type
, 0);
3736 uint32_t next
= vers
->next
;
3738 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
3740 if (remaining_data
< vers
->next
) {
3741 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3744 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
3745 strcpy(cur_data
+ vers_size
, vers
->name
);
3746 cur_data
+= vers
->next
;
3747 remaining_data
-= vers
->next
;
3751 vers
= (void*)vers
+ next
;
3756 unlock_user(argptr
, guest_data
, 0);
3757 ret
= -TARGET_EINVAL
;
3760 unlock_user(argptr
, guest_data
, guest_data_size
);
3762 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3764 ret
= -TARGET_EFAULT
;
3767 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3768 unlock_user(argptr
, arg
, target_size
);
3775 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
3776 int cmd
, abi_long arg
)
3780 const argtype
*arg_type
= ie
->arg_type
;
3781 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
3784 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
3785 struct blkpg_partition host_part
;
3787 /* Read and convert blkpg */
3789 target_size
= thunk_type_size(arg_type
, 0);
3790 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3792 ret
= -TARGET_EFAULT
;
3795 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3796 unlock_user(argptr
, arg
, 0);
3798 switch (host_blkpg
->op
) {
3799 case BLKPG_ADD_PARTITION
:
3800 case BLKPG_DEL_PARTITION
:
3801 /* payload is struct blkpg_partition */
3804 /* Unknown opcode */
3805 ret
= -TARGET_EINVAL
;
3809 /* Read and convert blkpg->data */
3810 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
3811 target_size
= thunk_type_size(part_arg_type
, 0);
3812 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3814 ret
= -TARGET_EFAULT
;
3817 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
3818 unlock_user(argptr
, arg
, 0);
3820 /* Swizzle the data pointer to our local copy and call! */
3821 host_blkpg
->data
= &host_part
;
3822 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, host_blkpg
));
/* Special-case SIOCADDRT/SIOCDELRT: struct rtentry contains an embedded
 * rt_dev string pointer that the generic thunk machinery cannot convert.
 * Convert the struct field-by-field, swizzling rt_dev to a locked host
 * copy of the target string before issuing the host ioctl.
 * Returns 0/-errno in target convention via get_errno(). */
static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    const StructEntry *se;
    const argtype *field_types;
    const int *dst_offsets, *src_offsets;
    int target_size;
    void *argptr;
    abi_ulong *target_rt_dev_ptr;
    unsigned long *host_rt_dev_ptr;
    abi_long ret;
    int i;

    assert(ie->access == IOC_W);
    assert(*arg_type == TYPE_PTR);
    arg_type++;
    assert(*arg_type == TYPE_STRUCT);
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    arg_type++;
    assert(*arg_type == (int)STRUCT_rtentry);
    se = struct_entries + *arg_type++;
    assert(se->convert[0] == NULL);
    /* convert struct here to be able to catch rt_dev string */
    field_types = se->field_types;
    dst_offsets = se->field_offsets[THUNK_HOST];
    src_offsets = se->field_offsets[THUNK_TARGET];
    for (i = 0; i < se->nb_fields; i++) {
        if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
            assert(*field_types == TYPE_PTRVOID);
            target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
            host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
            if (*target_rt_dev_ptr != 0) {
                /* Lock the target device-name string; unlocked below
                 * after the ioctl completes. */
                *host_rt_dev_ptr = (unsigned long)lock_user_string(
                                       tswapal(*target_rt_dev_ptr));
                if (!*host_rt_dev_ptr) {
                    unlock_user(argptr, arg, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                *host_rt_dev_ptr = 0;
            }
            field_types++;
            continue;
        }
        field_types = thunk_convert(buf_temp + dst_offsets[i],
                                    argptr + src_offsets[i],
                                    field_types, THUNK_HOST);
    }
    unlock_user(argptr, arg, 0);

    ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
    if (*host_rt_dev_ptr != 0) {
        unlock_user((void *)*host_rt_dev_ptr,
                    *target_rt_dev_ptr, 0);
    }
    return ret;
}
3891 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3892 int fd
, int cmd
, abi_long arg
)
3894 int sig
= target_to_host_signal(arg
);
3895 return get_errno(ioctl(fd
, ie
->host_cmd
, sig
));
/* Table of supported ioctls; populated from ioctls.h via the IOCTL /
 * IOCTL_SPECIAL macros. IOCTL_SPECIAL entries carry a custom conversion
 * callback (dofn); the list is terminated by a zero target_cmd. */
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
#include "ioctls.h"
    { 0, 0, },
};
/* ??? Implement proper locking for ioctls.  */
/* do_ioctl() Must return target values and target errnos. */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    /* Linear search of the ioctl table; terminated by target_cmd == 0. */
    ie = ioctl_entries;
    for (;;) {
        if (ie->target_cmd == 0) {
            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
#if defined(DEBUG)
    gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
#endif
    /* Entries with a custom handler bypass the generic conversion below. */
    if (ie->do_ioctl) {
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    }

    switch (arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
        /* Pass-through scalar argument. */
        ret = get_errno(ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch (ie->access) {
        case IOC_R:
            /* Kernel fills buf_temp; convert back to the target on success. */
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* Convert target struct to host layout, then issue the ioctl. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            /* Convert in, call, convert out. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
/* termios c_iflag translation: target <-> host input-mode bits.
 * Each row is { target_mask, target_bits, host_mask, host_bits };
 * terminated by an all-zero sentinel. */
static const bitmask_transtbl iflag_tbl[] = {
    { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
    { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
    { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
    { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
    { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
    { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
    { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
    { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
    { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
    { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
    { TARGET_IXON, TARGET_IXON, IXON, IXON },
    { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
    { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
    { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
    { 0, 0, 0, 0 }
};
/* termios c_oflag translation: target <-> host output-mode bits.
 * Multi-bit delay fields (NLDLY, CRDLY, TABDLY, BSDLY, VTDLY, FFDLY)
 * get one row per value within the field mask. */
static const bitmask_transtbl oflag_tbl[] = {
    { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
    { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
    { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
    { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
    { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
    { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
    { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
    { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
    { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
    { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
    { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
    { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
    { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
    { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
    { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
    { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
    { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
    { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
    { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
    { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
    { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
    { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
    { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
    { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
    { 0, 0, 0, 0 }
};
/* termios c_cflag translation: target <-> host control-mode bits.
 * Baud rates are values within the CBAUD field; CSIZE likewise holds
 * the character-size values CS5..CS8. */
static const bitmask_transtbl cflag_tbl[] = {
    { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
    { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
    { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
    { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
    { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
    { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
    { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
    { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
    { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
    { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
    { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
    { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
    { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
    { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
    { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
    { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
    { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
    { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
    { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
    { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
    { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
    { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
    { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
    { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
    { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
    { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
    { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
    { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
    { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
    { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
    { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
    { 0, 0, 0, 0 }
};
/* termios c_lflag translation: target <-> host local-mode bits. */
static const bitmask_transtbl lflag_tbl[] = {
    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
    { 0, 0, 0, 0 }
};
/* Convert a target struct termios (byte-swapped as needed) into the
 * host representation: translate the four flag words through the
 * bitmask tables and copy the control characters one by one. */
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    /* Zero first: host and target c_cc layouts differ, and not every
     * slot has a counterpart. */
    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}
/* Inverse of target_to_host_termios: convert a host struct termios to
 * the target layout and byte order. */
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    /* Zero first: slots with no host counterpart stay cleared. */
    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}
/* Thunk descriptor for struct termios: custom converters replace the
 * generic field-by-field conversion. convert[0] is host->target,
 * convert[1] is target->host. */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios),
               __alignof__(struct host_termios) },
};
/* mmap() flag translation: target <-> host MAP_* bits. */
static bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
      MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
      MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
      MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
      MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
      MAP_NORESERVE, MAP_NORESERVE },
    { 0, 0, 0, 0 }
};
4184 #if defined(TARGET_I386)
4186 /* NOTE: there is really one LDT for all the threads */
4187 static uint8_t *ldt_table
;
/* modify_ldt(func=0): copy the emulated LDT into guest memory.
 * Returns the number of bytes copied, 0 if no LDT has been allocated
 * yet, or -TARGET_EFAULT on a bad pointer. */
static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
{
    int size;
    void *p;

    if (!ldt_table)
        return 0;
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
        size = bytecount;
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
    if (!p)
        return -TARGET_EFAULT;
    /* ??? Should this by byteswapped?  */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);
    return size;
}
/* XXX: add locking support */
/* modify_ldt(func=1 or 0x11): install one descriptor into the emulated
 * LDT. oldmode selects the legacy semantics (no 'useable' bit, and any
 * base==0/limit==0 request clears the entry). The descriptor encoding
 * mirrors the Linux kernel's write_ldt(). */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the packed flags word (same layout as the kernel's
     * struct user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0)) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Encode the x86 segment descriptor (two 32-bit words). */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
/* specific and weird i386 syscalls */
/* Dispatch modify_ldt(2): func 0 reads the LDT, func 1 writes an entry
 * with legacy semantics, func 0x11 writes with the new semantics. */
static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
                              unsigned long bytecount)
{
    abi_long ret;

    switch (func) {
    case 0:
        ret = read_ldt(ptr, bytecount);
        break;
    case 1:
        ret = write_ldt(env, ptr, bytecount, 1);
        break;
    case 0x11:
        ret = write_ldt(env, ptr, bytecount, 0);
        break;
    default:
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
4323 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/* set_thread_area(2): install a TLS descriptor into the emulated GDT.
 * entry_number == -1 asks us to pick the first free TLS slot and write
 * the chosen index back to the guest's struct. */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        /* Pick the first unused TLS slot and report it back. */
        for (i = TARGET_GDT_ENTRY_TLS_MIN; i <= TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    /* Unpack the packed flags word (kernel struct user_desc layout). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0)) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Encode the x86 segment descriptor (two 32-bit words). */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
/* get_thread_area(2): read a TLS descriptor back out of the emulated
 * GDT and re-pack it into the guest's struct user_desc layout. */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Decode the descriptor bits (inverse of do_set_thread_area). */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
4455 #endif /* TARGET_I386 && TARGET_ABI32 */
4457 #ifndef TARGET_ABI32
/* arch_prctl(2) for 64-bit x86 targets: get/set the FS/GS segment
 * bases directly in the CPU state. */
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    abi_long ret = 0;
    abi_ulong val;
    int idx;

    switch (code) {
    case TARGET_ARCH_SET_GS:
    case TARGET_ARCH_SET_FS:
        if (code == TARGET_ARCH_SET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        /* Load a null selector, then set the base explicitly. */
        cpu_x86_load_seg(env, idx, 0);
        env->segs[idx].base = addr;
        break;
    case TARGET_ARCH_GET_GS:
    case TARGET_ARCH_GET_FS:
        if (code == TARGET_ARCH_GET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        val = env->segs[idx].base;
        if (put_user(val, addr, abi_ulong))
            ret = -TARGET_EFAULT;
        break;
    default:
        ret = -TARGET_EINVAL;
        break;
    }
    return ret;
}
4492 #endif /* defined(TARGET_I386) */
4494 #define NEW_STACK_SIZE 0x40000
4497 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
4500 pthread_mutex_t mutex
;
4501 pthread_cond_t cond
;
4504 abi_ulong child_tidptr
;
4505 abi_ulong parent_tidptr
;
/* Thread entry point for CLONE_VM clones: publish our tid, unblock
 * signals, signal readiness to the parent via info->cond, then wait on
 * clone_lock until the parent finishes TLS setup before entering the
 * CPU loop. Never returns. */
static void * QEMU_NORETURN clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    env = info->env;
    cpu = ENV_GET_CPU(env);
    thread_cpu = cpu;   /* NOTE(review): reconstructed line — confirm against upstream */
    ts = (TaskState *)cpu->opaque;
    info->tid = gettid();
    cpu->host_tid = info->tid;
    task_settid(ts);    /* NOTE(review): reconstructed line — confirm against upstream */
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finshed initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
}
4541 /* do_fork() Must return host values and target errnos (unlike most
4542 do_*() functions). */
4543 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
4544 abi_ulong parent_tidptr
, target_ulong newtls
,
4545 abi_ulong child_tidptr
)
4547 CPUState
*cpu
= ENV_GET_CPU(env
);
4551 CPUArchState
*new_env
;
4552 unsigned int nptl_flags
;
4555 /* Emulate vfork() with fork() */
4556 if (flags
& CLONE_VFORK
)
4557 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
4559 if (flags
& CLONE_VM
) {
4560 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
4561 new_thread_info info
;
4562 pthread_attr_t attr
;
4564 ts
= g_malloc0(sizeof(TaskState
));
4565 init_task_state(ts
);
4566 /* we create a new CPU instance. */
4567 new_env
= cpu_copy(env
);
4568 /* Init regs that differ from the parent. */
4569 cpu_clone_regs(new_env
, newsp
);
4570 new_cpu
= ENV_GET_CPU(new_env
);
4571 new_cpu
->opaque
= ts
;
4572 ts
->bprm
= parent_ts
->bprm
;
4573 ts
->info
= parent_ts
->info
;
4575 flags
&= ~CLONE_NPTL_FLAGS2
;
4577 if (nptl_flags
& CLONE_CHILD_CLEARTID
) {
4578 ts
->child_tidptr
= child_tidptr
;
4581 if (nptl_flags
& CLONE_SETTLS
)
4582 cpu_set_tls (new_env
, newtls
);
4584 /* Grab a mutex so that thread setup appears atomic. */
4585 pthread_mutex_lock(&clone_lock
);
4587 memset(&info
, 0, sizeof(info
));
4588 pthread_mutex_init(&info
.mutex
, NULL
);
4589 pthread_mutex_lock(&info
.mutex
);
4590 pthread_cond_init(&info
.cond
, NULL
);
4592 if (nptl_flags
& CLONE_CHILD_SETTID
)
4593 info
.child_tidptr
= child_tidptr
;
4594 if (nptl_flags
& CLONE_PARENT_SETTID
)
4595 info
.parent_tidptr
= parent_tidptr
;
4597 ret
= pthread_attr_init(&attr
);
4598 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
4599 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
4600 /* It is not safe to deliver signals until the child has finished
4601 initializing, so temporarily block all signals. */
4602 sigfillset(&sigmask
);
4603 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
4605 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
4606 /* TODO: Free new CPU state if thread creation failed. */
4608 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
4609 pthread_attr_destroy(&attr
);
4611 /* Wait for the child to initialize. */
4612 pthread_cond_wait(&info
.cond
, &info
.mutex
);
4614 if (flags
& CLONE_PARENT_SETTID
)
4615 put_user_u32(ret
, parent_tidptr
);
4619 pthread_mutex_unlock(&info
.mutex
);
4620 pthread_cond_destroy(&info
.cond
);
4621 pthread_mutex_destroy(&info
.mutex
);
4622 pthread_mutex_unlock(&clone_lock
);
4624 /* if no CLONE_VM, we consider it is a fork */
4625 if ((flags
& ~(CSIGNAL
| CLONE_NPTL_FLAGS2
)) != 0)
4630 /* Child Process. */
4632 cpu_clone_regs(env
, newsp
);
4634 /* There is a race condition here. The parent process could
4635 theoretically read the TID in the child process before the child
4636 tid is set. This would require using either ptrace
4637 (not implemented) or having *_tidptr to point at a shared memory
4638 mapping. We can't repeat the spinlock hack used above because
4639 the child process gets its own copy of the lock. */
4640 if (flags
& CLONE_CHILD_SETTID
)
4641 put_user_u32(gettid(), child_tidptr
);
4642 if (flags
& CLONE_PARENT_SETTID
)
4643 put_user_u32(gettid(), parent_tidptr
);
4644 ts
= (TaskState
*)cpu
->opaque
;
4645 if (flags
& CLONE_SETTLS
)
4646 cpu_set_tls (env
, newtls
);
4647 if (flags
& CLONE_CHILD_CLEARTID
)
4648 ts
->child_tidptr
= child_tidptr
;
4656 /* warning : doesn't handle linux specific flags... */
4657 static int target_to_host_fcntl_cmd(int cmd
)
4660 case TARGET_F_DUPFD
:
4661 case TARGET_F_GETFD
:
4662 case TARGET_F_SETFD
:
4663 case TARGET_F_GETFL
:
4664 case TARGET_F_SETFL
:
4666 case TARGET_F_GETLK
:
4668 case TARGET_F_SETLK
:
4670 case TARGET_F_SETLKW
:
4672 case TARGET_F_GETOWN
:
4674 case TARGET_F_SETOWN
:
4676 case TARGET_F_GETSIG
:
4678 case TARGET_F_SETSIG
:
4680 #if TARGET_ABI_BITS == 32
4681 case TARGET_F_GETLK64
:
4683 case TARGET_F_SETLK64
:
4685 case TARGET_F_SETLKW64
:
4688 case TARGET_F_SETLEASE
:
4690 case TARGET_F_GETLEASE
:
4692 #ifdef F_DUPFD_CLOEXEC
4693 case TARGET_F_DUPFD_CLOEXEC
:
4694 return F_DUPFD_CLOEXEC
;
4696 case TARGET_F_NOTIFY
:
4699 case TARGET_F_GETOWN_EX
:
4703 case TARGET_F_SETOWN_EX
:
4707 return -TARGET_EINVAL
;
4709 return -TARGET_EINVAL
;
4712 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
4713 static const bitmask_transtbl flock_tbl
[] = {
4714 TRANSTBL_CONVERT(F_RDLCK
),
4715 TRANSTBL_CONVERT(F_WRLCK
),
4716 TRANSTBL_CONVERT(F_UNLCK
),
4717 TRANSTBL_CONVERT(F_EXLCK
),
4718 TRANSTBL_CONVERT(F_SHLCK
),
4722 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
4725 struct target_flock
*target_fl
;
4726 struct flock64 fl64
;
4727 struct target_flock64
*target_fl64
;
4729 struct f_owner_ex fox
;
4730 struct target_f_owner_ex
*target_fox
;
4733 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
4735 if (host_cmd
== -TARGET_EINVAL
)
4739 case TARGET_F_GETLK
:
4740 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
4741 return -TARGET_EFAULT
;
4743 target_to_host_bitmask(tswap16(target_fl
->l_type
), flock_tbl
);
4744 fl
.l_whence
= tswap16(target_fl
->l_whence
);
4745 fl
.l_start
= tswapal(target_fl
->l_start
);
4746 fl
.l_len
= tswapal(target_fl
->l_len
);
4747 fl
.l_pid
= tswap32(target_fl
->l_pid
);
4748 unlock_user_struct(target_fl
, arg
, 0);
4749 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
4751 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg
, 0))
4752 return -TARGET_EFAULT
;
4754 host_to_target_bitmask(tswap16(fl
.l_type
), flock_tbl
);
4755 target_fl
->l_whence
= tswap16(fl
.l_whence
);
4756 target_fl
->l_start
= tswapal(fl
.l_start
);
4757 target_fl
->l_len
= tswapal(fl
.l_len
);
4758 target_fl
->l_pid
= tswap32(fl
.l_pid
);
4759 unlock_user_struct(target_fl
, arg
, 1);
4763 case TARGET_F_SETLK
:
4764 case TARGET_F_SETLKW
:
4765 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
4766 return -TARGET_EFAULT
;
4768 target_to_host_bitmask(tswap16(target_fl
->l_type
), flock_tbl
);
4769 fl
.l_whence
= tswap16(target_fl
->l_whence
);
4770 fl
.l_start
= tswapal(target_fl
->l_start
);
4771 fl
.l_len
= tswapal(target_fl
->l_len
);
4772 fl
.l_pid
= tswap32(target_fl
->l_pid
);
4773 unlock_user_struct(target_fl
, arg
, 0);
4774 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
4777 case TARGET_F_GETLK64
:
4778 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
4779 return -TARGET_EFAULT
;
4781 target_to_host_bitmask(tswap16(target_fl64
->l_type
), flock_tbl
) >> 1;
4782 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
4783 fl64
.l_start
= tswap64(target_fl64
->l_start
);
4784 fl64
.l_len
= tswap64(target_fl64
->l_len
);
4785 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
4786 unlock_user_struct(target_fl64
, arg
, 0);
4787 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
4789 if (!lock_user_struct(VERIFY_WRITE
, target_fl64
, arg
, 0))
4790 return -TARGET_EFAULT
;
4791 target_fl64
->l_type
=
4792 host_to_target_bitmask(tswap16(fl64
.l_type
), flock_tbl
) >> 1;
4793 target_fl64
->l_whence
= tswap16(fl64
.l_whence
);
4794 target_fl64
->l_start
= tswap64(fl64
.l_start
);
4795 target_fl64
->l_len
= tswap64(fl64
.l_len
);
4796 target_fl64
->l_pid
= tswap32(fl64
.l_pid
);
4797 unlock_user_struct(target_fl64
, arg
, 1);
4800 case TARGET_F_SETLK64
:
4801 case TARGET_F_SETLKW64
:
4802 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
4803 return -TARGET_EFAULT
;
4805 target_to_host_bitmask(tswap16(target_fl64
->l_type
), flock_tbl
) >> 1;
4806 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
4807 fl64
.l_start
= tswap64(target_fl64
->l_start
);
4808 fl64
.l_len
= tswap64(target_fl64
->l_len
);
4809 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
4810 unlock_user_struct(target_fl64
, arg
, 0);
4811 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
4814 case TARGET_F_GETFL
:
4815 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
4817 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
4821 case TARGET_F_SETFL
:
4822 ret
= get_errno(fcntl(fd
, host_cmd
, target_to_host_bitmask(arg
, fcntl_flags_tbl
)));
4826 case TARGET_F_GETOWN_EX
:
4827 ret
= get_errno(fcntl(fd
, host_cmd
, &fox
));
4829 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
4830 return -TARGET_EFAULT
;
4831 target_fox
->type
= tswap32(fox
.type
);
4832 target_fox
->pid
= tswap32(fox
.pid
);
4833 unlock_user_struct(target_fox
, arg
, 1);
4839 case TARGET_F_SETOWN_EX
:
4840 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
4841 return -TARGET_EFAULT
;
4842 fox
.type
= tswap32(target_fox
->type
);
4843 fox
.pid
= tswap32(target_fox
->pid
);
4844 unlock_user_struct(target_fox
, arg
, 0);
4845 ret
= get_errno(fcntl(fd
, host_cmd
, &fox
));
4849 case TARGET_F_SETOWN
:
4850 case TARGET_F_GETOWN
:
4851 case TARGET_F_SETSIG
:
4852 case TARGET_F_GETSIG
:
4853 case TARGET_F_SETLEASE
:
4854 case TARGET_F_GETLEASE
:
4855 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
4859 ret
= get_errno(fcntl(fd
, cmd
, arg
));
4867 static inline int high2lowuid(int uid
)
4875 static inline int high2lowgid(int gid
)
4883 static inline int low2highuid(int uid
)
4885 if ((int16_t)uid
== -1)
4891 static inline int low2highgid(int gid
)
4893 if ((int16_t)gid
== -1)
4898 static inline int tswapid(int id
)
4903 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
4905 #else /* !USE_UID16 */
4906 static inline int high2lowuid(int uid
)
4910 static inline int high2lowgid(int gid
)
4914 static inline int low2highuid(int uid
)
4918 static inline int low2highgid(int gid
)
4922 static inline int tswapid(int id
)
4927 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
4929 #endif /* USE_UID16 */
4931 void syscall_init(void)
4934 const argtype
*arg_type
;
4938 thunk_init(STRUCT_MAX
);
4940 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4941 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4942 #include "syscall_types.h"
4944 #undef STRUCT_SPECIAL
4946 /* Build target_to_host_errno_table[] table from
4947 * host_to_target_errno_table[]. */
4948 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
4949 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
4952 /* we patch the ioctl size if necessary. We rely on the fact that
4953 no ioctl has all the bits at '1' in the size field */
4955 while (ie
->target_cmd
!= 0) {
4956 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
4957 TARGET_IOC_SIZEMASK
) {
4958 arg_type
= ie
->arg_type
;
4959 if (arg_type
[0] != TYPE_PTR
) {
4960 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
4965 size
= thunk_type_size(arg_type
, 0);
4966 ie
->target_cmd
= (ie
->target_cmd
&
4967 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
4968 (size
<< TARGET_IOC_SIZESHIFT
);
4971 /* automatic consistency check if same arch */
4972 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4973 (defined(__x86_64__) && defined(TARGET_X86_64))
4974 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
4975 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4976 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#if TARGET_ABI_BITS == 32
/* Combine the two registers of a 32-bit ABI register pair into one
   64-bit offset, honouring the target's endianness. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* On 64-bit ABIs the offset already fits in a single register. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* truncate64: reassemble the 64-bit length from a register pair,
   skipping the alignment hole some ABIs insert before the pair. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* ftruncate64: same register-pair handling as target_truncate64 above,
   but operating on an already-open fd. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
5027 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
5028 abi_ulong target_addr
)
5030 struct target_timespec
*target_ts
;
5032 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
5033 return -TARGET_EFAULT
;
5034 host_ts
->tv_sec
= tswapal(target_ts
->tv_sec
);
5035 host_ts
->tv_nsec
= tswapal(target_ts
->tv_nsec
);
5036 unlock_user_struct(target_ts
, target_addr
, 0);
5040 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
5041 struct timespec
*host_ts
)
5043 struct target_timespec
*target_ts
;
5045 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
5046 return -TARGET_EFAULT
;
5047 target_ts
->tv_sec
= tswapal(host_ts
->tv_sec
);
5048 target_ts
->tv_nsec
= tswapal(host_ts
->tv_nsec
);
5049 unlock_user_struct(target_ts
, target_addr
, 1);
5053 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
5054 abi_ulong target_addr
)
5056 struct target_itimerspec
*target_itspec
;
5058 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
5059 return -TARGET_EFAULT
;
5062 host_itspec
->it_interval
.tv_sec
=
5063 tswapal(target_itspec
->it_interval
.tv_sec
);
5064 host_itspec
->it_interval
.tv_nsec
=
5065 tswapal(target_itspec
->it_interval
.tv_nsec
);
5066 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
5067 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
5069 unlock_user_struct(target_itspec
, target_addr
, 1);
5073 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
5074 struct itimerspec
*host_its
)
5076 struct target_itimerspec
*target_itspec
;
5078 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
5079 return -TARGET_EFAULT
;
5082 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
5083 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
5085 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
5086 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
5088 unlock_user_struct(target_itspec
, target_addr
, 0);
5092 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
5093 abi_ulong target_addr
)
5095 struct target_sigevent
*target_sevp
;
5097 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
5098 return -TARGET_EFAULT
;
5101 /* This union is awkward on 64 bit systems because it has a 32 bit
5102 * integer and a pointer in it; we follow the conversion approach
5103 * used for handling sigval types in signal.c so the guest should get
5104 * the correct value back even if we did a 64 bit byteswap and it's
5105 * using the 32 bit integer.
5107 host_sevp
->sigev_value
.sival_ptr
=
5108 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
5109 host_sevp
->sigev_signo
=
5110 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
5111 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
5112 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
5114 unlock_user_struct(target_sevp
, target_addr
, 1);
#if defined(TARGET_NR_mlockall)
/* Translate guest mlockall() flag bits into the host's MCL_* bits. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }

    return result;
}
#endif
#if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
/* Write *host_st out to the guest stat64 structure at target_addr,
   choosing the correct target layout (ARM EABI vs generic) at runtime
   or compile time.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) {
            return -TARGET_EFAULT;
        }
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) {
            return -TARGET_EFAULT;
        }
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif
5198 /* ??? Using host futex calls even when target atomic operations
5199 are not really atomic probably breaks things. However implementing
5200 futexes locally would make futexes shared between multiple processes
5201 tricky. However they're probably useless because guest atomic
5202 operations won't work either. */
5203 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
5204 target_ulong uaddr2
, int val3
)
5206 struct timespec ts
, *pts
;
5209 /* ??? We assume FUTEX_* constants are the same on both host
5211 #ifdef FUTEX_CMD_MASK
5212 base_op
= op
& FUTEX_CMD_MASK
;
5218 case FUTEX_WAIT_BITSET
:
5221 target_to_host_timespec(pts
, timeout
);
5225 return get_errno(sys_futex(g2h(uaddr
), op
, tswap32(val
),
5228 return get_errno(sys_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
5230 return get_errno(sys_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
5232 case FUTEX_CMP_REQUEUE
:
5234 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
5235 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
5236 But the prototype takes a `struct timespec *'; insert casts
5237 to satisfy the compiler. We do not need to tswap TIMEOUT
5238 since it's not compared to guest memory. */
5239 pts
= (struct timespec
*)(uintptr_t) timeout
;
5240 return get_errno(sys_futex(g2h(uaddr
), op
, val
, pts
,
5242 (base_op
== FUTEX_CMP_REQUEUE
5246 return -TARGET_ENOSYS
;
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
/* Emulate reading /proc/self/cmdline: stream the real file into fd, but
   drop the first NUL-terminated word (the qemu binary path) so the guest
   sees its own argv[0] first.  Returns 0 on success, -1 on error. */
static int open_self_cmdline(void *cpu_env, int fd)
{
    int fd_orig = -1;
    bool word_skipped = false;

    fd_orig = open("/proc/self/cmdline", O_RDONLY);
    if (fd_orig < 0) {
        return fd_orig;
    }

    while (true) {
        ssize_t nb_read;
        char buf[128];
        char *cp_buf = buf;

        nb_read = read(fd_orig, buf, sizeof(buf));
        if (nb_read < 0) {
            fd_orig = close(fd_orig);
            return -1;
        } else if (nb_read == 0) {
            break;
        }

        if (!word_skipped) {
            /* Skip the first string, which is the path to qemu-*-static
               instead of the actual command. */
            /* Fix: search only the nb_read bytes actually read; scanning
               sizeof(buf) could match stale/uninitialized bytes past the
               end of valid data on a short read. */
            cp_buf = memchr(buf, 0, nb_read);
            if (cp_buf) {
                /* Null byte found, skip one string */
                cp_buf++;
                nb_read -= cp_buf - buf;
                word_skipped = true;
            }
        }

        if (word_skipped) {
            if (write(fd, cp_buf, nb_read) != nb_read) {
                close(fd_orig);
                return -1;
            }
        }
    }

    return close(fd_orig);
}
5310 static int open_self_maps(void *cpu_env
, int fd
)
5312 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
5313 TaskState
*ts
= cpu
->opaque
;
5319 fp
= fopen("/proc/self/maps", "r");
5324 while ((read
= getline(&line
, &len
, fp
)) != -1) {
5325 int fields
, dev_maj
, dev_min
, inode
;
5326 uint64_t min
, max
, offset
;
5327 char flag_r
, flag_w
, flag_x
, flag_p
;
5328 char path
[512] = "";
5329 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
5330 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
5331 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
5333 if ((fields
< 10) || (fields
> 11)) {
5336 if (h2g_valid(min
)) {
5337 int flags
= page_get_flags(h2g(min
));
5338 max
= h2g_valid(max
- 1) ? max
: (uintptr_t)g2h(GUEST_ADDR_MAX
);
5339 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
5342 if (h2g(min
) == ts
->info
->stack_limit
) {
5343 pstrcpy(path
, sizeof(path
), " [stack]");
5345 dprintf(fd
, TARGET_ABI_FMT_lx
"-" TARGET_ABI_FMT_lx
5346 " %c%c%c%c %08" PRIx64
" %02x:%02x %d %s%s\n",
5347 h2g(min
), h2g(max
- 1) + 1, flag_r
, flag_w
,
5348 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
5349 path
[0] ? " " : "", path
);
5359 static int open_self_stat(void *cpu_env
, int fd
)
5361 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
5362 TaskState
*ts
= cpu
->opaque
;
5363 abi_ulong start_stack
= ts
->info
->start_stack
;
5366 for (i
= 0; i
< 44; i
++) {
5374 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
5375 } else if (i
== 1) {
5377 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
5378 } else if (i
== 27) {
5381 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
5383 /* for the rest, there is MasterCard */
5384 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
5388 if (write(fd
, buf
, len
) != len
) {
5396 static int open_self_auxv(void *cpu_env
, int fd
)
5398 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
5399 TaskState
*ts
= cpu
->opaque
;
5400 abi_ulong auxv
= ts
->info
->saved_auxv
;
5401 abi_ulong len
= ts
->info
->auxv_len
;
5405 * Auxiliary vector is stored in target process stack.
5406 * read in whole auxv vector and copy it to file
5408 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
5412 r
= write(fd
, ptr
, len
);
5419 lseek(fd
, 0, SEEK_SET
);
5420 unlock_user(ptr
, auxv
, len
);
/* Return 1 if filename names the given entry of this process's /proc
   directory (either "/proc/self/<entry>" or "/proc/<pid>/<entry>" with
   our own pid), 0 otherwise. */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}
5450 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-match comparator used by the fake_open table for absolute
   /proc paths (cf. is_proc_myself, which strips the /proc prefix). */
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}
5456 static int open_net_route(void *cpu_env
, int fd
)
5463 fp
= fopen("/proc/net/route", "r");
5470 read
= getline(&line
, &len
, fp
);
5471 dprintf(fd
, "%s", line
);
5475 while ((read
= getline(&line
, &len
, fp
)) != -1) {
5477 uint32_t dest
, gw
, mask
;
5478 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
5479 sscanf(line
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5480 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
5481 &mask
, &mtu
, &window
, &irtt
);
5482 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5483 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
5484 metric
, tswap32(mask
), mtu
, window
, irtt
);
5494 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
5497 const char *filename
;
5498 int (*fill
)(void *cpu_env
, int fd
);
5499 int (*cmp
)(const char *s1
, const char *s2
);
5501 const struct fake_open
*fake_open
;
5502 static const struct fake_open fakes
[] = {
5503 { "maps", open_self_maps
, is_proc_myself
},
5504 { "stat", open_self_stat
, is_proc_myself
},
5505 { "auxv", open_self_auxv
, is_proc_myself
},
5506 { "cmdline", open_self_cmdline
, is_proc_myself
},
5507 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
5508 { "/proc/net/route", open_net_route
, is_proc
},
5510 { NULL
, NULL
, NULL
}
5513 if (is_proc_myself(pathname
, "exe")) {
5514 int execfd
= qemu_getauxval(AT_EXECFD
);
5515 return execfd
? execfd
: get_errno(sys_openat(dirfd
, exec_path
, flags
, mode
));
5518 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
5519 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
5524 if (fake_open
->filename
) {
5526 char filename
[PATH_MAX
];
5529 /* create temporary file to map stat to */
5530 tmpdir
= getenv("TMPDIR");
5533 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
5534 fd
= mkstemp(filename
);
5540 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
5544 lseek(fd
, 0, SEEK_SET
);
5549 return get_errno(sys_openat(dirfd
, path(pathname
), flags
, mode
));
5552 #define TIMER_MAGIC 0x0caf0000
5553 #define TIMER_MAGIC_MASK 0xffff0000
5555 /* Convert QEMU provided timer ID back to internal 16bit index format */
5556 static target_timer_t
get_timer_id(abi_long arg
)
5558 target_timer_t timerid
= arg
;
5560 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
5561 return -TARGET_EINVAL
;
5566 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
5567 return -TARGET_EINVAL
;
5573 /* do_syscall() should always have a single exit point at the end so
5574 that actions, such as logging of syscall results, can be performed.
5575 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5576 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
5577 abi_long arg2
, abi_long arg3
, abi_long arg4
,
5578 abi_long arg5
, abi_long arg6
, abi_long arg7
,
5581 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
5588 gemu_log("syscall %d", num
);
5591 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
5594 case TARGET_NR_exit
:
5595 /* In old applications this may be used to implement _exit(2).
5596 However in threaded applictions it is used for thread termination,
5597 and _exit_group is used for application termination.
5598 Do thread termination if we have more then one thread. */
5599 /* FIXME: This probably breaks if a signal arrives. We should probably
5600 be disabling signals. */
5601 if (CPU_NEXT(first_cpu
)) {
5605 /* Remove the CPU from the list. */
5606 QTAILQ_REMOVE(&cpus
, cpu
, node
);
5609 if (ts
->child_tidptr
) {
5610 put_user_u32(0, ts
->child_tidptr
);
5611 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
5615 object_unref(OBJECT(cpu
));
5617 rcu_unregister_thread();
5623 gdb_exit(cpu_env
, arg1
);
5625 ret
= 0; /* avoid warning */
5627 case TARGET_NR_read
:
5631 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
5633 ret
= get_errno(read(arg1
, p
, arg3
));
5634 unlock_user(p
, arg2
, ret
);
5637 case TARGET_NR_write
:
5638 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
5640 ret
= get_errno(write(arg1
, p
, arg3
));
5641 unlock_user(p
, arg2
, 0);
5643 #ifdef TARGET_NR_open
5644 case TARGET_NR_open
:
5645 if (!(p
= lock_user_string(arg1
)))
5647 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
5648 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
5650 unlock_user(p
, arg1
, 0);
5653 case TARGET_NR_openat
:
5654 if (!(p
= lock_user_string(arg2
)))
5656 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
5657 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
5659 unlock_user(p
, arg2
, 0);
5661 case TARGET_NR_close
:
5662 ret
= get_errno(close(arg1
));
5667 #ifdef TARGET_NR_fork
5668 case TARGET_NR_fork
:
5669 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
5672 #ifdef TARGET_NR_waitpid
5673 case TARGET_NR_waitpid
:
5676 ret
= get_errno(waitpid(arg1
, &status
, arg3
));
5677 if (!is_error(ret
) && arg2
&& ret
5678 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
5683 #ifdef TARGET_NR_waitid
5684 case TARGET_NR_waitid
:
5688 ret
= get_errno(waitid(arg1
, arg2
, &info
, arg4
));
5689 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
5690 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
5692 host_to_target_siginfo(p
, &info
);
5693 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
5698 #ifdef TARGET_NR_creat /* not on alpha */
5699 case TARGET_NR_creat
:
5700 if (!(p
= lock_user_string(arg1
)))
5702 ret
= get_errno(creat(p
, arg2
));
5703 unlock_user(p
, arg1
, 0);
5706 #ifdef TARGET_NR_link
5707 case TARGET_NR_link
:
5710 p
= lock_user_string(arg1
);
5711 p2
= lock_user_string(arg2
);
5713 ret
= -TARGET_EFAULT
;
5715 ret
= get_errno(link(p
, p2
));
5716 unlock_user(p2
, arg2
, 0);
5717 unlock_user(p
, arg1
, 0);
5721 #if defined(TARGET_NR_linkat)
5722 case TARGET_NR_linkat
:
5727 p
= lock_user_string(arg2
);
5728 p2
= lock_user_string(arg4
);
5730 ret
= -TARGET_EFAULT
;
5732 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
5733 unlock_user(p
, arg2
, 0);
5734 unlock_user(p2
, arg4
, 0);
5738 #ifdef TARGET_NR_unlink
5739 case TARGET_NR_unlink
:
5740 if (!(p
= lock_user_string(arg1
)))
5742 ret
= get_errno(unlink(p
));
5743 unlock_user(p
, arg1
, 0);
5746 #if defined(TARGET_NR_unlinkat)
5747 case TARGET_NR_unlinkat
:
5748 if (!(p
= lock_user_string(arg2
)))
5750 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
5751 unlock_user(p
, arg2
, 0);
5754 case TARGET_NR_execve
:
5756 char **argp
, **envp
;
5759 abi_ulong guest_argp
;
5760 abi_ulong guest_envp
;
5767 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
5768 if (get_user_ual(addr
, gp
))
5776 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
5777 if (get_user_ual(addr
, gp
))
5784 argp
= alloca((argc
+ 1) * sizeof(void *));
5785 envp
= alloca((envc
+ 1) * sizeof(void *));
5787 for (gp
= guest_argp
, q
= argp
; gp
;
5788 gp
+= sizeof(abi_ulong
), q
++) {
5789 if (get_user_ual(addr
, gp
))
5793 if (!(*q
= lock_user_string(addr
)))
5795 total_size
+= strlen(*q
) + 1;
5799 for (gp
= guest_envp
, q
= envp
; gp
;
5800 gp
+= sizeof(abi_ulong
), q
++) {
5801 if (get_user_ual(addr
, gp
))
5805 if (!(*q
= lock_user_string(addr
)))
5807 total_size
+= strlen(*q
) + 1;
5811 /* This case will not be caught by the host's execve() if its
5812 page size is bigger than the target's. */
5813 if (total_size
> MAX_ARG_PAGES
* TARGET_PAGE_SIZE
) {
5814 ret
= -TARGET_E2BIG
;
5817 if (!(p
= lock_user_string(arg1
)))
5819 ret
= get_errno(execve(p
, argp
, envp
));
5820 unlock_user(p
, arg1
, 0);
5825 ret
= -TARGET_EFAULT
;
5828 for (gp
= guest_argp
, q
= argp
; *q
;
5829 gp
+= sizeof(abi_ulong
), q
++) {
5830 if (get_user_ual(addr
, gp
)
5833 unlock_user(*q
, addr
, 0);
5835 for (gp
= guest_envp
, q
= envp
; *q
;
5836 gp
+= sizeof(abi_ulong
), q
++) {
5837 if (get_user_ual(addr
, gp
)
5840 unlock_user(*q
, addr
, 0);
5844 case TARGET_NR_chdir
:
5845 if (!(p
= lock_user_string(arg1
)))
5847 ret
= get_errno(chdir(p
));
5848 unlock_user(p
, arg1
, 0);
5850 #ifdef TARGET_NR_time
5851 case TARGET_NR_time
:
5854 ret
= get_errno(time(&host_time
));
5857 && put_user_sal(host_time
, arg1
))
5862 #ifdef TARGET_NR_mknod
5863 case TARGET_NR_mknod
:
5864 if (!(p
= lock_user_string(arg1
)))
5866 ret
= get_errno(mknod(p
, arg2
, arg3
));
5867 unlock_user(p
, arg1
, 0);
5870 #if defined(TARGET_NR_mknodat)
5871 case TARGET_NR_mknodat
:
5872 if (!(p
= lock_user_string(arg2
)))
5874 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
5875 unlock_user(p
, arg2
, 0);
5878 #ifdef TARGET_NR_chmod
5879 case TARGET_NR_chmod
:
5880 if (!(p
= lock_user_string(arg1
)))
5882 ret
= get_errno(chmod(p
, arg2
));
5883 unlock_user(p
, arg1
, 0);
5886 #ifdef TARGET_NR_break
5887 case TARGET_NR_break
:
5890 #ifdef TARGET_NR_oldstat
5891 case TARGET_NR_oldstat
:
5894 case TARGET_NR_lseek
:
5895 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
5897 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5898 /* Alpha specific */
5899 case TARGET_NR_getxpid
:
5900 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
5901 ret
= get_errno(getpid());
5904 #ifdef TARGET_NR_getpid
5905 case TARGET_NR_getpid
:
5906 ret
= get_errno(getpid());
5909 case TARGET_NR_mount
:
5911 /* need to look at the data field */
5915 p
= lock_user_string(arg1
);
5923 p2
= lock_user_string(arg2
);
5926 unlock_user(p
, arg1
, 0);
5932 p3
= lock_user_string(arg3
);
5935 unlock_user(p
, arg1
, 0);
5937 unlock_user(p2
, arg2
, 0);
5944 /* FIXME - arg5 should be locked, but it isn't clear how to
5945 * do that since it's not guaranteed to be a NULL-terminated
5949 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
5951 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
5953 ret
= get_errno(ret
);
5956 unlock_user(p
, arg1
, 0);
5958 unlock_user(p2
, arg2
, 0);
5960 unlock_user(p3
, arg3
, 0);
5964 #ifdef TARGET_NR_umount
5965 case TARGET_NR_umount
:
5966 if (!(p
= lock_user_string(arg1
)))
5968 ret
= get_errno(umount(p
));
5969 unlock_user(p
, arg1
, 0);
5972 #ifdef TARGET_NR_stime /* not on alpha */
5973 case TARGET_NR_stime
:
5976 if (get_user_sal(host_time
, arg1
))
5978 ret
= get_errno(stime(&host_time
));
5982 case TARGET_NR_ptrace
:
5984 #ifdef TARGET_NR_alarm /* not on alpha */
5985 case TARGET_NR_alarm
:
5989 #ifdef TARGET_NR_oldfstat
5990 case TARGET_NR_oldfstat
:
5993 #ifdef TARGET_NR_pause /* not on alpha */
5994 case TARGET_NR_pause
:
5995 ret
= get_errno(pause());
5998 #ifdef TARGET_NR_utime
5999 case TARGET_NR_utime
:
6001 struct utimbuf tbuf
, *host_tbuf
;
6002 struct target_utimbuf
*target_tbuf
;
6004 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
6006 tbuf
.actime
= tswapal(target_tbuf
->actime
);
6007 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
6008 unlock_user_struct(target_tbuf
, arg2
, 0);
6013 if (!(p
= lock_user_string(arg1
)))
6015 ret
= get_errno(utime(p
, host_tbuf
));
6016 unlock_user(p
, arg1
, 0);
6020 #ifdef TARGET_NR_utimes
6021 case TARGET_NR_utimes
:
6023 struct timeval
*tvp
, tv
[2];
6025 if (copy_from_user_timeval(&tv
[0], arg2
)
6026 || copy_from_user_timeval(&tv
[1],
6027 arg2
+ sizeof(struct target_timeval
)))
6033 if (!(p
= lock_user_string(arg1
)))
6035 ret
= get_errno(utimes(p
, tvp
));
6036 unlock_user(p
, arg1
, 0);
6040 #if defined(TARGET_NR_futimesat)
6041 case TARGET_NR_futimesat
:
6043 struct timeval
*tvp
, tv
[2];
6045 if (copy_from_user_timeval(&tv
[0], arg3
)
6046 || copy_from_user_timeval(&tv
[1],
6047 arg3
+ sizeof(struct target_timeval
)))
6053 if (!(p
= lock_user_string(arg2
)))
6055 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
6056 unlock_user(p
, arg2
, 0);
6060 #ifdef TARGET_NR_stty
6061 case TARGET_NR_stty
:
6064 #ifdef TARGET_NR_gtty
6065 case TARGET_NR_gtty
:
6068 #ifdef TARGET_NR_access
6069 case TARGET_NR_access
:
6070 if (!(p
= lock_user_string(arg1
)))
6072 ret
= get_errno(access(path(p
), arg2
));
6073 unlock_user(p
, arg1
, 0);
6076 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
6077 case TARGET_NR_faccessat
:
6078 if (!(p
= lock_user_string(arg2
)))
6080 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
6081 unlock_user(p
, arg2
, 0);
6084 #ifdef TARGET_NR_nice /* not on alpha */
6085 case TARGET_NR_nice
:
6086 ret
= get_errno(nice(arg1
));
6089 #ifdef TARGET_NR_ftime
6090 case TARGET_NR_ftime
:
6093 case TARGET_NR_sync
:
6097 case TARGET_NR_kill
:
6098 ret
= get_errno(kill(arg1
, target_to_host_signal(arg2
)));
6100 #ifdef TARGET_NR_rename
6101 case TARGET_NR_rename
:
6104 p
= lock_user_string(arg1
);
6105 p2
= lock_user_string(arg2
);
6107 ret
= -TARGET_EFAULT
;
6109 ret
= get_errno(rename(p
, p2
));
6110 unlock_user(p2
, arg2
, 0);
6111 unlock_user(p
, arg1
, 0);
6115 #if defined(TARGET_NR_renameat)
6116 case TARGET_NR_renameat
:
6119 p
= lock_user_string(arg2
);
6120 p2
= lock_user_string(arg4
);
6122 ret
= -TARGET_EFAULT
;
6124 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
6125 unlock_user(p2
, arg4
, 0);
6126 unlock_user(p
, arg2
, 0);
6130 #ifdef TARGET_NR_mkdir
6131 case TARGET_NR_mkdir
:
6132 if (!(p
= lock_user_string(arg1
)))
6134 ret
= get_errno(mkdir(p
, arg2
));
6135 unlock_user(p
, arg1
, 0);
6138 #if defined(TARGET_NR_mkdirat)
6139 case TARGET_NR_mkdirat
:
6140 if (!(p
= lock_user_string(arg2
)))
6142 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
6143 unlock_user(p
, arg2
, 0);
6146 #ifdef TARGET_NR_rmdir
6147 case TARGET_NR_rmdir
:
6148 if (!(p
= lock_user_string(arg1
)))
6150 ret
= get_errno(rmdir(p
));
6151 unlock_user(p
, arg1
, 0);
6155 ret
= get_errno(dup(arg1
));
6157 #ifdef TARGET_NR_pipe
6158 case TARGET_NR_pipe
:
6159 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
6162 #ifdef TARGET_NR_pipe2
6163 case TARGET_NR_pipe2
:
6164 ret
= do_pipe(cpu_env
, arg1
,
6165 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
6168 case TARGET_NR_times
:
6170 struct target_tms
*tmsp
;
6172 ret
= get_errno(times(&tms
));
6174 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
6177 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
6178 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
6179 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
6180 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
6183 ret
= host_to_target_clock_t(ret
);
6186 #ifdef TARGET_NR_prof
6187 case TARGET_NR_prof
:
6190 #ifdef TARGET_NR_signal
6191 case TARGET_NR_signal
:
6194 case TARGET_NR_acct
:
6196 ret
= get_errno(acct(NULL
));
6198 if (!(p
= lock_user_string(arg1
)))
6200 ret
= get_errno(acct(path(p
)));
6201 unlock_user(p
, arg1
, 0);
6204 #ifdef TARGET_NR_umount2
6205 case TARGET_NR_umount2
:
6206 if (!(p
= lock_user_string(arg1
)))
6208 ret
= get_errno(umount2(p
, arg2
));
6209 unlock_user(p
, arg1
, 0);
6212 #ifdef TARGET_NR_lock
6213 case TARGET_NR_lock
:
6216 case TARGET_NR_ioctl
:
6217 ret
= do_ioctl(arg1
, arg2
, arg3
);
6219 case TARGET_NR_fcntl
:
6220 ret
= do_fcntl(arg1
, arg2
, arg3
);
6222 #ifdef TARGET_NR_mpx
6226 case TARGET_NR_setpgid
:
6227 ret
= get_errno(setpgid(arg1
, arg2
));
6229 #ifdef TARGET_NR_ulimit
6230 case TARGET_NR_ulimit
:
6233 #ifdef TARGET_NR_oldolduname
6234 case TARGET_NR_oldolduname
:
6237 case TARGET_NR_umask
:
6238 ret
= get_errno(umask(arg1
));
6240 case TARGET_NR_chroot
:
6241 if (!(p
= lock_user_string(arg1
)))
6243 ret
= get_errno(chroot(p
));
6244 unlock_user(p
, arg1
, 0);
6246 #ifdef TARGET_NR_ustat
6247 case TARGET_NR_ustat
:
6250 #ifdef TARGET_NR_dup2
6251 case TARGET_NR_dup2
:
6252 ret
= get_errno(dup2(arg1
, arg2
));
6255 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
6256 case TARGET_NR_dup3
:
6257 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
6260 #ifdef TARGET_NR_getppid /* not on alpha */
6261 case TARGET_NR_getppid
:
6262 ret
= get_errno(getppid());
6265 #ifdef TARGET_NR_getpgrp
6266 case TARGET_NR_getpgrp
:
6267 ret
= get_errno(getpgrp());
6270 case TARGET_NR_setsid
:
6271 ret
= get_errno(setsid());
6273 #ifdef TARGET_NR_sigaction
6274 case TARGET_NR_sigaction
:
6276 #if defined(TARGET_ALPHA)
6277 struct target_sigaction act
, oact
, *pact
= 0;
6278 struct target_old_sigaction
*old_act
;
6280 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
6282 act
._sa_handler
= old_act
->_sa_handler
;
6283 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
6284 act
.sa_flags
= old_act
->sa_flags
;
6285 act
.sa_restorer
= 0;
6286 unlock_user_struct(old_act
, arg2
, 0);
6289 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
6290 if (!is_error(ret
) && arg3
) {
6291 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
6293 old_act
->_sa_handler
= oact
._sa_handler
;
6294 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
6295 old_act
->sa_flags
= oact
.sa_flags
;
6296 unlock_user_struct(old_act
, arg3
, 1);
6298 #elif defined(TARGET_MIPS)
6299 struct target_sigaction act
, oact
, *pact
, *old_act
;
6302 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
6304 act
._sa_handler
= old_act
->_sa_handler
;
6305 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
6306 act
.sa_flags
= old_act
->sa_flags
;
6307 unlock_user_struct(old_act
, arg2
, 0);
6313 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
6315 if (!is_error(ret
) && arg3
) {
6316 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
6318 old_act
->_sa_handler
= oact
._sa_handler
;
6319 old_act
->sa_flags
= oact
.sa_flags
;
6320 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
6321 old_act
->sa_mask
.sig
[1] = 0;
6322 old_act
->sa_mask
.sig
[2] = 0;
6323 old_act
->sa_mask
.sig
[3] = 0;
6324 unlock_user_struct(old_act
, arg3
, 1);
6327 struct target_old_sigaction
*old_act
;
6328 struct target_sigaction act
, oact
, *pact
;
6330 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
6332 act
._sa_handler
= old_act
->_sa_handler
;
6333 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
6334 act
.sa_flags
= old_act
->sa_flags
;
6335 act
.sa_restorer
= old_act
->sa_restorer
;
6336 unlock_user_struct(old_act
, arg2
, 0);
6341 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
6342 if (!is_error(ret
) && arg3
) {
6343 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
6345 old_act
->_sa_handler
= oact
._sa_handler
;
6346 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
6347 old_act
->sa_flags
= oact
.sa_flags
;
6348 old_act
->sa_restorer
= oact
.sa_restorer
;
6349 unlock_user_struct(old_act
, arg3
, 1);
6355 case TARGET_NR_rt_sigaction
:
6357 #if defined(TARGET_ALPHA)
6358 struct target_sigaction act
, oact
, *pact
= 0;
6359 struct target_rt_sigaction
*rt_act
;
6360 /* ??? arg4 == sizeof(sigset_t). */
6362 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
6364 act
._sa_handler
= rt_act
->_sa_handler
;
6365 act
.sa_mask
= rt_act
->sa_mask
;
6366 act
.sa_flags
= rt_act
->sa_flags
;
6367 act
.sa_restorer
= arg5
;
6368 unlock_user_struct(rt_act
, arg2
, 0);
6371 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
6372 if (!is_error(ret
) && arg3
) {
6373 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
6375 rt_act
->_sa_handler
= oact
._sa_handler
;
6376 rt_act
->sa_mask
= oact
.sa_mask
;
6377 rt_act
->sa_flags
= oact
.sa_flags
;
6378 unlock_user_struct(rt_act
, arg3
, 1);
6381 struct target_sigaction
*act
;
6382 struct target_sigaction
*oact
;
6385 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
6390 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
6391 ret
= -TARGET_EFAULT
;
6392 goto rt_sigaction_fail
;
6396 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
6399 unlock_user_struct(act
, arg2
, 0);
6401 unlock_user_struct(oact
, arg3
, 1);
6405 #ifdef TARGET_NR_sgetmask /* not on alpha */
6406 case TARGET_NR_sgetmask
:
6409 abi_ulong target_set
;
6410 do_sigprocmask(0, NULL
, &cur_set
);
6411 host_to_target_old_sigset(&target_set
, &cur_set
);
6416 #ifdef TARGET_NR_ssetmask /* not on alpha */
6417 case TARGET_NR_ssetmask
:
6419 sigset_t set
, oset
, cur_set
;
6420 abi_ulong target_set
= arg1
;
6421 do_sigprocmask(0, NULL
, &cur_set
);
6422 target_to_host_old_sigset(&set
, &target_set
);
6423 sigorset(&set
, &set
, &cur_set
);
6424 do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
6425 host_to_target_old_sigset(&target_set
, &oset
);
6430 #ifdef TARGET_NR_sigprocmask
6431 case TARGET_NR_sigprocmask
:
6433 #if defined(TARGET_ALPHA)
6434 sigset_t set
, oldset
;
6439 case TARGET_SIG_BLOCK
:
6442 case TARGET_SIG_UNBLOCK
:
6445 case TARGET_SIG_SETMASK
:
6449 ret
= -TARGET_EINVAL
;
6453 target_to_host_old_sigset(&set
, &mask
);
6455 ret
= get_errno(do_sigprocmask(how
, &set
, &oldset
));
6456 if (!is_error(ret
)) {
6457 host_to_target_old_sigset(&mask
, &oldset
);
6459 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
6462 sigset_t set
, oldset
, *set_ptr
;
6467 case TARGET_SIG_BLOCK
:
6470 case TARGET_SIG_UNBLOCK
:
6473 case TARGET_SIG_SETMASK
:
6477 ret
= -TARGET_EINVAL
;
6480 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
6482 target_to_host_old_sigset(&set
, p
);
6483 unlock_user(p
, arg2
, 0);
6489 ret
= get_errno(do_sigprocmask(how
, set_ptr
, &oldset
));
6490 if (!is_error(ret
) && arg3
) {
6491 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
6493 host_to_target_old_sigset(p
, &oldset
);
6494 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
6500 case TARGET_NR_rt_sigprocmask
:
6503 sigset_t set
, oldset
, *set_ptr
;
6507 case TARGET_SIG_BLOCK
:
6510 case TARGET_SIG_UNBLOCK
:
6513 case TARGET_SIG_SETMASK
:
6517 ret
= -TARGET_EINVAL
;
6520 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
6522 target_to_host_sigset(&set
, p
);
6523 unlock_user(p
, arg2
, 0);
6529 ret
= get_errno(do_sigprocmask(how
, set_ptr
, &oldset
));
6530 if (!is_error(ret
) && arg3
) {
6531 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
6533 host_to_target_sigset(p
, &oldset
);
6534 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
6538 #ifdef TARGET_NR_sigpending
6539 case TARGET_NR_sigpending
:
6542 ret
= get_errno(sigpending(&set
));
6543 if (!is_error(ret
)) {
6544 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
6546 host_to_target_old_sigset(p
, &set
);
6547 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
6552 case TARGET_NR_rt_sigpending
:
6555 ret
= get_errno(sigpending(&set
));
6556 if (!is_error(ret
)) {
6557 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
6559 host_to_target_sigset(p
, &set
);
6560 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
6564 #ifdef TARGET_NR_sigsuspend
6565 case TARGET_NR_sigsuspend
:
6568 #if defined(TARGET_ALPHA)
6569 abi_ulong mask
= arg1
;
6570 target_to_host_old_sigset(&set
, &mask
);
6572 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
6574 target_to_host_old_sigset(&set
, p
);
6575 unlock_user(p
, arg1
, 0);
6577 ret
= get_errno(sigsuspend(&set
));
6581 case TARGET_NR_rt_sigsuspend
:
6584 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
6586 target_to_host_sigset(&set
, p
);
6587 unlock_user(p
, arg1
, 0);
6588 ret
= get_errno(sigsuspend(&set
));
6591 case TARGET_NR_rt_sigtimedwait
:
6594 struct timespec uts
, *puts
;
6597 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
6599 target_to_host_sigset(&set
, p
);
6600 unlock_user(p
, arg1
, 0);
6603 target_to_host_timespec(puts
, arg3
);
6607 ret
= get_errno(sigtimedwait(&set
, &uinfo
, puts
));
6608 if (!is_error(ret
)) {
6610 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
6615 host_to_target_siginfo(p
, &uinfo
);
6616 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
6618 ret
= host_to_target_signal(ret
);
6622 case TARGET_NR_rt_sigqueueinfo
:
6625 if (!(p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_sigset_t
), 1)))
6627 target_to_host_siginfo(&uinfo
, p
);
6628 unlock_user(p
, arg1
, 0);
6629 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
6632 #ifdef TARGET_NR_sigreturn
6633 case TARGET_NR_sigreturn
:
6634 /* NOTE: ret is eax, so not transcoding must be done */
6635 ret
= do_sigreturn(cpu_env
);
6638 case TARGET_NR_rt_sigreturn
:
6639 /* NOTE: ret is eax, so not transcoding must be done */
6640 ret
= do_rt_sigreturn(cpu_env
);
6642 case TARGET_NR_sethostname
:
6643 if (!(p
= lock_user_string(arg1
)))
6645 ret
= get_errno(sethostname(p
, arg2
));
6646 unlock_user(p
, arg1
, 0);
6648 case TARGET_NR_setrlimit
:
6650 int resource
= target_to_host_resource(arg1
);
6651 struct target_rlimit
*target_rlim
;
6653 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
6655 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
6656 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
6657 unlock_user_struct(target_rlim
, arg2
, 0);
6658 ret
= get_errno(setrlimit(resource
, &rlim
));
6661 case TARGET_NR_getrlimit
:
6663 int resource
= target_to_host_resource(arg1
);
6664 struct target_rlimit
*target_rlim
;
6667 ret
= get_errno(getrlimit(resource
, &rlim
));
6668 if (!is_error(ret
)) {
6669 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
6671 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
6672 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
6673 unlock_user_struct(target_rlim
, arg2
, 1);
6677 case TARGET_NR_getrusage
:
6679 struct rusage rusage
;
6680 ret
= get_errno(getrusage(arg1
, &rusage
));
6681 if (!is_error(ret
)) {
6682 ret
= host_to_target_rusage(arg2
, &rusage
);
6686 case TARGET_NR_gettimeofday
:
6689 ret
= get_errno(gettimeofday(&tv
, NULL
));
6690 if (!is_error(ret
)) {
6691 if (copy_to_user_timeval(arg1
, &tv
))
6696 case TARGET_NR_settimeofday
:
6698 struct timeval tv
, *ptv
= NULL
;
6699 struct timezone tz
, *ptz
= NULL
;
6702 if (copy_from_user_timeval(&tv
, arg1
)) {
6709 if (copy_from_user_timezone(&tz
, arg2
)) {
6715 ret
= get_errno(settimeofday(ptv
, ptz
));
6718 #if defined(TARGET_NR_select)
6719 case TARGET_NR_select
:
6720 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
6721 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
6724 struct target_sel_arg_struct
*sel
;
6725 abi_ulong inp
, outp
, exp
, tvp
;
6728 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1))
6730 nsel
= tswapal(sel
->n
);
6731 inp
= tswapal(sel
->inp
);
6732 outp
= tswapal(sel
->outp
);
6733 exp
= tswapal(sel
->exp
);
6734 tvp
= tswapal(sel
->tvp
);
6735 unlock_user_struct(sel
, arg1
, 0);
6736 ret
= do_select(nsel
, inp
, outp
, exp
, tvp
);
6741 #ifdef TARGET_NR_pselect6
6742 case TARGET_NR_pselect6
:
6744 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
6745 fd_set rfds
, wfds
, efds
;
6746 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
6747 struct timespec ts
, *ts_ptr
;
6750 * The 6th arg is actually two args smashed together,
6751 * so we cannot use the C library.
6759 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
6760 target_sigset_t
*target_sigset
;
6768 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
6772 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
6776 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
6782 * This takes a timespec, and not a timeval, so we cannot
6783 * use the do_select() helper ...
6786 if (target_to_host_timespec(&ts
, ts_addr
)) {
6794 /* Extract the two packed args for the sigset */
6797 sig
.size
= _NSIG
/ 8;
6799 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
6803 arg_sigset
= tswapal(arg7
[0]);
6804 arg_sigsize
= tswapal(arg7
[1]);
6805 unlock_user(arg7
, arg6
, 0);
6809 if (arg_sigsize
!= sizeof(*target_sigset
)) {
6810 /* Like the kernel, we enforce correct size sigsets */
6811 ret
= -TARGET_EINVAL
;
6814 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
6815 sizeof(*target_sigset
), 1);
6816 if (!target_sigset
) {
6819 target_to_host_sigset(&set
, target_sigset
);
6820 unlock_user(target_sigset
, arg_sigset
, 0);
6828 ret
= get_errno(sys_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
6831 if (!is_error(ret
)) {
6832 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
6834 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
6836 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
6839 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
6845 #ifdef TARGET_NR_symlink
6846 case TARGET_NR_symlink
:
6849 p
= lock_user_string(arg1
);
6850 p2
= lock_user_string(arg2
);
6852 ret
= -TARGET_EFAULT
;
6854 ret
= get_errno(symlink(p
, p2
));
6855 unlock_user(p2
, arg2
, 0);
6856 unlock_user(p
, arg1
, 0);
6860 #if defined(TARGET_NR_symlinkat)
6861 case TARGET_NR_symlinkat
:
6864 p
= lock_user_string(arg1
);
6865 p2
= lock_user_string(arg3
);
6867 ret
= -TARGET_EFAULT
;
6869 ret
= get_errno(symlinkat(p
, arg2
, p2
));
6870 unlock_user(p2
, arg3
, 0);
6871 unlock_user(p
, arg1
, 0);
6875 #ifdef TARGET_NR_oldlstat
6876 case TARGET_NR_oldlstat
:
6879 #ifdef TARGET_NR_readlink
6880 case TARGET_NR_readlink
:
6883 p
= lock_user_string(arg1
);
6884 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
6886 ret
= -TARGET_EFAULT
;
6888 /* Short circuit this for the magic exe check. */
6889 ret
= -TARGET_EINVAL
;
6890 } else if (is_proc_myself((const char *)p
, "exe")) {
6891 char real
[PATH_MAX
], *temp
;
6892 temp
= realpath(exec_path
, real
);
6893 /* Return value is # of bytes that we wrote to the buffer. */
6895 ret
= get_errno(-1);
6897 /* Don't worry about sign mismatch as earlier mapping
6898 * logic would have thrown a bad address error. */
6899 ret
= MIN(strlen(real
), arg3
);
6900 /* We cannot NUL terminate the string. */
6901 memcpy(p2
, real
, ret
);
6904 ret
= get_errno(readlink(path(p
), p2
, arg3
));
6906 unlock_user(p2
, arg2
, ret
);
6907 unlock_user(p
, arg1
, 0);
6911 #if defined(TARGET_NR_readlinkat)
6912 case TARGET_NR_readlinkat
:
6915 p
= lock_user_string(arg2
);
6916 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
6918 ret
= -TARGET_EFAULT
;
6919 } else if (is_proc_myself((const char *)p
, "exe")) {
6920 char real
[PATH_MAX
], *temp
;
6921 temp
= realpath(exec_path
, real
);
6922 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
6923 snprintf((char *)p2
, arg4
, "%s", real
);
6925 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
6927 unlock_user(p2
, arg3
, ret
);
6928 unlock_user(p
, arg2
, 0);
6932 #ifdef TARGET_NR_uselib
6933 case TARGET_NR_uselib
:
6936 #ifdef TARGET_NR_swapon
6937 case TARGET_NR_swapon
:
6938 if (!(p
= lock_user_string(arg1
)))
6940 ret
= get_errno(swapon(p
, arg2
));
6941 unlock_user(p
, arg1
, 0);
6944 case TARGET_NR_reboot
:
6945 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
6946 /* arg4 must be ignored in all other cases */
6947 p
= lock_user_string(arg4
);
6951 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
6952 unlock_user(p
, arg4
, 0);
6954 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
6957 #ifdef TARGET_NR_readdir
6958 case TARGET_NR_readdir
:
6961 #ifdef TARGET_NR_mmap
6962 case TARGET_NR_mmap
:
6963 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6964 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
6965 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
6966 || defined(TARGET_S390X)
6969 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
6970 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
6978 unlock_user(v
, arg1
, 0);
6979 ret
= get_errno(target_mmap(v1
, v2
, v3
,
6980 target_to_host_bitmask(v4
, mmap_flags_tbl
),
6984 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
6985 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
6991 #ifdef TARGET_NR_mmap2
6992 case TARGET_NR_mmap2
:
6994 #define MMAP_SHIFT 12
6996 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
6997 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
6999 arg6
<< MMAP_SHIFT
));
7002 case TARGET_NR_munmap
:
7003 ret
= get_errno(target_munmap(arg1
, arg2
));
7005 case TARGET_NR_mprotect
:
7007 TaskState
*ts
= cpu
->opaque
;
7008 /* Special hack to detect libc making the stack executable. */
7009 if ((arg3
& PROT_GROWSDOWN
)
7010 && arg1
>= ts
->info
->stack_limit
7011 && arg1
<= ts
->info
->start_stack
) {
7012 arg3
&= ~PROT_GROWSDOWN
;
7013 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
7014 arg1
= ts
->info
->stack_limit
;
7017 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
7019 #ifdef TARGET_NR_mremap
7020 case TARGET_NR_mremap
:
7021 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
7024 /* ??? msync/mlock/munlock are broken for softmmu. */
7025 #ifdef TARGET_NR_msync
7026 case TARGET_NR_msync
:
7027 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
7030 #ifdef TARGET_NR_mlock
7031 case TARGET_NR_mlock
:
7032 ret
= get_errno(mlock(g2h(arg1
), arg2
));
7035 #ifdef TARGET_NR_munlock
7036 case TARGET_NR_munlock
:
7037 ret
= get_errno(munlock(g2h(arg1
), arg2
));
7040 #ifdef TARGET_NR_mlockall
7041 case TARGET_NR_mlockall
:
7042 ret
= get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
7045 #ifdef TARGET_NR_munlockall
7046 case TARGET_NR_munlockall
:
7047 ret
= get_errno(munlockall());
7050 case TARGET_NR_truncate
:
7051 if (!(p
= lock_user_string(arg1
)))
7053 ret
= get_errno(truncate(p
, arg2
));
7054 unlock_user(p
, arg1
, 0);
7056 case TARGET_NR_ftruncate
:
7057 ret
= get_errno(ftruncate(arg1
, arg2
));
7059 case TARGET_NR_fchmod
:
7060 ret
= get_errno(fchmod(arg1
, arg2
));
7062 #if defined(TARGET_NR_fchmodat)
7063 case TARGET_NR_fchmodat
:
7064 if (!(p
= lock_user_string(arg2
)))
7066 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
7067 unlock_user(p
, arg2
, 0);
7070 case TARGET_NR_getpriority
:
7071 /* Note that negative values are valid for getpriority, so we must
7072 differentiate based on errno settings. */
7074 ret
= getpriority(arg1
, arg2
);
7075 if (ret
== -1 && errno
!= 0) {
7076 ret
= -host_to_target_errno(errno
);
7080 /* Return value is the unbiased priority. Signal no error. */
7081 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
7083 /* Return value is a biased priority to avoid negative numbers. */
7087 case TARGET_NR_setpriority
:
7088 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
7090 #ifdef TARGET_NR_profil
7091 case TARGET_NR_profil
:
7094 case TARGET_NR_statfs
:
7095 if (!(p
= lock_user_string(arg1
)))
7097 ret
= get_errno(statfs(path(p
), &stfs
));
7098 unlock_user(p
, arg1
, 0);
7100 if (!is_error(ret
)) {
7101 struct target_statfs
*target_stfs
;
7103 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
7105 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
7106 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
7107 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
7108 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
7109 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
7110 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
7111 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
7112 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
7113 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
7114 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
7115 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
7116 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
7117 unlock_user_struct(target_stfs
, arg2
, 1);
7120 case TARGET_NR_fstatfs
:
7121 ret
= get_errno(fstatfs(arg1
, &stfs
));
7122 goto convert_statfs
;
7123 #ifdef TARGET_NR_statfs64
7124 case TARGET_NR_statfs64
:
7125 if (!(p
= lock_user_string(arg1
)))
7127 ret
= get_errno(statfs(path(p
), &stfs
));
7128 unlock_user(p
, arg1
, 0);
7130 if (!is_error(ret
)) {
7131 struct target_statfs64
*target_stfs
;
7133 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
7135 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
7136 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
7137 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
7138 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
7139 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
7140 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
7141 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
7142 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
7143 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
7144 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
7145 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
7146 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
7147 unlock_user_struct(target_stfs
, arg3
, 1);
7150 case TARGET_NR_fstatfs64
:
7151 ret
= get_errno(fstatfs(arg1
, &stfs
));
7152 goto convert_statfs64
;
7154 #ifdef TARGET_NR_ioperm
7155 case TARGET_NR_ioperm
:
7158 #ifdef TARGET_NR_socketcall
7159 case TARGET_NR_socketcall
:
7160 ret
= do_socketcall(arg1
, arg2
);
7163 #ifdef TARGET_NR_accept
7164 case TARGET_NR_accept
:
7165 ret
= do_accept4(arg1
, arg2
, arg3
, 0);
7168 #ifdef TARGET_NR_accept4
7169 case TARGET_NR_accept4
:
7170 #ifdef CONFIG_ACCEPT4
7171 ret
= do_accept4(arg1
, arg2
, arg3
, arg4
);
7177 #ifdef TARGET_NR_bind
7178 case TARGET_NR_bind
:
7179 ret
= do_bind(arg1
, arg2
, arg3
);
7182 #ifdef TARGET_NR_connect
7183 case TARGET_NR_connect
:
7184 ret
= do_connect(arg1
, arg2
, arg3
);
7187 #ifdef TARGET_NR_getpeername
7188 case TARGET_NR_getpeername
:
7189 ret
= do_getpeername(arg1
, arg2
, arg3
);
7192 #ifdef TARGET_NR_getsockname
7193 case TARGET_NR_getsockname
:
7194 ret
= do_getsockname(arg1
, arg2
, arg3
);
7197 #ifdef TARGET_NR_getsockopt
7198 case TARGET_NR_getsockopt
:
7199 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
7202 #ifdef TARGET_NR_listen
7203 case TARGET_NR_listen
:
7204 ret
= get_errno(listen(arg1
, arg2
));
7207 #ifdef TARGET_NR_recv
7208 case TARGET_NR_recv
:
7209 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
7212 #ifdef TARGET_NR_recvfrom
7213 case TARGET_NR_recvfrom
:
7214 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7217 #ifdef TARGET_NR_recvmsg
7218 case TARGET_NR_recvmsg
:
7219 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
7222 #ifdef TARGET_NR_send
7223 case TARGET_NR_send
:
7224 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
7227 #ifdef TARGET_NR_sendmsg
7228 case TARGET_NR_sendmsg
:
7229 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
7232 #ifdef TARGET_NR_sendmmsg
7233 case TARGET_NR_sendmmsg
:
7234 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
7236 case TARGET_NR_recvmmsg
:
7237 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
7240 #ifdef TARGET_NR_sendto
7241 case TARGET_NR_sendto
:
7242 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7245 #ifdef TARGET_NR_shutdown
7246 case TARGET_NR_shutdown
:
7247 ret
= get_errno(shutdown(arg1
, arg2
));
7250 #ifdef TARGET_NR_socket
7251 case TARGET_NR_socket
:
7252 ret
= do_socket(arg1
, arg2
, arg3
);
7255 #ifdef TARGET_NR_socketpair
7256 case TARGET_NR_socketpair
:
7257 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
7260 #ifdef TARGET_NR_setsockopt
7261 case TARGET_NR_setsockopt
:
7262 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
7266 case TARGET_NR_syslog
:
7267 if (!(p
= lock_user_string(arg2
)))
7269 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
7270 unlock_user(p
, arg2
, 0);
7273 case TARGET_NR_setitimer
:
7275 struct itimerval value
, ovalue
, *pvalue
;
7279 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
7280 || copy_from_user_timeval(&pvalue
->it_value
,
7281 arg2
+ sizeof(struct target_timeval
)))
7286 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
7287 if (!is_error(ret
) && arg3
) {
7288 if (copy_to_user_timeval(arg3
,
7289 &ovalue
.it_interval
)
7290 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
7296 case TARGET_NR_getitimer
:
7298 struct itimerval value
;
7300 ret
= get_errno(getitimer(arg1
, &value
));
7301 if (!is_error(ret
) && arg2
) {
7302 if (copy_to_user_timeval(arg2
,
7304 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
7310 #ifdef TARGET_NR_stat
7311 case TARGET_NR_stat
:
7312 if (!(p
= lock_user_string(arg1
)))
7314 ret
= get_errno(stat(path(p
), &st
));
7315 unlock_user(p
, arg1
, 0);
7318 #ifdef TARGET_NR_lstat
7319 case TARGET_NR_lstat
:
7320 if (!(p
= lock_user_string(arg1
)))
7322 ret
= get_errno(lstat(path(p
), &st
));
7323 unlock_user(p
, arg1
, 0);
7326 case TARGET_NR_fstat
:
7328 ret
= get_errno(fstat(arg1
, &st
));
7329 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
7332 if (!is_error(ret
)) {
7333 struct target_stat
*target_st
;
7335 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
7337 memset(target_st
, 0, sizeof(*target_st
));
7338 __put_user(st
.st_dev
, &target_st
->st_dev
);
7339 __put_user(st
.st_ino
, &target_st
->st_ino
);
7340 __put_user(st
.st_mode
, &target_st
->st_mode
);
7341 __put_user(st
.st_uid
, &target_st
->st_uid
);
7342 __put_user(st
.st_gid
, &target_st
->st_gid
);
7343 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
7344 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
7345 __put_user(st
.st_size
, &target_st
->st_size
);
7346 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
7347 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
7348 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
7349 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
7350 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
7351 unlock_user_struct(target_st
, arg2
, 1);
7355 #ifdef TARGET_NR_olduname
7356 case TARGET_NR_olduname
:
7359 #ifdef TARGET_NR_iopl
7360 case TARGET_NR_iopl
:
7363 case TARGET_NR_vhangup
:
7364 ret
= get_errno(vhangup());
7366 #ifdef TARGET_NR_idle
7367 case TARGET_NR_idle
:
7370 #ifdef TARGET_NR_syscall
7371 case TARGET_NR_syscall
:
7372 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
7373 arg6
, arg7
, arg8
, 0);
7376 case TARGET_NR_wait4
:
7379 abi_long status_ptr
= arg2
;
7380 struct rusage rusage
, *rusage_ptr
;
7381 abi_ulong target_rusage
= arg4
;
7382 abi_long rusage_err
;
7384 rusage_ptr
= &rusage
;
7387 ret
= get_errno(wait4(arg1
, &status
, arg3
, rusage_ptr
));
7388 if (!is_error(ret
)) {
7389 if (status_ptr
&& ret
) {
7390 status
= host_to_target_waitstatus(status
);
7391 if (put_user_s32(status
, status_ptr
))
7394 if (target_rusage
) {
7395 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
7403 #ifdef TARGET_NR_swapoff
7404 case TARGET_NR_swapoff
:
7405 if (!(p
= lock_user_string(arg1
)))
7407 ret
= get_errno(swapoff(p
));
7408 unlock_user(p
, arg1
, 0);
7411 case TARGET_NR_sysinfo
:
7413 struct target_sysinfo
*target_value
;
7414 struct sysinfo value
;
7415 ret
= get_errno(sysinfo(&value
));
7416 if (!is_error(ret
) && arg1
)
7418 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
7420 __put_user(value
.uptime
, &target_value
->uptime
);
7421 __put_user(value
.loads
[0], &target_value
->loads
[0]);
7422 __put_user(value
.loads
[1], &target_value
->loads
[1]);
7423 __put_user(value
.loads
[2], &target_value
->loads
[2]);
7424 __put_user(value
.totalram
, &target_value
->totalram
);
7425 __put_user(value
.freeram
, &target_value
->freeram
);
7426 __put_user(value
.sharedram
, &target_value
->sharedram
);
7427 __put_user(value
.bufferram
, &target_value
->bufferram
);
7428 __put_user(value
.totalswap
, &target_value
->totalswap
);
7429 __put_user(value
.freeswap
, &target_value
->freeswap
);
7430 __put_user(value
.procs
, &target_value
->procs
);
7431 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
7432 __put_user(value
.freehigh
, &target_value
->freehigh
);
7433 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
7434 unlock_user_struct(target_value
, arg1
, 1);
7438 #ifdef TARGET_NR_ipc
7440 ret
= do_ipc(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7443 #ifdef TARGET_NR_semget
7444 case TARGET_NR_semget
:
7445 ret
= get_errno(semget(arg1
, arg2
, arg3
));
7448 #ifdef TARGET_NR_semop
7449 case TARGET_NR_semop
:
7450 ret
= do_semop(arg1
, arg2
, arg3
);
7453 #ifdef TARGET_NR_semctl
7454 case TARGET_NR_semctl
:
7455 ret
= do_semctl(arg1
, arg2
, arg3
, (union target_semun
)(abi_ulong
)arg4
);
7458 #ifdef TARGET_NR_msgctl
7459 case TARGET_NR_msgctl
:
7460 ret
= do_msgctl(arg1
, arg2
, arg3
);
7463 #ifdef TARGET_NR_msgget
7464 case TARGET_NR_msgget
:
7465 ret
= get_errno(msgget(arg1
, arg2
));
7468 #ifdef TARGET_NR_msgrcv
7469 case TARGET_NR_msgrcv
:
7470 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
7473 #ifdef TARGET_NR_msgsnd
7474 case TARGET_NR_msgsnd
:
7475 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
7478 #ifdef TARGET_NR_shmget
7479 case TARGET_NR_shmget
:
7480 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
7483 #ifdef TARGET_NR_shmctl
7484 case TARGET_NR_shmctl
:
7485 ret
= do_shmctl(arg1
, arg2
, arg3
);
7488 #ifdef TARGET_NR_shmat
7489 case TARGET_NR_shmat
:
7490 ret
= do_shmat(arg1
, arg2
, arg3
);
7493 #ifdef TARGET_NR_shmdt
7494 case TARGET_NR_shmdt
:
7495 ret
= do_shmdt(arg1
);
7498 case TARGET_NR_fsync
:
7499 ret
= get_errno(fsync(arg1
));
7501 case TARGET_NR_clone
:
7502 /* Linux manages to have three different orderings for its
7503 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
7504 * match the kernel's CONFIG_CLONE_* settings.
7505 * Microblaze is further special in that it uses a sixth
7506 * implicit argument to clone for the TLS pointer.
7508 #if defined(TARGET_MICROBLAZE)
7509 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
7510 #elif defined(TARGET_CLONE_BACKWARDS)
7511 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
7512 #elif defined(TARGET_CLONE_BACKWARDS2)
7513 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
7515 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
7518 #ifdef __NR_exit_group
7519 /* new thread calls */
7520 case TARGET_NR_exit_group
:
7524 gdb_exit(cpu_env
, arg1
);
7525 ret
= get_errno(exit_group(arg1
));
7528 case TARGET_NR_setdomainname
:
7529 if (!(p
= lock_user_string(arg1
)))
7531 ret
= get_errno(setdomainname(p
, arg2
));
7532 unlock_user(p
, arg1
, 0);
7534 case TARGET_NR_uname
:
7535 /* no need to transcode because we use the linux syscall */
7537 struct new_utsname
* buf
;
7539 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
7541 ret
= get_errno(sys_uname(buf
));
7542 if (!is_error(ret
)) {
7543 /* Overrite the native machine name with whatever is being
7545 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
7546 /* Allow the user to override the reported release. */
7547 if (qemu_uname_release
&& *qemu_uname_release
)
7548 strcpy (buf
->release
, qemu_uname_release
);
7550 unlock_user_struct(buf
, arg1
, 1);
7554 case TARGET_NR_modify_ldt
:
7555 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
7557 #if !defined(TARGET_X86_64)
7558 case TARGET_NR_vm86old
:
7560 case TARGET_NR_vm86
:
7561 ret
= do_vm86(cpu_env
, arg1
, arg2
);
7565 case TARGET_NR_adjtimex
:
7567 #ifdef TARGET_NR_create_module
7568 case TARGET_NR_create_module
:
7570 case TARGET_NR_init_module
:
7571 case TARGET_NR_delete_module
:
7572 #ifdef TARGET_NR_get_kernel_syms
7573 case TARGET_NR_get_kernel_syms
:
7576 case TARGET_NR_quotactl
:
7578 case TARGET_NR_getpgid
:
7579 ret
= get_errno(getpgid(arg1
));
7581 case TARGET_NR_fchdir
:
7582 ret
= get_errno(fchdir(arg1
));
7584 #ifdef TARGET_NR_bdflush /* not on x86_64 */
7585 case TARGET_NR_bdflush
:
7588 #ifdef TARGET_NR_sysfs
7589 case TARGET_NR_sysfs
:
7592 case TARGET_NR_personality
:
7593 ret
= get_errno(personality(arg1
));
7595 #ifdef TARGET_NR_afs_syscall
7596 case TARGET_NR_afs_syscall
:
7599 #ifdef TARGET_NR__llseek /* Not on alpha */
7600 case TARGET_NR__llseek
:
7603 #if !defined(__NR_llseek)
7604 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | arg3
, arg5
);
7606 ret
= get_errno(res
);
7611 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
7613 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
7619 #ifdef TARGET_NR_getdents
7620 case TARGET_NR_getdents
:
7621 #ifdef __NR_getdents
7622 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
7624 struct target_dirent
*target_dirp
;
7625 struct linux_dirent
*dirp
;
7626 abi_long count
= arg3
;
7628 dirp
= malloc(count
);
7630 ret
= -TARGET_ENOMEM
;
7634 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
7635 if (!is_error(ret
)) {
7636 struct linux_dirent
*de
;
7637 struct target_dirent
*tde
;
7639 int reclen
, treclen
;
7640 int count1
, tnamelen
;
7644 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
7648 reclen
= de
->d_reclen
;
7649 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
7650 assert(tnamelen
>= 0);
7651 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
7652 assert(count1
+ treclen
<= count
);
7653 tde
->d_reclen
= tswap16(treclen
);
7654 tde
->d_ino
= tswapal(de
->d_ino
);
7655 tde
->d_off
= tswapal(de
->d_off
);
7656 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
7657 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
7659 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
7663 unlock_user(target_dirp
, arg2
, ret
);
7669 struct linux_dirent
*dirp
;
7670 abi_long count
= arg3
;
7672 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
7674 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
7675 if (!is_error(ret
)) {
7676 struct linux_dirent
*de
;
7681 reclen
= de
->d_reclen
;
7684 de
->d_reclen
= tswap16(reclen
);
7685 tswapls(&de
->d_ino
);
7686 tswapls(&de
->d_off
);
7687 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
7691 unlock_user(dirp
, arg2
, ret
);
7695 /* Implement getdents in terms of getdents64 */
7697 struct linux_dirent64
*dirp
;
7698 abi_long count
= arg3
;
7700 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
7704 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
7705 if (!is_error(ret
)) {
7706 /* Convert the dirent64 structs to target dirent. We do this
7707 * in-place, since we can guarantee that a target_dirent is no
7708 * larger than a dirent64; however this means we have to be
7709 * careful to read everything before writing in the new format.
7711 struct linux_dirent64
*de
;
7712 struct target_dirent
*tde
;
7717 tde
= (struct target_dirent
*)dirp
;
7719 int namelen
, treclen
;
7720 int reclen
= de
->d_reclen
;
7721 uint64_t ino
= de
->d_ino
;
7722 int64_t off
= de
->d_off
;
7723 uint8_t type
= de
->d_type
;
7725 namelen
= strlen(de
->d_name
);
7726 treclen
= offsetof(struct target_dirent
, d_name
)
7728 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
7730 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
7731 tde
->d_ino
= tswapal(ino
);
7732 tde
->d_off
= tswapal(off
);
7733 tde
->d_reclen
= tswap16(treclen
);
7734 /* The target_dirent type is in what was formerly a padding
7735 * byte at the end of the structure:
7737 *(((char *)tde
) + treclen
- 1) = type
;
7739 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
7740 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
7746 unlock_user(dirp
, arg2
, ret
);
7750 #endif /* TARGET_NR_getdents */
7751 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
7752 case TARGET_NR_getdents64
:
7754 struct linux_dirent64
*dirp
;
7755 abi_long count
= arg3
;
7756 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
7758 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
7759 if (!is_error(ret
)) {
7760 struct linux_dirent64
*de
;
7765 reclen
= de
->d_reclen
;
7768 de
->d_reclen
= tswap16(reclen
);
7769 tswap64s((uint64_t *)&de
->d_ino
);
7770 tswap64s((uint64_t *)&de
->d_off
);
7771 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
7775 unlock_user(dirp
, arg2
, ret
);
7778 #endif /* TARGET_NR_getdents64 */
7779 #if defined(TARGET_NR__newselect)
7780 case TARGET_NR__newselect
:
7781 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
7784 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
7785 # ifdef TARGET_NR_poll
7786 case TARGET_NR_poll
:
7788 # ifdef TARGET_NR_ppoll
7789 case TARGET_NR_ppoll
:
7792 struct target_pollfd
*target_pfd
;
7793 unsigned int nfds
= arg2
;
7798 target_pfd
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_pollfd
) * nfds
, 1);
7802 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
7803 for(i
= 0; i
< nfds
; i
++) {
7804 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
7805 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
7808 # ifdef TARGET_NR_ppoll
7809 if (num
== TARGET_NR_ppoll
) {
7810 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
7811 target_sigset_t
*target_set
;
7812 sigset_t _set
, *set
= &_set
;
7815 if (target_to_host_timespec(timeout_ts
, arg3
)) {
7816 unlock_user(target_pfd
, arg1
, 0);
7824 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
7826 unlock_user(target_pfd
, arg1
, 0);
7829 target_to_host_sigset(set
, target_set
);
7834 ret
= get_errno(sys_ppoll(pfd
, nfds
, timeout_ts
, set
, _NSIG
/8));
7836 if (!is_error(ret
) && arg3
) {
7837 host_to_target_timespec(arg3
, timeout_ts
);
7840 unlock_user(target_set
, arg4
, 0);
7844 ret
= get_errno(poll(pfd
, nfds
, timeout
));
7846 if (!is_error(ret
)) {
7847 for(i
= 0; i
< nfds
; i
++) {
7848 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
7851 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
7855 case TARGET_NR_flock
:
7856 /* NOTE: the flock constant seems to be the same for every
7858 ret
= get_errno(flock(arg1
, arg2
));
7860 case TARGET_NR_readv
:
7862 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
7864 ret
= get_errno(readv(arg1
, vec
, arg3
));
7865 unlock_iovec(vec
, arg2
, arg3
, 1);
7867 ret
= -host_to_target_errno(errno
);
7871 case TARGET_NR_writev
:
7873 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
7875 ret
= get_errno(writev(arg1
, vec
, arg3
));
7876 unlock_iovec(vec
, arg2
, arg3
, 0);
7878 ret
= -host_to_target_errno(errno
);
7882 case TARGET_NR_getsid
:
7883 ret
= get_errno(getsid(arg1
));
7885 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
7886 case TARGET_NR_fdatasync
:
7887 ret
= get_errno(fdatasync(arg1
));
7890 #ifdef TARGET_NR__sysctl
7891 case TARGET_NR__sysctl
:
7892 /* We don't implement this, but ENOTDIR is always a safe
7894 ret
= -TARGET_ENOTDIR
;
7897 case TARGET_NR_sched_getaffinity
:
7899 unsigned int mask_size
;
7900 unsigned long *mask
;
7903 * sched_getaffinity needs multiples of ulong, so need to take
7904 * care of mismatches between target ulong and host ulong sizes.
7906 if (arg2
& (sizeof(abi_ulong
) - 1)) {
7907 ret
= -TARGET_EINVAL
;
7910 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
7912 mask
= alloca(mask_size
);
7913 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
7915 if (!is_error(ret
)) {
7917 /* More data returned than the caller's buffer will fit.
7918 * This only happens if sizeof(abi_long) < sizeof(long)
7919 * and the caller passed us a buffer holding an odd number
7920 * of abi_longs. If the host kernel is actually using the
7921 * extra 4 bytes then fail EINVAL; otherwise we can just
7922 * ignore them and only copy the interesting part.
7924 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
7925 if (numcpus
> arg2
* 8) {
7926 ret
= -TARGET_EINVAL
;
7932 if (copy_to_user(arg3
, mask
, ret
)) {
7938 case TARGET_NR_sched_setaffinity
:
7940 unsigned int mask_size
;
7941 unsigned long *mask
;
7944 * sched_setaffinity needs multiples of ulong, so need to take
7945 * care of mismatches between target ulong and host ulong sizes.
7947 if (arg2
& (sizeof(abi_ulong
) - 1)) {
7948 ret
= -TARGET_EINVAL
;
7951 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
7953 mask
= alloca(mask_size
);
7954 if (!lock_user_struct(VERIFY_READ
, p
, arg3
, 1)) {
7957 memcpy(mask
, p
, arg2
);
7958 unlock_user_struct(p
, arg2
, 0);
7960 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
7963 case TARGET_NR_sched_setparam
:
7965 struct sched_param
*target_schp
;
7966 struct sched_param schp
;
7969 return -TARGET_EINVAL
;
7971 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
7973 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
7974 unlock_user_struct(target_schp
, arg2
, 0);
7975 ret
= get_errno(sched_setparam(arg1
, &schp
));
7978 case TARGET_NR_sched_getparam
:
7980 struct sched_param
*target_schp
;
7981 struct sched_param schp
;
7984 return -TARGET_EINVAL
;
7986 ret
= get_errno(sched_getparam(arg1
, &schp
));
7987 if (!is_error(ret
)) {
7988 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
7990 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
7991 unlock_user_struct(target_schp
, arg2
, 1);
7995 case TARGET_NR_sched_setscheduler
:
7997 struct sched_param
*target_schp
;
7998 struct sched_param schp
;
8000 return -TARGET_EINVAL
;
8002 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
8004 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
8005 unlock_user_struct(target_schp
, arg3
, 0);
8006 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
8009 case TARGET_NR_sched_getscheduler
:
8010 ret
= get_errno(sched_getscheduler(arg1
));
8012 case TARGET_NR_sched_yield
:
8013 ret
= get_errno(sched_yield());
8015 case TARGET_NR_sched_get_priority_max
:
8016 ret
= get_errno(sched_get_priority_max(arg1
));
8018 case TARGET_NR_sched_get_priority_min
:
8019 ret
= get_errno(sched_get_priority_min(arg1
));
8021 case TARGET_NR_sched_rr_get_interval
:
8024 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
8025 if (!is_error(ret
)) {
8026 ret
= host_to_target_timespec(arg2
, &ts
);
8030 case TARGET_NR_nanosleep
:
8032 struct timespec req
, rem
;
8033 target_to_host_timespec(&req
, arg1
);
8034 ret
= get_errno(nanosleep(&req
, &rem
));
8035 if (is_error(ret
) && arg2
) {
8036 host_to_target_timespec(arg2
, &rem
);
8040 #ifdef TARGET_NR_query_module
8041 case TARGET_NR_query_module
:
8044 #ifdef TARGET_NR_nfsservctl
8045 case TARGET_NR_nfsservctl
:
8048 case TARGET_NR_prctl
:
8050 case PR_GET_PDEATHSIG
:
8053 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
8054 if (!is_error(ret
) && arg2
8055 && put_user_ual(deathsig
, arg2
)) {
8063 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
8067 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
8069 unlock_user(name
, arg2
, 16);
8074 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
8078 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
8080 unlock_user(name
, arg2
, 0);
8085 /* Most prctl options have no pointer arguments */
8086 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
8090 #ifdef TARGET_NR_arch_prctl
8091 case TARGET_NR_arch_prctl
:
8092 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
8093 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
8099 #ifdef TARGET_NR_pread64
8100 case TARGET_NR_pread64
:
8101 if (regpairs_aligned(cpu_env
)) {
8105 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
8107 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
8108 unlock_user(p
, arg2
, ret
);
8110 case TARGET_NR_pwrite64
:
8111 if (regpairs_aligned(cpu_env
)) {
8115 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
8117 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
8118 unlock_user(p
, arg2
, 0);
8121 case TARGET_NR_getcwd
:
8122 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
8124 ret
= get_errno(sys_getcwd1(p
, arg2
));
8125 unlock_user(p
, arg1
, ret
);
8127 case TARGET_NR_capget
:
8128 case TARGET_NR_capset
:
8130 struct target_user_cap_header
*target_header
;
8131 struct target_user_cap_data
*target_data
= NULL
;
8132 struct __user_cap_header_struct header
;
8133 struct __user_cap_data_struct data
[2];
8134 struct __user_cap_data_struct
*dataptr
= NULL
;
8135 int i
, target_datalen
;
8138 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
8141 header
.version
= tswap32(target_header
->version
);
8142 header
.pid
= tswap32(target_header
->pid
);
8144 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
8145 /* Version 2 and up takes pointer to two user_data structs */
8149 target_datalen
= sizeof(*target_data
) * data_items
;
8152 if (num
== TARGET_NR_capget
) {
8153 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
8155 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
8158 unlock_user_struct(target_header
, arg1
, 0);
8162 if (num
== TARGET_NR_capset
) {
8163 for (i
= 0; i
< data_items
; i
++) {
8164 data
[i
].effective
= tswap32(target_data
[i
].effective
);
8165 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
8166 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
8173 if (num
== TARGET_NR_capget
) {
8174 ret
= get_errno(capget(&header
, dataptr
));
8176 ret
= get_errno(capset(&header
, dataptr
));
8179 /* The kernel always updates version for both capget and capset */
8180 target_header
->version
= tswap32(header
.version
);
8181 unlock_user_struct(target_header
, arg1
, 1);
8184 if (num
== TARGET_NR_capget
) {
8185 for (i
= 0; i
< data_items
; i
++) {
8186 target_data
[i
].effective
= tswap32(data
[i
].effective
);
8187 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
8188 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
8190 unlock_user(target_data
, arg2
, target_datalen
);
8192 unlock_user(target_data
, arg2
, 0);
8197 case TARGET_NR_sigaltstack
:
8198 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
8199 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
8200 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
8201 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUArchState
*)cpu_env
));
8207 #ifdef CONFIG_SENDFILE
8208 case TARGET_NR_sendfile
:
8213 ret
= get_user_sal(off
, arg3
);
8214 if (is_error(ret
)) {
8219 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
8220 if (!is_error(ret
) && arg3
) {
8221 abi_long ret2
= put_user_sal(off
, arg3
);
8222 if (is_error(ret2
)) {
8228 #ifdef TARGET_NR_sendfile64
8229 case TARGET_NR_sendfile64
:
8234 ret
= get_user_s64(off
, arg3
);
8235 if (is_error(ret
)) {
8240 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
8241 if (!is_error(ret
) && arg3
) {
8242 abi_long ret2
= put_user_s64(off
, arg3
);
8243 if (is_error(ret2
)) {
8251 case TARGET_NR_sendfile
:
8252 #ifdef TARGET_NR_sendfile64
8253 case TARGET_NR_sendfile64
:
8258 #ifdef TARGET_NR_getpmsg
8259 case TARGET_NR_getpmsg
:
8262 #ifdef TARGET_NR_putpmsg
8263 case TARGET_NR_putpmsg
:
8266 #ifdef TARGET_NR_vfork
8267 case TARGET_NR_vfork
:
8268 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
8272 #ifdef TARGET_NR_ugetrlimit
8273 case TARGET_NR_ugetrlimit
:
8276 int resource
= target_to_host_resource(arg1
);
8277 ret
= get_errno(getrlimit(resource
, &rlim
));
8278 if (!is_error(ret
)) {
8279 struct target_rlimit
*target_rlim
;
8280 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
8282 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
8283 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
8284 unlock_user_struct(target_rlim
, arg2
, 1);
8289 #ifdef TARGET_NR_truncate64
8290 case TARGET_NR_truncate64
:
8291 if (!(p
= lock_user_string(arg1
)))
8293 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
8294 unlock_user(p
, arg1
, 0);
8297 #ifdef TARGET_NR_ftruncate64
8298 case TARGET_NR_ftruncate64
:
8299 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
8302 #ifdef TARGET_NR_stat64
8303 case TARGET_NR_stat64
:
8304 if (!(p
= lock_user_string(arg1
)))
8306 ret
= get_errno(stat(path(p
), &st
));
8307 unlock_user(p
, arg1
, 0);
8309 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
8312 #ifdef TARGET_NR_lstat64
8313 case TARGET_NR_lstat64
:
8314 if (!(p
= lock_user_string(arg1
)))
8316 ret
= get_errno(lstat(path(p
), &st
));
8317 unlock_user(p
, arg1
, 0);
8319 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
8322 #ifdef TARGET_NR_fstat64
8323 case TARGET_NR_fstat64
:
8324 ret
= get_errno(fstat(arg1
, &st
));
8326 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
8329 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
8330 #ifdef TARGET_NR_fstatat64
8331 case TARGET_NR_fstatat64
:
8333 #ifdef TARGET_NR_newfstatat
8334 case TARGET_NR_newfstatat
:
8336 if (!(p
= lock_user_string(arg2
)))
8338 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
8340 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
8343 #ifdef TARGET_NR_lchown
8344 case TARGET_NR_lchown
:
8345 if (!(p
= lock_user_string(arg1
)))
8347 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
8348 unlock_user(p
, arg1
, 0);
8351 #ifdef TARGET_NR_getuid
8352 case TARGET_NR_getuid
:
8353 ret
= get_errno(high2lowuid(getuid()));
8356 #ifdef TARGET_NR_getgid
8357 case TARGET_NR_getgid
:
8358 ret
= get_errno(high2lowgid(getgid()));
8361 #ifdef TARGET_NR_geteuid
8362 case TARGET_NR_geteuid
:
8363 ret
= get_errno(high2lowuid(geteuid()));
8366 #ifdef TARGET_NR_getegid
8367 case TARGET_NR_getegid
:
8368 ret
= get_errno(high2lowgid(getegid()));
8371 case TARGET_NR_setreuid
:
8372 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
8374 case TARGET_NR_setregid
:
8375 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
8377 case TARGET_NR_getgroups
:
8379 int gidsetsize
= arg1
;
8380 target_id
*target_grouplist
;
8384 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
8385 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
8386 if (gidsetsize
== 0)
8388 if (!is_error(ret
)) {
8389 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
8390 if (!target_grouplist
)
8392 for(i
= 0;i
< ret
; i
++)
8393 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
8394 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
8398 case TARGET_NR_setgroups
:
8400 int gidsetsize
= arg1
;
8401 target_id
*target_grouplist
;
8402 gid_t
*grouplist
= NULL
;
8405 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
8406 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
8407 if (!target_grouplist
) {
8408 ret
= -TARGET_EFAULT
;
8411 for (i
= 0; i
< gidsetsize
; i
++) {
8412 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
8414 unlock_user(target_grouplist
, arg2
, 0);
8416 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
8419 case TARGET_NR_fchown
:
8420 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
8422 #if defined(TARGET_NR_fchownat)
8423 case TARGET_NR_fchownat
:
8424 if (!(p
= lock_user_string(arg2
)))
8426 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
8427 low2highgid(arg4
), arg5
));
8428 unlock_user(p
, arg2
, 0);
8431 #ifdef TARGET_NR_setresuid
8432 case TARGET_NR_setresuid
:
8433 ret
= get_errno(setresuid(low2highuid(arg1
),
8435 low2highuid(arg3
)));
8438 #ifdef TARGET_NR_getresuid
8439 case TARGET_NR_getresuid
:
8441 uid_t ruid
, euid
, suid
;
8442 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
8443 if (!is_error(ret
)) {
8444 if (put_user_id(high2lowuid(ruid
), arg1
)
8445 || put_user_id(high2lowuid(euid
), arg2
)
8446 || put_user_id(high2lowuid(suid
), arg3
))
8452 #ifdef TARGET_NR_getresgid
8453 case TARGET_NR_setresgid
:
8454 ret
= get_errno(setresgid(low2highgid(arg1
),
8456 low2highgid(arg3
)));
8459 #ifdef TARGET_NR_getresgid
8460 case TARGET_NR_getresgid
:
8462 gid_t rgid
, egid
, sgid
;
8463 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
8464 if (!is_error(ret
)) {
8465 if (put_user_id(high2lowgid(rgid
), arg1
)
8466 || put_user_id(high2lowgid(egid
), arg2
)
8467 || put_user_id(high2lowgid(sgid
), arg3
))
8473 #ifdef TARGET_NR_chown
8474 case TARGET_NR_chown
:
8475 if (!(p
= lock_user_string(arg1
)))
8477 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
8478 unlock_user(p
, arg1
, 0);
8481 case TARGET_NR_setuid
:
8482 ret
= get_errno(setuid(low2highuid(arg1
)));
8484 case TARGET_NR_setgid
:
8485 ret
= get_errno(setgid(low2highgid(arg1
)));
8487 case TARGET_NR_setfsuid
:
8488 ret
= get_errno(setfsuid(arg1
));
8490 case TARGET_NR_setfsgid
:
8491 ret
= get_errno(setfsgid(arg1
));
8494 #ifdef TARGET_NR_lchown32
8495 case TARGET_NR_lchown32
:
8496 if (!(p
= lock_user_string(arg1
)))
8498 ret
= get_errno(lchown(p
, arg2
, arg3
));
8499 unlock_user(p
, arg1
, 0);
8502 #ifdef TARGET_NR_getuid32
8503 case TARGET_NR_getuid32
:
8504 ret
= get_errno(getuid());
8508 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
8509 /* Alpha specific */
8510 case TARGET_NR_getxuid
:
8514 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
8516 ret
= get_errno(getuid());
8519 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
8520 /* Alpha specific */
8521 case TARGET_NR_getxgid
:
8525 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
8527 ret
= get_errno(getgid());
8530 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
8531 /* Alpha specific */
8532 case TARGET_NR_osf_getsysinfo
:
8533 ret
= -TARGET_EOPNOTSUPP
;
8535 case TARGET_GSI_IEEE_FP_CONTROL
:
8537 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
8539 /* Copied from linux ieee_fpcr_to_swcr. */
8540 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
8541 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
8542 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
8543 | SWCR_TRAP_ENABLE_DZE
8544 | SWCR_TRAP_ENABLE_OVF
);
8545 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
8546 | SWCR_TRAP_ENABLE_INE
);
8547 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
8548 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
8550 if (put_user_u64 (swcr
, arg2
))
8556 /* case GSI_IEEE_STATE_AT_SIGNAL:
8557 -- Not implemented in linux kernel.
8559 -- Retrieves current unaligned access state; not much used.
8561 -- Retrieves implver information; surely not used.
8563 -- Grabs a copy of the HWRPB; surely not used.
8568 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
8569 /* Alpha specific */
8570 case TARGET_NR_osf_setsysinfo
:
8571 ret
= -TARGET_EOPNOTSUPP
;
8573 case TARGET_SSI_IEEE_FP_CONTROL
:
8575 uint64_t swcr
, fpcr
, orig_fpcr
;
8577 if (get_user_u64 (swcr
, arg2
)) {
8580 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
8581 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
8583 /* Copied from linux ieee_swcr_to_fpcr. */
8584 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
8585 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
8586 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
8587 | SWCR_TRAP_ENABLE_DZE
8588 | SWCR_TRAP_ENABLE_OVF
)) << 48;
8589 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
8590 | SWCR_TRAP_ENABLE_INE
)) << 57;
8591 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
8592 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
8594 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
8599 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
8601 uint64_t exc
, fpcr
, orig_fpcr
;
8604 if (get_user_u64(exc
, arg2
)) {
8608 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
8610 /* We only add to the exception status here. */
8611 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
8613 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
8616 /* Old exceptions are not signaled. */
8617 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
8619 /* If any exceptions set by this call,
8620 and are unmasked, send a signal. */
8622 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
8623 si_code
= TARGET_FPE_FLTRES
;
8625 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
8626 si_code
= TARGET_FPE_FLTUND
;
8628 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
8629 si_code
= TARGET_FPE_FLTOVF
;
8631 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
8632 si_code
= TARGET_FPE_FLTDIV
;
8634 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
8635 si_code
= TARGET_FPE_FLTINV
;
8638 target_siginfo_t info
;
8639 info
.si_signo
= SIGFPE
;
8641 info
.si_code
= si_code
;
8642 info
._sifields
._sigfault
._addr
8643 = ((CPUArchState
*)cpu_env
)->pc
;
8644 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
8649 /* case SSI_NVPAIRS:
8650 -- Used with SSIN_UACPROC to enable unaligned accesses.
8651 case SSI_IEEE_STATE_AT_SIGNAL:
8652 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
8653 -- Not implemented in linux kernel
8658 #ifdef TARGET_NR_osf_sigprocmask
8659 /* Alpha specific. */
8660 case TARGET_NR_osf_sigprocmask
:
8664 sigset_t set
, oldset
;
8667 case TARGET_SIG_BLOCK
:
8670 case TARGET_SIG_UNBLOCK
:
8673 case TARGET_SIG_SETMASK
:
8677 ret
= -TARGET_EINVAL
;
8681 target_to_host_old_sigset(&set
, &mask
);
8682 do_sigprocmask(how
, &set
, &oldset
);
8683 host_to_target_old_sigset(&mask
, &oldset
);
8689 #ifdef TARGET_NR_getgid32
8690 case TARGET_NR_getgid32
:
8691 ret
= get_errno(getgid());
8694 #ifdef TARGET_NR_geteuid32
8695 case TARGET_NR_geteuid32
:
8696 ret
= get_errno(geteuid());
8699 #ifdef TARGET_NR_getegid32
8700 case TARGET_NR_getegid32
:
8701 ret
= get_errno(getegid());
8704 #ifdef TARGET_NR_setreuid32
8705 case TARGET_NR_setreuid32
:
8706 ret
= get_errno(setreuid(arg1
, arg2
));
8709 #ifdef TARGET_NR_setregid32
8710 case TARGET_NR_setregid32
:
8711 ret
= get_errno(setregid(arg1
, arg2
));
8714 #ifdef TARGET_NR_getgroups32
8715 case TARGET_NR_getgroups32
:
8717 int gidsetsize
= arg1
;
8718 uint32_t *target_grouplist
;
8722 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
8723 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
8724 if (gidsetsize
== 0)
8726 if (!is_error(ret
)) {
8727 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
8728 if (!target_grouplist
) {
8729 ret
= -TARGET_EFAULT
;
8732 for(i
= 0;i
< ret
; i
++)
8733 target_grouplist
[i
] = tswap32(grouplist
[i
]);
8734 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
8739 #ifdef TARGET_NR_setgroups32
8740 case TARGET_NR_setgroups32
:
8742 int gidsetsize
= arg1
;
8743 uint32_t *target_grouplist
;
8747 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
8748 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
8749 if (!target_grouplist
) {
8750 ret
= -TARGET_EFAULT
;
8753 for(i
= 0;i
< gidsetsize
; i
++)
8754 grouplist
[i
] = tswap32(target_grouplist
[i
]);
8755 unlock_user(target_grouplist
, arg2
, 0);
8756 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
8760 #ifdef TARGET_NR_fchown32
8761 case TARGET_NR_fchown32
:
8762 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
8765 #ifdef TARGET_NR_setresuid32
8766 case TARGET_NR_setresuid32
:
8767 ret
= get_errno(setresuid(arg1
, arg2
, arg3
));
8770 #ifdef TARGET_NR_getresuid32
8771 case TARGET_NR_getresuid32
:
8773 uid_t ruid
, euid
, suid
;
8774 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
8775 if (!is_error(ret
)) {
8776 if (put_user_u32(ruid
, arg1
)
8777 || put_user_u32(euid
, arg2
)
8778 || put_user_u32(suid
, arg3
))
8784 #ifdef TARGET_NR_setresgid32
8785 case TARGET_NR_setresgid32
:
8786 ret
= get_errno(setresgid(arg1
, arg2
, arg3
));
8789 #ifdef TARGET_NR_getresgid32
8790 case TARGET_NR_getresgid32
:
8792 gid_t rgid
, egid
, sgid
;
8793 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
8794 if (!is_error(ret
)) {
8795 if (put_user_u32(rgid
, arg1
)
8796 || put_user_u32(egid
, arg2
)
8797 || put_user_u32(sgid
, arg3
))
8803 #ifdef TARGET_NR_chown32
8804 case TARGET_NR_chown32
:
8805 if (!(p
= lock_user_string(arg1
)))
8807 ret
= get_errno(chown(p
, arg2
, arg3
));
8808 unlock_user(p
, arg1
, 0);
8811 #ifdef TARGET_NR_setuid32
8812 case TARGET_NR_setuid32
:
8813 ret
= get_errno(setuid(arg1
));
8816 #ifdef TARGET_NR_setgid32
8817 case TARGET_NR_setgid32
:
8818 ret
= get_errno(setgid(arg1
));
8821 #ifdef TARGET_NR_setfsuid32
8822 case TARGET_NR_setfsuid32
:
8823 ret
= get_errno(setfsuid(arg1
));
8826 #ifdef TARGET_NR_setfsgid32
8827 case TARGET_NR_setfsgid32
:
8828 ret
= get_errno(setfsgid(arg1
));
8832 case TARGET_NR_pivot_root
:
8834 #ifdef TARGET_NR_mincore
8835 case TARGET_NR_mincore
:
8838 ret
= -TARGET_EFAULT
;
8839 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
8841 if (!(p
= lock_user_string(arg3
)))
8843 ret
= get_errno(mincore(a
, arg2
, p
));
8844 unlock_user(p
, arg3
, ret
);
8846 unlock_user(a
, arg1
, 0);
8850 #ifdef TARGET_NR_arm_fadvise64_64
8851 case TARGET_NR_arm_fadvise64_64
:
8854 * arm_fadvise64_64 looks like fadvise64_64 but
8855 * with different argument order
8863 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
8864 #ifdef TARGET_NR_fadvise64_64
8865 case TARGET_NR_fadvise64_64
:
8867 #ifdef TARGET_NR_fadvise64
8868 case TARGET_NR_fadvise64
:
8872 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
8873 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
8874 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
8875 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
8879 ret
= -posix_fadvise(arg1
, arg2
, arg3
, arg4
);
8882 #ifdef TARGET_NR_madvise
8883 case TARGET_NR_madvise
:
8884 /* A straight passthrough may not be safe because qemu sometimes
8885 turns private file-backed mappings into anonymous mappings.
8886 This will break MADV_DONTNEED.
8887 This is a hint, so ignoring and returning success is ok. */
8891 #if TARGET_ABI_BITS == 32
8892 case TARGET_NR_fcntl64
:
8896 struct target_flock64
*target_fl
;
8898 struct target_eabi_flock64
*target_efl
;
8901 cmd
= target_to_host_fcntl_cmd(arg2
);
8902 if (cmd
== -TARGET_EINVAL
) {
8908 case TARGET_F_GETLK64
:
8910 if (((CPUARMState
*)cpu_env
)->eabi
) {
8911 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
8913 fl
.l_type
= tswap16(target_efl
->l_type
);
8914 fl
.l_whence
= tswap16(target_efl
->l_whence
);
8915 fl
.l_start
= tswap64(target_efl
->l_start
);
8916 fl
.l_len
= tswap64(target_efl
->l_len
);
8917 fl
.l_pid
= tswap32(target_efl
->l_pid
);
8918 unlock_user_struct(target_efl
, arg3
, 0);
8922 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
8924 fl
.l_type
= tswap16(target_fl
->l_type
);
8925 fl
.l_whence
= tswap16(target_fl
->l_whence
);
8926 fl
.l_start
= tswap64(target_fl
->l_start
);
8927 fl
.l_len
= tswap64(target_fl
->l_len
);
8928 fl
.l_pid
= tswap32(target_fl
->l_pid
);
8929 unlock_user_struct(target_fl
, arg3
, 0);
8931 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
8934 if (((CPUARMState
*)cpu_env
)->eabi
) {
8935 if (!lock_user_struct(VERIFY_WRITE
, target_efl
, arg3
, 0))
8937 target_efl
->l_type
= tswap16(fl
.l_type
);
8938 target_efl
->l_whence
= tswap16(fl
.l_whence
);
8939 target_efl
->l_start
= tswap64(fl
.l_start
);
8940 target_efl
->l_len
= tswap64(fl
.l_len
);
8941 target_efl
->l_pid
= tswap32(fl
.l_pid
);
8942 unlock_user_struct(target_efl
, arg3
, 1);
8946 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg3
, 0))
8948 target_fl
->l_type
= tswap16(fl
.l_type
);
8949 target_fl
->l_whence
= tswap16(fl
.l_whence
);
8950 target_fl
->l_start
= tswap64(fl
.l_start
);
8951 target_fl
->l_len
= tswap64(fl
.l_len
);
8952 target_fl
->l_pid
= tswap32(fl
.l_pid
);
8953 unlock_user_struct(target_fl
, arg3
, 1);
8958 case TARGET_F_SETLK64
:
8959 case TARGET_F_SETLKW64
:
8961 if (((CPUARMState
*)cpu_env
)->eabi
) {
8962 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
8964 fl
.l_type
= tswap16(target_efl
->l_type
);
8965 fl
.l_whence
= tswap16(target_efl
->l_whence
);
8966 fl
.l_start
= tswap64(target_efl
->l_start
);
8967 fl
.l_len
= tswap64(target_efl
->l_len
);
8968 fl
.l_pid
= tswap32(target_efl
->l_pid
);
8969 unlock_user_struct(target_efl
, arg3
, 0);
8973 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
8975 fl
.l_type
= tswap16(target_fl
->l_type
);
8976 fl
.l_whence
= tswap16(target_fl
->l_whence
);
8977 fl
.l_start
= tswap64(target_fl
->l_start
);
8978 fl
.l_len
= tswap64(target_fl
->l_len
);
8979 fl
.l_pid
= tswap32(target_fl
->l_pid
);
8980 unlock_user_struct(target_fl
, arg3
, 0);
8982 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
8985 ret
= do_fcntl(arg1
, arg2
, arg3
);
8991 #ifdef TARGET_NR_cacheflush
8992 case TARGET_NR_cacheflush
:
8993 /* self-modifying code is handled automatically, so nothing needed */
8997 #ifdef TARGET_NR_security
8998 case TARGET_NR_security
:
9001 #ifdef TARGET_NR_getpagesize
9002 case TARGET_NR_getpagesize
:
9003 ret
= TARGET_PAGE_SIZE
;
9006 case TARGET_NR_gettid
:
9007 ret
= get_errno(gettid());
9009 #ifdef TARGET_NR_readahead
9010 case TARGET_NR_readahead
:
9011 #if TARGET_ABI_BITS == 32
9012 if (regpairs_aligned(cpu_env
)) {
9017 ret
= get_errno(readahead(arg1
, ((off64_t
)arg3
<< 32) | arg2
, arg4
));
9019 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
9024 #ifdef TARGET_NR_setxattr
9025 case TARGET_NR_listxattr
:
9026 case TARGET_NR_llistxattr
:
9030 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9032 ret
= -TARGET_EFAULT
;
9036 p
= lock_user_string(arg1
);
9038 if (num
== TARGET_NR_listxattr
) {
9039 ret
= get_errno(listxattr(p
, b
, arg3
));
9041 ret
= get_errno(llistxattr(p
, b
, arg3
));
9044 ret
= -TARGET_EFAULT
;
9046 unlock_user(p
, arg1
, 0);
9047 unlock_user(b
, arg2
, arg3
);
9050 case TARGET_NR_flistxattr
:
9054 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9056 ret
= -TARGET_EFAULT
;
9060 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
9061 unlock_user(b
, arg2
, arg3
);
9064 case TARGET_NR_setxattr
:
9065 case TARGET_NR_lsetxattr
:
9067 void *p
, *n
, *v
= 0;
9069 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
9071 ret
= -TARGET_EFAULT
;
9075 p
= lock_user_string(arg1
);
9076 n
= lock_user_string(arg2
);
9078 if (num
== TARGET_NR_setxattr
) {
9079 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
9081 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
9084 ret
= -TARGET_EFAULT
;
9086 unlock_user(p
, arg1
, 0);
9087 unlock_user(n
, arg2
, 0);
9088 unlock_user(v
, arg3
, 0);
9091 case TARGET_NR_fsetxattr
:
9095 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
9097 ret
= -TARGET_EFAULT
;
9101 n
= lock_user_string(arg2
);
9103 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
9105 ret
= -TARGET_EFAULT
;
9107 unlock_user(n
, arg2
, 0);
9108 unlock_user(v
, arg3
, 0);
9111 case TARGET_NR_getxattr
:
9112 case TARGET_NR_lgetxattr
:
9114 void *p
, *n
, *v
= 0;
9116 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
9118 ret
= -TARGET_EFAULT
;
9122 p
= lock_user_string(arg1
);
9123 n
= lock_user_string(arg2
);
9125 if (num
== TARGET_NR_getxattr
) {
9126 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
9128 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
9131 ret
= -TARGET_EFAULT
;
9133 unlock_user(p
, arg1
, 0);
9134 unlock_user(n
, arg2
, 0);
9135 unlock_user(v
, arg3
, arg4
);
9138 case TARGET_NR_fgetxattr
:
9142 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
9144 ret
= -TARGET_EFAULT
;
9148 n
= lock_user_string(arg2
);
9150 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
9152 ret
= -TARGET_EFAULT
;
9154 unlock_user(n
, arg2
, 0);
9155 unlock_user(v
, arg3
, arg4
);
9158 case TARGET_NR_removexattr
:
9159 case TARGET_NR_lremovexattr
:
9162 p
= lock_user_string(arg1
);
9163 n
= lock_user_string(arg2
);
9165 if (num
== TARGET_NR_removexattr
) {
9166 ret
= get_errno(removexattr(p
, n
));
9168 ret
= get_errno(lremovexattr(p
, n
));
9171 ret
= -TARGET_EFAULT
;
9173 unlock_user(p
, arg1
, 0);
9174 unlock_user(n
, arg2
, 0);
9177 case TARGET_NR_fremovexattr
:
9180 n
= lock_user_string(arg2
);
9182 ret
= get_errno(fremovexattr(arg1
, n
));
9184 ret
= -TARGET_EFAULT
;
9186 unlock_user(n
, arg2
, 0);
9190 #endif /* CONFIG_ATTR */
9191 #ifdef TARGET_NR_set_thread_area
9192 case TARGET_NR_set_thread_area
:
9193 #if defined(TARGET_MIPS)
9194 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
9197 #elif defined(TARGET_CRIS)
9199 ret
= -TARGET_EINVAL
;
9201 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
9205 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
9206 ret
= do_set_thread_area(cpu_env
, arg1
);
9208 #elif defined(TARGET_M68K)
9210 TaskState
*ts
= cpu
->opaque
;
9211 ts
->tp_value
= arg1
;
9216 goto unimplemented_nowarn
;
9219 #ifdef TARGET_NR_get_thread_area
9220 case TARGET_NR_get_thread_area
:
9221 #if defined(TARGET_I386) && defined(TARGET_ABI32)
9222 ret
= do_get_thread_area(cpu_env
, arg1
);
9224 #elif defined(TARGET_M68K)
9226 TaskState
*ts
= cpu
->opaque
;
9231 goto unimplemented_nowarn
;
9234 #ifdef TARGET_NR_getdomainname
9235 case TARGET_NR_getdomainname
:
9236 goto unimplemented_nowarn
;
9239 #ifdef TARGET_NR_clock_gettime
9240 case TARGET_NR_clock_gettime
:
9243 ret
= get_errno(clock_gettime(arg1
, &ts
));
9244 if (!is_error(ret
)) {
9245 host_to_target_timespec(arg2
, &ts
);
9250 #ifdef TARGET_NR_clock_getres
9251 case TARGET_NR_clock_getres
:
9254 ret
= get_errno(clock_getres(arg1
, &ts
));
9255 if (!is_error(ret
)) {
9256 host_to_target_timespec(arg2
, &ts
);
9261 #ifdef TARGET_NR_clock_nanosleep
9262 case TARGET_NR_clock_nanosleep
:
9265 target_to_host_timespec(&ts
, arg3
);
9266 ret
= get_errno(clock_nanosleep(arg1
, arg2
, &ts
, arg4
? &ts
: NULL
));
9268 host_to_target_timespec(arg4
, &ts
);
9270 #if defined(TARGET_PPC)
9271 /* clock_nanosleep is odd in that it returns positive errno values.
9272 * On PPC, CR0 bit 3 should be set in such a situation. */
9274 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
9281 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
9282 case TARGET_NR_set_tid_address
:
9283 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
9287 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
9288 case TARGET_NR_tkill
:
9289 ret
= get_errno(sys_tkill((int)arg1
, target_to_host_signal(arg2
)));
9293 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
9294 case TARGET_NR_tgkill
:
9295 ret
= get_errno(sys_tgkill((int)arg1
, (int)arg2
,
9296 target_to_host_signal(arg3
)));
9300 #ifdef TARGET_NR_set_robust_list
9301 case TARGET_NR_set_robust_list
:
9302 case TARGET_NR_get_robust_list
:
9303 /* The ABI for supporting robust futexes has userspace pass
9304 * the kernel a pointer to a linked list which is updated by
9305 * userspace after the syscall; the list is walked by the kernel
9306 * when the thread exits. Since the linked list in QEMU guest
9307 * memory isn't a valid linked list for the host and we have
9308 * no way to reliably intercept the thread-death event, we can't
9309 * support these. Silently return ENOSYS so that guest userspace
9310 * falls back to a non-robust futex implementation (which should
9311 * be OK except in the corner case of the guest crashing while
9312 * holding a mutex that is shared with another process via
9315 goto unimplemented_nowarn
;
9318 #if defined(TARGET_NR_utimensat)
9319 case TARGET_NR_utimensat
:
9321 struct timespec
*tsp
, ts
[2];
9325 target_to_host_timespec(ts
, arg3
);
9326 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
9330 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
9332 if (!(p
= lock_user_string(arg2
))) {
9333 ret
= -TARGET_EFAULT
;
9336 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
9337 unlock_user(p
, arg2
, 0);
9342 case TARGET_NR_futex
:
9343 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9345 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
9346 case TARGET_NR_inotify_init
:
9347 ret
= get_errno(sys_inotify_init());
9350 #ifdef CONFIG_INOTIFY1
9351 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
9352 case TARGET_NR_inotify_init1
:
9353 ret
= get_errno(sys_inotify_init1(arg1
));
9357 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
9358 case TARGET_NR_inotify_add_watch
:
9359 p
= lock_user_string(arg2
);
9360 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
9361 unlock_user(p
, arg2
, 0);
9364 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
9365 case TARGET_NR_inotify_rm_watch
:
9366 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
9370 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
9371 case TARGET_NR_mq_open
:
9373 struct mq_attr posix_mq_attr
, *attrp
;
9375 p
= lock_user_string(arg1
- 1);
9377 copy_from_user_mq_attr (&posix_mq_attr
, arg4
);
9378 attrp
= &posix_mq_attr
;
9382 ret
= get_errno(mq_open(p
, arg2
, arg3
, attrp
));
9383 unlock_user (p
, arg1
, 0);
9387 case TARGET_NR_mq_unlink
:
9388 p
= lock_user_string(arg1
- 1);
9389 ret
= get_errno(mq_unlink(p
));
9390 unlock_user (p
, arg1
, 0);
9393 case TARGET_NR_mq_timedsend
:
9397 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
9399 target_to_host_timespec(&ts
, arg5
);
9400 ret
= get_errno(mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
9401 host_to_target_timespec(arg5
, &ts
);
9404 ret
= get_errno(mq_send(arg1
, p
, arg3
, arg4
));
9405 unlock_user (p
, arg2
, arg3
);
9409 case TARGET_NR_mq_timedreceive
:
9414 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
9416 target_to_host_timespec(&ts
, arg5
);
9417 ret
= get_errno(mq_timedreceive(arg1
, p
, arg3
, &prio
, &ts
));
9418 host_to_target_timespec(arg5
, &ts
);
9421 ret
= get_errno(mq_receive(arg1
, p
, arg3
, &prio
));
9422 unlock_user (p
, arg2
, arg3
);
9424 put_user_u32(prio
, arg4
);
9428 /* Not implemented for now... */
9429 /* case TARGET_NR_mq_notify: */
9432 case TARGET_NR_mq_getsetattr
:
9434 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
9437 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
9438 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
9441 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
9442 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
9449 #ifdef CONFIG_SPLICE
9450 #ifdef TARGET_NR_tee
9453 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
9457 #ifdef TARGET_NR_splice
9458 case TARGET_NR_splice
:
9460 loff_t loff_in
, loff_out
;
9461 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
9463 if (get_user_u64(loff_in
, arg2
)) {
9466 ploff_in
= &loff_in
;
9469 if (get_user_u64(loff_out
, arg4
)) {
9472 ploff_out
= &loff_out
;
9474 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
9476 if (put_user_u64(loff_in
, arg2
)) {
9481 if (put_user_u64(loff_out
, arg4
)) {
9488 #ifdef TARGET_NR_vmsplice
9489 case TARGET_NR_vmsplice
:
9491 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
9493 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
9494 unlock_iovec(vec
, arg2
, arg3
, 0);
9496 ret
= -host_to_target_errno(errno
);
9501 #endif /* CONFIG_SPLICE */
9502 #ifdef CONFIG_EVENTFD
9503 #if defined(TARGET_NR_eventfd)
9504 case TARGET_NR_eventfd
:
9505 ret
= get_errno(eventfd(arg1
, 0));
9508 #if defined(TARGET_NR_eventfd2)
9509 case TARGET_NR_eventfd2
:
9511 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
9512 if (arg2
& TARGET_O_NONBLOCK
) {
9513 host_flags
|= O_NONBLOCK
;
9515 if (arg2
& TARGET_O_CLOEXEC
) {
9516 host_flags
|= O_CLOEXEC
;
9518 ret
= get_errno(eventfd(arg1
, host_flags
));
9522 #endif /* CONFIG_EVENTFD */
9523 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
9524 case TARGET_NR_fallocate
:
9525 #if TARGET_ABI_BITS == 32
9526 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
9527 target_offset64(arg5
, arg6
)));
9529 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
9533 #if defined(CONFIG_SYNC_FILE_RANGE)
9534 #if defined(TARGET_NR_sync_file_range)
9535 case TARGET_NR_sync_file_range
:
9536 #if TARGET_ABI_BITS == 32
9537 #if defined(TARGET_MIPS)
9538 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
9539 target_offset64(arg5
, arg6
), arg7
));
9541 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
9542 target_offset64(arg4
, arg5
), arg6
));
9543 #endif /* !TARGET_MIPS */
9545 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
9549 #if defined(TARGET_NR_sync_file_range2)
9550 case TARGET_NR_sync_file_range2
:
9551 /* This is like sync_file_range but the arguments are reordered */
9552 #if TARGET_ABI_BITS == 32
9553 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
9554 target_offset64(arg5
, arg6
), arg2
));
9556 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
9561 #if defined(CONFIG_EPOLL)
9562 #if defined(TARGET_NR_epoll_create)
9563 case TARGET_NR_epoll_create
:
9564 ret
= get_errno(epoll_create(arg1
));
9567 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
9568 case TARGET_NR_epoll_create1
:
9569 ret
= get_errno(epoll_create1(arg1
));
9572 #if defined(TARGET_NR_epoll_ctl)
9573 case TARGET_NR_epoll_ctl
:
9575 struct epoll_event ep
;
9576 struct epoll_event
*epp
= 0;
9578 struct target_epoll_event
*target_ep
;
9579 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
9582 ep
.events
= tswap32(target_ep
->events
);
9583 /* The epoll_data_t union is just opaque data to the kernel,
9584 * so we transfer all 64 bits across and need not worry what
9585 * actual data type it is.
9587 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
9588 unlock_user_struct(target_ep
, arg4
, 0);
9591 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
9596 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
9597 #define IMPLEMENT_EPOLL_PWAIT
9599 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
9600 #if defined(TARGET_NR_epoll_wait)
9601 case TARGET_NR_epoll_wait
:
9603 #if defined(IMPLEMENT_EPOLL_PWAIT)
9604 case TARGET_NR_epoll_pwait
:
9607 struct target_epoll_event
*target_ep
;
9608 struct epoll_event
*ep
;
9610 int maxevents
= arg3
;
9613 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
9614 maxevents
* sizeof(struct target_epoll_event
), 1);
9619 ep
= alloca(maxevents
* sizeof(struct epoll_event
));
9622 #if defined(IMPLEMENT_EPOLL_PWAIT)
9623 case TARGET_NR_epoll_pwait
:
9625 target_sigset_t
*target_set
;
9626 sigset_t _set
, *set
= &_set
;
9629 target_set
= lock_user(VERIFY_READ
, arg5
,
9630 sizeof(target_sigset_t
), 1);
9632 unlock_user(target_ep
, arg2
, 0);
9635 target_to_host_sigset(set
, target_set
);
9636 unlock_user(target_set
, arg5
, 0);
9641 ret
= get_errno(epoll_pwait(epfd
, ep
, maxevents
, timeout
, set
));
9645 #if defined(TARGET_NR_epoll_wait)
9646 case TARGET_NR_epoll_wait
:
9647 ret
= get_errno(epoll_wait(epfd
, ep
, maxevents
, timeout
));
9651 ret
= -TARGET_ENOSYS
;
9653 if (!is_error(ret
)) {
9655 for (i
= 0; i
< ret
; i
++) {
9656 target_ep
[i
].events
= tswap32(ep
[i
].events
);
9657 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
9660 unlock_user(target_ep
, arg2
, ret
* sizeof(struct target_epoll_event
));
9665 #ifdef TARGET_NR_prlimit64
9666 case TARGET_NR_prlimit64
:
9668 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
9669 struct target_rlimit64
*target_rnew
, *target_rold
;
9670 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
9671 int resource
= target_to_host_resource(arg2
);
9673 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
9676 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
9677 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
9678 unlock_user_struct(target_rnew
, arg3
, 0);
9682 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
9683 if (!is_error(ret
) && arg4
) {
9684 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
9687 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
9688 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
9689 unlock_user_struct(target_rold
, arg4
, 1);
9694 #ifdef TARGET_NR_gethostname
9695 case TARGET_NR_gethostname
:
9697 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
9699 ret
= get_errno(gethostname(name
, arg2
));
9700 unlock_user(name
, arg1
, arg2
);
9702 ret
= -TARGET_EFAULT
;
9707 #ifdef TARGET_NR_atomic_cmpxchg_32
9708 case TARGET_NR_atomic_cmpxchg_32
:
9710 /* should use start_exclusive from main.c */
9711 abi_ulong mem_value
;
9712 if (get_user_u32(mem_value
, arg6
)) {
9713 target_siginfo_t info
;
9714 info
.si_signo
= SIGSEGV
;
9716 info
.si_code
= TARGET_SEGV_MAPERR
;
9717 info
._sifields
._sigfault
._addr
= arg6
;
9718 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
9722 if (mem_value
== arg2
)
9723 put_user_u32(arg1
, arg6
);
9728 #ifdef TARGET_NR_atomic_barrier
9729 case TARGET_NR_atomic_barrier
:
9731 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
9737 #ifdef TARGET_NR_timer_create
9738 case TARGET_NR_timer_create
:
9740 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
9742 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
9745 int timer_index
= next_free_host_timer();
9747 if (timer_index
< 0) {
9748 ret
= -TARGET_EAGAIN
;
9750 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
9753 phost_sevp
= &host_sevp
;
9754 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
9760 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
9764 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
9773 #ifdef TARGET_NR_timer_settime
9774 case TARGET_NR_timer_settime
:
9776 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
9777 * struct itimerspec * old_value */
9778 target_timer_t timerid
= get_timer_id(arg1
);
9782 } else if (arg3
== 0) {
9783 ret
= -TARGET_EINVAL
;
9785 timer_t htimer
= g_posix_timers
[timerid
];
9786 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
9788 target_to_host_itimerspec(&hspec_new
, arg3
);
9790 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
9791 host_to_target_itimerspec(arg2
, &hspec_old
);
9797 #ifdef TARGET_NR_timer_gettime
9798 case TARGET_NR_timer_gettime
:
9800 /* args: timer_t timerid, struct itimerspec *curr_value */
9801 target_timer_t timerid
= get_timer_id(arg1
);
9806 ret
= -TARGET_EFAULT
;
9808 timer_t htimer
= g_posix_timers
[timerid
];
9809 struct itimerspec hspec
;
9810 ret
= get_errno(timer_gettime(htimer
, &hspec
));
9812 if (host_to_target_itimerspec(arg2
, &hspec
)) {
9813 ret
= -TARGET_EFAULT
;
9820 #ifdef TARGET_NR_timer_getoverrun
9821 case TARGET_NR_timer_getoverrun
:
9823 /* args: timer_t timerid */
9824 target_timer_t timerid
= get_timer_id(arg1
);
9829 timer_t htimer
= g_posix_timers
[timerid
];
9830 ret
= get_errno(timer_getoverrun(htimer
));
9836 #ifdef TARGET_NR_timer_delete
9837 case TARGET_NR_timer_delete
:
9839 /* args: timer_t timerid */
9840 target_timer_t timerid
= get_timer_id(arg1
);
9845 timer_t htimer
= g_posix_timers
[timerid
];
9846 ret
= get_errno(timer_delete(htimer
));
9847 g_posix_timers
[timerid
] = 0;
9853 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
9854 case TARGET_NR_timerfd_create
:
9855 ret
= get_errno(timerfd_create(arg1
,
9856 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
9860 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
9861 case TARGET_NR_timerfd_gettime
:
9863 struct itimerspec its_curr
;
9865 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
9867 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
9874 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
9875 case TARGET_NR_timerfd_settime
:
9877 struct itimerspec its_new
, its_old
, *p_new
;
9880 if (target_to_host_itimerspec(&its_new
, arg3
)) {
9888 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
9890 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
9897 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
9898 case TARGET_NR_ioprio_get
:
9899 ret
= get_errno(ioprio_get(arg1
, arg2
));
9903 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
9904 case TARGET_NR_ioprio_set
:
9905 ret
= get_errno(ioprio_set(arg1
, arg2
, arg3
));
9909 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
9910 case TARGET_NR_setns
:
9911 ret
= get_errno(setns(arg1
, arg2
));
9914 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
9915 case TARGET_NR_unshare
:
9916 ret
= get_errno(unshare(arg1
));
9922 gemu_log("qemu: Unsupported syscall: %d\n", num
);
9923 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
9924 unimplemented_nowarn
:
9926 ret
= -TARGET_ENOSYS
;
9931 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
9934 print_syscall_ret(num
, ret
);
9937 ret
= -TARGET_EFAULT
;