4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
32 #include <sys/types.h>
38 #include <sys/mount.h>
40 #include <sys/fsuid.h>
41 #include <sys/personality.h>
42 #include <sys/prctl.h>
43 #include <sys/resource.h>
46 #include <linux/capability.h>
50 int __clone2(int (*fn
)(void *), void *child_stack_base
,
51 size_t stack_size
, int flags
, void *arg
, ...);
53 #include <sys/socket.h>
57 #include <sys/times.h>
60 #include <sys/statfs.h>
62 #include <sys/sysinfo.h>
63 //#include <sys/user.h>
64 #include <netinet/ip.h>
65 #include <netinet/tcp.h>
66 #include <linux/wireless.h>
67 #include <linux/icmp.h>
68 #include "qemu-common.h"
73 #include <sys/eventfd.h>
76 #include <sys/epoll.h>
79 #include "qemu/xattr.h"
81 #ifdef CONFIG_SENDFILE
82 #include <sys/sendfile.h>
85 #define termios host_termios
86 #define winsize host_winsize
87 #define termio host_termio
88 #define sgttyb host_sgttyb /* same as target */
89 #define tchars host_tchars /* same as target */
90 #define ltchars host_ltchars /* same as target */
92 #include <linux/termios.h>
93 #include <linux/unistd.h>
94 #include <linux/cdrom.h>
95 #include <linux/hdreg.h>
96 #include <linux/soundcard.h>
98 #include <linux/mtio.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
103 #include <linux/fb.h>
104 #include <linux/vt.h>
105 #include <linux/dm-ioctl.h>
106 #include <linux/reboot.h>
107 #include <linux/route.h>
108 #include <linux/filter.h>
109 #include <linux/blkpg.h>
110 #include "linux_loop.h"
115 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
116 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
120 //#include <linux/msdos_fs.h>
121 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
122 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
133 #define _syscall0(type,name) \
134 static type name (void) \
136 return syscall(__NR_##name); \
139 #define _syscall1(type,name,type1,arg1) \
140 static type name (type1 arg1) \
142 return syscall(__NR_##name, arg1); \
145 #define _syscall2(type,name,type1,arg1,type2,arg2) \
146 static type name (type1 arg1,type2 arg2) \
148 return syscall(__NR_##name, arg1, arg2); \
151 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
152 static type name (type1 arg1,type2 arg2,type3 arg3) \
154 return syscall(__NR_##name, arg1, arg2, arg3); \
157 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
158 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
160 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
163 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
165 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
167 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
171 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
172 type5,arg5,type6,arg6) \
173 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
176 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
180 #define __NR_sys_uname __NR_uname
181 #define __NR_sys_getcwd1 __NR_getcwd
182 #define __NR_sys_getdents __NR_getdents
183 #define __NR_sys_getdents64 __NR_getdents64
184 #define __NR_sys_getpriority __NR_getpriority
185 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
186 #define __NR_sys_syslog __NR_syslog
187 #define __NR_sys_tgkill __NR_tgkill
188 #define __NR_sys_tkill __NR_tkill
189 #define __NR_sys_futex __NR_futex
190 #define __NR_sys_inotify_init __NR_inotify_init
191 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
192 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
194 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
196 #define __NR__llseek __NR_lseek
199 /* Newer kernel ports have llseek() instead of _llseek() */
200 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
201 #define TARGET_NR__llseek TARGET_NR_llseek
205 _syscall0(int, gettid
)
207 /* This is a replacement for the host gettid() and must return a host
209 static int gettid(void) {
214 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
216 #if !defined(__NR_getdents) || \
217 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
218 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
220 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
221 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
222 loff_t
*, res
, uint
, wh
);
224 _syscall3(int,sys_rt_sigqueueinfo
,int,pid
,int,sig
,siginfo_t
*,uinfo
)
225 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
226 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
227 _syscall3(int,sys_tgkill
,int,tgid
,int,pid
,int,sig
)
229 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
230 _syscall2(int,sys_tkill
,int,tid
,int,sig
)
232 #ifdef __NR_exit_group
233 _syscall1(int,exit_group
,int,error_code
)
235 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
236 _syscall1(int,set_tid_address
,int *,tidptr
)
238 #if defined(TARGET_NR_futex) && defined(__NR_futex)
239 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
240 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
242 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
243 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
244 unsigned long *, user_mask_ptr
);
245 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
246 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
247 unsigned long *, user_mask_ptr
);
248 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
250 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
251 struct __user_cap_data_struct
*, data
);
252 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
253 struct __user_cap_data_struct
*, data
);
255 static bitmask_transtbl fcntl_flags_tbl
[] = {
256 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
257 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
258 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
259 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
260 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
261 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
262 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
263 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
264 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
265 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
266 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
267 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
268 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
269 #if defined(O_DIRECT)
270 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
272 #if defined(O_NOATIME)
273 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
275 #if defined(O_CLOEXEC)
276 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
279 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
281 /* Don't terminate the list prematurely on 64-bit host+guest. */
282 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
283 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
/* getcwd(2)-style shim: fills 'buf' with the current working directory.
 * Returns the path length *including* the trailing NUL on success (this is
 * what the raw getcwd syscall reports), or -1 with errno set by getcwd(). */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return -1;
    }
    return strlen(buf) + 1;
}
297 #ifdef TARGET_NR_openat
/* openat(2) shim.  open(2)/openat(2) take the extra 'mode' argument only
 * when O_CREAT is present in 'flags'; forward it in that case and omit it
 * otherwise so we never read a garbage mode from the variadic slot. */
static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
{
    if ((flags & O_CREAT) != 0) {
        return openat(dirfd, pathname, flags, mode);
    }
    return openat(dirfd, pathname, flags);
}
311 #ifdef TARGET_NR_utimensat
312 #ifdef CONFIG_UTIMENSAT
/* utimensat(2) shim (CONFIG_UTIMENSAT: libc provides the calls).
 * A NULL pathname means "operate on the fd itself", which maps to
 * futimens(); otherwise defer to utimensat() with the given flags. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    if (pathname == NULL) {
        return futimens(dirfd, times);
    }
    return utimensat(dirfd, pathname, times, flags);
}
321 #elif defined(__NR_utimensat)
322 #define __NR_sys_utimensat __NR_utimensat
323 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
324 const struct timespec
*,tsp
,int,flags
)
326 static int sys_utimensat(int dirfd
, const char *pathname
,
327 const struct timespec times
[2], int flags
)
333 #endif /* TARGET_NR_utimensat */
335 #ifdef CONFIG_INOTIFY
336 #include <sys/inotify.h>
338 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin wrapper so the syscall table can reference a uniform sys_* name. */
static int sys_inotify_init(void)
{
    return inotify_init();
}
344 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Wrapper over inotify_add_watch(3); mask is the inotify event mask. */
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return inotify_add_watch(fd, pathname, mask);
}
350 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Wrapper over inotify_rm_watch(3); removes watch descriptor 'wd'. */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return inotify_rm_watch(fd, wd);
}
356 #ifdef CONFIG_INOTIFY1
357 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Wrapper over inotify_init1(3) (flag-taking variant, CONFIG_INOTIFY1). */
static int sys_inotify_init1(int flags)
{
    return inotify_init1(flags);
}
365 /* Userspace can usually survive runtime without inotify */
366 #undef TARGET_NR_inotify_init
367 #undef TARGET_NR_inotify_init1
368 #undef TARGET_NR_inotify_add_watch
369 #undef TARGET_NR_inotify_rm_watch
370 #endif /* CONFIG_INOTIFY */
372 #if defined(TARGET_NR_ppoll)
374 # define __NR_ppoll -1
376 #define __NR_sys_ppoll __NR_ppoll
377 _syscall5(int, sys_ppoll
, struct pollfd
*, fds
, nfds_t
, nfds
,
378 struct timespec
*, timeout
, const sigset_t
*, sigmask
,
382 #if defined(TARGET_NR_pselect6)
383 #ifndef __NR_pselect6
384 # define __NR_pselect6 -1
386 #define __NR_sys_pselect6 __NR_pselect6
387 _syscall6(int, sys_pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
,
388 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
);
391 #if defined(TARGET_NR_prlimit64)
392 #ifndef __NR_prlimit64
393 # define __NR_prlimit64 -1
395 #define __NR_sys_prlimit64 __NR_prlimit64
396 /* The glibc rlimit structure may not be that used by the underlying syscall */
397 struct host_rlimit64
{
401 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
402 const struct host_rlimit64
*, new_limit
,
403 struct host_rlimit64
*, old_limit
)
407 #if defined(TARGET_NR_timer_create)
408 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
409 static timer_t g_posix_timers
[32] = { 0, } ;
411 static inline int next_free_host_timer(void)
414 /* FIXME: Does finding the next free slot require a lock? */
415 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
416 if (g_posix_timers
[k
] == 0) {
417 g_posix_timers
[k
] = (timer_t
) 1;
/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
    return ((((CPUARMState *)cpu_env)->eabi) == 1);
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
441 #define ERRNO_TABLE_SIZE 1200
443 /* target_to_host_errno_table[] is initialized from
444 * host_to_target_errno_table[] in syscall_init(). */
445 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
449 * This list is the union of errno values overridden in asm-<arch>/errno.h
450 * minus the errnos that are not actually generic to all archs.
452 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
453 [EIDRM
] = TARGET_EIDRM
,
454 [ECHRNG
] = TARGET_ECHRNG
,
455 [EL2NSYNC
] = TARGET_EL2NSYNC
,
456 [EL3HLT
] = TARGET_EL3HLT
,
457 [EL3RST
] = TARGET_EL3RST
,
458 [ELNRNG
] = TARGET_ELNRNG
,
459 [EUNATCH
] = TARGET_EUNATCH
,
460 [ENOCSI
] = TARGET_ENOCSI
,
461 [EL2HLT
] = TARGET_EL2HLT
,
462 [EDEADLK
] = TARGET_EDEADLK
,
463 [ENOLCK
] = TARGET_ENOLCK
,
464 [EBADE
] = TARGET_EBADE
,
465 [EBADR
] = TARGET_EBADR
,
466 [EXFULL
] = TARGET_EXFULL
,
467 [ENOANO
] = TARGET_ENOANO
,
468 [EBADRQC
] = TARGET_EBADRQC
,
469 [EBADSLT
] = TARGET_EBADSLT
,
470 [EBFONT
] = TARGET_EBFONT
,
471 [ENOSTR
] = TARGET_ENOSTR
,
472 [ENODATA
] = TARGET_ENODATA
,
473 [ETIME
] = TARGET_ETIME
,
474 [ENOSR
] = TARGET_ENOSR
,
475 [ENONET
] = TARGET_ENONET
,
476 [ENOPKG
] = TARGET_ENOPKG
,
477 [EREMOTE
] = TARGET_EREMOTE
,
478 [ENOLINK
] = TARGET_ENOLINK
,
479 [EADV
] = TARGET_EADV
,
480 [ESRMNT
] = TARGET_ESRMNT
,
481 [ECOMM
] = TARGET_ECOMM
,
482 [EPROTO
] = TARGET_EPROTO
,
483 [EDOTDOT
] = TARGET_EDOTDOT
,
484 [EMULTIHOP
] = TARGET_EMULTIHOP
,
485 [EBADMSG
] = TARGET_EBADMSG
,
486 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
487 [EOVERFLOW
] = TARGET_EOVERFLOW
,
488 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
489 [EBADFD
] = TARGET_EBADFD
,
490 [EREMCHG
] = TARGET_EREMCHG
,
491 [ELIBACC
] = TARGET_ELIBACC
,
492 [ELIBBAD
] = TARGET_ELIBBAD
,
493 [ELIBSCN
] = TARGET_ELIBSCN
,
494 [ELIBMAX
] = TARGET_ELIBMAX
,
495 [ELIBEXEC
] = TARGET_ELIBEXEC
,
496 [EILSEQ
] = TARGET_EILSEQ
,
497 [ENOSYS
] = TARGET_ENOSYS
,
498 [ELOOP
] = TARGET_ELOOP
,
499 [ERESTART
] = TARGET_ERESTART
,
500 [ESTRPIPE
] = TARGET_ESTRPIPE
,
501 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
502 [EUSERS
] = TARGET_EUSERS
,
503 [ENOTSOCK
] = TARGET_ENOTSOCK
,
504 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
505 [EMSGSIZE
] = TARGET_EMSGSIZE
,
506 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
507 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
508 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
509 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
510 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
511 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
512 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
513 [EADDRINUSE
] = TARGET_EADDRINUSE
,
514 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
515 [ENETDOWN
] = TARGET_ENETDOWN
,
516 [ENETUNREACH
] = TARGET_ENETUNREACH
,
517 [ENETRESET
] = TARGET_ENETRESET
,
518 [ECONNABORTED
] = TARGET_ECONNABORTED
,
519 [ECONNRESET
] = TARGET_ECONNRESET
,
520 [ENOBUFS
] = TARGET_ENOBUFS
,
521 [EISCONN
] = TARGET_EISCONN
,
522 [ENOTCONN
] = TARGET_ENOTCONN
,
523 [EUCLEAN
] = TARGET_EUCLEAN
,
524 [ENOTNAM
] = TARGET_ENOTNAM
,
525 [ENAVAIL
] = TARGET_ENAVAIL
,
526 [EISNAM
] = TARGET_EISNAM
,
527 [EREMOTEIO
] = TARGET_EREMOTEIO
,
528 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
529 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
530 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
531 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
532 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
533 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
534 [EALREADY
] = TARGET_EALREADY
,
535 [EINPROGRESS
] = TARGET_EINPROGRESS
,
536 [ESTALE
] = TARGET_ESTALE
,
537 [ECANCELED
] = TARGET_ECANCELED
,
538 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
539 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
541 [ENOKEY
] = TARGET_ENOKEY
,
544 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
547 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
550 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
553 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
555 #ifdef ENOTRECOVERABLE
556 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
560 static inline int host_to_target_errno(int err
)
562 if(host_to_target_errno_table
[err
])
563 return host_to_target_errno_table
[err
];
567 static inline int target_to_host_errno(int err
)
569 if (target_to_host_errno_table
[err
])
570 return target_to_host_errno_table
[err
];
574 static inline abi_long
get_errno(abi_long ret
)
577 return -host_to_target_errno(errno
);
582 static inline int is_error(abi_long ret
)
584 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
587 char *target_strerror(int err
)
589 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
592 return strerror(target_to_host_errno(err
));
595 static inline int host_to_target_sock_type(int host_type
)
599 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
601 target_type
= TARGET_SOCK_DGRAM
;
604 target_type
= TARGET_SOCK_STREAM
;
607 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
611 #if defined(SOCK_CLOEXEC)
612 if (host_type
& SOCK_CLOEXEC
) {
613 target_type
|= TARGET_SOCK_CLOEXEC
;
617 #if defined(SOCK_NONBLOCK)
618 if (host_type
& SOCK_NONBLOCK
) {
619 target_type
|= TARGET_SOCK_NONBLOCK
;
626 static abi_ulong target_brk
;
627 static abi_ulong target_original_brk
;
628 static abi_ulong brk_page
;
630 void target_set_brk(abi_ulong new_brk
)
632 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
633 brk_page
= HOST_PAGE_ALIGN(target_brk
);
636 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
637 #define DEBUGF_BRK(message, args...)
639 /* do_brk() must return target values and target errnos. */
640 abi_long
do_brk(abi_ulong new_brk
)
642 abi_long mapped_addr
;
645 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
648 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
651 if (new_brk
< target_original_brk
) {
652 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
657 /* If the new brk is less than the highest page reserved to the
658 * target heap allocation, set it and we're almost done... */
659 if (new_brk
<= brk_page
) {
660 /* Heap contents are initialized to zero, as for anonymous
662 if (new_brk
> target_brk
) {
663 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
665 target_brk
= new_brk
;
666 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
670 /* We need to allocate more memory after the brk... Note that
671 * we don't use MAP_FIXED because that will map over the top of
672 * any existing mapping (like the one with the host libc or qemu
673 * itself); instead we treat "mapped but at wrong address" as
674 * a failure and unmap again.
676 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
677 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
678 PROT_READ
|PROT_WRITE
,
679 MAP_ANON
|MAP_PRIVATE
, 0, 0));
681 if (mapped_addr
== brk_page
) {
682 /* Heap contents are initialized to zero, as for anonymous
683 * mapped pages. Technically the new pages are already
684 * initialized to zero since they *are* anonymous mapped
685 * pages, however we have to take care with the contents that
686 * come from the remaining part of the previous page: it may
687 * contains garbage data due to a previous heap usage (grown
689 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
691 target_brk
= new_brk
;
692 brk_page
= HOST_PAGE_ALIGN(target_brk
);
693 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
696 } else if (mapped_addr
!= -1) {
697 /* Mapped but at wrong address, meaning there wasn't actually
698 * enough space for this brk.
700 target_munmap(mapped_addr
, new_alloc_size
);
702 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
705 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
708 #if defined(TARGET_ALPHA)
709 /* We (partially) emulate OSF/1 on Alpha, which requires we
710 return a proper errno, not an unchanged brk value. */
711 return -TARGET_ENOMEM
;
713 /* For everything else, return the previous break. */
717 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
718 abi_ulong target_fds_addr
,
722 abi_ulong b
, *target_fds
;
724 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
725 if (!(target_fds
= lock_user(VERIFY_READ
,
727 sizeof(abi_ulong
) * nw
,
729 return -TARGET_EFAULT
;
733 for (i
= 0; i
< nw
; i
++) {
734 /* grab the abi_ulong */
735 __get_user(b
, &target_fds
[i
]);
736 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
737 /* check the bit inside the abi_ulong */
744 unlock_user(target_fds
, target_fds_addr
, 0);
749 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
750 abi_ulong target_fds_addr
,
753 if (target_fds_addr
) {
754 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
755 return -TARGET_EFAULT
;
763 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
769 abi_ulong
*target_fds
;
771 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
772 if (!(target_fds
= lock_user(VERIFY_WRITE
,
774 sizeof(abi_ulong
) * nw
,
776 return -TARGET_EFAULT
;
779 for (i
= 0; i
< nw
; i
++) {
781 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
782 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
785 __put_user(v
, &target_fds
[i
]);
788 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
793 #if defined(__alpha__)
799 static inline abi_long
host_to_target_clock_t(long ticks
)
801 #if HOST_HZ == TARGET_HZ
804 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
808 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
809 const struct rusage
*rusage
)
811 struct target_rusage
*target_rusage
;
813 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
814 return -TARGET_EFAULT
;
815 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
816 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
817 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
818 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
819 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
820 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
821 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
822 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
823 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
824 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
825 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
826 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
827 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
828 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
829 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
830 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
831 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
832 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
833 unlock_user_struct(target_rusage
, target_addr
, 1);
838 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
840 abi_ulong target_rlim_swap
;
843 target_rlim_swap
= tswapal(target_rlim
);
844 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
845 return RLIM_INFINITY
;
847 result
= target_rlim_swap
;
848 if (target_rlim_swap
!= (rlim_t
)result
)
849 return RLIM_INFINITY
;
854 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
856 abi_ulong target_rlim_swap
;
859 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
860 target_rlim_swap
= TARGET_RLIM_INFINITY
;
862 target_rlim_swap
= rlim
;
863 result
= tswapal(target_rlim_swap
);
868 static inline int target_to_host_resource(int code
)
871 case TARGET_RLIMIT_AS
:
873 case TARGET_RLIMIT_CORE
:
875 case TARGET_RLIMIT_CPU
:
877 case TARGET_RLIMIT_DATA
:
879 case TARGET_RLIMIT_FSIZE
:
881 case TARGET_RLIMIT_LOCKS
:
883 case TARGET_RLIMIT_MEMLOCK
:
884 return RLIMIT_MEMLOCK
;
885 case TARGET_RLIMIT_MSGQUEUE
:
886 return RLIMIT_MSGQUEUE
;
887 case TARGET_RLIMIT_NICE
:
889 case TARGET_RLIMIT_NOFILE
:
890 return RLIMIT_NOFILE
;
891 case TARGET_RLIMIT_NPROC
:
893 case TARGET_RLIMIT_RSS
:
895 case TARGET_RLIMIT_RTPRIO
:
896 return RLIMIT_RTPRIO
;
897 case TARGET_RLIMIT_SIGPENDING
:
898 return RLIMIT_SIGPENDING
;
899 case TARGET_RLIMIT_STACK
:
906 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
907 abi_ulong target_tv_addr
)
909 struct target_timeval
*target_tv
;
911 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
912 return -TARGET_EFAULT
;
914 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
915 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
917 unlock_user_struct(target_tv
, target_tv_addr
, 0);
922 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
923 const struct timeval
*tv
)
925 struct target_timeval
*target_tv
;
927 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
928 return -TARGET_EFAULT
;
930 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
931 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
933 unlock_user_struct(target_tv
, target_tv_addr
, 1);
938 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
939 abi_ulong target_tz_addr
)
941 struct target_timezone
*target_tz
;
943 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
944 return -TARGET_EFAULT
;
947 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
948 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
950 unlock_user_struct(target_tz
, target_tz_addr
, 0);
955 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
958 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
959 abi_ulong target_mq_attr_addr
)
961 struct target_mq_attr
*target_mq_attr
;
963 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
964 target_mq_attr_addr
, 1))
965 return -TARGET_EFAULT
;
967 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
968 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
969 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
970 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
972 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
977 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
978 const struct mq_attr
*attr
)
980 struct target_mq_attr
*target_mq_attr
;
982 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
983 target_mq_attr_addr
, 0))
984 return -TARGET_EFAULT
;
986 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
987 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
988 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
989 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
991 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
997 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
998 /* do_select() must return target values and target errnos. */
999 static abi_long
do_select(int n
,
1000 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1001 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1003 fd_set rfds
, wfds
, efds
;
1004 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1005 struct timeval tv
, *tv_ptr
;
1008 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1012 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1016 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1021 if (target_tv_addr
) {
1022 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1023 return -TARGET_EFAULT
;
1029 ret
= get_errno(select(n
, rfds_ptr
, wfds_ptr
, efds_ptr
, tv_ptr
));
1031 if (!is_error(ret
)) {
1032 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1033 return -TARGET_EFAULT
;
1034 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1035 return -TARGET_EFAULT
;
1036 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1037 return -TARGET_EFAULT
;
1039 if (target_tv_addr
&& copy_to_user_timeval(target_tv_addr
, &tv
))
1040 return -TARGET_EFAULT
;
1047 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1050 return pipe2(host_pipe
, flags
);
1056 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1057 int flags
, int is_pipe2
)
1061 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1064 return get_errno(ret
);
1066 /* Several targets have special calling conventions for the original
1067 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1069 #if defined(TARGET_ALPHA)
1070 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1071 return host_pipe
[0];
1072 #elif defined(TARGET_MIPS)
1073 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1074 return host_pipe
[0];
1075 #elif defined(TARGET_SH4)
1076 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1077 return host_pipe
[0];
1078 #elif defined(TARGET_SPARC)
1079 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1080 return host_pipe
[0];
1084 if (put_user_s32(host_pipe
[0], pipedes
)
1085 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1086 return -TARGET_EFAULT
;
1087 return get_errno(ret
);
1090 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1091 abi_ulong target_addr
,
1094 struct target_ip_mreqn
*target_smreqn
;
1096 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1098 return -TARGET_EFAULT
;
1099 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1100 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1101 if (len
== sizeof(struct target_ip_mreqn
))
1102 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1103 unlock_user(target_smreqn
, target_addr
, 0);
1108 static inline abi_long
target_to_host_sockaddr(struct sockaddr
*addr
,
1109 abi_ulong target_addr
,
1112 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1113 sa_family_t sa_family
;
1114 struct target_sockaddr
*target_saddr
;
1116 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1118 return -TARGET_EFAULT
;
1120 sa_family
= tswap16(target_saddr
->sa_family
);
1122 /* Oops. The caller might send a incomplete sun_path; sun_path
1123 * must be terminated by \0 (see the manual page), but
1124 * unfortunately it is quite common to specify sockaddr_un
1125 * length as "strlen(x->sun_path)" while it should be
1126 * "strlen(...) + 1". We'll fix that here if needed.
1127 * Linux kernel has a similar feature.
1130 if (sa_family
== AF_UNIX
) {
1131 if (len
< unix_maxlen
&& len
> 0) {
1132 char *cp
= (char*)target_saddr
;
1134 if ( cp
[len
-1] && !cp
[len
] )
1137 if (len
> unix_maxlen
)
1141 memcpy(addr
, target_saddr
, len
);
1142 addr
->sa_family
= sa_family
;
1143 if (sa_family
== AF_PACKET
) {
1144 struct target_sockaddr_ll
*lladdr
;
1146 lladdr
= (struct target_sockaddr_ll
*)addr
;
1147 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1148 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1150 unlock_user(target_saddr
, target_addr
, 0);
1155 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1156 struct sockaddr
*addr
,
1159 struct target_sockaddr
*target_saddr
;
1161 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1163 return -TARGET_EFAULT
;
1164 memcpy(target_saddr
, addr
, len
);
1165 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1166 unlock_user(target_saddr
, target_addr
, len
);
1171 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1172 struct target_msghdr
*target_msgh
)
1174 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1175 abi_long msg_controllen
;
1176 abi_ulong target_cmsg_addr
;
1177 struct target_cmsghdr
*target_cmsg
;
1178 socklen_t space
= 0;
1180 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1181 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1183 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1184 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1186 return -TARGET_EFAULT
;
1188 while (cmsg
&& target_cmsg
) {
1189 void *data
= CMSG_DATA(cmsg
);
1190 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1192 int len
= tswapal(target_cmsg
->cmsg_len
)
1193 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
1195 space
+= CMSG_SPACE(len
);
1196 if (space
> msgh
->msg_controllen
) {
1197 space
-= CMSG_SPACE(len
);
1198 gemu_log("Host cmsg overflow\n");
1202 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1203 cmsg
->cmsg_level
= SOL_SOCKET
;
1205 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1207 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1208 cmsg
->cmsg_len
= CMSG_LEN(len
);
1210 if (cmsg
->cmsg_level
!= SOL_SOCKET
|| cmsg
->cmsg_type
!= SCM_RIGHTS
) {
1211 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1212 memcpy(data
, target_data
, len
);
1214 int *fd
= (int *)data
;
1215 int *target_fd
= (int *)target_data
;
1216 int i
, numfds
= len
/ sizeof(int);
1218 for (i
= 0; i
< numfds
; i
++)
1219 fd
[i
] = tswap32(target_fd
[i
]);
1222 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1223 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
);
1225 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1227 msgh
->msg_controllen
= space
;
1231 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1232 struct msghdr
*msgh
)
1234 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1235 abi_long msg_controllen
;
1236 abi_ulong target_cmsg_addr
;
1237 struct target_cmsghdr
*target_cmsg
;
1238 socklen_t space
= 0;
1240 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1241 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1243 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1244 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1246 return -TARGET_EFAULT
;
1248 while (cmsg
&& target_cmsg
) {
1249 void *data
= CMSG_DATA(cmsg
);
1250 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1252 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
1254 space
+= TARGET_CMSG_SPACE(len
);
1255 if (space
> msg_controllen
) {
1256 space
-= TARGET_CMSG_SPACE(len
);
1257 gemu_log("Target cmsg overflow\n");
1261 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1262 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1264 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1266 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1267 target_cmsg
->cmsg_len
= tswapal(TARGET_CMSG_LEN(len
));
1269 switch (cmsg
->cmsg_level
) {
1271 switch (cmsg
->cmsg_type
) {
1274 int *fd
= (int *)data
;
1275 int *target_fd
= (int *)target_data
;
1276 int i
, numfds
= len
/ sizeof(int);
1278 for (i
= 0; i
< numfds
; i
++)
1279 target_fd
[i
] = tswap32(fd
[i
]);
1284 struct timeval
*tv
= (struct timeval
*)data
;
1285 struct target_timeval
*target_tv
=
1286 (struct target_timeval
*)target_data
;
1288 if (len
!= sizeof(struct timeval
))
1291 /* copy struct timeval to target */
1292 target_tv
->tv_sec
= tswapal(tv
->tv_sec
);
1293 target_tv
->tv_usec
= tswapal(tv
->tv_usec
);
1296 case SCM_CREDENTIALS
:
1298 struct ucred
*cred
= (struct ucred
*)data
;
1299 struct target_ucred
*target_cred
=
1300 (struct target_ucred
*)target_data
;
1302 __put_user(cred
->pid
, &target_cred
->pid
);
1303 __put_user(cred
->uid
, &target_cred
->uid
);
1304 __put_user(cred
->gid
, &target_cred
->gid
);
1314 gemu_log("Unsupported ancillary data: %d/%d\n",
1315 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1316 memcpy(target_data
, data
, len
);
1319 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1320 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
);
1322 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1324 target_msgh
->msg_controllen
= tswapal(space
);
1328 /* do_setsockopt() Must return target values and target errnos. */
1329 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
1330 abi_ulong optval_addr
, socklen_t optlen
)
1334 struct ip_mreqn
*ip_mreq
;
1335 struct ip_mreq_source
*ip_mreq_source
;
1339 /* TCP options all take an 'int' value. */
1340 if (optlen
< sizeof(uint32_t))
1341 return -TARGET_EINVAL
;
1343 if (get_user_u32(val
, optval_addr
))
1344 return -TARGET_EFAULT
;
1345 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1352 case IP_ROUTER_ALERT
:
1356 case IP_MTU_DISCOVER
:
1362 case IP_MULTICAST_TTL
:
1363 case IP_MULTICAST_LOOP
:
1365 if (optlen
>= sizeof(uint32_t)) {
1366 if (get_user_u32(val
, optval_addr
))
1367 return -TARGET_EFAULT
;
1368 } else if (optlen
>= 1) {
1369 if (get_user_u8(val
, optval_addr
))
1370 return -TARGET_EFAULT
;
1372 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1374 case IP_ADD_MEMBERSHIP
:
1375 case IP_DROP_MEMBERSHIP
:
1376 if (optlen
< sizeof (struct target_ip_mreq
) ||
1377 optlen
> sizeof (struct target_ip_mreqn
))
1378 return -TARGET_EINVAL
;
1380 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
1381 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
1382 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
1385 case IP_BLOCK_SOURCE
:
1386 case IP_UNBLOCK_SOURCE
:
1387 case IP_ADD_SOURCE_MEMBERSHIP
:
1388 case IP_DROP_SOURCE_MEMBERSHIP
:
1389 if (optlen
!= sizeof (struct target_ip_mreq_source
))
1390 return -TARGET_EINVAL
;
1392 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
1393 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
1394 unlock_user (ip_mreq_source
, optval_addr
, 0);
1403 case IPV6_MTU_DISCOVER
:
1406 case IPV6_RECVPKTINFO
:
1408 if (optlen
< sizeof(uint32_t)) {
1409 return -TARGET_EINVAL
;
1411 if (get_user_u32(val
, optval_addr
)) {
1412 return -TARGET_EFAULT
;
1414 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1415 &val
, sizeof(val
)));
1424 /* struct icmp_filter takes an u32 value */
1425 if (optlen
< sizeof(uint32_t)) {
1426 return -TARGET_EINVAL
;
1429 if (get_user_u32(val
, optval_addr
)) {
1430 return -TARGET_EFAULT
;
1432 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1433 &val
, sizeof(val
)));
1440 case TARGET_SOL_SOCKET
:
1442 case TARGET_SO_RCVTIMEO
:
1446 optname
= SO_RCVTIMEO
;
1449 if (optlen
!= sizeof(struct target_timeval
)) {
1450 return -TARGET_EINVAL
;
1453 if (copy_from_user_timeval(&tv
, optval_addr
)) {
1454 return -TARGET_EFAULT
;
1457 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
1461 case TARGET_SO_SNDTIMEO
:
1462 optname
= SO_SNDTIMEO
;
1464 case TARGET_SO_ATTACH_FILTER
:
1466 struct target_sock_fprog
*tfprog
;
1467 struct target_sock_filter
*tfilter
;
1468 struct sock_fprog fprog
;
1469 struct sock_filter
*filter
;
1472 if (optlen
!= sizeof(*tfprog
)) {
1473 return -TARGET_EINVAL
;
1475 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
1476 return -TARGET_EFAULT
;
1478 if (!lock_user_struct(VERIFY_READ
, tfilter
,
1479 tswapal(tfprog
->filter
), 0)) {
1480 unlock_user_struct(tfprog
, optval_addr
, 1);
1481 return -TARGET_EFAULT
;
1484 fprog
.len
= tswap16(tfprog
->len
);
1485 filter
= malloc(fprog
.len
* sizeof(*filter
));
1486 if (filter
== NULL
) {
1487 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
1488 unlock_user_struct(tfprog
, optval_addr
, 1);
1489 return -TARGET_ENOMEM
;
1491 for (i
= 0; i
< fprog
.len
; i
++) {
1492 filter
[i
].code
= tswap16(tfilter
[i
].code
);
1493 filter
[i
].jt
= tfilter
[i
].jt
;
1494 filter
[i
].jf
= tfilter
[i
].jf
;
1495 filter
[i
].k
= tswap32(tfilter
[i
].k
);
1497 fprog
.filter
= filter
;
1499 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
1500 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
1503 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
1504 unlock_user_struct(tfprog
, optval_addr
, 1);
1507 case TARGET_SO_BINDTODEVICE
:
1509 char *dev_ifname
, *addr_ifname
;
1511 if (optlen
> IFNAMSIZ
- 1) {
1512 optlen
= IFNAMSIZ
- 1;
1514 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
1516 return -TARGET_EFAULT
;
1518 optname
= SO_BINDTODEVICE
;
1519 addr_ifname
= alloca(IFNAMSIZ
);
1520 memcpy(addr_ifname
, dev_ifname
, optlen
);
1521 addr_ifname
[optlen
] = 0;
1522 ret
= get_errno(setsockopt(sockfd
, level
, optname
, addr_ifname
, optlen
));
1523 unlock_user (dev_ifname
, optval_addr
, 0);
1526 /* Options with 'int' argument. */
1527 case TARGET_SO_DEBUG
:
1530 case TARGET_SO_REUSEADDR
:
1531 optname
= SO_REUSEADDR
;
1533 case TARGET_SO_TYPE
:
1536 case TARGET_SO_ERROR
:
1539 case TARGET_SO_DONTROUTE
:
1540 optname
= SO_DONTROUTE
;
1542 case TARGET_SO_BROADCAST
:
1543 optname
= SO_BROADCAST
;
1545 case TARGET_SO_SNDBUF
:
1546 optname
= SO_SNDBUF
;
1548 case TARGET_SO_SNDBUFFORCE
:
1549 optname
= SO_SNDBUFFORCE
;
1551 case TARGET_SO_RCVBUF
:
1552 optname
= SO_RCVBUF
;
1554 case TARGET_SO_RCVBUFFORCE
:
1555 optname
= SO_RCVBUFFORCE
;
1557 case TARGET_SO_KEEPALIVE
:
1558 optname
= SO_KEEPALIVE
;
1560 case TARGET_SO_OOBINLINE
:
1561 optname
= SO_OOBINLINE
;
1563 case TARGET_SO_NO_CHECK
:
1564 optname
= SO_NO_CHECK
;
1566 case TARGET_SO_PRIORITY
:
1567 optname
= SO_PRIORITY
;
1570 case TARGET_SO_BSDCOMPAT
:
1571 optname
= SO_BSDCOMPAT
;
1574 case TARGET_SO_PASSCRED
:
1575 optname
= SO_PASSCRED
;
1577 case TARGET_SO_PASSSEC
:
1578 optname
= SO_PASSSEC
;
1580 case TARGET_SO_TIMESTAMP
:
1581 optname
= SO_TIMESTAMP
;
1583 case TARGET_SO_RCVLOWAT
:
1584 optname
= SO_RCVLOWAT
;
1590 if (optlen
< sizeof(uint32_t))
1591 return -TARGET_EINVAL
;
1593 if (get_user_u32(val
, optval_addr
))
1594 return -TARGET_EFAULT
;
1595 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
1599 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
1600 ret
= -TARGET_ENOPROTOOPT
;
1605 /* do_getsockopt() Must return target values and target errnos. */
1606 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
1607 abi_ulong optval_addr
, abi_ulong optlen
)
1614 case TARGET_SOL_SOCKET
:
1617 /* These don't just return a single integer */
1618 case TARGET_SO_LINGER
:
1619 case TARGET_SO_RCVTIMEO
:
1620 case TARGET_SO_SNDTIMEO
:
1621 case TARGET_SO_PEERNAME
:
1623 case TARGET_SO_PEERCRED
: {
1626 struct target_ucred
*tcr
;
1628 if (get_user_u32(len
, optlen
)) {
1629 return -TARGET_EFAULT
;
1632 return -TARGET_EINVAL
;
1636 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
1644 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
1645 return -TARGET_EFAULT
;
1647 __put_user(cr
.pid
, &tcr
->pid
);
1648 __put_user(cr
.uid
, &tcr
->uid
);
1649 __put_user(cr
.gid
, &tcr
->gid
);
1650 unlock_user_struct(tcr
, optval_addr
, 1);
1651 if (put_user_u32(len
, optlen
)) {
1652 return -TARGET_EFAULT
;
1656 /* Options with 'int' argument. */
1657 case TARGET_SO_DEBUG
:
1660 case TARGET_SO_REUSEADDR
:
1661 optname
= SO_REUSEADDR
;
1663 case TARGET_SO_TYPE
:
1666 case TARGET_SO_ERROR
:
1669 case TARGET_SO_DONTROUTE
:
1670 optname
= SO_DONTROUTE
;
1672 case TARGET_SO_BROADCAST
:
1673 optname
= SO_BROADCAST
;
1675 case TARGET_SO_SNDBUF
:
1676 optname
= SO_SNDBUF
;
1678 case TARGET_SO_RCVBUF
:
1679 optname
= SO_RCVBUF
;
1681 case TARGET_SO_KEEPALIVE
:
1682 optname
= SO_KEEPALIVE
;
1684 case TARGET_SO_OOBINLINE
:
1685 optname
= SO_OOBINLINE
;
1687 case TARGET_SO_NO_CHECK
:
1688 optname
= SO_NO_CHECK
;
1690 case TARGET_SO_PRIORITY
:
1691 optname
= SO_PRIORITY
;
1694 case TARGET_SO_BSDCOMPAT
:
1695 optname
= SO_BSDCOMPAT
;
1698 case TARGET_SO_PASSCRED
:
1699 optname
= SO_PASSCRED
;
1701 case TARGET_SO_TIMESTAMP
:
1702 optname
= SO_TIMESTAMP
;
1704 case TARGET_SO_RCVLOWAT
:
1705 optname
= SO_RCVLOWAT
;
1707 case TARGET_SO_ACCEPTCONN
:
1708 optname
= SO_ACCEPTCONN
;
1715 /* TCP options all take an 'int' value. */
1717 if (get_user_u32(len
, optlen
))
1718 return -TARGET_EFAULT
;
1720 return -TARGET_EINVAL
;
1722 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1725 if (optname
== SO_TYPE
) {
1726 val
= host_to_target_sock_type(val
);
1731 if (put_user_u32(val
, optval_addr
))
1732 return -TARGET_EFAULT
;
1734 if (put_user_u8(val
, optval_addr
))
1735 return -TARGET_EFAULT
;
1737 if (put_user_u32(len
, optlen
))
1738 return -TARGET_EFAULT
;
1745 case IP_ROUTER_ALERT
:
1749 case IP_MTU_DISCOVER
:
1755 case IP_MULTICAST_TTL
:
1756 case IP_MULTICAST_LOOP
:
1757 if (get_user_u32(len
, optlen
))
1758 return -TARGET_EFAULT
;
1760 return -TARGET_EINVAL
;
1762 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1765 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
1767 if (put_user_u32(len
, optlen
)
1768 || put_user_u8(val
, optval_addr
))
1769 return -TARGET_EFAULT
;
1771 if (len
> sizeof(int))
1773 if (put_user_u32(len
, optlen
)
1774 || put_user_u32(val
, optval_addr
))
1775 return -TARGET_EFAULT
;
1779 ret
= -TARGET_ENOPROTOOPT
;
1785 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1787 ret
= -TARGET_EOPNOTSUPP
;
1793 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
1794 int count
, int copy
)
1796 struct target_iovec
*target_vec
;
1798 abi_ulong total_len
, max_len
;
1806 if (count
< 0 || count
> IOV_MAX
) {
1811 vec
= calloc(count
, sizeof(struct iovec
));
1817 target_vec
= lock_user(VERIFY_READ
, target_addr
,
1818 count
* sizeof(struct target_iovec
), 1);
1819 if (target_vec
== NULL
) {
1824 /* ??? If host page size > target page size, this will result in a
1825 value larger than what we can actually support. */
1826 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
1829 for (i
= 0; i
< count
; i
++) {
1830 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
1831 abi_long len
= tswapal(target_vec
[i
].iov_len
);
1836 } else if (len
== 0) {
1837 /* Zero length pointer is ignored. */
1838 vec
[i
].iov_base
= 0;
1840 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
1841 if (!vec
[i
].iov_base
) {
1845 if (len
> max_len
- total_len
) {
1846 len
= max_len
- total_len
;
1849 vec
[i
].iov_len
= len
;
1853 unlock_user(target_vec
, target_addr
, 0);
1857 unlock_user(target_vec
, target_addr
, 0);
1864 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
1865 int count
, int copy
)
1867 struct target_iovec
*target_vec
;
1870 target_vec
= lock_user(VERIFY_READ
, target_addr
,
1871 count
* sizeof(struct target_iovec
), 1);
1873 for (i
= 0; i
< count
; i
++) {
1874 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
1875 abi_long len
= tswapal(target_vec
[i
].iov_base
);
1879 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
1881 unlock_user(target_vec
, target_addr
, 0);
1887 static inline int target_to_host_sock_type(int *type
)
1890 int target_type
= *type
;
1892 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
1893 case TARGET_SOCK_DGRAM
:
1894 host_type
= SOCK_DGRAM
;
1896 case TARGET_SOCK_STREAM
:
1897 host_type
= SOCK_STREAM
;
1900 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
1903 if (target_type
& TARGET_SOCK_CLOEXEC
) {
1904 #if defined(SOCK_CLOEXEC)
1905 host_type
|= SOCK_CLOEXEC
;
1907 return -TARGET_EINVAL
;
1910 if (target_type
& TARGET_SOCK_NONBLOCK
) {
1911 #if defined(SOCK_NONBLOCK)
1912 host_type
|= SOCK_NONBLOCK
;
1913 #elif !defined(O_NONBLOCK)
1914 return -TARGET_EINVAL
;
/* Try to emulate socket type flags after socket creation.
 * On hosts without SOCK_NONBLOCK, apply O_NONBLOCK via fcntl() when the
 * guest asked for a non-blocking socket.  Returns fd, or -TARGET_EINVAL
 * on failure (the fd is closed in that case).
 */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
1936 /* do_socket() Must return target values and target errnos. */
1937 static abi_long
do_socket(int domain
, int type
, int protocol
)
1939 int target_type
= type
;
1942 ret
= target_to_host_sock_type(&type
);
1947 if (domain
== PF_NETLINK
)
1948 return -TARGET_EAFNOSUPPORT
;
1949 ret
= get_errno(socket(domain
, type
, protocol
));
1951 ret
= sock_flags_fixup(ret
, target_type
);
1956 /* do_bind() Must return target values and target errnos. */
1957 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
1963 if ((int)addrlen
< 0) {
1964 return -TARGET_EINVAL
;
1967 addr
= alloca(addrlen
+1);
1969 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1973 return get_errno(bind(sockfd
, addr
, addrlen
));
1976 /* do_connect() Must return target values and target errnos. */
1977 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
1983 if ((int)addrlen
< 0) {
1984 return -TARGET_EINVAL
;
1987 addr
= alloca(addrlen
+1);
1989 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1993 return get_errno(connect(sockfd
, addr
, addrlen
));
1996 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
1997 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
1998 int flags
, int send
)
2004 abi_ulong target_vec
;
2006 if (msgp
->msg_name
) {
2007 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
2008 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
2009 ret
= target_to_host_sockaddr(msg
.msg_name
, tswapal(msgp
->msg_name
),
2015 msg
.msg_name
= NULL
;
2016 msg
.msg_namelen
= 0;
2018 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
2019 msg
.msg_control
= alloca(msg
.msg_controllen
);
2020 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
2022 count
= tswapal(msgp
->msg_iovlen
);
2023 target_vec
= tswapal(msgp
->msg_iov
);
2024 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
2025 target_vec
, count
, send
);
2027 ret
= -host_to_target_errno(errno
);
2030 msg
.msg_iovlen
= count
;
2034 ret
= target_to_host_cmsg(&msg
, msgp
);
2036 ret
= get_errno(sendmsg(fd
, &msg
, flags
));
2038 ret
= get_errno(recvmsg(fd
, &msg
, flags
));
2039 if (!is_error(ret
)) {
2041 ret
= host_to_target_cmsg(msgp
, &msg
);
2042 if (!is_error(ret
)) {
2043 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
2044 if (msg
.msg_name
!= NULL
) {
2045 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
2046 msg
.msg_name
, msg
.msg_namelen
);
2058 unlock_iovec(vec
, target_vec
, count
, !send
);
2063 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
2064 int flags
, int send
)
2067 struct target_msghdr
*msgp
;
2069 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
2073 return -TARGET_EFAULT
;
2075 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
2076 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
2080 #ifdef TARGET_NR_sendmmsg
2081 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2082 * so it might not have this *mmsg-specific flag either.
2084 #ifndef MSG_WAITFORONE
2085 #define MSG_WAITFORONE 0x10000
2088 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
2089 unsigned int vlen
, unsigned int flags
,
2092 struct target_mmsghdr
*mmsgp
;
2096 if (vlen
> UIO_MAXIOV
) {
2100 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
2102 return -TARGET_EFAULT
;
2105 for (i
= 0; i
< vlen
; i
++) {
2106 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
2107 if (is_error(ret
)) {
2110 mmsgp
[i
].msg_len
= tswap32(ret
);
2111 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2112 if (flags
& MSG_WAITFORONE
) {
2113 flags
|= MSG_DONTWAIT
;
2117 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
2119 /* Return number of datagrams sent if we sent any at all;
2120 * otherwise return the error.
2129 /* If we don't have a system accept4() then just call accept.
2130 * The callsites to do_accept4() will ensure that they don't
2131 * pass a non-zero flags argument in this config.
2133 #ifndef CONFIG_ACCEPT4
2134 static inline int accept4(int sockfd
, struct sockaddr
*addr
,
2135 socklen_t
*addrlen
, int flags
)
2138 return accept(sockfd
, addr
, addrlen
);
2142 /* do_accept4() Must return target values and target errnos. */
2143 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
2144 abi_ulong target_addrlen_addr
, int flags
)
2151 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
2153 if (target_addr
== 0) {
2154 return get_errno(accept4(fd
, NULL
, NULL
, host_flags
));
2157 /* linux returns EINVAL if addrlen pointer is invalid */
2158 if (get_user_u32(addrlen
, target_addrlen_addr
))
2159 return -TARGET_EINVAL
;
2161 if ((int)addrlen
< 0) {
2162 return -TARGET_EINVAL
;
2165 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2166 return -TARGET_EINVAL
;
2168 addr
= alloca(addrlen
);
2170 ret
= get_errno(accept4(fd
, addr
, &addrlen
, host_flags
));
2171 if (!is_error(ret
)) {
2172 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2173 if (put_user_u32(addrlen
, target_addrlen_addr
))
2174 ret
= -TARGET_EFAULT
;
2179 /* do_getpeername() Must return target values and target errnos. */
2180 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
2181 abi_ulong target_addrlen_addr
)
2187 if (get_user_u32(addrlen
, target_addrlen_addr
))
2188 return -TARGET_EFAULT
;
2190 if ((int)addrlen
< 0) {
2191 return -TARGET_EINVAL
;
2194 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2195 return -TARGET_EFAULT
;
2197 addr
= alloca(addrlen
);
2199 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
2200 if (!is_error(ret
)) {
2201 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2202 if (put_user_u32(addrlen
, target_addrlen_addr
))
2203 ret
= -TARGET_EFAULT
;
2208 /* do_getsockname() Must return target values and target errnos. */
2209 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
2210 abi_ulong target_addrlen_addr
)
2216 if (get_user_u32(addrlen
, target_addrlen_addr
))
2217 return -TARGET_EFAULT
;
2219 if ((int)addrlen
< 0) {
2220 return -TARGET_EINVAL
;
2223 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2224 return -TARGET_EFAULT
;
2226 addr
= alloca(addrlen
);
2228 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
2229 if (!is_error(ret
)) {
2230 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2231 if (put_user_u32(addrlen
, target_addrlen_addr
))
2232 ret
= -TARGET_EFAULT
;
2237 /* do_socketpair() Must return target values and target errnos. */
2238 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
2239 abi_ulong target_tab_addr
)
2244 target_to_host_sock_type(&type
);
2246 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
2247 if (!is_error(ret
)) {
2248 if (put_user_s32(tab
[0], target_tab_addr
)
2249 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
2250 ret
= -TARGET_EFAULT
;
2255 /* do_sendto() Must return target values and target errnos. */
2256 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
2257 abi_ulong target_addr
, socklen_t addrlen
)
2263 if ((int)addrlen
< 0) {
2264 return -TARGET_EINVAL
;
2267 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
2269 return -TARGET_EFAULT
;
2271 addr
= alloca(addrlen
+1);
2272 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
2274 unlock_user(host_msg
, msg
, 0);
2277 ret
= get_errno(sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
2279 ret
= get_errno(send(fd
, host_msg
, len
, flags
));
2281 unlock_user(host_msg
, msg
, 0);
2285 /* do_recvfrom() Must return target values and target errnos. */
2286 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
2287 abi_ulong target_addr
,
2288 abi_ulong target_addrlen
)
2295 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
2297 return -TARGET_EFAULT
;
2299 if (get_user_u32(addrlen
, target_addrlen
)) {
2300 ret
= -TARGET_EFAULT
;
2303 if ((int)addrlen
< 0) {
2304 ret
= -TARGET_EINVAL
;
2307 addr
= alloca(addrlen
);
2308 ret
= get_errno(recvfrom(fd
, host_msg
, len
, flags
, addr
, &addrlen
));
2310 addr
= NULL
; /* To keep compiler quiet. */
2311 ret
= get_errno(qemu_recv(fd
, host_msg
, len
, flags
));
2313 if (!is_error(ret
)) {
2315 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2316 if (put_user_u32(addrlen
, target_addrlen
)) {
2317 ret
= -TARGET_EFAULT
;
2321 unlock_user(host_msg
, msg
, len
);
2324 unlock_user(host_msg
, msg
, 0);
2329 #ifdef TARGET_NR_socketcall
2330 /* do_socketcall() Must return target values and target errnos. */
2331 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
2333 static const unsigned ac
[] = { /* number of arguments per call */
2334 [SOCKOP_socket
] = 3, /* domain, type, protocol */
2335 [SOCKOP_bind
] = 3, /* sockfd, addr, addrlen */
2336 [SOCKOP_connect
] = 3, /* sockfd, addr, addrlen */
2337 [SOCKOP_listen
] = 2, /* sockfd, backlog */
2338 [SOCKOP_accept
] = 3, /* sockfd, addr, addrlen */
2339 [SOCKOP_accept4
] = 4, /* sockfd, addr, addrlen, flags */
2340 [SOCKOP_getsockname
] = 3, /* sockfd, addr, addrlen */
2341 [SOCKOP_getpeername
] = 3, /* sockfd, addr, addrlen */
2342 [SOCKOP_socketpair
] = 4, /* domain, type, protocol, tab */
2343 [SOCKOP_send
] = 4, /* sockfd, msg, len, flags */
2344 [SOCKOP_recv
] = 4, /* sockfd, msg, len, flags */
2345 [SOCKOP_sendto
] = 6, /* sockfd, msg, len, flags, addr, addrlen */
2346 [SOCKOP_recvfrom
] = 6, /* sockfd, msg, len, flags, addr, addrlen */
2347 [SOCKOP_shutdown
] = 2, /* sockfd, how */
2348 [SOCKOP_sendmsg
] = 3, /* sockfd, msg, flags */
2349 [SOCKOP_recvmsg
] = 3, /* sockfd, msg, flags */
2350 [SOCKOP_setsockopt
] = 5, /* sockfd, level, optname, optval, optlen */
2351 [SOCKOP_getsockopt
] = 5, /* sockfd, level, optname, optval, optlen */
2353 abi_long a
[6]; /* max 6 args */
2355 /* first, collect the arguments in a[] according to ac[] */
2356 if (num
>= 0 && num
< ARRAY_SIZE(ac
)) {
2358 assert(ARRAY_SIZE(a
) >= ac
[num
]); /* ensure we have space for args */
2359 for (i
= 0; i
< ac
[num
]; ++i
) {
2360 if (get_user_ual(a
[i
], vptr
+ i
* sizeof(abi_long
)) != 0) {
2361 return -TARGET_EFAULT
;
2366 /* now when we have the args, actually handle the call */
2368 case SOCKOP_socket
: /* domain, type, protocol */
2369 return do_socket(a
[0], a
[1], a
[2]);
2370 case SOCKOP_bind
: /* sockfd, addr, addrlen */
2371 return do_bind(a
[0], a
[1], a
[2]);
2372 case SOCKOP_connect
: /* sockfd, addr, addrlen */
2373 return do_connect(a
[0], a
[1], a
[2]);
2374 case SOCKOP_listen
: /* sockfd, backlog */
2375 return get_errno(listen(a
[0], a
[1]));
2376 case SOCKOP_accept
: /* sockfd, addr, addrlen */
2377 return do_accept4(a
[0], a
[1], a
[2], 0);
2378 case SOCKOP_accept4
: /* sockfd, addr, addrlen, flags */
2379 return do_accept4(a
[0], a
[1], a
[2], a
[3]);
2380 case SOCKOP_getsockname
: /* sockfd, addr, addrlen */
2381 return do_getsockname(a
[0], a
[1], a
[2]);
2382 case SOCKOP_getpeername
: /* sockfd, addr, addrlen */
2383 return do_getpeername(a
[0], a
[1], a
[2]);
2384 case SOCKOP_socketpair
: /* domain, type, protocol, tab */
2385 return do_socketpair(a
[0], a
[1], a
[2], a
[3]);
2386 case SOCKOP_send
: /* sockfd, msg, len, flags */
2387 return do_sendto(a
[0], a
[1], a
[2], a
[3], 0, 0);
2388 case SOCKOP_recv
: /* sockfd, msg, len, flags */
2389 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], 0, 0);
2390 case SOCKOP_sendto
: /* sockfd, msg, len, flags, addr, addrlen */
2391 return do_sendto(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
2392 case SOCKOP_recvfrom
: /* sockfd, msg, len, flags, addr, addrlen */
2393 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
2394 case SOCKOP_shutdown
: /* sockfd, how */
2395 return get_errno(shutdown(a
[0], a
[1]));
2396 case SOCKOP_sendmsg
: /* sockfd, msg, flags */
2397 return do_sendrecvmsg(a
[0], a
[1], a
[2], 1);
2398 case SOCKOP_recvmsg
: /* sockfd, msg, flags */
2399 return do_sendrecvmsg(a
[0], a
[1], a
[2], 0);
2400 case SOCKOP_setsockopt
: /* sockfd, level, optname, optval, optlen */
2401 return do_setsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
2402 case SOCKOP_getsockopt
: /* sockfd, level, optname, optval, optlen */
2403 return do_getsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
2405 gemu_log("Unsupported socketcall: %d\n", num
);
2406 return -TARGET_ENOSYS
;
2411 #define N_SHM_REGIONS 32
2413 static struct shm_region
{
2416 } shm_regions
[N_SHM_REGIONS
];
2418 struct target_semid_ds
2420 struct target_ipc_perm sem_perm
;
2421 abi_ulong sem_otime
;
2422 abi_ulong __unused1
;
2423 abi_ulong sem_ctime
;
2424 abi_ulong __unused2
;
2425 abi_ulong sem_nsems
;
2426 abi_ulong __unused3
;
2427 abi_ulong __unused4
;
2430 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
2431 abi_ulong target_addr
)
2433 struct target_ipc_perm
*target_ip
;
2434 struct target_semid_ds
*target_sd
;
2436 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2437 return -TARGET_EFAULT
;
2438 target_ip
= &(target_sd
->sem_perm
);
2439 host_ip
->__key
= tswap32(target_ip
->__key
);
2440 host_ip
->uid
= tswap32(target_ip
->uid
);
2441 host_ip
->gid
= tswap32(target_ip
->gid
);
2442 host_ip
->cuid
= tswap32(target_ip
->cuid
);
2443 host_ip
->cgid
= tswap32(target_ip
->cgid
);
2444 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2445 host_ip
->mode
= tswap32(target_ip
->mode
);
2447 host_ip
->mode
= tswap16(target_ip
->mode
);
2449 #if defined(TARGET_PPC)
2450 host_ip
->__seq
= tswap32(target_ip
->__seq
);
2452 host_ip
->__seq
= tswap16(target_ip
->__seq
);
2454 unlock_user_struct(target_sd
, target_addr
, 0);
2458 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
2459 struct ipc_perm
*host_ip
)
2461 struct target_ipc_perm
*target_ip
;
2462 struct target_semid_ds
*target_sd
;
2464 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2465 return -TARGET_EFAULT
;
2466 target_ip
= &(target_sd
->sem_perm
);
2467 target_ip
->__key
= tswap32(host_ip
->__key
);
2468 target_ip
->uid
= tswap32(host_ip
->uid
);
2469 target_ip
->gid
= tswap32(host_ip
->gid
);
2470 target_ip
->cuid
= tswap32(host_ip
->cuid
);
2471 target_ip
->cgid
= tswap32(host_ip
->cgid
);
2472 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2473 target_ip
->mode
= tswap32(host_ip
->mode
);
2475 target_ip
->mode
= tswap16(host_ip
->mode
);
2477 #if defined(TARGET_PPC)
2478 target_ip
->__seq
= tswap32(host_ip
->__seq
);
2480 target_ip
->__seq
= tswap16(host_ip
->__seq
);
2482 unlock_user_struct(target_sd
, target_addr
, 1);
2486 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
2487 abi_ulong target_addr
)
2489 struct target_semid_ds
*target_sd
;
2491 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2492 return -TARGET_EFAULT
;
2493 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
2494 return -TARGET_EFAULT
;
2495 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
2496 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
2497 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
2498 unlock_user_struct(target_sd
, target_addr
, 0);
2502 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
2503 struct semid_ds
*host_sd
)
2505 struct target_semid_ds
*target_sd
;
2507 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2508 return -TARGET_EFAULT
;
2509 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
2510 return -TARGET_EFAULT
;
2511 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
2512 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
2513 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
2514 unlock_user_struct(target_sd
, target_addr
, 1);
/* Guest-layout seminfo (for semctl IPC_INFO/SEM_INFO).  Field list
 * reconstructed from host_to_target_seminfo() below; all 32-bit ints.  */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
2531 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
2532 struct seminfo
*host_seminfo
)
2534 struct target_seminfo
*target_seminfo
;
2535 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
2536 return -TARGET_EFAULT
;
2537 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
2538 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
2539 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
2540 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
2541 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
2542 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
2543 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
2544 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
2545 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
2546 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
2547 unlock_user_struct(target_seminfo
, target_addr
, 1);
2553 struct semid_ds
*buf
;
2554 unsigned short *array
;
2555 struct seminfo
*__buf
;
2558 union target_semun
{
2565 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
2566 abi_ulong target_addr
)
2569 unsigned short *array
;
2571 struct semid_ds semid_ds
;
2574 semun
.buf
= &semid_ds
;
2576 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
2578 return get_errno(ret
);
2580 nsems
= semid_ds
.sem_nsems
;
2582 *host_array
= malloc(nsems
*sizeof(unsigned short));
2584 return -TARGET_ENOMEM
;
2586 array
= lock_user(VERIFY_READ
, target_addr
,
2587 nsems
*sizeof(unsigned short), 1);
2590 return -TARGET_EFAULT
;
2593 for(i
=0; i
<nsems
; i
++) {
2594 __get_user((*host_array
)[i
], &array
[i
]);
2596 unlock_user(array
, target_addr
, 0);
2601 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
2602 unsigned short **host_array
)
2605 unsigned short *array
;
2607 struct semid_ds semid_ds
;
2610 semun
.buf
= &semid_ds
;
2612 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
2614 return get_errno(ret
);
2616 nsems
= semid_ds
.sem_nsems
;
2618 array
= lock_user(VERIFY_WRITE
, target_addr
,
2619 nsems
*sizeof(unsigned short), 0);
2621 return -TARGET_EFAULT
;
2623 for(i
=0; i
<nsems
; i
++) {
2624 __put_user((*host_array
)[i
], &array
[i
]);
2627 unlock_user(array
, target_addr
, 1);
2632 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
2633 union target_semun target_su
)
2636 struct semid_ds dsarg
;
2637 unsigned short *array
= NULL
;
2638 struct seminfo seminfo
;
2639 abi_long ret
= -TARGET_EINVAL
;
2646 arg
.val
= tswap32(target_su
.val
);
2647 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2648 target_su
.val
= tswap32(arg
.val
);
2652 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
2656 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2657 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
2664 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
2668 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2669 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
2675 arg
.__buf
= &seminfo
;
2676 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2677 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
2685 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
/* Guest-layout semop(2) operation descriptor. */
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};
2698 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
2699 abi_ulong target_addr
,
2702 struct target_sembuf
*target_sembuf
;
2705 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
2706 nsops
*sizeof(struct target_sembuf
), 1);
2708 return -TARGET_EFAULT
;
2710 for(i
=0; i
<nsops
; i
++) {
2711 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
2712 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
2713 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
2716 unlock_user(target_sembuf
, target_addr
, 0);
2721 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
2723 struct sembuf sops
[nsops
];
2725 if (target_to_host_sembuf(sops
, ptr
, nsops
))
2726 return -TARGET_EFAULT
;
2728 return get_errno(semop(semid
, sops
, nsops
));
2731 struct target_msqid_ds
2733 struct target_ipc_perm msg_perm
;
2734 abi_ulong msg_stime
;
2735 #if TARGET_ABI_BITS == 32
2736 abi_ulong __unused1
;
2738 abi_ulong msg_rtime
;
2739 #if TARGET_ABI_BITS == 32
2740 abi_ulong __unused2
;
2742 abi_ulong msg_ctime
;
2743 #if TARGET_ABI_BITS == 32
2744 abi_ulong __unused3
;
2746 abi_ulong __msg_cbytes
;
2748 abi_ulong msg_qbytes
;
2749 abi_ulong msg_lspid
;
2750 abi_ulong msg_lrpid
;
2751 abi_ulong __unused4
;
2752 abi_ulong __unused5
;
2755 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
2756 abi_ulong target_addr
)
2758 struct target_msqid_ds
*target_md
;
2760 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
2761 return -TARGET_EFAULT
;
2762 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
2763 return -TARGET_EFAULT
;
2764 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
2765 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
2766 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
2767 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
2768 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
2769 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
2770 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
2771 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
2772 unlock_user_struct(target_md
, target_addr
, 0);
2776 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
2777 struct msqid_ds
*host_md
)
2779 struct target_msqid_ds
*target_md
;
2781 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
2782 return -TARGET_EFAULT
;
2783 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
2784 return -TARGET_EFAULT
;
2785 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
2786 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
2787 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
2788 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
2789 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
2790 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
2791 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
2792 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
2793 unlock_user_struct(target_md
, target_addr
, 1);
/* Guest-layout mirror of the kernel's struct msginfo (IPC_INFO/MSG_INFO). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
2808 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
2809 struct msginfo
*host_msginfo
)
2811 struct target_msginfo
*target_msginfo
;
2812 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
2813 return -TARGET_EFAULT
;
2814 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
2815 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
2816 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
2817 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
2818 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
2819 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
2820 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
2821 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
2822 unlock_user_struct(target_msginfo
, target_addr
, 1);
2826 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
2828 struct msqid_ds dsarg
;
2829 struct msginfo msginfo
;
2830 abi_long ret
= -TARGET_EINVAL
;
2838 if (target_to_host_msqid_ds(&dsarg
,ptr
))
2839 return -TARGET_EFAULT
;
2840 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
2841 if (host_to_target_msqid_ds(ptr
,&dsarg
))
2842 return -TARGET_EFAULT
;
2845 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
2849 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
2850 if (host_to_target_msginfo(ptr
, &msginfo
))
2851 return -TARGET_EFAULT
;
2858 struct target_msgbuf
{
2863 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
2864 unsigned int msgsz
, int msgflg
)
2866 struct target_msgbuf
*target_mb
;
2867 struct msgbuf
*host_mb
;
2870 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
2871 return -TARGET_EFAULT
;
2872 host_mb
= malloc(msgsz
+sizeof(long));
2873 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
2874 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
2875 ret
= get_errno(msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
2877 unlock_user_struct(target_mb
, msgp
, 0);
2882 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
2883 unsigned int msgsz
, abi_long msgtyp
,
2886 struct target_msgbuf
*target_mb
;
2888 struct msgbuf
*host_mb
;
2891 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
2892 return -TARGET_EFAULT
;
2894 host_mb
= g_malloc(msgsz
+sizeof(long));
2895 ret
= get_errno(msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
2898 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
2899 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
2900 if (!target_mtext
) {
2901 ret
= -TARGET_EFAULT
;
2904 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
2905 unlock_user(target_mtext
, target_mtext_addr
, ret
);
2908 target_mb
->mtype
= tswapal(host_mb
->mtype
);
2912 unlock_user_struct(target_mb
, msgp
, 1);
2917 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
2918 abi_ulong target_addr
)
2920 struct target_shmid_ds
*target_sd
;
2922 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2923 return -TARGET_EFAULT
;
2924 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
2925 return -TARGET_EFAULT
;
2926 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
2927 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
2928 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
2929 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
2930 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
2931 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
2932 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
2933 unlock_user_struct(target_sd
, target_addr
, 0);
2937 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
2938 struct shmid_ds
*host_sd
)
2940 struct target_shmid_ds
*target_sd
;
2942 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2943 return -TARGET_EFAULT
;
2944 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
2945 return -TARGET_EFAULT
;
2946 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
2947 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
2948 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
2949 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
2950 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
2951 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
2952 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
2953 unlock_user_struct(target_sd
, target_addr
, 1);
2957 struct target_shminfo
{
2965 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
2966 struct shminfo
*host_shminfo
)
2968 struct target_shminfo
*target_shminfo
;
2969 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
2970 return -TARGET_EFAULT
;
2971 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
2972 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
2973 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
2974 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
2975 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
2976 unlock_user_struct(target_shminfo
, target_addr
, 1);
2980 struct target_shm_info
{
2985 abi_ulong swap_attempts
;
2986 abi_ulong swap_successes
;
2989 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
2990 struct shm_info
*host_shm_info
)
2992 struct target_shm_info
*target_shm_info
;
2993 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
2994 return -TARGET_EFAULT
;
2995 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
2996 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
2997 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
2998 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
2999 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
3000 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
3001 unlock_user_struct(target_shm_info
, target_addr
, 1);
3005 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
3007 struct shmid_ds dsarg
;
3008 struct shminfo shminfo
;
3009 struct shm_info shm_info
;
3010 abi_long ret
= -TARGET_EINVAL
;
3018 if (target_to_host_shmid_ds(&dsarg
, buf
))
3019 return -TARGET_EFAULT
;
3020 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
3021 if (host_to_target_shmid_ds(buf
, &dsarg
))
3022 return -TARGET_EFAULT
;
3025 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
3026 if (host_to_target_shminfo(buf
, &shminfo
))
3027 return -TARGET_EFAULT
;
3030 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
3031 if (host_to_target_shm_info(buf
, &shm_info
))
3032 return -TARGET_EFAULT
;
3037 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
3044 static inline abi_ulong
do_shmat(int shmid
, abi_ulong shmaddr
, int shmflg
)
3048 struct shmid_ds shm_info
;
3051 /* find out the length of the shared memory segment */
3052 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
3053 if (is_error(ret
)) {
3054 /* can't get length, bail out */
3061 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
3063 abi_ulong mmap_start
;
3065 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
3067 if (mmap_start
== -1) {
3069 host_raddr
= (void *)-1;
3071 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
3074 if (host_raddr
== (void *)-1) {
3076 return get_errno((long)host_raddr
);
3078 raddr
=h2g((unsigned long)host_raddr
);
3080 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
3081 PAGE_VALID
| PAGE_READ
|
3082 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
3084 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
3085 if (shm_regions
[i
].start
== 0) {
3086 shm_regions
[i
].start
= raddr
;
3087 shm_regions
[i
].size
= shm_info
.shm_segsz
;
3097 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
3101 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
3102 if (shm_regions
[i
].start
== shmaddr
) {
3103 shm_regions
[i
].start
= 0;
3104 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
3109 return get_errno(shmdt(g2h(shmaddr
)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos.
 * Demultiplexer for the legacy ipc(2) syscall: the low 16 bits of 'call'
 * select the operation, the high 16 bits carry the calling convention
 * version.
 */
static abi_long do_ipc(unsigned int call, int first,
                       int second, int third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl:
        ret = do_semctl(first, second, third,
                        (union target_semun)(abi_ulong) ptr);
        break;

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old-style msgrcv packs msgp/msgtyp into a kludge struct. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                                tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* The attach address is returned through *third. */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
3213 /* kernel structure types definitions */
3215 #define STRUCT(name, ...) STRUCT_ ## name,
3216 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3218 #include "syscall_types.h"
3221 #undef STRUCT_SPECIAL
3223 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3224 #define STRUCT_SPECIAL(name)
3225 #include "syscall_types.h"
3227 #undef STRUCT_SPECIAL
3229 typedef struct IOCTLEntry IOCTLEntry
;
3231 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3232 int fd
, abi_long cmd
, abi_long arg
);
3235 unsigned int target_cmd
;
3236 unsigned int host_cmd
;
3239 do_ioctl_fn
*do_ioctl
;
3240 const argtype arg_type
[5];
3243 #define IOC_R 0x0001
3244 #define IOC_W 0x0002
3245 #define IOC_RW (IOC_R | IOC_W)
3247 #define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

/* Custom converter for FS_IOC_FIEMAP.
 * The parameter for this ioctl is a struct fiemap followed
 * by an array of struct fiemap_extent whose size is set
 * in fiemap->fm_extent_count.  The array is filled in by the
 * ioctl, so we must convert it back to the guest afterwards.
 */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, abi_long cmd, abi_long arg)
{
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof(*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        free(fm);
    }
    return ret;
}
#endif
3338 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3339 int fd
, abi_long cmd
, abi_long arg
)
3341 const argtype
*arg_type
= ie
->arg_type
;
3345 struct ifconf
*host_ifconf
;
3347 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
3348 int target_ifreq_size
;
3353 abi_long target_ifc_buf
;
3357 assert(arg_type
[0] == TYPE_PTR
);
3358 assert(ie
->access
== IOC_RW
);
3361 target_size
= thunk_type_size(arg_type
, 0);
3363 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3365 return -TARGET_EFAULT
;
3366 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3367 unlock_user(argptr
, arg
, 0);
3369 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
3370 target_ifc_len
= host_ifconf
->ifc_len
;
3371 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
3373 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
3374 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
3375 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
3377 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
3378 if (outbufsz
> MAX_STRUCT_SIZE
) {
3379 /* We can't fit all the extents into the fixed size buffer.
3380 * Allocate one that is large enough and use it instead.
3382 host_ifconf
= malloc(outbufsz
);
3384 return -TARGET_ENOMEM
;
3386 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
3389 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
3391 host_ifconf
->ifc_len
= host_ifc_len
;
3392 host_ifconf
->ifc_buf
= host_ifc_buf
;
3394 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, host_ifconf
));
3395 if (!is_error(ret
)) {
3396 /* convert host ifc_len to target ifc_len */
3398 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
3399 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
3400 host_ifconf
->ifc_len
= target_ifc_len
;
3402 /* restore target ifc_buf */
3404 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
3406 /* copy struct ifconf to target user */
3408 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3410 return -TARGET_EFAULT
;
3411 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
3412 unlock_user(argptr
, arg
, target_size
);
3414 /* copy ifreq[] to target user */
3416 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
3417 for (i
= 0; i
< nb_ifreq
; i
++) {
3418 thunk_convert(argptr
+ i
* target_ifreq_size
,
3419 host_ifc_buf
+ i
* sizeof(struct ifreq
),
3420 ifreq_arg_type
, THUNK_TARGET
);
3422 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
3432 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
3433 abi_long cmd
, abi_long arg
)
3436 struct dm_ioctl
*host_dm
;
3437 abi_long guest_data
;
3438 uint32_t guest_data_size
;
3440 const argtype
*arg_type
= ie
->arg_type
;
3442 void *big_buf
= NULL
;
3446 target_size
= thunk_type_size(arg_type
, 0);
3447 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3449 ret
= -TARGET_EFAULT
;
3452 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3453 unlock_user(argptr
, arg
, 0);
3455 /* buf_temp is too small, so fetch things into a bigger buffer */
3456 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
3457 memcpy(big_buf
, buf_temp
, target_size
);
3461 guest_data
= arg
+ host_dm
->data_start
;
3462 if ((guest_data
- arg
) < 0) {
3466 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
3467 host_data
= (char*)host_dm
+ host_dm
->data_start
;
3469 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
3470 switch (ie
->host_cmd
) {
3472 case DM_LIST_DEVICES
:
3475 case DM_DEV_SUSPEND
:
3478 case DM_TABLE_STATUS
:
3479 case DM_TABLE_CLEAR
:
3481 case DM_LIST_VERSIONS
:
3485 case DM_DEV_SET_GEOMETRY
:
3486 /* data contains only strings */
3487 memcpy(host_data
, argptr
, guest_data_size
);
3490 memcpy(host_data
, argptr
, guest_data_size
);
3491 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
3495 void *gspec
= argptr
;
3496 void *cur_data
= host_data
;
3497 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
3498 int spec_size
= thunk_type_size(arg_type
, 0);
3501 for (i
= 0; i
< host_dm
->target_count
; i
++) {
3502 struct dm_target_spec
*spec
= cur_data
;
3506 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
3507 slen
= strlen((char*)gspec
+ spec_size
) + 1;
3509 spec
->next
= sizeof(*spec
) + slen
;
3510 strcpy((char*)&spec
[1], gspec
+ spec_size
);
3512 cur_data
+= spec
->next
;
3517 ret
= -TARGET_EINVAL
;
3520 unlock_user(argptr
, guest_data
, 0);
3522 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3523 if (!is_error(ret
)) {
3524 guest_data
= arg
+ host_dm
->data_start
;
3525 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
3526 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
3527 switch (ie
->host_cmd
) {
3532 case DM_DEV_SUSPEND
:
3535 case DM_TABLE_CLEAR
:
3537 case DM_DEV_SET_GEOMETRY
:
3538 /* no return data */
3540 case DM_LIST_DEVICES
:
3542 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
3543 uint32_t remaining_data
= guest_data_size
;
3544 void *cur_data
= argptr
;
3545 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
3546 int nl_size
= 12; /* can't use thunk_size due to alignment */
3549 uint32_t next
= nl
->next
;
3551 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
3553 if (remaining_data
< nl
->next
) {
3554 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3557 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
3558 strcpy(cur_data
+ nl_size
, nl
->name
);
3559 cur_data
+= nl
->next
;
3560 remaining_data
-= nl
->next
;
3564 nl
= (void*)nl
+ next
;
3569 case DM_TABLE_STATUS
:
3571 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
3572 void *cur_data
= argptr
;
3573 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
3574 int spec_size
= thunk_type_size(arg_type
, 0);
3577 for (i
= 0; i
< host_dm
->target_count
; i
++) {
3578 uint32_t next
= spec
->next
;
3579 int slen
= strlen((char*)&spec
[1]) + 1;
3580 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
3581 if (guest_data_size
< spec
->next
) {
3582 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3585 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
3586 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
3587 cur_data
= argptr
+ spec
->next
;
3588 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
3594 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
3595 int count
= *(uint32_t*)hdata
;
3596 uint64_t *hdev
= hdata
+ 8;
3597 uint64_t *gdev
= argptr
+ 8;
3600 *(uint32_t*)argptr
= tswap32(count
);
3601 for (i
= 0; i
< count
; i
++) {
3602 *gdev
= tswap64(*hdev
);
3608 case DM_LIST_VERSIONS
:
3610 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
3611 uint32_t remaining_data
= guest_data_size
;
3612 void *cur_data
= argptr
;
3613 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
3614 int vers_size
= thunk_type_size(arg_type
, 0);
3617 uint32_t next
= vers
->next
;
3619 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
3621 if (remaining_data
< vers
->next
) {
3622 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3625 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
3626 strcpy(cur_data
+ vers_size
, vers
->name
);
3627 cur_data
+= vers
->next
;
3628 remaining_data
-= vers
->next
;
3632 vers
= (void*)vers
+ next
;
3637 ret
= -TARGET_EINVAL
;
3640 unlock_user(argptr
, guest_data
, guest_data_size
);
3642 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3644 ret
= -TARGET_EFAULT
;
3647 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3648 unlock_user(argptr
, arg
, target_size
);
3655 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3656 int fd
, abi_long cmd
, abi_long arg
)
3658 const argtype
*arg_type
= ie
->arg_type
;
3659 const StructEntry
*se
;
3660 const argtype
*field_types
;
3661 const int *dst_offsets
, *src_offsets
;
3664 abi_ulong
*target_rt_dev_ptr
;
3665 unsigned long *host_rt_dev_ptr
;
3669 assert(ie
->access
== IOC_W
);
3670 assert(*arg_type
== TYPE_PTR
);
3672 assert(*arg_type
== TYPE_STRUCT
);
3673 target_size
= thunk_type_size(arg_type
, 0);
3674 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3676 return -TARGET_EFAULT
;
3679 assert(*arg_type
== (int)STRUCT_rtentry
);
3680 se
= struct_entries
+ *arg_type
++;
3681 assert(se
->convert
[0] == NULL
);
3682 /* convert struct here to be able to catch rt_dev string */
3683 field_types
= se
->field_types
;
3684 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
3685 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
3686 for (i
= 0; i
< se
->nb_fields
; i
++) {
3687 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
3688 assert(*field_types
== TYPE_PTRVOID
);
3689 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
3690 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
3691 if (*target_rt_dev_ptr
!= 0) {
3692 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
3693 tswapal(*target_rt_dev_ptr
));
3694 if (!*host_rt_dev_ptr
) {
3695 unlock_user(argptr
, arg
, 0);
3696 return -TARGET_EFAULT
;
3699 *host_rt_dev_ptr
= 0;
3704 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
3705 argptr
+ src_offsets
[i
],
3706 field_types
, THUNK_HOST
);
3708 unlock_user(argptr
, arg
, 0);
3710 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3711 if (*host_rt_dev_ptr
!= 0) {
3712 unlock_user((void *)*host_rt_dev_ptr
,
3713 *target_rt_dev_ptr
, 0);
3718 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3719 int fd
, abi_long cmd
, abi_long arg
)
3721 int sig
= target_to_host_signal(arg
);
3722 return get_errno(ioctl(fd
, ie
->host_cmd
, sig
));
3725 static IOCTLEntry ioctl_entries
[] = {
3726 #define IOCTL(cmd, access, ...) \
3727 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3728 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3729 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3734 /* ??? Implement proper locking for ioctls. */
3735 /* do_ioctl() Must return target values and target errnos. */
3736 static abi_long
do_ioctl(int fd
, abi_long cmd
, abi_long arg
)
3738 const IOCTLEntry
*ie
;
3739 const argtype
*arg_type
;
3741 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
3747 if (ie
->target_cmd
== 0) {
3748 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
3749 return -TARGET_ENOSYS
;
3751 if (ie
->target_cmd
== cmd
)
3755 arg_type
= ie
->arg_type
;
3757 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
3760 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
3763 switch(arg_type
[0]) {
3766 ret
= get_errno(ioctl(fd
, ie
->host_cmd
));
3771 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, arg
));
3775 target_size
= thunk_type_size(arg_type
, 0);
3776 switch(ie
->access
) {
3778 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3779 if (!is_error(ret
)) {
3780 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3782 return -TARGET_EFAULT
;
3783 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3784 unlock_user(argptr
, arg
, target_size
);
3788 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3790 return -TARGET_EFAULT
;
3791 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3792 unlock_user(argptr
, arg
, 0);
3793 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3797 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3799 return -TARGET_EFAULT
;
3800 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3801 unlock_user(argptr
, arg
, 0);
3802 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3803 if (!is_error(ret
)) {
3804 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3806 return -TARGET_EFAULT
;
3807 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3808 unlock_user(argptr
, arg
, target_size
);
3814 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3815 (long)cmd
, arg_type
[0]);
3816 ret
= -TARGET_ENOSYS
;
3822 static const bitmask_transtbl iflag_tbl
[] = {
3823 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
3824 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
3825 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
3826 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
3827 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
3828 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
3829 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
3830 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
3831 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
3832 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
3833 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
3834 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
3835 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
3836 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
3840 static const bitmask_transtbl oflag_tbl
[] = {
3841 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
3842 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
3843 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
3844 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
3845 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
3846 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
3847 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
3848 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
3849 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
3850 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
3851 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
3852 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
3853 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
3854 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
3855 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
3856 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
3857 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
3858 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
3859 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
3860 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
3861 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
3862 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
3863 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
3864 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
3868 static const bitmask_transtbl cflag_tbl
[] = {
3869 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
3870 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
3871 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
3872 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
3873 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
3874 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
3875 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
3876 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
3877 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
3878 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
3879 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
3880 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
3881 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
3882 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
3883 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
3884 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
3885 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
3886 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
3887 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
3888 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
3889 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
3890 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
3891 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
3892 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
3893 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
3894 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
3895 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
3896 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
3897 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
3898 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
3899 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
3903 static const bitmask_transtbl lflag_tbl
[] = {
3904 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
3905 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
3906 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
3907 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
3908 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
3909 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
3910 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
3911 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
3912 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
3913 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
3914 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
3915 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
3916 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
3917 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
3918 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
3922 static void target_to_host_termios (void *dst
, const void *src
)
3924 struct host_termios
*host
= dst
;
3925 const struct target_termios
*target
= src
;
3928 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
3930 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
3932 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
3934 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
3935 host
->c_line
= target
->c_line
;
3937 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
3938 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
3939 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
3940 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
3941 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
3942 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
3943 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
3944 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
3945 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
3946 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
3947 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
3948 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
3949 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
3950 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
3951 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
3952 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
3953 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
3954 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
3957 static void host_to_target_termios (void *dst
, const void *src
)
3959 struct target_termios
*target
= dst
;
3960 const struct host_termios
*host
= src
;
3963 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
3965 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
3967 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
3969 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
3970 target
->c_line
= host
->c_line
;
3972 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
3973 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
3974 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
3975 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
3976 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
3977 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
3978 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
3979 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
3980 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
3981 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
3982 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
3983 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
3984 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
3985 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
3986 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
3987 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
3988 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
3989 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
3992 static const StructEntry struct_termios_def
= {
3993 .convert
= { host_to_target_termios
, target_to_host_termios
},
3994 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
3995 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
3998 static bitmask_transtbl mmap_flags_tbl
[] = {
3999 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
4000 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
4001 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
4002 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
, MAP_ANONYMOUS
, MAP_ANONYMOUS
},
4003 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
, MAP_GROWSDOWN
, MAP_GROWSDOWN
},
4004 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
, MAP_DENYWRITE
, MAP_DENYWRITE
},
4005 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
, MAP_EXECUTABLE
, MAP_EXECUTABLE
},
4006 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
4007 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
, MAP_NORESERVE
,
4012 #if defined(TARGET_I386)
/* NOTE: there is really one LDT for all the threads */
static uint8_t *ldt_table;
4017 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
4024 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
4025 if (size
> bytecount
)
4027 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
4029 return -TARGET_EFAULT
;
4030 /* ??? Should this by byteswapped? */
4031 memcpy(p
, ldt_table
, size
);
4032 unlock_user(p
, ptr
, size
);
4036 /* XXX: add locking support */
4037 static abi_long
write_ldt(CPUX86State
*env
,
4038 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
4040 struct target_modify_ldt_ldt_s ldt_info
;
4041 struct target_modify_ldt_ldt_s
*target_ldt_info
;
4042 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
4043 int seg_not_present
, useable
, lm
;
4044 uint32_t *lp
, entry_1
, entry_2
;
4046 if (bytecount
!= sizeof(ldt_info
))
4047 return -TARGET_EINVAL
;
4048 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
4049 return -TARGET_EFAULT
;
4050 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
4051 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
4052 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
4053 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
4054 unlock_user_struct(target_ldt_info
, ptr
, 0);
4056 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
4057 return -TARGET_EINVAL
;
4058 seg_32bit
= ldt_info
.flags
& 1;
4059 contents
= (ldt_info
.flags
>> 1) & 3;
4060 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
4061 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
4062 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
4063 useable
= (ldt_info
.flags
>> 6) & 1;
4067 lm
= (ldt_info
.flags
>> 7) & 1;
4069 if (contents
== 3) {
4071 return -TARGET_EINVAL
;
4072 if (seg_not_present
== 0)
4073 return -TARGET_EINVAL
;
4075 /* allocate the LDT */
4077 env
->ldt
.base
= target_mmap(0,
4078 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
4079 PROT_READ
|PROT_WRITE
,
4080 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
4081 if (env
->ldt
.base
== -1)
4082 return -TARGET_ENOMEM
;
4083 memset(g2h(env
->ldt
.base
), 0,
4084 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
4085 env
->ldt
.limit
= 0xffff;
4086 ldt_table
= g2h(env
->ldt
.base
);
4089 /* NOTE: same code as Linux kernel */
4090 /* Allow LDTs to be cleared by the user. */
4091 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
4094 read_exec_only
== 1 &&
4096 limit_in_pages
== 0 &&
4097 seg_not_present
== 1 &&
4105 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
4106 (ldt_info
.limit
& 0x0ffff);
4107 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
4108 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
4109 (ldt_info
.limit
& 0xf0000) |
4110 ((read_exec_only
^ 1) << 9) |
4112 ((seg_not_present
^ 1) << 15) |
4114 (limit_in_pages
<< 23) |
4118 entry_2
|= (useable
<< 20);
4120 /* Install the new entry ... */
4122 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
4123 lp
[0] = tswap32(entry_1
);
4124 lp
[1] = tswap32(entry_2
);
4128 /* specific and weird i386 syscalls */
4129 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
4130 unsigned long bytecount
)
4136 ret
= read_ldt(ptr
, bytecount
);
4139 ret
= write_ldt(env
, ptr
, bytecount
, 1);
4142 ret
= write_ldt(env
, ptr
, bytecount
, 0);
4145 ret
= -TARGET_ENOSYS
;
4151 #if defined(TARGET_I386) && defined(TARGET_ABI32)
4152 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
4154 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
4155 struct target_modify_ldt_ldt_s ldt_info
;
4156 struct target_modify_ldt_ldt_s
*target_ldt_info
;
4157 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
4158 int seg_not_present
, useable
, lm
;
4159 uint32_t *lp
, entry_1
, entry_2
;
4162 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
4163 if (!target_ldt_info
)
4164 return -TARGET_EFAULT
;
4165 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
4166 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
4167 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
4168 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
4169 if (ldt_info
.entry_number
== -1) {
4170 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
4171 if (gdt_table
[i
] == 0) {
4172 ldt_info
.entry_number
= i
;
4173 target_ldt_info
->entry_number
= tswap32(i
);
4178 unlock_user_struct(target_ldt_info
, ptr
, 1);
4180 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
4181 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
4182 return -TARGET_EINVAL
;
4183 seg_32bit
= ldt_info
.flags
& 1;
4184 contents
= (ldt_info
.flags
>> 1) & 3;
4185 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
4186 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
4187 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
4188 useable
= (ldt_info
.flags
>> 6) & 1;
4192 lm
= (ldt_info
.flags
>> 7) & 1;
4195 if (contents
== 3) {
4196 if (seg_not_present
== 0)
4197 return -TARGET_EINVAL
;
4200 /* NOTE: same code as Linux kernel */
4201 /* Allow LDTs to be cleared by the user. */
4202 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
4203 if ((contents
== 0 &&
4204 read_exec_only
== 1 &&
4206 limit_in_pages
== 0 &&
4207 seg_not_present
== 1 &&
4215 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
4216 (ldt_info
.limit
& 0x0ffff);
4217 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
4218 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
4219 (ldt_info
.limit
& 0xf0000) |
4220 ((read_exec_only
^ 1) << 9) |
4222 ((seg_not_present
^ 1) << 15) |
4224 (limit_in_pages
<< 23) |
4229 /* Install the new entry ... */
4231 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
4232 lp
[0] = tswap32(entry_1
);
4233 lp
[1] = tswap32(entry_2
);
4237 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
4239 struct target_modify_ldt_ldt_s
*target_ldt_info
;
4240 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
4241 uint32_t base_addr
, limit
, flags
;
4242 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
4243 int seg_not_present
, useable
, lm
;
4244 uint32_t *lp
, entry_1
, entry_2
;
4246 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
4247 if (!target_ldt_info
)
4248 return -TARGET_EFAULT
;
4249 idx
= tswap32(target_ldt_info
->entry_number
);
4250 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
4251 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
4252 unlock_user_struct(target_ldt_info
, ptr
, 1);
4253 return -TARGET_EINVAL
;
4255 lp
= (uint32_t *)(gdt_table
+ idx
);
4256 entry_1
= tswap32(lp
[0]);
4257 entry_2
= tswap32(lp
[1]);
4259 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
4260 contents
= (entry_2
>> 10) & 3;
4261 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
4262 seg_32bit
= (entry_2
>> 22) & 1;
4263 limit_in_pages
= (entry_2
>> 23) & 1;
4264 useable
= (entry_2
>> 20) & 1;
4268 lm
= (entry_2
>> 21) & 1;
4270 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
4271 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
4272 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
4273 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
4274 base_addr
= (entry_1
>> 16) |
4275 (entry_2
& 0xff000000) |
4276 ((entry_2
& 0xff) << 16);
4277 target_ldt_info
->base_addr
= tswapal(base_addr
);
4278 target_ldt_info
->limit
= tswap32(limit
);
4279 target_ldt_info
->flags
= tswap32(flags
);
4280 unlock_user_struct(target_ldt_info
, ptr
, 1);
4283 #endif /* TARGET_I386 && TARGET_ABI32 */
4285 #ifndef TARGET_ABI32
4286 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
4293 case TARGET_ARCH_SET_GS
:
4294 case TARGET_ARCH_SET_FS
:
4295 if (code
== TARGET_ARCH_SET_GS
)
4299 cpu_x86_load_seg(env
, idx
, 0);
4300 env
->segs
[idx
].base
= addr
;
4302 case TARGET_ARCH_GET_GS
:
4303 case TARGET_ARCH_GET_FS
:
4304 if (code
== TARGET_ARCH_GET_GS
)
4308 val
= env
->segs
[idx
].base
;
4309 if (put_user(val
, addr
, abi_ulong
))
4310 ret
= -TARGET_EFAULT
;
4313 ret
= -TARGET_EINVAL
;
4320 #endif /* defined(TARGET_I386) */
4322 #define NEW_STACK_SIZE 0x40000
4325 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
4328 pthread_mutex_t mutex
;
4329 pthread_cond_t cond
;
4332 abi_ulong child_tidptr
;
4333 abi_ulong parent_tidptr
;
4337 static void *clone_func(void *arg
)
4339 new_thread_info
*info
= arg
;
4345 cpu
= ENV_GET_CPU(env
);
4347 ts
= (TaskState
*)cpu
->opaque
;
4348 info
->tid
= gettid();
4349 cpu
->host_tid
= info
->tid
;
4351 if (info
->child_tidptr
)
4352 put_user_u32(info
->tid
, info
->child_tidptr
);
4353 if (info
->parent_tidptr
)
4354 put_user_u32(info
->tid
, info
->parent_tidptr
);
4355 /* Enable signals. */
4356 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
4357 /* Signal to the parent that we're ready. */
4358 pthread_mutex_lock(&info
->mutex
);
4359 pthread_cond_broadcast(&info
->cond
);
4360 pthread_mutex_unlock(&info
->mutex
);
4361 /* Wait until the parent has finshed initializing the tls state. */
4362 pthread_mutex_lock(&clone_lock
);
4363 pthread_mutex_unlock(&clone_lock
);
4369 /* do_fork() Must return host values and target errnos (unlike most
4370 do_*() functions). */
4371 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
4372 abi_ulong parent_tidptr
, target_ulong newtls
,
4373 abi_ulong child_tidptr
)
4375 CPUState
*cpu
= ENV_GET_CPU(env
);
4379 CPUArchState
*new_env
;
4380 unsigned int nptl_flags
;
4383 /* Emulate vfork() with fork() */
4384 if (flags
& CLONE_VFORK
)
4385 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
4387 if (flags
& CLONE_VM
) {
4388 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
4389 new_thread_info info
;
4390 pthread_attr_t attr
;
4392 ts
= g_malloc0(sizeof(TaskState
));
4393 init_task_state(ts
);
4394 /* we create a new CPU instance. */
4395 new_env
= cpu_copy(env
);
4396 /* Init regs that differ from the parent. */
4397 cpu_clone_regs(new_env
, newsp
);
4398 new_cpu
= ENV_GET_CPU(new_env
);
4399 new_cpu
->opaque
= ts
;
4400 ts
->bprm
= parent_ts
->bprm
;
4401 ts
->info
= parent_ts
->info
;
4403 flags
&= ~CLONE_NPTL_FLAGS2
;
4405 if (nptl_flags
& CLONE_CHILD_CLEARTID
) {
4406 ts
->child_tidptr
= child_tidptr
;
4409 if (nptl_flags
& CLONE_SETTLS
)
4410 cpu_set_tls (new_env
, newtls
);
4412 /* Grab a mutex so that thread setup appears atomic. */
4413 pthread_mutex_lock(&clone_lock
);
4415 memset(&info
, 0, sizeof(info
));
4416 pthread_mutex_init(&info
.mutex
, NULL
);
4417 pthread_mutex_lock(&info
.mutex
);
4418 pthread_cond_init(&info
.cond
, NULL
);
4420 if (nptl_flags
& CLONE_CHILD_SETTID
)
4421 info
.child_tidptr
= child_tidptr
;
4422 if (nptl_flags
& CLONE_PARENT_SETTID
)
4423 info
.parent_tidptr
= parent_tidptr
;
4425 ret
= pthread_attr_init(&attr
);
4426 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
4427 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
4428 /* It is not safe to deliver signals until the child has finished
4429 initializing, so temporarily block all signals. */
4430 sigfillset(&sigmask
);
4431 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
4433 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
4434 /* TODO: Free new CPU state if thread creation failed. */
4436 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
4437 pthread_attr_destroy(&attr
);
4439 /* Wait for the child to initialize. */
4440 pthread_cond_wait(&info
.cond
, &info
.mutex
);
4442 if (flags
& CLONE_PARENT_SETTID
)
4443 put_user_u32(ret
, parent_tidptr
);
4447 pthread_mutex_unlock(&info
.mutex
);
4448 pthread_cond_destroy(&info
.cond
);
4449 pthread_mutex_destroy(&info
.mutex
);
4450 pthread_mutex_unlock(&clone_lock
);
4452 /* if no CLONE_VM, we consider it is a fork */
4453 if ((flags
& ~(CSIGNAL
| CLONE_NPTL_FLAGS2
)) != 0)
4458 /* Child Process. */
4459 cpu_clone_regs(env
, newsp
);
4461 /* There is a race condition here. The parent process could
4462 theoretically read the TID in the child process before the child
4463 tid is set. This would require using either ptrace
4464 (not implemented) or having *_tidptr to point at a shared memory
4465 mapping. We can't repeat the spinlock hack used above because
4466 the child process gets its own copy of the lock. */
4467 if (flags
& CLONE_CHILD_SETTID
)
4468 put_user_u32(gettid(), child_tidptr
);
4469 if (flags
& CLONE_PARENT_SETTID
)
4470 put_user_u32(gettid(), parent_tidptr
);
4471 ts
= (TaskState
*)cpu
->opaque
;
4472 if (flags
& CLONE_SETTLS
)
4473 cpu_set_tls (env
, newtls
);
4474 if (flags
& CLONE_CHILD_CLEARTID
)
4475 ts
->child_tidptr
= child_tidptr
;
4483 /* warning : doesn't handle linux specific flags... */
4484 static int target_to_host_fcntl_cmd(int cmd
)
4487 case TARGET_F_DUPFD
:
4488 case TARGET_F_GETFD
:
4489 case TARGET_F_SETFD
:
4490 case TARGET_F_GETFL
:
4491 case TARGET_F_SETFL
:
4493 case TARGET_F_GETLK
:
4495 case TARGET_F_SETLK
:
4497 case TARGET_F_SETLKW
:
4499 case TARGET_F_GETOWN
:
4501 case TARGET_F_SETOWN
:
4503 case TARGET_F_GETSIG
:
4505 case TARGET_F_SETSIG
:
4507 #if TARGET_ABI_BITS == 32
4508 case TARGET_F_GETLK64
:
4510 case TARGET_F_SETLK64
:
4512 case TARGET_F_SETLKW64
:
4515 case TARGET_F_SETLEASE
:
4517 case TARGET_F_GETLEASE
:
4519 #ifdef F_DUPFD_CLOEXEC
4520 case TARGET_F_DUPFD_CLOEXEC
:
4521 return F_DUPFD_CLOEXEC
;
4523 case TARGET_F_NOTIFY
:
4526 case TARGET_F_GETOWN_EX
:
4530 case TARGET_F_SETOWN_EX
:
4534 return -TARGET_EINVAL
;
4536 return -TARGET_EINVAL
;
4539 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
4540 static const bitmask_transtbl flock_tbl
[] = {
4541 TRANSTBL_CONVERT(F_RDLCK
),
4542 TRANSTBL_CONVERT(F_WRLCK
),
4543 TRANSTBL_CONVERT(F_UNLCK
),
4544 TRANSTBL_CONVERT(F_EXLCK
),
4545 TRANSTBL_CONVERT(F_SHLCK
),
4549 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
4552 struct target_flock
*target_fl
;
4553 struct flock64 fl64
;
4554 struct target_flock64
*target_fl64
;
4556 struct f_owner_ex fox
;
4557 struct target_f_owner_ex
*target_fox
;
4560 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
4562 if (host_cmd
== -TARGET_EINVAL
)
4566 case TARGET_F_GETLK
:
4567 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
4568 return -TARGET_EFAULT
;
4570 target_to_host_bitmask(tswap16(target_fl
->l_type
), flock_tbl
);
4571 fl
.l_whence
= tswap16(target_fl
->l_whence
);
4572 fl
.l_start
= tswapal(target_fl
->l_start
);
4573 fl
.l_len
= tswapal(target_fl
->l_len
);
4574 fl
.l_pid
= tswap32(target_fl
->l_pid
);
4575 unlock_user_struct(target_fl
, arg
, 0);
4576 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
4578 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg
, 0))
4579 return -TARGET_EFAULT
;
4581 host_to_target_bitmask(tswap16(fl
.l_type
), flock_tbl
);
4582 target_fl
->l_whence
= tswap16(fl
.l_whence
);
4583 target_fl
->l_start
= tswapal(fl
.l_start
);
4584 target_fl
->l_len
= tswapal(fl
.l_len
);
4585 target_fl
->l_pid
= tswap32(fl
.l_pid
);
4586 unlock_user_struct(target_fl
, arg
, 1);
4590 case TARGET_F_SETLK
:
4591 case TARGET_F_SETLKW
:
4592 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
4593 return -TARGET_EFAULT
;
4595 target_to_host_bitmask(tswap16(target_fl
->l_type
), flock_tbl
);
4596 fl
.l_whence
= tswap16(target_fl
->l_whence
);
4597 fl
.l_start
= tswapal(target_fl
->l_start
);
4598 fl
.l_len
= tswapal(target_fl
->l_len
);
4599 fl
.l_pid
= tswap32(target_fl
->l_pid
);
4600 unlock_user_struct(target_fl
, arg
, 0);
4601 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
4604 case TARGET_F_GETLK64
:
4605 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
4606 return -TARGET_EFAULT
;
4608 target_to_host_bitmask(tswap16(target_fl64
->l_type
), flock_tbl
) >> 1;
4609 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
4610 fl64
.l_start
= tswap64(target_fl64
->l_start
);
4611 fl64
.l_len
= tswap64(target_fl64
->l_len
);
4612 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
4613 unlock_user_struct(target_fl64
, arg
, 0);
4614 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
4616 if (!lock_user_struct(VERIFY_WRITE
, target_fl64
, arg
, 0))
4617 return -TARGET_EFAULT
;
4618 target_fl64
->l_type
=
4619 host_to_target_bitmask(tswap16(fl64
.l_type
), flock_tbl
) >> 1;
4620 target_fl64
->l_whence
= tswap16(fl64
.l_whence
);
4621 target_fl64
->l_start
= tswap64(fl64
.l_start
);
4622 target_fl64
->l_len
= tswap64(fl64
.l_len
);
4623 target_fl64
->l_pid
= tswap32(fl64
.l_pid
);
4624 unlock_user_struct(target_fl64
, arg
, 1);
4627 case TARGET_F_SETLK64
:
4628 case TARGET_F_SETLKW64
:
4629 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
4630 return -TARGET_EFAULT
;
4632 target_to_host_bitmask(tswap16(target_fl64
->l_type
), flock_tbl
) >> 1;
4633 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
4634 fl64
.l_start
= tswap64(target_fl64
->l_start
);
4635 fl64
.l_len
= tswap64(target_fl64
->l_len
);
4636 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
4637 unlock_user_struct(target_fl64
, arg
, 0);
4638 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
4641 case TARGET_F_GETFL
:
4642 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
4644 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
4648 case TARGET_F_SETFL
:
4649 ret
= get_errno(fcntl(fd
, host_cmd
, target_to_host_bitmask(arg
, fcntl_flags_tbl
)));
4653 case TARGET_F_GETOWN_EX
:
4654 ret
= get_errno(fcntl(fd
, host_cmd
, &fox
));
4656 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
4657 return -TARGET_EFAULT
;
4658 target_fox
->type
= tswap32(fox
.type
);
4659 target_fox
->pid
= tswap32(fox
.pid
);
4660 unlock_user_struct(target_fox
, arg
, 1);
4666 case TARGET_F_SETOWN_EX
:
4667 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
4668 return -TARGET_EFAULT
;
4669 fox
.type
= tswap32(target_fox
->type
);
4670 fox
.pid
= tswap32(target_fox
->pid
);
4671 unlock_user_struct(target_fox
, arg
, 0);
4672 ret
= get_errno(fcntl(fd
, host_cmd
, &fox
));
4676 case TARGET_F_SETOWN
:
4677 case TARGET_F_GETOWN
:
4678 case TARGET_F_SETSIG
:
4679 case TARGET_F_GETSIG
:
4680 case TARGET_F_SETLEASE
:
4681 case TARGET_F_GETLEASE
:
4682 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
4686 ret
= get_errno(fcntl(fd
, cmd
, arg
));
#ifdef USE_UID16

/* 16-bit uid/gid ABI helpers: clamp 32-bit host ids into the legacy
 * 16-bit range (65534 = overflow id), widen 16-bit guest ids back
 * (preserving the -1 "no change" sentinel), and swap/store in the
 * guest's 16-bit representation. */
static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}

static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}

static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}

static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}

static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */

/* 32-bit uid/gid ABI: all conversions are identity. */
static inline int high2lowuid(int uid)
{
    return uid;
}

static inline int high2lowgid(int gid)
{
    return gid;
}

static inline int low2highuid(int uid)
{
    return uid;
}

static inline int low2highgid(int gid)
{
    return gid;
}

static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
4758 void syscall_init(void)
4761 const argtype
*arg_type
;
4765 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4766 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4767 #include "syscall_types.h"
4769 #undef STRUCT_SPECIAL
4771 /* Build target_to_host_errno_table[] table from
4772 * host_to_target_errno_table[]. */
4773 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
4774 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
4777 /* we patch the ioctl size if necessary. We rely on the fact that
4778 no ioctl has all the bits at '1' in the size field */
4780 while (ie
->target_cmd
!= 0) {
4781 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
4782 TARGET_IOC_SIZEMASK
) {
4783 arg_type
= ie
->arg_type
;
4784 if (arg_type
[0] != TYPE_PTR
) {
4785 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
4790 size
= thunk_type_size(arg_type
, 0);
4791 ie
->target_cmd
= (ie
->target_cmd
&
4792 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
4793 (size
<< TARGET_IOC_SIZESHIFT
);
4796 /* automatic consistency check if same arch */
4797 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4798 (defined(__x86_64__) && defined(TARGET_X86_64))
4799 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
4800 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4801 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#if TARGET_ABI_BITS == 32
/* Join a 64-bit value passed as two 32-bit syscall arguments; word
 * order depends on guest endianness. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABI: the value already arrives in a single register. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* truncate64: some ABIs pad so the 64-bit length starts on an even
 * register pair; shift the argument window when so. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* ftruncate64: same even-register-pair alignment handling as
 * target_truncate64 above. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
4852 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
4853 abi_ulong target_addr
)
4855 struct target_timespec
*target_ts
;
4857 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
4858 return -TARGET_EFAULT
;
4859 host_ts
->tv_sec
= tswapal(target_ts
->tv_sec
);
4860 host_ts
->tv_nsec
= tswapal(target_ts
->tv_nsec
);
4861 unlock_user_struct(target_ts
, target_addr
, 0);
4865 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
4866 struct timespec
*host_ts
)
4868 struct target_timespec
*target_ts
;
4870 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
4871 return -TARGET_EFAULT
;
4872 target_ts
->tv_sec
= tswapal(host_ts
->tv_sec
);
4873 target_ts
->tv_nsec
= tswapal(host_ts
->tv_nsec
);
4874 unlock_user_struct(target_ts
, target_addr
, 1);
4878 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
4879 abi_ulong target_addr
)
4881 struct target_itimerspec
*target_itspec
;
4883 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
4884 return -TARGET_EFAULT
;
4887 host_itspec
->it_interval
.tv_sec
=
4888 tswapal(target_itspec
->it_interval
.tv_sec
);
4889 host_itspec
->it_interval
.tv_nsec
=
4890 tswapal(target_itspec
->it_interval
.tv_nsec
);
4891 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
4892 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
4894 unlock_user_struct(target_itspec
, target_addr
, 1);
4898 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
4899 struct itimerspec
*host_its
)
4901 struct target_itimerspec
*target_itspec
;
4903 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
4904 return -TARGET_EFAULT
;
4907 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
4908 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
4910 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
4911 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
4913 unlock_user_struct(target_itspec
, target_addr
, 0);
#if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
/* Convert a host struct stat into the guest's 64-bit stat layout at
 * 'target_addr'.  ARM EABI guests use a distinct padded layout
 * (target_eabi_stat64); everyone else uses target_stat64 or, when the
 * target lacks one, target_stat.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif
/*
 * do_futex(): forward a guest futex(2) call to the host.  For the WAIT
 * family the timeout is converted from guest to host timespec and the
 * compare value is byte-swapped (the kernel compares it against guest
 * memory); for REQUEUE-style ops the "timeout" argument is really a
 * uint32_t count and is passed through unswapped.  Unknown ops return
 * -TARGET_ENOSYS.
 *
 * NOTE(review): garbled extraction -- switch framing, several case
 * labels and branch bodies are missing lines.  Code left byte-identical.
 */
4982 /* ??? Using host futex calls even when target atomic operations
4983 are not really atomic probably breaks things. However implementing
4984 futexes locally would make futexes shared between multiple processes
4985 tricky. However they're probably useless because guest atomic
4986 operations won't work either. */
4987 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
4988 target_ulong uaddr2
, int val3
)
4990 struct timespec ts
, *pts
;
4993 /* ??? We assume FUTEX_* constants are the same on both host
4995 #ifdef FUTEX_CMD_MASK
4996 base_op
= op
& FUTEX_CMD_MASK
;
/* WAIT/WAIT_BITSET: convert the guest timespec, swap the compare value. */
5002 case FUTEX_WAIT_BITSET
:
5005 target_to_host_timespec(pts
, timeout
);
5009 return get_errno(sys_futex(g2h(uaddr
), op
, tswap32(val
),
5012 return get_errno(sys_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
5014 return get_errno(sys_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
5016 case FUTEX_CMP_REQUEUE
:
5018 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
5019 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
5020 But the prototype takes a `struct timespec *'; insert casts
5021 to satisfy the compiler. We do not need to tswap TIMEOUT
5022 since it's not compared to guest memory. */
5023 pts
= (struct timespec
*)(uintptr_t) timeout
;
5024 return get_errno(sys_futex(g2h(uaddr
), op
, val
, pts
,
5026 (base_op
== FUTEX_CMP_REQUEUE
/* Any op not handled above is unsupported. */
5030 return -TARGET_ENOSYS
;
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.
   NOTE(review): restores lines dropped by the garbled extraction (the
   stopped-status tail and the default return). */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Terminated by signal: the low 7 bits carry the host signal;
           replace them with the target's number, keep the other bits. */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* Stopped: byte 1 carries the host stop signal. */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    /* Exited normally: exit-status encoding is identical on all targets. */
    return status;
}
/*
 * open_self_cmdline(): emulate reading /proc/self/cmdline for the guest.
 * Reads the host file in chunks, skips the first NUL-terminated string
 * (which is the path to the qemu binary rather than the guest command)
 * and writes the remainder to fd.
 *
 * NOTE(review): garbled extraction -- the declarations of fd_orig, buf,
 * cp_buf and nb_read, the read loop framing, and several error checks
 * are missing lines.  Code left byte-identical.
 */
5048 static int open_self_cmdline(void *cpu_env
, int fd
)
5051 bool word_skipped
= false;
5053 fd_orig
= open("/proc/self/cmdline", O_RDONLY
);
5063 nb_read
= read(fd_orig
, buf
, sizeof(buf
));
5065 fd_orig
= close(fd_orig
);
5067 } else if (nb_read
== 0) {
5071 if (!word_skipped
) {
5072 /* Skip the first string, which is the path to qemu-*-static
5073 instead of the actual command. */
5074 cp_buf
= memchr(buf
, 0, sizeof(buf
));
5076 /* Null byte found, skip one string */
5078 nb_read
-= cp_buf
- buf
;
5079 word_skipped
= true;
5084 if (write(fd
, cp_buf
, nb_read
) != nb_read
) {
5090 return close(fd_orig
);
/*
 * open_self_maps(): emulate /proc/self/maps.  Parses the host maps file
 * with getline()/sscanf() and re-emits only those ranges that are valid
 * guest addresses, translated through h2g().  On targets where qemu
 * manages the guest stack itself (ARM/M68K/UNICORE32) a synthetic
 * "[stack]" line is appended from the TaskState image info; host
 * "[stack]" lines appear to be filtered out (the strncmp branch body is
 * not visible here).
 *
 * NOTE(review): garbled extraction -- the fopen error check, the
 * free(line)/fclose cleanup and the return are missing lines.  Code
 * left byte-identical.
 */
5093 static int open_self_maps(void *cpu_env
, int fd
)
5095 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
5096 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
5097 TaskState
*ts
= cpu
->opaque
;
5104 fp
= fopen("/proc/self/maps", "r");
5109 while ((read
= getline(&line
, &len
, fp
)) != -1) {
5110 int fields
, dev_maj
, dev_min
, inode
;
5111 uint64_t min
, max
, offset
;
5112 char flag_r
, flag_w
, flag_x
, flag_p
;
5113 char path
[512] = "";
5114 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
5115 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
5116 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
/* 10 fields = anonymous mapping (no path), 11 = with path. */
5118 if ((fields
< 10) || (fields
> 11)) {
5121 if (!strncmp(path
, "[stack]", 7)) {
/* Emit only ranges that map back into the guest address space. */
5124 if (h2g_valid(min
) && h2g_valid(max
)) {
5125 dprintf(fd
, TARGET_ABI_FMT_lx
"-" TARGET_ABI_FMT_lx
5126 " %c%c%c%c %08" PRIx64
" %02x:%02x %d %s%s\n",
5127 h2g(min
), h2g(max
), flag_r
, flag_w
,
5128 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
5129 path
[0] ? " " : "", path
);
/* Synthetic guest stack line for targets whose stack qemu allocates. */
5136 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
5137 dprintf(fd
, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n",
5138 (unsigned long long)ts
->info
->stack_limit
,
5139 (unsigned long long)(ts
->info
->start_stack
+
5140 (TARGET_PAGE_SIZE
- 1)) & TARGET_PAGE_MASK
,
5141 (unsigned long long)0);
/*
 * open_self_stat(): emulate /proc/self/stat.  Writes 44 space-separated
 * fields to fd: field 1 is the command name taken from the guest's
 * argv[0], field 27 is derived from the guest start-stack address, and
 * the remaining fields are written as "0" (the i==0 branch and the val
 * assignments are on lines missing from this extraction -- presumably
 * pid and start_stack; verify against upstream).
 *
 * NOTE(review): garbled extraction -- code left byte-identical.
 */
5147 static int open_self_stat(void *cpu_env
, int fd
)
5149 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
5150 TaskState
*ts
= cpu
->opaque
;
5151 abi_ulong start_stack
= ts
->info
->start_stack
;
/* One iteration per stat(5) field. */
5154 for (i
= 0; i
< 44; i
++) {
5162 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
5163 } else if (i
== 1) {
5165 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
5166 } else if (i
== 27) {
5169 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
5171 /* for the rest, there is MasterCard */
5172 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
/* Short write to the synthetic file is an error. */
5176 if (write(fd
, buf
, len
) != len
) {
/*
 * open_self_auxv(): emulate /proc/self/auxv.  The guest's auxiliary
 * vector was saved on the target stack at load time (TaskState
 * image info); lock that region, write it verbatim to fd, then rewind
 * fd to offset 0 so the caller reads from the start.
 *
 * NOTE(review): garbled extraction -- the lock_user error check, the
 * write-loop framing and the return are missing lines.  Code left
 * byte-identical.
 */
5184 static int open_self_auxv(void *cpu_env
, int fd
)
5186 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
5187 TaskState
*ts
= cpu
->opaque
;
5188 abi_ulong auxv
= ts
->info
->saved_auxv
;
5189 abi_ulong len
= ts
->info
->auxv_len
;
5193 * Auxiliary vector is stored in target process stack.
5194 * read in whole auxv vector and copy it to file
5196 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
5200 r
= write(fd
, ptr
, len
);
5207 lseek(fd
, 0, SEEK_SET
);
5208 unlock_user(ptr
, auxv
, len
);
/*
 * is_proc_myself(): return nonzero iff FILENAME refers to ENTRY inside
 * this process's own /proc directory, i.e. "/proc/self/<entry>" or
 * "/proc/<our pid>/<entry>".  Used to decide when a guest open() of a
 * /proc path must be intercepted and synthesized.
 *
 * NOTE(review): restores braces/returns dropped by the garbled
 * extraction; logic follows the visible lines.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            /* Numeric pid: only a match if it is OUR pid. */
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}
5238 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
5239 static int is_proc(const char *filename
, const char *entry
)
5241 return strcmp(filename
, entry
) == 0;
/*
 * open_net_route(): emulate /proc/net/route for cross-endian guests.
 * The header line is copied through unchanged; for each route entry the
 * 32-bit address fields (destination, gateway, mask) are byte-swapped
 * with tswap32() before being re-emitted, while counters and flags are
 * passed through as-is.
 *
 * NOTE(review): garbled extraction -- the declarations of fp, line,
 * len, read and iface, the fopen error check and cleanup are missing
 * lines.  Code left byte-identical.
 */
5244 static int open_net_route(void *cpu_env
, int fd
)
5251 fp
= fopen("/proc/net/route", "r");
/* Pass the header line through verbatim. */
5258 read
= getline(&line
, &len
, fp
);
5259 dprintf(fd
, "%s", line
);
5263 while ((read
= getline(&line
, &len
, fp
)) != -1) {
5265 uint32_t dest
, gw
, mask
;
5266 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
5267 sscanf(line
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5268 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
5269 &mask
, &mtu
, &window
, &irtt
);
5270 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5271 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
5272 metric
, tswap32(mask
), mtu
, window
, irtt
);
/*
 * do_open(): open(2) interposer for the guest.
 *   - "/proc/self/exe" (or "/proc/<pid>/exe") is redirected to the
 *     original guest executable (via AT_EXECFD or exec_path);
 *   - a table of faked /proc entries (maps, stat, auxv, cmdline, plus
 *     /proc/net/route on cross-endian hosts) is synthesized: a temp
 *     file is created with mkstemp(), filled by the matching
 *     open_self_*/open_net_* callback, rewound, and returned;
 *   - everything else falls through to the host open().
 *
 * NOTE(review): garbled extraction -- error checks, the temp-file
 * removal, some braces and returns are missing lines.  Code left
 * byte-identical.
 */
5282 static int do_open(void *cpu_env
, const char *pathname
, int flags
, mode_t mode
)
5285 const char *filename
;
5286 int (*fill
)(void *cpu_env
, int fd
);
5287 int (*cmp
)(const char *s1
, const char *s2
);
5289 const struct fake_open
*fake_open
;
5290 static const struct fake_open fakes
[] = {
5291 { "maps", open_self_maps
, is_proc_myself
},
5292 { "stat", open_self_stat
, is_proc_myself
},
5293 { "auxv", open_self_auxv
, is_proc_myself
},
5294 { "cmdline", open_self_cmdline
, is_proc_myself
},
5295 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
5296 { "/proc/net/route", open_net_route
, is_proc
},
5298 { NULL
, NULL
, NULL
}
/* Special-case the guest asking for its own executable. */
5301 if (is_proc_myself(pathname
, "exe")) {
5302 int execfd
= qemu_getauxval(AT_EXECFD
);
5303 return execfd
? execfd
: get_errno(open(exec_path
, flags
, mode
));
/* Table scan: each entry supplies its own comparison function. */
5306 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
5307 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
5312 if (fake_open
->filename
) {
5314 char filename
[PATH_MAX
];
5317 /* create temporary file to map stat to */
5318 tmpdir
= getenv("TMPDIR");
5321 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
5322 fd
= mkstemp(filename
);
/* Let the callback write the synthetic contents, then rewind. */
5328 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
5332 lseek(fd
, 0, SEEK_SET
);
/* Not a special path: hand off to the host. */
5337 return get_errno(open(path(pathname
), flags
, mode
));
5340 /* do_syscall() should always have a single exit point at the end so
5341 that actions, such as logging of syscall results, can be performed.
5342 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5343 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
5344 abi_long arg2
, abi_long arg3
, abi_long arg4
,
5345 abi_long arg5
, abi_long arg6
, abi_long arg7
,
5348 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
5355 gemu_log("syscall %d", num
);
5358 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
5361 case TARGET_NR_exit
:
5362 /* In old applications this may be used to implement _exit(2).
5363 However in threaded applictions it is used for thread termination,
5364 and _exit_group is used for application termination.
5365 Do thread termination if we have more then one thread. */
5366 /* FIXME: This probably breaks if a signal arrives. We should probably
5367 be disabling signals. */
5368 if (CPU_NEXT(first_cpu
)) {
5372 /* Remove the CPU from the list. */
5373 QTAILQ_REMOVE(&cpus
, cpu
, node
);
5376 if (ts
->child_tidptr
) {
5377 put_user_u32(0, ts
->child_tidptr
);
5378 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
5382 object_unref(OBJECT(cpu
));
5389 gdb_exit(cpu_env
, arg1
);
5391 ret
= 0; /* avoid warning */
5393 case TARGET_NR_read
:
5397 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
5399 ret
= get_errno(read(arg1
, p
, arg3
));
5400 unlock_user(p
, arg2
, ret
);
5403 case TARGET_NR_write
:
5404 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
5406 ret
= get_errno(write(arg1
, p
, arg3
));
5407 unlock_user(p
, arg2
, 0);
5409 case TARGET_NR_open
:
5410 if (!(p
= lock_user_string(arg1
)))
5412 ret
= get_errno(do_open(cpu_env
, p
,
5413 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
5415 unlock_user(p
, arg1
, 0);
5417 #if defined(TARGET_NR_openat) && defined(__NR_openat)
5418 case TARGET_NR_openat
:
5419 if (!(p
= lock_user_string(arg2
)))
5421 ret
= get_errno(sys_openat(arg1
,
5423 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
5425 unlock_user(p
, arg2
, 0);
5428 case TARGET_NR_close
:
5429 ret
= get_errno(close(arg1
));
5434 case TARGET_NR_fork
:
5435 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
5437 #ifdef TARGET_NR_waitpid
5438 case TARGET_NR_waitpid
:
5441 ret
= get_errno(waitpid(arg1
, &status
, arg3
));
5442 if (!is_error(ret
) && arg2
&& ret
5443 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
5448 #ifdef TARGET_NR_waitid
5449 case TARGET_NR_waitid
:
5453 ret
= get_errno(waitid(arg1
, arg2
, &info
, arg4
));
5454 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
5455 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
5457 host_to_target_siginfo(p
, &info
);
5458 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
5463 #ifdef TARGET_NR_creat /* not on alpha */
5464 case TARGET_NR_creat
:
5465 if (!(p
= lock_user_string(arg1
)))
5467 ret
= get_errno(creat(p
, arg2
));
5468 unlock_user(p
, arg1
, 0);
5471 case TARGET_NR_link
:
5474 p
= lock_user_string(arg1
);
5475 p2
= lock_user_string(arg2
);
5477 ret
= -TARGET_EFAULT
;
5479 ret
= get_errno(link(p
, p2
));
5480 unlock_user(p2
, arg2
, 0);
5481 unlock_user(p
, arg1
, 0);
5484 #if defined(TARGET_NR_linkat)
5485 case TARGET_NR_linkat
:
5490 p
= lock_user_string(arg2
);
5491 p2
= lock_user_string(arg4
);
5493 ret
= -TARGET_EFAULT
;
5495 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
5496 unlock_user(p
, arg2
, 0);
5497 unlock_user(p2
, arg4
, 0);
5501 case TARGET_NR_unlink
:
5502 if (!(p
= lock_user_string(arg1
)))
5504 ret
= get_errno(unlink(p
));
5505 unlock_user(p
, arg1
, 0);
5507 #if defined(TARGET_NR_unlinkat)
5508 case TARGET_NR_unlinkat
:
5509 if (!(p
= lock_user_string(arg2
)))
5511 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
5512 unlock_user(p
, arg2
, 0);
5515 case TARGET_NR_execve
:
5517 char **argp
, **envp
;
5520 abi_ulong guest_argp
;
5521 abi_ulong guest_envp
;
5528 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
5529 if (get_user_ual(addr
, gp
))
5537 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
5538 if (get_user_ual(addr
, gp
))
5545 argp
= alloca((argc
+ 1) * sizeof(void *));
5546 envp
= alloca((envc
+ 1) * sizeof(void *));
5548 for (gp
= guest_argp
, q
= argp
; gp
;
5549 gp
+= sizeof(abi_ulong
), q
++) {
5550 if (get_user_ual(addr
, gp
))
5554 if (!(*q
= lock_user_string(addr
)))
5556 total_size
+= strlen(*q
) + 1;
5560 for (gp
= guest_envp
, q
= envp
; gp
;
5561 gp
+= sizeof(abi_ulong
), q
++) {
5562 if (get_user_ual(addr
, gp
))
5566 if (!(*q
= lock_user_string(addr
)))
5568 total_size
+= strlen(*q
) + 1;
5572 /* This case will not be caught by the host's execve() if its
5573 page size is bigger than the target's. */
5574 if (total_size
> MAX_ARG_PAGES
* TARGET_PAGE_SIZE
) {
5575 ret
= -TARGET_E2BIG
;
5578 if (!(p
= lock_user_string(arg1
)))
5580 ret
= get_errno(execve(p
, argp
, envp
));
5581 unlock_user(p
, arg1
, 0);
5586 ret
= -TARGET_EFAULT
;
5589 for (gp
= guest_argp
, q
= argp
; *q
;
5590 gp
+= sizeof(abi_ulong
), q
++) {
5591 if (get_user_ual(addr
, gp
)
5594 unlock_user(*q
, addr
, 0);
5596 for (gp
= guest_envp
, q
= envp
; *q
;
5597 gp
+= sizeof(abi_ulong
), q
++) {
5598 if (get_user_ual(addr
, gp
)
5601 unlock_user(*q
, addr
, 0);
5605 case TARGET_NR_chdir
:
5606 if (!(p
= lock_user_string(arg1
)))
5608 ret
= get_errno(chdir(p
));
5609 unlock_user(p
, arg1
, 0);
5611 #ifdef TARGET_NR_time
5612 case TARGET_NR_time
:
5615 ret
= get_errno(time(&host_time
));
5618 && put_user_sal(host_time
, arg1
))
5623 case TARGET_NR_mknod
:
5624 if (!(p
= lock_user_string(arg1
)))
5626 ret
= get_errno(mknod(p
, arg2
, arg3
));
5627 unlock_user(p
, arg1
, 0);
5629 #if defined(TARGET_NR_mknodat)
5630 case TARGET_NR_mknodat
:
5631 if (!(p
= lock_user_string(arg2
)))
5633 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
5634 unlock_user(p
, arg2
, 0);
5637 case TARGET_NR_chmod
:
5638 if (!(p
= lock_user_string(arg1
)))
5640 ret
= get_errno(chmod(p
, arg2
));
5641 unlock_user(p
, arg1
, 0);
5643 #ifdef TARGET_NR_break
5644 case TARGET_NR_break
:
5647 #ifdef TARGET_NR_oldstat
5648 case TARGET_NR_oldstat
:
5651 case TARGET_NR_lseek
:
5652 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
5654 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5655 /* Alpha specific */
5656 case TARGET_NR_getxpid
:
5657 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
5658 ret
= get_errno(getpid());
5661 #ifdef TARGET_NR_getpid
5662 case TARGET_NR_getpid
:
5663 ret
= get_errno(getpid());
5666 case TARGET_NR_mount
:
5668 /* need to look at the data field */
5672 p
= lock_user_string(arg1
);
5680 p2
= lock_user_string(arg2
);
5683 unlock_user(p
, arg1
, 0);
5689 p3
= lock_user_string(arg3
);
5692 unlock_user(p
, arg1
, 0);
5694 unlock_user(p2
, arg2
, 0);
5701 /* FIXME - arg5 should be locked, but it isn't clear how to
5702 * do that since it's not guaranteed to be a NULL-terminated
5706 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
5708 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
5710 ret
= get_errno(ret
);
5713 unlock_user(p
, arg1
, 0);
5715 unlock_user(p2
, arg2
, 0);
5717 unlock_user(p3
, arg3
, 0);
5721 #ifdef TARGET_NR_umount
5722 case TARGET_NR_umount
:
5723 if (!(p
= lock_user_string(arg1
)))
5725 ret
= get_errno(umount(p
));
5726 unlock_user(p
, arg1
, 0);
5729 #ifdef TARGET_NR_stime /* not on alpha */
5730 case TARGET_NR_stime
:
5733 if (get_user_sal(host_time
, arg1
))
5735 ret
= get_errno(stime(&host_time
));
5739 case TARGET_NR_ptrace
:
5741 #ifdef TARGET_NR_alarm /* not on alpha */
5742 case TARGET_NR_alarm
:
5746 #ifdef TARGET_NR_oldfstat
5747 case TARGET_NR_oldfstat
:
5750 #ifdef TARGET_NR_pause /* not on alpha */
5751 case TARGET_NR_pause
:
5752 ret
= get_errno(pause());
5755 #ifdef TARGET_NR_utime
5756 case TARGET_NR_utime
:
5758 struct utimbuf tbuf
, *host_tbuf
;
5759 struct target_utimbuf
*target_tbuf
;
5761 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
5763 tbuf
.actime
= tswapal(target_tbuf
->actime
);
5764 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
5765 unlock_user_struct(target_tbuf
, arg2
, 0);
5770 if (!(p
= lock_user_string(arg1
)))
5772 ret
= get_errno(utime(p
, host_tbuf
));
5773 unlock_user(p
, arg1
, 0);
5777 case TARGET_NR_utimes
:
5779 struct timeval
*tvp
, tv
[2];
5781 if (copy_from_user_timeval(&tv
[0], arg2
)
5782 || copy_from_user_timeval(&tv
[1],
5783 arg2
+ sizeof(struct target_timeval
)))
5789 if (!(p
= lock_user_string(arg1
)))
5791 ret
= get_errno(utimes(p
, tvp
));
5792 unlock_user(p
, arg1
, 0);
5795 #if defined(TARGET_NR_futimesat)
5796 case TARGET_NR_futimesat
:
5798 struct timeval
*tvp
, tv
[2];
5800 if (copy_from_user_timeval(&tv
[0], arg3
)
5801 || copy_from_user_timeval(&tv
[1],
5802 arg3
+ sizeof(struct target_timeval
)))
5808 if (!(p
= lock_user_string(arg2
)))
5810 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
5811 unlock_user(p
, arg2
, 0);
5815 #ifdef TARGET_NR_stty
5816 case TARGET_NR_stty
:
5819 #ifdef TARGET_NR_gtty
5820 case TARGET_NR_gtty
:
5823 case TARGET_NR_access
:
5824 if (!(p
= lock_user_string(arg1
)))
5826 ret
= get_errno(access(path(p
), arg2
));
5827 unlock_user(p
, arg1
, 0);
5829 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5830 case TARGET_NR_faccessat
:
5831 if (!(p
= lock_user_string(arg2
)))
5833 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
5834 unlock_user(p
, arg2
, 0);
5837 #ifdef TARGET_NR_nice /* not on alpha */
5838 case TARGET_NR_nice
:
5839 ret
= get_errno(nice(arg1
));
5842 #ifdef TARGET_NR_ftime
5843 case TARGET_NR_ftime
:
5846 case TARGET_NR_sync
:
5850 case TARGET_NR_kill
:
5851 ret
= get_errno(kill(arg1
, target_to_host_signal(arg2
)));
5853 case TARGET_NR_rename
:
5856 p
= lock_user_string(arg1
);
5857 p2
= lock_user_string(arg2
);
5859 ret
= -TARGET_EFAULT
;
5861 ret
= get_errno(rename(p
, p2
));
5862 unlock_user(p2
, arg2
, 0);
5863 unlock_user(p
, arg1
, 0);
5866 #if defined(TARGET_NR_renameat)
5867 case TARGET_NR_renameat
:
5870 p
= lock_user_string(arg2
);
5871 p2
= lock_user_string(arg4
);
5873 ret
= -TARGET_EFAULT
;
5875 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
5876 unlock_user(p2
, arg4
, 0);
5877 unlock_user(p
, arg2
, 0);
5881 case TARGET_NR_mkdir
:
5882 if (!(p
= lock_user_string(arg1
)))
5884 ret
= get_errno(mkdir(p
, arg2
));
5885 unlock_user(p
, arg1
, 0);
5887 #if defined(TARGET_NR_mkdirat)
5888 case TARGET_NR_mkdirat
:
5889 if (!(p
= lock_user_string(arg2
)))
5891 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
5892 unlock_user(p
, arg2
, 0);
5895 case TARGET_NR_rmdir
:
5896 if (!(p
= lock_user_string(arg1
)))
5898 ret
= get_errno(rmdir(p
));
5899 unlock_user(p
, arg1
, 0);
5902 ret
= get_errno(dup(arg1
));
5904 case TARGET_NR_pipe
:
5905 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
5907 #ifdef TARGET_NR_pipe2
5908 case TARGET_NR_pipe2
:
5909 ret
= do_pipe(cpu_env
, arg1
,
5910 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
5913 case TARGET_NR_times
:
5915 struct target_tms
*tmsp
;
5917 ret
= get_errno(times(&tms
));
5919 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
5922 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
5923 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
5924 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
5925 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
5928 ret
= host_to_target_clock_t(ret
);
5931 #ifdef TARGET_NR_prof
5932 case TARGET_NR_prof
:
5935 #ifdef TARGET_NR_signal
5936 case TARGET_NR_signal
:
5939 case TARGET_NR_acct
:
5941 ret
= get_errno(acct(NULL
));
5943 if (!(p
= lock_user_string(arg1
)))
5945 ret
= get_errno(acct(path(p
)));
5946 unlock_user(p
, arg1
, 0);
5949 #ifdef TARGET_NR_umount2
5950 case TARGET_NR_umount2
:
5951 if (!(p
= lock_user_string(arg1
)))
5953 ret
= get_errno(umount2(p
, arg2
));
5954 unlock_user(p
, arg1
, 0);
5957 #ifdef TARGET_NR_lock
5958 case TARGET_NR_lock
:
5961 case TARGET_NR_ioctl
:
5962 ret
= do_ioctl(arg1
, arg2
, arg3
);
5964 case TARGET_NR_fcntl
:
5965 ret
= do_fcntl(arg1
, arg2
, arg3
);
5967 #ifdef TARGET_NR_mpx
5971 case TARGET_NR_setpgid
:
5972 ret
= get_errno(setpgid(arg1
, arg2
));
5974 #ifdef TARGET_NR_ulimit
5975 case TARGET_NR_ulimit
:
5978 #ifdef TARGET_NR_oldolduname
5979 case TARGET_NR_oldolduname
:
5982 case TARGET_NR_umask
:
5983 ret
= get_errno(umask(arg1
));
5985 case TARGET_NR_chroot
:
5986 if (!(p
= lock_user_string(arg1
)))
5988 ret
= get_errno(chroot(p
));
5989 unlock_user(p
, arg1
, 0);
5991 case TARGET_NR_ustat
:
5993 case TARGET_NR_dup2
:
5994 ret
= get_errno(dup2(arg1
, arg2
));
5996 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5997 case TARGET_NR_dup3
:
5998 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
6001 #ifdef TARGET_NR_getppid /* not on alpha */
6002 case TARGET_NR_getppid
:
6003 ret
= get_errno(getppid());
6006 case TARGET_NR_getpgrp
:
6007 ret
= get_errno(getpgrp());
6009 case TARGET_NR_setsid
:
6010 ret
= get_errno(setsid());
6012 #ifdef TARGET_NR_sigaction
6013 case TARGET_NR_sigaction
:
6015 #if defined(TARGET_ALPHA)
6016 struct target_sigaction act
, oact
, *pact
= 0;
6017 struct target_old_sigaction
*old_act
;
6019 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
6021 act
._sa_handler
= old_act
->_sa_handler
;
6022 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
6023 act
.sa_flags
= old_act
->sa_flags
;
6024 act
.sa_restorer
= 0;
6025 unlock_user_struct(old_act
, arg2
, 0);
6028 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
6029 if (!is_error(ret
) && arg3
) {
6030 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
6032 old_act
->_sa_handler
= oact
._sa_handler
;
6033 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
6034 old_act
->sa_flags
= oact
.sa_flags
;
6035 unlock_user_struct(old_act
, arg3
, 1);
6037 #elif defined(TARGET_MIPS)
6038 struct target_sigaction act
, oact
, *pact
, *old_act
;
6041 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
6043 act
._sa_handler
= old_act
->_sa_handler
;
6044 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
6045 act
.sa_flags
= old_act
->sa_flags
;
6046 unlock_user_struct(old_act
, arg2
, 0);
6052 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
6054 if (!is_error(ret
) && arg3
) {
6055 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
6057 old_act
->_sa_handler
= oact
._sa_handler
;
6058 old_act
->sa_flags
= oact
.sa_flags
;
6059 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
6060 old_act
->sa_mask
.sig
[1] = 0;
6061 old_act
->sa_mask
.sig
[2] = 0;
6062 old_act
->sa_mask
.sig
[3] = 0;
6063 unlock_user_struct(old_act
, arg3
, 1);
6066 struct target_old_sigaction
*old_act
;
6067 struct target_sigaction act
, oact
, *pact
;
6069 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
6071 act
._sa_handler
= old_act
->_sa_handler
;
6072 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
6073 act
.sa_flags
= old_act
->sa_flags
;
6074 act
.sa_restorer
= old_act
->sa_restorer
;
6075 unlock_user_struct(old_act
, arg2
, 0);
6080 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
6081 if (!is_error(ret
) && arg3
) {
6082 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
6084 old_act
->_sa_handler
= oact
._sa_handler
;
6085 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
6086 old_act
->sa_flags
= oact
.sa_flags
;
6087 old_act
->sa_restorer
= oact
.sa_restorer
;
6088 unlock_user_struct(old_act
, arg3
, 1);
6094 case TARGET_NR_rt_sigaction
:
6096 #if defined(TARGET_ALPHA)
6097 struct target_sigaction act
, oact
, *pact
= 0;
6098 struct target_rt_sigaction
*rt_act
;
6099 /* ??? arg4 == sizeof(sigset_t). */
6101 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
6103 act
._sa_handler
= rt_act
->_sa_handler
;
6104 act
.sa_mask
= rt_act
->sa_mask
;
6105 act
.sa_flags
= rt_act
->sa_flags
;
6106 act
.sa_restorer
= arg5
;
6107 unlock_user_struct(rt_act
, arg2
, 0);
6110 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
6111 if (!is_error(ret
) && arg3
) {
6112 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
6114 rt_act
->_sa_handler
= oact
._sa_handler
;
6115 rt_act
->sa_mask
= oact
.sa_mask
;
6116 rt_act
->sa_flags
= oact
.sa_flags
;
6117 unlock_user_struct(rt_act
, arg3
, 1);
6120 struct target_sigaction
*act
;
6121 struct target_sigaction
*oact
;
6124 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
6129 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
6130 ret
= -TARGET_EFAULT
;
6131 goto rt_sigaction_fail
;
6135 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
6138 unlock_user_struct(act
, arg2
, 0);
6140 unlock_user_struct(oact
, arg3
, 1);
6144 #ifdef TARGET_NR_sgetmask /* not on alpha */
6145 case TARGET_NR_sgetmask
:
6148 abi_ulong target_set
;
6149 do_sigprocmask(0, NULL
, &cur_set
);
6150 host_to_target_old_sigset(&target_set
, &cur_set
);
6155 #ifdef TARGET_NR_ssetmask /* not on alpha */
6156 case TARGET_NR_ssetmask
:
6158 sigset_t set
, oset
, cur_set
;
6159 abi_ulong target_set
= arg1
;
6160 do_sigprocmask(0, NULL
, &cur_set
);
6161 target_to_host_old_sigset(&set
, &target_set
);
6162 sigorset(&set
, &set
, &cur_set
);
6163 do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
6164 host_to_target_old_sigset(&target_set
, &oset
);
6169 #ifdef TARGET_NR_sigprocmask
6170 case TARGET_NR_sigprocmask
:
6172 #if defined(TARGET_ALPHA)
6173 sigset_t set
, oldset
;
6178 case TARGET_SIG_BLOCK
:
6181 case TARGET_SIG_UNBLOCK
:
6184 case TARGET_SIG_SETMASK
:
6188 ret
= -TARGET_EINVAL
;
6192 target_to_host_old_sigset(&set
, &mask
);
6194 ret
= get_errno(do_sigprocmask(how
, &set
, &oldset
));
6195 if (!is_error(ret
)) {
6196 host_to_target_old_sigset(&mask
, &oldset
);
6198 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
6201 sigset_t set
, oldset
, *set_ptr
;
6206 case TARGET_SIG_BLOCK
:
6209 case TARGET_SIG_UNBLOCK
:
6212 case TARGET_SIG_SETMASK
:
6216 ret
= -TARGET_EINVAL
;
6219 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
6221 target_to_host_old_sigset(&set
, p
);
6222 unlock_user(p
, arg2
, 0);
6228 ret
= get_errno(do_sigprocmask(how
, set_ptr
, &oldset
));
6229 if (!is_error(ret
) && arg3
) {
6230 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
6232 host_to_target_old_sigset(p
, &oldset
);
6233 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
6239 case TARGET_NR_rt_sigprocmask
:
6242 sigset_t set
, oldset
, *set_ptr
;
6246 case TARGET_SIG_BLOCK
:
6249 case TARGET_SIG_UNBLOCK
:
6252 case TARGET_SIG_SETMASK
:
6256 ret
= -TARGET_EINVAL
;
6259 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
6261 target_to_host_sigset(&set
, p
);
6262 unlock_user(p
, arg2
, 0);
6268 ret
= get_errno(do_sigprocmask(how
, set_ptr
, &oldset
));
6269 if (!is_error(ret
) && arg3
) {
6270 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
6272 host_to_target_sigset(p
, &oldset
);
6273 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
6277 #ifdef TARGET_NR_sigpending
6278 case TARGET_NR_sigpending
:
6281 ret
= get_errno(sigpending(&set
));
6282 if (!is_error(ret
)) {
6283 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
6285 host_to_target_old_sigset(p
, &set
);
6286 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
6291 case TARGET_NR_rt_sigpending
:
6294 ret
= get_errno(sigpending(&set
));
6295 if (!is_error(ret
)) {
6296 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
6298 host_to_target_sigset(p
, &set
);
6299 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
6303 #ifdef TARGET_NR_sigsuspend
6304 case TARGET_NR_sigsuspend
:
6307 #if defined(TARGET_ALPHA)
6308 abi_ulong mask
= arg1
;
6309 target_to_host_old_sigset(&set
, &mask
);
6311 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
6313 target_to_host_old_sigset(&set
, p
);
6314 unlock_user(p
, arg1
, 0);
6316 ret
= get_errno(sigsuspend(&set
));
6320 case TARGET_NR_rt_sigsuspend
:
6323 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
6325 target_to_host_sigset(&set
, p
);
6326 unlock_user(p
, arg1
, 0);
6327 ret
= get_errno(sigsuspend(&set
));
6330 case TARGET_NR_rt_sigtimedwait
:
6333 struct timespec uts
, *puts
;
6336 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
6338 target_to_host_sigset(&set
, p
);
6339 unlock_user(p
, arg1
, 0);
6342 target_to_host_timespec(puts
, arg3
);
6346 ret
= get_errno(sigtimedwait(&set
, &uinfo
, puts
));
6347 if (!is_error(ret
)) {
6349 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
6354 host_to_target_siginfo(p
, &uinfo
);
6355 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
6357 ret
= host_to_target_signal(ret
);
6361 case TARGET_NR_rt_sigqueueinfo
:
6364 if (!(p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_sigset_t
), 1)))
6366 target_to_host_siginfo(&uinfo
, p
);
6367 unlock_user(p
, arg1
, 0);
6368 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
6371 #ifdef TARGET_NR_sigreturn
6372 case TARGET_NR_sigreturn
:
6373 /* NOTE: ret is eax, so not transcoding must be done */
6374 ret
= do_sigreturn(cpu_env
);
6377 case TARGET_NR_rt_sigreturn
:
6378 /* NOTE: ret is eax, so not transcoding must be done */
6379 ret
= do_rt_sigreturn(cpu_env
);
6381 case TARGET_NR_sethostname
:
6382 if (!(p
= lock_user_string(arg1
)))
6384 ret
= get_errno(sethostname(p
, arg2
));
6385 unlock_user(p
, arg1
, 0);
6387 case TARGET_NR_setrlimit
:
6389 int resource
= target_to_host_resource(arg1
);
6390 struct target_rlimit
*target_rlim
;
6392 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
6394 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
6395 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
6396 unlock_user_struct(target_rlim
, arg2
, 0);
6397 ret
= get_errno(setrlimit(resource
, &rlim
));
6400 case TARGET_NR_getrlimit
:
6402 int resource
= target_to_host_resource(arg1
);
6403 struct target_rlimit
*target_rlim
;
6406 ret
= get_errno(getrlimit(resource
, &rlim
));
6407 if (!is_error(ret
)) {
6408 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
6410 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
6411 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
6412 unlock_user_struct(target_rlim
, arg2
, 1);
6416 case TARGET_NR_getrusage
:
6418 struct rusage rusage
;
6419 ret
= get_errno(getrusage(arg1
, &rusage
));
6420 if (!is_error(ret
)) {
6421 ret
= host_to_target_rusage(arg2
, &rusage
);
6425 case TARGET_NR_gettimeofday
:
6428 ret
= get_errno(gettimeofday(&tv
, NULL
));
6429 if (!is_error(ret
)) {
6430 if (copy_to_user_timeval(arg1
, &tv
))
6435 case TARGET_NR_settimeofday
:
6437 struct timeval tv
, *ptv
= NULL
;
6438 struct timezone tz
, *ptz
= NULL
;
6441 if (copy_from_user_timeval(&tv
, arg1
)) {
6448 if (copy_from_user_timezone(&tz
, arg2
)) {
6454 ret
= get_errno(settimeofday(ptv
, ptz
));
6457 #if defined(TARGET_NR_select)
6458 case TARGET_NR_select
:
6459 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
6460 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
6463 struct target_sel_arg_struct
*sel
;
6464 abi_ulong inp
, outp
, exp
, tvp
;
6467 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1))
6469 nsel
= tswapal(sel
->n
);
6470 inp
= tswapal(sel
->inp
);
6471 outp
= tswapal(sel
->outp
);
6472 exp
= tswapal(sel
->exp
);
6473 tvp
= tswapal(sel
->tvp
);
6474 unlock_user_struct(sel
, arg1
, 0);
6475 ret
= do_select(nsel
, inp
, outp
, exp
, tvp
);
6480 #ifdef TARGET_NR_pselect6
6481 case TARGET_NR_pselect6
:
6483 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
6484 fd_set rfds
, wfds
, efds
;
6485 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
6486 struct timespec ts
, *ts_ptr
;
6489 * The 6th arg is actually two args smashed together,
6490 * so we cannot use the C library.
6498 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
6499 target_sigset_t
*target_sigset
;
6507 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
6511 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
6515 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
6521 * This takes a timespec, and not a timeval, so we cannot
6522 * use the do_select() helper ...
6525 if (target_to_host_timespec(&ts
, ts_addr
)) {
6533 /* Extract the two packed args for the sigset */
6536 sig
.size
= _NSIG
/ 8;
6538 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
6542 arg_sigset
= tswapal(arg7
[0]);
6543 arg_sigsize
= tswapal(arg7
[1]);
6544 unlock_user(arg7
, arg6
, 0);
6548 if (arg_sigsize
!= sizeof(*target_sigset
)) {
6549 /* Like the kernel, we enforce correct size sigsets */
6550 ret
= -TARGET_EINVAL
;
6553 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
6554 sizeof(*target_sigset
), 1);
6555 if (!target_sigset
) {
6558 target_to_host_sigset(&set
, target_sigset
);
6559 unlock_user(target_sigset
, arg_sigset
, 0);
6567 ret
= get_errno(sys_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
6570 if (!is_error(ret
)) {
6571 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
6573 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
6575 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
6578 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
6584 case TARGET_NR_symlink
:
6587 p
= lock_user_string(arg1
);
6588 p2
= lock_user_string(arg2
);
6590 ret
= -TARGET_EFAULT
;
6592 ret
= get_errno(symlink(p
, p2
));
6593 unlock_user(p2
, arg2
, 0);
6594 unlock_user(p
, arg1
, 0);
6597 #if defined(TARGET_NR_symlinkat)
6598 case TARGET_NR_symlinkat
:
6601 p
= lock_user_string(arg1
);
6602 p2
= lock_user_string(arg3
);
6604 ret
= -TARGET_EFAULT
;
6606 ret
= get_errno(symlinkat(p
, arg2
, p2
));
6607 unlock_user(p2
, arg3
, 0);
6608 unlock_user(p
, arg1
, 0);
6612 #ifdef TARGET_NR_oldlstat
6613 case TARGET_NR_oldlstat
:
6616 case TARGET_NR_readlink
:
6619 p
= lock_user_string(arg1
);
6620 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
6622 ret
= -TARGET_EFAULT
;
6623 } else if (is_proc_myself((const char *)p
, "exe")) {
6624 char real
[PATH_MAX
], *temp
;
6625 temp
= realpath(exec_path
, real
);
6626 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
6627 snprintf((char *)p2
, arg3
, "%s", real
);
6629 ret
= get_errno(readlink(path(p
), p2
, arg3
));
6631 unlock_user(p2
, arg2
, ret
);
6632 unlock_user(p
, arg1
, 0);
6635 #if defined(TARGET_NR_readlinkat)
6636 case TARGET_NR_readlinkat
:
6639 p
= lock_user_string(arg2
);
6640 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
6642 ret
= -TARGET_EFAULT
;
6643 } else if (is_proc_myself((const char *)p
, "exe")) {
6644 char real
[PATH_MAX
], *temp
;
6645 temp
= realpath(exec_path
, real
);
6646 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
6647 snprintf((char *)p2
, arg4
, "%s", real
);
6649 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
6651 unlock_user(p2
, arg3
, ret
);
6652 unlock_user(p
, arg2
, 0);
6656 #ifdef TARGET_NR_uselib
6657 case TARGET_NR_uselib
:
6660 #ifdef TARGET_NR_swapon
6661 case TARGET_NR_swapon
:
6662 if (!(p
= lock_user_string(arg1
)))
6664 ret
= get_errno(swapon(p
, arg2
));
6665 unlock_user(p
, arg1
, 0);
6668 case TARGET_NR_reboot
:
6669 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
6670 /* arg4 must be ignored in all other cases */
6671 p
= lock_user_string(arg4
);
6675 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
6676 unlock_user(p
, arg4
, 0);
6678 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
6681 #ifdef TARGET_NR_readdir
6682 case TARGET_NR_readdir
:
6685 #ifdef TARGET_NR_mmap
6686 case TARGET_NR_mmap
:
6687 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6688 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
6689 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
6690 || defined(TARGET_S390X)
6693 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
6694 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
6702 unlock_user(v
, arg1
, 0);
6703 ret
= get_errno(target_mmap(v1
, v2
, v3
,
6704 target_to_host_bitmask(v4
, mmap_flags_tbl
),
6708 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
6709 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
6715 #ifdef TARGET_NR_mmap2
6716 case TARGET_NR_mmap2
:
6718 #define MMAP_SHIFT 12
6720 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
6721 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
6723 arg6
<< MMAP_SHIFT
));
6726 case TARGET_NR_munmap
:
6727 ret
= get_errno(target_munmap(arg1
, arg2
));
6729 case TARGET_NR_mprotect
:
6731 TaskState
*ts
= cpu
->opaque
;
6732 /* Special hack to detect libc making the stack executable. */
6733 if ((arg3
& PROT_GROWSDOWN
)
6734 && arg1
>= ts
->info
->stack_limit
6735 && arg1
<= ts
->info
->start_stack
) {
6736 arg3
&= ~PROT_GROWSDOWN
;
6737 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
6738 arg1
= ts
->info
->stack_limit
;
6741 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
6743 #ifdef TARGET_NR_mremap
6744 case TARGET_NR_mremap
:
6745 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
6748 /* ??? msync/mlock/munlock are broken for softmmu. */
6749 #ifdef TARGET_NR_msync
6750 case TARGET_NR_msync
:
6751 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
6754 #ifdef TARGET_NR_mlock
6755 case TARGET_NR_mlock
:
6756 ret
= get_errno(mlock(g2h(arg1
), arg2
));
6759 #ifdef TARGET_NR_munlock
6760 case TARGET_NR_munlock
:
6761 ret
= get_errno(munlock(g2h(arg1
), arg2
));
6764 #ifdef TARGET_NR_mlockall
6765 case TARGET_NR_mlockall
:
6766 ret
= get_errno(mlockall(arg1
));
6769 #ifdef TARGET_NR_munlockall
6770 case TARGET_NR_munlockall
:
6771 ret
= get_errno(munlockall());
6774 case TARGET_NR_truncate
:
6775 if (!(p
= lock_user_string(arg1
)))
6777 ret
= get_errno(truncate(p
, arg2
));
6778 unlock_user(p
, arg1
, 0);
6780 case TARGET_NR_ftruncate
:
6781 ret
= get_errno(ftruncate(arg1
, arg2
));
6783 case TARGET_NR_fchmod
:
6784 ret
= get_errno(fchmod(arg1
, arg2
));
6786 #if defined(TARGET_NR_fchmodat)
6787 case TARGET_NR_fchmodat
:
6788 if (!(p
= lock_user_string(arg2
)))
6790 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
6791 unlock_user(p
, arg2
, 0);
6794 case TARGET_NR_getpriority
:
6795 /* Note that negative values are valid for getpriority, so we must
6796 differentiate based on errno settings. */
6798 ret
= getpriority(arg1
, arg2
);
6799 if (ret
== -1 && errno
!= 0) {
6800 ret
= -host_to_target_errno(errno
);
6804 /* Return value is the unbiased priority. Signal no error. */
6805 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
6807 /* Return value is a biased priority to avoid negative numbers. */
6811 case TARGET_NR_setpriority
:
6812 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
6814 #ifdef TARGET_NR_profil
6815 case TARGET_NR_profil
:
6818 case TARGET_NR_statfs
:
6819 if (!(p
= lock_user_string(arg1
)))
6821 ret
= get_errno(statfs(path(p
), &stfs
));
6822 unlock_user(p
, arg1
, 0);
6824 if (!is_error(ret
)) {
6825 struct target_statfs
*target_stfs
;
6827 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
6829 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
6830 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
6831 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
6832 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
6833 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
6834 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
6835 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
6836 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
6837 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
6838 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
6839 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
6840 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
6841 unlock_user_struct(target_stfs
, arg2
, 1);
6844 case TARGET_NR_fstatfs
:
6845 ret
= get_errno(fstatfs(arg1
, &stfs
));
6846 goto convert_statfs
;
6847 #ifdef TARGET_NR_statfs64
6848 case TARGET_NR_statfs64
:
6849 if (!(p
= lock_user_string(arg1
)))
6851 ret
= get_errno(statfs(path(p
), &stfs
));
6852 unlock_user(p
, arg1
, 0);
6854 if (!is_error(ret
)) {
6855 struct target_statfs64
*target_stfs
;
6857 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
6859 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
6860 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
6861 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
6862 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
6863 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
6864 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
6865 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
6866 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
6867 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
6868 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
6869 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
6870 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
6871 unlock_user_struct(target_stfs
, arg3
, 1);
6874 case TARGET_NR_fstatfs64
:
6875 ret
= get_errno(fstatfs(arg1
, &stfs
));
6876 goto convert_statfs64
;
6878 #ifdef TARGET_NR_ioperm
6879 case TARGET_NR_ioperm
:
6882 #ifdef TARGET_NR_socketcall
6883 case TARGET_NR_socketcall
:
6884 ret
= do_socketcall(arg1
, arg2
);
6887 #ifdef TARGET_NR_accept
6888 case TARGET_NR_accept
:
6889 ret
= do_accept4(arg1
, arg2
, arg3
, 0);
6892 #ifdef TARGET_NR_accept4
6893 case TARGET_NR_accept4
:
6894 #ifdef CONFIG_ACCEPT4
6895 ret
= do_accept4(arg1
, arg2
, arg3
, arg4
);
6901 #ifdef TARGET_NR_bind
6902 case TARGET_NR_bind
:
6903 ret
= do_bind(arg1
, arg2
, arg3
);
6906 #ifdef TARGET_NR_connect
6907 case TARGET_NR_connect
:
6908 ret
= do_connect(arg1
, arg2
, arg3
);
6911 #ifdef TARGET_NR_getpeername
6912 case TARGET_NR_getpeername
:
6913 ret
= do_getpeername(arg1
, arg2
, arg3
);
6916 #ifdef TARGET_NR_getsockname
6917 case TARGET_NR_getsockname
:
6918 ret
= do_getsockname(arg1
, arg2
, arg3
);
6921 #ifdef TARGET_NR_getsockopt
6922 case TARGET_NR_getsockopt
:
6923 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
6926 #ifdef TARGET_NR_listen
6927 case TARGET_NR_listen
:
6928 ret
= get_errno(listen(arg1
, arg2
));
6931 #ifdef TARGET_NR_recv
6932 case TARGET_NR_recv
:
6933 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
6936 #ifdef TARGET_NR_recvfrom
6937 case TARGET_NR_recvfrom
:
6938 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6941 #ifdef TARGET_NR_recvmsg
6942 case TARGET_NR_recvmsg
:
6943 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
6946 #ifdef TARGET_NR_send
6947 case TARGET_NR_send
:
6948 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
6951 #ifdef TARGET_NR_sendmsg
6952 case TARGET_NR_sendmsg
:
6953 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
6956 #ifdef TARGET_NR_sendmmsg
6957 case TARGET_NR_sendmmsg
:
6958 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
6960 case TARGET_NR_recvmmsg
:
6961 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
6964 #ifdef TARGET_NR_sendto
6965 case TARGET_NR_sendto
:
6966 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6969 #ifdef TARGET_NR_shutdown
6970 case TARGET_NR_shutdown
:
6971 ret
= get_errno(shutdown(arg1
, arg2
));
6974 #ifdef TARGET_NR_socket
6975 case TARGET_NR_socket
:
6976 ret
= do_socket(arg1
, arg2
, arg3
);
6979 #ifdef TARGET_NR_socketpair
6980 case TARGET_NR_socketpair
:
6981 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
6984 #ifdef TARGET_NR_setsockopt
6985 case TARGET_NR_setsockopt
:
6986 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
6990 case TARGET_NR_syslog
:
6991 if (!(p
= lock_user_string(arg2
)))
6993 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
6994 unlock_user(p
, arg2
, 0);
6997 case TARGET_NR_setitimer
:
6999 struct itimerval value
, ovalue
, *pvalue
;
7003 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
7004 || copy_from_user_timeval(&pvalue
->it_value
,
7005 arg2
+ sizeof(struct target_timeval
)))
7010 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
7011 if (!is_error(ret
) && arg3
) {
7012 if (copy_to_user_timeval(arg3
,
7013 &ovalue
.it_interval
)
7014 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
7020 case TARGET_NR_getitimer
:
7022 struct itimerval value
;
7024 ret
= get_errno(getitimer(arg1
, &value
));
7025 if (!is_error(ret
) && arg2
) {
7026 if (copy_to_user_timeval(arg2
,
7028 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
7034 case TARGET_NR_stat
:
7035 if (!(p
= lock_user_string(arg1
)))
7037 ret
= get_errno(stat(path(p
), &st
));
7038 unlock_user(p
, arg1
, 0);
7040 case TARGET_NR_lstat
:
7041 if (!(p
= lock_user_string(arg1
)))
7043 ret
= get_errno(lstat(path(p
), &st
));
7044 unlock_user(p
, arg1
, 0);
7046 case TARGET_NR_fstat
:
7048 ret
= get_errno(fstat(arg1
, &st
));
7050 if (!is_error(ret
)) {
7051 struct target_stat
*target_st
;
7053 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
7055 memset(target_st
, 0, sizeof(*target_st
));
7056 __put_user(st
.st_dev
, &target_st
->st_dev
);
7057 __put_user(st
.st_ino
, &target_st
->st_ino
);
7058 __put_user(st
.st_mode
, &target_st
->st_mode
);
7059 __put_user(st
.st_uid
, &target_st
->st_uid
);
7060 __put_user(st
.st_gid
, &target_st
->st_gid
);
7061 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
7062 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
7063 __put_user(st
.st_size
, &target_st
->st_size
);
7064 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
7065 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
7066 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
7067 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
7068 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
7069 unlock_user_struct(target_st
, arg2
, 1);
7073 #ifdef TARGET_NR_olduname
7074 case TARGET_NR_olduname
:
7077 #ifdef TARGET_NR_iopl
7078 case TARGET_NR_iopl
:
7081 case TARGET_NR_vhangup
:
7082 ret
= get_errno(vhangup());
7084 #ifdef TARGET_NR_idle
7085 case TARGET_NR_idle
:
7088 #ifdef TARGET_NR_syscall
7089 case TARGET_NR_syscall
:
7090 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
7091 arg6
, arg7
, arg8
, 0);
7094 case TARGET_NR_wait4
:
7097 abi_long status_ptr
= arg2
;
7098 struct rusage rusage
, *rusage_ptr
;
7099 abi_ulong target_rusage
= arg4
;
7100 abi_long rusage_err
;
7102 rusage_ptr
= &rusage
;
7105 ret
= get_errno(wait4(arg1
, &status
, arg3
, rusage_ptr
));
7106 if (!is_error(ret
)) {
7107 if (status_ptr
&& ret
) {
7108 status
= host_to_target_waitstatus(status
);
7109 if (put_user_s32(status
, status_ptr
))
7112 if (target_rusage
) {
7113 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
7121 #ifdef TARGET_NR_swapoff
7122 case TARGET_NR_swapoff
:
7123 if (!(p
= lock_user_string(arg1
)))
7125 ret
= get_errno(swapoff(p
));
7126 unlock_user(p
, arg1
, 0);
7129 case TARGET_NR_sysinfo
:
7131 struct target_sysinfo
*target_value
;
7132 struct sysinfo value
;
7133 ret
= get_errno(sysinfo(&value
));
7134 if (!is_error(ret
) && arg1
)
7136 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
7138 __put_user(value
.uptime
, &target_value
->uptime
);
7139 __put_user(value
.loads
[0], &target_value
->loads
[0]);
7140 __put_user(value
.loads
[1], &target_value
->loads
[1]);
7141 __put_user(value
.loads
[2], &target_value
->loads
[2]);
7142 __put_user(value
.totalram
, &target_value
->totalram
);
7143 __put_user(value
.freeram
, &target_value
->freeram
);
7144 __put_user(value
.sharedram
, &target_value
->sharedram
);
7145 __put_user(value
.bufferram
, &target_value
->bufferram
);
7146 __put_user(value
.totalswap
, &target_value
->totalswap
);
7147 __put_user(value
.freeswap
, &target_value
->freeswap
);
7148 __put_user(value
.procs
, &target_value
->procs
);
7149 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
7150 __put_user(value
.freehigh
, &target_value
->freehigh
);
7151 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
7152 unlock_user_struct(target_value
, arg1
, 1);
7156 #ifdef TARGET_NR_ipc
7158 ret
= do_ipc(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7161 #ifdef TARGET_NR_semget
7162 case TARGET_NR_semget
:
7163 ret
= get_errno(semget(arg1
, arg2
, arg3
));
7166 #ifdef TARGET_NR_semop
7167 case TARGET_NR_semop
:
7168 ret
= do_semop(arg1
, arg2
, arg3
);
7171 #ifdef TARGET_NR_semctl
7172 case TARGET_NR_semctl
:
7173 ret
= do_semctl(arg1
, arg2
, arg3
, (union target_semun
)(abi_ulong
)arg4
);
7176 #ifdef TARGET_NR_msgctl
7177 case TARGET_NR_msgctl
:
7178 ret
= do_msgctl(arg1
, arg2
, arg3
);
7181 #ifdef TARGET_NR_msgget
7182 case TARGET_NR_msgget
:
7183 ret
= get_errno(msgget(arg1
, arg2
));
7186 #ifdef TARGET_NR_msgrcv
7187 case TARGET_NR_msgrcv
:
7188 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
7191 #ifdef TARGET_NR_msgsnd
7192 case TARGET_NR_msgsnd
:
7193 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
7196 #ifdef TARGET_NR_shmget
7197 case TARGET_NR_shmget
:
7198 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
7201 #ifdef TARGET_NR_shmctl
7202 case TARGET_NR_shmctl
:
7203 ret
= do_shmctl(arg1
, arg2
, arg3
);
7206 #ifdef TARGET_NR_shmat
7207 case TARGET_NR_shmat
:
7208 ret
= do_shmat(arg1
, arg2
, arg3
);
7211 #ifdef TARGET_NR_shmdt
7212 case TARGET_NR_shmdt
:
7213 ret
= do_shmdt(arg1
);
7216 case TARGET_NR_fsync
:
7217 ret
= get_errno(fsync(arg1
));
7219 case TARGET_NR_clone
:
7220 /* Linux manages to have three different orderings for its
7221 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
7222 * match the kernel's CONFIG_CLONE_* settings.
7223 * Microblaze is further special in that it uses a sixth
7224 * implicit argument to clone for the TLS pointer.
7226 #if defined(TARGET_MICROBLAZE)
7227 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
7228 #elif defined(TARGET_CLONE_BACKWARDS)
7229 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
7230 #elif defined(TARGET_CLONE_BACKWARDS2)
7231 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
7233 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
7236 #ifdef __NR_exit_group
7237 /* new thread calls */
7238 case TARGET_NR_exit_group
:
7242 gdb_exit(cpu_env
, arg1
);
7243 ret
= get_errno(exit_group(arg1
));
7246 case TARGET_NR_setdomainname
:
7247 if (!(p
= lock_user_string(arg1
)))
7249 ret
= get_errno(setdomainname(p
, arg2
));
7250 unlock_user(p
, arg1
, 0);
7252 case TARGET_NR_uname
:
7253 /* no need to transcode because we use the linux syscall */
7255 struct new_utsname
* buf
;
7257 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
7259 ret
= get_errno(sys_uname(buf
));
7260 if (!is_error(ret
)) {
7261 /* Overwrite the native machine name with whatever is being
7263 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
7264 /* Allow the user to override the reported release. */
7265 if (qemu_uname_release
&& *qemu_uname_release
)
7266 strcpy (buf
->release
, qemu_uname_release
);
7268 unlock_user_struct(buf
, arg1
, 1);
7272 case TARGET_NR_modify_ldt
:
7273 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
7275 #if !defined(TARGET_X86_64)
7276 case TARGET_NR_vm86old
:
7278 case TARGET_NR_vm86
:
7279 ret
= do_vm86(cpu_env
, arg1
, arg2
);
7283 case TARGET_NR_adjtimex
:
7285 #ifdef TARGET_NR_create_module
7286 case TARGET_NR_create_module
:
7288 case TARGET_NR_init_module
:
7289 case TARGET_NR_delete_module
:
7290 #ifdef TARGET_NR_get_kernel_syms
7291 case TARGET_NR_get_kernel_syms
:
7294 case TARGET_NR_quotactl
:
7296 case TARGET_NR_getpgid
:
7297 ret
= get_errno(getpgid(arg1
));
7299 case TARGET_NR_fchdir
:
7300 ret
= get_errno(fchdir(arg1
));
7302 #ifdef TARGET_NR_bdflush /* not on x86_64 */
7303 case TARGET_NR_bdflush
:
7306 #ifdef TARGET_NR_sysfs
7307 case TARGET_NR_sysfs
:
7310 case TARGET_NR_personality
:
7311 ret
= get_errno(personality(arg1
));
7313 #ifdef TARGET_NR_afs_syscall
7314 case TARGET_NR_afs_syscall
:
7317 #ifdef TARGET_NR__llseek /* Not on alpha */
7318 case TARGET_NR__llseek
:
7321 #if !defined(__NR_llseek)
7322 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | arg3
, arg5
);
7324 ret
= get_errno(res
);
7329 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
7331 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
7337 case TARGET_NR_getdents
:
7338 #ifdef __NR_getdents
7339 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
7341 struct target_dirent
*target_dirp
;
7342 struct linux_dirent
*dirp
;
7343 abi_long count
= arg3
;
7345 dirp
= malloc(count
);
7347 ret
= -TARGET_ENOMEM
;
7351 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
7352 if (!is_error(ret
)) {
7353 struct linux_dirent
*de
;
7354 struct target_dirent
*tde
;
7356 int reclen
, treclen
;
7357 int count1
, tnamelen
;
7361 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
7365 reclen
= de
->d_reclen
;
7366 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
7367 assert(tnamelen
>= 0);
7368 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
7369 assert(count1
+ treclen
<= count
);
7370 tde
->d_reclen
= tswap16(treclen
);
7371 tde
->d_ino
= tswapal(de
->d_ino
);
7372 tde
->d_off
= tswapal(de
->d_off
);
7373 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
7374 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
7376 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
7380 unlock_user(target_dirp
, arg2
, ret
);
7386 struct linux_dirent
*dirp
;
7387 abi_long count
= arg3
;
7389 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
7391 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
7392 if (!is_error(ret
)) {
7393 struct linux_dirent
*de
;
7398 reclen
= de
->d_reclen
;
7401 de
->d_reclen
= tswap16(reclen
);
7402 tswapls(&de
->d_ino
);
7403 tswapls(&de
->d_off
);
7404 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
7408 unlock_user(dirp
, arg2
, ret
);
7412 /* Implement getdents in terms of getdents64 */
7414 struct linux_dirent64
*dirp
;
7415 abi_long count
= arg3
;
7417 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
7421 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
7422 if (!is_error(ret
)) {
7423 /* Convert the dirent64 structs to target dirent. We do this
7424 * in-place, since we can guarantee that a target_dirent is no
7425 * larger than a dirent64; however this means we have to be
7426 * careful to read everything before writing in the new format.
7428 struct linux_dirent64
*de
;
7429 struct target_dirent
*tde
;
7434 tde
= (struct target_dirent
*)dirp
;
7436 int namelen
, treclen
;
7437 int reclen
= de
->d_reclen
;
7438 uint64_t ino
= de
->d_ino
;
7439 int64_t off
= de
->d_off
;
7440 uint8_t type
= de
->d_type
;
7442 namelen
= strlen(de
->d_name
);
7443 treclen
= offsetof(struct target_dirent
, d_name
)
7445 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
7447 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
7448 tde
->d_ino
= tswapal(ino
);
7449 tde
->d_off
= tswapal(off
);
7450 tde
->d_reclen
= tswap16(treclen
);
7451 /* The target_dirent type is in what was formerly a padding
7452 * byte at the end of the structure:
7454 *(((char *)tde
) + treclen
- 1) = type
;
7456 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
7457 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
7463 unlock_user(dirp
, arg2
, ret
);
7467 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
7468 case TARGET_NR_getdents64
:
7470 struct linux_dirent64
*dirp
;
7471 abi_long count
= arg3
;
7472 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
7474 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
7475 if (!is_error(ret
)) {
7476 struct linux_dirent64
*de
;
7481 reclen
= de
->d_reclen
;
7484 de
->d_reclen
= tswap16(reclen
);
7485 tswap64s((uint64_t *)&de
->d_ino
);
7486 tswap64s((uint64_t *)&de
->d_off
);
7487 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
7491 unlock_user(dirp
, arg2
, ret
);
7494 #endif /* TARGET_NR_getdents64 */
7495 #if defined(TARGET_NR__newselect)
7496 case TARGET_NR__newselect
:
7497 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
7500 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
7501 # ifdef TARGET_NR_poll
7502 case TARGET_NR_poll
:
7504 # ifdef TARGET_NR_ppoll
7505 case TARGET_NR_ppoll
:
7508 struct target_pollfd
*target_pfd
;
7509 unsigned int nfds
= arg2
;
7514 target_pfd
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_pollfd
) * nfds
, 1);
7518 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
7519 for(i
= 0; i
< nfds
; i
++) {
7520 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
7521 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
7524 # ifdef TARGET_NR_ppoll
7525 if (num
== TARGET_NR_ppoll
) {
7526 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
7527 target_sigset_t
*target_set
;
7528 sigset_t _set
, *set
= &_set
;
7531 if (target_to_host_timespec(timeout_ts
, arg3
)) {
7532 unlock_user(target_pfd
, arg1
, 0);
7540 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
7542 unlock_user(target_pfd
, arg1
, 0);
7545 target_to_host_sigset(set
, target_set
);
7550 ret
= get_errno(sys_ppoll(pfd
, nfds
, timeout_ts
, set
, _NSIG
/8));
7552 if (!is_error(ret
) && arg3
) {
7553 host_to_target_timespec(arg3
, timeout_ts
);
7556 unlock_user(target_set
, arg4
, 0);
7560 ret
= get_errno(poll(pfd
, nfds
, timeout
));
7562 if (!is_error(ret
)) {
7563 for(i
= 0; i
< nfds
; i
++) {
7564 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
7567 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
7571 case TARGET_NR_flock
:
7572 /* NOTE: the flock constant seems to be the same for every
7574 ret
= get_errno(flock(arg1
, arg2
));
7576 case TARGET_NR_readv
:
7578 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
7580 ret
= get_errno(readv(arg1
, vec
, arg3
));
7581 unlock_iovec(vec
, arg2
, arg3
, 1);
7583 ret
= -host_to_target_errno(errno
);
7587 case TARGET_NR_writev
:
7589 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
7591 ret
= get_errno(writev(arg1
, vec
, arg3
));
7592 unlock_iovec(vec
, arg2
, arg3
, 0);
7594 ret
= -host_to_target_errno(errno
);
7598 case TARGET_NR_getsid
:
7599 ret
= get_errno(getsid(arg1
));
7601 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
7602 case TARGET_NR_fdatasync
:
7603 ret
= get_errno(fdatasync(arg1
));
7606 case TARGET_NR__sysctl
:
7607 /* We don't implement this, but ENOTDIR is always a safe
7609 ret
= -TARGET_ENOTDIR
;
7611 case TARGET_NR_sched_getaffinity
:
7613 unsigned int mask_size
;
7614 unsigned long *mask
;
7617 * sched_getaffinity needs multiples of ulong, so need to take
7618 * care of mismatches between target ulong and host ulong sizes.
7620 if (arg2
& (sizeof(abi_ulong
) - 1)) {
7621 ret
= -TARGET_EINVAL
;
7624 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
7626 mask
= alloca(mask_size
);
7627 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
7629 if (!is_error(ret
)) {
7631 /* More data returned than will fit in the caller's buffer.
7632 * This only happens if sizeof(abi_long) < sizeof(long)
7633 * and the caller passed us a buffer holding an odd number
7634 * of abi_longs. If the host kernel is actually using the
7635 * extra 4 bytes then fail EINVAL; otherwise we can just
7636 * ignore them and only copy the interesting part.
7638 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
7639 if (numcpus
> arg2
* 8) {
7640 ret
= -TARGET_EINVAL
;
7646 if (copy_to_user(arg3
, mask
, ret
)) {
7652 case TARGET_NR_sched_setaffinity
:
7654 unsigned int mask_size
;
7655 unsigned long *mask
;
7658 * sched_setaffinity needs multiples of ulong, so need to take
7659 * care of mismatches between target ulong and host ulong sizes.
7661 if (arg2
& (sizeof(abi_ulong
) - 1)) {
7662 ret
= -TARGET_EINVAL
;
7665 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
7667 mask
= alloca(mask_size
);
7668 if (!lock_user_struct(VERIFY_READ
, p
, arg3
, 1)) {
7671 memcpy(mask
, p
, arg2
);
7672 unlock_user_struct(p
, arg2
, 0);
7674 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
7677 case TARGET_NR_sched_setparam
:
7679 struct sched_param
*target_schp
;
7680 struct sched_param schp
;
7682 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
7684 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
7685 unlock_user_struct(target_schp
, arg2
, 0);
7686 ret
= get_errno(sched_setparam(arg1
, &schp
));
7689 case TARGET_NR_sched_getparam
:
7691 struct sched_param
*target_schp
;
7692 struct sched_param schp
;
7693 ret
= get_errno(sched_getparam(arg1
, &schp
));
7694 if (!is_error(ret
)) {
7695 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
7697 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
7698 unlock_user_struct(target_schp
, arg2
, 1);
7702 case TARGET_NR_sched_setscheduler
:
7704 struct sched_param
*target_schp
;
7705 struct sched_param schp
;
7706 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
7708 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
7709 unlock_user_struct(target_schp
, arg3
, 0);
7710 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
7713 case TARGET_NR_sched_getscheduler
:
7714 ret
= get_errno(sched_getscheduler(arg1
));
7716 case TARGET_NR_sched_yield
:
7717 ret
= get_errno(sched_yield());
7719 case TARGET_NR_sched_get_priority_max
:
7720 ret
= get_errno(sched_get_priority_max(arg1
));
7722 case TARGET_NR_sched_get_priority_min
:
7723 ret
= get_errno(sched_get_priority_min(arg1
));
7725 case TARGET_NR_sched_rr_get_interval
:
7728 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
7729 if (!is_error(ret
)) {
7730 host_to_target_timespec(arg2
, &ts
);
7734 case TARGET_NR_nanosleep
:
7736 struct timespec req
, rem
;
7737 target_to_host_timespec(&req
, arg1
);
7738 ret
= get_errno(nanosleep(&req
, &rem
));
7739 if (is_error(ret
) && arg2
) {
7740 host_to_target_timespec(arg2
, &rem
);
7744 #ifdef TARGET_NR_query_module
7745 case TARGET_NR_query_module
:
7748 #ifdef TARGET_NR_nfsservctl
7749 case TARGET_NR_nfsservctl
:
7752 case TARGET_NR_prctl
:
7754 case PR_GET_PDEATHSIG
:
7757 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
7758 if (!is_error(ret
) && arg2
7759 && put_user_ual(deathsig
, arg2
)) {
7767 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
7771 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
7773 unlock_user(name
, arg2
, 16);
7778 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
7782 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
7784 unlock_user(name
, arg2
, 0);
7789 /* Most prctl options have no pointer arguments */
7790 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
7794 #ifdef TARGET_NR_arch_prctl
7795 case TARGET_NR_arch_prctl
:
7796 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
7797 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
7803 #ifdef TARGET_NR_pread64
7804 case TARGET_NR_pread64
:
7805 if (regpairs_aligned(cpu_env
)) {
7809 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
7811 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
7812 unlock_user(p
, arg2
, ret
);
7814 case TARGET_NR_pwrite64
:
7815 if (regpairs_aligned(cpu_env
)) {
7819 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
7821 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
7822 unlock_user(p
, arg2
, 0);
7825 case TARGET_NR_getcwd
:
7826 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
7828 ret
= get_errno(sys_getcwd1(p
, arg2
));
7829 unlock_user(p
, arg1
, ret
);
7831 case TARGET_NR_capget
:
7832 case TARGET_NR_capset
:
7834 struct target_user_cap_header
*target_header
;
7835 struct target_user_cap_data
*target_data
= NULL
;
7836 struct __user_cap_header_struct header
;
7837 struct __user_cap_data_struct data
[2];
7838 struct __user_cap_data_struct
*dataptr
= NULL
;
7839 int i
, target_datalen
;
7842 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
7845 header
.version
= tswap32(target_header
->version
);
7846 header
.pid
= tswap32(target_header
->pid
);
7848 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
7849 /* Version 2 and up takes pointer to two user_data structs */
7853 target_datalen
= sizeof(*target_data
) * data_items
;
7856 if (num
== TARGET_NR_capget
) {
7857 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
7859 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
7862 unlock_user_struct(target_header
, arg1
, 0);
7866 if (num
== TARGET_NR_capset
) {
7867 for (i
= 0; i
< data_items
; i
++) {
7868 data
[i
].effective
= tswap32(target_data
[i
].effective
);
7869 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
7870 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
7877 if (num
== TARGET_NR_capget
) {
7878 ret
= get_errno(capget(&header
, dataptr
));
7880 ret
= get_errno(capset(&header
, dataptr
));
7883 /* The kernel always updates version for both capget and capset */
7884 target_header
->version
= tswap32(header
.version
);
7885 unlock_user_struct(target_header
, arg1
, 1);
7888 if (num
== TARGET_NR_capget
) {
7889 for (i
= 0; i
< data_items
; i
++) {
7890 target_data
[i
].effective
= tswap32(data
[i
].effective
);
7891 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
7892 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
7894 unlock_user(target_data
, arg2
, target_datalen
);
7896 unlock_user(target_data
, arg2
, 0);
7901 case TARGET_NR_sigaltstack
:
7902 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
7903 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
7904 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
7905 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUArchState
*)cpu_env
));
7911 #ifdef CONFIG_SENDFILE
7912 case TARGET_NR_sendfile
:
7917 ret
= get_user_sal(off
, arg3
);
7918 if (is_error(ret
)) {
7923 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
7924 if (!is_error(ret
) && arg3
) {
7925 abi_long ret2
= put_user_sal(off
, arg3
);
7926 if (is_error(ret2
)) {
7932 #ifdef TARGET_NR_sendfile64
7933 case TARGET_NR_sendfile64
:
7938 ret
= get_user_s64(off
, arg3
);
7939 if (is_error(ret
)) {
7944 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
7945 if (!is_error(ret
) && arg3
) {
7946 abi_long ret2
= put_user_s64(off
, arg3
);
7947 if (is_error(ret2
)) {
7955 case TARGET_NR_sendfile
:
7956 #ifdef TARGET_NR_sendfile64
7957 case TARGET_NR_sendfile64
:
7962 #ifdef TARGET_NR_getpmsg
7963 case TARGET_NR_getpmsg
:
7966 #ifdef TARGET_NR_putpmsg
7967 case TARGET_NR_putpmsg
:
7970 #ifdef TARGET_NR_vfork
7971 case TARGET_NR_vfork
:
7972 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
7976 #ifdef TARGET_NR_ugetrlimit
7977 case TARGET_NR_ugetrlimit
:
7980 int resource
= target_to_host_resource(arg1
);
7981 ret
= get_errno(getrlimit(resource
, &rlim
));
7982 if (!is_error(ret
)) {
7983 struct target_rlimit
*target_rlim
;
7984 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
7986 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
7987 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
7988 unlock_user_struct(target_rlim
, arg2
, 1);
7993 #ifdef TARGET_NR_truncate64
7994 case TARGET_NR_truncate64
:
7995 if (!(p
= lock_user_string(arg1
)))
7997 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
7998 unlock_user(p
, arg1
, 0);
8001 #ifdef TARGET_NR_ftruncate64
8002 case TARGET_NR_ftruncate64
:
8003 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
8006 #ifdef TARGET_NR_stat64
8007 case TARGET_NR_stat64
:
8008 if (!(p
= lock_user_string(arg1
)))
8010 ret
= get_errno(stat(path(p
), &st
));
8011 unlock_user(p
, arg1
, 0);
8013 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
8016 #ifdef TARGET_NR_lstat64
8017 case TARGET_NR_lstat64
:
8018 if (!(p
= lock_user_string(arg1
)))
8020 ret
= get_errno(lstat(path(p
), &st
));
8021 unlock_user(p
, arg1
, 0);
8023 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
8026 #ifdef TARGET_NR_fstat64
8027 case TARGET_NR_fstat64
:
8028 ret
= get_errno(fstat(arg1
, &st
));
8030 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
8033 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
8034 #ifdef TARGET_NR_fstatat64
8035 case TARGET_NR_fstatat64
:
8037 #ifdef TARGET_NR_newfstatat
8038 case TARGET_NR_newfstatat
:
8040 if (!(p
= lock_user_string(arg2
)))
8042 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
8044 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
8047 case TARGET_NR_lchown
:
8048 if (!(p
= lock_user_string(arg1
)))
8050 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
8051 unlock_user(p
, arg1
, 0);
8053 #ifdef TARGET_NR_getuid
8054 case TARGET_NR_getuid
:
8055 ret
= get_errno(high2lowuid(getuid()));
8058 #ifdef TARGET_NR_getgid
8059 case TARGET_NR_getgid
:
8060 ret
= get_errno(high2lowgid(getgid()));
8063 #ifdef TARGET_NR_geteuid
8064 case TARGET_NR_geteuid
:
8065 ret
= get_errno(high2lowuid(geteuid()));
8068 #ifdef TARGET_NR_getegid
8069 case TARGET_NR_getegid
:
8070 ret
= get_errno(high2lowgid(getegid()));
8073 case TARGET_NR_setreuid
:
8074 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
8076 case TARGET_NR_setregid
:
8077 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
8079 case TARGET_NR_getgroups
:
8081 int gidsetsize
= arg1
;
8082 target_id
*target_grouplist
;
8086 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
8087 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
8088 if (gidsetsize
== 0)
8090 if (!is_error(ret
)) {
8091 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
8092 if (!target_grouplist
)
8094 for(i
= 0;i
< ret
; i
++)
8095 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
8096 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
8100 case TARGET_NR_setgroups
:
8102 int gidsetsize
= arg1
;
8103 target_id
*target_grouplist
;
8104 gid_t
*grouplist
= NULL
;
8107 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
8108 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
8109 if (!target_grouplist
) {
8110 ret
= -TARGET_EFAULT
;
8113 for (i
= 0; i
< gidsetsize
; i
++) {
8114 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
8116 unlock_user(target_grouplist
, arg2
, 0);
8118 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
8121 case TARGET_NR_fchown
:
8122 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
8124 #if defined(TARGET_NR_fchownat)
8125 case TARGET_NR_fchownat
:
8126 if (!(p
= lock_user_string(arg2
)))
8128 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
8129 low2highgid(arg4
), arg5
));
8130 unlock_user(p
, arg2
, 0);
8133 #ifdef TARGET_NR_setresuid
8134 case TARGET_NR_setresuid
:
8135 ret
= get_errno(setresuid(low2highuid(arg1
),
8137 low2highuid(arg3
)));
8140 #ifdef TARGET_NR_getresuid
8141 case TARGET_NR_getresuid
:
8143 uid_t ruid
, euid
, suid
;
8144 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
8145 if (!is_error(ret
)) {
8146 if (put_user_id(high2lowuid(ruid
), arg1
)
8147 || put_user_id(high2lowuid(euid
), arg2
)
8148 || put_user_id(high2lowuid(suid
), arg3
))
8154 #ifdef TARGET_NR_getresgid
8155 case TARGET_NR_setresgid
:
8156 ret
= get_errno(setresgid(low2highgid(arg1
),
8158 low2highgid(arg3
)));
8161 #ifdef TARGET_NR_getresgid
8162 case TARGET_NR_getresgid
:
8164 gid_t rgid
, egid
, sgid
;
8165 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
8166 if (!is_error(ret
)) {
8167 if (put_user_id(high2lowgid(rgid
), arg1
)
8168 || put_user_id(high2lowgid(egid
), arg2
)
8169 || put_user_id(high2lowgid(sgid
), arg3
))
8175 case TARGET_NR_chown
:
8176 if (!(p
= lock_user_string(arg1
)))
8178 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
8179 unlock_user(p
, arg1
, 0);
8181 case TARGET_NR_setuid
:
8182 ret
= get_errno(setuid(low2highuid(arg1
)));
8184 case TARGET_NR_setgid
:
8185 ret
= get_errno(setgid(low2highgid(arg1
)));
8187 case TARGET_NR_setfsuid
:
8188 ret
= get_errno(setfsuid(arg1
));
8190 case TARGET_NR_setfsgid
:
8191 ret
= get_errno(setfsgid(arg1
));
8194 #ifdef TARGET_NR_lchown32
8195 case TARGET_NR_lchown32
:
8196 if (!(p
= lock_user_string(arg1
)))
8198 ret
= get_errno(lchown(p
, arg2
, arg3
));
8199 unlock_user(p
, arg1
, 0);
8202 #ifdef TARGET_NR_getuid32
8203 case TARGET_NR_getuid32
:
8204 ret
= get_errno(getuid());
8208 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
8209 /* Alpha specific */
8210 case TARGET_NR_getxuid
:
8214 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
8216 ret
= get_errno(getuid());
8219 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
8220 /* Alpha specific */
8221 case TARGET_NR_getxgid
:
8225 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
8227 ret
= get_errno(getgid());
8230 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
8231 /* Alpha specific */
8232 case TARGET_NR_osf_getsysinfo
:
8233 ret
= -TARGET_EOPNOTSUPP
;
8235 case TARGET_GSI_IEEE_FP_CONTROL
:
8237 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
8239 /* Copied from linux ieee_fpcr_to_swcr. */
8240 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
8241 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
8242 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
8243 | SWCR_TRAP_ENABLE_DZE
8244 | SWCR_TRAP_ENABLE_OVF
);
8245 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
8246 | SWCR_TRAP_ENABLE_INE
);
8247 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
8248 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
8250 if (put_user_u64 (swcr
, arg2
))
8256 /* case GSI_IEEE_STATE_AT_SIGNAL:
8257 -- Not implemented in linux kernel.
8259 -- Retrieves current unaligned access state; not much used.
8261 -- Retrieves implver information; surely not used.
8263 -- Grabs a copy of the HWRPB; surely not used.
8268 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
8269 /* Alpha specific */
8270 case TARGET_NR_osf_setsysinfo
:
8271 ret
= -TARGET_EOPNOTSUPP
;
8273 case TARGET_SSI_IEEE_FP_CONTROL
:
8275 uint64_t swcr
, fpcr
, orig_fpcr
;
8277 if (get_user_u64 (swcr
, arg2
)) {
8280 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
8281 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
8283 /* Copied from linux ieee_swcr_to_fpcr. */
8284 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
8285 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
8286 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
8287 | SWCR_TRAP_ENABLE_DZE
8288 | SWCR_TRAP_ENABLE_OVF
)) << 48;
8289 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
8290 | SWCR_TRAP_ENABLE_INE
)) << 57;
8291 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
8292 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
8294 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
8299 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
8301 uint64_t exc
, fpcr
, orig_fpcr
;
8304 if (get_user_u64(exc
, arg2
)) {
8308 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
8310 /* We only add to the exception status here. */
8311 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
8313 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
8316 /* Old exceptions are not signaled. */
8317 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
8319 /* If any exceptions set by this call,
8320 and are unmasked, send a signal. */
8322 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
8323 si_code
= TARGET_FPE_FLTRES
;
8325 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
8326 si_code
= TARGET_FPE_FLTUND
;
8328 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
8329 si_code
= TARGET_FPE_FLTOVF
;
8331 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
8332 si_code
= TARGET_FPE_FLTDIV
;
8334 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
8335 si_code
= TARGET_FPE_FLTINV
;
8338 target_siginfo_t info
;
8339 info
.si_signo
= SIGFPE
;
8341 info
.si_code
= si_code
;
8342 info
._sifields
._sigfault
._addr
8343 = ((CPUArchState
*)cpu_env
)->pc
;
8344 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
8349 /* case SSI_NVPAIRS:
8350 -- Used with SSIN_UACPROC to enable unaligned accesses.
8351 case SSI_IEEE_STATE_AT_SIGNAL:
8352 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
8353 -- Not implemented in linux kernel
8358 #ifdef TARGET_NR_osf_sigprocmask
8359 /* Alpha specific. */
8360 case TARGET_NR_osf_sigprocmask
:
8364 sigset_t set
, oldset
;
8367 case TARGET_SIG_BLOCK
:
8370 case TARGET_SIG_UNBLOCK
:
8373 case TARGET_SIG_SETMASK
:
8377 ret
= -TARGET_EINVAL
;
8381 target_to_host_old_sigset(&set
, &mask
);
8382 do_sigprocmask(how
, &set
, &oldset
);
8383 host_to_target_old_sigset(&mask
, &oldset
);
8389 #ifdef TARGET_NR_getgid32
8390 case TARGET_NR_getgid32
:
8391 ret
= get_errno(getgid());
8394 #ifdef TARGET_NR_geteuid32
8395 case TARGET_NR_geteuid32
:
8396 ret
= get_errno(geteuid());
8399 #ifdef TARGET_NR_getegid32
8400 case TARGET_NR_getegid32
:
8401 ret
= get_errno(getegid());
8404 #ifdef TARGET_NR_setreuid32
8405 case TARGET_NR_setreuid32
:
8406 ret
= get_errno(setreuid(arg1
, arg2
));
8409 #ifdef TARGET_NR_setregid32
8410 case TARGET_NR_setregid32
:
8411 ret
= get_errno(setregid(arg1
, arg2
));
8414 #ifdef TARGET_NR_getgroups32
8415 case TARGET_NR_getgroups32
:
8417 int gidsetsize
= arg1
;
8418 uint32_t *target_grouplist
;
8422 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
8423 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
8424 if (gidsetsize
== 0)
8426 if (!is_error(ret
)) {
8427 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
8428 if (!target_grouplist
) {
8429 ret
= -TARGET_EFAULT
;
8432 for(i
= 0;i
< ret
; i
++)
8433 target_grouplist
[i
] = tswap32(grouplist
[i
]);
8434 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
8439 #ifdef TARGET_NR_setgroups32
8440 case TARGET_NR_setgroups32
:
8442 int gidsetsize
= arg1
;
8443 uint32_t *target_grouplist
;
8447 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
8448 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
8449 if (!target_grouplist
) {
8450 ret
= -TARGET_EFAULT
;
8453 for(i
= 0;i
< gidsetsize
; i
++)
8454 grouplist
[i
] = tswap32(target_grouplist
[i
]);
8455 unlock_user(target_grouplist
, arg2
, 0);
8456 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
8460 #ifdef TARGET_NR_fchown32
8461 case TARGET_NR_fchown32
:
8462 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
8465 #ifdef TARGET_NR_setresuid32
8466 case TARGET_NR_setresuid32
:
8467 ret
= get_errno(setresuid(arg1
, arg2
, arg3
));
8470 #ifdef TARGET_NR_getresuid32
8471 case TARGET_NR_getresuid32
:
8473 uid_t ruid
, euid
, suid
;
8474 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
8475 if (!is_error(ret
)) {
8476 if (put_user_u32(ruid
, arg1
)
8477 || put_user_u32(euid
, arg2
)
8478 || put_user_u32(suid
, arg3
))
8484 #ifdef TARGET_NR_setresgid32
8485 case TARGET_NR_setresgid32
:
8486 ret
= get_errno(setresgid(arg1
, arg2
, arg3
));
8489 #ifdef TARGET_NR_getresgid32
8490 case TARGET_NR_getresgid32
:
8492 gid_t rgid
, egid
, sgid
;
8493 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
8494 if (!is_error(ret
)) {
8495 if (put_user_u32(rgid
, arg1
)
8496 || put_user_u32(egid
, arg2
)
8497 || put_user_u32(sgid
, arg3
))
8503 #ifdef TARGET_NR_chown32
8504 case TARGET_NR_chown32
:
8505 if (!(p
= lock_user_string(arg1
)))
8507 ret
= get_errno(chown(p
, arg2
, arg3
));
8508 unlock_user(p
, arg1
, 0);
8511 #ifdef TARGET_NR_setuid32
8512 case TARGET_NR_setuid32
:
8513 ret
= get_errno(setuid(arg1
));
8516 #ifdef TARGET_NR_setgid32
8517 case TARGET_NR_setgid32
:
8518 ret
= get_errno(setgid(arg1
));
8521 #ifdef TARGET_NR_setfsuid32
8522 case TARGET_NR_setfsuid32
:
8523 ret
= get_errno(setfsuid(arg1
));
8526 #ifdef TARGET_NR_setfsgid32
8527 case TARGET_NR_setfsgid32
:
8528 ret
= get_errno(setfsgid(arg1
));
8532 case TARGET_NR_pivot_root
:
8534 #ifdef TARGET_NR_mincore
8535 case TARGET_NR_mincore
:
8538 ret
= -TARGET_EFAULT
;
8539 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
8541 if (!(p
= lock_user_string(arg3
)))
8543 ret
= get_errno(mincore(a
, arg2
, p
));
8544 unlock_user(p
, arg3
, ret
);
8546 unlock_user(a
, arg1
, 0);
8550 #ifdef TARGET_NR_arm_fadvise64_64
8551 case TARGET_NR_arm_fadvise64_64
:
8554 * arm_fadvise64_64 looks like fadvise64_64 but
8555 * with different argument order
8563 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
8564 #ifdef TARGET_NR_fadvise64_64
8565 case TARGET_NR_fadvise64_64
:
8567 #ifdef TARGET_NR_fadvise64
8568 case TARGET_NR_fadvise64
:
8572 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
8573 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
8574 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
8575 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
8579 ret
= -posix_fadvise(arg1
, arg2
, arg3
, arg4
);
8582 #ifdef TARGET_NR_madvise
8583 case TARGET_NR_madvise
:
8584 /* A straight passthrough may not be safe because qemu sometimes
8585 turns private file-backed mappings into anonymous mappings.
8586 This will break MADV_DONTNEED.
8587 This is a hint, so ignoring and returning success is ok. */
8591 #if TARGET_ABI_BITS == 32
8592 case TARGET_NR_fcntl64
:
8596 struct target_flock64
*target_fl
;
8598 struct target_eabi_flock64
*target_efl
;
8601 cmd
= target_to_host_fcntl_cmd(arg2
);
8602 if (cmd
== -TARGET_EINVAL
) {
8608 case TARGET_F_GETLK64
:
8610 if (((CPUARMState
*)cpu_env
)->eabi
) {
8611 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
8613 fl
.l_type
= tswap16(target_efl
->l_type
);
8614 fl
.l_whence
= tswap16(target_efl
->l_whence
);
8615 fl
.l_start
= tswap64(target_efl
->l_start
);
8616 fl
.l_len
= tswap64(target_efl
->l_len
);
8617 fl
.l_pid
= tswap32(target_efl
->l_pid
);
8618 unlock_user_struct(target_efl
, arg3
, 0);
8622 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
8624 fl
.l_type
= tswap16(target_fl
->l_type
);
8625 fl
.l_whence
= tswap16(target_fl
->l_whence
);
8626 fl
.l_start
= tswap64(target_fl
->l_start
);
8627 fl
.l_len
= tswap64(target_fl
->l_len
);
8628 fl
.l_pid
= tswap32(target_fl
->l_pid
);
8629 unlock_user_struct(target_fl
, arg3
, 0);
8631 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
8634 if (((CPUARMState
*)cpu_env
)->eabi
) {
8635 if (!lock_user_struct(VERIFY_WRITE
, target_efl
, arg3
, 0))
8637 target_efl
->l_type
= tswap16(fl
.l_type
);
8638 target_efl
->l_whence
= tswap16(fl
.l_whence
);
8639 target_efl
->l_start
= tswap64(fl
.l_start
);
8640 target_efl
->l_len
= tswap64(fl
.l_len
);
8641 target_efl
->l_pid
= tswap32(fl
.l_pid
);
8642 unlock_user_struct(target_efl
, arg3
, 1);
8646 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg3
, 0))
8648 target_fl
->l_type
= tswap16(fl
.l_type
);
8649 target_fl
->l_whence
= tswap16(fl
.l_whence
);
8650 target_fl
->l_start
= tswap64(fl
.l_start
);
8651 target_fl
->l_len
= tswap64(fl
.l_len
);
8652 target_fl
->l_pid
= tswap32(fl
.l_pid
);
8653 unlock_user_struct(target_fl
, arg3
, 1);
8658 case TARGET_F_SETLK64
:
8659 case TARGET_F_SETLKW64
:
8661 if (((CPUARMState
*)cpu_env
)->eabi
) {
8662 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
8664 fl
.l_type
= tswap16(target_efl
->l_type
);
8665 fl
.l_whence
= tswap16(target_efl
->l_whence
);
8666 fl
.l_start
= tswap64(target_efl
->l_start
);
8667 fl
.l_len
= tswap64(target_efl
->l_len
);
8668 fl
.l_pid
= tswap32(target_efl
->l_pid
);
8669 unlock_user_struct(target_efl
, arg3
, 0);
8673 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
8675 fl
.l_type
= tswap16(target_fl
->l_type
);
8676 fl
.l_whence
= tswap16(target_fl
->l_whence
);
8677 fl
.l_start
= tswap64(target_fl
->l_start
);
8678 fl
.l_len
= tswap64(target_fl
->l_len
);
8679 fl
.l_pid
= tswap32(target_fl
->l_pid
);
8680 unlock_user_struct(target_fl
, arg3
, 0);
8682 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
8685 ret
= do_fcntl(arg1
, arg2
, arg3
);
8691 #ifdef TARGET_NR_cacheflush
8692 case TARGET_NR_cacheflush
:
8693 /* self-modifying code is handled automatically, so nothing needed */
8697 #ifdef TARGET_NR_security
8698 case TARGET_NR_security
:
8701 #ifdef TARGET_NR_getpagesize
8702 case TARGET_NR_getpagesize
:
8703 ret
= TARGET_PAGE_SIZE
;
8706 case TARGET_NR_gettid
:
8707 ret
= get_errno(gettid());
8709 #ifdef TARGET_NR_readahead
8710 case TARGET_NR_readahead
:
8711 #if TARGET_ABI_BITS == 32
8712 if (regpairs_aligned(cpu_env
)) {
8717 ret
= get_errno(readahead(arg1
, ((off64_t
)arg3
<< 32) | arg2
, arg4
));
8719 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
8724 #ifdef TARGET_NR_setxattr
8725 case TARGET_NR_listxattr
:
8726 case TARGET_NR_llistxattr
:
8730 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8732 ret
= -TARGET_EFAULT
;
8736 p
= lock_user_string(arg1
);
8738 if (num
== TARGET_NR_listxattr
) {
8739 ret
= get_errno(listxattr(p
, b
, arg3
));
8741 ret
= get_errno(llistxattr(p
, b
, arg3
));
8744 ret
= -TARGET_EFAULT
;
8746 unlock_user(p
, arg1
, 0);
8747 unlock_user(b
, arg2
, arg3
);
8750 case TARGET_NR_flistxattr
:
8754 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8756 ret
= -TARGET_EFAULT
;
8760 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
8761 unlock_user(b
, arg2
, arg3
);
8764 case TARGET_NR_setxattr
:
8765 case TARGET_NR_lsetxattr
:
8767 void *p
, *n
, *v
= 0;
8769 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
8771 ret
= -TARGET_EFAULT
;
8775 p
= lock_user_string(arg1
);
8776 n
= lock_user_string(arg2
);
8778 if (num
== TARGET_NR_setxattr
) {
8779 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
8781 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
8784 ret
= -TARGET_EFAULT
;
8786 unlock_user(p
, arg1
, 0);
8787 unlock_user(n
, arg2
, 0);
8788 unlock_user(v
, arg3
, 0);
8791 case TARGET_NR_fsetxattr
:
8795 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
8797 ret
= -TARGET_EFAULT
;
8801 n
= lock_user_string(arg2
);
8803 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
8805 ret
= -TARGET_EFAULT
;
8807 unlock_user(n
, arg2
, 0);
8808 unlock_user(v
, arg3
, 0);
8811 case TARGET_NR_getxattr
:
8812 case TARGET_NR_lgetxattr
:
8814 void *p
, *n
, *v
= 0;
8816 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
8818 ret
= -TARGET_EFAULT
;
8822 p
= lock_user_string(arg1
);
8823 n
= lock_user_string(arg2
);
8825 if (num
== TARGET_NR_getxattr
) {
8826 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
8828 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
8831 ret
= -TARGET_EFAULT
;
8833 unlock_user(p
, arg1
, 0);
8834 unlock_user(n
, arg2
, 0);
8835 unlock_user(v
, arg3
, arg4
);
8838 case TARGET_NR_fgetxattr
:
8842 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
8844 ret
= -TARGET_EFAULT
;
8848 n
= lock_user_string(arg2
);
8850 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
8852 ret
= -TARGET_EFAULT
;
8854 unlock_user(n
, arg2
, 0);
8855 unlock_user(v
, arg3
, arg4
);
8858 case TARGET_NR_removexattr
:
8859 case TARGET_NR_lremovexattr
:
8862 p
= lock_user_string(arg1
);
8863 n
= lock_user_string(arg2
);
8865 if (num
== TARGET_NR_removexattr
) {
8866 ret
= get_errno(removexattr(p
, n
));
8868 ret
= get_errno(lremovexattr(p
, n
));
8871 ret
= -TARGET_EFAULT
;
8873 unlock_user(p
, arg1
, 0);
8874 unlock_user(n
, arg2
, 0);
8877 case TARGET_NR_fremovexattr
:
8880 n
= lock_user_string(arg2
);
8882 ret
= get_errno(fremovexattr(arg1
, n
));
8884 ret
= -TARGET_EFAULT
;
8886 unlock_user(n
, arg2
, 0);
8890 #endif /* CONFIG_ATTR */
8891 #ifdef TARGET_NR_set_thread_area
8892 case TARGET_NR_set_thread_area
:
8893 #if defined(TARGET_MIPS)
8894 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
8897 #elif defined(TARGET_CRIS)
8899 ret
= -TARGET_EINVAL
;
8901 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
8905 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
8906 ret
= do_set_thread_area(cpu_env
, arg1
);
8908 #elif defined(TARGET_M68K)
8910 TaskState
*ts
= cpu
->opaque
;
8911 ts
->tp_value
= arg1
;
8916 goto unimplemented_nowarn
;
8919 #ifdef TARGET_NR_get_thread_area
8920 case TARGET_NR_get_thread_area
:
8921 #if defined(TARGET_I386) && defined(TARGET_ABI32)
8922 ret
= do_get_thread_area(cpu_env
, arg1
);
8924 #elif defined(TARGET_M68K)
8926 TaskState
*ts
= cpu
->opaque
;
8931 goto unimplemented_nowarn
;
8934 #ifdef TARGET_NR_getdomainname
8935 case TARGET_NR_getdomainname
:
8936 goto unimplemented_nowarn
;
8939 #ifdef TARGET_NR_clock_gettime
8940 case TARGET_NR_clock_gettime
:
8943 ret
= get_errno(clock_gettime(arg1
, &ts
));
8944 if (!is_error(ret
)) {
8945 host_to_target_timespec(arg2
, &ts
);
8950 #ifdef TARGET_NR_clock_getres
8951 case TARGET_NR_clock_getres
:
8954 ret
= get_errno(clock_getres(arg1
, &ts
));
8955 if (!is_error(ret
)) {
8956 host_to_target_timespec(arg2
, &ts
);
8961 #ifdef TARGET_NR_clock_nanosleep
8962 case TARGET_NR_clock_nanosleep
:
8965 target_to_host_timespec(&ts
, arg3
);
8966 ret
= get_errno(clock_nanosleep(arg1
, arg2
, &ts
, arg4
? &ts
: NULL
));
8968 host_to_target_timespec(arg4
, &ts
);
8973 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
8974 case TARGET_NR_set_tid_address
:
8975 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
8979 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
8980 case TARGET_NR_tkill
:
8981 ret
= get_errno(sys_tkill((int)arg1
, target_to_host_signal(arg2
)));
8985 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
8986 case TARGET_NR_tgkill
:
8987 ret
= get_errno(sys_tgkill((int)arg1
, (int)arg2
,
8988 target_to_host_signal(arg3
)));
8992 #ifdef TARGET_NR_set_robust_list
8993 case TARGET_NR_set_robust_list
:
8994 case TARGET_NR_get_robust_list
:
8995 /* The ABI for supporting robust futexes has userspace pass
8996 * the kernel a pointer to a linked list which is updated by
8997 * userspace after the syscall; the list is walked by the kernel
8998 * when the thread exits. Since the linked list in QEMU guest
8999 * memory isn't a valid linked list for the host and we have
9000 * no way to reliably intercept the thread-death event, we can't
9001 * support these. Silently return ENOSYS so that guest userspace
9002 * falls back to a non-robust futex implementation (which should
9003 * be OK except in the corner case of the guest crashing while
9004 * holding a mutex that is shared with another process via
9007 goto unimplemented_nowarn
;
9010 #if defined(TARGET_NR_utimensat)
9011 case TARGET_NR_utimensat
:
9013 struct timespec
*tsp
, ts
[2];
9017 target_to_host_timespec(ts
, arg3
);
9018 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
9022 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
9024 if (!(p
= lock_user_string(arg2
))) {
9025 ret
= -TARGET_EFAULT
;
9028 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
9029 unlock_user(p
, arg2
, 0);
9034 case TARGET_NR_futex
:
9035 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9037 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
9038 case TARGET_NR_inotify_init
:
9039 ret
= get_errno(sys_inotify_init());
9042 #ifdef CONFIG_INOTIFY1
9043 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
9044 case TARGET_NR_inotify_init1
:
9045 ret
= get_errno(sys_inotify_init1(arg1
));
9049 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
9050 case TARGET_NR_inotify_add_watch
:
9051 p
= lock_user_string(arg2
);
9052 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
9053 unlock_user(p
, arg2
, 0);
9056 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
9057 case TARGET_NR_inotify_rm_watch
:
9058 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
9062 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
9063 case TARGET_NR_mq_open
:
9065 struct mq_attr posix_mq_attr
;
9067 p
= lock_user_string(arg1
- 1);
9069 copy_from_user_mq_attr (&posix_mq_attr
, arg4
);
9070 ret
= get_errno(mq_open(p
, arg2
, arg3
, &posix_mq_attr
));
9071 unlock_user (p
, arg1
, 0);
9075 case TARGET_NR_mq_unlink
:
9076 p
= lock_user_string(arg1
- 1);
9077 ret
= get_errno(mq_unlink(p
));
9078 unlock_user (p
, arg1
, 0);
9081 case TARGET_NR_mq_timedsend
:
9085 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
9087 target_to_host_timespec(&ts
, arg5
);
9088 ret
= get_errno(mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
9089 host_to_target_timespec(arg5
, &ts
);
9092 ret
= get_errno(mq_send(arg1
, p
, arg3
, arg4
));
9093 unlock_user (p
, arg2
, arg3
);
9097 case TARGET_NR_mq_timedreceive
:
9102 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
9104 target_to_host_timespec(&ts
, arg5
);
9105 ret
= get_errno(mq_timedreceive(arg1
, p
, arg3
, &prio
, &ts
));
9106 host_to_target_timespec(arg5
, &ts
);
9109 ret
= get_errno(mq_receive(arg1
, p
, arg3
, &prio
));
9110 unlock_user (p
, arg2
, arg3
);
9112 put_user_u32(prio
, arg4
);
9116 /* Not implemented for now... */
9117 /* case TARGET_NR_mq_notify: */
9120 case TARGET_NR_mq_getsetattr
:
9122 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
9125 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
9126 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
9129 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
9130 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
9137 #ifdef CONFIG_SPLICE
9138 #ifdef TARGET_NR_tee
9141 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
9145 #ifdef TARGET_NR_splice
9146 case TARGET_NR_splice
:
9148 loff_t loff_in
, loff_out
;
9149 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
9151 get_user_u64(loff_in
, arg2
);
9152 ploff_in
= &loff_in
;
9155 get_user_u64(loff_out
, arg2
);
9156 ploff_out
= &loff_out
;
9158 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
9162 #ifdef TARGET_NR_vmsplice
9163 case TARGET_NR_vmsplice
:
9165 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
9167 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
9168 unlock_iovec(vec
, arg2
, arg3
, 0);
9170 ret
= -host_to_target_errno(errno
);
9175 #endif /* CONFIG_SPLICE */
9176 #ifdef CONFIG_EVENTFD
9177 #if defined(TARGET_NR_eventfd)
9178 case TARGET_NR_eventfd
:
9179 ret
= get_errno(eventfd(arg1
, 0));
9182 #if defined(TARGET_NR_eventfd2)
9183 case TARGET_NR_eventfd2
:
9185 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
9186 if (arg2
& TARGET_O_NONBLOCK
) {
9187 host_flags
|= O_NONBLOCK
;
9189 if (arg2
& TARGET_O_CLOEXEC
) {
9190 host_flags
|= O_CLOEXEC
;
9192 ret
= get_errno(eventfd(arg1
, host_flags
));
9196 #endif /* CONFIG_EVENTFD */
9197 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
9198 case TARGET_NR_fallocate
:
9199 #if TARGET_ABI_BITS == 32
9200 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
9201 target_offset64(arg5
, arg6
)));
9203 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
9207 #if defined(CONFIG_SYNC_FILE_RANGE)
9208 #if defined(TARGET_NR_sync_file_range)
9209 case TARGET_NR_sync_file_range
:
9210 #if TARGET_ABI_BITS == 32
9211 #if defined(TARGET_MIPS)
9212 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
9213 target_offset64(arg5
, arg6
), arg7
));
9215 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
9216 target_offset64(arg4
, arg5
), arg6
));
9217 #endif /* !TARGET_MIPS */
9219 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
9223 #if defined(TARGET_NR_sync_file_range2)
9224 case TARGET_NR_sync_file_range2
:
9225 /* This is like sync_file_range but the arguments are reordered */
9226 #if TARGET_ABI_BITS == 32
9227 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
9228 target_offset64(arg5
, arg6
), arg2
));
9230 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
9235 #if defined(CONFIG_EPOLL)
9236 #if defined(TARGET_NR_epoll_create)
9237 case TARGET_NR_epoll_create
:
9238 ret
= get_errno(epoll_create(arg1
));
9241 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
9242 case TARGET_NR_epoll_create1
:
9243 ret
= get_errno(epoll_create1(arg1
));
9246 #if defined(TARGET_NR_epoll_ctl)
9247 case TARGET_NR_epoll_ctl
:
9249 struct epoll_event ep
;
9250 struct epoll_event
*epp
= 0;
9252 struct target_epoll_event
*target_ep
;
9253 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
9256 ep
.events
= tswap32(target_ep
->events
);
9257 /* The epoll_data_t union is just opaque data to the kernel,
9258 * so we transfer all 64 bits across and need not worry what
9259 * actual data type it is.
9261 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
9262 unlock_user_struct(target_ep
, arg4
, 0);
9265 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
9270 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
9271 #define IMPLEMENT_EPOLL_PWAIT
9273 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
9274 #if defined(TARGET_NR_epoll_wait)
9275 case TARGET_NR_epoll_wait
:
9277 #if defined(IMPLEMENT_EPOLL_PWAIT)
9278 case TARGET_NR_epoll_pwait
:
9281 struct target_epoll_event
*target_ep
;
9282 struct epoll_event
*ep
;
9284 int maxevents
= arg3
;
9287 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
9288 maxevents
* sizeof(struct target_epoll_event
), 1);
9293 ep
= alloca(maxevents
* sizeof(struct epoll_event
));
9296 #if defined(IMPLEMENT_EPOLL_PWAIT)
9297 case TARGET_NR_epoll_pwait
:
9299 target_sigset_t
*target_set
;
9300 sigset_t _set
, *set
= &_set
;
9303 target_set
= lock_user(VERIFY_READ
, arg5
,
9304 sizeof(target_sigset_t
), 1);
9306 unlock_user(target_ep
, arg2
, 0);
9309 target_to_host_sigset(set
, target_set
);
9310 unlock_user(target_set
, arg5
, 0);
9315 ret
= get_errno(epoll_pwait(epfd
, ep
, maxevents
, timeout
, set
));
9319 #if defined(TARGET_NR_epoll_wait)
9320 case TARGET_NR_epoll_wait
:
9321 ret
= get_errno(epoll_wait(epfd
, ep
, maxevents
, timeout
));
9325 ret
= -TARGET_ENOSYS
;
9327 if (!is_error(ret
)) {
9329 for (i
= 0; i
< ret
; i
++) {
9330 target_ep
[i
].events
= tswap32(ep
[i
].events
);
9331 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
9334 unlock_user(target_ep
, arg2
, ret
* sizeof(struct target_epoll_event
));
9339 #ifdef TARGET_NR_prlimit64
9340 case TARGET_NR_prlimit64
:
9342 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
9343 struct target_rlimit64
*target_rnew
, *target_rold
;
9344 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
9346 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
9349 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
9350 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
9351 unlock_user_struct(target_rnew
, arg3
, 0);
9355 ret
= get_errno(sys_prlimit64(arg1
, arg2
, rnewp
, arg4
? &rold
: 0));
9356 if (!is_error(ret
) && arg4
) {
9357 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
9360 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
9361 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
9362 unlock_user_struct(target_rold
, arg4
, 1);
9367 #ifdef TARGET_NR_gethostname
9368 case TARGET_NR_gethostname
:
9370 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
9372 ret
= get_errno(gethostname(name
, arg2
));
9373 unlock_user(name
, arg1
, arg2
);
9375 ret
= -TARGET_EFAULT
;
9380 #ifdef TARGET_NR_atomic_cmpxchg_32
9381 case TARGET_NR_atomic_cmpxchg_32
:
9383 /* should use start_exclusive from main.c */
9384 abi_ulong mem_value
;
9385 if (get_user_u32(mem_value
, arg6
)) {
9386 target_siginfo_t info
;
9387 info
.si_signo
= SIGSEGV
;
9389 info
.si_code
= TARGET_SEGV_MAPERR
;
9390 info
._sifields
._sigfault
._addr
= arg6
;
9391 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
9395 if (mem_value
== arg2
)
9396 put_user_u32(arg1
, arg6
);
9401 #ifdef TARGET_NR_atomic_barrier
9402 case TARGET_NR_atomic_barrier
:
9404 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
9410 #ifdef TARGET_NR_timer_create
9411 case TARGET_NR_timer_create
:
9413 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
9415 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
9416 struct target_sigevent
*ptarget_sevp
;
9417 struct target_timer_t
*ptarget_timer
;
9420 int timer_index
= next_free_host_timer();
9422 if (timer_index
< 0) {
9423 ret
= -TARGET_EAGAIN
;
9425 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
9428 if (!lock_user_struct(VERIFY_READ
, ptarget_sevp
, arg2
, 1)) {
9432 host_sevp
.sigev_signo
= tswap32(ptarget_sevp
->sigev_signo
);
9433 host_sevp
.sigev_notify
= tswap32(ptarget_sevp
->sigev_notify
);
9435 phost_sevp
= &host_sevp
;
9438 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
9442 if (!lock_user_struct(VERIFY_WRITE
, ptarget_timer
, arg3
, 1)) {
9445 ptarget_timer
->ptr
= tswap32(0xcafe0000 | timer_index
);
9446 unlock_user_struct(ptarget_timer
, arg3
, 1);
9453 #ifdef TARGET_NR_timer_settime
9454 case TARGET_NR_timer_settime
:
9456 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
9457 * struct itimerspec * old_value */
9459 if (arg3
== 0 || arg1
< 0 || arg1
>= ARRAY_SIZE(g_posix_timers
)) {
9460 ret
= -TARGET_EINVAL
;
9462 timer_t htimer
= g_posix_timers
[arg1
];
9463 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
9465 target_to_host_itimerspec(&hspec_new
, arg3
);
9467 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
9468 host_to_target_itimerspec(arg2
, &hspec_old
);
9474 #ifdef TARGET_NR_timer_gettime
9475 case TARGET_NR_timer_gettime
:
9477 /* args: timer_t timerid, struct itimerspec *curr_value */
9480 return -TARGET_EFAULT
;
9481 } else if (arg1
< 0 || arg1
>= ARRAY_SIZE(g_posix_timers
)) {
9482 ret
= -TARGET_EINVAL
;
9484 timer_t htimer
= g_posix_timers
[arg1
];
9485 struct itimerspec hspec
;
9486 ret
= get_errno(timer_gettime(htimer
, &hspec
));
9488 if (host_to_target_itimerspec(arg2
, &hspec
)) {
9489 ret
= -TARGET_EFAULT
;
9496 #ifdef TARGET_NR_timer_getoverrun
9497 case TARGET_NR_timer_getoverrun
:
9499 /* args: timer_t timerid */
9501 if (arg1
< 0 || arg1
>= ARRAY_SIZE(g_posix_timers
)) {
9502 ret
= -TARGET_EINVAL
;
9504 timer_t htimer
= g_posix_timers
[arg1
];
9505 ret
= get_errno(timer_getoverrun(htimer
));
9511 #ifdef TARGET_NR_timer_delete
9512 case TARGET_NR_timer_delete
:
9514 /* args: timer_t timerid */
9516 if (arg1
< 0 || arg1
>= ARRAY_SIZE(g_posix_timers
)) {
9517 ret
= -TARGET_EINVAL
;
9519 timer_t htimer
= g_posix_timers
[arg1
];
9520 ret
= get_errno(timer_delete(htimer
));
9521 g_posix_timers
[arg1
] = 0;
9529 gemu_log("qemu: Unsupported syscall: %d\n", num
);
9530 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
9531 unimplemented_nowarn
:
9533 ret
= -TARGET_ENOSYS
;
9538 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
9541 print_syscall_ret(num
, ret
);
9544 ret
= -TARGET_EFAULT
;