4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
32 #include <sys/types.h>
38 #include <sys/mount.h>
40 #include <sys/fsuid.h>
41 #include <sys/personality.h>
42 #include <sys/prctl.h>
43 #include <sys/resource.h>
46 #include <linux/capability.h>
50 int __clone2(int (*fn
)(void *), void *child_stack_base
,
51 size_t stack_size
, int flags
, void *arg
, ...);
53 #include <sys/socket.h>
57 #include <sys/times.h>
60 #include <sys/statfs.h>
61 #include <sys/timerfd.h>
63 #include <sys/sysinfo.h>
64 //#include <sys/user.h>
65 #include <netinet/ip.h>
66 #include <netinet/tcp.h>
67 #include <linux/wireless.h>
68 #include <linux/icmp.h>
69 #include "qemu-common.h"
74 #include <sys/eventfd.h>
77 #include <sys/epoll.h>
80 #include "qemu/xattr.h"
82 #ifdef CONFIG_SENDFILE
83 #include <sys/sendfile.h>
86 #define termios host_termios
87 #define winsize host_winsize
88 #define termio host_termio
89 #define sgttyb host_sgttyb /* same as target */
90 #define tchars host_tchars /* same as target */
91 #define ltchars host_ltchars /* same as target */
93 #include <linux/termios.h>
94 #include <linux/unistd.h>
95 #include <linux/cdrom.h>
96 #include <linux/hdreg.h>
97 #include <linux/soundcard.h>
99 #include <linux/mtio.h>
100 #include <linux/fs.h>
101 #if defined(CONFIG_FIEMAP)
102 #include <linux/fiemap.h>
104 #include <linux/fb.h>
105 #include <linux/vt.h>
106 #include <linux/dm-ioctl.h>
107 #include <linux/reboot.h>
108 #include <linux/route.h>
109 #include <linux/filter.h>
110 #include <linux/blkpg.h>
111 #include "linux_loop.h"
116 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
117 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
121 //#include <linux/msdos_fs.h>
122 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
123 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
134 #define _syscall0(type,name) \
135 static type name (void) \
137 return syscall(__NR_##name); \
140 #define _syscall1(type,name,type1,arg1) \
141 static type name (type1 arg1) \
143 return syscall(__NR_##name, arg1); \
146 #define _syscall2(type,name,type1,arg1,type2,arg2) \
147 static type name (type1 arg1,type2 arg2) \
149 return syscall(__NR_##name, arg1, arg2); \
152 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
153 static type name (type1 arg1,type2 arg2,type3 arg3) \
155 return syscall(__NR_##name, arg1, arg2, arg3); \
158 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
159 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
161 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
164 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
166 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
168 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
172 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
173 type5,arg5,type6,arg6) \
174 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
177 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
181 #define __NR_sys_uname __NR_uname
182 #define __NR_sys_getcwd1 __NR_getcwd
183 #define __NR_sys_getdents __NR_getdents
184 #define __NR_sys_getdents64 __NR_getdents64
185 #define __NR_sys_getpriority __NR_getpriority
186 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
187 #define __NR_sys_syslog __NR_syslog
188 #define __NR_sys_tgkill __NR_tgkill
189 #define __NR_sys_tkill __NR_tkill
190 #define __NR_sys_futex __NR_futex
191 #define __NR_sys_inotify_init __NR_inotify_init
192 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
193 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
195 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
197 #define __NR__llseek __NR_lseek
200 /* Newer kernel ports have llseek() instead of _llseek() */
201 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
202 #define TARGET_NR__llseek TARGET_NR_llseek
206 _syscall0(int, gettid
)
208 /* This is a replacement for the host gettid() and must return a host
210 static int gettid(void) {
215 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
217 #if !defined(__NR_getdents) || \
218 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
219 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
221 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
222 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
223 loff_t
*, res
, uint
, wh
);
225 _syscall3(int,sys_rt_sigqueueinfo
,int,pid
,int,sig
,siginfo_t
*,uinfo
)
226 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
227 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
228 _syscall3(int,sys_tgkill
,int,tgid
,int,pid
,int,sig
)
230 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
231 _syscall2(int,sys_tkill
,int,tid
,int,sig
)
233 #ifdef __NR_exit_group
234 _syscall1(int,exit_group
,int,error_code
)
236 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
237 _syscall1(int,set_tid_address
,int *,tidptr
)
239 #if defined(TARGET_NR_futex) && defined(__NR_futex)
240 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
241 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
243 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
244 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
245 unsigned long *, user_mask_ptr
);
246 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
247 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
248 unsigned long *, user_mask_ptr
);
249 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
251 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
252 struct __user_cap_data_struct
*, data
);
253 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
254 struct __user_cap_data_struct
*, data
);
255 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
256 _syscall2(int, ioprio_get
, int, which
, int, who
)
258 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
259 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
262 static bitmask_transtbl fcntl_flags_tbl
[] = {
263 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
264 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
265 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
266 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
267 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
268 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
269 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
270 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
271 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
272 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
273 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
274 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
275 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
276 #if defined(O_DIRECT)
277 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
279 #if defined(O_NOATIME)
280 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
282 #if defined(O_CLOEXEC)
283 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
286 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
288 /* Don't terminate the list prematurely on 64-bit host+guest. */
289 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
290 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
/* getcwd(2)-style wrapper matching the kernel syscall convention:
 * returns the length of the path INCLUDING the trailing NUL on success,
 * or -1 on failure (getcwd() sets errno). */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return (-1);
    }
    return strlen(buf)+1;
}
304 static int sys_openat(int dirfd
, const char *pathname
, int flags
, mode_t mode
)
307 * open(2) has extra parameter 'mode' when called with
310 if ((flags
& O_CREAT
) != 0) {
311 return (openat(dirfd
, pathname
, flags
, mode
));
313 return (openat(dirfd
, pathname
, flags
));
#ifdef TARGET_NR_utimensat
#ifdef CONFIG_UTIMENSAT
/* Prefer the libc wrappers when available: a NULL pathname means
 * "operate on dirfd itself", which maps to futimens(). */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    if (pathname == NULL)
        return futimens(dirfd, times);
    else
        return utimensat(dirfd, pathname, times, flags);
}
#elif defined(__NR_utimensat)
/* No libc wrapper; issue the raw syscall. */
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
/* Neither libc nor kernel support: report ENOSYS like the kernel would. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
340 #ifdef CONFIG_INOTIFY
341 #include <sys/inotify.h>
343 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin wrapper over the host inotify_init(2); returns a new inotify fd
 * or -1 with errno set. */
static int sys_inotify_init(void)
{
  return (inotify_init());
}
349 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Thin wrapper over the host inotify_add_watch(2); returns a watch
 * descriptor or -1 with errno set. */
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
355 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Thin wrapper over the host inotify_rm_watch(2); returns 0 on success
 * or -1 with errno set. */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
361 #ifdef CONFIG_INOTIFY1
362 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Thin wrapper over the host inotify_init1(2); 'flags' may carry
 * IN_NONBLOCK/IN_CLOEXEC. Returns a new inotify fd or -1 with errno set. */
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
370 /* Userspace can usually survive runtime without inotify */
371 #undef TARGET_NR_inotify_init
372 #undef TARGET_NR_inotify_init1
373 #undef TARGET_NR_inotify_add_watch
374 #undef TARGET_NR_inotify_rm_watch
375 #endif /* CONFIG_INOTIFY */
377 #if defined(TARGET_NR_ppoll)
379 # define __NR_ppoll -1
381 #define __NR_sys_ppoll __NR_ppoll
382 _syscall5(int, sys_ppoll
, struct pollfd
*, fds
, nfds_t
, nfds
,
383 struct timespec
*, timeout
, const sigset_t
*, sigmask
,
387 #if defined(TARGET_NR_pselect6)
388 #ifndef __NR_pselect6
389 # define __NR_pselect6 -1
391 #define __NR_sys_pselect6 __NR_pselect6
392 _syscall6(int, sys_pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
,
393 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
);
396 #if defined(TARGET_NR_prlimit64)
397 #ifndef __NR_prlimit64
398 # define __NR_prlimit64 -1
400 #define __NR_sys_prlimit64 __NR_prlimit64
401 /* The glibc rlimit structure may not be that used by the underlying syscall */
402 struct host_rlimit64
{
406 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
407 const struct host_rlimit64
*, new_limit
,
408 struct host_rlimit64
*, old_limit
)
412 #if defined(TARGET_NR_timer_create)
413 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
414 static timer_t g_posix_timers
[32] = { 0, } ;
416 static inline int next_free_host_timer(void)
419 /* FIXME: Does finding the next free slot require a lock? */
420 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
421 if (g_posix_timers
[k
] == 0) {
422 g_posix_timers
[k
] = (timer_t
) 1;
/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers.
 * Returns non-zero when the guest ABI requires 64-bit syscall arguments to
 * start on an even-numbered register. */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
    /* Only the EABI variant of the ARM ABI aligns register pairs. */
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
446 #define ERRNO_TABLE_SIZE 1200
448 /* target_to_host_errno_table[] is initialized from
449 * host_to_target_errno_table[] in syscall_init(). */
450 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
454 * This list is the union of errno values overridden in asm-<arch>/errno.h
455 * minus the errnos that are not actually generic to all archs.
457 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
458 [EIDRM
] = TARGET_EIDRM
,
459 [ECHRNG
] = TARGET_ECHRNG
,
460 [EL2NSYNC
] = TARGET_EL2NSYNC
,
461 [EL3HLT
] = TARGET_EL3HLT
,
462 [EL3RST
] = TARGET_EL3RST
,
463 [ELNRNG
] = TARGET_ELNRNG
,
464 [EUNATCH
] = TARGET_EUNATCH
,
465 [ENOCSI
] = TARGET_ENOCSI
,
466 [EL2HLT
] = TARGET_EL2HLT
,
467 [EDEADLK
] = TARGET_EDEADLK
,
468 [ENOLCK
] = TARGET_ENOLCK
,
469 [EBADE
] = TARGET_EBADE
,
470 [EBADR
] = TARGET_EBADR
,
471 [EXFULL
] = TARGET_EXFULL
,
472 [ENOANO
] = TARGET_ENOANO
,
473 [EBADRQC
] = TARGET_EBADRQC
,
474 [EBADSLT
] = TARGET_EBADSLT
,
475 [EBFONT
] = TARGET_EBFONT
,
476 [ENOSTR
] = TARGET_ENOSTR
,
477 [ENODATA
] = TARGET_ENODATA
,
478 [ETIME
] = TARGET_ETIME
,
479 [ENOSR
] = TARGET_ENOSR
,
480 [ENONET
] = TARGET_ENONET
,
481 [ENOPKG
] = TARGET_ENOPKG
,
482 [EREMOTE
] = TARGET_EREMOTE
,
483 [ENOLINK
] = TARGET_ENOLINK
,
484 [EADV
] = TARGET_EADV
,
485 [ESRMNT
] = TARGET_ESRMNT
,
486 [ECOMM
] = TARGET_ECOMM
,
487 [EPROTO
] = TARGET_EPROTO
,
488 [EDOTDOT
] = TARGET_EDOTDOT
,
489 [EMULTIHOP
] = TARGET_EMULTIHOP
,
490 [EBADMSG
] = TARGET_EBADMSG
,
491 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
492 [EOVERFLOW
] = TARGET_EOVERFLOW
,
493 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
494 [EBADFD
] = TARGET_EBADFD
,
495 [EREMCHG
] = TARGET_EREMCHG
,
496 [ELIBACC
] = TARGET_ELIBACC
,
497 [ELIBBAD
] = TARGET_ELIBBAD
,
498 [ELIBSCN
] = TARGET_ELIBSCN
,
499 [ELIBMAX
] = TARGET_ELIBMAX
,
500 [ELIBEXEC
] = TARGET_ELIBEXEC
,
501 [EILSEQ
] = TARGET_EILSEQ
,
502 [ENOSYS
] = TARGET_ENOSYS
,
503 [ELOOP
] = TARGET_ELOOP
,
504 [ERESTART
] = TARGET_ERESTART
,
505 [ESTRPIPE
] = TARGET_ESTRPIPE
,
506 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
507 [EUSERS
] = TARGET_EUSERS
,
508 [ENOTSOCK
] = TARGET_ENOTSOCK
,
509 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
510 [EMSGSIZE
] = TARGET_EMSGSIZE
,
511 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
512 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
513 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
514 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
515 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
516 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
517 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
518 [EADDRINUSE
] = TARGET_EADDRINUSE
,
519 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
520 [ENETDOWN
] = TARGET_ENETDOWN
,
521 [ENETUNREACH
] = TARGET_ENETUNREACH
,
522 [ENETRESET
] = TARGET_ENETRESET
,
523 [ECONNABORTED
] = TARGET_ECONNABORTED
,
524 [ECONNRESET
] = TARGET_ECONNRESET
,
525 [ENOBUFS
] = TARGET_ENOBUFS
,
526 [EISCONN
] = TARGET_EISCONN
,
527 [ENOTCONN
] = TARGET_ENOTCONN
,
528 [EUCLEAN
] = TARGET_EUCLEAN
,
529 [ENOTNAM
] = TARGET_ENOTNAM
,
530 [ENAVAIL
] = TARGET_ENAVAIL
,
531 [EISNAM
] = TARGET_EISNAM
,
532 [EREMOTEIO
] = TARGET_EREMOTEIO
,
533 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
534 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
535 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
536 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
537 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
538 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
539 [EALREADY
] = TARGET_EALREADY
,
540 [EINPROGRESS
] = TARGET_EINPROGRESS
,
541 [ESTALE
] = TARGET_ESTALE
,
542 [ECANCELED
] = TARGET_ECANCELED
,
543 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
544 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
546 [ENOKEY
] = TARGET_ENOKEY
,
549 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
552 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
555 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
558 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
560 #ifdef ENOTRECOVERABLE
561 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
565 static inline int host_to_target_errno(int err
)
567 if(host_to_target_errno_table
[err
])
568 return host_to_target_errno_table
[err
];
572 static inline int target_to_host_errno(int err
)
574 if (target_to_host_errno_table
[err
])
575 return target_to_host_errno_table
[err
];
579 static inline abi_long
get_errno(abi_long ret
)
582 return -host_to_target_errno(errno
);
587 static inline int is_error(abi_long ret
)
589 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
592 char *target_strerror(int err
)
594 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
597 return strerror(target_to_host_errno(err
));
600 static inline int host_to_target_sock_type(int host_type
)
604 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
606 target_type
= TARGET_SOCK_DGRAM
;
609 target_type
= TARGET_SOCK_STREAM
;
612 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
616 #if defined(SOCK_CLOEXEC)
617 if (host_type
& SOCK_CLOEXEC
) {
618 target_type
|= TARGET_SOCK_CLOEXEC
;
622 #if defined(SOCK_NONBLOCK)
623 if (host_type
& SOCK_NONBLOCK
) {
624 target_type
|= TARGET_SOCK_NONBLOCK
;
631 static abi_ulong target_brk
;
632 static abi_ulong target_original_brk
;
633 static abi_ulong brk_page
;
635 void target_set_brk(abi_ulong new_brk
)
637 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
638 brk_page
= HOST_PAGE_ALIGN(target_brk
);
641 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
642 #define DEBUGF_BRK(message, args...)
644 /* do_brk() must return target values and target errnos. */
645 abi_long
do_brk(abi_ulong new_brk
)
647 abi_long mapped_addr
;
650 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
653 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
656 if (new_brk
< target_original_brk
) {
657 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
662 /* If the new brk is less than the highest page reserved to the
663 * target heap allocation, set it and we're almost done... */
664 if (new_brk
<= brk_page
) {
665 /* Heap contents are initialized to zero, as for anonymous
667 if (new_brk
> target_brk
) {
668 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
670 target_brk
= new_brk
;
671 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
675 /* We need to allocate more memory after the brk... Note that
676 * we don't use MAP_FIXED because that will map over the top of
677 * any existing mapping (like the one with the host libc or qemu
678 * itself); instead we treat "mapped but at wrong address" as
679 * a failure and unmap again.
681 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
682 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
683 PROT_READ
|PROT_WRITE
,
684 MAP_ANON
|MAP_PRIVATE
, 0, 0));
686 if (mapped_addr
== brk_page
) {
687 /* Heap contents are initialized to zero, as for anonymous
688 * mapped pages. Technically the new pages are already
689 * initialized to zero since they *are* anonymous mapped
690 * pages, however we have to take care with the contents that
691 * come from the remaining part of the previous page: it may
692 * contains garbage data due to a previous heap usage (grown
694 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
696 target_brk
= new_brk
;
697 brk_page
= HOST_PAGE_ALIGN(target_brk
);
698 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
701 } else if (mapped_addr
!= -1) {
702 /* Mapped but at wrong address, meaning there wasn't actually
703 * enough space for this brk.
705 target_munmap(mapped_addr
, new_alloc_size
);
707 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
710 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
713 #if defined(TARGET_ALPHA)
714 /* We (partially) emulate OSF/1 on Alpha, which requires we
715 return a proper errno, not an unchanged brk value. */
716 return -TARGET_ENOMEM
;
718 /* For everything else, return the previous break. */
722 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
723 abi_ulong target_fds_addr
,
727 abi_ulong b
, *target_fds
;
729 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
730 if (!(target_fds
= lock_user(VERIFY_READ
,
732 sizeof(abi_ulong
) * nw
,
734 return -TARGET_EFAULT
;
738 for (i
= 0; i
< nw
; i
++) {
739 /* grab the abi_ulong */
740 __get_user(b
, &target_fds
[i
]);
741 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
742 /* check the bit inside the abi_ulong */
749 unlock_user(target_fds
, target_fds_addr
, 0);
754 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
755 abi_ulong target_fds_addr
,
758 if (target_fds_addr
) {
759 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
760 return -TARGET_EFAULT
;
768 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
774 abi_ulong
*target_fds
;
776 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
777 if (!(target_fds
= lock_user(VERIFY_WRITE
,
779 sizeof(abi_ulong
) * nw
,
781 return -TARGET_EFAULT
;
784 for (i
= 0; i
< nw
; i
++) {
786 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
787 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
790 __put_user(v
, &target_fds
[i
]);
793 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
798 #if defined(__alpha__)
804 static inline abi_long
host_to_target_clock_t(long ticks
)
806 #if HOST_HZ == TARGET_HZ
809 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
813 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
814 const struct rusage
*rusage
)
816 struct target_rusage
*target_rusage
;
818 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
819 return -TARGET_EFAULT
;
820 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
821 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
822 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
823 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
824 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
825 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
826 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
827 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
828 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
829 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
830 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
831 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
832 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
833 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
834 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
835 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
836 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
837 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
838 unlock_user_struct(target_rusage
, target_addr
, 1);
843 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
845 abi_ulong target_rlim_swap
;
848 target_rlim_swap
= tswapal(target_rlim
);
849 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
850 return RLIM_INFINITY
;
852 result
= target_rlim_swap
;
853 if (target_rlim_swap
!= (rlim_t
)result
)
854 return RLIM_INFINITY
;
859 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
861 abi_ulong target_rlim_swap
;
864 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
865 target_rlim_swap
= TARGET_RLIM_INFINITY
;
867 target_rlim_swap
= rlim
;
868 result
= tswapal(target_rlim_swap
);
873 static inline int target_to_host_resource(int code
)
876 case TARGET_RLIMIT_AS
:
878 case TARGET_RLIMIT_CORE
:
880 case TARGET_RLIMIT_CPU
:
882 case TARGET_RLIMIT_DATA
:
884 case TARGET_RLIMIT_FSIZE
:
886 case TARGET_RLIMIT_LOCKS
:
888 case TARGET_RLIMIT_MEMLOCK
:
889 return RLIMIT_MEMLOCK
;
890 case TARGET_RLIMIT_MSGQUEUE
:
891 return RLIMIT_MSGQUEUE
;
892 case TARGET_RLIMIT_NICE
:
894 case TARGET_RLIMIT_NOFILE
:
895 return RLIMIT_NOFILE
;
896 case TARGET_RLIMIT_NPROC
:
898 case TARGET_RLIMIT_RSS
:
900 case TARGET_RLIMIT_RTPRIO
:
901 return RLIMIT_RTPRIO
;
902 case TARGET_RLIMIT_SIGPENDING
:
903 return RLIMIT_SIGPENDING
;
904 case TARGET_RLIMIT_STACK
:
911 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
912 abi_ulong target_tv_addr
)
914 struct target_timeval
*target_tv
;
916 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
917 return -TARGET_EFAULT
;
919 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
920 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
922 unlock_user_struct(target_tv
, target_tv_addr
, 0);
927 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
928 const struct timeval
*tv
)
930 struct target_timeval
*target_tv
;
932 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
933 return -TARGET_EFAULT
;
935 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
936 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
938 unlock_user_struct(target_tv
, target_tv_addr
, 1);
943 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
944 abi_ulong target_tz_addr
)
946 struct target_timezone
*target_tz
;
948 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
949 return -TARGET_EFAULT
;
952 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
953 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
955 unlock_user_struct(target_tz
, target_tz_addr
, 0);
960 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
963 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
964 abi_ulong target_mq_attr_addr
)
966 struct target_mq_attr
*target_mq_attr
;
968 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
969 target_mq_attr_addr
, 1))
970 return -TARGET_EFAULT
;
972 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
973 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
974 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
975 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
977 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
982 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
983 const struct mq_attr
*attr
)
985 struct target_mq_attr
*target_mq_attr
;
987 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
988 target_mq_attr_addr
, 0))
989 return -TARGET_EFAULT
;
991 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
992 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
993 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
994 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
996 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1002 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1003 /* do_select() must return target values and target errnos. */
1004 static abi_long
do_select(int n
,
1005 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1006 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1008 fd_set rfds
, wfds
, efds
;
1009 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1010 struct timeval tv
, *tv_ptr
;
1013 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1017 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1021 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1026 if (target_tv_addr
) {
1027 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1028 return -TARGET_EFAULT
;
1034 ret
= get_errno(select(n
, rfds_ptr
, wfds_ptr
, efds_ptr
, tv_ptr
));
1036 if (!is_error(ret
)) {
1037 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1038 return -TARGET_EFAULT
;
1039 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1040 return -TARGET_EFAULT
;
1041 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1042 return -TARGET_EFAULT
;
1044 if (target_tv_addr
&& copy_to_user_timeval(target_tv_addr
, &tv
))
1045 return -TARGET_EFAULT
;
1052 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1055 return pipe2(host_pipe
, flags
);
1061 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1062 int flags
, int is_pipe2
)
1066 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1069 return get_errno(ret
);
1071 /* Several targets have special calling conventions for the original
1072 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1074 #if defined(TARGET_ALPHA)
1075 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1076 return host_pipe
[0];
1077 #elif defined(TARGET_MIPS)
1078 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1079 return host_pipe
[0];
1080 #elif defined(TARGET_SH4)
1081 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1082 return host_pipe
[0];
1083 #elif defined(TARGET_SPARC)
1084 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1085 return host_pipe
[0];
1089 if (put_user_s32(host_pipe
[0], pipedes
)
1090 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1091 return -TARGET_EFAULT
;
1092 return get_errno(ret
);
1095 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1096 abi_ulong target_addr
,
1099 struct target_ip_mreqn
*target_smreqn
;
1101 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1103 return -TARGET_EFAULT
;
1104 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1105 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1106 if (len
== sizeof(struct target_ip_mreqn
))
1107 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1108 unlock_user(target_smreqn
, target_addr
, 0);
1113 static inline abi_long
target_to_host_sockaddr(struct sockaddr
*addr
,
1114 abi_ulong target_addr
,
1117 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1118 sa_family_t sa_family
;
1119 struct target_sockaddr
*target_saddr
;
1121 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1123 return -TARGET_EFAULT
;
1125 sa_family
= tswap16(target_saddr
->sa_family
);
1127 /* Oops. The caller might send a incomplete sun_path; sun_path
1128 * must be terminated by \0 (see the manual page), but
1129 * unfortunately it is quite common to specify sockaddr_un
1130 * length as "strlen(x->sun_path)" while it should be
1131 * "strlen(...) + 1". We'll fix that here if needed.
1132 * Linux kernel has a similar feature.
1135 if (sa_family
== AF_UNIX
) {
1136 if (len
< unix_maxlen
&& len
> 0) {
1137 char *cp
= (char*)target_saddr
;
1139 if ( cp
[len
-1] && !cp
[len
] )
1142 if (len
> unix_maxlen
)
1146 memcpy(addr
, target_saddr
, len
);
1147 addr
->sa_family
= sa_family
;
1148 if (sa_family
== AF_PACKET
) {
1149 struct target_sockaddr_ll
*lladdr
;
1151 lladdr
= (struct target_sockaddr_ll
*)addr
;
1152 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1153 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1155 unlock_user(target_saddr
, target_addr
, 0);
1160 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1161 struct sockaddr
*addr
,
1164 struct target_sockaddr
*target_saddr
;
1166 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1168 return -TARGET_EFAULT
;
1169 memcpy(target_saddr
, addr
, len
);
1170 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1171 unlock_user(target_saddr
, target_addr
, len
);
1176 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1177 struct target_msghdr
*target_msgh
)
1179 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1180 abi_long msg_controllen
;
1181 abi_ulong target_cmsg_addr
;
1182 struct target_cmsghdr
*target_cmsg
;
1183 socklen_t space
= 0;
1185 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1186 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1188 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1189 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1191 return -TARGET_EFAULT
;
1193 while (cmsg
&& target_cmsg
) {
1194 void *data
= CMSG_DATA(cmsg
);
1195 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1197 int len
= tswapal(target_cmsg
->cmsg_len
)
1198 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
1200 space
+= CMSG_SPACE(len
);
1201 if (space
> msgh
->msg_controllen
) {
1202 space
-= CMSG_SPACE(len
);
1203 gemu_log("Host cmsg overflow\n");
1207 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1208 cmsg
->cmsg_level
= SOL_SOCKET
;
1210 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1212 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1213 cmsg
->cmsg_len
= CMSG_LEN(len
);
1215 if (cmsg
->cmsg_level
!= SOL_SOCKET
|| cmsg
->cmsg_type
!= SCM_RIGHTS
) {
1216 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1217 memcpy(data
, target_data
, len
);
1219 int *fd
= (int *)data
;
1220 int *target_fd
= (int *)target_data
;
1221 int i
, numfds
= len
/ sizeof(int);
1223 for (i
= 0; i
< numfds
; i
++)
1224 fd
[i
] = tswap32(target_fd
[i
]);
1227 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1228 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
);
1230 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1232 msgh
->msg_controllen
= space
;
1236 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1237 struct msghdr
*msgh
)
1239 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1240 abi_long msg_controllen
;
1241 abi_ulong target_cmsg_addr
;
1242 struct target_cmsghdr
*target_cmsg
;
1243 socklen_t space
= 0;
1245 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1246 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1248 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1249 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1251 return -TARGET_EFAULT
;
1253 while (cmsg
&& target_cmsg
) {
1254 void *data
= CMSG_DATA(cmsg
);
1255 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1257 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
1259 space
+= TARGET_CMSG_SPACE(len
);
1260 if (space
> msg_controllen
) {
1261 space
-= TARGET_CMSG_SPACE(len
);
1262 gemu_log("Target cmsg overflow\n");
1266 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1267 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1269 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1271 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1272 target_cmsg
->cmsg_len
= tswapal(TARGET_CMSG_LEN(len
));
1274 switch (cmsg
->cmsg_level
) {
1276 switch (cmsg
->cmsg_type
) {
1279 int *fd
= (int *)data
;
1280 int *target_fd
= (int *)target_data
;
1281 int i
, numfds
= len
/ sizeof(int);
1283 for (i
= 0; i
< numfds
; i
++)
1284 target_fd
[i
] = tswap32(fd
[i
]);
1289 struct timeval
*tv
= (struct timeval
*)data
;
1290 struct target_timeval
*target_tv
=
1291 (struct target_timeval
*)target_data
;
1293 if (len
!= sizeof(struct timeval
))
1296 /* copy struct timeval to target */
1297 target_tv
->tv_sec
= tswapal(tv
->tv_sec
);
1298 target_tv
->tv_usec
= tswapal(tv
->tv_usec
);
1301 case SCM_CREDENTIALS
:
1303 struct ucred
*cred
= (struct ucred
*)data
;
1304 struct target_ucred
*target_cred
=
1305 (struct target_ucred
*)target_data
;
1307 __put_user(cred
->pid
, &target_cred
->pid
);
1308 __put_user(cred
->uid
, &target_cred
->uid
);
1309 __put_user(cred
->gid
, &target_cred
->gid
);
1319 gemu_log("Unsupported ancillary data: %d/%d\n",
1320 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1321 memcpy(target_data
, data
, len
);
1324 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1325 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
);
1327 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1329 target_msgh
->msg_controllen
= tswapal(space
);
1333 /* do_setsockopt() Must return target values and target errnos. */
1334 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
1335 abi_ulong optval_addr
, socklen_t optlen
)
1339 struct ip_mreqn
*ip_mreq
;
1340 struct ip_mreq_source
*ip_mreq_source
;
1344 /* TCP options all take an 'int' value. */
1345 if (optlen
< sizeof(uint32_t))
1346 return -TARGET_EINVAL
;
1348 if (get_user_u32(val
, optval_addr
))
1349 return -TARGET_EFAULT
;
1350 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1357 case IP_ROUTER_ALERT
:
1361 case IP_MTU_DISCOVER
:
1367 case IP_MULTICAST_TTL
:
1368 case IP_MULTICAST_LOOP
:
1370 if (optlen
>= sizeof(uint32_t)) {
1371 if (get_user_u32(val
, optval_addr
))
1372 return -TARGET_EFAULT
;
1373 } else if (optlen
>= 1) {
1374 if (get_user_u8(val
, optval_addr
))
1375 return -TARGET_EFAULT
;
1377 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1379 case IP_ADD_MEMBERSHIP
:
1380 case IP_DROP_MEMBERSHIP
:
1381 if (optlen
< sizeof (struct target_ip_mreq
) ||
1382 optlen
> sizeof (struct target_ip_mreqn
))
1383 return -TARGET_EINVAL
;
1385 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
1386 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
1387 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
1390 case IP_BLOCK_SOURCE
:
1391 case IP_UNBLOCK_SOURCE
:
1392 case IP_ADD_SOURCE_MEMBERSHIP
:
1393 case IP_DROP_SOURCE_MEMBERSHIP
:
1394 if (optlen
!= sizeof (struct target_ip_mreq_source
))
1395 return -TARGET_EINVAL
;
1397 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
1398 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
1399 unlock_user (ip_mreq_source
, optval_addr
, 0);
1408 case IPV6_MTU_DISCOVER
:
1411 case IPV6_RECVPKTINFO
:
1413 if (optlen
< sizeof(uint32_t)) {
1414 return -TARGET_EINVAL
;
1416 if (get_user_u32(val
, optval_addr
)) {
1417 return -TARGET_EFAULT
;
1419 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1420 &val
, sizeof(val
)));
1429 /* struct icmp_filter takes an u32 value */
1430 if (optlen
< sizeof(uint32_t)) {
1431 return -TARGET_EINVAL
;
1434 if (get_user_u32(val
, optval_addr
)) {
1435 return -TARGET_EFAULT
;
1437 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1438 &val
, sizeof(val
)));
1445 case TARGET_SOL_SOCKET
:
1447 case TARGET_SO_RCVTIMEO
:
1451 optname
= SO_RCVTIMEO
;
1454 if (optlen
!= sizeof(struct target_timeval
)) {
1455 return -TARGET_EINVAL
;
1458 if (copy_from_user_timeval(&tv
, optval_addr
)) {
1459 return -TARGET_EFAULT
;
1462 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
1466 case TARGET_SO_SNDTIMEO
:
1467 optname
= SO_SNDTIMEO
;
1469 case TARGET_SO_ATTACH_FILTER
:
1471 struct target_sock_fprog
*tfprog
;
1472 struct target_sock_filter
*tfilter
;
1473 struct sock_fprog fprog
;
1474 struct sock_filter
*filter
;
1477 if (optlen
!= sizeof(*tfprog
)) {
1478 return -TARGET_EINVAL
;
1480 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
1481 return -TARGET_EFAULT
;
1483 if (!lock_user_struct(VERIFY_READ
, tfilter
,
1484 tswapal(tfprog
->filter
), 0)) {
1485 unlock_user_struct(tfprog
, optval_addr
, 1);
1486 return -TARGET_EFAULT
;
1489 fprog
.len
= tswap16(tfprog
->len
);
1490 filter
= malloc(fprog
.len
* sizeof(*filter
));
1491 if (filter
== NULL
) {
1492 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
1493 unlock_user_struct(tfprog
, optval_addr
, 1);
1494 return -TARGET_ENOMEM
;
1496 for (i
= 0; i
< fprog
.len
; i
++) {
1497 filter
[i
].code
= tswap16(tfilter
[i
].code
);
1498 filter
[i
].jt
= tfilter
[i
].jt
;
1499 filter
[i
].jf
= tfilter
[i
].jf
;
1500 filter
[i
].k
= tswap32(tfilter
[i
].k
);
1502 fprog
.filter
= filter
;
1504 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
1505 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
1508 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
1509 unlock_user_struct(tfprog
, optval_addr
, 1);
1512 case TARGET_SO_BINDTODEVICE
:
1514 char *dev_ifname
, *addr_ifname
;
1516 if (optlen
> IFNAMSIZ
- 1) {
1517 optlen
= IFNAMSIZ
- 1;
1519 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
1521 return -TARGET_EFAULT
;
1523 optname
= SO_BINDTODEVICE
;
1524 addr_ifname
= alloca(IFNAMSIZ
);
1525 memcpy(addr_ifname
, dev_ifname
, optlen
);
1526 addr_ifname
[optlen
] = 0;
1527 ret
= get_errno(setsockopt(sockfd
, level
, optname
, addr_ifname
, optlen
));
1528 unlock_user (dev_ifname
, optval_addr
, 0);
1531 /* Options with 'int' argument. */
1532 case TARGET_SO_DEBUG
:
1535 case TARGET_SO_REUSEADDR
:
1536 optname
= SO_REUSEADDR
;
1538 case TARGET_SO_TYPE
:
1541 case TARGET_SO_ERROR
:
1544 case TARGET_SO_DONTROUTE
:
1545 optname
= SO_DONTROUTE
;
1547 case TARGET_SO_BROADCAST
:
1548 optname
= SO_BROADCAST
;
1550 case TARGET_SO_SNDBUF
:
1551 optname
= SO_SNDBUF
;
1553 case TARGET_SO_SNDBUFFORCE
:
1554 optname
= SO_SNDBUFFORCE
;
1556 case TARGET_SO_RCVBUF
:
1557 optname
= SO_RCVBUF
;
1559 case TARGET_SO_RCVBUFFORCE
:
1560 optname
= SO_RCVBUFFORCE
;
1562 case TARGET_SO_KEEPALIVE
:
1563 optname
= SO_KEEPALIVE
;
1565 case TARGET_SO_OOBINLINE
:
1566 optname
= SO_OOBINLINE
;
1568 case TARGET_SO_NO_CHECK
:
1569 optname
= SO_NO_CHECK
;
1571 case TARGET_SO_PRIORITY
:
1572 optname
= SO_PRIORITY
;
1575 case TARGET_SO_BSDCOMPAT
:
1576 optname
= SO_BSDCOMPAT
;
1579 case TARGET_SO_PASSCRED
:
1580 optname
= SO_PASSCRED
;
1582 case TARGET_SO_PASSSEC
:
1583 optname
= SO_PASSSEC
;
1585 case TARGET_SO_TIMESTAMP
:
1586 optname
= SO_TIMESTAMP
;
1588 case TARGET_SO_RCVLOWAT
:
1589 optname
= SO_RCVLOWAT
;
1595 if (optlen
< sizeof(uint32_t))
1596 return -TARGET_EINVAL
;
1598 if (get_user_u32(val
, optval_addr
))
1599 return -TARGET_EFAULT
;
1600 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
1604 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
1605 ret
= -TARGET_ENOPROTOOPT
;
1610 /* do_getsockopt() Must return target values and target errnos. */
1611 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
1612 abi_ulong optval_addr
, abi_ulong optlen
)
1619 case TARGET_SOL_SOCKET
:
1622 /* These don't just return a single integer */
1623 case TARGET_SO_LINGER
:
1624 case TARGET_SO_RCVTIMEO
:
1625 case TARGET_SO_SNDTIMEO
:
1626 case TARGET_SO_PEERNAME
:
1628 case TARGET_SO_PEERCRED
: {
1631 struct target_ucred
*tcr
;
1633 if (get_user_u32(len
, optlen
)) {
1634 return -TARGET_EFAULT
;
1637 return -TARGET_EINVAL
;
1641 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
1649 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
1650 return -TARGET_EFAULT
;
1652 __put_user(cr
.pid
, &tcr
->pid
);
1653 __put_user(cr
.uid
, &tcr
->uid
);
1654 __put_user(cr
.gid
, &tcr
->gid
);
1655 unlock_user_struct(tcr
, optval_addr
, 1);
1656 if (put_user_u32(len
, optlen
)) {
1657 return -TARGET_EFAULT
;
1661 /* Options with 'int' argument. */
1662 case TARGET_SO_DEBUG
:
1665 case TARGET_SO_REUSEADDR
:
1666 optname
= SO_REUSEADDR
;
1668 case TARGET_SO_TYPE
:
1671 case TARGET_SO_ERROR
:
1674 case TARGET_SO_DONTROUTE
:
1675 optname
= SO_DONTROUTE
;
1677 case TARGET_SO_BROADCAST
:
1678 optname
= SO_BROADCAST
;
1680 case TARGET_SO_SNDBUF
:
1681 optname
= SO_SNDBUF
;
1683 case TARGET_SO_RCVBUF
:
1684 optname
= SO_RCVBUF
;
1686 case TARGET_SO_KEEPALIVE
:
1687 optname
= SO_KEEPALIVE
;
1689 case TARGET_SO_OOBINLINE
:
1690 optname
= SO_OOBINLINE
;
1692 case TARGET_SO_NO_CHECK
:
1693 optname
= SO_NO_CHECK
;
1695 case TARGET_SO_PRIORITY
:
1696 optname
= SO_PRIORITY
;
1699 case TARGET_SO_BSDCOMPAT
:
1700 optname
= SO_BSDCOMPAT
;
1703 case TARGET_SO_PASSCRED
:
1704 optname
= SO_PASSCRED
;
1706 case TARGET_SO_TIMESTAMP
:
1707 optname
= SO_TIMESTAMP
;
1709 case TARGET_SO_RCVLOWAT
:
1710 optname
= SO_RCVLOWAT
;
1712 case TARGET_SO_ACCEPTCONN
:
1713 optname
= SO_ACCEPTCONN
;
1720 /* TCP options all take an 'int' value. */
1722 if (get_user_u32(len
, optlen
))
1723 return -TARGET_EFAULT
;
1725 return -TARGET_EINVAL
;
1727 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1730 if (optname
== SO_TYPE
) {
1731 val
= host_to_target_sock_type(val
);
1736 if (put_user_u32(val
, optval_addr
))
1737 return -TARGET_EFAULT
;
1739 if (put_user_u8(val
, optval_addr
))
1740 return -TARGET_EFAULT
;
1742 if (put_user_u32(len
, optlen
))
1743 return -TARGET_EFAULT
;
1750 case IP_ROUTER_ALERT
:
1754 case IP_MTU_DISCOVER
:
1760 case IP_MULTICAST_TTL
:
1761 case IP_MULTICAST_LOOP
:
1762 if (get_user_u32(len
, optlen
))
1763 return -TARGET_EFAULT
;
1765 return -TARGET_EINVAL
;
1767 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1770 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
1772 if (put_user_u32(len
, optlen
)
1773 || put_user_u8(val
, optval_addr
))
1774 return -TARGET_EFAULT
;
1776 if (len
> sizeof(int))
1778 if (put_user_u32(len
, optlen
)
1779 || put_user_u32(val
, optval_addr
))
1780 return -TARGET_EFAULT
;
1784 ret
= -TARGET_ENOPROTOOPT
;
1790 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1792 ret
= -TARGET_EOPNOTSUPP
;
1798 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
1799 int count
, int copy
)
1801 struct target_iovec
*target_vec
;
1803 abi_ulong total_len
, max_len
;
1806 bool bad_address
= false;
1812 if (count
< 0 || count
> IOV_MAX
) {
1817 vec
= calloc(count
, sizeof(struct iovec
));
1823 target_vec
= lock_user(VERIFY_READ
, target_addr
,
1824 count
* sizeof(struct target_iovec
), 1);
1825 if (target_vec
== NULL
) {
1830 /* ??? If host page size > target page size, this will result in a
1831 value larger than what we can actually support. */
1832 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
1835 for (i
= 0; i
< count
; i
++) {
1836 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
1837 abi_long len
= tswapal(target_vec
[i
].iov_len
);
1842 } else if (len
== 0) {
1843 /* Zero length pointer is ignored. */
1844 vec
[i
].iov_base
= 0;
1846 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
1847 /* If the first buffer pointer is bad, this is a fault. But
1848 * subsequent bad buffers will result in a partial write; this
1849 * is realized by filling the vector with null pointers and
1851 if (!vec
[i
].iov_base
) {
1862 if (len
> max_len
- total_len
) {
1863 len
= max_len
- total_len
;
1866 vec
[i
].iov_len
= len
;
1870 unlock_user(target_vec
, target_addr
, 0);
1874 unlock_user(target_vec
, target_addr
, 0);
1881 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
1882 int count
, int copy
)
1884 struct target_iovec
*target_vec
;
1887 target_vec
= lock_user(VERIFY_READ
, target_addr
,
1888 count
* sizeof(struct target_iovec
), 1);
1890 for (i
= 0; i
< count
; i
++) {
1891 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
1892 abi_long len
= tswapal(target_vec
[i
].iov_base
);
1896 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
1898 unlock_user(target_vec
, target_addr
, 0);
1904 static inline int target_to_host_sock_type(int *type
)
1907 int target_type
= *type
;
1909 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
1910 case TARGET_SOCK_DGRAM
:
1911 host_type
= SOCK_DGRAM
;
1913 case TARGET_SOCK_STREAM
:
1914 host_type
= SOCK_STREAM
;
1917 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
1920 if (target_type
& TARGET_SOCK_CLOEXEC
) {
1921 #if defined(SOCK_CLOEXEC)
1922 host_type
|= SOCK_CLOEXEC
;
1924 return -TARGET_EINVAL
;
1927 if (target_type
& TARGET_SOCK_NONBLOCK
) {
1928 #if defined(SOCK_NONBLOCK)
1929 host_type
|= SOCK_NONBLOCK
;
1930 #elif !defined(O_NONBLOCK)
1931 return -TARGET_EINVAL
;
1938 /* Try to emulate socket type flags after socket creation. */
1939 static int sock_flags_fixup(int fd
, int target_type
)
1941 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
1942 if (target_type
& TARGET_SOCK_NONBLOCK
) {
1943 int flags
= fcntl(fd
, F_GETFL
);
1944 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
1946 return -TARGET_EINVAL
;
1953 /* do_socket() Must return target values and target errnos. */
1954 static abi_long
do_socket(int domain
, int type
, int protocol
)
1956 int target_type
= type
;
1959 ret
= target_to_host_sock_type(&type
);
1964 if (domain
== PF_NETLINK
)
1965 return -TARGET_EAFNOSUPPORT
;
1966 ret
= get_errno(socket(domain
, type
, protocol
));
1968 ret
= sock_flags_fixup(ret
, target_type
);
1973 /* do_bind() Must return target values and target errnos. */
1974 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
1980 if ((int)addrlen
< 0) {
1981 return -TARGET_EINVAL
;
1984 addr
= alloca(addrlen
+1);
1986 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1990 return get_errno(bind(sockfd
, addr
, addrlen
));
1993 /* do_connect() Must return target values and target errnos. */
1994 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
2000 if ((int)addrlen
< 0) {
2001 return -TARGET_EINVAL
;
2004 addr
= alloca(addrlen
+1);
2006 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
2010 return get_errno(connect(sockfd
, addr
, addrlen
));
2013 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2014 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
2015 int flags
, int send
)
2021 abi_ulong target_vec
;
2023 if (msgp
->msg_name
) {
2024 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
2025 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
2026 ret
= target_to_host_sockaddr(msg
.msg_name
, tswapal(msgp
->msg_name
),
2032 msg
.msg_name
= NULL
;
2033 msg
.msg_namelen
= 0;
2035 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
2036 msg
.msg_control
= alloca(msg
.msg_controllen
);
2037 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
2039 count
= tswapal(msgp
->msg_iovlen
);
2040 target_vec
= tswapal(msgp
->msg_iov
);
2041 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
2042 target_vec
, count
, send
);
2044 ret
= -host_to_target_errno(errno
);
2047 msg
.msg_iovlen
= count
;
2051 ret
= target_to_host_cmsg(&msg
, msgp
);
2053 ret
= get_errno(sendmsg(fd
, &msg
, flags
));
2055 ret
= get_errno(recvmsg(fd
, &msg
, flags
));
2056 if (!is_error(ret
)) {
2058 ret
= host_to_target_cmsg(msgp
, &msg
);
2059 if (!is_error(ret
)) {
2060 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
2061 if (msg
.msg_name
!= NULL
) {
2062 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
2063 msg
.msg_name
, msg
.msg_namelen
);
2075 unlock_iovec(vec
, target_vec
, count
, !send
);
2080 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
2081 int flags
, int send
)
2084 struct target_msghdr
*msgp
;
2086 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
2090 return -TARGET_EFAULT
;
2092 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
2093 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
2097 #ifdef TARGET_NR_sendmmsg
2098 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2099 * so it might not have this *mmsg-specific flag either.
2101 #ifndef MSG_WAITFORONE
2102 #define MSG_WAITFORONE 0x10000
2105 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
2106 unsigned int vlen
, unsigned int flags
,
2109 struct target_mmsghdr
*mmsgp
;
2113 if (vlen
> UIO_MAXIOV
) {
2117 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
2119 return -TARGET_EFAULT
;
2122 for (i
= 0; i
< vlen
; i
++) {
2123 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
2124 if (is_error(ret
)) {
2127 mmsgp
[i
].msg_len
= tswap32(ret
);
2128 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2129 if (flags
& MSG_WAITFORONE
) {
2130 flags
|= MSG_DONTWAIT
;
2134 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
2136 /* Return number of datagrams sent if we sent any at all;
2137 * otherwise return the error.
2146 /* If we don't have a system accept4() then just call accept.
2147 * The callsites to do_accept4() will ensure that they don't
2148 * pass a non-zero flags argument in this config.
2150 #ifndef CONFIG_ACCEPT4
/* Fallback accept4() for hosts without the real syscall; callers
 * guarantee FLAGS is zero in this configuration, so plain accept()
 * is equivalent.
 */
static inline int accept4(int sockfd, struct sockaddr *addr,
                          socklen_t *addrlen, int flags)
{
    assert(flags == 0);
    return accept(sockfd, addr, addrlen);
}
2159 /* do_accept4() Must return target values and target errnos. */
2160 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
2161 abi_ulong target_addrlen_addr
, int flags
)
2168 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
2170 if (target_addr
== 0) {
2171 return get_errno(accept4(fd
, NULL
, NULL
, host_flags
));
2174 /* linux returns EINVAL if addrlen pointer is invalid */
2175 if (get_user_u32(addrlen
, target_addrlen_addr
))
2176 return -TARGET_EINVAL
;
2178 if ((int)addrlen
< 0) {
2179 return -TARGET_EINVAL
;
2182 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2183 return -TARGET_EINVAL
;
2185 addr
= alloca(addrlen
);
2187 ret
= get_errno(accept4(fd
, addr
, &addrlen
, host_flags
));
2188 if (!is_error(ret
)) {
2189 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2190 if (put_user_u32(addrlen
, target_addrlen_addr
))
2191 ret
= -TARGET_EFAULT
;
2196 /* do_getpeername() Must return target values and target errnos. */
2197 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
2198 abi_ulong target_addrlen_addr
)
2204 if (get_user_u32(addrlen
, target_addrlen_addr
))
2205 return -TARGET_EFAULT
;
2207 if ((int)addrlen
< 0) {
2208 return -TARGET_EINVAL
;
2211 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2212 return -TARGET_EFAULT
;
2214 addr
= alloca(addrlen
);
2216 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
2217 if (!is_error(ret
)) {
2218 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2219 if (put_user_u32(addrlen
, target_addrlen_addr
))
2220 ret
= -TARGET_EFAULT
;
2225 /* do_getsockname() Must return target values and target errnos. */
2226 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
2227 abi_ulong target_addrlen_addr
)
2233 if (get_user_u32(addrlen
, target_addrlen_addr
))
2234 return -TARGET_EFAULT
;
2236 if ((int)addrlen
< 0) {
2237 return -TARGET_EINVAL
;
2240 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2241 return -TARGET_EFAULT
;
2243 addr
= alloca(addrlen
);
2245 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
2246 if (!is_error(ret
)) {
2247 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2248 if (put_user_u32(addrlen
, target_addrlen_addr
))
2249 ret
= -TARGET_EFAULT
;
2254 /* do_socketpair() Must return target values and target errnos. */
2255 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
2256 abi_ulong target_tab_addr
)
2261 target_to_host_sock_type(&type
);
2263 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
2264 if (!is_error(ret
)) {
2265 if (put_user_s32(tab
[0], target_tab_addr
)
2266 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
2267 ret
= -TARGET_EFAULT
;
2272 /* do_sendto() Must return target values and target errnos. */
2273 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
2274 abi_ulong target_addr
, socklen_t addrlen
)
2280 if ((int)addrlen
< 0) {
2281 return -TARGET_EINVAL
;
2284 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
2286 return -TARGET_EFAULT
;
2288 addr
= alloca(addrlen
+1);
2289 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
2291 unlock_user(host_msg
, msg
, 0);
2294 ret
= get_errno(sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
2296 ret
= get_errno(send(fd
, host_msg
, len
, flags
));
2298 unlock_user(host_msg
, msg
, 0);
2302 /* do_recvfrom() Must return target values and target errnos. */
2303 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
2304 abi_ulong target_addr
,
2305 abi_ulong target_addrlen
)
2312 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
2314 return -TARGET_EFAULT
;
2316 if (get_user_u32(addrlen
, target_addrlen
)) {
2317 ret
= -TARGET_EFAULT
;
2320 if ((int)addrlen
< 0) {
2321 ret
= -TARGET_EINVAL
;
2324 addr
= alloca(addrlen
);
2325 ret
= get_errno(recvfrom(fd
, host_msg
, len
, flags
, addr
, &addrlen
));
2327 addr
= NULL
; /* To keep compiler quiet. */
2328 ret
= get_errno(qemu_recv(fd
, host_msg
, len
, flags
));
2330 if (!is_error(ret
)) {
2332 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2333 if (put_user_u32(addrlen
, target_addrlen
)) {
2334 ret
= -TARGET_EFAULT
;
2338 unlock_user(host_msg
, msg
, len
);
2341 unlock_user(host_msg
, msg
, 0);
2346 #ifdef TARGET_NR_socketcall
2347 /* do_socketcall() Must return target values and target errnos. */
2348 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
2350 static const unsigned ac
[] = { /* number of arguments per call */
2351 [SOCKOP_socket
] = 3, /* domain, type, protocol */
2352 [SOCKOP_bind
] = 3, /* sockfd, addr, addrlen */
2353 [SOCKOP_connect
] = 3, /* sockfd, addr, addrlen */
2354 [SOCKOP_listen
] = 2, /* sockfd, backlog */
2355 [SOCKOP_accept
] = 3, /* sockfd, addr, addrlen */
2356 [SOCKOP_accept4
] = 4, /* sockfd, addr, addrlen, flags */
2357 [SOCKOP_getsockname
] = 3, /* sockfd, addr, addrlen */
2358 [SOCKOP_getpeername
] = 3, /* sockfd, addr, addrlen */
2359 [SOCKOP_socketpair
] = 4, /* domain, type, protocol, tab */
2360 [SOCKOP_send
] = 4, /* sockfd, msg, len, flags */
2361 [SOCKOP_recv
] = 4, /* sockfd, msg, len, flags */
2362 [SOCKOP_sendto
] = 6, /* sockfd, msg, len, flags, addr, addrlen */
2363 [SOCKOP_recvfrom
] = 6, /* sockfd, msg, len, flags, addr, addrlen */
2364 [SOCKOP_shutdown
] = 2, /* sockfd, how */
2365 [SOCKOP_sendmsg
] = 3, /* sockfd, msg, flags */
2366 [SOCKOP_recvmsg
] = 3, /* sockfd, msg, flags */
2367 [SOCKOP_setsockopt
] = 5, /* sockfd, level, optname, optval, optlen */
2368 [SOCKOP_getsockopt
] = 5, /* sockfd, level, optname, optval, optlen */
2370 abi_long a
[6]; /* max 6 args */
2372 /* first, collect the arguments in a[] according to ac[] */
2373 if (num
>= 0 && num
< ARRAY_SIZE(ac
)) {
2375 assert(ARRAY_SIZE(a
) >= ac
[num
]); /* ensure we have space for args */
2376 for (i
= 0; i
< ac
[num
]; ++i
) {
2377 if (get_user_ual(a
[i
], vptr
+ i
* sizeof(abi_long
)) != 0) {
2378 return -TARGET_EFAULT
;
2383 /* now when we have the args, actually handle the call */
2385 case SOCKOP_socket
: /* domain, type, protocol */
2386 return do_socket(a
[0], a
[1], a
[2]);
2387 case SOCKOP_bind
: /* sockfd, addr, addrlen */
2388 return do_bind(a
[0], a
[1], a
[2]);
2389 case SOCKOP_connect
: /* sockfd, addr, addrlen */
2390 return do_connect(a
[0], a
[1], a
[2]);
2391 case SOCKOP_listen
: /* sockfd, backlog */
2392 return get_errno(listen(a
[0], a
[1]));
2393 case SOCKOP_accept
: /* sockfd, addr, addrlen */
2394 return do_accept4(a
[0], a
[1], a
[2], 0);
2395 case SOCKOP_accept4
: /* sockfd, addr, addrlen, flags */
2396 return do_accept4(a
[0], a
[1], a
[2], a
[3]);
2397 case SOCKOP_getsockname
: /* sockfd, addr, addrlen */
2398 return do_getsockname(a
[0], a
[1], a
[2]);
2399 case SOCKOP_getpeername
: /* sockfd, addr, addrlen */
2400 return do_getpeername(a
[0], a
[1], a
[2]);
2401 case SOCKOP_socketpair
: /* domain, type, protocol, tab */
2402 return do_socketpair(a
[0], a
[1], a
[2], a
[3]);
2403 case SOCKOP_send
: /* sockfd, msg, len, flags */
2404 return do_sendto(a
[0], a
[1], a
[2], a
[3], 0, 0);
2405 case SOCKOP_recv
: /* sockfd, msg, len, flags */
2406 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], 0, 0);
2407 case SOCKOP_sendto
: /* sockfd, msg, len, flags, addr, addrlen */
2408 return do_sendto(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
2409 case SOCKOP_recvfrom
: /* sockfd, msg, len, flags, addr, addrlen */
2410 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
2411 case SOCKOP_shutdown
: /* sockfd, how */
2412 return get_errno(shutdown(a
[0], a
[1]));
2413 case SOCKOP_sendmsg
: /* sockfd, msg, flags */
2414 return do_sendrecvmsg(a
[0], a
[1], a
[2], 1);
2415 case SOCKOP_recvmsg
: /* sockfd, msg, flags */
2416 return do_sendrecvmsg(a
[0], a
[1], a
[2], 0);
2417 case SOCKOP_setsockopt
: /* sockfd, level, optname, optval, optlen */
2418 return do_setsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
2419 case SOCKOP_getsockopt
: /* sockfd, level, optname, optval, optlen */
2420 return do_getsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
2422 gemu_log("Unsupported socketcall: %d\n", num
);
2423 return -TARGET_ENOSYS
;
2428 #define N_SHM_REGIONS 32
2430 static struct shm_region
{
2433 } shm_regions
[N_SHM_REGIONS
];
2435 struct target_semid_ds
2437 struct target_ipc_perm sem_perm
;
2438 abi_ulong sem_otime
;
2439 #if !defined(TARGET_PPC64)
2440 abi_ulong __unused1
;
2442 abi_ulong sem_ctime
;
2443 #if !defined(TARGET_PPC64)
2444 abi_ulong __unused2
;
2446 abi_ulong sem_nsems
;
2447 abi_ulong __unused3
;
2448 abi_ulong __unused4
;
2451 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
2452 abi_ulong target_addr
)
2454 struct target_ipc_perm
*target_ip
;
2455 struct target_semid_ds
*target_sd
;
2457 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2458 return -TARGET_EFAULT
;
2459 target_ip
= &(target_sd
->sem_perm
);
2460 host_ip
->__key
= tswap32(target_ip
->__key
);
2461 host_ip
->uid
= tswap32(target_ip
->uid
);
2462 host_ip
->gid
= tswap32(target_ip
->gid
);
2463 host_ip
->cuid
= tswap32(target_ip
->cuid
);
2464 host_ip
->cgid
= tswap32(target_ip
->cgid
);
2465 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2466 host_ip
->mode
= tswap32(target_ip
->mode
);
2468 host_ip
->mode
= tswap16(target_ip
->mode
);
2470 #if defined(TARGET_PPC)
2471 host_ip
->__seq
= tswap32(target_ip
->__seq
);
2473 host_ip
->__seq
= tswap16(target_ip
->__seq
);
2475 unlock_user_struct(target_sd
, target_addr
, 0);
2479 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
2480 struct ipc_perm
*host_ip
)
2482 struct target_ipc_perm
*target_ip
;
2483 struct target_semid_ds
*target_sd
;
2485 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2486 return -TARGET_EFAULT
;
2487 target_ip
= &(target_sd
->sem_perm
);
2488 target_ip
->__key
= tswap32(host_ip
->__key
);
2489 target_ip
->uid
= tswap32(host_ip
->uid
);
2490 target_ip
->gid
= tswap32(host_ip
->gid
);
2491 target_ip
->cuid
= tswap32(host_ip
->cuid
);
2492 target_ip
->cgid
= tswap32(host_ip
->cgid
);
2493 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2494 target_ip
->mode
= tswap32(host_ip
->mode
);
2496 target_ip
->mode
= tswap16(host_ip
->mode
);
2498 #if defined(TARGET_PPC)
2499 target_ip
->__seq
= tswap32(host_ip
->__seq
);
2501 target_ip
->__seq
= tswap16(host_ip
->__seq
);
2503 unlock_user_struct(target_sd
, target_addr
, 1);
2507 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
2508 abi_ulong target_addr
)
2510 struct target_semid_ds
*target_sd
;
2512 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2513 return -TARGET_EFAULT
;
2514 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
2515 return -TARGET_EFAULT
;
2516 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
2517 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
2518 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
2519 unlock_user_struct(target_sd
, target_addr
, 0);
2523 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
2524 struct semid_ds
*host_sd
)
2526 struct target_semid_ds
*target_sd
;
2528 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2529 return -TARGET_EFAULT
;
2530 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
2531 return -TARGET_EFAULT
;
2532 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
2533 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
2534 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
2535 unlock_user_struct(target_sd
, target_addr
, 1);
2539 struct target_seminfo
{
2552 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
2553 struct seminfo
*host_seminfo
)
2555 struct target_seminfo
*target_seminfo
;
2556 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
2557 return -TARGET_EFAULT
;
2558 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
2559 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
2560 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
2561 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
2562 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
2563 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
2564 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
2565 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
2566 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
2567 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
2568 unlock_user_struct(target_seminfo
, target_addr
, 1);
2574 struct semid_ds
*buf
;
2575 unsigned short *array
;
2576 struct seminfo
*__buf
;
2579 union target_semun
{
2586 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
2587 abi_ulong target_addr
)
2590 unsigned short *array
;
2592 struct semid_ds semid_ds
;
2595 semun
.buf
= &semid_ds
;
2597 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
2599 return get_errno(ret
);
2601 nsems
= semid_ds
.sem_nsems
;
2603 *host_array
= malloc(nsems
*sizeof(unsigned short));
2605 return -TARGET_ENOMEM
;
2607 array
= lock_user(VERIFY_READ
, target_addr
,
2608 nsems
*sizeof(unsigned short), 1);
2611 return -TARGET_EFAULT
;
2614 for(i
=0; i
<nsems
; i
++) {
2615 __get_user((*host_array
)[i
], &array
[i
]);
2617 unlock_user(array
, target_addr
, 0);
2622 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
2623 unsigned short **host_array
)
2626 unsigned short *array
;
2628 struct semid_ds semid_ds
;
2631 semun
.buf
= &semid_ds
;
2633 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
2635 return get_errno(ret
);
2637 nsems
= semid_ds
.sem_nsems
;
2639 array
= lock_user(VERIFY_WRITE
, target_addr
,
2640 nsems
*sizeof(unsigned short), 0);
2642 return -TARGET_EFAULT
;
2644 for(i
=0; i
<nsems
; i
++) {
2645 __put_user((*host_array
)[i
], &array
[i
]);
2648 unlock_user(array
, target_addr
, 1);
2653 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
2654 union target_semun target_su
)
2657 struct semid_ds dsarg
;
2658 unsigned short *array
= NULL
;
2659 struct seminfo seminfo
;
2660 abi_long ret
= -TARGET_EINVAL
;
2667 /* In 64 bit cross-endian situations, we will erroneously pick up
2668 * the wrong half of the union for the "val" element. To rectify
2669 * this, the entire 8-byte structure is byteswapped, followed by
2670 * a swap of the 4 byte val field. In other cases, the data is
2671 * already in proper host byte order. */
2672 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
2673 target_su
.buf
= tswapal(target_su
.buf
);
2674 arg
.val
= tswap32(target_su
.val
);
2676 arg
.val
= target_su
.val
;
2678 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2682 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
2686 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2687 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
2694 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
2698 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2699 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
2705 arg
.__buf
= &seminfo
;
2706 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2707 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
2715 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
2722 struct target_sembuf
{
2723 unsigned short sem_num
;
2728 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
2729 abi_ulong target_addr
,
2732 struct target_sembuf
*target_sembuf
;
2735 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
2736 nsops
*sizeof(struct target_sembuf
), 1);
2738 return -TARGET_EFAULT
;
2740 for(i
=0; i
<nsops
; i
++) {
2741 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
2742 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
2743 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
2746 unlock_user(target_sembuf
, target_addr
, 0);
2751 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
2753 struct sembuf sops
[nsops
];
2755 if (target_to_host_sembuf(sops
, ptr
, nsops
))
2756 return -TARGET_EFAULT
;
2758 return get_errno(semop(semid
, sops
, nsops
));
2761 struct target_msqid_ds
2763 struct target_ipc_perm msg_perm
;
2764 abi_ulong msg_stime
;
2765 #if TARGET_ABI_BITS == 32
2766 abi_ulong __unused1
;
2768 abi_ulong msg_rtime
;
2769 #if TARGET_ABI_BITS == 32
2770 abi_ulong __unused2
;
2772 abi_ulong msg_ctime
;
2773 #if TARGET_ABI_BITS == 32
2774 abi_ulong __unused3
;
2776 abi_ulong __msg_cbytes
;
2778 abi_ulong msg_qbytes
;
2779 abi_ulong msg_lspid
;
2780 abi_ulong msg_lrpid
;
2781 abi_ulong __unused4
;
2782 abi_ulong __unused5
;
2785 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
2786 abi_ulong target_addr
)
2788 struct target_msqid_ds
*target_md
;
2790 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
2791 return -TARGET_EFAULT
;
2792 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
2793 return -TARGET_EFAULT
;
2794 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
2795 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
2796 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
2797 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
2798 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
2799 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
2800 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
2801 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
2802 unlock_user_struct(target_md
, target_addr
, 0);
2806 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
2807 struct msqid_ds
*host_md
)
2809 struct target_msqid_ds
*target_md
;
2811 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
2812 return -TARGET_EFAULT
;
2813 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
2814 return -TARGET_EFAULT
;
2815 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
2816 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
2817 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
2818 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
2819 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
2820 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
2821 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
2822 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
2823 unlock_user_struct(target_md
, target_addr
, 1);
2827 struct target_msginfo
{
2835 unsigned short int msgseg
;
2838 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
2839 struct msginfo
*host_msginfo
)
2841 struct target_msginfo
*target_msginfo
;
2842 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
2843 return -TARGET_EFAULT
;
2844 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
2845 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
2846 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
2847 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
2848 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
2849 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
2850 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
2851 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
2852 unlock_user_struct(target_msginfo
, target_addr
, 1);
2856 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
2858 struct msqid_ds dsarg
;
2859 struct msginfo msginfo
;
2860 abi_long ret
= -TARGET_EINVAL
;
2868 if (target_to_host_msqid_ds(&dsarg
,ptr
))
2869 return -TARGET_EFAULT
;
2870 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
2871 if (host_to_target_msqid_ds(ptr
,&dsarg
))
2872 return -TARGET_EFAULT
;
2875 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
2879 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
2880 if (host_to_target_msginfo(ptr
, &msginfo
))
2881 return -TARGET_EFAULT
;
2888 struct target_msgbuf
{
2893 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
2894 ssize_t msgsz
, int msgflg
)
2896 struct target_msgbuf
*target_mb
;
2897 struct msgbuf
*host_mb
;
2901 return -TARGET_EINVAL
;
2904 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
2905 return -TARGET_EFAULT
;
2906 host_mb
= malloc(msgsz
+sizeof(long));
2908 unlock_user_struct(target_mb
, msgp
, 0);
2909 return -TARGET_ENOMEM
;
2911 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
2912 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
2913 ret
= get_errno(msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
2915 unlock_user_struct(target_mb
, msgp
, 0);
2920 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
2921 unsigned int msgsz
, abi_long msgtyp
,
2924 struct target_msgbuf
*target_mb
;
2926 struct msgbuf
*host_mb
;
2929 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
2930 return -TARGET_EFAULT
;
2932 host_mb
= g_malloc(msgsz
+sizeof(long));
2933 ret
= get_errno(msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
2936 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
2937 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
2938 if (!target_mtext
) {
2939 ret
= -TARGET_EFAULT
;
2942 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
2943 unlock_user(target_mtext
, target_mtext_addr
, ret
);
2946 target_mb
->mtype
= tswapal(host_mb
->mtype
);
2950 unlock_user_struct(target_mb
, msgp
, 1);
2955 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
2956 abi_ulong target_addr
)
2958 struct target_shmid_ds
*target_sd
;
2960 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2961 return -TARGET_EFAULT
;
2962 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
2963 return -TARGET_EFAULT
;
2964 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
2965 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
2966 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
2967 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
2968 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
2969 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
2970 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
2971 unlock_user_struct(target_sd
, target_addr
, 0);
2975 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
2976 struct shmid_ds
*host_sd
)
2978 struct target_shmid_ds
*target_sd
;
2980 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2981 return -TARGET_EFAULT
;
2982 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
2983 return -TARGET_EFAULT
;
2984 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
2985 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
2986 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
2987 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
2988 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
2989 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
2990 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
2991 unlock_user_struct(target_sd
, target_addr
, 1);
2995 struct target_shminfo
{
3003 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
3004 struct shminfo
*host_shminfo
)
3006 struct target_shminfo
*target_shminfo
;
3007 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
3008 return -TARGET_EFAULT
;
3009 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
3010 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
3011 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
3012 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
3013 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
3014 unlock_user_struct(target_shminfo
, target_addr
, 1);
3018 struct target_shm_info
{
3023 abi_ulong swap_attempts
;
3024 abi_ulong swap_successes
;
3027 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
3028 struct shm_info
*host_shm_info
)
3030 struct target_shm_info
*target_shm_info
;
3031 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
3032 return -TARGET_EFAULT
;
3033 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
3034 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
3035 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
3036 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
3037 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
3038 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
3039 unlock_user_struct(target_shm_info
, target_addr
, 1);
3043 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
3045 struct shmid_ds dsarg
;
3046 struct shminfo shminfo
;
3047 struct shm_info shm_info
;
3048 abi_long ret
= -TARGET_EINVAL
;
3056 if (target_to_host_shmid_ds(&dsarg
, buf
))
3057 return -TARGET_EFAULT
;
3058 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
3059 if (host_to_target_shmid_ds(buf
, &dsarg
))
3060 return -TARGET_EFAULT
;
3063 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
3064 if (host_to_target_shminfo(buf
, &shminfo
))
3065 return -TARGET_EFAULT
;
3068 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
3069 if (host_to_target_shm_info(buf
, &shm_info
))
3070 return -TARGET_EFAULT
;
3075 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
3082 static inline abi_ulong
do_shmat(int shmid
, abi_ulong shmaddr
, int shmflg
)
3086 struct shmid_ds shm_info
;
3089 /* find out the length of the shared memory segment */
3090 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
3091 if (is_error(ret
)) {
3092 /* can't get length, bail out */
3099 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
3101 abi_ulong mmap_start
;
3103 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
3105 if (mmap_start
== -1) {
3107 host_raddr
= (void *)-1;
3109 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
3112 if (host_raddr
== (void *)-1) {
3114 return get_errno((long)host_raddr
);
3116 raddr
=h2g((unsigned long)host_raddr
);
3118 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
3119 PAGE_VALID
| PAGE_READ
|
3120 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
3122 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
3123 if (shm_regions
[i
].start
== 0) {
3124 shm_regions
[i
].start
= raddr
;
3125 shm_regions
[i
].size
= shm_info
.shm_segsz
;
3135 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
3139 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
3140 if (shm_regions
[i
].start
== shmaddr
) {
3141 shm_regions
[i
].start
= 0;
3142 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
3147 return get_errno(shmdt(g2h(shmaddr
)));
3150 #ifdef TARGET_NR_ipc
3151 /* ??? This only works with linear mappings. */
3152 /* do_ipc() must return target values and target errnos. */
3153 static abi_long
do_ipc(unsigned int call
, abi_long first
,
3154 abi_long second
, abi_long third
,
3155 abi_long ptr
, abi_long fifth
)
3160 version
= call
>> 16;
3165 ret
= do_semop(first
, ptr
, second
);
3169 ret
= get_errno(semget(first
, second
, third
));
3172 case IPCOP_semctl
: {
3173 /* The semun argument to semctl is passed by value, so dereference the
3176 get_user_ual(atptr
, ptr
);
3177 ret
= do_semctl(first
, second
, third
,
3178 (union target_semun
) atptr
);
3183 ret
= get_errno(msgget(first
, second
));
3187 ret
= do_msgsnd(first
, ptr
, second
, third
);
3191 ret
= do_msgctl(first
, second
, ptr
);
3198 struct target_ipc_kludge
{
3203 if (!lock_user_struct(VERIFY_READ
, tmp
, ptr
, 1)) {
3204 ret
= -TARGET_EFAULT
;
3208 ret
= do_msgrcv(first
, tswapal(tmp
->msgp
), second
, tswapal(tmp
->msgtyp
), third
);
3210 unlock_user_struct(tmp
, ptr
, 0);
3214 ret
= do_msgrcv(first
, ptr
, second
, fifth
, third
);
3223 raddr
= do_shmat(first
, ptr
, second
);
3224 if (is_error(raddr
))
3225 return get_errno(raddr
);
3226 if (put_user_ual(raddr
, third
))
3227 return -TARGET_EFAULT
;
3231 ret
= -TARGET_EINVAL
;
3236 ret
= do_shmdt(ptr
);
3240 /* IPC_* flag values are the same on all linux platforms */
3241 ret
= get_errno(shmget(first
, second
, third
));
3244 /* IPC_* and SHM_* command values are the same on all linux platforms */
3246 ret
= do_shmctl(first
, second
, ptr
);
3249 gemu_log("Unsupported ipc call: %d (version %d)\n", call
, version
);
3250 ret
= -TARGET_ENOSYS
;
3257 /* kernel structure types definitions */
3259 #define STRUCT(name, ...) STRUCT_ ## name,
3260 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3262 #include "syscall_types.h"
3265 #undef STRUCT_SPECIAL
3267 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3268 #define STRUCT_SPECIAL(name)
3269 #include "syscall_types.h"
3271 #undef STRUCT_SPECIAL
3273 typedef struct IOCTLEntry IOCTLEntry
;
3275 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3276 int fd
, abi_long cmd
, abi_long arg
);
3279 unsigned int target_cmd
;
3280 unsigned int host_cmd
;
3283 do_ioctl_fn
*do_ioctl
;
3284 const argtype arg_type
[5];
3287 #define IOC_R 0x0001
3288 #define IOC_W 0x0002
3289 #define IOC_RW (IOC_R | IOC_W)
3291 #define MAX_STRUCT_SIZE 4096
3293 #ifdef CONFIG_FIEMAP
3294 /* So fiemap access checks don't overflow on 32 bit systems.
3295 * This is very slightly smaller than the limit imposed by
3296 * the underlying kernel.
3298 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3299 / sizeof(struct fiemap_extent))
3301 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3302 int fd
, abi_long cmd
, abi_long arg
)
3304 /* The parameter for this ioctl is a struct fiemap followed
3305 * by an array of struct fiemap_extent whose size is set
3306 * in fiemap->fm_extent_count. The array is filled in by the
3309 int target_size_in
, target_size_out
;
3311 const argtype
*arg_type
= ie
->arg_type
;
3312 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
3315 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
3319 assert(arg_type
[0] == TYPE_PTR
);
3320 assert(ie
->access
== IOC_RW
);
3322 target_size_in
= thunk_type_size(arg_type
, 0);
3323 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
3325 return -TARGET_EFAULT
;
3327 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3328 unlock_user(argptr
, arg
, 0);
3329 fm
= (struct fiemap
*)buf_temp
;
3330 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
3331 return -TARGET_EINVAL
;
3334 outbufsz
= sizeof (*fm
) +
3335 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
3337 if (outbufsz
> MAX_STRUCT_SIZE
) {
3338 /* We can't fit all the extents into the fixed size buffer.
3339 * Allocate one that is large enough and use it instead.
3341 fm
= malloc(outbufsz
);
3343 return -TARGET_ENOMEM
;
3345 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
3348 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, fm
));
3349 if (!is_error(ret
)) {
3350 target_size_out
= target_size_in
;
3351 /* An extent_count of 0 means we were only counting the extents
3352 * so there are no structs to copy
3354 if (fm
->fm_extent_count
!= 0) {
3355 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
3357 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
3359 ret
= -TARGET_EFAULT
;
3361 /* Convert the struct fiemap */
3362 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
3363 if (fm
->fm_extent_count
!= 0) {
3364 p
= argptr
+ target_size_in
;
3365 /* ...and then all the struct fiemap_extents */
3366 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
3367 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
3372 unlock_user(argptr
, arg
, target_size_out
);
3382 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3383 int fd
, abi_long cmd
, abi_long arg
)
3385 const argtype
*arg_type
= ie
->arg_type
;
3389 struct ifconf
*host_ifconf
;
3391 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
3392 int target_ifreq_size
;
3397 abi_long target_ifc_buf
;
3401 assert(arg_type
[0] == TYPE_PTR
);
3402 assert(ie
->access
== IOC_RW
);
3405 target_size
= thunk_type_size(arg_type
, 0);
3407 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3409 return -TARGET_EFAULT
;
3410 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3411 unlock_user(argptr
, arg
, 0);
3413 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
3414 target_ifc_len
= host_ifconf
->ifc_len
;
3415 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
3417 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
3418 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
3419 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
3421 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
3422 if (outbufsz
> MAX_STRUCT_SIZE
) {
3423 /* We can't fit all the extents into the fixed size buffer.
3424 * Allocate one that is large enough and use it instead.
3426 host_ifconf
= malloc(outbufsz
);
3428 return -TARGET_ENOMEM
;
3430 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
3433 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
3435 host_ifconf
->ifc_len
= host_ifc_len
;
3436 host_ifconf
->ifc_buf
= host_ifc_buf
;
3438 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, host_ifconf
));
3439 if (!is_error(ret
)) {
3440 /* convert host ifc_len to target ifc_len */
3442 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
3443 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
3444 host_ifconf
->ifc_len
= target_ifc_len
;
3446 /* restore target ifc_buf */
3448 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
3450 /* copy struct ifconf to target user */
3452 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3454 return -TARGET_EFAULT
;
3455 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
3456 unlock_user(argptr
, arg
, target_size
);
3458 /* copy ifreq[] to target user */
3460 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
3461 for (i
= 0; i
< nb_ifreq
; i
++) {
3462 thunk_convert(argptr
+ i
* target_ifreq_size
,
3463 host_ifc_buf
+ i
* sizeof(struct ifreq
),
3464 ifreq_arg_type
, THUNK_TARGET
);
3466 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
3476 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
3477 abi_long cmd
, abi_long arg
)
3480 struct dm_ioctl
*host_dm
;
3481 abi_long guest_data
;
3482 uint32_t guest_data_size
;
3484 const argtype
*arg_type
= ie
->arg_type
;
3486 void *big_buf
= NULL
;
3490 target_size
= thunk_type_size(arg_type
, 0);
3491 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3493 ret
= -TARGET_EFAULT
;
3496 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3497 unlock_user(argptr
, arg
, 0);
3499 /* buf_temp is too small, so fetch things into a bigger buffer */
3500 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
3501 memcpy(big_buf
, buf_temp
, target_size
);
3505 guest_data
= arg
+ host_dm
->data_start
;
3506 if ((guest_data
- arg
) < 0) {
3510 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
3511 host_data
= (char*)host_dm
+ host_dm
->data_start
;
3513 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
3514 switch (ie
->host_cmd
) {
3516 case DM_LIST_DEVICES
:
3519 case DM_DEV_SUSPEND
:
3522 case DM_TABLE_STATUS
:
3523 case DM_TABLE_CLEAR
:
3525 case DM_LIST_VERSIONS
:
3529 case DM_DEV_SET_GEOMETRY
:
3530 /* data contains only strings */
3531 memcpy(host_data
, argptr
, guest_data_size
);
3534 memcpy(host_data
, argptr
, guest_data_size
);
3535 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
3539 void *gspec
= argptr
;
3540 void *cur_data
= host_data
;
3541 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
3542 int spec_size
= thunk_type_size(arg_type
, 0);
3545 for (i
= 0; i
< host_dm
->target_count
; i
++) {
3546 struct dm_target_spec
*spec
= cur_data
;
3550 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
3551 slen
= strlen((char*)gspec
+ spec_size
) + 1;
3553 spec
->next
= sizeof(*spec
) + slen
;
3554 strcpy((char*)&spec
[1], gspec
+ spec_size
);
3556 cur_data
+= spec
->next
;
3561 ret
= -TARGET_EINVAL
;
3564 unlock_user(argptr
, guest_data
, 0);
3566 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3567 if (!is_error(ret
)) {
3568 guest_data
= arg
+ host_dm
->data_start
;
3569 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
3570 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
3571 switch (ie
->host_cmd
) {
3576 case DM_DEV_SUSPEND
:
3579 case DM_TABLE_CLEAR
:
3581 case DM_DEV_SET_GEOMETRY
:
3582 /* no return data */
3584 case DM_LIST_DEVICES
:
3586 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
3587 uint32_t remaining_data
= guest_data_size
;
3588 void *cur_data
= argptr
;
3589 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
3590 int nl_size
= 12; /* can't use thunk_size due to alignment */
3593 uint32_t next
= nl
->next
;
3595 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
3597 if (remaining_data
< nl
->next
) {
3598 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3601 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
3602 strcpy(cur_data
+ nl_size
, nl
->name
);
3603 cur_data
+= nl
->next
;
3604 remaining_data
-= nl
->next
;
3608 nl
= (void*)nl
+ next
;
3613 case DM_TABLE_STATUS
:
3615 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
3616 void *cur_data
= argptr
;
3617 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
3618 int spec_size
= thunk_type_size(arg_type
, 0);
3621 for (i
= 0; i
< host_dm
->target_count
; i
++) {
3622 uint32_t next
= spec
->next
;
3623 int slen
= strlen((char*)&spec
[1]) + 1;
3624 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
3625 if (guest_data_size
< spec
->next
) {
3626 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3629 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
3630 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
3631 cur_data
= argptr
+ spec
->next
;
3632 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
3638 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
3639 int count
= *(uint32_t*)hdata
;
3640 uint64_t *hdev
= hdata
+ 8;
3641 uint64_t *gdev
= argptr
+ 8;
3644 *(uint32_t*)argptr
= tswap32(count
);
3645 for (i
= 0; i
< count
; i
++) {
3646 *gdev
= tswap64(*hdev
);
3652 case DM_LIST_VERSIONS
:
3654 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
3655 uint32_t remaining_data
= guest_data_size
;
3656 void *cur_data
= argptr
;
3657 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
3658 int vers_size
= thunk_type_size(arg_type
, 0);
3661 uint32_t next
= vers
->next
;
3663 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
3665 if (remaining_data
< vers
->next
) {
3666 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3669 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
3670 strcpy(cur_data
+ vers_size
, vers
->name
);
3671 cur_data
+= vers
->next
;
3672 remaining_data
-= vers
->next
;
3676 vers
= (void*)vers
+ next
;
3681 ret
= -TARGET_EINVAL
;
3684 unlock_user(argptr
, guest_data
, guest_data_size
);
3686 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3688 ret
= -TARGET_EFAULT
;
3691 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3692 unlock_user(argptr
, arg
, target_size
);
3699 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3700 int fd
, abi_long cmd
, abi_long arg
)
3702 const argtype
*arg_type
= ie
->arg_type
;
3703 const StructEntry
*se
;
3704 const argtype
*field_types
;
3705 const int *dst_offsets
, *src_offsets
;
3708 abi_ulong
*target_rt_dev_ptr
;
3709 unsigned long *host_rt_dev_ptr
;
3713 assert(ie
->access
== IOC_W
);
3714 assert(*arg_type
== TYPE_PTR
);
3716 assert(*arg_type
== TYPE_STRUCT
);
3717 target_size
= thunk_type_size(arg_type
, 0);
3718 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3720 return -TARGET_EFAULT
;
3723 assert(*arg_type
== (int)STRUCT_rtentry
);
3724 se
= struct_entries
+ *arg_type
++;
3725 assert(se
->convert
[0] == NULL
);
3726 /* convert struct here to be able to catch rt_dev string */
3727 field_types
= se
->field_types
;
3728 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
3729 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
3730 for (i
= 0; i
< se
->nb_fields
; i
++) {
3731 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
3732 assert(*field_types
== TYPE_PTRVOID
);
3733 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
3734 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
3735 if (*target_rt_dev_ptr
!= 0) {
3736 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
3737 tswapal(*target_rt_dev_ptr
));
3738 if (!*host_rt_dev_ptr
) {
3739 unlock_user(argptr
, arg
, 0);
3740 return -TARGET_EFAULT
;
3743 *host_rt_dev_ptr
= 0;
3748 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
3749 argptr
+ src_offsets
[i
],
3750 field_types
, THUNK_HOST
);
3752 unlock_user(argptr
, arg
, 0);
3754 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3755 if (*host_rt_dev_ptr
!= 0) {
3756 unlock_user((void *)*host_rt_dev_ptr
,
3757 *target_rt_dev_ptr
, 0);
3762 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3763 int fd
, abi_long cmd
, abi_long arg
)
3765 int sig
= target_to_host_signal(arg
);
3766 return get_errno(ioctl(fd
, ie
->host_cmd
, sig
));
3769 static IOCTLEntry ioctl_entries
[] = {
3770 #define IOCTL(cmd, access, ...) \
3771 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3772 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3773 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3778 /* ??? Implement proper locking for ioctls. */
3779 /* do_ioctl() Must return target values and target errnos. */
3780 static abi_long
do_ioctl(int fd
, abi_long cmd
, abi_long arg
)
3782 const IOCTLEntry
*ie
;
3783 const argtype
*arg_type
;
3785 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
3791 if (ie
->target_cmd
== 0) {
3792 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
3793 return -TARGET_ENOSYS
;
3795 if (ie
->target_cmd
== cmd
)
3799 arg_type
= ie
->arg_type
;
3801 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
3804 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
3807 switch(arg_type
[0]) {
3810 ret
= get_errno(ioctl(fd
, ie
->host_cmd
));
3815 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, arg
));
3819 target_size
= thunk_type_size(arg_type
, 0);
3820 switch(ie
->access
) {
3822 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3823 if (!is_error(ret
)) {
3824 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3826 return -TARGET_EFAULT
;
3827 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3828 unlock_user(argptr
, arg
, target_size
);
3832 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3834 return -TARGET_EFAULT
;
3835 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3836 unlock_user(argptr
, arg
, 0);
3837 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3841 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3843 return -TARGET_EFAULT
;
3844 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3845 unlock_user(argptr
, arg
, 0);
3846 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3847 if (!is_error(ret
)) {
3848 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3850 return -TARGET_EFAULT
;
3851 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3852 unlock_user(argptr
, arg
, target_size
);
3858 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3859 (long)cmd
, arg_type
[0]);
3860 ret
= -TARGET_ENOSYS
;
3866 static const bitmask_transtbl iflag_tbl
[] = {
3867 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
3868 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
3869 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
3870 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
3871 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
3872 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
3873 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
3874 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
3875 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
3876 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
3877 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
3878 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
3879 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
3880 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
3884 static const bitmask_transtbl oflag_tbl
[] = {
3885 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
3886 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
3887 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
3888 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
3889 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
3890 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
3891 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
3892 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
3893 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
3894 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
3895 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
3896 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
3897 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
3898 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
3899 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
3900 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
3901 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
3902 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
3903 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
3904 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
3905 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
3906 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
3907 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
3908 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
3912 static const bitmask_transtbl cflag_tbl
[] = {
3913 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
3914 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
3915 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
3916 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
3917 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
3918 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
3919 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
3920 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
3921 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
3922 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
3923 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
3924 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
3925 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
3926 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
3927 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
3928 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
3929 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
3930 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
3931 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
3932 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
3933 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
3934 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
3935 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
3936 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
3937 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
3938 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
3939 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
3940 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
3941 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
3942 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
3943 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
3947 static const bitmask_transtbl lflag_tbl
[] = {
3948 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
3949 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
3950 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
3951 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
3952 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
3953 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
3954 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
3955 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
3956 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
3957 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
3958 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
3959 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
3960 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
3961 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
3962 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
3966 static void target_to_host_termios (void *dst
, const void *src
)
3968 struct host_termios
*host
= dst
;
3969 const struct target_termios
*target
= src
;
3972 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
3974 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
3976 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
3978 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
3979 host
->c_line
= target
->c_line
;
3981 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
3982 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
3983 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
3984 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
3985 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
3986 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
3987 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
3988 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
3989 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
3990 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
3991 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
3992 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
3993 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
3994 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
3995 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
3996 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
3997 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
3998 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
4001 static void host_to_target_termios (void *dst
, const void *src
)
4003 struct target_termios
*target
= dst
;
4004 const struct host_termios
*host
= src
;
4007 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
4009 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
4011 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
4013 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
4014 target
->c_line
= host
->c_line
;
4016 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
4017 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
4018 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
4019 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
4020 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
4021 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
4022 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
4023 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
4024 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
4025 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
4026 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
4027 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
4028 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
4029 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
4030 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
4031 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
4032 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
4033 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
4036 static const StructEntry struct_termios_def
= {
4037 .convert
= { host_to_target_termios
, target_to_host_termios
},
4038 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
4039 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
4042 static bitmask_transtbl mmap_flags_tbl
[] = {
4043 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
4044 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
4045 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
4046 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
, MAP_ANONYMOUS
, MAP_ANONYMOUS
},
4047 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
, MAP_GROWSDOWN
, MAP_GROWSDOWN
},
4048 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
, MAP_DENYWRITE
, MAP_DENYWRITE
},
4049 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
, MAP_EXECUTABLE
, MAP_EXECUTABLE
},
4050 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
4051 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
, MAP_NORESERVE
,
4056 #if defined(TARGET_I386)
4058 /* NOTE: there is really one LDT for all the threads */
4059 static uint8_t *ldt_table
;
4061 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
4068 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
4069 if (size
> bytecount
)
4071 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
4073 return -TARGET_EFAULT
;
4074 /* ??? Should this by byteswapped? */
4075 memcpy(p
, ldt_table
, size
);
4076 unlock_user(p
, ptr
, size
);
4080 /* XXX: add locking support */
4081 static abi_long
write_ldt(CPUX86State
*env
,
4082 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
4084 struct target_modify_ldt_ldt_s ldt_info
;
4085 struct target_modify_ldt_ldt_s
*target_ldt_info
;
4086 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
4087 int seg_not_present
, useable
, lm
;
4088 uint32_t *lp
, entry_1
, entry_2
;
4090 if (bytecount
!= sizeof(ldt_info
))
4091 return -TARGET_EINVAL
;
4092 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
4093 return -TARGET_EFAULT
;
4094 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
4095 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
4096 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
4097 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
4098 unlock_user_struct(target_ldt_info
, ptr
, 0);
4100 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
4101 return -TARGET_EINVAL
;
4102 seg_32bit
= ldt_info
.flags
& 1;
4103 contents
= (ldt_info
.flags
>> 1) & 3;
4104 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
4105 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
4106 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
4107 useable
= (ldt_info
.flags
>> 6) & 1;
4111 lm
= (ldt_info
.flags
>> 7) & 1;
4113 if (contents
== 3) {
4115 return -TARGET_EINVAL
;
4116 if (seg_not_present
== 0)
4117 return -TARGET_EINVAL
;
4119 /* allocate the LDT */
4121 env
->ldt
.base
= target_mmap(0,
4122 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
4123 PROT_READ
|PROT_WRITE
,
4124 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
4125 if (env
->ldt
.base
== -1)
4126 return -TARGET_ENOMEM
;
4127 memset(g2h(env
->ldt
.base
), 0,
4128 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
4129 env
->ldt
.limit
= 0xffff;
4130 ldt_table
= g2h(env
->ldt
.base
);
4133 /* NOTE: same code as Linux kernel */
4134 /* Allow LDTs to be cleared by the user. */
4135 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
4138 read_exec_only
== 1 &&
4140 limit_in_pages
== 0 &&
4141 seg_not_present
== 1 &&
4149 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
4150 (ldt_info
.limit
& 0x0ffff);
4151 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
4152 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
4153 (ldt_info
.limit
& 0xf0000) |
4154 ((read_exec_only
^ 1) << 9) |
4156 ((seg_not_present
^ 1) << 15) |
4158 (limit_in_pages
<< 23) |
4162 entry_2
|= (useable
<< 20);
4164 /* Install the new entry ... */
4166 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
4167 lp
[0] = tswap32(entry_1
);
4168 lp
[1] = tswap32(entry_2
);
4172 /* specific and weird i386 syscalls */
4173 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
4174 unsigned long bytecount
)
4180 ret
= read_ldt(ptr
, bytecount
);
4183 ret
= write_ldt(env
, ptr
, bytecount
, 1);
4186 ret
= write_ldt(env
, ptr
, bytecount
, 0);
4189 ret
= -TARGET_ENOSYS
;
4195 #if defined(TARGET_I386) && defined(TARGET_ABI32)
4196 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
4198 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
4199 struct target_modify_ldt_ldt_s ldt_info
;
4200 struct target_modify_ldt_ldt_s
*target_ldt_info
;
4201 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
4202 int seg_not_present
, useable
, lm
;
4203 uint32_t *lp
, entry_1
, entry_2
;
4206 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
4207 if (!target_ldt_info
)
4208 return -TARGET_EFAULT
;
4209 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
4210 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
4211 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
4212 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
4213 if (ldt_info
.entry_number
== -1) {
4214 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
4215 if (gdt_table
[i
] == 0) {
4216 ldt_info
.entry_number
= i
;
4217 target_ldt_info
->entry_number
= tswap32(i
);
4222 unlock_user_struct(target_ldt_info
, ptr
, 1);
4224 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
4225 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
4226 return -TARGET_EINVAL
;
4227 seg_32bit
= ldt_info
.flags
& 1;
4228 contents
= (ldt_info
.flags
>> 1) & 3;
4229 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
4230 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
4231 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
4232 useable
= (ldt_info
.flags
>> 6) & 1;
4236 lm
= (ldt_info
.flags
>> 7) & 1;
4239 if (contents
== 3) {
4240 if (seg_not_present
== 0)
4241 return -TARGET_EINVAL
;
4244 /* NOTE: same code as Linux kernel */
4245 /* Allow LDTs to be cleared by the user. */
4246 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
4247 if ((contents
== 0 &&
4248 read_exec_only
== 1 &&
4250 limit_in_pages
== 0 &&
4251 seg_not_present
== 1 &&
4259 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
4260 (ldt_info
.limit
& 0x0ffff);
4261 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
4262 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
4263 (ldt_info
.limit
& 0xf0000) |
4264 ((read_exec_only
^ 1) << 9) |
4266 ((seg_not_present
^ 1) << 15) |
4268 (limit_in_pages
<< 23) |
4273 /* Install the new entry ... */
4275 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
4276 lp
[0] = tswap32(entry_1
);
4277 lp
[1] = tswap32(entry_2
);
4281 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
4283 struct target_modify_ldt_ldt_s
*target_ldt_info
;
4284 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
4285 uint32_t base_addr
, limit
, flags
;
4286 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
4287 int seg_not_present
, useable
, lm
;
4288 uint32_t *lp
, entry_1
, entry_2
;
4290 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
4291 if (!target_ldt_info
)
4292 return -TARGET_EFAULT
;
4293 idx
= tswap32(target_ldt_info
->entry_number
);
4294 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
4295 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
4296 unlock_user_struct(target_ldt_info
, ptr
, 1);
4297 return -TARGET_EINVAL
;
4299 lp
= (uint32_t *)(gdt_table
+ idx
);
4300 entry_1
= tswap32(lp
[0]);
4301 entry_2
= tswap32(lp
[1]);
4303 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
4304 contents
= (entry_2
>> 10) & 3;
4305 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
4306 seg_32bit
= (entry_2
>> 22) & 1;
4307 limit_in_pages
= (entry_2
>> 23) & 1;
4308 useable
= (entry_2
>> 20) & 1;
4312 lm
= (entry_2
>> 21) & 1;
4314 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
4315 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
4316 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
4317 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
4318 base_addr
= (entry_1
>> 16) |
4319 (entry_2
& 0xff000000) |
4320 ((entry_2
& 0xff) << 16);
4321 target_ldt_info
->base_addr
= tswapal(base_addr
);
4322 target_ldt_info
->limit
= tswap32(limit
);
4323 target_ldt_info
->flags
= tswap32(flags
);
4324 unlock_user_struct(target_ldt_info
, ptr
, 1);
4327 #endif /* TARGET_I386 && TARGET_ABI32 */
4329 #ifndef TARGET_ABI32
4330 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
4337 case TARGET_ARCH_SET_GS
:
4338 case TARGET_ARCH_SET_FS
:
4339 if (code
== TARGET_ARCH_SET_GS
)
4343 cpu_x86_load_seg(env
, idx
, 0);
4344 env
->segs
[idx
].base
= addr
;
4346 case TARGET_ARCH_GET_GS
:
4347 case TARGET_ARCH_GET_FS
:
4348 if (code
== TARGET_ARCH_GET_GS
)
4352 val
= env
->segs
[idx
].base
;
4353 if (put_user(val
, addr
, abi_ulong
))
4354 ret
= -TARGET_EFAULT
;
4357 ret
= -TARGET_EINVAL
;
4364 #endif /* defined(TARGET_I386) */
4366 #define NEW_STACK_SIZE 0x40000
4369 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
4372 pthread_mutex_t mutex
;
4373 pthread_cond_t cond
;
4376 abi_ulong child_tidptr
;
4377 abi_ulong parent_tidptr
;
4381 static void *clone_func(void *arg
)
4383 new_thread_info
*info
= arg
;
4389 cpu
= ENV_GET_CPU(env
);
4391 ts
= (TaskState
*)cpu
->opaque
;
4392 info
->tid
= gettid();
4393 cpu
->host_tid
= info
->tid
;
4395 if (info
->child_tidptr
)
4396 put_user_u32(info
->tid
, info
->child_tidptr
);
4397 if (info
->parent_tidptr
)
4398 put_user_u32(info
->tid
, info
->parent_tidptr
);
4399 /* Enable signals. */
4400 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
4401 /* Signal to the parent that we're ready. */
4402 pthread_mutex_lock(&info
->mutex
);
4403 pthread_cond_broadcast(&info
->cond
);
4404 pthread_mutex_unlock(&info
->mutex
);
4405 /* Wait until the parent has finshed initializing the tls state. */
4406 pthread_mutex_lock(&clone_lock
);
4407 pthread_mutex_unlock(&clone_lock
);
4413 /* do_fork() Must return host values and target errnos (unlike most
4414 do_*() functions). */
4415 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
4416 abi_ulong parent_tidptr
, target_ulong newtls
,
4417 abi_ulong child_tidptr
)
4419 CPUState
*cpu
= ENV_GET_CPU(env
);
4423 CPUArchState
*new_env
;
4424 unsigned int nptl_flags
;
4427 /* Emulate vfork() with fork() */
4428 if (flags
& CLONE_VFORK
)
4429 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
4431 if (flags
& CLONE_VM
) {
4432 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
4433 new_thread_info info
;
4434 pthread_attr_t attr
;
4436 ts
= g_malloc0(sizeof(TaskState
));
4437 init_task_state(ts
);
4438 /* we create a new CPU instance. */
4439 new_env
= cpu_copy(env
);
4440 /* Init regs that differ from the parent. */
4441 cpu_clone_regs(new_env
, newsp
);
4442 new_cpu
= ENV_GET_CPU(new_env
);
4443 new_cpu
->opaque
= ts
;
4444 ts
->bprm
= parent_ts
->bprm
;
4445 ts
->info
= parent_ts
->info
;
4447 flags
&= ~CLONE_NPTL_FLAGS2
;
4449 if (nptl_flags
& CLONE_CHILD_CLEARTID
) {
4450 ts
->child_tidptr
= child_tidptr
;
4453 if (nptl_flags
& CLONE_SETTLS
)
4454 cpu_set_tls (new_env
, newtls
);
4456 /* Grab a mutex so that thread setup appears atomic. */
4457 pthread_mutex_lock(&clone_lock
);
4459 memset(&info
, 0, sizeof(info
));
4460 pthread_mutex_init(&info
.mutex
, NULL
);
4461 pthread_mutex_lock(&info
.mutex
);
4462 pthread_cond_init(&info
.cond
, NULL
);
4464 if (nptl_flags
& CLONE_CHILD_SETTID
)
4465 info
.child_tidptr
= child_tidptr
;
4466 if (nptl_flags
& CLONE_PARENT_SETTID
)
4467 info
.parent_tidptr
= parent_tidptr
;
4469 ret
= pthread_attr_init(&attr
);
4470 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
4471 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
4472 /* It is not safe to deliver signals until the child has finished
4473 initializing, so temporarily block all signals. */
4474 sigfillset(&sigmask
);
4475 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
4477 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
4478 /* TODO: Free new CPU state if thread creation failed. */
4480 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
4481 pthread_attr_destroy(&attr
);
4483 /* Wait for the child to initialize. */
4484 pthread_cond_wait(&info
.cond
, &info
.mutex
);
4486 if (flags
& CLONE_PARENT_SETTID
)
4487 put_user_u32(ret
, parent_tidptr
);
4491 pthread_mutex_unlock(&info
.mutex
);
4492 pthread_cond_destroy(&info
.cond
);
4493 pthread_mutex_destroy(&info
.mutex
);
4494 pthread_mutex_unlock(&clone_lock
);
4496 /* if no CLONE_VM, we consider it is a fork */
4497 if ((flags
& ~(CSIGNAL
| CLONE_NPTL_FLAGS2
)) != 0)
4502 /* Child Process. */
4503 cpu_clone_regs(env
, newsp
);
4505 /* There is a race condition here. The parent process could
4506 theoretically read the TID in the child process before the child
4507 tid is set. This would require using either ptrace
4508 (not implemented) or having *_tidptr to point at a shared memory
4509 mapping. We can't repeat the spinlock hack used above because
4510 the child process gets its own copy of the lock. */
4511 if (flags
& CLONE_CHILD_SETTID
)
4512 put_user_u32(gettid(), child_tidptr
);
4513 if (flags
& CLONE_PARENT_SETTID
)
4514 put_user_u32(gettid(), parent_tidptr
);
4515 ts
= (TaskState
*)cpu
->opaque
;
4516 if (flags
& CLONE_SETTLS
)
4517 cpu_set_tls (env
, newtls
);
4518 if (flags
& CLONE_CHILD_CLEARTID
)
4519 ts
->child_tidptr
= child_tidptr
;
4527 /* warning : doesn't handle linux specific flags... */
4528 static int target_to_host_fcntl_cmd(int cmd
)
4531 case TARGET_F_DUPFD
:
4532 case TARGET_F_GETFD
:
4533 case TARGET_F_SETFD
:
4534 case TARGET_F_GETFL
:
4535 case TARGET_F_SETFL
:
4537 case TARGET_F_GETLK
:
4539 case TARGET_F_SETLK
:
4541 case TARGET_F_SETLKW
:
4543 case TARGET_F_GETOWN
:
4545 case TARGET_F_SETOWN
:
4547 case TARGET_F_GETSIG
:
4549 case TARGET_F_SETSIG
:
4551 #if TARGET_ABI_BITS == 32
4552 case TARGET_F_GETLK64
:
4554 case TARGET_F_SETLK64
:
4556 case TARGET_F_SETLKW64
:
4559 case TARGET_F_SETLEASE
:
4561 case TARGET_F_GETLEASE
:
4563 #ifdef F_DUPFD_CLOEXEC
4564 case TARGET_F_DUPFD_CLOEXEC
:
4565 return F_DUPFD_CLOEXEC
;
4567 case TARGET_F_NOTIFY
:
4570 case TARGET_F_GETOWN_EX
:
4574 case TARGET_F_SETOWN_EX
:
4578 return -TARGET_EINVAL
;
4580 return -TARGET_EINVAL
;
4583 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
4584 static const bitmask_transtbl flock_tbl
[] = {
4585 TRANSTBL_CONVERT(F_RDLCK
),
4586 TRANSTBL_CONVERT(F_WRLCK
),
4587 TRANSTBL_CONVERT(F_UNLCK
),
4588 TRANSTBL_CONVERT(F_EXLCK
),
4589 TRANSTBL_CONVERT(F_SHLCK
),
4593 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
4596 struct target_flock
*target_fl
;
4597 struct flock64 fl64
;
4598 struct target_flock64
*target_fl64
;
4600 struct f_owner_ex fox
;
4601 struct target_f_owner_ex
*target_fox
;
4604 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
4606 if (host_cmd
== -TARGET_EINVAL
)
4610 case TARGET_F_GETLK
:
4611 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
4612 return -TARGET_EFAULT
;
4614 target_to_host_bitmask(tswap16(target_fl
->l_type
), flock_tbl
);
4615 fl
.l_whence
= tswap16(target_fl
->l_whence
);
4616 fl
.l_start
= tswapal(target_fl
->l_start
);
4617 fl
.l_len
= tswapal(target_fl
->l_len
);
4618 fl
.l_pid
= tswap32(target_fl
->l_pid
);
4619 unlock_user_struct(target_fl
, arg
, 0);
4620 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
4622 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg
, 0))
4623 return -TARGET_EFAULT
;
4625 host_to_target_bitmask(tswap16(fl
.l_type
), flock_tbl
);
4626 target_fl
->l_whence
= tswap16(fl
.l_whence
);
4627 target_fl
->l_start
= tswapal(fl
.l_start
);
4628 target_fl
->l_len
= tswapal(fl
.l_len
);
4629 target_fl
->l_pid
= tswap32(fl
.l_pid
);
4630 unlock_user_struct(target_fl
, arg
, 1);
4634 case TARGET_F_SETLK
:
4635 case TARGET_F_SETLKW
:
4636 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
4637 return -TARGET_EFAULT
;
4639 target_to_host_bitmask(tswap16(target_fl
->l_type
), flock_tbl
);
4640 fl
.l_whence
= tswap16(target_fl
->l_whence
);
4641 fl
.l_start
= tswapal(target_fl
->l_start
);
4642 fl
.l_len
= tswapal(target_fl
->l_len
);
4643 fl
.l_pid
= tswap32(target_fl
->l_pid
);
4644 unlock_user_struct(target_fl
, arg
, 0);
4645 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
4648 case TARGET_F_GETLK64
:
4649 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
4650 return -TARGET_EFAULT
;
4652 target_to_host_bitmask(tswap16(target_fl64
->l_type
), flock_tbl
) >> 1;
4653 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
4654 fl64
.l_start
= tswap64(target_fl64
->l_start
);
4655 fl64
.l_len
= tswap64(target_fl64
->l_len
);
4656 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
4657 unlock_user_struct(target_fl64
, arg
, 0);
4658 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
4660 if (!lock_user_struct(VERIFY_WRITE
, target_fl64
, arg
, 0))
4661 return -TARGET_EFAULT
;
4662 target_fl64
->l_type
=
4663 host_to_target_bitmask(tswap16(fl64
.l_type
), flock_tbl
) >> 1;
4664 target_fl64
->l_whence
= tswap16(fl64
.l_whence
);
4665 target_fl64
->l_start
= tswap64(fl64
.l_start
);
4666 target_fl64
->l_len
= tswap64(fl64
.l_len
);
4667 target_fl64
->l_pid
= tswap32(fl64
.l_pid
);
4668 unlock_user_struct(target_fl64
, arg
, 1);
4671 case TARGET_F_SETLK64
:
4672 case TARGET_F_SETLKW64
:
4673 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
4674 return -TARGET_EFAULT
;
4676 target_to_host_bitmask(tswap16(target_fl64
->l_type
), flock_tbl
) >> 1;
4677 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
4678 fl64
.l_start
= tswap64(target_fl64
->l_start
);
4679 fl64
.l_len
= tswap64(target_fl64
->l_len
);
4680 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
4681 unlock_user_struct(target_fl64
, arg
, 0);
4682 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
4685 case TARGET_F_GETFL
:
4686 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
4688 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
4692 case TARGET_F_SETFL
:
4693 ret
= get_errno(fcntl(fd
, host_cmd
, target_to_host_bitmask(arg
, fcntl_flags_tbl
)));
4697 case TARGET_F_GETOWN_EX
:
4698 ret
= get_errno(fcntl(fd
, host_cmd
, &fox
));
4700 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
4701 return -TARGET_EFAULT
;
4702 target_fox
->type
= tswap32(fox
.type
);
4703 target_fox
->pid
= tswap32(fox
.pid
);
4704 unlock_user_struct(target_fox
, arg
, 1);
4710 case TARGET_F_SETOWN_EX
:
4711 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
4712 return -TARGET_EFAULT
;
4713 fox
.type
= tswap32(target_fox
->type
);
4714 fox
.pid
= tswap32(target_fox
->pid
);
4715 unlock_user_struct(target_fox
, arg
, 0);
4716 ret
= get_errno(fcntl(fd
, host_cmd
, &fox
));
4720 case TARGET_F_SETOWN
:
4721 case TARGET_F_GETOWN
:
4722 case TARGET_F_SETSIG
:
4723 case TARGET_F_GETSIG
:
4724 case TARGET_F_SETLEASE
:
4725 case TARGET_F_GETLEASE
:
4726 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
4730 ret
= get_errno(fcntl(fd
, cmd
, arg
));
4738 static inline int high2lowuid(int uid
)
4746 static inline int high2lowgid(int gid
)
4754 static inline int low2highuid(int uid
)
4756 if ((int16_t)uid
== -1)
4762 static inline int low2highgid(int gid
)
4764 if ((int16_t)gid
== -1)
4769 static inline int tswapid(int id
)
4774 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
4776 #else /* !USE_UID16 */
4777 static inline int high2lowuid(int uid
)
4781 static inline int high2lowgid(int gid
)
4785 static inline int low2highuid(int uid
)
4789 static inline int low2highgid(int gid
)
4793 static inline int tswapid(int id
)
4798 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
4800 #endif /* USE_UID16 */
4802 void syscall_init(void)
4805 const argtype
*arg_type
;
4809 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4810 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4811 #include "syscall_types.h"
4813 #undef STRUCT_SPECIAL
4815 /* Build target_to_host_errno_table[] table from
4816 * host_to_target_errno_table[]. */
4817 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
4818 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
4821 /* we patch the ioctl size if necessary. We rely on the fact that
4822 no ioctl has all the bits at '1' in the size field */
4824 while (ie
->target_cmd
!= 0) {
4825 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
4826 TARGET_IOC_SIZEMASK
) {
4827 arg_type
= ie
->arg_type
;
4828 if (arg_type
[0] != TYPE_PTR
) {
4829 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
4834 size
= thunk_type_size(arg_type
, 0);
4835 ie
->target_cmd
= (ie
->target_cmd
&
4836 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
4837 (size
<< TARGET_IOC_SIZESHIFT
);
4840 /* automatic consistency check if same arch */
4841 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4842 (defined(__x86_64__) && defined(TARGET_X86_64))
4843 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
4844 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4845 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
4852 #if TARGET_ABI_BITS == 32
4853 static inline uint64_t target_offset64(uint32_t word0
, uint32_t word1
)
4855 #ifdef TARGET_WORDS_BIGENDIAN
4856 return ((uint64_t)word0
<< 32) | word1
;
4858 return ((uint64_t)word1
<< 32) | word0
;
4861 #else /* TARGET_ABI_BITS == 32 */
4862 static inline uint64_t target_offset64(uint64_t word0
, uint64_t word1
)
4866 #endif /* TARGET_ABI_BITS != 32 */
4868 #ifdef TARGET_NR_truncate64
4869 static inline abi_long
target_truncate64(void *cpu_env
, const char *arg1
,
4874 if (regpairs_aligned(cpu_env
)) {
4878 return get_errno(truncate64(arg1
, target_offset64(arg2
, arg3
)));
4882 #ifdef TARGET_NR_ftruncate64
4883 static inline abi_long
target_ftruncate64(void *cpu_env
, abi_long arg1
,
4888 if (regpairs_aligned(cpu_env
)) {
4892 return get_errno(ftruncate64(arg1
, target_offset64(arg2
, arg3
)));
4896 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
4897 abi_ulong target_addr
)
4899 struct target_timespec
*target_ts
;
4901 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
4902 return -TARGET_EFAULT
;
4903 host_ts
->tv_sec
= tswapal(target_ts
->tv_sec
);
4904 host_ts
->tv_nsec
= tswapal(target_ts
->tv_nsec
);
4905 unlock_user_struct(target_ts
, target_addr
, 0);
4909 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
4910 struct timespec
*host_ts
)
4912 struct target_timespec
*target_ts
;
4914 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
4915 return -TARGET_EFAULT
;
4916 target_ts
->tv_sec
= tswapal(host_ts
->tv_sec
);
4917 target_ts
->tv_nsec
= tswapal(host_ts
->tv_nsec
);
4918 unlock_user_struct(target_ts
, target_addr
, 1);
4922 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
4923 abi_ulong target_addr
)
4925 struct target_itimerspec
*target_itspec
;
4927 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
4928 return -TARGET_EFAULT
;
4931 host_itspec
->it_interval
.tv_sec
=
4932 tswapal(target_itspec
->it_interval
.tv_sec
);
4933 host_itspec
->it_interval
.tv_nsec
=
4934 tswapal(target_itspec
->it_interval
.tv_nsec
);
4935 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
4936 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
4938 unlock_user_struct(target_itspec
, target_addr
, 1);
4942 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
4943 struct itimerspec
*host_its
)
4945 struct target_itimerspec
*target_itspec
;
4947 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
4948 return -TARGET_EFAULT
;
4951 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
4952 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
4954 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
4955 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
4957 unlock_user_struct(target_itspec
, target_addr
, 0);
4961 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
4962 abi_ulong target_addr
)
4964 struct target_sigevent
*target_sevp
;
4966 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
4967 return -TARGET_EFAULT
;
4970 /* This union is awkward on 64 bit systems because it has a 32 bit
4971 * integer and a pointer in it; we follow the conversion approach
4972 * used for handling sigval types in signal.c so the guest should get
4973 * the correct value back even if we did a 64 bit byteswap and it's
4974 * using the 32 bit integer.
4976 host_sevp
->sigev_value
.sival_ptr
=
4977 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
4978 host_sevp
->sigev_signo
=
4979 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
4980 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
4981 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
4983 unlock_user_struct(target_sevp
, target_addr
, 1);
#if defined(TARGET_NR_mlockall)
/*
 * Translate a guest mlockall() flags argument into the host MCL_*
 * bitmask; bits other than the two known flags are dropped.
 */
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }

    return result;
}
#endif
#if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
/*
 * Copy a host struct stat out to the guest's 64-bit stat structure at
 * target_addr, converting each field with __put_user. On 32-bit ARM the
 * EABI layout (struct target_eabi_stat64) is used when the CPU is in
 * EABI mode. Returns 0 on success, -TARGET_EFAULT if the guest memory
 * is not accessible.
 */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif
5067 /* ??? Using host futex calls even when target atomic operations
5068 are not really atomic probably breaks things. However implementing
5069 futexes locally would make futexes shared between multiple processes
5070 tricky. However they're probably useless because guest atomic
5071 operations won't work either. */
5072 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
5073 target_ulong uaddr2
, int val3
)
5075 struct timespec ts
, *pts
;
5078 /* ??? We assume FUTEX_* constants are the same on both host
5080 #ifdef FUTEX_CMD_MASK
5081 base_op
= op
& FUTEX_CMD_MASK
;
5087 case FUTEX_WAIT_BITSET
:
5090 target_to_host_timespec(pts
, timeout
);
5094 return get_errno(sys_futex(g2h(uaddr
), op
, tswap32(val
),
5097 return get_errno(sys_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
5099 return get_errno(sys_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
5101 case FUTEX_CMP_REQUEUE
:
5103 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
5104 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
5105 But the prototype takes a `struct timespec *'; insert casts
5106 to satisfy the compiler. We do not need to tswap TIMEOUT
5107 since it's not compared to guest memory. */
5108 pts
= (struct timespec
*)(uintptr_t) timeout
;
5109 return get_errno(sys_futex(g2h(uaddr
), op
, val
, pts
,
5111 (base_op
== FUTEX_CMP_REQUEUE
5115 return -TARGET_ENOSYS
;
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Translate the terminating signal, keep the remaining bits. */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* Stop signal lives in bits 8-15; low byte is preserved. */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
/*
 * Emulate reading /proc/self/cmdline: copy the real file into fd while
 * skipping the first NUL-terminated string (the path of the qemu binary
 * itself) so the guest sees its own argv[0] first.
 * Returns 0 on success, -1 on read/write failure.
 */
static int open_self_cmdline(void *cpu_env, int fd)
{
    int fd_orig = -1;
    bool word_skipped = false;

    fd_orig = open("/proc/self/cmdline", O_RDONLY);
    if (fd_orig < 0) {
        return fd_orig;
    }

    while (true) {
        ssize_t nb_read;
        char buf[128];
        char *cp_buf = buf;

        nb_read = read(fd_orig, buf, sizeof(buf));
        if (nb_read < 0) {
            fd_orig = close(fd_orig);
            return -1;
        } else if (nb_read == 0) {
            break;
        }

        if (!word_skipped) {
            /* Skip the first string, which is the path to qemu-*-static
               instead of the actual command. */
            /* Bug fix: search only the nb_read bytes actually read; the
             * original scanned sizeof(buf), inspecting uninitialized
             * bytes after a short read. */
            cp_buf = memchr(buf, 0, nb_read);
            if (cp_buf) {
                /* Null byte found, skip one string */
                cp_buf++;
                nb_read -= cp_buf - buf;
                word_skipped = true;
            }
        }

        if (word_skipped) {
            if (write(fd, cp_buf, nb_read) != nb_read) {
                close(fd_orig);
                return -1;
            }
        }
    }

    return close(fd_orig);
}
5179 static int open_self_maps(void *cpu_env
, int fd
)
5181 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
5182 TaskState
*ts
= cpu
->opaque
;
5188 fp
= fopen("/proc/self/maps", "r");
5193 while ((read
= getline(&line
, &len
, fp
)) != -1) {
5194 int fields
, dev_maj
, dev_min
, inode
;
5195 uint64_t min
, max
, offset
;
5196 char flag_r
, flag_w
, flag_x
, flag_p
;
5197 char path
[512] = "";
5198 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
5199 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
5200 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
5202 if ((fields
< 10) || (fields
> 11)) {
5205 if (h2g_valid(min
)) {
5206 int flags
= page_get_flags(h2g(min
));
5207 max
= h2g_valid(max
- 1) ? max
: (uintptr_t)g2h(GUEST_ADDR_MAX
);
5208 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
5211 if (h2g(min
) == ts
->info
->stack_limit
) {
5212 pstrcpy(path
, sizeof(path
), " [stack]");
5214 dprintf(fd
, TARGET_ABI_FMT_lx
"-" TARGET_ABI_FMT_lx
5215 " %c%c%c%c %08" PRIx64
" %02x:%02x %d %s%s\n",
5216 h2g(min
), h2g(max
- 1) + 1, flag_r
, flag_w
,
5217 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
5218 path
[0] ? " " : "", path
);
5228 static int open_self_stat(void *cpu_env
, int fd
)
5230 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
5231 TaskState
*ts
= cpu
->opaque
;
5232 abi_ulong start_stack
= ts
->info
->start_stack
;
5235 for (i
= 0; i
< 44; i
++) {
5243 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
5244 } else if (i
== 1) {
5246 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
5247 } else if (i
== 27) {
5250 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
5252 /* for the rest, there is MasterCard */
5253 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
5257 if (write(fd
, buf
, len
) != len
) {
5265 static int open_self_auxv(void *cpu_env
, int fd
)
5267 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
5268 TaskState
*ts
= cpu
->opaque
;
5269 abi_ulong auxv
= ts
->info
->saved_auxv
;
5270 abi_ulong len
= ts
->info
->auxv_len
;
5274 * Auxiliary vector is stored in target process stack.
5275 * read in whole auxv vector and copy it to file
5277 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
5281 r
= write(fd
, ptr
, len
);
5288 lseek(fd
, 0, SEEK_SET
);
5289 unlock_user(ptr
, auxv
, len
);
/*
 * Return 1 if filename names the entry `entry` of this process's /proc
 * directory, reached either as "/proc/self/<entry>" or as
 * "/proc/<pid>/<entry>" with our own pid; return 0 otherwise.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (strncmp(filename, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    filename += strlen("/proc/");

    if (strncmp(filename, "self/", strlen("self/")) == 0) {
        filename += strlen("self/");
    } else if (*filename >= '1' && *filename <= '9') {
        char myself[80];
        snprintf(myself, sizeof(myself), "%d/", getpid());
        if (strncmp(filename, myself, strlen(myself)) != 0) {
            return 0;
        }
        filename += strlen(myself);
    } else {
        return 0;
    }

    return strcmp(filename, entry) == 0;
}
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-match comparator used by the fake-open table for absolute
 * /proc paths (as opposed to per-process is_proc_myself matching). */
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}

/*
 * Emulate /proc/net/route when host and target endianness differ:
 * copy the host file into fd, byte-swapping the destination, gateway
 * and mask address columns so the guest sees them in its own order.
 * Returns 0 on success, -EACCES if the host file cannot be opened.
 */
static int open_net_route(void *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -EACCES;
    }

    /* read header */
    read = getline(&line, &len, fp);
    dprintf(fd, "%s", line);

    /* read routes */
    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
               iface, &dest, &gw, &flags, &refcnt, &use, &metric,
               &mask, &mtu, &window, &irtt);
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
#endif
5363 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
5366 const char *filename
;
5367 int (*fill
)(void *cpu_env
, int fd
);
5368 int (*cmp
)(const char *s1
, const char *s2
);
5370 const struct fake_open
*fake_open
;
5371 static const struct fake_open fakes
[] = {
5372 { "maps", open_self_maps
, is_proc_myself
},
5373 { "stat", open_self_stat
, is_proc_myself
},
5374 { "auxv", open_self_auxv
, is_proc_myself
},
5375 { "cmdline", open_self_cmdline
, is_proc_myself
},
5376 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
5377 { "/proc/net/route", open_net_route
, is_proc
},
5379 { NULL
, NULL
, NULL
}
5382 if (is_proc_myself(pathname
, "exe")) {
5383 int execfd
= qemu_getauxval(AT_EXECFD
);
5384 return execfd
? execfd
: get_errno(sys_openat(dirfd
, exec_path
, flags
, mode
));
5387 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
5388 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
5393 if (fake_open
->filename
) {
5395 char filename
[PATH_MAX
];
5398 /* create temporary file to map stat to */
5399 tmpdir
= getenv("TMPDIR");
5402 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
5403 fd
= mkstemp(filename
);
5409 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
5413 lseek(fd
, 0, SEEK_SET
);
5418 return get_errno(sys_openat(dirfd
, path(pathname
), flags
, mode
));
5421 /* do_syscall() should always have a single exit point at the end so
5422 that actions, such as logging of syscall results, can be performed.
5423 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5424 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
5425 abi_long arg2
, abi_long arg3
, abi_long arg4
,
5426 abi_long arg5
, abi_long arg6
, abi_long arg7
,
5429 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
5436 gemu_log("syscall %d", num
);
5439 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
5442 case TARGET_NR_exit
:
5443 /* In old applications this may be used to implement _exit(2).
5444 However in threaded applictions it is used for thread termination,
5445 and _exit_group is used for application termination.
5446 Do thread termination if we have more then one thread. */
5447 /* FIXME: This probably breaks if a signal arrives. We should probably
5448 be disabling signals. */
5449 if (CPU_NEXT(first_cpu
)) {
5453 /* Remove the CPU from the list. */
5454 QTAILQ_REMOVE(&cpus
, cpu
, node
);
5457 if (ts
->child_tidptr
) {
5458 put_user_u32(0, ts
->child_tidptr
);
5459 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
5463 object_unref(OBJECT(cpu
));
5470 gdb_exit(cpu_env
, arg1
);
5472 ret
= 0; /* avoid warning */
5474 case TARGET_NR_read
:
5478 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
5480 ret
= get_errno(read(arg1
, p
, arg3
));
5481 unlock_user(p
, arg2
, ret
);
5484 case TARGET_NR_write
:
5485 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
5487 ret
= get_errno(write(arg1
, p
, arg3
));
5488 unlock_user(p
, arg2
, 0);
5490 case TARGET_NR_open
:
5491 if (!(p
= lock_user_string(arg1
)))
5493 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
5494 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
5496 unlock_user(p
, arg1
, 0);
5498 case TARGET_NR_openat
:
5499 if (!(p
= lock_user_string(arg2
)))
5501 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
5502 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
5504 unlock_user(p
, arg2
, 0);
5506 case TARGET_NR_close
:
5507 ret
= get_errno(close(arg1
));
5512 case TARGET_NR_fork
:
5513 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
5515 #ifdef TARGET_NR_waitpid
5516 case TARGET_NR_waitpid
:
5519 ret
= get_errno(waitpid(arg1
, &status
, arg3
));
5520 if (!is_error(ret
) && arg2
&& ret
5521 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
5526 #ifdef TARGET_NR_waitid
5527 case TARGET_NR_waitid
:
5531 ret
= get_errno(waitid(arg1
, arg2
, &info
, arg4
));
5532 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
5533 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
5535 host_to_target_siginfo(p
, &info
);
5536 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
5541 #ifdef TARGET_NR_creat /* not on alpha */
5542 case TARGET_NR_creat
:
5543 if (!(p
= lock_user_string(arg1
)))
5545 ret
= get_errno(creat(p
, arg2
));
5546 unlock_user(p
, arg1
, 0);
5549 case TARGET_NR_link
:
5552 p
= lock_user_string(arg1
);
5553 p2
= lock_user_string(arg2
);
5555 ret
= -TARGET_EFAULT
;
5557 ret
= get_errno(link(p
, p2
));
5558 unlock_user(p2
, arg2
, 0);
5559 unlock_user(p
, arg1
, 0);
5562 #if defined(TARGET_NR_linkat)
5563 case TARGET_NR_linkat
:
5568 p
= lock_user_string(arg2
);
5569 p2
= lock_user_string(arg4
);
5571 ret
= -TARGET_EFAULT
;
5573 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
5574 unlock_user(p
, arg2
, 0);
5575 unlock_user(p2
, arg4
, 0);
5579 case TARGET_NR_unlink
:
5580 if (!(p
= lock_user_string(arg1
)))
5582 ret
= get_errno(unlink(p
));
5583 unlock_user(p
, arg1
, 0);
5585 #if defined(TARGET_NR_unlinkat)
5586 case TARGET_NR_unlinkat
:
5587 if (!(p
= lock_user_string(arg2
)))
5589 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
5590 unlock_user(p
, arg2
, 0);
5593 case TARGET_NR_execve
:
5595 char **argp
, **envp
;
5598 abi_ulong guest_argp
;
5599 abi_ulong guest_envp
;
5606 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
5607 if (get_user_ual(addr
, gp
))
5615 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
5616 if (get_user_ual(addr
, gp
))
5623 argp
= alloca((argc
+ 1) * sizeof(void *));
5624 envp
= alloca((envc
+ 1) * sizeof(void *));
5626 for (gp
= guest_argp
, q
= argp
; gp
;
5627 gp
+= sizeof(abi_ulong
), q
++) {
5628 if (get_user_ual(addr
, gp
))
5632 if (!(*q
= lock_user_string(addr
)))
5634 total_size
+= strlen(*q
) + 1;
5638 for (gp
= guest_envp
, q
= envp
; gp
;
5639 gp
+= sizeof(abi_ulong
), q
++) {
5640 if (get_user_ual(addr
, gp
))
5644 if (!(*q
= lock_user_string(addr
)))
5646 total_size
+= strlen(*q
) + 1;
5650 /* This case will not be caught by the host's execve() if its
5651 page size is bigger than the target's. */
5652 if (total_size
> MAX_ARG_PAGES
* TARGET_PAGE_SIZE
) {
5653 ret
= -TARGET_E2BIG
;
5656 if (!(p
= lock_user_string(arg1
)))
5658 ret
= get_errno(execve(p
, argp
, envp
));
5659 unlock_user(p
, arg1
, 0);
5664 ret
= -TARGET_EFAULT
;
5667 for (gp
= guest_argp
, q
= argp
; *q
;
5668 gp
+= sizeof(abi_ulong
), q
++) {
5669 if (get_user_ual(addr
, gp
)
5672 unlock_user(*q
, addr
, 0);
5674 for (gp
= guest_envp
, q
= envp
; *q
;
5675 gp
+= sizeof(abi_ulong
), q
++) {
5676 if (get_user_ual(addr
, gp
)
5679 unlock_user(*q
, addr
, 0);
5683 case TARGET_NR_chdir
:
5684 if (!(p
= lock_user_string(arg1
)))
5686 ret
= get_errno(chdir(p
));
5687 unlock_user(p
, arg1
, 0);
5689 #ifdef TARGET_NR_time
5690 case TARGET_NR_time
:
5693 ret
= get_errno(time(&host_time
));
5696 && put_user_sal(host_time
, arg1
))
5701 case TARGET_NR_mknod
:
5702 if (!(p
= lock_user_string(arg1
)))
5704 ret
= get_errno(mknod(p
, arg2
, arg3
));
5705 unlock_user(p
, arg1
, 0);
5707 #if defined(TARGET_NR_mknodat)
5708 case TARGET_NR_mknodat
:
5709 if (!(p
= lock_user_string(arg2
)))
5711 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
5712 unlock_user(p
, arg2
, 0);
5715 case TARGET_NR_chmod
:
5716 if (!(p
= lock_user_string(arg1
)))
5718 ret
= get_errno(chmod(p
, arg2
));
5719 unlock_user(p
, arg1
, 0);
5721 #ifdef TARGET_NR_break
5722 case TARGET_NR_break
:
5725 #ifdef TARGET_NR_oldstat
5726 case TARGET_NR_oldstat
:
5729 case TARGET_NR_lseek
:
5730 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
5732 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5733 /* Alpha specific */
5734 case TARGET_NR_getxpid
:
5735 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
5736 ret
= get_errno(getpid());
5739 #ifdef TARGET_NR_getpid
5740 case TARGET_NR_getpid
:
5741 ret
= get_errno(getpid());
5744 case TARGET_NR_mount
:
5746 /* need to look at the data field */
5750 p
= lock_user_string(arg1
);
5758 p2
= lock_user_string(arg2
);
5761 unlock_user(p
, arg1
, 0);
5767 p3
= lock_user_string(arg3
);
5770 unlock_user(p
, arg1
, 0);
5772 unlock_user(p2
, arg2
, 0);
5779 /* FIXME - arg5 should be locked, but it isn't clear how to
5780 * do that since it's not guaranteed to be a NULL-terminated
5784 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
5786 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
5788 ret
= get_errno(ret
);
5791 unlock_user(p
, arg1
, 0);
5793 unlock_user(p2
, arg2
, 0);
5795 unlock_user(p3
, arg3
, 0);
5799 #ifdef TARGET_NR_umount
5800 case TARGET_NR_umount
:
5801 if (!(p
= lock_user_string(arg1
)))
5803 ret
= get_errno(umount(p
));
5804 unlock_user(p
, arg1
, 0);
5807 #ifdef TARGET_NR_stime /* not on alpha */
5808 case TARGET_NR_stime
:
5811 if (get_user_sal(host_time
, arg1
))
5813 ret
= get_errno(stime(&host_time
));
5817 case TARGET_NR_ptrace
:
5819 #ifdef TARGET_NR_alarm /* not on alpha */
5820 case TARGET_NR_alarm
:
5824 #ifdef TARGET_NR_oldfstat
5825 case TARGET_NR_oldfstat
:
5828 #ifdef TARGET_NR_pause /* not on alpha */
5829 case TARGET_NR_pause
:
5830 ret
= get_errno(pause());
5833 #ifdef TARGET_NR_utime
5834 case TARGET_NR_utime
:
5836 struct utimbuf tbuf
, *host_tbuf
;
5837 struct target_utimbuf
*target_tbuf
;
5839 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
5841 tbuf
.actime
= tswapal(target_tbuf
->actime
);
5842 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
5843 unlock_user_struct(target_tbuf
, arg2
, 0);
5848 if (!(p
= lock_user_string(arg1
)))
5850 ret
= get_errno(utime(p
, host_tbuf
));
5851 unlock_user(p
, arg1
, 0);
5855 case TARGET_NR_utimes
:
5857 struct timeval
*tvp
, tv
[2];
5859 if (copy_from_user_timeval(&tv
[0], arg2
)
5860 || copy_from_user_timeval(&tv
[1],
5861 arg2
+ sizeof(struct target_timeval
)))
5867 if (!(p
= lock_user_string(arg1
)))
5869 ret
= get_errno(utimes(p
, tvp
));
5870 unlock_user(p
, arg1
, 0);
5873 #if defined(TARGET_NR_futimesat)
5874 case TARGET_NR_futimesat
:
5876 struct timeval
*tvp
, tv
[2];
5878 if (copy_from_user_timeval(&tv
[0], arg3
)
5879 || copy_from_user_timeval(&tv
[1],
5880 arg3
+ sizeof(struct target_timeval
)))
5886 if (!(p
= lock_user_string(arg2
)))
5888 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
5889 unlock_user(p
, arg2
, 0);
5893 #ifdef TARGET_NR_stty
5894 case TARGET_NR_stty
:
5897 #ifdef TARGET_NR_gtty
5898 case TARGET_NR_gtty
:
5901 case TARGET_NR_access
:
5902 if (!(p
= lock_user_string(arg1
)))
5904 ret
= get_errno(access(path(p
), arg2
));
5905 unlock_user(p
, arg1
, 0);
5907 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5908 case TARGET_NR_faccessat
:
5909 if (!(p
= lock_user_string(arg2
)))
5911 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
5912 unlock_user(p
, arg2
, 0);
5915 #ifdef TARGET_NR_nice /* not on alpha */
5916 case TARGET_NR_nice
:
5917 ret
= get_errno(nice(arg1
));
5920 #ifdef TARGET_NR_ftime
5921 case TARGET_NR_ftime
:
5924 case TARGET_NR_sync
:
5928 case TARGET_NR_kill
:
5929 ret
= get_errno(kill(arg1
, target_to_host_signal(arg2
)));
5931 case TARGET_NR_rename
:
5934 p
= lock_user_string(arg1
);
5935 p2
= lock_user_string(arg2
);
5937 ret
= -TARGET_EFAULT
;
5939 ret
= get_errno(rename(p
, p2
));
5940 unlock_user(p2
, arg2
, 0);
5941 unlock_user(p
, arg1
, 0);
5944 #if defined(TARGET_NR_renameat)
5945 case TARGET_NR_renameat
:
5948 p
= lock_user_string(arg2
);
5949 p2
= lock_user_string(arg4
);
5951 ret
= -TARGET_EFAULT
;
5953 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
5954 unlock_user(p2
, arg4
, 0);
5955 unlock_user(p
, arg2
, 0);
5959 case TARGET_NR_mkdir
:
5960 if (!(p
= lock_user_string(arg1
)))
5962 ret
= get_errno(mkdir(p
, arg2
));
5963 unlock_user(p
, arg1
, 0);
5965 #if defined(TARGET_NR_mkdirat)
5966 case TARGET_NR_mkdirat
:
5967 if (!(p
= lock_user_string(arg2
)))
5969 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
5970 unlock_user(p
, arg2
, 0);
5973 case TARGET_NR_rmdir
:
5974 if (!(p
= lock_user_string(arg1
)))
5976 ret
= get_errno(rmdir(p
));
5977 unlock_user(p
, arg1
, 0);
5980 ret
= get_errno(dup(arg1
));
5982 case TARGET_NR_pipe
:
5983 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
5985 #ifdef TARGET_NR_pipe2
5986 case TARGET_NR_pipe2
:
5987 ret
= do_pipe(cpu_env
, arg1
,
5988 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
5991 case TARGET_NR_times
:
5993 struct target_tms
*tmsp
;
5995 ret
= get_errno(times(&tms
));
5997 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
6000 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
6001 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
6002 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
6003 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
6006 ret
= host_to_target_clock_t(ret
);
6009 #ifdef TARGET_NR_prof
6010 case TARGET_NR_prof
:
6013 #ifdef TARGET_NR_signal
6014 case TARGET_NR_signal
:
6017 case TARGET_NR_acct
:
6019 ret
= get_errno(acct(NULL
));
6021 if (!(p
= lock_user_string(arg1
)))
6023 ret
= get_errno(acct(path(p
)));
6024 unlock_user(p
, arg1
, 0);
6027 #ifdef TARGET_NR_umount2
6028 case TARGET_NR_umount2
:
6029 if (!(p
= lock_user_string(arg1
)))
6031 ret
= get_errno(umount2(p
, arg2
));
6032 unlock_user(p
, arg1
, 0);
6035 #ifdef TARGET_NR_lock
6036 case TARGET_NR_lock
:
6039 case TARGET_NR_ioctl
:
6040 ret
= do_ioctl(arg1
, arg2
, arg3
);
6042 case TARGET_NR_fcntl
:
6043 ret
= do_fcntl(arg1
, arg2
, arg3
);
6045 #ifdef TARGET_NR_mpx
6049 case TARGET_NR_setpgid
:
6050 ret
= get_errno(setpgid(arg1
, arg2
));
6052 #ifdef TARGET_NR_ulimit
6053 case TARGET_NR_ulimit
:
6056 #ifdef TARGET_NR_oldolduname
6057 case TARGET_NR_oldolduname
:
6060 case TARGET_NR_umask
:
6061 ret
= get_errno(umask(arg1
));
6063 case TARGET_NR_chroot
:
6064 if (!(p
= lock_user_string(arg1
)))
6066 ret
= get_errno(chroot(p
));
6067 unlock_user(p
, arg1
, 0);
6069 case TARGET_NR_ustat
:
6071 case TARGET_NR_dup2
:
6072 ret
= get_errno(dup2(arg1
, arg2
));
6074 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
6075 case TARGET_NR_dup3
:
6076 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
6079 #ifdef TARGET_NR_getppid /* not on alpha */
6080 case TARGET_NR_getppid
:
6081 ret
= get_errno(getppid());
6084 case TARGET_NR_getpgrp
:
6085 ret
= get_errno(getpgrp());
6087 case TARGET_NR_setsid
:
6088 ret
= get_errno(setsid());
6090 #ifdef TARGET_NR_sigaction
6091 case TARGET_NR_sigaction
:
6093 #if defined(TARGET_ALPHA)
6094 struct target_sigaction act
, oact
, *pact
= 0;
6095 struct target_old_sigaction
*old_act
;
6097 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
6099 act
._sa_handler
= old_act
->_sa_handler
;
6100 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
6101 act
.sa_flags
= old_act
->sa_flags
;
6102 act
.sa_restorer
= 0;
6103 unlock_user_struct(old_act
, arg2
, 0);
6106 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
6107 if (!is_error(ret
) && arg3
) {
6108 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
6110 old_act
->_sa_handler
= oact
._sa_handler
;
6111 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
6112 old_act
->sa_flags
= oact
.sa_flags
;
6113 unlock_user_struct(old_act
, arg3
, 1);
6115 #elif defined(TARGET_MIPS)
6116 struct target_sigaction act
, oact
, *pact
, *old_act
;
6119 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
6121 act
._sa_handler
= old_act
->_sa_handler
;
6122 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
6123 act
.sa_flags
= old_act
->sa_flags
;
6124 unlock_user_struct(old_act
, arg2
, 0);
6130 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
6132 if (!is_error(ret
) && arg3
) {
6133 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
6135 old_act
->_sa_handler
= oact
._sa_handler
;
6136 old_act
->sa_flags
= oact
.sa_flags
;
6137 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
6138 old_act
->sa_mask
.sig
[1] = 0;
6139 old_act
->sa_mask
.sig
[2] = 0;
6140 old_act
->sa_mask
.sig
[3] = 0;
6141 unlock_user_struct(old_act
, arg3
, 1);
6144 struct target_old_sigaction
*old_act
;
6145 struct target_sigaction act
, oact
, *pact
;
6147 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
6149 act
._sa_handler
= old_act
->_sa_handler
;
6150 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
6151 act
.sa_flags
= old_act
->sa_flags
;
6152 act
.sa_restorer
= old_act
->sa_restorer
;
6153 unlock_user_struct(old_act
, arg2
, 0);
6158 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
6159 if (!is_error(ret
) && arg3
) {
6160 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
6162 old_act
->_sa_handler
= oact
._sa_handler
;
6163 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
6164 old_act
->sa_flags
= oact
.sa_flags
;
6165 old_act
->sa_restorer
= oact
.sa_restorer
;
6166 unlock_user_struct(old_act
, arg3
, 1);
6172 case TARGET_NR_rt_sigaction
:
6174 #if defined(TARGET_ALPHA)
6175 struct target_sigaction act
, oact
, *pact
= 0;
6176 struct target_rt_sigaction
*rt_act
;
6177 /* ??? arg4 == sizeof(sigset_t). */
6179 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
6181 act
._sa_handler
= rt_act
->_sa_handler
;
6182 act
.sa_mask
= rt_act
->sa_mask
;
6183 act
.sa_flags
= rt_act
->sa_flags
;
6184 act
.sa_restorer
= arg5
;
6185 unlock_user_struct(rt_act
, arg2
, 0);
6188 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
6189 if (!is_error(ret
) && arg3
) {
6190 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
6192 rt_act
->_sa_handler
= oact
._sa_handler
;
6193 rt_act
->sa_mask
= oact
.sa_mask
;
6194 rt_act
->sa_flags
= oact
.sa_flags
;
6195 unlock_user_struct(rt_act
, arg3
, 1);
6198 struct target_sigaction
*act
;
6199 struct target_sigaction
*oact
;
6202 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
6207 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
6208 ret
= -TARGET_EFAULT
;
6209 goto rt_sigaction_fail
;
6213 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
6216 unlock_user_struct(act
, arg2
, 0);
6218 unlock_user_struct(oact
, arg3
, 1);
6222 #ifdef TARGET_NR_sgetmask /* not on alpha */
6223 case TARGET_NR_sgetmask
:
6226 abi_ulong target_set
;
6227 do_sigprocmask(0, NULL
, &cur_set
);
6228 host_to_target_old_sigset(&target_set
, &cur_set
);
6233 #ifdef TARGET_NR_ssetmask /* not on alpha */
6234 case TARGET_NR_ssetmask
:
6236 sigset_t set
, oset
, cur_set
;
6237 abi_ulong target_set
= arg1
;
6238 do_sigprocmask(0, NULL
, &cur_set
);
6239 target_to_host_old_sigset(&set
, &target_set
);
6240 sigorset(&set
, &set
, &cur_set
);
6241 do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
6242 host_to_target_old_sigset(&target_set
, &oset
);
6247 #ifdef TARGET_NR_sigprocmask
6248 case TARGET_NR_sigprocmask
:
6250 #if defined(TARGET_ALPHA)
6251 sigset_t set
, oldset
;
6256 case TARGET_SIG_BLOCK
:
6259 case TARGET_SIG_UNBLOCK
:
6262 case TARGET_SIG_SETMASK
:
6266 ret
= -TARGET_EINVAL
;
6270 target_to_host_old_sigset(&set
, &mask
);
6272 ret
= get_errno(do_sigprocmask(how
, &set
, &oldset
));
6273 if (!is_error(ret
)) {
6274 host_to_target_old_sigset(&mask
, &oldset
);
6276 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
6279 sigset_t set
, oldset
, *set_ptr
;
6284 case TARGET_SIG_BLOCK
:
6287 case TARGET_SIG_UNBLOCK
:
6290 case TARGET_SIG_SETMASK
:
6294 ret
= -TARGET_EINVAL
;
6297 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
6299 target_to_host_old_sigset(&set
, p
);
6300 unlock_user(p
, arg2
, 0);
6306 ret
= get_errno(do_sigprocmask(how
, set_ptr
, &oldset
));
6307 if (!is_error(ret
) && arg3
) {
6308 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
6310 host_to_target_old_sigset(p
, &oldset
);
6311 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
6317 case TARGET_NR_rt_sigprocmask
:
6320 sigset_t set
, oldset
, *set_ptr
;
6324 case TARGET_SIG_BLOCK
:
6327 case TARGET_SIG_UNBLOCK
:
6330 case TARGET_SIG_SETMASK
:
6334 ret
= -TARGET_EINVAL
;
6337 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
6339 target_to_host_sigset(&set
, p
);
6340 unlock_user(p
, arg2
, 0);
6346 ret
= get_errno(do_sigprocmask(how
, set_ptr
, &oldset
));
6347 if (!is_error(ret
) && arg3
) {
6348 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
6350 host_to_target_sigset(p
, &oldset
);
6351 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
6355 #ifdef TARGET_NR_sigpending
6356 case TARGET_NR_sigpending
:
6359 ret
= get_errno(sigpending(&set
));
6360 if (!is_error(ret
)) {
6361 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
6363 host_to_target_old_sigset(p
, &set
);
6364 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
6369 case TARGET_NR_rt_sigpending
:
6372 ret
= get_errno(sigpending(&set
));
6373 if (!is_error(ret
)) {
6374 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
6376 host_to_target_sigset(p
, &set
);
6377 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
6381 #ifdef TARGET_NR_sigsuspend
6382 case TARGET_NR_sigsuspend
:
6385 #if defined(TARGET_ALPHA)
6386 abi_ulong mask
= arg1
;
6387 target_to_host_old_sigset(&set
, &mask
);
6389 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
6391 target_to_host_old_sigset(&set
, p
);
6392 unlock_user(p
, arg1
, 0);
6394 ret
= get_errno(sigsuspend(&set
));
6398 case TARGET_NR_rt_sigsuspend
:
6401 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
6403 target_to_host_sigset(&set
, p
);
6404 unlock_user(p
, arg1
, 0);
6405 ret
= get_errno(sigsuspend(&set
));
6408 case TARGET_NR_rt_sigtimedwait
:
6411 struct timespec uts
, *puts
;
6414 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
6416 target_to_host_sigset(&set
, p
);
6417 unlock_user(p
, arg1
, 0);
6420 target_to_host_timespec(puts
, arg3
);
6424 ret
= get_errno(sigtimedwait(&set
, &uinfo
, puts
));
6425 if (!is_error(ret
)) {
6427 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
6432 host_to_target_siginfo(p
, &uinfo
);
6433 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
6435 ret
= host_to_target_signal(ret
);
6439 case TARGET_NR_rt_sigqueueinfo
:
6442 if (!(p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_sigset_t
), 1)))
6444 target_to_host_siginfo(&uinfo
, p
);
6445 unlock_user(p
, arg1
, 0);
6446 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
6449 #ifdef TARGET_NR_sigreturn
6450 case TARGET_NR_sigreturn
:
6451 /* NOTE: ret is eax, so not transcoding must be done */
6452 ret
= do_sigreturn(cpu_env
);
6455 case TARGET_NR_rt_sigreturn
:
6456 /* NOTE: ret is eax, so not transcoding must be done */
6457 ret
= do_rt_sigreturn(cpu_env
);
6459 case TARGET_NR_sethostname
:
6460 if (!(p
= lock_user_string(arg1
)))
6462 ret
= get_errno(sethostname(p
, arg2
));
6463 unlock_user(p
, arg1
, 0);
6465 case TARGET_NR_setrlimit
:
6467 int resource
= target_to_host_resource(arg1
);
6468 struct target_rlimit
*target_rlim
;
6470 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
6472 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
6473 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
6474 unlock_user_struct(target_rlim
, arg2
, 0);
6475 ret
= get_errno(setrlimit(resource
, &rlim
));
6478 case TARGET_NR_getrlimit
:
6480 int resource
= target_to_host_resource(arg1
);
6481 struct target_rlimit
*target_rlim
;
6484 ret
= get_errno(getrlimit(resource
, &rlim
));
6485 if (!is_error(ret
)) {
6486 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
6488 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
6489 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
6490 unlock_user_struct(target_rlim
, arg2
, 1);
6494 case TARGET_NR_getrusage
:
6496 struct rusage rusage
;
6497 ret
= get_errno(getrusage(arg1
, &rusage
));
6498 if (!is_error(ret
)) {
6499 ret
= host_to_target_rusage(arg2
, &rusage
);
6503 case TARGET_NR_gettimeofday
:
6506 ret
= get_errno(gettimeofday(&tv
, NULL
));
6507 if (!is_error(ret
)) {
6508 if (copy_to_user_timeval(arg1
, &tv
))
6513 case TARGET_NR_settimeofday
:
6515 struct timeval tv
, *ptv
= NULL
;
6516 struct timezone tz
, *ptz
= NULL
;
6519 if (copy_from_user_timeval(&tv
, arg1
)) {
6526 if (copy_from_user_timezone(&tz
, arg2
)) {
6532 ret
= get_errno(settimeofday(ptv
, ptz
));
6535 #if defined(TARGET_NR_select)
6536 case TARGET_NR_select
:
6537 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
6538 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
6541 struct target_sel_arg_struct
*sel
;
6542 abi_ulong inp
, outp
, exp
, tvp
;
6545 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1))
6547 nsel
= tswapal(sel
->n
);
6548 inp
= tswapal(sel
->inp
);
6549 outp
= tswapal(sel
->outp
);
6550 exp
= tswapal(sel
->exp
);
6551 tvp
= tswapal(sel
->tvp
);
6552 unlock_user_struct(sel
, arg1
, 0);
6553 ret
= do_select(nsel
, inp
, outp
, exp
, tvp
);
6558 #ifdef TARGET_NR_pselect6
6559 case TARGET_NR_pselect6
:
6561 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
6562 fd_set rfds
, wfds
, efds
;
6563 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
6564 struct timespec ts
, *ts_ptr
;
6567 * The 6th arg is actually two args smashed together,
6568 * so we cannot use the C library.
6576 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
6577 target_sigset_t
*target_sigset
;
6585 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
6589 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
6593 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
6599 * This takes a timespec, and not a timeval, so we cannot
6600 * use the do_select() helper ...
6603 if (target_to_host_timespec(&ts
, ts_addr
)) {
6611 /* Extract the two packed args for the sigset */
6614 sig
.size
= _NSIG
/ 8;
6616 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
6620 arg_sigset
= tswapal(arg7
[0]);
6621 arg_sigsize
= tswapal(arg7
[1]);
6622 unlock_user(arg7
, arg6
, 0);
6626 if (arg_sigsize
!= sizeof(*target_sigset
)) {
6627 /* Like the kernel, we enforce correct size sigsets */
6628 ret
= -TARGET_EINVAL
;
6631 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
6632 sizeof(*target_sigset
), 1);
6633 if (!target_sigset
) {
6636 target_to_host_sigset(&set
, target_sigset
);
6637 unlock_user(target_sigset
, arg_sigset
, 0);
6645 ret
= get_errno(sys_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
6648 if (!is_error(ret
)) {
6649 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
6651 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
6653 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
6656 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
6662 case TARGET_NR_symlink
:
6665 p
= lock_user_string(arg1
);
6666 p2
= lock_user_string(arg2
);
6668 ret
= -TARGET_EFAULT
;
6670 ret
= get_errno(symlink(p
, p2
));
6671 unlock_user(p2
, arg2
, 0);
6672 unlock_user(p
, arg1
, 0);
6675 #if defined(TARGET_NR_symlinkat)
6676 case TARGET_NR_symlinkat
:
6679 p
= lock_user_string(arg1
);
6680 p2
= lock_user_string(arg3
);
6682 ret
= -TARGET_EFAULT
;
6684 ret
= get_errno(symlinkat(p
, arg2
, p2
));
6685 unlock_user(p2
, arg3
, 0);
6686 unlock_user(p
, arg1
, 0);
6690 #ifdef TARGET_NR_oldlstat
6691 case TARGET_NR_oldlstat
:
6694 case TARGET_NR_readlink
:
6697 p
= lock_user_string(arg1
);
6698 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
6700 ret
= -TARGET_EFAULT
;
6702 /* Short circuit this for the magic exe check. */
6703 ret
= -TARGET_EINVAL
;
6704 } else if (is_proc_myself((const char *)p
, "exe")) {
6705 char real
[PATH_MAX
], *temp
;
6706 temp
= realpath(exec_path
, real
);
6707 /* Return value is # of bytes that we wrote to the buffer. */
6709 ret
= get_errno(-1);
6711 /* Don't worry about sign mismatch as earlier mapping
6712 * logic would have thrown a bad address error. */
6713 ret
= MIN(strlen(real
), arg3
);
6714 /* We cannot NUL terminate the string. */
6715 memcpy(p2
, real
, ret
);
6718 ret
= get_errno(readlink(path(p
), p2
, arg3
));
6720 unlock_user(p2
, arg2
, ret
);
6721 unlock_user(p
, arg1
, 0);
6724 #if defined(TARGET_NR_readlinkat)
6725 case TARGET_NR_readlinkat
:
6728 p
= lock_user_string(arg2
);
6729 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
6731 ret
= -TARGET_EFAULT
;
6732 } else if (is_proc_myself((const char *)p
, "exe")) {
6733 char real
[PATH_MAX
], *temp
;
6734 temp
= realpath(exec_path
, real
);
6735 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
6736 snprintf((char *)p2
, arg4
, "%s", real
);
6738 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
6740 unlock_user(p2
, arg3
, ret
);
6741 unlock_user(p
, arg2
, 0);
6745 #ifdef TARGET_NR_uselib
6746 case TARGET_NR_uselib
:
6749 #ifdef TARGET_NR_swapon
6750 case TARGET_NR_swapon
:
6751 if (!(p
= lock_user_string(arg1
)))
6753 ret
= get_errno(swapon(p
, arg2
));
6754 unlock_user(p
, arg1
, 0);
6757 case TARGET_NR_reboot
:
6758 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
6759 /* arg4 must be ignored in all other cases */
6760 p
= lock_user_string(arg4
);
6764 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
6765 unlock_user(p
, arg4
, 0);
6767 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
6770 #ifdef TARGET_NR_readdir
6771 case TARGET_NR_readdir
:
6774 #ifdef TARGET_NR_mmap
6775 case TARGET_NR_mmap
:
6776 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6777 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
6778 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
6779 || defined(TARGET_S390X)
6782 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
6783 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
6791 unlock_user(v
, arg1
, 0);
6792 ret
= get_errno(target_mmap(v1
, v2
, v3
,
6793 target_to_host_bitmask(v4
, mmap_flags_tbl
),
6797 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
6798 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
6804 #ifdef TARGET_NR_mmap2
6805 case TARGET_NR_mmap2
:
6807 #define MMAP_SHIFT 12
6809 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
6810 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
6812 arg6
<< MMAP_SHIFT
));
6815 case TARGET_NR_munmap
:
6816 ret
= get_errno(target_munmap(arg1
, arg2
));
6818 case TARGET_NR_mprotect
:
6820 TaskState
*ts
= cpu
->opaque
;
6821 /* Special hack to detect libc making the stack executable. */
6822 if ((arg3
& PROT_GROWSDOWN
)
6823 && arg1
>= ts
->info
->stack_limit
6824 && arg1
<= ts
->info
->start_stack
) {
6825 arg3
&= ~PROT_GROWSDOWN
;
6826 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
6827 arg1
= ts
->info
->stack_limit
;
6830 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
6832 #ifdef TARGET_NR_mremap
6833 case TARGET_NR_mremap
:
6834 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
6837 /* ??? msync/mlock/munlock are broken for softmmu. */
6838 #ifdef TARGET_NR_msync
6839 case TARGET_NR_msync
:
6840 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
6843 #ifdef TARGET_NR_mlock
6844 case TARGET_NR_mlock
:
6845 ret
= get_errno(mlock(g2h(arg1
), arg2
));
6848 #ifdef TARGET_NR_munlock
6849 case TARGET_NR_munlock
:
6850 ret
= get_errno(munlock(g2h(arg1
), arg2
));
6853 #ifdef TARGET_NR_mlockall
6854 case TARGET_NR_mlockall
:
6855 ret
= get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
6858 #ifdef TARGET_NR_munlockall
6859 case TARGET_NR_munlockall
:
6860 ret
= get_errno(munlockall());
6863 case TARGET_NR_truncate
:
6864 if (!(p
= lock_user_string(arg1
)))
6866 ret
= get_errno(truncate(p
, arg2
));
6867 unlock_user(p
, arg1
, 0);
6869 case TARGET_NR_ftruncate
:
6870 ret
= get_errno(ftruncate(arg1
, arg2
));
6872 case TARGET_NR_fchmod
:
6873 ret
= get_errno(fchmod(arg1
, arg2
));
6875 #if defined(TARGET_NR_fchmodat)
6876 case TARGET_NR_fchmodat
:
6877 if (!(p
= lock_user_string(arg2
)))
6879 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
6880 unlock_user(p
, arg2
, 0);
6883 case TARGET_NR_getpriority
:
6884 /* Note that negative values are valid for getpriority, so we must
6885 differentiate based on errno settings. */
6887 ret
= getpriority(arg1
, arg2
);
6888 if (ret
== -1 && errno
!= 0) {
6889 ret
= -host_to_target_errno(errno
);
6893 /* Return value is the unbiased priority. Signal no error. */
6894 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
6896 /* Return value is a biased priority to avoid negative numbers. */
6900 case TARGET_NR_setpriority
:
6901 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
6903 #ifdef TARGET_NR_profil
6904 case TARGET_NR_profil
:
6907 case TARGET_NR_statfs
:
6908 if (!(p
= lock_user_string(arg1
)))
6910 ret
= get_errno(statfs(path(p
), &stfs
));
6911 unlock_user(p
, arg1
, 0);
6913 if (!is_error(ret
)) {
6914 struct target_statfs
*target_stfs
;
6916 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
6918 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
6919 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
6920 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
6921 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
6922 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
6923 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
6924 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
6925 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
6926 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
6927 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
6928 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
6929 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
6930 unlock_user_struct(target_stfs
, arg2
, 1);
6933 case TARGET_NR_fstatfs
:
6934 ret
= get_errno(fstatfs(arg1
, &stfs
));
6935 goto convert_statfs
;
6936 #ifdef TARGET_NR_statfs64
6937 case TARGET_NR_statfs64
:
6938 if (!(p
= lock_user_string(arg1
)))
6940 ret
= get_errno(statfs(path(p
), &stfs
));
6941 unlock_user(p
, arg1
, 0);
6943 if (!is_error(ret
)) {
6944 struct target_statfs64
*target_stfs
;
6946 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
6948 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
6949 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
6950 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
6951 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
6952 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
6953 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
6954 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
6955 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
6956 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
6957 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
6958 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
6959 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
6960 unlock_user_struct(target_stfs
, arg3
, 1);
6963 case TARGET_NR_fstatfs64
:
6964 ret
= get_errno(fstatfs(arg1
, &stfs
));
6965 goto convert_statfs64
;
6967 #ifdef TARGET_NR_ioperm
6968 case TARGET_NR_ioperm
:
6971 #ifdef TARGET_NR_socketcall
6972 case TARGET_NR_socketcall
:
6973 ret
= do_socketcall(arg1
, arg2
);
6976 #ifdef TARGET_NR_accept
6977 case TARGET_NR_accept
:
6978 ret
= do_accept4(arg1
, arg2
, arg3
, 0);
6981 #ifdef TARGET_NR_accept4
6982 case TARGET_NR_accept4
:
6983 #ifdef CONFIG_ACCEPT4
6984 ret
= do_accept4(arg1
, arg2
, arg3
, arg4
);
6990 #ifdef TARGET_NR_bind
6991 case TARGET_NR_bind
:
6992 ret
= do_bind(arg1
, arg2
, arg3
);
6995 #ifdef TARGET_NR_connect
6996 case TARGET_NR_connect
:
6997 ret
= do_connect(arg1
, arg2
, arg3
);
7000 #ifdef TARGET_NR_getpeername
7001 case TARGET_NR_getpeername
:
7002 ret
= do_getpeername(arg1
, arg2
, arg3
);
7005 #ifdef TARGET_NR_getsockname
7006 case TARGET_NR_getsockname
:
7007 ret
= do_getsockname(arg1
, arg2
, arg3
);
7010 #ifdef TARGET_NR_getsockopt
7011 case TARGET_NR_getsockopt
:
7012 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
7015 #ifdef TARGET_NR_listen
7016 case TARGET_NR_listen
:
7017 ret
= get_errno(listen(arg1
, arg2
));
7020 #ifdef TARGET_NR_recv
7021 case TARGET_NR_recv
:
7022 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
7025 #ifdef TARGET_NR_recvfrom
7026 case TARGET_NR_recvfrom
:
7027 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7030 #ifdef TARGET_NR_recvmsg
7031 case TARGET_NR_recvmsg
:
7032 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
7035 #ifdef TARGET_NR_send
7036 case TARGET_NR_send
:
7037 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
7040 #ifdef TARGET_NR_sendmsg
7041 case TARGET_NR_sendmsg
:
7042 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
7045 #ifdef TARGET_NR_sendmmsg
7046 case TARGET_NR_sendmmsg
:
7047 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
7049 case TARGET_NR_recvmmsg
:
7050 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
7053 #ifdef TARGET_NR_sendto
7054 case TARGET_NR_sendto
:
7055 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7058 #ifdef TARGET_NR_shutdown
7059 case TARGET_NR_shutdown
:
7060 ret
= get_errno(shutdown(arg1
, arg2
));
7063 #ifdef TARGET_NR_socket
7064 case TARGET_NR_socket
:
7065 ret
= do_socket(arg1
, arg2
, arg3
);
7068 #ifdef TARGET_NR_socketpair
7069 case TARGET_NR_socketpair
:
7070 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
7073 #ifdef TARGET_NR_setsockopt
7074 case TARGET_NR_setsockopt
:
7075 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
7079 case TARGET_NR_syslog
:
7080 if (!(p
= lock_user_string(arg2
)))
7082 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
7083 unlock_user(p
, arg2
, 0);
7086 case TARGET_NR_setitimer
:
7088 struct itimerval value
, ovalue
, *pvalue
;
7092 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
7093 || copy_from_user_timeval(&pvalue
->it_value
,
7094 arg2
+ sizeof(struct target_timeval
)))
7099 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
7100 if (!is_error(ret
) && arg3
) {
7101 if (copy_to_user_timeval(arg3
,
7102 &ovalue
.it_interval
)
7103 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
7109 case TARGET_NR_getitimer
:
7111 struct itimerval value
;
7113 ret
= get_errno(getitimer(arg1
, &value
));
7114 if (!is_error(ret
) && arg2
) {
7115 if (copy_to_user_timeval(arg2
,
7117 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
7123 case TARGET_NR_stat
:
7124 if (!(p
= lock_user_string(arg1
)))
7126 ret
= get_errno(stat(path(p
), &st
));
7127 unlock_user(p
, arg1
, 0);
7129 case TARGET_NR_lstat
:
7130 if (!(p
= lock_user_string(arg1
)))
7132 ret
= get_errno(lstat(path(p
), &st
));
7133 unlock_user(p
, arg1
, 0);
7135 case TARGET_NR_fstat
:
7137 ret
= get_errno(fstat(arg1
, &st
));
7139 if (!is_error(ret
)) {
7140 struct target_stat
*target_st
;
7142 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
7144 memset(target_st
, 0, sizeof(*target_st
));
7145 __put_user(st
.st_dev
, &target_st
->st_dev
);
7146 __put_user(st
.st_ino
, &target_st
->st_ino
);
7147 __put_user(st
.st_mode
, &target_st
->st_mode
);
7148 __put_user(st
.st_uid
, &target_st
->st_uid
);
7149 __put_user(st
.st_gid
, &target_st
->st_gid
);
7150 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
7151 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
7152 __put_user(st
.st_size
, &target_st
->st_size
);
7153 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
7154 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
7155 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
7156 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
7157 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
7158 unlock_user_struct(target_st
, arg2
, 1);
7162 #ifdef TARGET_NR_olduname
7163 case TARGET_NR_olduname
:
7166 #ifdef TARGET_NR_iopl
7167 case TARGET_NR_iopl
:
7170 case TARGET_NR_vhangup
:
7171 ret
= get_errno(vhangup());
7173 #ifdef TARGET_NR_idle
7174 case TARGET_NR_idle
:
7177 #ifdef TARGET_NR_syscall
7178 case TARGET_NR_syscall
:
7179 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
7180 arg6
, arg7
, arg8
, 0);
7183 case TARGET_NR_wait4
:
7186 abi_long status_ptr
= arg2
;
7187 struct rusage rusage
, *rusage_ptr
;
7188 abi_ulong target_rusage
= arg4
;
7189 abi_long rusage_err
;
7191 rusage_ptr
= &rusage
;
7194 ret
= get_errno(wait4(arg1
, &status
, arg3
, rusage_ptr
));
7195 if (!is_error(ret
)) {
7196 if (status_ptr
&& ret
) {
7197 status
= host_to_target_waitstatus(status
);
7198 if (put_user_s32(status
, status_ptr
))
7201 if (target_rusage
) {
7202 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
7210 #ifdef TARGET_NR_swapoff
7211 case TARGET_NR_swapoff
:
7212 if (!(p
= lock_user_string(arg1
)))
7214 ret
= get_errno(swapoff(p
));
7215 unlock_user(p
, arg1
, 0);
7218 case TARGET_NR_sysinfo
:
7220 struct target_sysinfo
*target_value
;
7221 struct sysinfo value
;
7222 ret
= get_errno(sysinfo(&value
));
7223 if (!is_error(ret
) && arg1
)
7225 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
7227 __put_user(value
.uptime
, &target_value
->uptime
);
7228 __put_user(value
.loads
[0], &target_value
->loads
[0]);
7229 __put_user(value
.loads
[1], &target_value
->loads
[1]);
7230 __put_user(value
.loads
[2], &target_value
->loads
[2]);
7231 __put_user(value
.totalram
, &target_value
->totalram
);
7232 __put_user(value
.freeram
, &target_value
->freeram
);
7233 __put_user(value
.sharedram
, &target_value
->sharedram
);
7234 __put_user(value
.bufferram
, &target_value
->bufferram
);
7235 __put_user(value
.totalswap
, &target_value
->totalswap
);
7236 __put_user(value
.freeswap
, &target_value
->freeswap
);
7237 __put_user(value
.procs
, &target_value
->procs
);
7238 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
7239 __put_user(value
.freehigh
, &target_value
->freehigh
);
7240 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
7241 unlock_user_struct(target_value
, arg1
, 1);
7245 #ifdef TARGET_NR_ipc
7247 ret
= do_ipc(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7250 #ifdef TARGET_NR_semget
7251 case TARGET_NR_semget
:
7252 ret
= get_errno(semget(arg1
, arg2
, arg3
));
7255 #ifdef TARGET_NR_semop
7256 case TARGET_NR_semop
:
7257 ret
= do_semop(arg1
, arg2
, arg3
);
7260 #ifdef TARGET_NR_semctl
7261 case TARGET_NR_semctl
:
7262 ret
= do_semctl(arg1
, arg2
, arg3
, (union target_semun
)(abi_ulong
)arg4
);
7265 #ifdef TARGET_NR_msgctl
7266 case TARGET_NR_msgctl
:
7267 ret
= do_msgctl(arg1
, arg2
, arg3
);
7270 #ifdef TARGET_NR_msgget
7271 case TARGET_NR_msgget
:
7272 ret
= get_errno(msgget(arg1
, arg2
));
7275 #ifdef TARGET_NR_msgrcv
7276 case TARGET_NR_msgrcv
:
7277 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
7280 #ifdef TARGET_NR_msgsnd
7281 case TARGET_NR_msgsnd
:
7282 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
7285 #ifdef TARGET_NR_shmget
7286 case TARGET_NR_shmget
:
7287 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
7290 #ifdef TARGET_NR_shmctl
7291 case TARGET_NR_shmctl
:
7292 ret
= do_shmctl(arg1
, arg2
, arg3
);
7295 #ifdef TARGET_NR_shmat
7296 case TARGET_NR_shmat
:
7297 ret
= do_shmat(arg1
, arg2
, arg3
);
7300 #ifdef TARGET_NR_shmdt
7301 case TARGET_NR_shmdt
:
7302 ret
= do_shmdt(arg1
);
7305 case TARGET_NR_fsync
:
7306 ret
= get_errno(fsync(arg1
));
7308 case TARGET_NR_clone
:
7309 /* Linux manages to have three different orderings for its
7310 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
7311 * match the kernel's CONFIG_CLONE_* settings.
7312 * Microblaze is further special in that it uses a sixth
7313 * implicit argument to clone for the TLS pointer.
7315 #if defined(TARGET_MICROBLAZE)
7316 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
7317 #elif defined(TARGET_CLONE_BACKWARDS)
7318 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
7319 #elif defined(TARGET_CLONE_BACKWARDS2)
7320 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
7322 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
7325 #ifdef __NR_exit_group
7326 /* new thread calls */
7327 case TARGET_NR_exit_group
:
7331 gdb_exit(cpu_env
, arg1
);
7332 ret
= get_errno(exit_group(arg1
));
7335 case TARGET_NR_setdomainname
:
7336 if (!(p
= lock_user_string(arg1
)))
7338 ret
= get_errno(setdomainname(p
, arg2
));
7339 unlock_user(p
, arg1
, 0);
7341 case TARGET_NR_uname
:
7342 /* no need to transcode because we use the linux syscall */
7344 struct new_utsname
* buf
;
7346 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
7348 ret
= get_errno(sys_uname(buf
));
7349 if (!is_error(ret
)) {
7350 /* Overrite the native machine name with whatever is being
7352 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
7353 /* Allow the user to override the reported release. */
7354 if (qemu_uname_release
&& *qemu_uname_release
)
7355 strcpy (buf
->release
, qemu_uname_release
);
7357 unlock_user_struct(buf
, arg1
, 1);
7361 case TARGET_NR_modify_ldt
:
7362 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
7364 #if !defined(TARGET_X86_64)
7365 case TARGET_NR_vm86old
:
7367 case TARGET_NR_vm86
:
7368 ret
= do_vm86(cpu_env
, arg1
, arg2
);
7372 case TARGET_NR_adjtimex
:
7374 #ifdef TARGET_NR_create_module
7375 case TARGET_NR_create_module
:
7377 case TARGET_NR_init_module
:
7378 case TARGET_NR_delete_module
:
7379 #ifdef TARGET_NR_get_kernel_syms
7380 case TARGET_NR_get_kernel_syms
:
7383 case TARGET_NR_quotactl
:
7385 case TARGET_NR_getpgid
:
7386 ret
= get_errno(getpgid(arg1
));
7388 case TARGET_NR_fchdir
:
7389 ret
= get_errno(fchdir(arg1
));
7391 #ifdef TARGET_NR_bdflush /* not on x86_64 */
7392 case TARGET_NR_bdflush
:
7395 #ifdef TARGET_NR_sysfs
7396 case TARGET_NR_sysfs
:
7399 case TARGET_NR_personality
:
7400 ret
= get_errno(personality(arg1
));
7402 #ifdef TARGET_NR_afs_syscall
7403 case TARGET_NR_afs_syscall
:
7406 #ifdef TARGET_NR__llseek /* Not on alpha */
7407 case TARGET_NR__llseek
:
7410 #if !defined(__NR_llseek)
7411 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | arg3
, arg5
);
7413 ret
= get_errno(res
);
7418 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
7420 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
7426 case TARGET_NR_getdents
:
7427 #ifdef __NR_getdents
7428 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
7430 struct target_dirent
*target_dirp
;
7431 struct linux_dirent
*dirp
;
7432 abi_long count
= arg3
;
7434 dirp
= malloc(count
);
7436 ret
= -TARGET_ENOMEM
;
7440 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
7441 if (!is_error(ret
)) {
7442 struct linux_dirent
*de
;
7443 struct target_dirent
*tde
;
7445 int reclen
, treclen
;
7446 int count1
, tnamelen
;
7450 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
7454 reclen
= de
->d_reclen
;
7455 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
7456 assert(tnamelen
>= 0);
7457 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
7458 assert(count1
+ treclen
<= count
);
7459 tde
->d_reclen
= tswap16(treclen
);
7460 tde
->d_ino
= tswapal(de
->d_ino
);
7461 tde
->d_off
= tswapal(de
->d_off
);
7462 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
7463 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
7465 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
7469 unlock_user(target_dirp
, arg2
, ret
);
7475 struct linux_dirent
*dirp
;
7476 abi_long count
= arg3
;
7478 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
7480 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
7481 if (!is_error(ret
)) {
7482 struct linux_dirent
*de
;
7487 reclen
= de
->d_reclen
;
7490 de
->d_reclen
= tswap16(reclen
);
7491 tswapls(&de
->d_ino
);
7492 tswapls(&de
->d_off
);
7493 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
7497 unlock_user(dirp
, arg2
, ret
);
7501 /* Implement getdents in terms of getdents64 */
7503 struct linux_dirent64
*dirp
;
7504 abi_long count
= arg3
;
7506 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
7510 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
7511 if (!is_error(ret
)) {
7512 /* Convert the dirent64 structs to target dirent. We do this
7513 * in-place, since we can guarantee that a target_dirent is no
7514 * larger than a dirent64; however this means we have to be
7515 * careful to read everything before writing in the new format.
7517 struct linux_dirent64
*de
;
7518 struct target_dirent
*tde
;
7523 tde
= (struct target_dirent
*)dirp
;
7525 int namelen
, treclen
;
7526 int reclen
= de
->d_reclen
;
7527 uint64_t ino
= de
->d_ino
;
7528 int64_t off
= de
->d_off
;
7529 uint8_t type
= de
->d_type
;
7531 namelen
= strlen(de
->d_name
);
7532 treclen
= offsetof(struct target_dirent
, d_name
)
7534 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
7536 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
7537 tde
->d_ino
= tswapal(ino
);
7538 tde
->d_off
= tswapal(off
);
7539 tde
->d_reclen
= tswap16(treclen
);
7540 /* The target_dirent type is in what was formerly a padding
7541 * byte at the end of the structure:
7543 *(((char *)tde
) + treclen
- 1) = type
;
7545 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
7546 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
7552 unlock_user(dirp
, arg2
, ret
);
7556 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
7557 case TARGET_NR_getdents64
:
7559 struct linux_dirent64
*dirp
;
7560 abi_long count
= arg3
;
7561 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
7563 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
7564 if (!is_error(ret
)) {
7565 struct linux_dirent64
*de
;
7570 reclen
= de
->d_reclen
;
7573 de
->d_reclen
= tswap16(reclen
);
7574 tswap64s((uint64_t *)&de
->d_ino
);
7575 tswap64s((uint64_t *)&de
->d_off
);
7576 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
7580 unlock_user(dirp
, arg2
, ret
);
7583 #endif /* TARGET_NR_getdents64 */
7584 #if defined(TARGET_NR__newselect)
7585 case TARGET_NR__newselect
:
7586 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
7589 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
7590 # ifdef TARGET_NR_poll
7591 case TARGET_NR_poll
:
7593 # ifdef TARGET_NR_ppoll
7594 case TARGET_NR_ppoll
:
7597 struct target_pollfd
*target_pfd
;
7598 unsigned int nfds
= arg2
;
7603 target_pfd
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_pollfd
) * nfds
, 1);
7607 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
7608 for(i
= 0; i
< nfds
; i
++) {
7609 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
7610 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
7613 # ifdef TARGET_NR_ppoll
7614 if (num
== TARGET_NR_ppoll
) {
7615 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
7616 target_sigset_t
*target_set
;
7617 sigset_t _set
, *set
= &_set
;
7620 if (target_to_host_timespec(timeout_ts
, arg3
)) {
7621 unlock_user(target_pfd
, arg1
, 0);
7629 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
7631 unlock_user(target_pfd
, arg1
, 0);
7634 target_to_host_sigset(set
, target_set
);
7639 ret
= get_errno(sys_ppoll(pfd
, nfds
, timeout_ts
, set
, _NSIG
/8));
7641 if (!is_error(ret
) && arg3
) {
7642 host_to_target_timespec(arg3
, timeout_ts
);
7645 unlock_user(target_set
, arg4
, 0);
7649 ret
= get_errno(poll(pfd
, nfds
, timeout
));
7651 if (!is_error(ret
)) {
7652 for(i
= 0; i
< nfds
; i
++) {
7653 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
7656 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
7660 case TARGET_NR_flock
:
7661 /* NOTE: the flock constant seems to be the same for every
7663 ret
= get_errno(flock(arg1
, arg2
));
7665 case TARGET_NR_readv
:
7667 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
7669 ret
= get_errno(readv(arg1
, vec
, arg3
));
7670 unlock_iovec(vec
, arg2
, arg3
, 1);
7672 ret
= -host_to_target_errno(errno
);
7676 case TARGET_NR_writev
:
7678 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
7680 ret
= get_errno(writev(arg1
, vec
, arg3
));
7681 unlock_iovec(vec
, arg2
, arg3
, 0);
7683 ret
= -host_to_target_errno(errno
);
7687 case TARGET_NR_getsid
:
7688 ret
= get_errno(getsid(arg1
));
7690 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
7691 case TARGET_NR_fdatasync
:
7692 ret
= get_errno(fdatasync(arg1
));
7695 case TARGET_NR__sysctl
:
7696 /* We don't implement this, but ENOTDIR is always a safe
7698 ret
= -TARGET_ENOTDIR
;
7700 case TARGET_NR_sched_getaffinity
:
7702 unsigned int mask_size
;
7703 unsigned long *mask
;
7706 * sched_getaffinity needs multiples of ulong, so need to take
7707 * care of mismatches between target ulong and host ulong sizes.
7709 if (arg2
& (sizeof(abi_ulong
) - 1)) {
7710 ret
= -TARGET_EINVAL
;
7713 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
7715 mask
= alloca(mask_size
);
7716 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
7718 if (!is_error(ret
)) {
7720 /* More data returned than the caller's buffer will fit.
7721 * This only happens if sizeof(abi_long) < sizeof(long)
7722 * and the caller passed us a buffer holding an odd number
7723 * of abi_longs. If the host kernel is actually using the
7724 * extra 4 bytes then fail EINVAL; otherwise we can just
7725 * ignore them and only copy the interesting part.
7727 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
7728 if (numcpus
> arg2
* 8) {
7729 ret
= -TARGET_EINVAL
;
7735 if (copy_to_user(arg3
, mask
, ret
)) {
7741 case TARGET_NR_sched_setaffinity
:
7743 unsigned int mask_size
;
7744 unsigned long *mask
;
7747 * sched_setaffinity needs multiples of ulong, so need to take
7748 * care of mismatches between target ulong and host ulong sizes.
7750 if (arg2
& (sizeof(abi_ulong
) - 1)) {
7751 ret
= -TARGET_EINVAL
;
7754 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
7756 mask
= alloca(mask_size
);
7757 if (!lock_user_struct(VERIFY_READ
, p
, arg3
, 1)) {
7760 memcpy(mask
, p
, arg2
);
7761 unlock_user_struct(p
, arg2
, 0);
7763 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
7766 case TARGET_NR_sched_setparam
:
7768 struct sched_param
*target_schp
;
7769 struct sched_param schp
;
7772 return -TARGET_EINVAL
;
7774 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
7776 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
7777 unlock_user_struct(target_schp
, arg2
, 0);
7778 ret
= get_errno(sched_setparam(arg1
, &schp
));
7781 case TARGET_NR_sched_getparam
:
7783 struct sched_param
*target_schp
;
7784 struct sched_param schp
;
7787 return -TARGET_EINVAL
;
7789 ret
= get_errno(sched_getparam(arg1
, &schp
));
7790 if (!is_error(ret
)) {
7791 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
7793 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
7794 unlock_user_struct(target_schp
, arg2
, 1);
7798 case TARGET_NR_sched_setscheduler
:
7800 struct sched_param
*target_schp
;
7801 struct sched_param schp
;
7803 return -TARGET_EINVAL
;
7805 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
7807 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
7808 unlock_user_struct(target_schp
, arg3
, 0);
7809 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
7812 case TARGET_NR_sched_getscheduler
:
7813 ret
= get_errno(sched_getscheduler(arg1
));
7815 case TARGET_NR_sched_yield
:
7816 ret
= get_errno(sched_yield());
7818 case TARGET_NR_sched_get_priority_max
:
7819 ret
= get_errno(sched_get_priority_max(arg1
));
7821 case TARGET_NR_sched_get_priority_min
:
7822 ret
= get_errno(sched_get_priority_min(arg1
));
7824 case TARGET_NR_sched_rr_get_interval
:
7827 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
7828 if (!is_error(ret
)) {
7829 ret
= host_to_target_timespec(arg2
, &ts
);
7833 case TARGET_NR_nanosleep
:
7835 struct timespec req
, rem
;
7836 target_to_host_timespec(&req
, arg1
);
7837 ret
= get_errno(nanosleep(&req
, &rem
));
7838 if (is_error(ret
) && arg2
) {
7839 host_to_target_timespec(arg2
, &rem
);
7843 #ifdef TARGET_NR_query_module
7844 case TARGET_NR_query_module
:
7847 #ifdef TARGET_NR_nfsservctl
7848 case TARGET_NR_nfsservctl
:
7851 case TARGET_NR_prctl
:
7853 case PR_GET_PDEATHSIG
:
7856 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
7857 if (!is_error(ret
) && arg2
7858 && put_user_ual(deathsig
, arg2
)) {
7866 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
7870 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
7872 unlock_user(name
, arg2
, 16);
7877 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
7881 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
7883 unlock_user(name
, arg2
, 0);
7888 /* Most prctl options have no pointer arguments */
7889 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
7893 #ifdef TARGET_NR_arch_prctl
7894 case TARGET_NR_arch_prctl
:
7895 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
7896 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
7902 #ifdef TARGET_NR_pread64
7903 case TARGET_NR_pread64
:
7904 if (regpairs_aligned(cpu_env
)) {
7908 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
7910 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
7911 unlock_user(p
, arg2
, ret
);
7913 case TARGET_NR_pwrite64
:
7914 if (regpairs_aligned(cpu_env
)) {
7918 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
7920 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
7921 unlock_user(p
, arg2
, 0);
7924 case TARGET_NR_getcwd
:
7925 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
7927 ret
= get_errno(sys_getcwd1(p
, arg2
));
7928 unlock_user(p
, arg1
, ret
);
7930 case TARGET_NR_capget
:
7931 case TARGET_NR_capset
:
7933 struct target_user_cap_header
*target_header
;
7934 struct target_user_cap_data
*target_data
= NULL
;
7935 struct __user_cap_header_struct header
;
7936 struct __user_cap_data_struct data
[2];
7937 struct __user_cap_data_struct
*dataptr
= NULL
;
7938 int i
, target_datalen
;
7941 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
7944 header
.version
= tswap32(target_header
->version
);
7945 header
.pid
= tswap32(target_header
->pid
);
7947 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
7948 /* Version 2 and up takes pointer to two user_data structs */
7952 target_datalen
= sizeof(*target_data
) * data_items
;
7955 if (num
== TARGET_NR_capget
) {
7956 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
7958 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
7961 unlock_user_struct(target_header
, arg1
, 0);
7965 if (num
== TARGET_NR_capset
) {
7966 for (i
= 0; i
< data_items
; i
++) {
7967 data
[i
].effective
= tswap32(target_data
[i
].effective
);
7968 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
7969 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
7976 if (num
== TARGET_NR_capget
) {
7977 ret
= get_errno(capget(&header
, dataptr
));
7979 ret
= get_errno(capset(&header
, dataptr
));
7982 /* The kernel always updates version for both capget and capset */
7983 target_header
->version
= tswap32(header
.version
);
7984 unlock_user_struct(target_header
, arg1
, 1);
7987 if (num
== TARGET_NR_capget
) {
7988 for (i
= 0; i
< data_items
; i
++) {
7989 target_data
[i
].effective
= tswap32(data
[i
].effective
);
7990 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
7991 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
7993 unlock_user(target_data
, arg2
, target_datalen
);
7995 unlock_user(target_data
, arg2
, 0);
8000 case TARGET_NR_sigaltstack
:
8001 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
8002 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
8003 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
8004 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUArchState
*)cpu_env
));
8010 #ifdef CONFIG_SENDFILE
8011 case TARGET_NR_sendfile
:
8016 ret
= get_user_sal(off
, arg3
);
8017 if (is_error(ret
)) {
8022 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
8023 if (!is_error(ret
) && arg3
) {
8024 abi_long ret2
= put_user_sal(off
, arg3
);
8025 if (is_error(ret2
)) {
8031 #ifdef TARGET_NR_sendfile64
8032 case TARGET_NR_sendfile64
:
8037 ret
= get_user_s64(off
, arg3
);
8038 if (is_error(ret
)) {
8043 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
8044 if (!is_error(ret
) && arg3
) {
8045 abi_long ret2
= put_user_s64(off
, arg3
);
8046 if (is_error(ret2
)) {
8054 case TARGET_NR_sendfile
:
8055 #ifdef TARGET_NR_sendfile64
8056 case TARGET_NR_sendfile64
:
8061 #ifdef TARGET_NR_getpmsg
8062 case TARGET_NR_getpmsg
:
8065 #ifdef TARGET_NR_putpmsg
8066 case TARGET_NR_putpmsg
:
8069 #ifdef TARGET_NR_vfork
8070 case TARGET_NR_vfork
:
8071 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
8075 #ifdef TARGET_NR_ugetrlimit
8076 case TARGET_NR_ugetrlimit
:
8079 int resource
= target_to_host_resource(arg1
);
8080 ret
= get_errno(getrlimit(resource
, &rlim
));
8081 if (!is_error(ret
)) {
8082 struct target_rlimit
*target_rlim
;
8083 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
8085 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
8086 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
8087 unlock_user_struct(target_rlim
, arg2
, 1);
8092 #ifdef TARGET_NR_truncate64
8093 case TARGET_NR_truncate64
:
8094 if (!(p
= lock_user_string(arg1
)))
8096 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
8097 unlock_user(p
, arg1
, 0);
8100 #ifdef TARGET_NR_ftruncate64
8101 case TARGET_NR_ftruncate64
:
8102 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
8105 #ifdef TARGET_NR_stat64
8106 case TARGET_NR_stat64
:
8107 if (!(p
= lock_user_string(arg1
)))
8109 ret
= get_errno(stat(path(p
), &st
));
8110 unlock_user(p
, arg1
, 0);
8112 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
8115 #ifdef TARGET_NR_lstat64
8116 case TARGET_NR_lstat64
:
8117 if (!(p
= lock_user_string(arg1
)))
8119 ret
= get_errno(lstat(path(p
), &st
));
8120 unlock_user(p
, arg1
, 0);
8122 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
8125 #ifdef TARGET_NR_fstat64
8126 case TARGET_NR_fstat64
:
8127 ret
= get_errno(fstat(arg1
, &st
));
8129 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
8132 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
8133 #ifdef TARGET_NR_fstatat64
8134 case TARGET_NR_fstatat64
:
8136 #ifdef TARGET_NR_newfstatat
8137 case TARGET_NR_newfstatat
:
8139 if (!(p
= lock_user_string(arg2
)))
8141 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
8143 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
8146 case TARGET_NR_lchown
:
8147 if (!(p
= lock_user_string(arg1
)))
8149 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
8150 unlock_user(p
, arg1
, 0);
8152 #ifdef TARGET_NR_getuid
8153 case TARGET_NR_getuid
:
8154 ret
= get_errno(high2lowuid(getuid()));
8157 #ifdef TARGET_NR_getgid
8158 case TARGET_NR_getgid
:
8159 ret
= get_errno(high2lowgid(getgid()));
8162 #ifdef TARGET_NR_geteuid
8163 case TARGET_NR_geteuid
:
8164 ret
= get_errno(high2lowuid(geteuid()));
8167 #ifdef TARGET_NR_getegid
8168 case TARGET_NR_getegid
:
8169 ret
= get_errno(high2lowgid(getegid()));
8172 case TARGET_NR_setreuid
:
8173 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
8175 case TARGET_NR_setregid
:
8176 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
8178 case TARGET_NR_getgroups
:
8180 int gidsetsize
= arg1
;
8181 target_id
*target_grouplist
;
8185 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
8186 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
8187 if (gidsetsize
== 0)
8189 if (!is_error(ret
)) {
8190 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
8191 if (!target_grouplist
)
8193 for(i
= 0;i
< ret
; i
++)
8194 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
8195 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
8199 case TARGET_NR_setgroups
:
8201 int gidsetsize
= arg1
;
8202 target_id
*target_grouplist
;
8203 gid_t
*grouplist
= NULL
;
8206 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
8207 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
8208 if (!target_grouplist
) {
8209 ret
= -TARGET_EFAULT
;
8212 for (i
= 0; i
< gidsetsize
; i
++) {
8213 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
8215 unlock_user(target_grouplist
, arg2
, 0);
8217 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
8220 case TARGET_NR_fchown
:
8221 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
8223 #if defined(TARGET_NR_fchownat)
8224 case TARGET_NR_fchownat
:
8225 if (!(p
= lock_user_string(arg2
)))
8227 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
8228 low2highgid(arg4
), arg5
));
8229 unlock_user(p
, arg2
, 0);
8232 #ifdef TARGET_NR_setresuid
8233 case TARGET_NR_setresuid
:
8234 ret
= get_errno(setresuid(low2highuid(arg1
),
8236 low2highuid(arg3
)));
8239 #ifdef TARGET_NR_getresuid
8240 case TARGET_NR_getresuid
:
8242 uid_t ruid
, euid
, suid
;
8243 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
8244 if (!is_error(ret
)) {
8245 if (put_user_id(high2lowuid(ruid
), arg1
)
8246 || put_user_id(high2lowuid(euid
), arg2
)
8247 || put_user_id(high2lowuid(suid
), arg3
))
8253 #ifdef TARGET_NR_getresgid
8254 case TARGET_NR_setresgid
:
8255 ret
= get_errno(setresgid(low2highgid(arg1
),
8257 low2highgid(arg3
)));
8260 #ifdef TARGET_NR_getresgid
8261 case TARGET_NR_getresgid
:
8263 gid_t rgid
, egid
, sgid
;
8264 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
8265 if (!is_error(ret
)) {
8266 if (put_user_id(high2lowgid(rgid
), arg1
)
8267 || put_user_id(high2lowgid(egid
), arg2
)
8268 || put_user_id(high2lowgid(sgid
), arg3
))
8274 case TARGET_NR_chown
:
8275 if (!(p
= lock_user_string(arg1
)))
8277 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
8278 unlock_user(p
, arg1
, 0);
8280 case TARGET_NR_setuid
:
8281 ret
= get_errno(setuid(low2highuid(arg1
)));
8283 case TARGET_NR_setgid
:
8284 ret
= get_errno(setgid(low2highgid(arg1
)));
8286 case TARGET_NR_setfsuid
:
8287 ret
= get_errno(setfsuid(arg1
));
8289 case TARGET_NR_setfsgid
:
8290 ret
= get_errno(setfsgid(arg1
));
8293 #ifdef TARGET_NR_lchown32
8294 case TARGET_NR_lchown32
:
8295 if (!(p
= lock_user_string(arg1
)))
8297 ret
= get_errno(lchown(p
, arg2
, arg3
));
8298 unlock_user(p
, arg1
, 0);
8301 #ifdef TARGET_NR_getuid32
8302 case TARGET_NR_getuid32
:
8303 ret
= get_errno(getuid());
8307 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
8308 /* Alpha specific */
8309 case TARGET_NR_getxuid
:
8313 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
8315 ret
= get_errno(getuid());
8318 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
8319 /* Alpha specific */
8320 case TARGET_NR_getxgid
:
8324 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
8326 ret
= get_errno(getgid());
8329 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
8330 /* Alpha specific */
8331 case TARGET_NR_osf_getsysinfo
:
8332 ret
= -TARGET_EOPNOTSUPP
;
8334 case TARGET_GSI_IEEE_FP_CONTROL
:
8336 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
8338 /* Copied from linux ieee_fpcr_to_swcr. */
8339 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
8340 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
8341 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
8342 | SWCR_TRAP_ENABLE_DZE
8343 | SWCR_TRAP_ENABLE_OVF
);
8344 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
8345 | SWCR_TRAP_ENABLE_INE
);
8346 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
8347 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
8349 if (put_user_u64 (swcr
, arg2
))
8355 /* case GSI_IEEE_STATE_AT_SIGNAL:
8356 -- Not implemented in linux kernel.
8358 -- Retrieves current unaligned access state; not much used.
8360 -- Retrieves implver information; surely not used.
8362 -- Grabs a copy of the HWRPB; surely not used.
8367 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
8368 /* Alpha specific */
8369 case TARGET_NR_osf_setsysinfo
:
8370 ret
= -TARGET_EOPNOTSUPP
;
8372 case TARGET_SSI_IEEE_FP_CONTROL
:
8374 uint64_t swcr
, fpcr
, orig_fpcr
;
8376 if (get_user_u64 (swcr
, arg2
)) {
8379 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
8380 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
8382 /* Copied from linux ieee_swcr_to_fpcr. */
8383 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
8384 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
8385 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
8386 | SWCR_TRAP_ENABLE_DZE
8387 | SWCR_TRAP_ENABLE_OVF
)) << 48;
8388 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
8389 | SWCR_TRAP_ENABLE_INE
)) << 57;
8390 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
8391 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
8393 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
8398 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
8400 uint64_t exc
, fpcr
, orig_fpcr
;
8403 if (get_user_u64(exc
, arg2
)) {
8407 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
8409 /* We only add to the exception status here. */
8410 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
8412 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
8415 /* Old exceptions are not signaled. */
8416 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
8418 /* If any exceptions set by this call,
8419 and are unmasked, send a signal. */
8421 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
8422 si_code
= TARGET_FPE_FLTRES
;
8424 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
8425 si_code
= TARGET_FPE_FLTUND
;
8427 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
8428 si_code
= TARGET_FPE_FLTOVF
;
8430 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
8431 si_code
= TARGET_FPE_FLTDIV
;
8433 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
8434 si_code
= TARGET_FPE_FLTINV
;
8437 target_siginfo_t info
;
8438 info
.si_signo
= SIGFPE
;
8440 info
.si_code
= si_code
;
8441 info
._sifields
._sigfault
._addr
8442 = ((CPUArchState
*)cpu_env
)->pc
;
8443 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
8448 /* case SSI_NVPAIRS:
8449 -- Used with SSIN_UACPROC to enable unaligned accesses.
8450 case SSI_IEEE_STATE_AT_SIGNAL:
8451 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
8452 -- Not implemented in linux kernel
8457 #ifdef TARGET_NR_osf_sigprocmask
8458 /* Alpha specific. */
8459 case TARGET_NR_osf_sigprocmask
:
8463 sigset_t set
, oldset
;
8466 case TARGET_SIG_BLOCK
:
8469 case TARGET_SIG_UNBLOCK
:
8472 case TARGET_SIG_SETMASK
:
8476 ret
= -TARGET_EINVAL
;
8480 target_to_host_old_sigset(&set
, &mask
);
8481 do_sigprocmask(how
, &set
, &oldset
);
8482 host_to_target_old_sigset(&mask
, &oldset
);
8488 #ifdef TARGET_NR_getgid32
8489 case TARGET_NR_getgid32
:
8490 ret
= get_errno(getgid());
8493 #ifdef TARGET_NR_geteuid32
8494 case TARGET_NR_geteuid32
:
8495 ret
= get_errno(geteuid());
8498 #ifdef TARGET_NR_getegid32
8499 case TARGET_NR_getegid32
:
8500 ret
= get_errno(getegid());
8503 #ifdef TARGET_NR_setreuid32
8504 case TARGET_NR_setreuid32
:
8505 ret
= get_errno(setreuid(arg1
, arg2
));
8508 #ifdef TARGET_NR_setregid32
8509 case TARGET_NR_setregid32
:
8510 ret
= get_errno(setregid(arg1
, arg2
));
8513 #ifdef TARGET_NR_getgroups32
8514 case TARGET_NR_getgroups32
:
8516 int gidsetsize
= arg1
;
8517 uint32_t *target_grouplist
;
8521 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
8522 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
8523 if (gidsetsize
== 0)
8525 if (!is_error(ret
)) {
8526 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
8527 if (!target_grouplist
) {
8528 ret
= -TARGET_EFAULT
;
8531 for(i
= 0;i
< ret
; i
++)
8532 target_grouplist
[i
] = tswap32(grouplist
[i
]);
8533 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
8538 #ifdef TARGET_NR_setgroups32
8539 case TARGET_NR_setgroups32
:
8541 int gidsetsize
= arg1
;
8542 uint32_t *target_grouplist
;
8546 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
8547 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
8548 if (!target_grouplist
) {
8549 ret
= -TARGET_EFAULT
;
8552 for(i
= 0;i
< gidsetsize
; i
++)
8553 grouplist
[i
] = tswap32(target_grouplist
[i
]);
8554 unlock_user(target_grouplist
, arg2
, 0);
8555 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
8559 #ifdef TARGET_NR_fchown32
8560 case TARGET_NR_fchown32
:
8561 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
8564 #ifdef TARGET_NR_setresuid32
8565 case TARGET_NR_setresuid32
:
8566 ret
= get_errno(setresuid(arg1
, arg2
, arg3
));
8569 #ifdef TARGET_NR_getresuid32
8570 case TARGET_NR_getresuid32
:
8572 uid_t ruid
, euid
, suid
;
8573 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
8574 if (!is_error(ret
)) {
8575 if (put_user_u32(ruid
, arg1
)
8576 || put_user_u32(euid
, arg2
)
8577 || put_user_u32(suid
, arg3
))
8583 #ifdef TARGET_NR_setresgid32
8584 case TARGET_NR_setresgid32
:
8585 ret
= get_errno(setresgid(arg1
, arg2
, arg3
));
8588 #ifdef TARGET_NR_getresgid32
8589 case TARGET_NR_getresgid32
:
8591 gid_t rgid
, egid
, sgid
;
8592 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
8593 if (!is_error(ret
)) {
8594 if (put_user_u32(rgid
, arg1
)
8595 || put_user_u32(egid
, arg2
)
8596 || put_user_u32(sgid
, arg3
))
8602 #ifdef TARGET_NR_chown32
8603 case TARGET_NR_chown32
:
8604 if (!(p
= lock_user_string(arg1
)))
8606 ret
= get_errno(chown(p
, arg2
, arg3
));
8607 unlock_user(p
, arg1
, 0);
8610 #ifdef TARGET_NR_setuid32
8611 case TARGET_NR_setuid32
:
8612 ret
= get_errno(setuid(arg1
));
8615 #ifdef TARGET_NR_setgid32
8616 case TARGET_NR_setgid32
:
8617 ret
= get_errno(setgid(arg1
));
8620 #ifdef TARGET_NR_setfsuid32
8621 case TARGET_NR_setfsuid32
:
8622 ret
= get_errno(setfsuid(arg1
));
8625 #ifdef TARGET_NR_setfsgid32
8626 case TARGET_NR_setfsgid32
:
8627 ret
= get_errno(setfsgid(arg1
));
8631 case TARGET_NR_pivot_root
:
8633 #ifdef TARGET_NR_mincore
8634 case TARGET_NR_mincore
:
8637 ret
= -TARGET_EFAULT
;
8638 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
8640 if (!(p
= lock_user_string(arg3
)))
8642 ret
= get_errno(mincore(a
, arg2
, p
));
8643 unlock_user(p
, arg3
, ret
);
8645 unlock_user(a
, arg1
, 0);
8649 #ifdef TARGET_NR_arm_fadvise64_64
8650 case TARGET_NR_arm_fadvise64_64
:
8653 * arm_fadvise64_64 looks like fadvise64_64 but
8654 * with different argument order
8662 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
8663 #ifdef TARGET_NR_fadvise64_64
8664 case TARGET_NR_fadvise64_64
:
8666 #ifdef TARGET_NR_fadvise64
8667 case TARGET_NR_fadvise64
:
8671 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
8672 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
8673 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
8674 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
8678 ret
= -posix_fadvise(arg1
, arg2
, arg3
, arg4
);
8681 #ifdef TARGET_NR_madvise
8682 case TARGET_NR_madvise
:
8683 /* A straight passthrough may not be safe because qemu sometimes
8684 turns private file-backed mappings into anonymous mappings.
8685 This will break MADV_DONTNEED.
8686 This is a hint, so ignoring and returning success is ok. */
8690 #if TARGET_ABI_BITS == 32
8691 case TARGET_NR_fcntl64
:
8695 struct target_flock64
*target_fl
;
8697 struct target_eabi_flock64
*target_efl
;
8700 cmd
= target_to_host_fcntl_cmd(arg2
);
8701 if (cmd
== -TARGET_EINVAL
) {
8707 case TARGET_F_GETLK64
:
8709 if (((CPUARMState
*)cpu_env
)->eabi
) {
8710 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
8712 fl
.l_type
= tswap16(target_efl
->l_type
);
8713 fl
.l_whence
= tswap16(target_efl
->l_whence
);
8714 fl
.l_start
= tswap64(target_efl
->l_start
);
8715 fl
.l_len
= tswap64(target_efl
->l_len
);
8716 fl
.l_pid
= tswap32(target_efl
->l_pid
);
8717 unlock_user_struct(target_efl
, arg3
, 0);
8721 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
8723 fl
.l_type
= tswap16(target_fl
->l_type
);
8724 fl
.l_whence
= tswap16(target_fl
->l_whence
);
8725 fl
.l_start
= tswap64(target_fl
->l_start
);
8726 fl
.l_len
= tswap64(target_fl
->l_len
);
8727 fl
.l_pid
= tswap32(target_fl
->l_pid
);
8728 unlock_user_struct(target_fl
, arg3
, 0);
8730 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
8733 if (((CPUARMState
*)cpu_env
)->eabi
) {
8734 if (!lock_user_struct(VERIFY_WRITE
, target_efl
, arg3
, 0))
8736 target_efl
->l_type
= tswap16(fl
.l_type
);
8737 target_efl
->l_whence
= tswap16(fl
.l_whence
);
8738 target_efl
->l_start
= tswap64(fl
.l_start
);
8739 target_efl
->l_len
= tswap64(fl
.l_len
);
8740 target_efl
->l_pid
= tswap32(fl
.l_pid
);
8741 unlock_user_struct(target_efl
, arg3
, 1);
8745 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg3
, 0))
8747 target_fl
->l_type
= tswap16(fl
.l_type
);
8748 target_fl
->l_whence
= tswap16(fl
.l_whence
);
8749 target_fl
->l_start
= tswap64(fl
.l_start
);
8750 target_fl
->l_len
= tswap64(fl
.l_len
);
8751 target_fl
->l_pid
= tswap32(fl
.l_pid
);
8752 unlock_user_struct(target_fl
, arg3
, 1);
8757 case TARGET_F_SETLK64
:
8758 case TARGET_F_SETLKW64
:
8760 if (((CPUARMState
*)cpu_env
)->eabi
) {
8761 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
8763 fl
.l_type
= tswap16(target_efl
->l_type
);
8764 fl
.l_whence
= tswap16(target_efl
->l_whence
);
8765 fl
.l_start
= tswap64(target_efl
->l_start
);
8766 fl
.l_len
= tswap64(target_efl
->l_len
);
8767 fl
.l_pid
= tswap32(target_efl
->l_pid
);
8768 unlock_user_struct(target_efl
, arg3
, 0);
8772 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
8774 fl
.l_type
= tswap16(target_fl
->l_type
);
8775 fl
.l_whence
= tswap16(target_fl
->l_whence
);
8776 fl
.l_start
= tswap64(target_fl
->l_start
);
8777 fl
.l_len
= tswap64(target_fl
->l_len
);
8778 fl
.l_pid
= tswap32(target_fl
->l_pid
);
8779 unlock_user_struct(target_fl
, arg3
, 0);
8781 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
8784 ret
= do_fcntl(arg1
, arg2
, arg3
);
8790 #ifdef TARGET_NR_cacheflush
8791 case TARGET_NR_cacheflush
:
8792 /* self-modifying code is handled automatically, so nothing needed */
8796 #ifdef TARGET_NR_security
8797 case TARGET_NR_security
:
8800 #ifdef TARGET_NR_getpagesize
8801 case TARGET_NR_getpagesize
:
8802 ret
= TARGET_PAGE_SIZE
;
8805 case TARGET_NR_gettid
:
8806 ret
= get_errno(gettid());
8808 #ifdef TARGET_NR_readahead
8809 case TARGET_NR_readahead
:
8810 #if TARGET_ABI_BITS == 32
8811 if (regpairs_aligned(cpu_env
)) {
8816 ret
= get_errno(readahead(arg1
, ((off64_t
)arg3
<< 32) | arg2
, arg4
));
8818 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
8823 #ifdef TARGET_NR_setxattr
8824 case TARGET_NR_listxattr
:
8825 case TARGET_NR_llistxattr
:
8829 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8831 ret
= -TARGET_EFAULT
;
8835 p
= lock_user_string(arg1
);
8837 if (num
== TARGET_NR_listxattr
) {
8838 ret
= get_errno(listxattr(p
, b
, arg3
));
8840 ret
= get_errno(llistxattr(p
, b
, arg3
));
8843 ret
= -TARGET_EFAULT
;
8845 unlock_user(p
, arg1
, 0);
8846 unlock_user(b
, arg2
, arg3
);
8849 case TARGET_NR_flistxattr
:
8853 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8855 ret
= -TARGET_EFAULT
;
8859 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
8860 unlock_user(b
, arg2
, arg3
);
8863 case TARGET_NR_setxattr
:
8864 case TARGET_NR_lsetxattr
:
8866 void *p
, *n
, *v
= 0;
8868 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
8870 ret
= -TARGET_EFAULT
;
8874 p
= lock_user_string(arg1
);
8875 n
= lock_user_string(arg2
);
8877 if (num
== TARGET_NR_setxattr
) {
8878 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
8880 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
8883 ret
= -TARGET_EFAULT
;
8885 unlock_user(p
, arg1
, 0);
8886 unlock_user(n
, arg2
, 0);
8887 unlock_user(v
, arg3
, 0);
8890 case TARGET_NR_fsetxattr
:
8894 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
8896 ret
= -TARGET_EFAULT
;
8900 n
= lock_user_string(arg2
);
8902 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
8904 ret
= -TARGET_EFAULT
;
8906 unlock_user(n
, arg2
, 0);
8907 unlock_user(v
, arg3
, 0);
8910 case TARGET_NR_getxattr
:
8911 case TARGET_NR_lgetxattr
:
8913 void *p
, *n
, *v
= 0;
8915 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
8917 ret
= -TARGET_EFAULT
;
8921 p
= lock_user_string(arg1
);
8922 n
= lock_user_string(arg2
);
8924 if (num
== TARGET_NR_getxattr
) {
8925 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
8927 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
8930 ret
= -TARGET_EFAULT
;
8932 unlock_user(p
, arg1
, 0);
8933 unlock_user(n
, arg2
, 0);
8934 unlock_user(v
, arg3
, arg4
);
8937 case TARGET_NR_fgetxattr
:
8941 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
8943 ret
= -TARGET_EFAULT
;
8947 n
= lock_user_string(arg2
);
8949 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
8951 ret
= -TARGET_EFAULT
;
8953 unlock_user(n
, arg2
, 0);
8954 unlock_user(v
, arg3
, arg4
);
8957 case TARGET_NR_removexattr
:
8958 case TARGET_NR_lremovexattr
:
8961 p
= lock_user_string(arg1
);
8962 n
= lock_user_string(arg2
);
8964 if (num
== TARGET_NR_removexattr
) {
8965 ret
= get_errno(removexattr(p
, n
));
8967 ret
= get_errno(lremovexattr(p
, n
));
8970 ret
= -TARGET_EFAULT
;
8972 unlock_user(p
, arg1
, 0);
8973 unlock_user(n
, arg2
, 0);
8976 case TARGET_NR_fremovexattr
:
8979 n
= lock_user_string(arg2
);
8981 ret
= get_errno(fremovexattr(arg1
, n
));
8983 ret
= -TARGET_EFAULT
;
8985 unlock_user(n
, arg2
, 0);
8989 #endif /* CONFIG_ATTR */
8990 #ifdef TARGET_NR_set_thread_area
8991 case TARGET_NR_set_thread_area
:
8992 #if defined(TARGET_MIPS)
8993 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
8996 #elif defined(TARGET_CRIS)
8998 ret
= -TARGET_EINVAL
;
9000 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
9004 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
9005 ret
= do_set_thread_area(cpu_env
, arg1
);
9007 #elif defined(TARGET_M68K)
9009 TaskState
*ts
= cpu
->opaque
;
9010 ts
->tp_value
= arg1
;
9015 goto unimplemented_nowarn
;
9018 #ifdef TARGET_NR_get_thread_area
9019 case TARGET_NR_get_thread_area
:
9020 #if defined(TARGET_I386) && defined(TARGET_ABI32)
9021 ret
= do_get_thread_area(cpu_env
, arg1
);
9023 #elif defined(TARGET_M68K)
9025 TaskState
*ts
= cpu
->opaque
;
9030 goto unimplemented_nowarn
;
9033 #ifdef TARGET_NR_getdomainname
9034 case TARGET_NR_getdomainname
:
9035 goto unimplemented_nowarn
;
9038 #ifdef TARGET_NR_clock_gettime
9039 case TARGET_NR_clock_gettime
:
9042 ret
= get_errno(clock_gettime(arg1
, &ts
));
9043 if (!is_error(ret
)) {
9044 host_to_target_timespec(arg2
, &ts
);
9049 #ifdef TARGET_NR_clock_getres
9050 case TARGET_NR_clock_getres
:
9053 ret
= get_errno(clock_getres(arg1
, &ts
));
9054 if (!is_error(ret
)) {
9055 host_to_target_timespec(arg2
, &ts
);
9060 #ifdef TARGET_NR_clock_nanosleep
9061 case TARGET_NR_clock_nanosleep
:
9064 target_to_host_timespec(&ts
, arg3
);
9065 ret
= get_errno(clock_nanosleep(arg1
, arg2
, &ts
, arg4
? &ts
: NULL
));
9067 host_to_target_timespec(arg4
, &ts
);
9069 #if defined(TARGET_PPC)
9070 /* clock_nanosleep is odd in that it returns positive errno values.
9071 * On PPC, CR0 bit 3 should be set in such a situation. */
9073 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
9080 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
9081 case TARGET_NR_set_tid_address
:
9082 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
9086 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
9087 case TARGET_NR_tkill
:
9088 ret
= get_errno(sys_tkill((int)arg1
, target_to_host_signal(arg2
)));
9092 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
9093 case TARGET_NR_tgkill
:
9094 ret
= get_errno(sys_tgkill((int)arg1
, (int)arg2
,
9095 target_to_host_signal(arg3
)));
9099 #ifdef TARGET_NR_set_robust_list
9100 case TARGET_NR_set_robust_list
:
9101 case TARGET_NR_get_robust_list
:
9102 /* The ABI for supporting robust futexes has userspace pass
9103 * the kernel a pointer to a linked list which is updated by
9104 * userspace after the syscall; the list is walked by the kernel
9105 * when the thread exits. Since the linked list in QEMU guest
9106 * memory isn't a valid linked list for the host and we have
9107 * no way to reliably intercept the thread-death event, we can't
9108 * support these. Silently return ENOSYS so that guest userspace
9109 * falls back to a non-robust futex implementation (which should
9110 * be OK except in the corner case of the guest crashing while
9111 * holding a mutex that is shared with another process via
9114 goto unimplemented_nowarn
;
9117 #if defined(TARGET_NR_utimensat)
9118 case TARGET_NR_utimensat
:
9120 struct timespec
*tsp
, ts
[2];
9124 target_to_host_timespec(ts
, arg3
);
9125 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
9129 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
9131 if (!(p
= lock_user_string(arg2
))) {
9132 ret
= -TARGET_EFAULT
;
9135 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
9136 unlock_user(p
, arg2
, 0);
9141 case TARGET_NR_futex
:
9142 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9144 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
9145 case TARGET_NR_inotify_init
:
9146 ret
= get_errno(sys_inotify_init());
9149 #ifdef CONFIG_INOTIFY1
9150 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
9151 case TARGET_NR_inotify_init1
:
9152 ret
= get_errno(sys_inotify_init1(arg1
));
9156 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
9157 case TARGET_NR_inotify_add_watch
:
9158 p
= lock_user_string(arg2
);
9159 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
9160 unlock_user(p
, arg2
, 0);
9163 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
9164 case TARGET_NR_inotify_rm_watch
:
9165 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
9169 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
9170 case TARGET_NR_mq_open
:
9172 struct mq_attr posix_mq_attr
, *attrp
;
9174 p
= lock_user_string(arg1
- 1);
9176 copy_from_user_mq_attr (&posix_mq_attr
, arg4
);
9177 attrp
= &posix_mq_attr
;
9181 ret
= get_errno(mq_open(p
, arg2
, arg3
, attrp
));
9182 unlock_user (p
, arg1
, 0);
9186 case TARGET_NR_mq_unlink
:
9187 p
= lock_user_string(arg1
- 1);
9188 ret
= get_errno(mq_unlink(p
));
9189 unlock_user (p
, arg1
, 0);
9192 case TARGET_NR_mq_timedsend
:
9196 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
9198 target_to_host_timespec(&ts
, arg5
);
9199 ret
= get_errno(mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
9200 host_to_target_timespec(arg5
, &ts
);
9203 ret
= get_errno(mq_send(arg1
, p
, arg3
, arg4
));
9204 unlock_user (p
, arg2
, arg3
);
9208 case TARGET_NR_mq_timedreceive
:
9213 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
9215 target_to_host_timespec(&ts
, arg5
);
9216 ret
= get_errno(mq_timedreceive(arg1
, p
, arg3
, &prio
, &ts
));
9217 host_to_target_timespec(arg5
, &ts
);
9220 ret
= get_errno(mq_receive(arg1
, p
, arg3
, &prio
));
9221 unlock_user (p
, arg2
, arg3
);
9223 put_user_u32(prio
, arg4
);
9227 /* Not implemented for now... */
9228 /* case TARGET_NR_mq_notify: */
9231 case TARGET_NR_mq_getsetattr
:
9233 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
9236 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
9237 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
9240 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
9241 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
9248 #ifdef CONFIG_SPLICE
9249 #ifdef TARGET_NR_tee
9252 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
9256 #ifdef TARGET_NR_splice
9257 case TARGET_NR_splice
:
9259 loff_t loff_in
, loff_out
;
9260 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
9262 get_user_u64(loff_in
, arg2
);
9263 ploff_in
= &loff_in
;
9266 get_user_u64(loff_out
, arg2
);
9267 ploff_out
= &loff_out
;
9269 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
9273 #ifdef TARGET_NR_vmsplice
9274 case TARGET_NR_vmsplice
:
9276 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
9278 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
9279 unlock_iovec(vec
, arg2
, arg3
, 0);
9281 ret
= -host_to_target_errno(errno
);
9286 #endif /* CONFIG_SPLICE */
9287 #ifdef CONFIG_EVENTFD
9288 #if defined(TARGET_NR_eventfd)
9289 case TARGET_NR_eventfd
:
9290 ret
= get_errno(eventfd(arg1
, 0));
9293 #if defined(TARGET_NR_eventfd2)
9294 case TARGET_NR_eventfd2
:
9296 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
9297 if (arg2
& TARGET_O_NONBLOCK
) {
9298 host_flags
|= O_NONBLOCK
;
9300 if (arg2
& TARGET_O_CLOEXEC
) {
9301 host_flags
|= O_CLOEXEC
;
9303 ret
= get_errno(eventfd(arg1
, host_flags
));
9307 #endif /* CONFIG_EVENTFD */
9308 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
9309 case TARGET_NR_fallocate
:
9310 #if TARGET_ABI_BITS == 32
9311 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
9312 target_offset64(arg5
, arg6
)));
9314 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
9318 #if defined(CONFIG_SYNC_FILE_RANGE)
9319 #if defined(TARGET_NR_sync_file_range)
9320 case TARGET_NR_sync_file_range
:
9321 #if TARGET_ABI_BITS == 32
9322 #if defined(TARGET_MIPS)
9323 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
9324 target_offset64(arg5
, arg6
), arg7
));
9326 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
9327 target_offset64(arg4
, arg5
), arg6
));
9328 #endif /* !TARGET_MIPS */
9330 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
9334 #if defined(TARGET_NR_sync_file_range2)
9335 case TARGET_NR_sync_file_range2
:
9336 /* This is like sync_file_range but the arguments are reordered */
9337 #if TARGET_ABI_BITS == 32
9338 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
9339 target_offset64(arg5
, arg6
), arg2
));
9341 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
9346 #if defined(CONFIG_EPOLL)
9347 #if defined(TARGET_NR_epoll_create)
9348 case TARGET_NR_epoll_create
:
9349 ret
= get_errno(epoll_create(arg1
));
9352 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
9353 case TARGET_NR_epoll_create1
:
9354 ret
= get_errno(epoll_create1(arg1
));
9357 #if defined(TARGET_NR_epoll_ctl)
9358 case TARGET_NR_epoll_ctl
:
9360 struct epoll_event ep
;
9361 struct epoll_event
*epp
= 0;
9363 struct target_epoll_event
*target_ep
;
9364 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
9367 ep
.events
= tswap32(target_ep
->events
);
9368 /* The epoll_data_t union is just opaque data to the kernel,
9369 * so we transfer all 64 bits across and need not worry what
9370 * actual data type it is.
9372 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
9373 unlock_user_struct(target_ep
, arg4
, 0);
9376 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
9381 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
9382 #define IMPLEMENT_EPOLL_PWAIT
9384 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
9385 #if defined(TARGET_NR_epoll_wait)
9386 case TARGET_NR_epoll_wait
:
9388 #if defined(IMPLEMENT_EPOLL_PWAIT)
9389 case TARGET_NR_epoll_pwait
:
9392 struct target_epoll_event
*target_ep
;
9393 struct epoll_event
*ep
;
9395 int maxevents
= arg3
;
9398 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
9399 maxevents
* sizeof(struct target_epoll_event
), 1);
9404 ep
= alloca(maxevents
* sizeof(struct epoll_event
));
9407 #if defined(IMPLEMENT_EPOLL_PWAIT)
9408 case TARGET_NR_epoll_pwait
:
9410 target_sigset_t
*target_set
;
9411 sigset_t _set
, *set
= &_set
;
9414 target_set
= lock_user(VERIFY_READ
, arg5
,
9415 sizeof(target_sigset_t
), 1);
9417 unlock_user(target_ep
, arg2
, 0);
9420 target_to_host_sigset(set
, target_set
);
9421 unlock_user(target_set
, arg5
, 0);
9426 ret
= get_errno(epoll_pwait(epfd
, ep
, maxevents
, timeout
, set
));
9430 #if defined(TARGET_NR_epoll_wait)
9431 case TARGET_NR_epoll_wait
:
9432 ret
= get_errno(epoll_wait(epfd
, ep
, maxevents
, timeout
));
9436 ret
= -TARGET_ENOSYS
;
9438 if (!is_error(ret
)) {
9440 for (i
= 0; i
< ret
; i
++) {
9441 target_ep
[i
].events
= tswap32(ep
[i
].events
);
9442 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
9445 unlock_user(target_ep
, arg2
, ret
* sizeof(struct target_epoll_event
));
9450 #ifdef TARGET_NR_prlimit64
9451 case TARGET_NR_prlimit64
:
9453 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
9454 struct target_rlimit64
*target_rnew
, *target_rold
;
9455 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
9457 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
9460 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
9461 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
9462 unlock_user_struct(target_rnew
, arg3
, 0);
9466 ret
= get_errno(sys_prlimit64(arg1
, arg2
, rnewp
, arg4
? &rold
: 0));
9467 if (!is_error(ret
) && arg4
) {
9468 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
9471 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
9472 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
9473 unlock_user_struct(target_rold
, arg4
, 1);
9478 #ifdef TARGET_NR_gethostname
9479 case TARGET_NR_gethostname
:
9481 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
9483 ret
= get_errno(gethostname(name
, arg2
));
9484 unlock_user(name
, arg1
, arg2
);
9486 ret
= -TARGET_EFAULT
;
9491 #ifdef TARGET_NR_atomic_cmpxchg_32
9492 case TARGET_NR_atomic_cmpxchg_32
:
9494 /* should use start_exclusive from main.c */
9495 abi_ulong mem_value
;
9496 if (get_user_u32(mem_value
, arg6
)) {
9497 target_siginfo_t info
;
9498 info
.si_signo
= SIGSEGV
;
9500 info
.si_code
= TARGET_SEGV_MAPERR
;
9501 info
._sifields
._sigfault
._addr
= arg6
;
9502 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
9506 if (mem_value
== arg2
)
9507 put_user_u32(arg1
, arg6
);
9512 #ifdef TARGET_NR_atomic_barrier
9513 case TARGET_NR_atomic_barrier
:
9515 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
9521 #ifdef TARGET_NR_timer_create
9522 case TARGET_NR_timer_create
:
9524 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
9526 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
9527 struct target_timer_t
*ptarget_timer
;
9530 int timer_index
= next_free_host_timer();
9532 if (timer_index
< 0) {
9533 ret
= -TARGET_EAGAIN
;
9535 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
9538 phost_sevp
= &host_sevp
;
9539 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
9545 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
9549 if (!lock_user_struct(VERIFY_WRITE
, ptarget_timer
, arg3
, 1)) {
9552 ptarget_timer
->ptr
= tswap32(0xcafe0000 | timer_index
);
9553 unlock_user_struct(ptarget_timer
, arg3
, 1);
9560 #ifdef TARGET_NR_timer_settime
9561 case TARGET_NR_timer_settime
:
9563 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
9564 * struct itimerspec * old_value */
9566 if (arg3
== 0 || arg1
< 0 || arg1
>= ARRAY_SIZE(g_posix_timers
)) {
9567 ret
= -TARGET_EINVAL
;
9569 timer_t htimer
= g_posix_timers
[arg1
];
9570 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
9572 target_to_host_itimerspec(&hspec_new
, arg3
);
9574 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
9575 host_to_target_itimerspec(arg2
, &hspec_old
);
9581 #ifdef TARGET_NR_timer_gettime
9582 case TARGET_NR_timer_gettime
:
9584 /* args: timer_t timerid, struct itimerspec *curr_value */
9587 return -TARGET_EFAULT
;
9588 } else if (arg1
< 0 || arg1
>= ARRAY_SIZE(g_posix_timers
)) {
9589 ret
= -TARGET_EINVAL
;
9591 timer_t htimer
= g_posix_timers
[arg1
];
9592 struct itimerspec hspec
;
9593 ret
= get_errno(timer_gettime(htimer
, &hspec
));
9595 if (host_to_target_itimerspec(arg2
, &hspec
)) {
9596 ret
= -TARGET_EFAULT
;
9603 #ifdef TARGET_NR_timer_getoverrun
9604 case TARGET_NR_timer_getoverrun
:
9606 /* args: timer_t timerid */
9608 if (arg1
< 0 || arg1
>= ARRAY_SIZE(g_posix_timers
)) {
9609 ret
= -TARGET_EINVAL
;
9611 timer_t htimer
= g_posix_timers
[arg1
];
9612 ret
= get_errno(timer_getoverrun(htimer
));
9618 #ifdef TARGET_NR_timer_delete
9619 case TARGET_NR_timer_delete
:
9621 /* args: timer_t timerid */
9623 if (arg1
< 0 || arg1
>= ARRAY_SIZE(g_posix_timers
)) {
9624 ret
= -TARGET_EINVAL
;
9626 timer_t htimer
= g_posix_timers
[arg1
];
9627 ret
= get_errno(timer_delete(htimer
));
9628 g_posix_timers
[arg1
] = 0;
9634 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
9635 case TARGET_NR_timerfd_create
:
9636 ret
= get_errno(timerfd_create(arg1
,
9637 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
9641 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
9642 case TARGET_NR_timerfd_gettime
:
9644 struct itimerspec its_curr
;
9646 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
9648 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
9655 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
9656 case TARGET_NR_timerfd_settime
:
9658 struct itimerspec its_new
, its_old
, *p_new
;
9661 if (target_to_host_itimerspec(&its_new
, arg3
)) {
9669 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
9671 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
9678 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
9679 case TARGET_NR_ioprio_get
:
9680 ret
= get_errno(ioprio_get(arg1
, arg2
));
9684 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
9685 case TARGET_NR_ioprio_set
:
9686 ret
= get_errno(ioprio_set(arg1
, arg2
, arg3
));
9690 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
9691 case TARGET_NR_setns
:
9692 ret
= get_errno(setns(arg1
, arg2
));
9695 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
9696 case TARGET_NR_unshare
:
9697 ret
= get_errno(unshare(arg1
));
9703 gemu_log("qemu: Unsupported syscall: %d\n", num
);
9704 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
9705 unimplemented_nowarn
:
9707 ret
= -TARGET_ENOSYS
;
9712 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
9715 print_syscall_ret(num
, ret
);
9718 ret
= -TARGET_EFAULT
;