4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
32 #include <sys/types.h>
38 #include <sys/mount.h>
40 #include <sys/fsuid.h>
41 #include <sys/personality.h>
42 #include <sys/prctl.h>
43 #include <sys/resource.h>
46 #include <linux/capability.h>
50 int __clone2(int (*fn
)(void *), void *child_stack_base
,
51 size_t stack_size
, int flags
, void *arg
, ...);
53 #include <sys/socket.h>
57 #include <sys/times.h>
60 #include <sys/statfs.h>
62 #include <sys/sysinfo.h>
63 //#include <sys/user.h>
64 #include <netinet/ip.h>
65 #include <netinet/tcp.h>
66 #include <linux/wireless.h>
67 #include <linux/icmp.h>
68 #include "qemu-common.h"
73 #include <sys/eventfd.h>
76 #include <sys/epoll.h>
79 #include "qemu/xattr.h"
81 #ifdef CONFIG_SENDFILE
82 #include <sys/sendfile.h>
85 #define termios host_termios
86 #define winsize host_winsize
87 #define termio host_termio
88 #define sgttyb host_sgttyb /* same as target */
89 #define tchars host_tchars /* same as target */
90 #define ltchars host_ltchars /* same as target */
92 #include <linux/termios.h>
93 #include <linux/unistd.h>
94 #include <linux/cdrom.h>
95 #include <linux/hdreg.h>
96 #include <linux/soundcard.h>
98 #include <linux/mtio.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
103 #include <linux/fb.h>
104 #include <linux/vt.h>
105 #include <linux/dm-ioctl.h>
106 #include <linux/reboot.h>
107 #include <linux/route.h>
108 #include <linux/filter.h>
109 #include <linux/blkpg.h>
110 #include "linux_loop.h"
115 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
116 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
120 //#include <linux/msdos_fs.h>
121 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
122 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
133 #define _syscall0(type,name) \
134 static type name (void) \
136 return syscall(__NR_##name); \
139 #define _syscall1(type,name,type1,arg1) \
140 static type name (type1 arg1) \
142 return syscall(__NR_##name, arg1); \
145 #define _syscall2(type,name,type1,arg1,type2,arg2) \
146 static type name (type1 arg1,type2 arg2) \
148 return syscall(__NR_##name, arg1, arg2); \
151 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
152 static type name (type1 arg1,type2 arg2,type3 arg3) \
154 return syscall(__NR_##name, arg1, arg2, arg3); \
157 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
158 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
160 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
163 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
165 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
167 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
171 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
172 type5,arg5,type6,arg6) \
173 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
176 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
180 #define __NR_sys_uname __NR_uname
181 #define __NR_sys_getcwd1 __NR_getcwd
182 #define __NR_sys_getdents __NR_getdents
183 #define __NR_sys_getdents64 __NR_getdents64
184 #define __NR_sys_getpriority __NR_getpriority
185 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
186 #define __NR_sys_syslog __NR_syslog
187 #define __NR_sys_tgkill __NR_tgkill
188 #define __NR_sys_tkill __NR_tkill
189 #define __NR_sys_futex __NR_futex
190 #define __NR_sys_inotify_init __NR_inotify_init
191 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
192 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
194 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
196 #define __NR__llseek __NR_lseek
199 /* Newer kernel ports have llseek() instead of _llseek() */
200 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
201 #define TARGET_NR__llseek TARGET_NR_llseek
205 _syscall0(int, gettid
)
207 /* This is a replacement for the host gettid() and must return a host
209 static int gettid(void) {
214 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
216 #if !defined(__NR_getdents) || \
217 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
218 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
220 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
221 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
222 loff_t
*, res
, uint
, wh
);
224 _syscall3(int,sys_rt_sigqueueinfo
,int,pid
,int,sig
,siginfo_t
*,uinfo
)
225 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
226 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
227 _syscall3(int,sys_tgkill
,int,tgid
,int,pid
,int,sig
)
229 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
230 _syscall2(int,sys_tkill
,int,tid
,int,sig
)
232 #ifdef __NR_exit_group
233 _syscall1(int,exit_group
,int,error_code
)
235 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
236 _syscall1(int,set_tid_address
,int *,tidptr
)
238 #if defined(TARGET_NR_futex) && defined(__NR_futex)
239 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
240 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
242 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
243 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
244 unsigned long *, user_mask_ptr
);
245 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
246 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
247 unsigned long *, user_mask_ptr
);
248 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
250 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
251 struct __user_cap_data_struct
*, data
);
252 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
253 struct __user_cap_data_struct
*, data
);
255 static bitmask_transtbl fcntl_flags_tbl
[] = {
256 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
257 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
258 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
259 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
260 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
261 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
262 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
263 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
264 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
265 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
266 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
267 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
268 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
269 #if defined(O_DIRECT)
270 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
272 #if defined(O_NOATIME)
273 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
275 #if defined(O_CLOEXEC)
276 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
279 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
281 /* Don't terminate the list prematurely on 64-bit host+guest. */
282 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
283 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
/* getcwd() wrapper matching the getcwd(2) syscall convention:
 * returns strlen(buf) + 1 (the number of bytes stored, including the
 * terminating NUL) on success, or -1 with errno set on failure.
 * (Reconstructed: the extraction had dropped the braces and the
 * error-return statement.) */
static int sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }
  return strlen(buf)+1;
}
297 #ifdef TARGET_NR_openat
/* openat(2) wrapper: only forwards 'mode' when O_CREAT is present,
 * because open(2)/openat(2) read the mode argument only in that case.
 * (Reconstructed: braces and comment continuation lines were missing
 * from the extraction.) */
static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
{
  /*
   * open(2) has extra parameter 'mode' when called with
   * flag O_CREAT.
   */
  if ((flags & O_CREAT) != 0) {
      return (openat(dirfd, pathname, flags, mode));
  }
  return (openat(dirfd, pathname, flags));
}
311 #ifdef TARGET_NR_utimensat
312 #ifdef CONFIG_UTIMENSAT
/* utimensat(2) wrapper (glibc-backed branch). A NULL pathname selects
 * futimens() on the descriptor itself, mirroring the kernel's behaviour
 * for utimensat with a NULL path. Returns 0 or -1 with errno set.
 * (Reconstructed: braces and the else keyword were missing from the
 * extraction.) */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    if (pathname == NULL)
        return futimens(dirfd, times);
    else
        return utimensat(dirfd, pathname, times, flags);
}
321 #elif defined(__NR_utimensat)
322 #define __NR_sys_utimensat __NR_utimensat
323 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
324 const struct timespec
*,tsp
,int,flags
)
326 static int sys_utimensat(int dirfd
, const char *pathname
,
327 const struct timespec times
[2], int flags
)
333 #endif /* TARGET_NR_utimensat */
335 #ifdef CONFIG_INOTIFY
336 #include <sys/inotify.h>
338 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
339 static int sys_inotify_init(void)
341 return (inotify_init());
344 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
345 static int sys_inotify_add_watch(int fd
,const char *pathname
, int32_t mask
)
347 return (inotify_add_watch(fd
, pathname
, mask
));
350 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
351 static int sys_inotify_rm_watch(int fd
, int32_t wd
)
353 return (inotify_rm_watch(fd
, wd
));
356 #ifdef CONFIG_INOTIFY1
357 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
358 static int sys_inotify_init1(int flags
)
360 return (inotify_init1(flags
));
365 /* Userspace can usually survive runtime without inotify */
366 #undef TARGET_NR_inotify_init
367 #undef TARGET_NR_inotify_init1
368 #undef TARGET_NR_inotify_add_watch
369 #undef TARGET_NR_inotify_rm_watch
370 #endif /* CONFIG_INOTIFY */
372 #if defined(TARGET_NR_ppoll)
374 # define __NR_ppoll -1
376 #define __NR_sys_ppoll __NR_ppoll
377 _syscall5(int, sys_ppoll
, struct pollfd
*, fds
, nfds_t
, nfds
,
378 struct timespec
*, timeout
, const sigset_t
*, sigmask
,
382 #if defined(TARGET_NR_pselect6)
383 #ifndef __NR_pselect6
384 # define __NR_pselect6 -1
386 #define __NR_sys_pselect6 __NR_pselect6
387 _syscall6(int, sys_pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
,
388 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
);
391 #if defined(TARGET_NR_prlimit64)
392 #ifndef __NR_prlimit64
393 # define __NR_prlimit64 -1
395 #define __NR_sys_prlimit64 __NR_prlimit64
396 /* The glibc rlimit structure may not be that used by the underlying syscall */
397 struct host_rlimit64
{
401 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
402 const struct host_rlimit64
*, new_limit
,
403 struct host_rlimit64
*, old_limit
)
407 #if defined(TARGET_NR_timer_create)
408 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
409 static timer_t g_posix_timers
[32] = { 0, } ;
411 static inline int next_free_host_timer(void)
414 /* FIXME: Does finding the next free slot require a lock? */
415 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
416 if (g_posix_timers
[k
] == 0) {
417 g_posix_timers
[k
] = (timer_t
) 1;
/* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers.
 * regpairs_aligned() tells the syscall marshalling code whether 64-bit
 * arguments occupy an even/odd register pair on this guest.
 * (Reconstructed: the #ifdef/#else/#endif skeleton was missing from the
 * extraction — verify the TARGET_ARM guard against upstream.) */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
441 #define ERRNO_TABLE_SIZE 1200
443 /* target_to_host_errno_table[] is initialized from
444 * host_to_target_errno_table[] in syscall_init(). */
445 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
449 * This list is the union of errno values overridden in asm-<arch>/errno.h
450 * minus the errnos that are not actually generic to all archs.
452 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
453 [EIDRM
] = TARGET_EIDRM
,
454 [ECHRNG
] = TARGET_ECHRNG
,
455 [EL2NSYNC
] = TARGET_EL2NSYNC
,
456 [EL3HLT
] = TARGET_EL3HLT
,
457 [EL3RST
] = TARGET_EL3RST
,
458 [ELNRNG
] = TARGET_ELNRNG
,
459 [EUNATCH
] = TARGET_EUNATCH
,
460 [ENOCSI
] = TARGET_ENOCSI
,
461 [EL2HLT
] = TARGET_EL2HLT
,
462 [EDEADLK
] = TARGET_EDEADLK
,
463 [ENOLCK
] = TARGET_ENOLCK
,
464 [EBADE
] = TARGET_EBADE
,
465 [EBADR
] = TARGET_EBADR
,
466 [EXFULL
] = TARGET_EXFULL
,
467 [ENOANO
] = TARGET_ENOANO
,
468 [EBADRQC
] = TARGET_EBADRQC
,
469 [EBADSLT
] = TARGET_EBADSLT
,
470 [EBFONT
] = TARGET_EBFONT
,
471 [ENOSTR
] = TARGET_ENOSTR
,
472 [ENODATA
] = TARGET_ENODATA
,
473 [ETIME
] = TARGET_ETIME
,
474 [ENOSR
] = TARGET_ENOSR
,
475 [ENONET
] = TARGET_ENONET
,
476 [ENOPKG
] = TARGET_ENOPKG
,
477 [EREMOTE
] = TARGET_EREMOTE
,
478 [ENOLINK
] = TARGET_ENOLINK
,
479 [EADV
] = TARGET_EADV
,
480 [ESRMNT
] = TARGET_ESRMNT
,
481 [ECOMM
] = TARGET_ECOMM
,
482 [EPROTO
] = TARGET_EPROTO
,
483 [EDOTDOT
] = TARGET_EDOTDOT
,
484 [EMULTIHOP
] = TARGET_EMULTIHOP
,
485 [EBADMSG
] = TARGET_EBADMSG
,
486 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
487 [EOVERFLOW
] = TARGET_EOVERFLOW
,
488 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
489 [EBADFD
] = TARGET_EBADFD
,
490 [EREMCHG
] = TARGET_EREMCHG
,
491 [ELIBACC
] = TARGET_ELIBACC
,
492 [ELIBBAD
] = TARGET_ELIBBAD
,
493 [ELIBSCN
] = TARGET_ELIBSCN
,
494 [ELIBMAX
] = TARGET_ELIBMAX
,
495 [ELIBEXEC
] = TARGET_ELIBEXEC
,
496 [EILSEQ
] = TARGET_EILSEQ
,
497 [ENOSYS
] = TARGET_ENOSYS
,
498 [ELOOP
] = TARGET_ELOOP
,
499 [ERESTART
] = TARGET_ERESTART
,
500 [ESTRPIPE
] = TARGET_ESTRPIPE
,
501 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
502 [EUSERS
] = TARGET_EUSERS
,
503 [ENOTSOCK
] = TARGET_ENOTSOCK
,
504 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
505 [EMSGSIZE
] = TARGET_EMSGSIZE
,
506 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
507 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
508 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
509 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
510 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
511 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
512 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
513 [EADDRINUSE
] = TARGET_EADDRINUSE
,
514 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
515 [ENETDOWN
] = TARGET_ENETDOWN
,
516 [ENETUNREACH
] = TARGET_ENETUNREACH
,
517 [ENETRESET
] = TARGET_ENETRESET
,
518 [ECONNABORTED
] = TARGET_ECONNABORTED
,
519 [ECONNRESET
] = TARGET_ECONNRESET
,
520 [ENOBUFS
] = TARGET_ENOBUFS
,
521 [EISCONN
] = TARGET_EISCONN
,
522 [ENOTCONN
] = TARGET_ENOTCONN
,
523 [EUCLEAN
] = TARGET_EUCLEAN
,
524 [ENOTNAM
] = TARGET_ENOTNAM
,
525 [ENAVAIL
] = TARGET_ENAVAIL
,
526 [EISNAM
] = TARGET_EISNAM
,
527 [EREMOTEIO
] = TARGET_EREMOTEIO
,
528 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
529 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
530 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
531 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
532 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
533 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
534 [EALREADY
] = TARGET_EALREADY
,
535 [EINPROGRESS
] = TARGET_EINPROGRESS
,
536 [ESTALE
] = TARGET_ESTALE
,
537 [ECANCELED
] = TARGET_ECANCELED
,
538 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
539 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
541 [ENOKEY
] = TARGET_ENOKEY
,
544 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
547 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
550 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
553 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
555 #ifdef ENOTRECOVERABLE
556 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
560 static inline int host_to_target_errno(int err
)
562 if(host_to_target_errno_table
[err
])
563 return host_to_target_errno_table
[err
];
567 static inline int target_to_host_errno(int err
)
569 if (target_to_host_errno_table
[err
])
570 return target_to_host_errno_table
[err
];
574 static inline abi_long
get_errno(abi_long ret
)
577 return -host_to_target_errno(errno
);
582 static inline int is_error(abi_long ret
)
584 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
587 char *target_strerror(int err
)
589 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
592 return strerror(target_to_host_errno(err
));
595 static abi_ulong target_brk
;
596 static abi_ulong target_original_brk
;
597 static abi_ulong brk_page
;
599 void target_set_brk(abi_ulong new_brk
)
601 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
602 brk_page
= HOST_PAGE_ALIGN(target_brk
);
605 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
606 #define DEBUGF_BRK(message, args...)
608 /* do_brk() must return target values and target errnos. */
609 abi_long
do_brk(abi_ulong new_brk
)
611 abi_long mapped_addr
;
614 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
617 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
620 if (new_brk
< target_original_brk
) {
621 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
626 /* If the new brk is less than the highest page reserved to the
627 * target heap allocation, set it and we're almost done... */
628 if (new_brk
<= brk_page
) {
629 /* Heap contents are initialized to zero, as for anonymous
631 if (new_brk
> target_brk
) {
632 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
634 target_brk
= new_brk
;
635 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
639 /* We need to allocate more memory after the brk... Note that
640 * we don't use MAP_FIXED because that will map over the top of
641 * any existing mapping (like the one with the host libc or qemu
642 * itself); instead we treat "mapped but at wrong address" as
643 * a failure and unmap again.
645 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
646 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
647 PROT_READ
|PROT_WRITE
,
648 MAP_ANON
|MAP_PRIVATE
, 0, 0));
650 if (mapped_addr
== brk_page
) {
651 /* Heap contents are initialized to zero, as for anonymous
652 * mapped pages. Technically the new pages are already
653 * initialized to zero since they *are* anonymous mapped
654 * pages, however we have to take care with the contents that
655 * come from the remaining part of the previous page: it may
656 * contains garbage data due to a previous heap usage (grown
658 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
660 target_brk
= new_brk
;
661 brk_page
= HOST_PAGE_ALIGN(target_brk
);
662 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
665 } else if (mapped_addr
!= -1) {
666 /* Mapped but at wrong address, meaning there wasn't actually
667 * enough space for this brk.
669 target_munmap(mapped_addr
, new_alloc_size
);
671 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
674 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
677 #if defined(TARGET_ALPHA)
678 /* We (partially) emulate OSF/1 on Alpha, which requires we
679 return a proper errno, not an unchanged brk value. */
680 return -TARGET_ENOMEM
;
682 /* For everything else, return the previous break. */
686 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
687 abi_ulong target_fds_addr
,
691 abi_ulong b
, *target_fds
;
693 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
694 if (!(target_fds
= lock_user(VERIFY_READ
,
696 sizeof(abi_ulong
) * nw
,
698 return -TARGET_EFAULT
;
702 for (i
= 0; i
< nw
; i
++) {
703 /* grab the abi_ulong */
704 __get_user(b
, &target_fds
[i
]);
705 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
706 /* check the bit inside the abi_ulong */
713 unlock_user(target_fds
, target_fds_addr
, 0);
718 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
719 abi_ulong target_fds_addr
,
722 if (target_fds_addr
) {
723 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
724 return -TARGET_EFAULT
;
732 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
738 abi_ulong
*target_fds
;
740 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
741 if (!(target_fds
= lock_user(VERIFY_WRITE
,
743 sizeof(abi_ulong
) * nw
,
745 return -TARGET_EFAULT
;
748 for (i
= 0; i
< nw
; i
++) {
750 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
751 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
754 __put_user(v
, &target_fds
[i
]);
757 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
762 #if defined(__alpha__)
768 static inline abi_long
host_to_target_clock_t(long ticks
)
770 #if HOST_HZ == TARGET_HZ
773 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
777 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
778 const struct rusage
*rusage
)
780 struct target_rusage
*target_rusage
;
782 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
783 return -TARGET_EFAULT
;
784 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
785 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
786 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
787 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
788 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
789 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
790 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
791 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
792 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
793 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
794 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
795 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
796 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
797 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
798 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
799 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
800 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
801 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
802 unlock_user_struct(target_rusage
, target_addr
, 1);
807 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
809 abi_ulong target_rlim_swap
;
812 target_rlim_swap
= tswapal(target_rlim
);
813 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
814 return RLIM_INFINITY
;
816 result
= target_rlim_swap
;
817 if (target_rlim_swap
!= (rlim_t
)result
)
818 return RLIM_INFINITY
;
823 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
825 abi_ulong target_rlim_swap
;
828 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
829 target_rlim_swap
= TARGET_RLIM_INFINITY
;
831 target_rlim_swap
= rlim
;
832 result
= tswapal(target_rlim_swap
);
837 static inline int target_to_host_resource(int code
)
840 case TARGET_RLIMIT_AS
:
842 case TARGET_RLIMIT_CORE
:
844 case TARGET_RLIMIT_CPU
:
846 case TARGET_RLIMIT_DATA
:
848 case TARGET_RLIMIT_FSIZE
:
850 case TARGET_RLIMIT_LOCKS
:
852 case TARGET_RLIMIT_MEMLOCK
:
853 return RLIMIT_MEMLOCK
;
854 case TARGET_RLIMIT_MSGQUEUE
:
855 return RLIMIT_MSGQUEUE
;
856 case TARGET_RLIMIT_NICE
:
858 case TARGET_RLIMIT_NOFILE
:
859 return RLIMIT_NOFILE
;
860 case TARGET_RLIMIT_NPROC
:
862 case TARGET_RLIMIT_RSS
:
864 case TARGET_RLIMIT_RTPRIO
:
865 return RLIMIT_RTPRIO
;
866 case TARGET_RLIMIT_SIGPENDING
:
867 return RLIMIT_SIGPENDING
;
868 case TARGET_RLIMIT_STACK
:
875 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
876 abi_ulong target_tv_addr
)
878 struct target_timeval
*target_tv
;
880 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
881 return -TARGET_EFAULT
;
883 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
884 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
886 unlock_user_struct(target_tv
, target_tv_addr
, 0);
891 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
892 const struct timeval
*tv
)
894 struct target_timeval
*target_tv
;
896 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
897 return -TARGET_EFAULT
;
899 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
900 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
902 unlock_user_struct(target_tv
, target_tv_addr
, 1);
907 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
910 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
911 abi_ulong target_mq_attr_addr
)
913 struct target_mq_attr
*target_mq_attr
;
915 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
916 target_mq_attr_addr
, 1))
917 return -TARGET_EFAULT
;
919 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
920 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
921 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
922 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
924 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
929 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
930 const struct mq_attr
*attr
)
932 struct target_mq_attr
*target_mq_attr
;
934 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
935 target_mq_attr_addr
, 0))
936 return -TARGET_EFAULT
;
938 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
939 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
940 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
941 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
943 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
949 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
950 /* do_select() must return target values and target errnos. */
951 static abi_long
do_select(int n
,
952 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
953 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
955 fd_set rfds
, wfds
, efds
;
956 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
957 struct timeval tv
, *tv_ptr
;
960 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
964 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
968 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
973 if (target_tv_addr
) {
974 if (copy_from_user_timeval(&tv
, target_tv_addr
))
975 return -TARGET_EFAULT
;
981 ret
= get_errno(select(n
, rfds_ptr
, wfds_ptr
, efds_ptr
, tv_ptr
));
983 if (!is_error(ret
)) {
984 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
985 return -TARGET_EFAULT
;
986 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
987 return -TARGET_EFAULT
;
988 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
989 return -TARGET_EFAULT
;
991 if (target_tv_addr
&& copy_to_user_timeval(target_tv_addr
, &tv
))
992 return -TARGET_EFAULT
;
999 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1002 return pipe2(host_pipe
, flags
);
1008 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1009 int flags
, int is_pipe2
)
1013 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1016 return get_errno(ret
);
1018 /* Several targets have special calling conventions for the original
1019 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1021 #if defined(TARGET_ALPHA)
1022 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1023 return host_pipe
[0];
1024 #elif defined(TARGET_MIPS)
1025 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1026 return host_pipe
[0];
1027 #elif defined(TARGET_SH4)
1028 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1029 return host_pipe
[0];
1030 #elif defined(TARGET_SPARC)
1031 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1032 return host_pipe
[0];
1036 if (put_user_s32(host_pipe
[0], pipedes
)
1037 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1038 return -TARGET_EFAULT
;
1039 return get_errno(ret
);
1042 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1043 abi_ulong target_addr
,
1046 struct target_ip_mreqn
*target_smreqn
;
1048 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1050 return -TARGET_EFAULT
;
1051 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1052 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1053 if (len
== sizeof(struct target_ip_mreqn
))
1054 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1055 unlock_user(target_smreqn
, target_addr
, 0);
1060 static inline abi_long
target_to_host_sockaddr(struct sockaddr
*addr
,
1061 abi_ulong target_addr
,
1064 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1065 sa_family_t sa_family
;
1066 struct target_sockaddr
*target_saddr
;
1068 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1070 return -TARGET_EFAULT
;
1072 sa_family
= tswap16(target_saddr
->sa_family
);
1074 /* Oops. The caller might send a incomplete sun_path; sun_path
1075 * must be terminated by \0 (see the manual page), but
1076 * unfortunately it is quite common to specify sockaddr_un
1077 * length as "strlen(x->sun_path)" while it should be
1078 * "strlen(...) + 1". We'll fix that here if needed.
1079 * Linux kernel has a similar feature.
1082 if (sa_family
== AF_UNIX
) {
1083 if (len
< unix_maxlen
&& len
> 0) {
1084 char *cp
= (char*)target_saddr
;
1086 if ( cp
[len
-1] && !cp
[len
] )
1089 if (len
> unix_maxlen
)
1093 memcpy(addr
, target_saddr
, len
);
1094 addr
->sa_family
= sa_family
;
1095 unlock_user(target_saddr
, target_addr
, 0);
1100 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1101 struct sockaddr
*addr
,
1104 struct target_sockaddr
*target_saddr
;
1106 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1108 return -TARGET_EFAULT
;
1109 memcpy(target_saddr
, addr
, len
);
1110 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1111 unlock_user(target_saddr
, target_addr
, len
);
1116 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1117 struct target_msghdr
*target_msgh
)
1119 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1120 abi_long msg_controllen
;
1121 abi_ulong target_cmsg_addr
;
1122 struct target_cmsghdr
*target_cmsg
;
1123 socklen_t space
= 0;
1125 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1126 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1128 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1129 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1131 return -TARGET_EFAULT
;
1133 while (cmsg
&& target_cmsg
) {
1134 void *data
= CMSG_DATA(cmsg
);
1135 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1137 int len
= tswapal(target_cmsg
->cmsg_len
)
1138 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
1140 space
+= CMSG_SPACE(len
);
1141 if (space
> msgh
->msg_controllen
) {
1142 space
-= CMSG_SPACE(len
);
1143 gemu_log("Host cmsg overflow\n");
1147 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1148 cmsg
->cmsg_level
= SOL_SOCKET
;
1150 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1152 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1153 cmsg
->cmsg_len
= CMSG_LEN(len
);
1155 if (cmsg
->cmsg_level
!= SOL_SOCKET
|| cmsg
->cmsg_type
!= SCM_RIGHTS
) {
1156 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1157 memcpy(data
, target_data
, len
);
1159 int *fd
= (int *)data
;
1160 int *target_fd
= (int *)target_data
;
1161 int i
, numfds
= len
/ sizeof(int);
1163 for (i
= 0; i
< numfds
; i
++)
1164 fd
[i
] = tswap32(target_fd
[i
]);
1167 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1168 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
);
1170 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1172 msgh
->msg_controllen
= space
;
1176 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1177 struct msghdr
*msgh
)
1179 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1180 abi_long msg_controllen
;
1181 abi_ulong target_cmsg_addr
;
1182 struct target_cmsghdr
*target_cmsg
;
1183 socklen_t space
= 0;
1185 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1186 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1188 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1189 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1191 return -TARGET_EFAULT
;
1193 while (cmsg
&& target_cmsg
) {
1194 void *data
= CMSG_DATA(cmsg
);
1195 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1197 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
1199 space
+= TARGET_CMSG_SPACE(len
);
1200 if (space
> msg_controllen
) {
1201 space
-= TARGET_CMSG_SPACE(len
);
1202 gemu_log("Target cmsg overflow\n");
1206 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1207 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1209 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1211 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1212 target_cmsg
->cmsg_len
= tswapal(TARGET_CMSG_LEN(len
));
1214 switch (cmsg
->cmsg_level
) {
1216 switch (cmsg
->cmsg_type
) {
1219 int *fd
= (int *)data
;
1220 int *target_fd
= (int *)target_data
;
1221 int i
, numfds
= len
/ sizeof(int);
1223 for (i
= 0; i
< numfds
; i
++)
1224 target_fd
[i
] = tswap32(fd
[i
]);
1229 struct timeval
*tv
= (struct timeval
*)data
;
1230 struct target_timeval
*target_tv
=
1231 (struct target_timeval
*)target_data
;
1233 if (len
!= sizeof(struct timeval
))
1236 /* copy struct timeval to target */
1237 target_tv
->tv_sec
= tswapal(tv
->tv_sec
);
1238 target_tv
->tv_usec
= tswapal(tv
->tv_usec
);
1241 case SCM_CREDENTIALS
:
1243 struct ucred
*cred
= (struct ucred
*)data
;
1244 struct target_ucred
*target_cred
=
1245 (struct target_ucred
*)target_data
;
1247 __put_user(cred
->pid
, &target_cred
->pid
);
1248 __put_user(cred
->uid
, &target_cred
->uid
);
1249 __put_user(cred
->gid
, &target_cred
->gid
);
1259 gemu_log("Unsupported ancillary data: %d/%d\n",
1260 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1261 memcpy(target_data
, data
, len
);
1264 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1265 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
);
1267 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1269 target_msgh
->msg_controllen
= tswapal(space
);
1273 /* do_setsockopt() Must return target values and target errnos. */
1274 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
1275 abi_ulong optval_addr
, socklen_t optlen
)
1279 struct ip_mreqn
*ip_mreq
;
1280 struct ip_mreq_source
*ip_mreq_source
;
1284 /* TCP options all take an 'int' value. */
1285 if (optlen
< sizeof(uint32_t))
1286 return -TARGET_EINVAL
;
1288 if (get_user_u32(val
, optval_addr
))
1289 return -TARGET_EFAULT
;
1290 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1297 case IP_ROUTER_ALERT
:
1301 case IP_MTU_DISCOVER
:
1307 case IP_MULTICAST_TTL
:
1308 case IP_MULTICAST_LOOP
:
1310 if (optlen
>= sizeof(uint32_t)) {
1311 if (get_user_u32(val
, optval_addr
))
1312 return -TARGET_EFAULT
;
1313 } else if (optlen
>= 1) {
1314 if (get_user_u8(val
, optval_addr
))
1315 return -TARGET_EFAULT
;
1317 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1319 case IP_ADD_MEMBERSHIP
:
1320 case IP_DROP_MEMBERSHIP
:
1321 if (optlen
< sizeof (struct target_ip_mreq
) ||
1322 optlen
> sizeof (struct target_ip_mreqn
))
1323 return -TARGET_EINVAL
;
1325 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
1326 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
1327 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
1330 case IP_BLOCK_SOURCE
:
1331 case IP_UNBLOCK_SOURCE
:
1332 case IP_ADD_SOURCE_MEMBERSHIP
:
1333 case IP_DROP_SOURCE_MEMBERSHIP
:
1334 if (optlen
!= sizeof (struct target_ip_mreq_source
))
1335 return -TARGET_EINVAL
;
1337 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
1338 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
1339 unlock_user (ip_mreq_source
, optval_addr
, 0);
1348 case IPV6_MTU_DISCOVER
:
1351 case IPV6_RECVPKTINFO
:
1353 if (optlen
< sizeof(uint32_t)) {
1354 return -TARGET_EINVAL
;
1356 if (get_user_u32(val
, optval_addr
)) {
1357 return -TARGET_EFAULT
;
1359 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1360 &val
, sizeof(val
)));
1369 /* struct icmp_filter takes an u32 value */
1370 if (optlen
< sizeof(uint32_t)) {
1371 return -TARGET_EINVAL
;
1374 if (get_user_u32(val
, optval_addr
)) {
1375 return -TARGET_EFAULT
;
1377 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1378 &val
, sizeof(val
)));
1385 case TARGET_SOL_SOCKET
:
1387 case TARGET_SO_RCVTIMEO
:
1391 optname
= SO_RCVTIMEO
;
1394 if (optlen
!= sizeof(struct target_timeval
)) {
1395 return -TARGET_EINVAL
;
1398 if (copy_from_user_timeval(&tv
, optval_addr
)) {
1399 return -TARGET_EFAULT
;
1402 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
1406 case TARGET_SO_SNDTIMEO
:
1407 optname
= SO_SNDTIMEO
;
1409 case TARGET_SO_ATTACH_FILTER
:
1411 struct target_sock_fprog
*tfprog
;
1412 struct target_sock_filter
*tfilter
;
1413 struct sock_fprog fprog
;
1414 struct sock_filter
*filter
;
1417 if (optlen
!= sizeof(*tfprog
)) {
1418 return -TARGET_EINVAL
;
1420 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
1421 return -TARGET_EFAULT
;
1423 if (!lock_user_struct(VERIFY_READ
, tfilter
,
1424 tswapal(tfprog
->filter
), 0)) {
1425 unlock_user_struct(tfprog
, optval_addr
, 1);
1426 return -TARGET_EFAULT
;
1429 fprog
.len
= tswap16(tfprog
->len
);
1430 filter
= malloc(fprog
.len
* sizeof(*filter
));
1431 if (filter
== NULL
) {
1432 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
1433 unlock_user_struct(tfprog
, optval_addr
, 1);
1434 return -TARGET_ENOMEM
;
1436 for (i
= 0; i
< fprog
.len
; i
++) {
1437 filter
[i
].code
= tswap16(tfilter
[i
].code
);
1438 filter
[i
].jt
= tfilter
[i
].jt
;
1439 filter
[i
].jf
= tfilter
[i
].jf
;
1440 filter
[i
].k
= tswap32(tfilter
[i
].k
);
1442 fprog
.filter
= filter
;
1444 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
1445 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
1448 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
1449 unlock_user_struct(tfprog
, optval_addr
, 1);
1452 /* Options with 'int' argument. */
1453 case TARGET_SO_DEBUG
:
1456 case TARGET_SO_REUSEADDR
:
1457 optname
= SO_REUSEADDR
;
1459 case TARGET_SO_TYPE
:
1462 case TARGET_SO_ERROR
:
1465 case TARGET_SO_DONTROUTE
:
1466 optname
= SO_DONTROUTE
;
1468 case TARGET_SO_BROADCAST
:
1469 optname
= SO_BROADCAST
;
1471 case TARGET_SO_SNDBUF
:
1472 optname
= SO_SNDBUF
;
1474 case TARGET_SO_RCVBUF
:
1475 optname
= SO_RCVBUF
;
1477 case TARGET_SO_KEEPALIVE
:
1478 optname
= SO_KEEPALIVE
;
1480 case TARGET_SO_OOBINLINE
:
1481 optname
= SO_OOBINLINE
;
1483 case TARGET_SO_NO_CHECK
:
1484 optname
= SO_NO_CHECK
;
1486 case TARGET_SO_PRIORITY
:
1487 optname
= SO_PRIORITY
;
1490 case TARGET_SO_BSDCOMPAT
:
1491 optname
= SO_BSDCOMPAT
;
1494 case TARGET_SO_PASSCRED
:
1495 optname
= SO_PASSCRED
;
1497 case TARGET_SO_TIMESTAMP
:
1498 optname
= SO_TIMESTAMP
;
1500 case TARGET_SO_RCVLOWAT
:
1501 optname
= SO_RCVLOWAT
;
1507 if (optlen
< sizeof(uint32_t))
1508 return -TARGET_EINVAL
;
1510 if (get_user_u32(val
, optval_addr
))
1511 return -TARGET_EFAULT
;
1512 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
1516 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
1517 ret
= -TARGET_ENOPROTOOPT
;
1522 /* do_getsockopt() Must return target values and target errnos. */
1523 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
1524 abi_ulong optval_addr
, abi_ulong optlen
)
1531 case TARGET_SOL_SOCKET
:
1534 /* These don't just return a single integer */
1535 case TARGET_SO_LINGER
:
1536 case TARGET_SO_RCVTIMEO
:
1537 case TARGET_SO_SNDTIMEO
:
1538 case TARGET_SO_PEERNAME
:
1540 case TARGET_SO_PEERCRED
: {
1543 struct target_ucred
*tcr
;
1545 if (get_user_u32(len
, optlen
)) {
1546 return -TARGET_EFAULT
;
1549 return -TARGET_EINVAL
;
1553 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
1561 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
1562 return -TARGET_EFAULT
;
1564 __put_user(cr
.pid
, &tcr
->pid
);
1565 __put_user(cr
.uid
, &tcr
->uid
);
1566 __put_user(cr
.gid
, &tcr
->gid
);
1567 unlock_user_struct(tcr
, optval_addr
, 1);
1568 if (put_user_u32(len
, optlen
)) {
1569 return -TARGET_EFAULT
;
1573 /* Options with 'int' argument. */
1574 case TARGET_SO_DEBUG
:
1577 case TARGET_SO_REUSEADDR
:
1578 optname
= SO_REUSEADDR
;
1580 case TARGET_SO_TYPE
:
1583 case TARGET_SO_ERROR
:
1586 case TARGET_SO_DONTROUTE
:
1587 optname
= SO_DONTROUTE
;
1589 case TARGET_SO_BROADCAST
:
1590 optname
= SO_BROADCAST
;
1592 case TARGET_SO_SNDBUF
:
1593 optname
= SO_SNDBUF
;
1595 case TARGET_SO_RCVBUF
:
1596 optname
= SO_RCVBUF
;
1598 case TARGET_SO_KEEPALIVE
:
1599 optname
= SO_KEEPALIVE
;
1601 case TARGET_SO_OOBINLINE
:
1602 optname
= SO_OOBINLINE
;
1604 case TARGET_SO_NO_CHECK
:
1605 optname
= SO_NO_CHECK
;
1607 case TARGET_SO_PRIORITY
:
1608 optname
= SO_PRIORITY
;
1611 case TARGET_SO_BSDCOMPAT
:
1612 optname
= SO_BSDCOMPAT
;
1615 case TARGET_SO_PASSCRED
:
1616 optname
= SO_PASSCRED
;
1618 case TARGET_SO_TIMESTAMP
:
1619 optname
= SO_TIMESTAMP
;
1621 case TARGET_SO_RCVLOWAT
:
1622 optname
= SO_RCVLOWAT
;
1629 /* TCP options all take an 'int' value. */
1631 if (get_user_u32(len
, optlen
))
1632 return -TARGET_EFAULT
;
1634 return -TARGET_EINVAL
;
1636 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1642 if (put_user_u32(val
, optval_addr
))
1643 return -TARGET_EFAULT
;
1645 if (put_user_u8(val
, optval_addr
))
1646 return -TARGET_EFAULT
;
1648 if (put_user_u32(len
, optlen
))
1649 return -TARGET_EFAULT
;
1656 case IP_ROUTER_ALERT
:
1660 case IP_MTU_DISCOVER
:
1666 case IP_MULTICAST_TTL
:
1667 case IP_MULTICAST_LOOP
:
1668 if (get_user_u32(len
, optlen
))
1669 return -TARGET_EFAULT
;
1671 return -TARGET_EINVAL
;
1673 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1676 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
1678 if (put_user_u32(len
, optlen
)
1679 || put_user_u8(val
, optval_addr
))
1680 return -TARGET_EFAULT
;
1682 if (len
> sizeof(int))
1684 if (put_user_u32(len
, optlen
)
1685 || put_user_u32(val
, optval_addr
))
1686 return -TARGET_EFAULT
;
1690 ret
= -TARGET_ENOPROTOOPT
;
1696 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1698 ret
= -TARGET_EOPNOTSUPP
;
1704 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
1705 int count
, int copy
)
1707 struct target_iovec
*target_vec
;
1709 abi_ulong total_len
, max_len
;
1717 if (count
< 0 || count
> IOV_MAX
) {
1722 vec
= calloc(count
, sizeof(struct iovec
));
1728 target_vec
= lock_user(VERIFY_READ
, target_addr
,
1729 count
* sizeof(struct target_iovec
), 1);
1730 if (target_vec
== NULL
) {
1735 /* ??? If host page size > target page size, this will result in a
1736 value larger than what we can actually support. */
1737 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
1740 for (i
= 0; i
< count
; i
++) {
1741 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
1742 abi_long len
= tswapal(target_vec
[i
].iov_len
);
1747 } else if (len
== 0) {
1748 /* Zero length pointer is ignored. */
1749 vec
[i
].iov_base
= 0;
1751 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
1752 if (!vec
[i
].iov_base
) {
1756 if (len
> max_len
- total_len
) {
1757 len
= max_len
- total_len
;
1760 vec
[i
].iov_len
= len
;
1764 unlock_user(target_vec
, target_addr
, 0);
1768 unlock_user(target_vec
, target_addr
, 0);
1775 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
1776 int count
, int copy
)
1778 struct target_iovec
*target_vec
;
1781 target_vec
= lock_user(VERIFY_READ
, target_addr
,
1782 count
* sizeof(struct target_iovec
), 1);
1784 for (i
= 0; i
< count
; i
++) {
1785 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
1786 abi_long len
= tswapal(target_vec
[i
].iov_base
);
1790 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
1792 unlock_user(target_vec
, target_addr
, 0);
1798 static inline int target_to_host_sock_type(int *type
)
1801 int target_type
= *type
;
1803 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
1804 case TARGET_SOCK_DGRAM
:
1805 host_type
= SOCK_DGRAM
;
1807 case TARGET_SOCK_STREAM
:
1808 host_type
= SOCK_STREAM
;
1811 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
1814 if (target_type
& TARGET_SOCK_CLOEXEC
) {
1815 #if defined(SOCK_CLOEXEC)
1816 host_type
|= SOCK_CLOEXEC
;
1818 return -TARGET_EINVAL
;
1821 if (target_type
& TARGET_SOCK_NONBLOCK
) {
1822 #if defined(SOCK_NONBLOCK)
1823 host_type
|= SOCK_NONBLOCK
;
1824 #elif !defined(O_NONBLOCK)
1825 return -TARGET_EINVAL
;
1832 /* Try to emulate socket type flags after socket creation. */
1833 static int sock_flags_fixup(int fd
, int target_type
)
1835 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
1836 if (target_type
& TARGET_SOCK_NONBLOCK
) {
1837 int flags
= fcntl(fd
, F_GETFL
);
1838 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
1840 return -TARGET_EINVAL
;
1847 /* do_socket() Must return target values and target errnos. */
1848 static abi_long
do_socket(int domain
, int type
, int protocol
)
1850 int target_type
= type
;
1853 ret
= target_to_host_sock_type(&type
);
1858 if (domain
== PF_NETLINK
)
1859 return -EAFNOSUPPORT
; /* do not NETLINK socket connections possible */
1860 ret
= get_errno(socket(domain
, type
, protocol
));
1862 ret
= sock_flags_fixup(ret
, target_type
);
1867 /* do_bind() Must return target values and target errnos. */
1868 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
1874 if ((int)addrlen
< 0) {
1875 return -TARGET_EINVAL
;
1878 addr
= alloca(addrlen
+1);
1880 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1884 return get_errno(bind(sockfd
, addr
, addrlen
));
1887 /* do_connect() Must return target values and target errnos. */
1888 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
1894 if ((int)addrlen
< 0) {
1895 return -TARGET_EINVAL
;
1898 addr
= alloca(addrlen
);
1900 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1904 return get_errno(connect(sockfd
, addr
, addrlen
));
1907 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
1908 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
1909 int flags
, int send
)
1915 abi_ulong target_vec
;
1917 if (msgp
->msg_name
) {
1918 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
1919 msg
.msg_name
= alloca(msg
.msg_namelen
);
1920 ret
= target_to_host_sockaddr(msg
.msg_name
, tswapal(msgp
->msg_name
),
1926 msg
.msg_name
= NULL
;
1927 msg
.msg_namelen
= 0;
1929 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
1930 msg
.msg_control
= alloca(msg
.msg_controllen
);
1931 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
1933 count
= tswapal(msgp
->msg_iovlen
);
1934 target_vec
= tswapal(msgp
->msg_iov
);
1935 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
1936 target_vec
, count
, send
);
1938 ret
= -host_to_target_errno(errno
);
1941 msg
.msg_iovlen
= count
;
1945 ret
= target_to_host_cmsg(&msg
, msgp
);
1947 ret
= get_errno(sendmsg(fd
, &msg
, flags
));
1949 ret
= get_errno(recvmsg(fd
, &msg
, flags
));
1950 if (!is_error(ret
)) {
1952 ret
= host_to_target_cmsg(msgp
, &msg
);
1953 if (!is_error(ret
)) {
1954 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
1955 if (msg
.msg_name
!= NULL
) {
1956 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
1957 msg
.msg_name
, msg
.msg_namelen
);
1969 unlock_iovec(vec
, target_vec
, count
, !send
);
1974 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
1975 int flags
, int send
)
1978 struct target_msghdr
*msgp
;
1980 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
1984 return -TARGET_EFAULT
;
1986 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
1987 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
#ifdef TARGET_NR_sendmmsg
/* We don't rely on the C library to have sendmmsg/recvmmsg support,
 * so it might not have this *mmsg-specific flag either.
 */
#ifndef MSG_WAITFORONE
#define MSG_WAITFORONE 0x10000
#endif

/* Emulate sendmmsg/recvmmsg by looping do_sendrecvmsg_locked() over
 * the target mmsghdr vector, honouring MSG_WAITFORONE semantics. */
static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
#endif
/* If we don't have a system accept4() then just call accept.
 * The callsites to do_accept4() will ensure that they don't
 * pass a non-zero flags argument in this config.
 */
#ifndef CONFIG_ACCEPT4
static inline int accept4(int sockfd, struct sockaddr *addr,
                          socklen_t *addrlen, int flags)
{
    assert(flags == 0);
    return accept(sockfd, addr, addrlen);
}
#endif
2053 /* do_accept4() Must return target values and target errnos. */
2054 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
2055 abi_ulong target_addrlen_addr
, int flags
)
2062 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
2064 if (target_addr
== 0) {
2065 return get_errno(accept4(fd
, NULL
, NULL
, host_flags
));
2068 /* linux returns EINVAL if addrlen pointer is invalid */
2069 if (get_user_u32(addrlen
, target_addrlen_addr
))
2070 return -TARGET_EINVAL
;
2072 if ((int)addrlen
< 0) {
2073 return -TARGET_EINVAL
;
2076 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2077 return -TARGET_EINVAL
;
2079 addr
= alloca(addrlen
);
2081 ret
= get_errno(accept4(fd
, addr
, &addrlen
, host_flags
));
2082 if (!is_error(ret
)) {
2083 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2084 if (put_user_u32(addrlen
, target_addrlen_addr
))
2085 ret
= -TARGET_EFAULT
;
2090 /* do_getpeername() Must return target values and target errnos. */
2091 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
2092 abi_ulong target_addrlen_addr
)
2098 if (get_user_u32(addrlen
, target_addrlen_addr
))
2099 return -TARGET_EFAULT
;
2101 if ((int)addrlen
< 0) {
2102 return -TARGET_EINVAL
;
2105 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2106 return -TARGET_EFAULT
;
2108 addr
= alloca(addrlen
);
2110 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
2111 if (!is_error(ret
)) {
2112 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2113 if (put_user_u32(addrlen
, target_addrlen_addr
))
2114 ret
= -TARGET_EFAULT
;
2119 /* do_getsockname() Must return target values and target errnos. */
2120 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
2121 abi_ulong target_addrlen_addr
)
2127 if (get_user_u32(addrlen
, target_addrlen_addr
))
2128 return -TARGET_EFAULT
;
2130 if ((int)addrlen
< 0) {
2131 return -TARGET_EINVAL
;
2134 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2135 return -TARGET_EFAULT
;
2137 addr
= alloca(addrlen
);
2139 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
2140 if (!is_error(ret
)) {
2141 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2142 if (put_user_u32(addrlen
, target_addrlen_addr
))
2143 ret
= -TARGET_EFAULT
;
2148 /* do_socketpair() Must return target values and target errnos. */
2149 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
2150 abi_ulong target_tab_addr
)
2155 target_to_host_sock_type(&type
);
2157 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
2158 if (!is_error(ret
)) {
2159 if (put_user_s32(tab
[0], target_tab_addr
)
2160 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
2161 ret
= -TARGET_EFAULT
;
2166 /* do_sendto() Must return target values and target errnos. */
2167 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
2168 abi_ulong target_addr
, socklen_t addrlen
)
2174 if ((int)addrlen
< 0) {
2175 return -TARGET_EINVAL
;
2178 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
2180 return -TARGET_EFAULT
;
2182 addr
= alloca(addrlen
);
2183 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
2185 unlock_user(host_msg
, msg
, 0);
2188 ret
= get_errno(sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
2190 ret
= get_errno(send(fd
, host_msg
, len
, flags
));
2192 unlock_user(host_msg
, msg
, 0);
2196 /* do_recvfrom() Must return target values and target errnos. */
2197 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
2198 abi_ulong target_addr
,
2199 abi_ulong target_addrlen
)
2206 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
2208 return -TARGET_EFAULT
;
2210 if (get_user_u32(addrlen
, target_addrlen
)) {
2211 ret
= -TARGET_EFAULT
;
2214 if ((int)addrlen
< 0) {
2215 ret
= -TARGET_EINVAL
;
2218 addr
= alloca(addrlen
);
2219 ret
= get_errno(recvfrom(fd
, host_msg
, len
, flags
, addr
, &addrlen
));
2221 addr
= NULL
; /* To keep compiler quiet. */
2222 ret
= get_errno(qemu_recv(fd
, host_msg
, len
, flags
));
2224 if (!is_error(ret
)) {
2226 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2227 if (put_user_u32(addrlen
, target_addrlen
)) {
2228 ret
= -TARGET_EFAULT
;
2232 unlock_user(host_msg
, msg
, len
);
2235 unlock_user(host_msg
, msg
, 0);
#ifdef TARGET_NR_socketcall
/* do_socketcall() Must return target values and target errnos.
 * Demultiplexes the legacy socketcall(2) interface: reads the per-call
 * argument count from guest memory at vptr, then dispatches. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned ac[] = { /* number of arguments per call */
        [SOCKOP_socket] = 3,      /* domain, type, protocol */
        [SOCKOP_bind] = 3,        /* sockfd, addr, addrlen */
        [SOCKOP_connect] = 3,     /* sockfd, addr, addrlen */
        [SOCKOP_listen] = 2,      /* sockfd, backlog */
        [SOCKOP_accept] = 3,      /* sockfd, addr, addrlen */
        [SOCKOP_accept4] = 4,     /* sockfd, addr, addrlen, flags */
        [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_socketpair] = 4,  /* domain, type, protocol, tab */
        [SOCKOP_send] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_recv] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_sendto] = 6,      /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_recvfrom] = 6,    /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_shutdown] = 2,    /* sockfd, how */
        [SOCKOP_sendmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_recvmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_setsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
        [SOCKOP_getsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
    };
    abi_long a[6]; /* max 6 args */

    /* first, collect the arguments in a[] according to ac[] */
    if (num >= 0 && num < ARRAY_SIZE(ac)) {
        unsigned i;
        assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
        for (i = 0; i < ac[num]; ++i) {
            if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
                return -TARGET_EFAULT;
            }
        }
    }

    /* now when we have the args, actually handle the call */
    switch (num) {
    case SOCKOP_socket: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case SOCKOP_bind: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case SOCKOP_connect: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case SOCKOP_listen: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case SOCKOP_accept: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case SOCKOP_getsockname: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case SOCKOP_getpeername: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case SOCKOP_socketpair: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case SOCKOP_send: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_recv: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_shutdown: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case SOCKOP_sendmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case SOCKOP_recvmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_ENOSYS;
    }
}
#endif
2322 #define N_SHM_REGIONS 32
/* Fixed-size table tracking guest shmat() mappings.
 * NOTE(review): the struct's interior fields were lost in extraction
 * (original lines 2325-2326) — presumably a start address and size;
 * confirm against the original source. */
2324 static struct shm_region
{
2327 } shm_regions
[N_SHM_REGIONS
];
/* Target-ABI layout of semid_ds: the IPC permission block followed by
 * the otime/ctime stamps and the semaphore count, with __unusedN
 * padding words between fields. */
2329 struct target_semid_ds
2331 struct target_ipc_perm sem_perm
;
2332 abi_ulong sem_otime
;
2333 abi_ulong __unused1
;
2334 abi_ulong sem_ctime
;
2335 abi_ulong __unused2
;
2336 abi_ulong sem_nsems
;
2337 abi_ulong __unused3
;
2338 abi_ulong __unused4
;
2341 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
2342 abi_ulong target_addr
)
2344 struct target_ipc_perm
*target_ip
;
2345 struct target_semid_ds
*target_sd
;
2347 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2348 return -TARGET_EFAULT
;
2349 target_ip
= &(target_sd
->sem_perm
);
2350 host_ip
->__key
= tswap32(target_ip
->__key
);
2351 host_ip
->uid
= tswap32(target_ip
->uid
);
2352 host_ip
->gid
= tswap32(target_ip
->gid
);
2353 host_ip
->cuid
= tswap32(target_ip
->cuid
);
2354 host_ip
->cgid
= tswap32(target_ip
->cgid
);
2355 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2356 host_ip
->mode
= tswap32(target_ip
->mode
);
2358 host_ip
->mode
= tswap16(target_ip
->mode
);
2360 #if defined(TARGET_PPC)
2361 host_ip
->__seq
= tswap32(target_ip
->__seq
);
2363 host_ip
->__seq
= tswap16(target_ip
->__seq
);
2365 unlock_user_struct(target_sd
, target_addr
, 0);
2369 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
2370 struct ipc_perm
*host_ip
)
2372 struct target_ipc_perm
*target_ip
;
2373 struct target_semid_ds
*target_sd
;
2375 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2376 return -TARGET_EFAULT
;
2377 target_ip
= &(target_sd
->sem_perm
);
2378 target_ip
->__key
= tswap32(host_ip
->__key
);
2379 target_ip
->uid
= tswap32(host_ip
->uid
);
2380 target_ip
->gid
= tswap32(host_ip
->gid
);
2381 target_ip
->cuid
= tswap32(host_ip
->cuid
);
2382 target_ip
->cgid
= tswap32(host_ip
->cgid
);
2383 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2384 target_ip
->mode
= tswap32(host_ip
->mode
);
2386 target_ip
->mode
= tswap16(host_ip
->mode
);
2388 #if defined(TARGET_PPC)
2389 target_ip
->__seq
= tswap32(host_ip
->__seq
);
2391 target_ip
->__seq
= tswap16(host_ip
->__seq
);
2393 unlock_user_struct(target_sd
, target_addr
, 1);
2397 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
2398 abi_ulong target_addr
)
2400 struct target_semid_ds
*target_sd
;
2402 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2403 return -TARGET_EFAULT
;
2404 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
2405 return -TARGET_EFAULT
;
2406 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
2407 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
2408 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
2409 unlock_user_struct(target_sd
, target_addr
, 0);
2413 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
2414 struct semid_ds
*host_sd
)
2416 struct target_semid_ds
*target_sd
;
2418 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2419 return -TARGET_EFAULT
;
2420 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
2421 return -TARGET_EFAULT
;
2422 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
2423 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
2424 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
2425 unlock_user_struct(target_sd
, target_addr
, 1);
/* Target-ABI seminfo layout.
 * NOTE(review): the member list (original lines 2430-2441) was lost in
 * extraction — presumably the sem* counters mirrored below in
 * host_to_target_seminfo(); confirm against the original source. */
2429 struct target_seminfo
{
2442 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
2443 struct seminfo
*host_seminfo
)
2445 struct target_seminfo
*target_seminfo
;
2446 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
2447 return -TARGET_EFAULT
;
2448 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
2449 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
2450 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
2451 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
2452 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
2453 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
2454 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
2455 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
2456 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
2457 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
2458 unlock_user_struct(target_seminfo
, target_addr
, 1);
/* NOTE(review): this span is garbled by extraction — the members
 * (buf/array/__buf) appear before the `union target_semun {` opener,
 * and the host `union semun` declaration plus closing braces were
 * lost.  Presumably the original declares the host semun union and a
 * target_semun union of abi_ulong members; confirm against source. */
2464 struct semid_ds
*buf
;
2465 unsigned short *array
;
2466 struct seminfo
*__buf
;
2469 union target_semun
{
2476 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
2477 abi_ulong target_addr
)
2480 unsigned short *array
;
2482 struct semid_ds semid_ds
;
2485 semun
.buf
= &semid_ds
;
2487 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
2489 return get_errno(ret
);
2491 nsems
= semid_ds
.sem_nsems
;
2493 *host_array
= malloc(nsems
*sizeof(unsigned short));
2495 return -TARGET_ENOMEM
;
2497 array
= lock_user(VERIFY_READ
, target_addr
,
2498 nsems
*sizeof(unsigned short), 1);
2501 return -TARGET_EFAULT
;
2504 for(i
=0; i
<nsems
; i
++) {
2505 __get_user((*host_array
)[i
], &array
[i
]);
2507 unlock_user(array
, target_addr
, 0);
2512 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
2513 unsigned short **host_array
)
2516 unsigned short *array
;
2518 struct semid_ds semid_ds
;
2521 semun
.buf
= &semid_ds
;
2523 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
2525 return get_errno(ret
);
2527 nsems
= semid_ds
.sem_nsems
;
2529 array
= lock_user(VERIFY_WRITE
, target_addr
,
2530 nsems
*sizeof(unsigned short), 0);
2532 return -TARGET_EFAULT
;
2534 for(i
=0; i
<nsems
; i
++) {
2535 __put_user((*host_array
)[i
], &array
[i
]);
2538 unlock_user(array
, target_addr
, 1);
2543 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
2544 union target_semun target_su
)
2547 struct semid_ds dsarg
;
2548 unsigned short *array
= NULL
;
2549 struct seminfo seminfo
;
2550 abi_long ret
= -TARGET_EINVAL
;
2557 arg
.val
= tswap32(target_su
.val
);
2558 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2559 target_su
.val
= tswap32(arg
.val
);
2563 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
2567 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2568 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
2575 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
2579 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2580 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
2586 arg
.__buf
= &seminfo
;
2587 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2588 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
2596 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
/* Guest-layout view of struct sembuf (semop(2) operation descriptor). */
struct target_sembuf {
    unsigned short sem_num;  /* semaphore index in the set */
    short sem_op;            /* operation to apply */
    short sem_flg;           /* IPC_NOWAIT / SEM_UNDO flags */
};
2609 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
2610 abi_ulong target_addr
,
2613 struct target_sembuf
*target_sembuf
;
2616 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
2617 nsops
*sizeof(struct target_sembuf
), 1);
2619 return -TARGET_EFAULT
;
2621 for(i
=0; i
<nsops
; i
++) {
2622 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
2623 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
2624 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
2627 unlock_user(target_sembuf
, target_addr
, 0);
2632 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
2634 struct sembuf sops
[nsops
];
2636 if (target_to_host_sembuf(sops
, ptr
, nsops
))
2637 return -TARGET_EFAULT
;
2639 return get_errno(semop(semid
, sops
, nsops
));
2642 struct target_msqid_ds
2644 struct target_ipc_perm msg_perm
;
2645 abi_ulong msg_stime
;
2646 #if TARGET_ABI_BITS == 32
2647 abi_ulong __unused1
;
2649 abi_ulong msg_rtime
;
2650 #if TARGET_ABI_BITS == 32
2651 abi_ulong __unused2
;
2653 abi_ulong msg_ctime
;
2654 #if TARGET_ABI_BITS == 32
2655 abi_ulong __unused3
;
2657 abi_ulong __msg_cbytes
;
2659 abi_ulong msg_qbytes
;
2660 abi_ulong msg_lspid
;
2661 abi_ulong msg_lrpid
;
2662 abi_ulong __unused4
;
2663 abi_ulong __unused5
;
2666 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
2667 abi_ulong target_addr
)
2669 struct target_msqid_ds
*target_md
;
2671 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
2672 return -TARGET_EFAULT
;
2673 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
2674 return -TARGET_EFAULT
;
2675 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
2676 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
2677 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
2678 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
2679 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
2680 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
2681 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
2682 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
2683 unlock_user_struct(target_md
, target_addr
, 0);
2687 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
2688 struct msqid_ds
*host_md
)
2690 struct target_msqid_ds
*target_md
;
2692 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
2693 return -TARGET_EFAULT
;
2694 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
2695 return -TARGET_EFAULT
;
2696 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
2697 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
2698 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
2699 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
2700 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
2701 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
2702 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
2703 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
2704 unlock_user_struct(target_md
, target_addr
, 1);
/* Guest-layout view of struct msginfo (msgctl IPC_INFO / MSG_INFO). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
2719 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
2720 struct msginfo
*host_msginfo
)
2722 struct target_msginfo
*target_msginfo
;
2723 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
2724 return -TARGET_EFAULT
;
2725 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
2726 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
2727 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
2728 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
2729 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
2730 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
2731 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
2732 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
2733 unlock_user_struct(target_msginfo
, target_addr
, 1);
2737 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
2739 struct msqid_ds dsarg
;
2740 struct msginfo msginfo
;
2741 abi_long ret
= -TARGET_EINVAL
;
2749 if (target_to_host_msqid_ds(&dsarg
,ptr
))
2750 return -TARGET_EFAULT
;
2751 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
2752 if (host_to_target_msqid_ds(ptr
,&dsarg
))
2753 return -TARGET_EFAULT
;
2756 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
2760 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
2761 if (host_to_target_msginfo(ptr
, &msginfo
))
2762 return -TARGET_EFAULT
;
2769 struct target_msgbuf
{
2774 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
2775 unsigned int msgsz
, int msgflg
)
2777 struct target_msgbuf
*target_mb
;
2778 struct msgbuf
*host_mb
;
2781 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
2782 return -TARGET_EFAULT
;
2783 host_mb
= malloc(msgsz
+sizeof(long));
2784 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
2785 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
2786 ret
= get_errno(msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
2788 unlock_user_struct(target_mb
, msgp
, 0);
2793 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
2794 unsigned int msgsz
, abi_long msgtyp
,
2797 struct target_msgbuf
*target_mb
;
2799 struct msgbuf
*host_mb
;
2802 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
2803 return -TARGET_EFAULT
;
2805 host_mb
= g_malloc(msgsz
+sizeof(long));
2806 ret
= get_errno(msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
2809 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
2810 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
2811 if (!target_mtext
) {
2812 ret
= -TARGET_EFAULT
;
2815 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
2816 unlock_user(target_mtext
, target_mtext_addr
, ret
);
2819 target_mb
->mtype
= tswapal(host_mb
->mtype
);
2823 unlock_user_struct(target_mb
, msgp
, 1);
2828 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
2829 abi_ulong target_addr
)
2831 struct target_shmid_ds
*target_sd
;
2833 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2834 return -TARGET_EFAULT
;
2835 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
2836 return -TARGET_EFAULT
;
2837 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
2838 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
2839 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
2840 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
2841 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
2842 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
2843 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
2844 unlock_user_struct(target_sd
, target_addr
, 0);
2848 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
2849 struct shmid_ds
*host_sd
)
2851 struct target_shmid_ds
*target_sd
;
2853 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2854 return -TARGET_EFAULT
;
2855 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
2856 return -TARGET_EFAULT
;
2857 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
2858 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
2859 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
2860 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
2861 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
2862 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
2863 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
2864 unlock_user_struct(target_sd
, target_addr
, 1);
2868 struct target_shminfo
{
2876 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
2877 struct shminfo
*host_shminfo
)
2879 struct target_shminfo
*target_shminfo
;
2880 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
2881 return -TARGET_EFAULT
;
2882 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
2883 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
2884 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
2885 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
2886 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
2887 unlock_user_struct(target_shminfo
, target_addr
, 1);
2891 struct target_shm_info
{
2896 abi_ulong swap_attempts
;
2897 abi_ulong swap_successes
;
2900 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
2901 struct shm_info
*host_shm_info
)
2903 struct target_shm_info
*target_shm_info
;
2904 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
2905 return -TARGET_EFAULT
;
2906 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
2907 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
2908 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
2909 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
2910 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
2911 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
2912 unlock_user_struct(target_shm_info
, target_addr
, 1);
2916 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
2918 struct shmid_ds dsarg
;
2919 struct shminfo shminfo
;
2920 struct shm_info shm_info
;
2921 abi_long ret
= -TARGET_EINVAL
;
2929 if (target_to_host_shmid_ds(&dsarg
, buf
))
2930 return -TARGET_EFAULT
;
2931 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
2932 if (host_to_target_shmid_ds(buf
, &dsarg
))
2933 return -TARGET_EFAULT
;
2936 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
2937 if (host_to_target_shminfo(buf
, &shminfo
))
2938 return -TARGET_EFAULT
;
2941 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
2942 if (host_to_target_shm_info(buf
, &shm_info
))
2943 return -TARGET_EFAULT
;
2948 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
2955 static inline abi_ulong
do_shmat(int shmid
, abi_ulong shmaddr
, int shmflg
)
2959 struct shmid_ds shm_info
;
2962 /* find out the length of the shared memory segment */
2963 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
2964 if (is_error(ret
)) {
2965 /* can't get length, bail out */
2972 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
2974 abi_ulong mmap_start
;
2976 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
2978 if (mmap_start
== -1) {
2980 host_raddr
= (void *)-1;
2982 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
2985 if (host_raddr
== (void *)-1) {
2987 return get_errno((long)host_raddr
);
2989 raddr
=h2g((unsigned long)host_raddr
);
2991 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
2992 PAGE_VALID
| PAGE_READ
|
2993 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
2995 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
2996 if (shm_regions
[i
].start
== 0) {
2997 shm_regions
[i
].start
= raddr
;
2998 shm_regions
[i
].size
= shm_info
.shm_segsz
;
3008 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
3012 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
3013 if (shm_regions
[i
].start
== shmaddr
) {
3014 shm_regions
[i
].start
= 0;
3015 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
3020 return get_errno(shmdt(g2h(shmaddr
)));
3023 #ifdef TARGET_NR_ipc
3024 /* ??? This only works with linear mappings. */
3025 /* do_ipc() must return target values and target errnos. */
3026 static abi_long
do_ipc(unsigned int call
, int first
,
3027 int second
, int third
,
3028 abi_long ptr
, abi_long fifth
)
3033 version
= call
>> 16;
3038 ret
= do_semop(first
, ptr
, second
);
3042 ret
= get_errno(semget(first
, second
, third
));
3046 ret
= do_semctl(first
, second
, third
, (union target_semun
)(abi_ulong
) ptr
);
3050 ret
= get_errno(msgget(first
, second
));
3054 ret
= do_msgsnd(first
, ptr
, second
, third
);
3058 ret
= do_msgctl(first
, second
, ptr
);
3065 struct target_ipc_kludge
{
3070 if (!lock_user_struct(VERIFY_READ
, tmp
, ptr
, 1)) {
3071 ret
= -TARGET_EFAULT
;
3075 ret
= do_msgrcv(first
, tswapal(tmp
->msgp
), second
, tswapal(tmp
->msgtyp
), third
);
3077 unlock_user_struct(tmp
, ptr
, 0);
3081 ret
= do_msgrcv(first
, ptr
, second
, fifth
, third
);
3090 raddr
= do_shmat(first
, ptr
, second
);
3091 if (is_error(raddr
))
3092 return get_errno(raddr
);
3093 if (put_user_ual(raddr
, third
))
3094 return -TARGET_EFAULT
;
3098 ret
= -TARGET_EINVAL
;
3103 ret
= do_shmdt(ptr
);
3107 /* IPC_* flag values are the same on all linux platforms */
3108 ret
= get_errno(shmget(first
, second
, third
));
3111 /* IPC_* and SHM_* command values are the same on all linux platforms */
3113 ret
= do_shmctl(first
, second
, ptr
);
3116 gemu_log("Unsupported ipc call: %d (version %d)\n", call
, version
);
3117 ret
= -TARGET_ENOSYS
;
3124 /* kernel structure types definitions */
3126 #define STRUCT(name, ...) STRUCT_ ## name,
3127 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3129 #include "syscall_types.h"
3132 #undef STRUCT_SPECIAL
3134 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3135 #define STRUCT_SPECIAL(name)
3136 #include "syscall_types.h"
3138 #undef STRUCT_SPECIAL
3140 typedef struct IOCTLEntry IOCTLEntry
;
3142 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3143 int fd
, abi_long cmd
, abi_long arg
);
3146 unsigned int target_cmd
;
3147 unsigned int host_cmd
;
3150 do_ioctl_fn
*do_ioctl
;
3151 const argtype arg_type
[5];
3154 #define IOC_R 0x0001
3155 #define IOC_W 0x0002
3156 #define IOC_RW (IOC_R | IOC_W)
3158 #define MAX_STRUCT_SIZE 4096
3160 #ifdef CONFIG_FIEMAP
3161 /* So fiemap access checks don't overflow on 32 bit systems.
3162 * This is very slightly smaller than the limit imposed by
3163 * the underlying kernel.
3165 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3166 / sizeof(struct fiemap_extent))
3168 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3169 int fd
, abi_long cmd
, abi_long arg
)
3171 /* The parameter for this ioctl is a struct fiemap followed
3172 * by an array of struct fiemap_extent whose size is set
3173 * in fiemap->fm_extent_count. The array is filled in by the
3176 int target_size_in
, target_size_out
;
3178 const argtype
*arg_type
= ie
->arg_type
;
3179 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
3182 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
3186 assert(arg_type
[0] == TYPE_PTR
);
3187 assert(ie
->access
== IOC_RW
);
3189 target_size_in
= thunk_type_size(arg_type
, 0);
3190 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
3192 return -TARGET_EFAULT
;
3194 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3195 unlock_user(argptr
, arg
, 0);
3196 fm
= (struct fiemap
*)buf_temp
;
3197 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
3198 return -TARGET_EINVAL
;
3201 outbufsz
= sizeof (*fm
) +
3202 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
3204 if (outbufsz
> MAX_STRUCT_SIZE
) {
3205 /* We can't fit all the extents into the fixed size buffer.
3206 * Allocate one that is large enough and use it instead.
3208 fm
= malloc(outbufsz
);
3210 return -TARGET_ENOMEM
;
3212 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
3215 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, fm
));
3216 if (!is_error(ret
)) {
3217 target_size_out
= target_size_in
;
3218 /* An extent_count of 0 means we were only counting the extents
3219 * so there are no structs to copy
3221 if (fm
->fm_extent_count
!= 0) {
3222 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
3224 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
3226 ret
= -TARGET_EFAULT
;
3228 /* Convert the struct fiemap */
3229 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
3230 if (fm
->fm_extent_count
!= 0) {
3231 p
= argptr
+ target_size_in
;
3232 /* ...and then all the struct fiemap_extents */
3233 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
3234 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
3239 unlock_user(argptr
, arg
, target_size_out
);
3249 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3250 int fd
, abi_long cmd
, abi_long arg
)
3252 const argtype
*arg_type
= ie
->arg_type
;
3256 struct ifconf
*host_ifconf
;
3258 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
3259 int target_ifreq_size
;
3264 abi_long target_ifc_buf
;
3268 assert(arg_type
[0] == TYPE_PTR
);
3269 assert(ie
->access
== IOC_RW
);
3272 target_size
= thunk_type_size(arg_type
, 0);
3274 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3276 return -TARGET_EFAULT
;
3277 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3278 unlock_user(argptr
, arg
, 0);
3280 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
3281 target_ifc_len
= host_ifconf
->ifc_len
;
3282 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
3284 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
3285 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
3286 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
3288 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
3289 if (outbufsz
> MAX_STRUCT_SIZE
) {
3290 /* We can't fit all the extents into the fixed size buffer.
3291 * Allocate one that is large enough and use it instead.
3293 host_ifconf
= malloc(outbufsz
);
3295 return -TARGET_ENOMEM
;
3297 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
3300 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
3302 host_ifconf
->ifc_len
= host_ifc_len
;
3303 host_ifconf
->ifc_buf
= host_ifc_buf
;
3305 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, host_ifconf
));
3306 if (!is_error(ret
)) {
3307 /* convert host ifc_len to target ifc_len */
3309 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
3310 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
3311 host_ifconf
->ifc_len
= target_ifc_len
;
3313 /* restore target ifc_buf */
3315 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
3317 /* copy struct ifconf to target user */
3319 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3321 return -TARGET_EFAULT
;
3322 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
3323 unlock_user(argptr
, arg
, target_size
);
3325 /* copy ifreq[] to target user */
3327 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
3328 for (i
= 0; i
< nb_ifreq
; i
++) {
3329 thunk_convert(argptr
+ i
* target_ifreq_size
,
3330 host_ifc_buf
+ i
* sizeof(struct ifreq
),
3331 ifreq_arg_type
, THUNK_TARGET
);
3333 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
3343 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
3344 abi_long cmd
, abi_long arg
)
3347 struct dm_ioctl
*host_dm
;
3348 abi_long guest_data
;
3349 uint32_t guest_data_size
;
3351 const argtype
*arg_type
= ie
->arg_type
;
3353 void *big_buf
= NULL
;
3357 target_size
= thunk_type_size(arg_type
, 0);
3358 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3360 ret
= -TARGET_EFAULT
;
3363 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3364 unlock_user(argptr
, arg
, 0);
3366 /* buf_temp is too small, so fetch things into a bigger buffer */
3367 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
3368 memcpy(big_buf
, buf_temp
, target_size
);
3372 guest_data
= arg
+ host_dm
->data_start
;
3373 if ((guest_data
- arg
) < 0) {
3377 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
3378 host_data
= (char*)host_dm
+ host_dm
->data_start
;
3380 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
3381 switch (ie
->host_cmd
) {
3383 case DM_LIST_DEVICES
:
3386 case DM_DEV_SUSPEND
:
3389 case DM_TABLE_STATUS
:
3390 case DM_TABLE_CLEAR
:
3392 case DM_LIST_VERSIONS
:
3396 case DM_DEV_SET_GEOMETRY
:
3397 /* data contains only strings */
3398 memcpy(host_data
, argptr
, guest_data_size
);
3401 memcpy(host_data
, argptr
, guest_data_size
);
3402 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
3406 void *gspec
= argptr
;
3407 void *cur_data
= host_data
;
3408 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
3409 int spec_size
= thunk_type_size(arg_type
, 0);
3412 for (i
= 0; i
< host_dm
->target_count
; i
++) {
3413 struct dm_target_spec
*spec
= cur_data
;
3417 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
3418 slen
= strlen((char*)gspec
+ spec_size
) + 1;
3420 spec
->next
= sizeof(*spec
) + slen
;
3421 strcpy((char*)&spec
[1], gspec
+ spec_size
);
3423 cur_data
+= spec
->next
;
3428 ret
= -TARGET_EINVAL
;
3431 unlock_user(argptr
, guest_data
, 0);
3433 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3434 if (!is_error(ret
)) {
3435 guest_data
= arg
+ host_dm
->data_start
;
3436 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
3437 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
3438 switch (ie
->host_cmd
) {
3443 case DM_DEV_SUSPEND
:
3446 case DM_TABLE_CLEAR
:
3448 case DM_DEV_SET_GEOMETRY
:
3449 /* no return data */
3451 case DM_LIST_DEVICES
:
3453 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
3454 uint32_t remaining_data
= guest_data_size
;
3455 void *cur_data
= argptr
;
3456 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
3457 int nl_size
= 12; /* can't use thunk_size due to alignment */
3460 uint32_t next
= nl
->next
;
3462 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
3464 if (remaining_data
< nl
->next
) {
3465 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3468 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
3469 strcpy(cur_data
+ nl_size
, nl
->name
);
3470 cur_data
+= nl
->next
;
3471 remaining_data
-= nl
->next
;
3475 nl
= (void*)nl
+ next
;
3480 case DM_TABLE_STATUS
:
3482 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
3483 void *cur_data
= argptr
;
3484 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
3485 int spec_size
= thunk_type_size(arg_type
, 0);
3488 for (i
= 0; i
< host_dm
->target_count
; i
++) {
3489 uint32_t next
= spec
->next
;
3490 int slen
= strlen((char*)&spec
[1]) + 1;
3491 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
3492 if (guest_data_size
< spec
->next
) {
3493 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3496 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
3497 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
3498 cur_data
= argptr
+ spec
->next
;
3499 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
3505 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
3506 int count
= *(uint32_t*)hdata
;
3507 uint64_t *hdev
= hdata
+ 8;
3508 uint64_t *gdev
= argptr
+ 8;
3511 *(uint32_t*)argptr
= tswap32(count
);
3512 for (i
= 0; i
< count
; i
++) {
3513 *gdev
= tswap64(*hdev
);
3519 case DM_LIST_VERSIONS
:
3521 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
3522 uint32_t remaining_data
= guest_data_size
;
3523 void *cur_data
= argptr
;
3524 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
3525 int vers_size
= thunk_type_size(arg_type
, 0);
3528 uint32_t next
= vers
->next
;
3530 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
3532 if (remaining_data
< vers
->next
) {
3533 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3536 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
3537 strcpy(cur_data
+ vers_size
, vers
->name
);
3538 cur_data
+= vers
->next
;
3539 remaining_data
-= vers
->next
;
3543 vers
= (void*)vers
+ next
;
3548 ret
= -TARGET_EINVAL
;
3551 unlock_user(argptr
, guest_data
, guest_data_size
);
3553 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3555 ret
= -TARGET_EFAULT
;
3558 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3559 unlock_user(argptr
, arg
, target_size
);
3566 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3567 int fd
, abi_long cmd
, abi_long arg
)
3569 const argtype
*arg_type
= ie
->arg_type
;
3570 const StructEntry
*se
;
3571 const argtype
*field_types
;
3572 const int *dst_offsets
, *src_offsets
;
3575 abi_ulong
*target_rt_dev_ptr
;
3576 unsigned long *host_rt_dev_ptr
;
3580 assert(ie
->access
== IOC_W
);
3581 assert(*arg_type
== TYPE_PTR
);
3583 assert(*arg_type
== TYPE_STRUCT
);
3584 target_size
= thunk_type_size(arg_type
, 0);
3585 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3587 return -TARGET_EFAULT
;
3590 assert(*arg_type
== (int)STRUCT_rtentry
);
3591 se
= struct_entries
+ *arg_type
++;
3592 assert(se
->convert
[0] == NULL
);
3593 /* convert struct here to be able to catch rt_dev string */
3594 field_types
= se
->field_types
;
3595 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
3596 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
3597 for (i
= 0; i
< se
->nb_fields
; i
++) {
3598 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
3599 assert(*field_types
== TYPE_PTRVOID
);
3600 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
3601 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
3602 if (*target_rt_dev_ptr
!= 0) {
3603 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
3604 tswapal(*target_rt_dev_ptr
));
3605 if (!*host_rt_dev_ptr
) {
3606 unlock_user(argptr
, arg
, 0);
3607 return -TARGET_EFAULT
;
3610 *host_rt_dev_ptr
= 0;
3615 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
3616 argptr
+ src_offsets
[i
],
3617 field_types
, THUNK_HOST
);
3619 unlock_user(argptr
, arg
, 0);
3621 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3622 if (*host_rt_dev_ptr
!= 0) {
3623 unlock_user((void *)*host_rt_dev_ptr
,
3624 *target_rt_dev_ptr
, 0);
3629 static IOCTLEntry ioctl_entries
[] = {
3630 #define IOCTL(cmd, access, ...) \
3631 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3632 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3633 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3638 /* ??? Implement proper locking for ioctls. */
3639 /* do_ioctl() Must return target values and target errnos. */
3640 static abi_long
do_ioctl(int fd
, abi_long cmd
, abi_long arg
)
3642 const IOCTLEntry
*ie
;
3643 const argtype
*arg_type
;
3645 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
3651 if (ie
->target_cmd
== 0) {
3652 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
3653 return -TARGET_ENOSYS
;
3655 if (ie
->target_cmd
== cmd
)
3659 arg_type
= ie
->arg_type
;
3661 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
3664 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
3667 switch(arg_type
[0]) {
3670 ret
= get_errno(ioctl(fd
, ie
->host_cmd
));
3675 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, arg
));
3679 target_size
= thunk_type_size(arg_type
, 0);
3680 switch(ie
->access
) {
3682 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3683 if (!is_error(ret
)) {
3684 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3686 return -TARGET_EFAULT
;
3687 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3688 unlock_user(argptr
, arg
, target_size
);
3692 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3694 return -TARGET_EFAULT
;
3695 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3696 unlock_user(argptr
, arg
, 0);
3697 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3701 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3703 return -TARGET_EFAULT
;
3704 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3705 unlock_user(argptr
, arg
, 0);
3706 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3707 if (!is_error(ret
)) {
3708 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3710 return -TARGET_EFAULT
;
3711 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3712 unlock_user(argptr
, arg
, target_size
);
3718 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3719 (long)cmd
, arg_type
[0]);
3720 ret
= -TARGET_ENOSYS
;
3726 static const bitmask_transtbl iflag_tbl
[] = {
3727 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
3728 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
3729 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
3730 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
3731 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
3732 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
3733 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
3734 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
3735 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
3736 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
3737 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
3738 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
3739 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
3740 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
3744 static const bitmask_transtbl oflag_tbl
[] = {
3745 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
3746 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
3747 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
3748 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
3749 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
3750 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
3751 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
3752 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
3753 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
3754 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
3755 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
3756 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
3757 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
3758 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
3759 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
3760 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
3761 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
3762 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
3763 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
3764 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
3765 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
3766 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
3767 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
3768 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
3772 static const bitmask_transtbl cflag_tbl
[] = {
3773 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
3774 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
3775 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
3776 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
3777 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
3778 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
3779 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
3780 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
3781 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
3782 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
3783 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
3784 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
3785 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
3786 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
3787 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
3788 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
3789 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
3790 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
3791 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
3792 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
3793 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
3794 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
3795 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
3796 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
3797 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
3798 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
3799 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
3800 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
3801 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
3802 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
3803 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
3807 static const bitmask_transtbl lflag_tbl
[] = {
3808 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
3809 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
3810 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
3811 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
3812 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
3813 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
3814 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
3815 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
3816 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
3817 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
3818 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
3819 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
3820 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
3821 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
3822 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
3826 static void target_to_host_termios (void *dst
, const void *src
)
3828 struct host_termios
*host
= dst
;
3829 const struct target_termios
*target
= src
;
3832 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
3834 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
3836 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
3838 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
3839 host
->c_line
= target
->c_line
;
3841 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
3842 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
3843 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
3844 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
3845 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
3846 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
3847 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
3848 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
3849 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
3850 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
3851 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
3852 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
3853 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
3854 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
3855 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
3856 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
3857 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
3858 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
3861 static void host_to_target_termios (void *dst
, const void *src
)
3863 struct target_termios
*target
= dst
;
3864 const struct host_termios
*host
= src
;
3867 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
3869 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
3871 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
3873 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
3874 target
->c_line
= host
->c_line
;
3876 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
3877 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
3878 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
3879 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
3880 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
3881 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
3882 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
3883 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
3884 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
3885 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
3886 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
3887 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
3888 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
3889 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
3890 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
3891 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
3892 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
3893 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
3896 static const StructEntry struct_termios_def
= {
3897 .convert
= { host_to_target_termios
, target_to_host_termios
},
3898 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
3899 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
3902 static bitmask_transtbl mmap_flags_tbl
[] = {
3903 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
3904 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
3905 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
3906 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
, MAP_ANONYMOUS
, MAP_ANONYMOUS
},
3907 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
, MAP_GROWSDOWN
, MAP_GROWSDOWN
},
3908 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
, MAP_DENYWRITE
, MAP_DENYWRITE
},
3909 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
, MAP_EXECUTABLE
, MAP_EXECUTABLE
},
3910 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
3914 #if defined(TARGET_I386)
3916 /* NOTE: there is really one LDT for all the threads */
3917 static uint8_t *ldt_table
;
3919 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
3926 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
3927 if (size
> bytecount
)
3929 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
3931 return -TARGET_EFAULT
;
3932 /* ??? Should this by byteswapped? */
3933 memcpy(p
, ldt_table
, size
);
3934 unlock_user(p
, ptr
, size
);
3938 /* XXX: add locking support */
3939 static abi_long
write_ldt(CPUX86State
*env
,
3940 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
3942 struct target_modify_ldt_ldt_s ldt_info
;
3943 struct target_modify_ldt_ldt_s
*target_ldt_info
;
3944 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
3945 int seg_not_present
, useable
, lm
;
3946 uint32_t *lp
, entry_1
, entry_2
;
3948 if (bytecount
!= sizeof(ldt_info
))
3949 return -TARGET_EINVAL
;
3950 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
3951 return -TARGET_EFAULT
;
3952 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
3953 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
3954 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
3955 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
3956 unlock_user_struct(target_ldt_info
, ptr
, 0);
3958 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
3959 return -TARGET_EINVAL
;
3960 seg_32bit
= ldt_info
.flags
& 1;
3961 contents
= (ldt_info
.flags
>> 1) & 3;
3962 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
3963 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
3964 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
3965 useable
= (ldt_info
.flags
>> 6) & 1;
3969 lm
= (ldt_info
.flags
>> 7) & 1;
3971 if (contents
== 3) {
3973 return -TARGET_EINVAL
;
3974 if (seg_not_present
== 0)
3975 return -TARGET_EINVAL
;
3977 /* allocate the LDT */
3979 env
->ldt
.base
= target_mmap(0,
3980 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
3981 PROT_READ
|PROT_WRITE
,
3982 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
3983 if (env
->ldt
.base
== -1)
3984 return -TARGET_ENOMEM
;
3985 memset(g2h(env
->ldt
.base
), 0,
3986 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
3987 env
->ldt
.limit
= 0xffff;
3988 ldt_table
= g2h(env
->ldt
.base
);
3991 /* NOTE: same code as Linux kernel */
3992 /* Allow LDTs to be cleared by the user. */
3993 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
3996 read_exec_only
== 1 &&
3998 limit_in_pages
== 0 &&
3999 seg_not_present
== 1 &&
4007 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
4008 (ldt_info
.limit
& 0x0ffff);
4009 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
4010 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
4011 (ldt_info
.limit
& 0xf0000) |
4012 ((read_exec_only
^ 1) << 9) |
4014 ((seg_not_present
^ 1) << 15) |
4016 (limit_in_pages
<< 23) |
4020 entry_2
|= (useable
<< 20);
4022 /* Install the new entry ... */
4024 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
4025 lp
[0] = tswap32(entry_1
);
4026 lp
[1] = tswap32(entry_2
);
4030 /* specific and weird i386 syscalls */
4031 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
4032 unsigned long bytecount
)
4038 ret
= read_ldt(ptr
, bytecount
);
4041 ret
= write_ldt(env
, ptr
, bytecount
, 1);
4044 ret
= write_ldt(env
, ptr
, bytecount
, 0);
4047 ret
= -TARGET_ENOSYS
;
4053 #if defined(TARGET_I386) && defined(TARGET_ABI32)
4054 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
4056 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
4057 struct target_modify_ldt_ldt_s ldt_info
;
4058 struct target_modify_ldt_ldt_s
*target_ldt_info
;
4059 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
4060 int seg_not_present
, useable
, lm
;
4061 uint32_t *lp
, entry_1
, entry_2
;
4064 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
4065 if (!target_ldt_info
)
4066 return -TARGET_EFAULT
;
4067 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
4068 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
4069 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
4070 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
4071 if (ldt_info
.entry_number
== -1) {
4072 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
4073 if (gdt_table
[i
] == 0) {
4074 ldt_info
.entry_number
= i
;
4075 target_ldt_info
->entry_number
= tswap32(i
);
4080 unlock_user_struct(target_ldt_info
, ptr
, 1);
4082 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
4083 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
4084 return -TARGET_EINVAL
;
4085 seg_32bit
= ldt_info
.flags
& 1;
4086 contents
= (ldt_info
.flags
>> 1) & 3;
4087 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
4088 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
4089 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
4090 useable
= (ldt_info
.flags
>> 6) & 1;
4094 lm
= (ldt_info
.flags
>> 7) & 1;
4097 if (contents
== 3) {
4098 if (seg_not_present
== 0)
4099 return -TARGET_EINVAL
;
4102 /* NOTE: same code as Linux kernel */
4103 /* Allow LDTs to be cleared by the user. */
4104 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
4105 if ((contents
== 0 &&
4106 read_exec_only
== 1 &&
4108 limit_in_pages
== 0 &&
4109 seg_not_present
== 1 &&
4117 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
4118 (ldt_info
.limit
& 0x0ffff);
4119 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
4120 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
4121 (ldt_info
.limit
& 0xf0000) |
4122 ((read_exec_only
^ 1) << 9) |
4124 ((seg_not_present
^ 1) << 15) |
4126 (limit_in_pages
<< 23) |
4131 /* Install the new entry ... */
4133 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
4134 lp
[0] = tswap32(entry_1
);
4135 lp
[1] = tswap32(entry_2
);
4139 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
4141 struct target_modify_ldt_ldt_s
*target_ldt_info
;
4142 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
4143 uint32_t base_addr
, limit
, flags
;
4144 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
4145 int seg_not_present
, useable
, lm
;
4146 uint32_t *lp
, entry_1
, entry_2
;
4148 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
4149 if (!target_ldt_info
)
4150 return -TARGET_EFAULT
;
4151 idx
= tswap32(target_ldt_info
->entry_number
);
4152 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
4153 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
4154 unlock_user_struct(target_ldt_info
, ptr
, 1);
4155 return -TARGET_EINVAL
;
4157 lp
= (uint32_t *)(gdt_table
+ idx
);
4158 entry_1
= tswap32(lp
[0]);
4159 entry_2
= tswap32(lp
[1]);
4161 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
4162 contents
= (entry_2
>> 10) & 3;
4163 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
4164 seg_32bit
= (entry_2
>> 22) & 1;
4165 limit_in_pages
= (entry_2
>> 23) & 1;
4166 useable
= (entry_2
>> 20) & 1;
4170 lm
= (entry_2
>> 21) & 1;
4172 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
4173 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
4174 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
4175 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
4176 base_addr
= (entry_1
>> 16) |
4177 (entry_2
& 0xff000000) |
4178 ((entry_2
& 0xff) << 16);
4179 target_ldt_info
->base_addr
= tswapal(base_addr
);
4180 target_ldt_info
->limit
= tswap32(limit
);
4181 target_ldt_info
->flags
= tswap32(flags
);
4182 unlock_user_struct(target_ldt_info
, ptr
, 1);
4185 #endif /* TARGET_I386 && TARGET_ABI32 */
4187 #ifndef TARGET_ABI32
4188 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
4195 case TARGET_ARCH_SET_GS
:
4196 case TARGET_ARCH_SET_FS
:
4197 if (code
== TARGET_ARCH_SET_GS
)
4201 cpu_x86_load_seg(env
, idx
, 0);
4202 env
->segs
[idx
].base
= addr
;
4204 case TARGET_ARCH_GET_GS
:
4205 case TARGET_ARCH_GET_FS
:
4206 if (code
== TARGET_ARCH_GET_GS
)
4210 val
= env
->segs
[idx
].base
;
4211 if (put_user(val
, addr
, abi_ulong
))
4212 ret
= -TARGET_EFAULT
;
4215 ret
= -TARGET_EINVAL
;
4222 #endif /* defined(TARGET_I386) */
4224 #define NEW_STACK_SIZE 0x40000
4227 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
4230 pthread_mutex_t mutex
;
4231 pthread_cond_t cond
;
4234 abi_ulong child_tidptr
;
4235 abi_ulong parent_tidptr
;
4239 static void *clone_func(void *arg
)
4241 new_thread_info
*info
= arg
;
4247 cpu
= ENV_GET_CPU(env
);
4249 ts
= (TaskState
*)cpu
->opaque
;
4250 info
->tid
= gettid();
4251 cpu
->host_tid
= info
->tid
;
4253 if (info
->child_tidptr
)
4254 put_user_u32(info
->tid
, info
->child_tidptr
);
4255 if (info
->parent_tidptr
)
4256 put_user_u32(info
->tid
, info
->parent_tidptr
);
4257 /* Enable signals. */
4258 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
4259 /* Signal to the parent that we're ready. */
4260 pthread_mutex_lock(&info
->mutex
);
4261 pthread_cond_broadcast(&info
->cond
);
4262 pthread_mutex_unlock(&info
->mutex
);
4263 /* Wait until the parent has finshed initializing the tls state. */
4264 pthread_mutex_lock(&clone_lock
);
4265 pthread_mutex_unlock(&clone_lock
);
4271 /* do_fork() Must return host values and target errnos (unlike most
4272 do_*() functions). */
4273 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
4274 abi_ulong parent_tidptr
, target_ulong newtls
,
4275 abi_ulong child_tidptr
)
4277 CPUState
*cpu
= ENV_GET_CPU(env
);
4281 CPUArchState
*new_env
;
4282 unsigned int nptl_flags
;
4285 /* Emulate vfork() with fork() */
4286 if (flags
& CLONE_VFORK
)
4287 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
4289 if (flags
& CLONE_VM
) {
4290 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
4291 new_thread_info info
;
4292 pthread_attr_t attr
;
4294 ts
= g_malloc0(sizeof(TaskState
));
4295 init_task_state(ts
);
4296 /* we create a new CPU instance. */
4297 new_env
= cpu_copy(env
);
4298 /* Init regs that differ from the parent. */
4299 cpu_clone_regs(new_env
, newsp
);
4300 new_cpu
= ENV_GET_CPU(new_env
);
4301 new_cpu
->opaque
= ts
;
4302 ts
->bprm
= parent_ts
->bprm
;
4303 ts
->info
= parent_ts
->info
;
4305 flags
&= ~CLONE_NPTL_FLAGS2
;
4307 if (nptl_flags
& CLONE_CHILD_CLEARTID
) {
4308 ts
->child_tidptr
= child_tidptr
;
4311 if (nptl_flags
& CLONE_SETTLS
)
4312 cpu_set_tls (new_env
, newtls
);
4314 /* Grab a mutex so that thread setup appears atomic. */
4315 pthread_mutex_lock(&clone_lock
);
4317 memset(&info
, 0, sizeof(info
));
4318 pthread_mutex_init(&info
.mutex
, NULL
);
4319 pthread_mutex_lock(&info
.mutex
);
4320 pthread_cond_init(&info
.cond
, NULL
);
4322 if (nptl_flags
& CLONE_CHILD_SETTID
)
4323 info
.child_tidptr
= child_tidptr
;
4324 if (nptl_flags
& CLONE_PARENT_SETTID
)
4325 info
.parent_tidptr
= parent_tidptr
;
4327 ret
= pthread_attr_init(&attr
);
4328 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
4329 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
4330 /* It is not safe to deliver signals until the child has finished
4331 initializing, so temporarily block all signals. */
4332 sigfillset(&sigmask
);
4333 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
4335 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
4336 /* TODO: Free new CPU state if thread creation failed. */
4338 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
4339 pthread_attr_destroy(&attr
);
4341 /* Wait for the child to initialize. */
4342 pthread_cond_wait(&info
.cond
, &info
.mutex
);
4344 if (flags
& CLONE_PARENT_SETTID
)
4345 put_user_u32(ret
, parent_tidptr
);
4349 pthread_mutex_unlock(&info
.mutex
);
4350 pthread_cond_destroy(&info
.cond
);
4351 pthread_mutex_destroy(&info
.mutex
);
4352 pthread_mutex_unlock(&clone_lock
);
4354 /* if no CLONE_VM, we consider it is a fork */
4355 if ((flags
& ~(CSIGNAL
| CLONE_NPTL_FLAGS2
)) != 0)
4360 /* Child Process. */
4361 cpu_clone_regs(env
, newsp
);
4363 /* There is a race condition here. The parent process could
4364 theoretically read the TID in the child process before the child
4365 tid is set. This would require using either ptrace
4366 (not implemented) or having *_tidptr to point at a shared memory
4367 mapping. We can't repeat the spinlock hack used above because
4368 the child process gets its own copy of the lock. */
4369 if (flags
& CLONE_CHILD_SETTID
)
4370 put_user_u32(gettid(), child_tidptr
);
4371 if (flags
& CLONE_PARENT_SETTID
)
4372 put_user_u32(gettid(), parent_tidptr
);
4373 ts
= (TaskState
*)cpu
->opaque
;
4374 if (flags
& CLONE_SETTLS
)
4375 cpu_set_tls (env
, newtls
);
4376 if (flags
& CLONE_CHILD_CLEARTID
)
4377 ts
->child_tidptr
= child_tidptr
;
4385 /* warning : doesn't handle linux specific flags... */
4386 static int target_to_host_fcntl_cmd(int cmd
)
4389 case TARGET_F_DUPFD
:
4390 case TARGET_F_GETFD
:
4391 case TARGET_F_SETFD
:
4392 case TARGET_F_GETFL
:
4393 case TARGET_F_SETFL
:
4395 case TARGET_F_GETLK
:
4397 case TARGET_F_SETLK
:
4399 case TARGET_F_SETLKW
:
4401 case TARGET_F_GETOWN
:
4403 case TARGET_F_SETOWN
:
4405 case TARGET_F_GETSIG
:
4407 case TARGET_F_SETSIG
:
4409 #if TARGET_ABI_BITS == 32
4410 case TARGET_F_GETLK64
:
4412 case TARGET_F_SETLK64
:
4414 case TARGET_F_SETLKW64
:
4417 case TARGET_F_SETLEASE
:
4419 case TARGET_F_GETLEASE
:
4421 #ifdef F_DUPFD_CLOEXEC
4422 case TARGET_F_DUPFD_CLOEXEC
:
4423 return F_DUPFD_CLOEXEC
;
4425 case TARGET_F_NOTIFY
:
4428 case TARGET_F_GETOWN_EX
:
4432 case TARGET_F_SETOWN_EX
:
4436 return -TARGET_EINVAL
;
4438 return -TARGET_EINVAL
;
4441 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
4442 static const bitmask_transtbl flock_tbl
[] = {
4443 TRANSTBL_CONVERT(F_RDLCK
),
4444 TRANSTBL_CONVERT(F_WRLCK
),
4445 TRANSTBL_CONVERT(F_UNLCK
),
4446 TRANSTBL_CONVERT(F_EXLCK
),
4447 TRANSTBL_CONVERT(F_SHLCK
),
4451 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
4454 struct target_flock
*target_fl
;
4455 struct flock64 fl64
;
4456 struct target_flock64
*target_fl64
;
4458 struct f_owner_ex fox
;
4459 struct target_f_owner_ex
*target_fox
;
4462 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
4464 if (host_cmd
== -TARGET_EINVAL
)
4468 case TARGET_F_GETLK
:
4469 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
4470 return -TARGET_EFAULT
;
4472 target_to_host_bitmask(tswap16(target_fl
->l_type
), flock_tbl
);
4473 fl
.l_whence
= tswap16(target_fl
->l_whence
);
4474 fl
.l_start
= tswapal(target_fl
->l_start
);
4475 fl
.l_len
= tswapal(target_fl
->l_len
);
4476 fl
.l_pid
= tswap32(target_fl
->l_pid
);
4477 unlock_user_struct(target_fl
, arg
, 0);
4478 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
4480 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg
, 0))
4481 return -TARGET_EFAULT
;
4483 host_to_target_bitmask(tswap16(fl
.l_type
), flock_tbl
);
4484 target_fl
->l_whence
= tswap16(fl
.l_whence
);
4485 target_fl
->l_start
= tswapal(fl
.l_start
);
4486 target_fl
->l_len
= tswapal(fl
.l_len
);
4487 target_fl
->l_pid
= tswap32(fl
.l_pid
);
4488 unlock_user_struct(target_fl
, arg
, 1);
4492 case TARGET_F_SETLK
:
4493 case TARGET_F_SETLKW
:
4494 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
4495 return -TARGET_EFAULT
;
4497 target_to_host_bitmask(tswap16(target_fl
->l_type
), flock_tbl
);
4498 fl
.l_whence
= tswap16(target_fl
->l_whence
);
4499 fl
.l_start
= tswapal(target_fl
->l_start
);
4500 fl
.l_len
= tswapal(target_fl
->l_len
);
4501 fl
.l_pid
= tswap32(target_fl
->l_pid
);
4502 unlock_user_struct(target_fl
, arg
, 0);
4503 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
4506 case TARGET_F_GETLK64
:
4507 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
4508 return -TARGET_EFAULT
;
4510 target_to_host_bitmask(tswap16(target_fl64
->l_type
), flock_tbl
) >> 1;
4511 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
4512 fl64
.l_start
= tswap64(target_fl64
->l_start
);
4513 fl64
.l_len
= tswap64(target_fl64
->l_len
);
4514 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
4515 unlock_user_struct(target_fl64
, arg
, 0);
4516 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
4518 if (!lock_user_struct(VERIFY_WRITE
, target_fl64
, arg
, 0))
4519 return -TARGET_EFAULT
;
4520 target_fl64
->l_type
=
4521 host_to_target_bitmask(tswap16(fl64
.l_type
), flock_tbl
) >> 1;
4522 target_fl64
->l_whence
= tswap16(fl64
.l_whence
);
4523 target_fl64
->l_start
= tswap64(fl64
.l_start
);
4524 target_fl64
->l_len
= tswap64(fl64
.l_len
);
4525 target_fl64
->l_pid
= tswap32(fl64
.l_pid
);
4526 unlock_user_struct(target_fl64
, arg
, 1);
4529 case TARGET_F_SETLK64
:
4530 case TARGET_F_SETLKW64
:
4531 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
4532 return -TARGET_EFAULT
;
4534 target_to_host_bitmask(tswap16(target_fl64
->l_type
), flock_tbl
) >> 1;
4535 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
4536 fl64
.l_start
= tswap64(target_fl64
->l_start
);
4537 fl64
.l_len
= tswap64(target_fl64
->l_len
);
4538 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
4539 unlock_user_struct(target_fl64
, arg
, 0);
4540 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
4543 case TARGET_F_GETFL
:
4544 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
4546 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
4550 case TARGET_F_SETFL
:
4551 ret
= get_errno(fcntl(fd
, host_cmd
, target_to_host_bitmask(arg
, fcntl_flags_tbl
)));
4555 case TARGET_F_GETOWN_EX
:
4556 ret
= get_errno(fcntl(fd
, host_cmd
, &fox
));
4558 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
4559 return -TARGET_EFAULT
;
4560 target_fox
->type
= tswap32(fox
.type
);
4561 target_fox
->pid
= tswap32(fox
.pid
);
4562 unlock_user_struct(target_fox
, arg
, 1);
4568 case TARGET_F_SETOWN_EX
:
4569 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
4570 return -TARGET_EFAULT
;
4571 fox
.type
= tswap32(target_fox
->type
);
4572 fox
.pid
= tswap32(target_fox
->pid
);
4573 unlock_user_struct(target_fox
, arg
, 0);
4574 ret
= get_errno(fcntl(fd
, host_cmd
, &fox
));
4578 case TARGET_F_SETOWN
:
4579 case TARGET_F_GETOWN
:
4580 case TARGET_F_SETSIG
:
4581 case TARGET_F_GETSIG
:
4582 case TARGET_F_SETLEASE
:
4583 case TARGET_F_GETLEASE
:
4584 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
4588 ret
= get_errno(fcntl(fd
, cmd
, arg
));
4596 static inline int high2lowuid(int uid
)
4604 static inline int high2lowgid(int gid
)
4612 static inline int low2highuid(int uid
)
4614 if ((int16_t)uid
== -1)
4620 static inline int low2highgid(int gid
)
4622 if ((int16_t)gid
== -1)
4627 static inline int tswapid(int id
)
4632 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
4634 #else /* !USE_UID16 */
4635 static inline int high2lowuid(int uid
)
4639 static inline int high2lowgid(int gid
)
4643 static inline int low2highuid(int uid
)
4647 static inline int low2highgid(int gid
)
4651 static inline int tswapid(int id
)
4656 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
4658 #endif /* USE_UID16 */
4660 void syscall_init(void)
4663 const argtype
*arg_type
;
4667 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4668 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4669 #include "syscall_types.h"
4671 #undef STRUCT_SPECIAL
4673 /* Build target_to_host_errno_table[] table from
4674 * host_to_target_errno_table[]. */
4675 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
4676 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
4679 /* we patch the ioctl size if necessary. We rely on the fact that
4680 no ioctl has all the bits at '1' in the size field */
4682 while (ie
->target_cmd
!= 0) {
4683 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
4684 TARGET_IOC_SIZEMASK
) {
4685 arg_type
= ie
->arg_type
;
4686 if (arg_type
[0] != TYPE_PTR
) {
4687 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
4692 size
= thunk_type_size(arg_type
, 0);
4693 ie
->target_cmd
= (ie
->target_cmd
&
4694 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
4695 (size
<< TARGET_IOC_SIZESHIFT
);
4698 /* automatic consistency check if same arch */
4699 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4700 (defined(__x86_64__) && defined(TARGET_X86_64))
4701 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
4702 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4703 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
4710 #if TARGET_ABI_BITS == 32
4711 static inline uint64_t target_offset64(uint32_t word0
, uint32_t word1
)
4713 #ifdef TARGET_WORDS_BIGENDIAN
4714 return ((uint64_t)word0
<< 32) | word1
;
4716 return ((uint64_t)word1
<< 32) | word0
;
4719 #else /* TARGET_ABI_BITS == 32 */
4720 static inline uint64_t target_offset64(uint64_t word0
, uint64_t word1
)
4724 #endif /* TARGET_ABI_BITS != 32 */
4726 #ifdef TARGET_NR_truncate64
4727 static inline abi_long
target_truncate64(void *cpu_env
, const char *arg1
,
4732 if (regpairs_aligned(cpu_env
)) {
4736 return get_errno(truncate64(arg1
, target_offset64(arg2
, arg3
)));
4740 #ifdef TARGET_NR_ftruncate64
4741 static inline abi_long
target_ftruncate64(void *cpu_env
, abi_long arg1
,
4746 if (regpairs_aligned(cpu_env
)) {
4750 return get_errno(ftruncate64(arg1
, target_offset64(arg2
, arg3
)));
4754 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
4755 abi_ulong target_addr
)
4757 struct target_timespec
*target_ts
;
4759 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
4760 return -TARGET_EFAULT
;
4761 host_ts
->tv_sec
= tswapal(target_ts
->tv_sec
);
4762 host_ts
->tv_nsec
= tswapal(target_ts
->tv_nsec
);
4763 unlock_user_struct(target_ts
, target_addr
, 0);
4767 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
4768 struct timespec
*host_ts
)
4770 struct target_timespec
*target_ts
;
4772 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
4773 return -TARGET_EFAULT
;
4774 target_ts
->tv_sec
= tswapal(host_ts
->tv_sec
);
4775 target_ts
->tv_nsec
= tswapal(host_ts
->tv_nsec
);
4776 unlock_user_struct(target_ts
, target_addr
, 1);
4780 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
4781 abi_ulong target_addr
)
4783 struct target_itimerspec
*target_itspec
;
4785 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
4786 return -TARGET_EFAULT
;
4789 host_itspec
->it_interval
.tv_sec
=
4790 tswapal(target_itspec
->it_interval
.tv_sec
);
4791 host_itspec
->it_interval
.tv_nsec
=
4792 tswapal(target_itspec
->it_interval
.tv_nsec
);
4793 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
4794 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
4796 unlock_user_struct(target_itspec
, target_addr
, 1);
4800 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
4801 struct itimerspec
*host_its
)
4803 struct target_itimerspec
*target_itspec
;
4805 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
4806 return -TARGET_EFAULT
;
4809 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
4810 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
4812 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
4813 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
4815 unlock_user_struct(target_itspec
, target_addr
, 0);
4819 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
4820 static inline abi_long
host_to_target_stat64(void *cpu_env
,
4821 abi_ulong target_addr
,
4822 struct stat
*host_st
)
4824 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
4825 if (((CPUARMState
*)cpu_env
)->eabi
) {
4826 struct target_eabi_stat64
*target_st
;
4828 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
4829 return -TARGET_EFAULT
;
4830 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
4831 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
4832 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
4833 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4834 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
4836 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
4837 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
4838 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
4839 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
4840 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
4841 __put_user(host_st
->st_size
, &target_st
->st_size
);
4842 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
4843 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
4844 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
4845 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
4846 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
4847 unlock_user_struct(target_st
, target_addr
, 1);
4851 #if defined(TARGET_HAS_STRUCT_STAT64)
4852 struct target_stat64
*target_st
;
4854 struct target_stat
*target_st
;
4857 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
4858 return -TARGET_EFAULT
;
4859 memset(target_st
, 0, sizeof(*target_st
));
4860 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
4861 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
4862 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4863 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
4865 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
4866 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
4867 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
4868 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
4869 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
4870 /* XXX: better use of kernel struct */
4871 __put_user(host_st
->st_size
, &target_st
->st_size
);
4872 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
4873 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
4874 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
4875 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
4876 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
4877 unlock_user_struct(target_st
, target_addr
, 1);
4884 /* ??? Using host futex calls even when target atomic operations
4885 are not really atomic probably breaks things. However implementing
4886 futexes locally would make futexes shared between multiple processes
4887 tricky. However they're probably useless because guest atomic
4888 operations won't work either. */
4889 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
4890 target_ulong uaddr2
, int val3
)
4892 struct timespec ts
, *pts
;
4895 /* ??? We assume FUTEX_* constants are the same on both host
4897 #ifdef FUTEX_CMD_MASK
4898 base_op
= op
& FUTEX_CMD_MASK
;
4904 case FUTEX_WAIT_BITSET
:
4907 target_to_host_timespec(pts
, timeout
);
4911 return get_errno(sys_futex(g2h(uaddr
), op
, tswap32(val
),
4914 return get_errno(sys_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
4916 return get_errno(sys_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
4918 case FUTEX_CMP_REQUEUE
:
4920 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4921 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4922 But the prototype takes a `struct timespec *'; insert casts
4923 to satisfy the compiler. We do not need to tswap TIMEOUT
4924 since it's not compared to guest memory. */
4925 pts
= (struct timespec
*)(uintptr_t) timeout
;
4926 return get_errno(sys_futex(g2h(uaddr
), op
, val
, pts
,
4928 (base_op
== FUTEX_CMP_REQUEUE
4932 return -TARGET_ENOSYS
;
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Terminated by signal: the low 7 bits carry the signal number;
           preserve everything else (e.g. the core-dump bit).  */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }

    if (WIFSTOPPED(status)) {
        /* Stopped: bits 8..15 carry the stopping signal.  */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }

    /* Normal exit: status layout is assumed identical on the target.  */
    return status;
}
4950 static int open_self_maps(void *cpu_env
, int fd
)
4952 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4953 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
4954 TaskState
*ts
= cpu
->opaque
;
4961 fp
= fopen("/proc/self/maps", "r");
4966 while ((read
= getline(&line
, &len
, fp
)) != -1) {
4967 int fields
, dev_maj
, dev_min
, inode
;
4968 uint64_t min
, max
, offset
;
4969 char flag_r
, flag_w
, flag_x
, flag_p
;
4970 char path
[512] = "";
4971 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
4972 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
4973 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
4975 if ((fields
< 10) || (fields
> 11)) {
4978 if (!strncmp(path
, "[stack]", 7)) {
4981 if (h2g_valid(min
) && h2g_valid(max
)) {
4982 dprintf(fd
, TARGET_ABI_FMT_lx
"-" TARGET_ABI_FMT_lx
4983 " %c%c%c%c %08" PRIx64
" %02x:%02x %d %s%s\n",
4984 h2g(min
), h2g(max
), flag_r
, flag_w
,
4985 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
4986 path
[0] ? " " : "", path
);
4993 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4994 dprintf(fd
, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n",
4995 (unsigned long long)ts
->info
->stack_limit
,
4996 (unsigned long long)(ts
->info
->start_stack
+
4997 (TARGET_PAGE_SIZE
- 1)) & TARGET_PAGE_MASK
,
4998 (unsigned long long)0);
5004 static int open_self_stat(void *cpu_env
, int fd
)
5006 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
5007 TaskState
*ts
= cpu
->opaque
;
5008 abi_ulong start_stack
= ts
->info
->start_stack
;
5011 for (i
= 0; i
< 44; i
++) {
5019 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
5020 } else if (i
== 1) {
5022 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
5023 } else if (i
== 27) {
5026 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
5028 /* for the rest, there is MasterCard */
5029 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
5033 if (write(fd
, buf
, len
) != len
) {
5041 static int open_self_auxv(void *cpu_env
, int fd
)
5043 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
5044 TaskState
*ts
= cpu
->opaque
;
5045 abi_ulong auxv
= ts
->info
->saved_auxv
;
5046 abi_ulong len
= ts
->info
->auxv_len
;
5050 * Auxiliary vector is stored in target process stack.
5051 * read in whole auxv vector and copy it to file
5053 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
5057 r
= write(fd
, ptr
, len
);
5064 lseek(fd
, 0, SEEK_SET
);
5065 unlock_user(ptr
, auxv
, len
);
/*
 * Test whether FILENAME names ENTRY inside this process's own /proc
 * directory, i.e. "/proc/self/<entry>" or "/proc/<our pid>/<entry>".
 *
 * Returns 1 on a match, 0 otherwise.  Paths that are not under /proc,
 * that name another pid, or whose tail differs from ENTRY all return 0.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            /* Numeric pid component: match only our own pid.  */
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}
5095 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/*
 * Exact-path comparator for the fake_open table: unlike
 * is_proc_myself(), ENTRY here is the full path to match.
 *
 * Returns 1 when FILENAME equals ENTRY exactly, 0 otherwise.
 */
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}
/*
 * Back a guest open of /proc/net/route when host and target differ in
 * endianness: copy the host table to FD, byte-swapping the address
 * fields (Dest, Gateway, Mask) which the kernel prints as raw
 * hex-encoded 32-bit values in host order.
 *
 * Returns 0 on success, -EACCES if the host file cannot be read.
 */
static int open_net_route(void *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -EACCES;
    }

    /* read header */
    read = getline(&line, &len, fp);
    dprintf(fd, "%s", line);

    /* read routes */
    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        /* "%15s" bounds the interface-name scan to the 16-byte buffer
           (an unbounded "%s" here could overflow on a long name).  */
        sscanf(line, "%15s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
               iface, &dest, &gw, &flags, &refcnt, &use, &metric,
               &mask, &mtu, &window, &irtt);
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
5139 static int do_open(void *cpu_env
, const char *pathname
, int flags
, mode_t mode
)
5142 const char *filename
;
5143 int (*fill
)(void *cpu_env
, int fd
);
5144 int (*cmp
)(const char *s1
, const char *s2
);
5146 const struct fake_open
*fake_open
;
5147 static const struct fake_open fakes
[] = {
5148 { "maps", open_self_maps
, is_proc_myself
},
5149 { "stat", open_self_stat
, is_proc_myself
},
5150 { "auxv", open_self_auxv
, is_proc_myself
},
5151 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
5152 { "/proc/net/route", open_net_route
, is_proc
},
5154 { NULL
, NULL
, NULL
}
5157 if (is_proc_myself(pathname
, "exe")) {
5158 int execfd
= qemu_getauxval(AT_EXECFD
);
5159 return execfd
? execfd
: get_errno(open(exec_path
, flags
, mode
));
5162 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
5163 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
5168 if (fake_open
->filename
) {
5170 char filename
[PATH_MAX
];
5173 /* create temporary file to map stat to */
5174 tmpdir
= getenv("TMPDIR");
5177 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
5178 fd
= mkstemp(filename
);
5184 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
5188 lseek(fd
, 0, SEEK_SET
);
5193 return get_errno(open(path(pathname
), flags
, mode
));
5196 /* do_syscall() should always have a single exit point at the end so
5197 that actions, such as logging of syscall results, can be performed.
5198 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5199 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
5200 abi_long arg2
, abi_long arg3
, abi_long arg4
,
5201 abi_long arg5
, abi_long arg6
, abi_long arg7
,
5204 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
5211 gemu_log("syscall %d", num
);
5214 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
5217 case TARGET_NR_exit
:
5218 /* In old applications this may be used to implement _exit(2).
5219 However in threaded applictions it is used for thread termination,
5220 and _exit_group is used for application termination.
5221 Do thread termination if we have more then one thread. */
5222 /* FIXME: This probably breaks if a signal arrives. We should probably
5223 be disabling signals. */
5224 if (CPU_NEXT(first_cpu
)) {
5228 /* Remove the CPU from the list. */
5229 QTAILQ_REMOVE(&cpus
, cpu
, node
);
5232 if (ts
->child_tidptr
) {
5233 put_user_u32(0, ts
->child_tidptr
);
5234 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
5238 object_unref(OBJECT(cpu
));
5245 gdb_exit(cpu_env
, arg1
);
5247 ret
= 0; /* avoid warning */
5249 case TARGET_NR_read
:
5253 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
5255 ret
= get_errno(read(arg1
, p
, arg3
));
5256 unlock_user(p
, arg2
, ret
);
5259 case TARGET_NR_write
:
5260 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
5262 ret
= get_errno(write(arg1
, p
, arg3
));
5263 unlock_user(p
, arg2
, 0);
5265 case TARGET_NR_open
:
5266 if (!(p
= lock_user_string(arg1
)))
5268 ret
= get_errno(do_open(cpu_env
, p
,
5269 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
5271 unlock_user(p
, arg1
, 0);
5273 #if defined(TARGET_NR_openat) && defined(__NR_openat)
5274 case TARGET_NR_openat
:
5275 if (!(p
= lock_user_string(arg2
)))
5277 ret
= get_errno(sys_openat(arg1
,
5279 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
5281 unlock_user(p
, arg2
, 0);
5284 case TARGET_NR_close
:
5285 ret
= get_errno(close(arg1
));
5290 case TARGET_NR_fork
:
5291 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
5293 #ifdef TARGET_NR_waitpid
5294 case TARGET_NR_waitpid
:
5297 ret
= get_errno(waitpid(arg1
, &status
, arg3
));
5298 if (!is_error(ret
) && arg2
&& ret
5299 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
5304 #ifdef TARGET_NR_waitid
5305 case TARGET_NR_waitid
:
5309 ret
= get_errno(waitid(arg1
, arg2
, &info
, arg4
));
5310 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
5311 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
5313 host_to_target_siginfo(p
, &info
);
5314 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
5319 #ifdef TARGET_NR_creat /* not on alpha */
5320 case TARGET_NR_creat
:
5321 if (!(p
= lock_user_string(arg1
)))
5323 ret
= get_errno(creat(p
, arg2
));
5324 unlock_user(p
, arg1
, 0);
5327 case TARGET_NR_link
:
5330 p
= lock_user_string(arg1
);
5331 p2
= lock_user_string(arg2
);
5333 ret
= -TARGET_EFAULT
;
5335 ret
= get_errno(link(p
, p2
));
5336 unlock_user(p2
, arg2
, 0);
5337 unlock_user(p
, arg1
, 0);
5340 #if defined(TARGET_NR_linkat)
5341 case TARGET_NR_linkat
:
5346 p
= lock_user_string(arg2
);
5347 p2
= lock_user_string(arg4
);
5349 ret
= -TARGET_EFAULT
;
5351 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
5352 unlock_user(p
, arg2
, 0);
5353 unlock_user(p2
, arg4
, 0);
5357 case TARGET_NR_unlink
:
5358 if (!(p
= lock_user_string(arg1
)))
5360 ret
= get_errno(unlink(p
));
5361 unlock_user(p
, arg1
, 0);
5363 #if defined(TARGET_NR_unlinkat)
5364 case TARGET_NR_unlinkat
:
5365 if (!(p
= lock_user_string(arg2
)))
5367 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
5368 unlock_user(p
, arg2
, 0);
5371 case TARGET_NR_execve
:
5373 char **argp
, **envp
;
5376 abi_ulong guest_argp
;
5377 abi_ulong guest_envp
;
5384 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
5385 if (get_user_ual(addr
, gp
))
5393 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
5394 if (get_user_ual(addr
, gp
))
5401 argp
= alloca((argc
+ 1) * sizeof(void *));
5402 envp
= alloca((envc
+ 1) * sizeof(void *));
5404 for (gp
= guest_argp
, q
= argp
; gp
;
5405 gp
+= sizeof(abi_ulong
), q
++) {
5406 if (get_user_ual(addr
, gp
))
5410 if (!(*q
= lock_user_string(addr
)))
5412 total_size
+= strlen(*q
) + 1;
5416 for (gp
= guest_envp
, q
= envp
; gp
;
5417 gp
+= sizeof(abi_ulong
), q
++) {
5418 if (get_user_ual(addr
, gp
))
5422 if (!(*q
= lock_user_string(addr
)))
5424 total_size
+= strlen(*q
) + 1;
5428 /* This case will not be caught by the host's execve() if its
5429 page size is bigger than the target's. */
5430 if (total_size
> MAX_ARG_PAGES
* TARGET_PAGE_SIZE
) {
5431 ret
= -TARGET_E2BIG
;
5434 if (!(p
= lock_user_string(arg1
)))
5436 ret
= get_errno(execve(p
, argp
, envp
));
5437 unlock_user(p
, arg1
, 0);
5442 ret
= -TARGET_EFAULT
;
5445 for (gp
= guest_argp
, q
= argp
; *q
;
5446 gp
+= sizeof(abi_ulong
), q
++) {
5447 if (get_user_ual(addr
, gp
)
5450 unlock_user(*q
, addr
, 0);
5452 for (gp
= guest_envp
, q
= envp
; *q
;
5453 gp
+= sizeof(abi_ulong
), q
++) {
5454 if (get_user_ual(addr
, gp
)
5457 unlock_user(*q
, addr
, 0);
5461 case TARGET_NR_chdir
:
5462 if (!(p
= lock_user_string(arg1
)))
5464 ret
= get_errno(chdir(p
));
5465 unlock_user(p
, arg1
, 0);
5467 #ifdef TARGET_NR_time
5468 case TARGET_NR_time
:
5471 ret
= get_errno(time(&host_time
));
5474 && put_user_sal(host_time
, arg1
))
5479 case TARGET_NR_mknod
:
5480 if (!(p
= lock_user_string(arg1
)))
5482 ret
= get_errno(mknod(p
, arg2
, arg3
));
5483 unlock_user(p
, arg1
, 0);
5485 #if defined(TARGET_NR_mknodat)
5486 case TARGET_NR_mknodat
:
5487 if (!(p
= lock_user_string(arg2
)))
5489 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
5490 unlock_user(p
, arg2
, 0);
5493 case TARGET_NR_chmod
:
5494 if (!(p
= lock_user_string(arg1
)))
5496 ret
= get_errno(chmod(p
, arg2
));
5497 unlock_user(p
, arg1
, 0);
5499 #ifdef TARGET_NR_break
5500 case TARGET_NR_break
:
5503 #ifdef TARGET_NR_oldstat
5504 case TARGET_NR_oldstat
:
5507 case TARGET_NR_lseek
:
5508 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
5510 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5511 /* Alpha specific */
5512 case TARGET_NR_getxpid
:
5513 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
5514 ret
= get_errno(getpid());
5517 #ifdef TARGET_NR_getpid
5518 case TARGET_NR_getpid
:
5519 ret
= get_errno(getpid());
5522 case TARGET_NR_mount
:
5524 /* need to look at the data field */
5526 p
= lock_user_string(arg1
);
5527 p2
= lock_user_string(arg2
);
5528 p3
= lock_user_string(arg3
);
5529 if (!p
|| !p2
|| !p3
)
5530 ret
= -TARGET_EFAULT
;
5532 /* FIXME - arg5 should be locked, but it isn't clear how to
5533 * do that since it's not guaranteed to be a NULL-terminated
5537 ret
= get_errno(mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
));
5539 ret
= get_errno(mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
)));
5541 unlock_user(p
, arg1
, 0);
5542 unlock_user(p2
, arg2
, 0);
5543 unlock_user(p3
, arg3
, 0);
5546 #ifdef TARGET_NR_umount
5547 case TARGET_NR_umount
:
5548 if (!(p
= lock_user_string(arg1
)))
5550 ret
= get_errno(umount(p
));
5551 unlock_user(p
, arg1
, 0);
5554 #ifdef TARGET_NR_stime /* not on alpha */
5555 case TARGET_NR_stime
:
5558 if (get_user_sal(host_time
, arg1
))
5560 ret
= get_errno(stime(&host_time
));
5564 case TARGET_NR_ptrace
:
5566 #ifdef TARGET_NR_alarm /* not on alpha */
5567 case TARGET_NR_alarm
:
5571 #ifdef TARGET_NR_oldfstat
5572 case TARGET_NR_oldfstat
:
5575 #ifdef TARGET_NR_pause /* not on alpha */
5576 case TARGET_NR_pause
:
5577 ret
= get_errno(pause());
5580 #ifdef TARGET_NR_utime
5581 case TARGET_NR_utime
:
5583 struct utimbuf tbuf
, *host_tbuf
;
5584 struct target_utimbuf
*target_tbuf
;
5586 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
5588 tbuf
.actime
= tswapal(target_tbuf
->actime
);
5589 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
5590 unlock_user_struct(target_tbuf
, arg2
, 0);
5595 if (!(p
= lock_user_string(arg1
)))
5597 ret
= get_errno(utime(p
, host_tbuf
));
5598 unlock_user(p
, arg1
, 0);
5602 case TARGET_NR_utimes
:
5604 struct timeval
*tvp
, tv
[2];
5606 if (copy_from_user_timeval(&tv
[0], arg2
)
5607 || copy_from_user_timeval(&tv
[1],
5608 arg2
+ sizeof(struct target_timeval
)))
5614 if (!(p
= lock_user_string(arg1
)))
5616 ret
= get_errno(utimes(p
, tvp
));
5617 unlock_user(p
, arg1
, 0);
5620 #if defined(TARGET_NR_futimesat)
5621 case TARGET_NR_futimesat
:
5623 struct timeval
*tvp
, tv
[2];
5625 if (copy_from_user_timeval(&tv
[0], arg3
)
5626 || copy_from_user_timeval(&tv
[1],
5627 arg3
+ sizeof(struct target_timeval
)))
5633 if (!(p
= lock_user_string(arg2
)))
5635 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
5636 unlock_user(p
, arg2
, 0);
5640 #ifdef TARGET_NR_stty
5641 case TARGET_NR_stty
:
5644 #ifdef TARGET_NR_gtty
5645 case TARGET_NR_gtty
:
5648 case TARGET_NR_access
:
5649 if (!(p
= lock_user_string(arg1
)))
5651 ret
= get_errno(access(path(p
), arg2
));
5652 unlock_user(p
, arg1
, 0);
5654 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5655 case TARGET_NR_faccessat
:
5656 if (!(p
= lock_user_string(arg2
)))
5658 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
5659 unlock_user(p
, arg2
, 0);
5662 #ifdef TARGET_NR_nice /* not on alpha */
5663 case TARGET_NR_nice
:
5664 ret
= get_errno(nice(arg1
));
5667 #ifdef TARGET_NR_ftime
5668 case TARGET_NR_ftime
:
5671 case TARGET_NR_sync
:
5675 case TARGET_NR_kill
:
5676 ret
= get_errno(kill(arg1
, target_to_host_signal(arg2
)));
5678 case TARGET_NR_rename
:
5681 p
= lock_user_string(arg1
);
5682 p2
= lock_user_string(arg2
);
5684 ret
= -TARGET_EFAULT
;
5686 ret
= get_errno(rename(p
, p2
));
5687 unlock_user(p2
, arg2
, 0);
5688 unlock_user(p
, arg1
, 0);
5691 #if defined(TARGET_NR_renameat)
5692 case TARGET_NR_renameat
:
5695 p
= lock_user_string(arg2
);
5696 p2
= lock_user_string(arg4
);
5698 ret
= -TARGET_EFAULT
;
5700 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
5701 unlock_user(p2
, arg4
, 0);
5702 unlock_user(p
, arg2
, 0);
5706 case TARGET_NR_mkdir
:
5707 if (!(p
= lock_user_string(arg1
)))
5709 ret
= get_errno(mkdir(p
, arg2
));
5710 unlock_user(p
, arg1
, 0);
5712 #if defined(TARGET_NR_mkdirat)
5713 case TARGET_NR_mkdirat
:
5714 if (!(p
= lock_user_string(arg2
)))
5716 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
5717 unlock_user(p
, arg2
, 0);
5720 case TARGET_NR_rmdir
:
5721 if (!(p
= lock_user_string(arg1
)))
5723 ret
= get_errno(rmdir(p
));
5724 unlock_user(p
, arg1
, 0);
5727 ret
= get_errno(dup(arg1
));
5729 case TARGET_NR_pipe
:
5730 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
5732 #ifdef TARGET_NR_pipe2
5733 case TARGET_NR_pipe2
:
5734 ret
= do_pipe(cpu_env
, arg1
,
5735 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
5738 case TARGET_NR_times
:
5740 struct target_tms
*tmsp
;
5742 ret
= get_errno(times(&tms
));
5744 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
5747 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
5748 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
5749 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
5750 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
5753 ret
= host_to_target_clock_t(ret
);
5756 #ifdef TARGET_NR_prof
5757 case TARGET_NR_prof
:
5760 #ifdef TARGET_NR_signal
5761 case TARGET_NR_signal
:
5764 case TARGET_NR_acct
:
5766 ret
= get_errno(acct(NULL
));
5768 if (!(p
= lock_user_string(arg1
)))
5770 ret
= get_errno(acct(path(p
)));
5771 unlock_user(p
, arg1
, 0);
5774 #ifdef TARGET_NR_umount2
5775 case TARGET_NR_umount2
:
5776 if (!(p
= lock_user_string(arg1
)))
5778 ret
= get_errno(umount2(p
, arg2
));
5779 unlock_user(p
, arg1
, 0);
5782 #ifdef TARGET_NR_lock
5783 case TARGET_NR_lock
:
5786 case TARGET_NR_ioctl
:
5787 ret
= do_ioctl(arg1
, arg2
, arg3
);
5789 case TARGET_NR_fcntl
:
5790 ret
= do_fcntl(arg1
, arg2
, arg3
);
5792 #ifdef TARGET_NR_mpx
5796 case TARGET_NR_setpgid
:
5797 ret
= get_errno(setpgid(arg1
, arg2
));
5799 #ifdef TARGET_NR_ulimit
5800 case TARGET_NR_ulimit
:
5803 #ifdef TARGET_NR_oldolduname
5804 case TARGET_NR_oldolduname
:
5807 case TARGET_NR_umask
:
5808 ret
= get_errno(umask(arg1
));
5810 case TARGET_NR_chroot
:
5811 if (!(p
= lock_user_string(arg1
)))
5813 ret
= get_errno(chroot(p
));
5814 unlock_user(p
, arg1
, 0);
5816 case TARGET_NR_ustat
:
5818 case TARGET_NR_dup2
:
5819 ret
= get_errno(dup2(arg1
, arg2
));
5821 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5822 case TARGET_NR_dup3
:
5823 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
5826 #ifdef TARGET_NR_getppid /* not on alpha */
5827 case TARGET_NR_getppid
:
5828 ret
= get_errno(getppid());
5831 case TARGET_NR_getpgrp
:
5832 ret
= get_errno(getpgrp());
5834 case TARGET_NR_setsid
:
5835 ret
= get_errno(setsid());
5837 #ifdef TARGET_NR_sigaction
5838 case TARGET_NR_sigaction
:
5840 #if defined(TARGET_ALPHA)
5841 struct target_sigaction act
, oact
, *pact
= 0;
5842 struct target_old_sigaction
*old_act
;
5844 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
5846 act
._sa_handler
= old_act
->_sa_handler
;
5847 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
5848 act
.sa_flags
= old_act
->sa_flags
;
5849 act
.sa_restorer
= 0;
5850 unlock_user_struct(old_act
, arg2
, 0);
5853 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5854 if (!is_error(ret
) && arg3
) {
5855 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
5857 old_act
->_sa_handler
= oact
._sa_handler
;
5858 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
5859 old_act
->sa_flags
= oact
.sa_flags
;
5860 unlock_user_struct(old_act
, arg3
, 1);
5862 #elif defined(TARGET_MIPS)
5863 struct target_sigaction act
, oact
, *pact
, *old_act
;
5866 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
5868 act
._sa_handler
= old_act
->_sa_handler
;
5869 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
5870 act
.sa_flags
= old_act
->sa_flags
;
5871 unlock_user_struct(old_act
, arg2
, 0);
5877 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5879 if (!is_error(ret
) && arg3
) {
5880 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
5882 old_act
->_sa_handler
= oact
._sa_handler
;
5883 old_act
->sa_flags
= oact
.sa_flags
;
5884 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
5885 old_act
->sa_mask
.sig
[1] = 0;
5886 old_act
->sa_mask
.sig
[2] = 0;
5887 old_act
->sa_mask
.sig
[3] = 0;
5888 unlock_user_struct(old_act
, arg3
, 1);
5891 struct target_old_sigaction
*old_act
;
5892 struct target_sigaction act
, oact
, *pact
;
5894 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
5896 act
._sa_handler
= old_act
->_sa_handler
;
5897 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
5898 act
.sa_flags
= old_act
->sa_flags
;
5899 act
.sa_restorer
= old_act
->sa_restorer
;
5900 unlock_user_struct(old_act
, arg2
, 0);
5905 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5906 if (!is_error(ret
) && arg3
) {
5907 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
5909 old_act
->_sa_handler
= oact
._sa_handler
;
5910 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
5911 old_act
->sa_flags
= oact
.sa_flags
;
5912 old_act
->sa_restorer
= oact
.sa_restorer
;
5913 unlock_user_struct(old_act
, arg3
, 1);
5919 case TARGET_NR_rt_sigaction
:
5921 #if defined(TARGET_ALPHA)
5922 struct target_sigaction act
, oact
, *pact
= 0;
5923 struct target_rt_sigaction
*rt_act
;
5924 /* ??? arg4 == sizeof(sigset_t). */
5926 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
5928 act
._sa_handler
= rt_act
->_sa_handler
;
5929 act
.sa_mask
= rt_act
->sa_mask
;
5930 act
.sa_flags
= rt_act
->sa_flags
;
5931 act
.sa_restorer
= arg5
;
5932 unlock_user_struct(rt_act
, arg2
, 0);
5935 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5936 if (!is_error(ret
) && arg3
) {
5937 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
5939 rt_act
->_sa_handler
= oact
._sa_handler
;
5940 rt_act
->sa_mask
= oact
.sa_mask
;
5941 rt_act
->sa_flags
= oact
.sa_flags
;
5942 unlock_user_struct(rt_act
, arg3
, 1);
5945 struct target_sigaction
*act
;
5946 struct target_sigaction
*oact
;
5949 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
5954 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
5955 ret
= -TARGET_EFAULT
;
5956 goto rt_sigaction_fail
;
5960 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
5963 unlock_user_struct(act
, arg2
, 0);
5965 unlock_user_struct(oact
, arg3
, 1);
5969 #ifdef TARGET_NR_sgetmask /* not on alpha */
5970 case TARGET_NR_sgetmask
:
5973 abi_ulong target_set
;
5974 do_sigprocmask(0, NULL
, &cur_set
);
5975 host_to_target_old_sigset(&target_set
, &cur_set
);
5980 #ifdef TARGET_NR_ssetmask /* not on alpha */
5981 case TARGET_NR_ssetmask
:
5983 sigset_t set
, oset
, cur_set
;
5984 abi_ulong target_set
= arg1
;
5985 do_sigprocmask(0, NULL
, &cur_set
);
5986 target_to_host_old_sigset(&set
, &target_set
);
5987 sigorset(&set
, &set
, &cur_set
);
5988 do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
5989 host_to_target_old_sigset(&target_set
, &oset
);
5994 #ifdef TARGET_NR_sigprocmask
5995 case TARGET_NR_sigprocmask
:
5997 #if defined(TARGET_ALPHA)
5998 sigset_t set
, oldset
;
6003 case TARGET_SIG_BLOCK
:
6006 case TARGET_SIG_UNBLOCK
:
6009 case TARGET_SIG_SETMASK
:
6013 ret
= -TARGET_EINVAL
;
6017 target_to_host_old_sigset(&set
, &mask
);
6019 ret
= get_errno(do_sigprocmask(how
, &set
, &oldset
));
6020 if (!is_error(ret
)) {
6021 host_to_target_old_sigset(&mask
, &oldset
);
6023 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
6026 sigset_t set
, oldset
, *set_ptr
;
6031 case TARGET_SIG_BLOCK
:
6034 case TARGET_SIG_UNBLOCK
:
6037 case TARGET_SIG_SETMASK
:
6041 ret
= -TARGET_EINVAL
;
6044 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
6046 target_to_host_old_sigset(&set
, p
);
6047 unlock_user(p
, arg2
, 0);
6053 ret
= get_errno(do_sigprocmask(how
, set_ptr
, &oldset
));
6054 if (!is_error(ret
) && arg3
) {
6055 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
6057 host_to_target_old_sigset(p
, &oldset
);
6058 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
6064 case TARGET_NR_rt_sigprocmask
:
6067 sigset_t set
, oldset
, *set_ptr
;
6071 case TARGET_SIG_BLOCK
:
6074 case TARGET_SIG_UNBLOCK
:
6077 case TARGET_SIG_SETMASK
:
6081 ret
= -TARGET_EINVAL
;
6084 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
6086 target_to_host_sigset(&set
, p
);
6087 unlock_user(p
, arg2
, 0);
6093 ret
= get_errno(do_sigprocmask(how
, set_ptr
, &oldset
));
6094 if (!is_error(ret
) && arg3
) {
6095 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
6097 host_to_target_sigset(p
, &oldset
);
6098 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
6102 #ifdef TARGET_NR_sigpending
6103 case TARGET_NR_sigpending
:
6106 ret
= get_errno(sigpending(&set
));
6107 if (!is_error(ret
)) {
6108 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
6110 host_to_target_old_sigset(p
, &set
);
6111 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
6116 case TARGET_NR_rt_sigpending
:
6119 ret
= get_errno(sigpending(&set
));
6120 if (!is_error(ret
)) {
6121 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
6123 host_to_target_sigset(p
, &set
);
6124 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
6128 #ifdef TARGET_NR_sigsuspend
6129 case TARGET_NR_sigsuspend
:
6132 #if defined(TARGET_ALPHA)
6133 abi_ulong mask
= arg1
;
6134 target_to_host_old_sigset(&set
, &mask
);
6136 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
6138 target_to_host_old_sigset(&set
, p
);
6139 unlock_user(p
, arg1
, 0);
6141 ret
= get_errno(sigsuspend(&set
));
6145 case TARGET_NR_rt_sigsuspend
:
6148 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
6150 target_to_host_sigset(&set
, p
);
6151 unlock_user(p
, arg1
, 0);
6152 ret
= get_errno(sigsuspend(&set
));
6155 case TARGET_NR_rt_sigtimedwait
:
6158 struct timespec uts
, *puts
;
6161 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
6163 target_to_host_sigset(&set
, p
);
6164 unlock_user(p
, arg1
, 0);
6167 target_to_host_timespec(puts
, arg3
);
6171 ret
= get_errno(sigtimedwait(&set
, &uinfo
, puts
));
6172 if (!is_error(ret
)) {
6174 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
6179 host_to_target_siginfo(p
, &uinfo
);
6180 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
6182 ret
= host_to_target_signal(ret
);
6186 case TARGET_NR_rt_sigqueueinfo
:
6189 if (!(p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_sigset_t
), 1)))
6191 target_to_host_siginfo(&uinfo
, p
);
6192 unlock_user(p
, arg1
, 0);
6193 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
6196 #ifdef TARGET_NR_sigreturn
6197 case TARGET_NR_sigreturn
:
6198 /* NOTE: ret is eax, so not transcoding must be done */
6199 ret
= do_sigreturn(cpu_env
);
6202 case TARGET_NR_rt_sigreturn
:
6203 /* NOTE: ret is eax, so not transcoding must be done */
6204 ret
= do_rt_sigreturn(cpu_env
);
6206 case TARGET_NR_sethostname
:
6207 if (!(p
= lock_user_string(arg1
)))
6209 ret
= get_errno(sethostname(p
, arg2
));
6210 unlock_user(p
, arg1
, 0);
6212 case TARGET_NR_setrlimit
:
6214 int resource
= target_to_host_resource(arg1
);
6215 struct target_rlimit
*target_rlim
;
6217 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
6219 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
6220 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
6221 unlock_user_struct(target_rlim
, arg2
, 0);
6222 ret
= get_errno(setrlimit(resource
, &rlim
));
6225 case TARGET_NR_getrlimit
:
6227 int resource
= target_to_host_resource(arg1
);
6228 struct target_rlimit
*target_rlim
;
6231 ret
= get_errno(getrlimit(resource
, &rlim
));
6232 if (!is_error(ret
)) {
6233 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
6235 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
6236 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
6237 unlock_user_struct(target_rlim
, arg2
, 1);
6241 case TARGET_NR_getrusage
:
6243 struct rusage rusage
;
6244 ret
= get_errno(getrusage(arg1
, &rusage
));
6245 if (!is_error(ret
)) {
6246 ret
= host_to_target_rusage(arg2
, &rusage
);
6250 case TARGET_NR_gettimeofday
:
6253 ret
= get_errno(gettimeofday(&tv
, NULL
));
6254 if (!is_error(ret
)) {
6255 if (copy_to_user_timeval(arg1
, &tv
))
6260 case TARGET_NR_settimeofday
:
6263 if (copy_from_user_timeval(&tv
, arg1
))
6265 ret
= get_errno(settimeofday(&tv
, NULL
));
6268 #if defined(TARGET_NR_select)
6269 case TARGET_NR_select
:
6270 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
6271 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
6274 struct target_sel_arg_struct
*sel
;
6275 abi_ulong inp
, outp
, exp
, tvp
;
6278 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1))
6280 nsel
= tswapal(sel
->n
);
6281 inp
= tswapal(sel
->inp
);
6282 outp
= tswapal(sel
->outp
);
6283 exp
= tswapal(sel
->exp
);
6284 tvp
= tswapal(sel
->tvp
);
6285 unlock_user_struct(sel
, arg1
, 0);
6286 ret
= do_select(nsel
, inp
, outp
, exp
, tvp
);
6291 #ifdef TARGET_NR_pselect6
6292 case TARGET_NR_pselect6
:
6294 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
6295 fd_set rfds
, wfds
, efds
;
6296 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
6297 struct timespec ts
, *ts_ptr
;
6300 * The 6th arg is actually two args smashed together,
6301 * so we cannot use the C library.
6309 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
6310 target_sigset_t
*target_sigset
;
6318 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
6322 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
6326 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
6332 * This takes a timespec, and not a timeval, so we cannot
6333 * use the do_select() helper ...
6336 if (target_to_host_timespec(&ts
, ts_addr
)) {
6344 /* Extract the two packed args for the sigset */
6347 sig
.size
= _NSIG
/ 8;
6349 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
6353 arg_sigset
= tswapal(arg7
[0]);
6354 arg_sigsize
= tswapal(arg7
[1]);
6355 unlock_user(arg7
, arg6
, 0);
6359 if (arg_sigsize
!= sizeof(*target_sigset
)) {
6360 /* Like the kernel, we enforce correct size sigsets */
6361 ret
= -TARGET_EINVAL
;
6364 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
6365 sizeof(*target_sigset
), 1);
6366 if (!target_sigset
) {
6369 target_to_host_sigset(&set
, target_sigset
);
6370 unlock_user(target_sigset
, arg_sigset
, 0);
6378 ret
= get_errno(sys_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
6381 if (!is_error(ret
)) {
6382 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
6384 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
6386 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
6389 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
6395 case TARGET_NR_symlink
:
6398 p
= lock_user_string(arg1
);
6399 p2
= lock_user_string(arg2
);
6401 ret
= -TARGET_EFAULT
;
6403 ret
= get_errno(symlink(p
, p2
));
6404 unlock_user(p2
, arg2
, 0);
6405 unlock_user(p
, arg1
, 0);
6408 #if defined(TARGET_NR_symlinkat)
6409 case TARGET_NR_symlinkat
:
6412 p
= lock_user_string(arg1
);
6413 p2
= lock_user_string(arg3
);
6415 ret
= -TARGET_EFAULT
;
6417 ret
= get_errno(symlinkat(p
, arg2
, p2
));
6418 unlock_user(p2
, arg3
, 0);
6419 unlock_user(p
, arg1
, 0);
6423 #ifdef TARGET_NR_oldlstat
6424 case TARGET_NR_oldlstat
:
6427 case TARGET_NR_readlink
:
6430 p
= lock_user_string(arg1
);
6431 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
6433 ret
= -TARGET_EFAULT
;
6434 } else if (is_proc_myself((const char *)p
, "exe")) {
6435 char real
[PATH_MAX
], *temp
;
6436 temp
= realpath(exec_path
, real
);
6437 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
6438 snprintf((char *)p2
, arg3
, "%s", real
);
6440 ret
= get_errno(readlink(path(p
), p2
, arg3
));
6442 unlock_user(p2
, arg2
, ret
);
6443 unlock_user(p
, arg1
, 0);
6446 #if defined(TARGET_NR_readlinkat)
6447 case TARGET_NR_readlinkat
:
6450 p
= lock_user_string(arg2
);
6451 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
6453 ret
= -TARGET_EFAULT
;
6454 } else if (is_proc_myself((const char *)p
, "exe")) {
6455 char real
[PATH_MAX
], *temp
;
6456 temp
= realpath(exec_path
, real
);
6457 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
6458 snprintf((char *)p2
, arg4
, "%s", real
);
6460 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
6462 unlock_user(p2
, arg3
, ret
);
6463 unlock_user(p
, arg2
, 0);
6467 #ifdef TARGET_NR_uselib
6468 case TARGET_NR_uselib
:
6471 #ifdef TARGET_NR_swapon
6472 case TARGET_NR_swapon
:
6473 if (!(p
= lock_user_string(arg1
)))
6475 ret
= get_errno(swapon(p
, arg2
));
6476 unlock_user(p
, arg1
, 0);
6479 case TARGET_NR_reboot
:
6480 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
6481 /* arg4 must be ignored in all other cases */
6482 p
= lock_user_string(arg4
);
6486 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
6487 unlock_user(p
, arg4
, 0);
6489 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
6492 #ifdef TARGET_NR_readdir
6493 case TARGET_NR_readdir
:
6496 #ifdef TARGET_NR_mmap
6497 case TARGET_NR_mmap
:
6498 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6499 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
6500 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
6501 || defined(TARGET_S390X)
6504 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
6505 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
6513 unlock_user(v
, arg1
, 0);
6514 ret
= get_errno(target_mmap(v1
, v2
, v3
,
6515 target_to_host_bitmask(v4
, mmap_flags_tbl
),
6519 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
6520 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
6526 #ifdef TARGET_NR_mmap2
6527 case TARGET_NR_mmap2
:
6529 #define MMAP_SHIFT 12
6531 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
6532 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
6534 arg6
<< MMAP_SHIFT
));
6537 case TARGET_NR_munmap
:
6538 ret
= get_errno(target_munmap(arg1
, arg2
));
6540 case TARGET_NR_mprotect
:
6542 TaskState
*ts
= cpu
->opaque
;
6543 /* Special hack to detect libc making the stack executable. */
6544 if ((arg3
& PROT_GROWSDOWN
)
6545 && arg1
>= ts
->info
->stack_limit
6546 && arg1
<= ts
->info
->start_stack
) {
6547 arg3
&= ~PROT_GROWSDOWN
;
6548 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
6549 arg1
= ts
->info
->stack_limit
;
6552 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
6554 #ifdef TARGET_NR_mremap
6555 case TARGET_NR_mremap
:
6556 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
6559 /* ??? msync/mlock/munlock are broken for softmmu. */
6560 #ifdef TARGET_NR_msync
6561 case TARGET_NR_msync
:
6562 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
6565 #ifdef TARGET_NR_mlock
6566 case TARGET_NR_mlock
:
6567 ret
= get_errno(mlock(g2h(arg1
), arg2
));
6570 #ifdef TARGET_NR_munlock
6571 case TARGET_NR_munlock
:
6572 ret
= get_errno(munlock(g2h(arg1
), arg2
));
6575 #ifdef TARGET_NR_mlockall
6576 case TARGET_NR_mlockall
:
6577 ret
= get_errno(mlockall(arg1
));
6580 #ifdef TARGET_NR_munlockall
6581 case TARGET_NR_munlockall
:
6582 ret
= get_errno(munlockall());
6585 case TARGET_NR_truncate
:
6586 if (!(p
= lock_user_string(arg1
)))
6588 ret
= get_errno(truncate(p
, arg2
));
6589 unlock_user(p
, arg1
, 0);
6591 case TARGET_NR_ftruncate
:
6592 ret
= get_errno(ftruncate(arg1
, arg2
));
6594 case TARGET_NR_fchmod
:
6595 ret
= get_errno(fchmod(arg1
, arg2
));
6597 #if defined(TARGET_NR_fchmodat)
6598 case TARGET_NR_fchmodat
:
6599 if (!(p
= lock_user_string(arg2
)))
6601 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
6602 unlock_user(p
, arg2
, 0);
6605 case TARGET_NR_getpriority
:
6606 /* Note that negative values are valid for getpriority, so we must
6607 differentiate based on errno settings. */
6609 ret
= getpriority(arg1
, arg2
);
6610 if (ret
== -1 && errno
!= 0) {
6611 ret
= -host_to_target_errno(errno
);
6615 /* Return value is the unbiased priority. Signal no error. */
6616 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
6618 /* Return value is a biased priority to avoid negative numbers. */
6622 case TARGET_NR_setpriority
:
6623 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
6625 #ifdef TARGET_NR_profil
6626 case TARGET_NR_profil
:
6629 case TARGET_NR_statfs
:
6630 if (!(p
= lock_user_string(arg1
)))
6632 ret
= get_errno(statfs(path(p
), &stfs
));
6633 unlock_user(p
, arg1
, 0);
6635 if (!is_error(ret
)) {
6636 struct target_statfs
*target_stfs
;
6638 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
6640 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
6641 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
6642 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
6643 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
6644 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
6645 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
6646 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
6647 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
6648 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
6649 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
6650 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
6651 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
6652 unlock_user_struct(target_stfs
, arg2
, 1);
6655 case TARGET_NR_fstatfs
:
6656 ret
= get_errno(fstatfs(arg1
, &stfs
));
6657 goto convert_statfs
;
6658 #ifdef TARGET_NR_statfs64
6659 case TARGET_NR_statfs64
:
6660 if (!(p
= lock_user_string(arg1
)))
6662 ret
= get_errno(statfs(path(p
), &stfs
));
6663 unlock_user(p
, arg1
, 0);
6665 if (!is_error(ret
)) {
6666 struct target_statfs64
*target_stfs
;
6668 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
6670 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
6671 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
6672 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
6673 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
6674 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
6675 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
6676 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
6677 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
6678 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
6679 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
6680 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
6681 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
6682 unlock_user_struct(target_stfs
, arg3
, 1);
6685 case TARGET_NR_fstatfs64
:
6686 ret
= get_errno(fstatfs(arg1
, &stfs
));
6687 goto convert_statfs64
;
6689 #ifdef TARGET_NR_ioperm
6690 case TARGET_NR_ioperm
:
6693 #ifdef TARGET_NR_socketcall
6694 case TARGET_NR_socketcall
:
6695 ret
= do_socketcall(arg1
, arg2
);
6698 #ifdef TARGET_NR_accept
6699 case TARGET_NR_accept
:
6700 ret
= do_accept4(arg1
, arg2
, arg3
, 0);
6703 #ifdef TARGET_NR_accept4
6704 case TARGET_NR_accept4
:
6705 #ifdef CONFIG_ACCEPT4
6706 ret
= do_accept4(arg1
, arg2
, arg3
, arg4
);
6712 #ifdef TARGET_NR_bind
6713 case TARGET_NR_bind
:
6714 ret
= do_bind(arg1
, arg2
, arg3
);
6717 #ifdef TARGET_NR_connect
6718 case TARGET_NR_connect
:
6719 ret
= do_connect(arg1
, arg2
, arg3
);
6722 #ifdef TARGET_NR_getpeername
6723 case TARGET_NR_getpeername
:
6724 ret
= do_getpeername(arg1
, arg2
, arg3
);
6727 #ifdef TARGET_NR_getsockname
6728 case TARGET_NR_getsockname
:
6729 ret
= do_getsockname(arg1
, arg2
, arg3
);
6732 #ifdef TARGET_NR_getsockopt
6733 case TARGET_NR_getsockopt
:
6734 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
6737 #ifdef TARGET_NR_listen
6738 case TARGET_NR_listen
:
6739 ret
= get_errno(listen(arg1
, arg2
));
6742 #ifdef TARGET_NR_recv
6743 case TARGET_NR_recv
:
6744 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
6747 #ifdef TARGET_NR_recvfrom
6748 case TARGET_NR_recvfrom
:
6749 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6752 #ifdef TARGET_NR_recvmsg
6753 case TARGET_NR_recvmsg
:
6754 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
6757 #ifdef TARGET_NR_send
6758 case TARGET_NR_send
:
6759 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
6762 #ifdef TARGET_NR_sendmsg
6763 case TARGET_NR_sendmsg
:
6764 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
6767 #ifdef TARGET_NR_sendmmsg
6768 case TARGET_NR_sendmmsg
:
6769 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
6771 case TARGET_NR_recvmmsg
:
6772 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
6775 #ifdef TARGET_NR_sendto
6776 case TARGET_NR_sendto
:
6777 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6780 #ifdef TARGET_NR_shutdown
6781 case TARGET_NR_shutdown
:
6782 ret
= get_errno(shutdown(arg1
, arg2
));
6785 #ifdef TARGET_NR_socket
6786 case TARGET_NR_socket
:
6787 ret
= do_socket(arg1
, arg2
, arg3
);
6790 #ifdef TARGET_NR_socketpair
6791 case TARGET_NR_socketpair
:
6792 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
6795 #ifdef TARGET_NR_setsockopt
6796 case TARGET_NR_setsockopt
:
6797 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
6801 case TARGET_NR_syslog
:
6802 if (!(p
= lock_user_string(arg2
)))
6804 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
6805 unlock_user(p
, arg2
, 0);
6808 case TARGET_NR_setitimer
:
6810 struct itimerval value
, ovalue
, *pvalue
;
6814 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
6815 || copy_from_user_timeval(&pvalue
->it_value
,
6816 arg2
+ sizeof(struct target_timeval
)))
6821 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
6822 if (!is_error(ret
) && arg3
) {
6823 if (copy_to_user_timeval(arg3
,
6824 &ovalue
.it_interval
)
6825 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
6831 case TARGET_NR_getitimer
:
6833 struct itimerval value
;
6835 ret
= get_errno(getitimer(arg1
, &value
));
6836 if (!is_error(ret
) && arg2
) {
6837 if (copy_to_user_timeval(arg2
,
6839 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
6845 case TARGET_NR_stat
:
6846 if (!(p
= lock_user_string(arg1
)))
6848 ret
= get_errno(stat(path(p
), &st
));
6849 unlock_user(p
, arg1
, 0);
6851 case TARGET_NR_lstat
:
6852 if (!(p
= lock_user_string(arg1
)))
6854 ret
= get_errno(lstat(path(p
), &st
));
6855 unlock_user(p
, arg1
, 0);
6857 case TARGET_NR_fstat
:
6859 ret
= get_errno(fstat(arg1
, &st
));
6861 if (!is_error(ret
)) {
6862 struct target_stat
*target_st
;
6864 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
6866 memset(target_st
, 0, sizeof(*target_st
));
6867 __put_user(st
.st_dev
, &target_st
->st_dev
);
6868 __put_user(st
.st_ino
, &target_st
->st_ino
);
6869 __put_user(st
.st_mode
, &target_st
->st_mode
);
6870 __put_user(st
.st_uid
, &target_st
->st_uid
);
6871 __put_user(st
.st_gid
, &target_st
->st_gid
);
6872 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
6873 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
6874 __put_user(st
.st_size
, &target_st
->st_size
);
6875 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
6876 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
6877 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
6878 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
6879 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
6880 unlock_user_struct(target_st
, arg2
, 1);
6884 #ifdef TARGET_NR_olduname
6885 case TARGET_NR_olduname
:
6888 #ifdef TARGET_NR_iopl
6889 case TARGET_NR_iopl
:
6892 case TARGET_NR_vhangup
:
6893 ret
= get_errno(vhangup());
6895 #ifdef TARGET_NR_idle
6896 case TARGET_NR_idle
:
6899 #ifdef TARGET_NR_syscall
6900 case TARGET_NR_syscall
:
6901 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
6902 arg6
, arg7
, arg8
, 0);
6905 case TARGET_NR_wait4
:
6908 abi_long status_ptr
= arg2
;
6909 struct rusage rusage
, *rusage_ptr
;
6910 abi_ulong target_rusage
= arg4
;
6911 abi_long rusage_err
;
6913 rusage_ptr
= &rusage
;
6916 ret
= get_errno(wait4(arg1
, &status
, arg3
, rusage_ptr
));
6917 if (!is_error(ret
)) {
6918 if (status_ptr
&& ret
) {
6919 status
= host_to_target_waitstatus(status
);
6920 if (put_user_s32(status
, status_ptr
))
6923 if (target_rusage
) {
6924 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
6932 #ifdef TARGET_NR_swapoff
6933 case TARGET_NR_swapoff
:
6934 if (!(p
= lock_user_string(arg1
)))
6936 ret
= get_errno(swapoff(p
));
6937 unlock_user(p
, arg1
, 0);
6940 case TARGET_NR_sysinfo
:
6942 struct target_sysinfo
*target_value
;
6943 struct sysinfo value
;
6944 ret
= get_errno(sysinfo(&value
));
6945 if (!is_error(ret
) && arg1
)
6947 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
6949 __put_user(value
.uptime
, &target_value
->uptime
);
6950 __put_user(value
.loads
[0], &target_value
->loads
[0]);
6951 __put_user(value
.loads
[1], &target_value
->loads
[1]);
6952 __put_user(value
.loads
[2], &target_value
->loads
[2]);
6953 __put_user(value
.totalram
, &target_value
->totalram
);
6954 __put_user(value
.freeram
, &target_value
->freeram
);
6955 __put_user(value
.sharedram
, &target_value
->sharedram
);
6956 __put_user(value
.bufferram
, &target_value
->bufferram
);
6957 __put_user(value
.totalswap
, &target_value
->totalswap
);
6958 __put_user(value
.freeswap
, &target_value
->freeswap
);
6959 __put_user(value
.procs
, &target_value
->procs
);
6960 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
6961 __put_user(value
.freehigh
, &target_value
->freehigh
);
6962 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
6963 unlock_user_struct(target_value
, arg1
, 1);
6967 #ifdef TARGET_NR_ipc
6969 ret
= do_ipc(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6972 #ifdef TARGET_NR_semget
6973 case TARGET_NR_semget
:
6974 ret
= get_errno(semget(arg1
, arg2
, arg3
));
6977 #ifdef TARGET_NR_semop
6978 case TARGET_NR_semop
:
6979 ret
= do_semop(arg1
, arg2
, arg3
);
6982 #ifdef TARGET_NR_semctl
6983 case TARGET_NR_semctl
:
6984 ret
= do_semctl(arg1
, arg2
, arg3
, (union target_semun
)(abi_ulong
)arg4
);
6987 #ifdef TARGET_NR_msgctl
6988 case TARGET_NR_msgctl
:
6989 ret
= do_msgctl(arg1
, arg2
, arg3
);
6992 #ifdef TARGET_NR_msgget
6993 case TARGET_NR_msgget
:
6994 ret
= get_errno(msgget(arg1
, arg2
));
6997 #ifdef TARGET_NR_msgrcv
6998 case TARGET_NR_msgrcv
:
6999 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
7002 #ifdef TARGET_NR_msgsnd
7003 case TARGET_NR_msgsnd
:
7004 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
7007 #ifdef TARGET_NR_shmget
7008 case TARGET_NR_shmget
:
7009 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
7012 #ifdef TARGET_NR_shmctl
7013 case TARGET_NR_shmctl
:
7014 ret
= do_shmctl(arg1
, arg2
, arg3
);
7017 #ifdef TARGET_NR_shmat
7018 case TARGET_NR_shmat
:
7019 ret
= do_shmat(arg1
, arg2
, arg3
);
7022 #ifdef TARGET_NR_shmdt
7023 case TARGET_NR_shmdt
:
7024 ret
= do_shmdt(arg1
);
7027 case TARGET_NR_fsync
:
7028 ret
= get_errno(fsync(arg1
));
7030 case TARGET_NR_clone
:
7031 /* Linux manages to have three different orderings for its
7032 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
7033 * match the kernel's CONFIG_CLONE_* settings.
7034 * Microblaze is further special in that it uses a sixth
7035 * implicit argument to clone for the TLS pointer.
7037 #if defined(TARGET_MICROBLAZE)
7038 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
7039 #elif defined(TARGET_CLONE_BACKWARDS)
7040 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
7041 #elif defined(TARGET_CLONE_BACKWARDS2)
7042 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
7044 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
7047 #ifdef __NR_exit_group
7048 /* new thread calls */
7049 case TARGET_NR_exit_group
:
7053 gdb_exit(cpu_env
, arg1
);
7054 ret
= get_errno(exit_group(arg1
));
7057 case TARGET_NR_setdomainname
:
7058 if (!(p
= lock_user_string(arg1
)))
7060 ret
= get_errno(setdomainname(p
, arg2
));
7061 unlock_user(p
, arg1
, 0);
7063 case TARGET_NR_uname
:
7064 /* no need to transcode because we use the linux syscall */
7066 struct new_utsname
* buf
;
7068 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
7070 ret
= get_errno(sys_uname(buf
));
7071 if (!is_error(ret
)) {
7072 /* Overwrite the native machine name with whatever is being
7074 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
7075 /* Allow the user to override the reported release. */
7076 if (qemu_uname_release
&& *qemu_uname_release
)
7077 strcpy (buf
->release
, qemu_uname_release
);
7079 unlock_user_struct(buf
, arg1
, 1);
7083 case TARGET_NR_modify_ldt
:
7084 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
7086 #if !defined(TARGET_X86_64)
7087 case TARGET_NR_vm86old
:
7089 case TARGET_NR_vm86
:
7090 ret
= do_vm86(cpu_env
, arg1
, arg2
);
7094 case TARGET_NR_adjtimex
:
7096 #ifdef TARGET_NR_create_module
7097 case TARGET_NR_create_module
:
7099 case TARGET_NR_init_module
:
7100 case TARGET_NR_delete_module
:
7101 #ifdef TARGET_NR_get_kernel_syms
7102 case TARGET_NR_get_kernel_syms
:
7105 case TARGET_NR_quotactl
:
7107 case TARGET_NR_getpgid
:
7108 ret
= get_errno(getpgid(arg1
));
7110 case TARGET_NR_fchdir
:
7111 ret
= get_errno(fchdir(arg1
));
7113 #ifdef TARGET_NR_bdflush /* not on x86_64 */
7114 case TARGET_NR_bdflush
:
7117 #ifdef TARGET_NR_sysfs
7118 case TARGET_NR_sysfs
:
7121 case TARGET_NR_personality
:
7122 ret
= get_errno(personality(arg1
));
7124 #ifdef TARGET_NR_afs_syscall
7125 case TARGET_NR_afs_syscall
:
7128 #ifdef TARGET_NR__llseek /* Not on alpha */
7129 case TARGET_NR__llseek
:
7132 #if !defined(__NR_llseek)
7133 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | arg3
, arg5
);
7135 ret
= get_errno(res
);
7140 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
7142 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
7148 case TARGET_NR_getdents
:
7149 #ifdef __NR_getdents
7150 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
7152 struct target_dirent
*target_dirp
;
7153 struct linux_dirent
*dirp
;
7154 abi_long count
= arg3
;
7156 dirp
= malloc(count
);
7158 ret
= -TARGET_ENOMEM
;
7162 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
7163 if (!is_error(ret
)) {
7164 struct linux_dirent
*de
;
7165 struct target_dirent
*tde
;
7167 int reclen
, treclen
;
7168 int count1
, tnamelen
;
7172 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
7176 reclen
= de
->d_reclen
;
7177 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
7178 assert(tnamelen
>= 0);
7179 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
7180 assert(count1
+ treclen
<= count
);
7181 tde
->d_reclen
= tswap16(treclen
);
7182 tde
->d_ino
= tswapal(de
->d_ino
);
7183 tde
->d_off
= tswapal(de
->d_off
);
7184 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
7185 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
7187 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
7191 unlock_user(target_dirp
, arg2
, ret
);
7197 struct linux_dirent
*dirp
;
7198 abi_long count
= arg3
;
7200 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
7202 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
7203 if (!is_error(ret
)) {
7204 struct linux_dirent
*de
;
7209 reclen
= de
->d_reclen
;
7212 de
->d_reclen
= tswap16(reclen
);
7213 tswapls(&de
->d_ino
);
7214 tswapls(&de
->d_off
);
7215 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
7219 unlock_user(dirp
, arg2
, ret
);
7223 /* Implement getdents in terms of getdents64 */
7225 struct linux_dirent64
*dirp
;
7226 abi_long count
= arg3
;
7228 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
7232 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
7233 if (!is_error(ret
)) {
7234 /* Convert the dirent64 structs to target dirent. We do this
7235 * in-place, since we can guarantee that a target_dirent is no
7236 * larger than a dirent64; however this means we have to be
7237 * careful to read everything before writing in the new format.
7239 struct linux_dirent64
*de
;
7240 struct target_dirent
*tde
;
7245 tde
= (struct target_dirent
*)dirp
;
7247 int namelen
, treclen
;
7248 int reclen
= de
->d_reclen
;
7249 uint64_t ino
= de
->d_ino
;
7250 int64_t off
= de
->d_off
;
7251 uint8_t type
= de
->d_type
;
7253 namelen
= strlen(de
->d_name
);
7254 treclen
= offsetof(struct target_dirent
, d_name
)
7256 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
7258 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
7259 tde
->d_ino
= tswapal(ino
);
7260 tde
->d_off
= tswapal(off
);
7261 tde
->d_reclen
= tswap16(treclen
);
7262 /* The target_dirent type is in what was formerly a padding
7263 * byte at the end of the structure:
7265 *(((char *)tde
) + treclen
- 1) = type
;
7267 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
7268 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
7274 unlock_user(dirp
, arg2
, ret
);
7278 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
7279 case TARGET_NR_getdents64
:
7281 struct linux_dirent64
*dirp
;
7282 abi_long count
= arg3
;
7283 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
7285 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
7286 if (!is_error(ret
)) {
7287 struct linux_dirent64
*de
;
7292 reclen
= de
->d_reclen
;
7295 de
->d_reclen
= tswap16(reclen
);
7296 tswap64s((uint64_t *)&de
->d_ino
);
7297 tswap64s((uint64_t *)&de
->d_off
);
7298 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
7302 unlock_user(dirp
, arg2
, ret
);
7305 #endif /* TARGET_NR_getdents64 */
7306 #if defined(TARGET_NR__newselect)
7307 case TARGET_NR__newselect
:
7308 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
7311 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
7312 # ifdef TARGET_NR_poll
7313 case TARGET_NR_poll
:
7315 # ifdef TARGET_NR_ppoll
7316 case TARGET_NR_ppoll
:
7319 struct target_pollfd
*target_pfd
;
7320 unsigned int nfds
= arg2
;
7325 target_pfd
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_pollfd
) * nfds
, 1);
7329 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
7330 for(i
= 0; i
< nfds
; i
++) {
7331 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
7332 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
7335 # ifdef TARGET_NR_ppoll
7336 if (num
== TARGET_NR_ppoll
) {
7337 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
7338 target_sigset_t
*target_set
;
7339 sigset_t _set
, *set
= &_set
;
7342 if (target_to_host_timespec(timeout_ts
, arg3
)) {
7343 unlock_user(target_pfd
, arg1
, 0);
7351 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
7353 unlock_user(target_pfd
, arg1
, 0);
7356 target_to_host_sigset(set
, target_set
);
7361 ret
= get_errno(sys_ppoll(pfd
, nfds
, timeout_ts
, set
, _NSIG
/8));
7363 if (!is_error(ret
) && arg3
) {
7364 host_to_target_timespec(arg3
, timeout_ts
);
7367 unlock_user(target_set
, arg4
, 0);
7371 ret
= get_errno(poll(pfd
, nfds
, timeout
));
7373 if (!is_error(ret
)) {
7374 for(i
= 0; i
< nfds
; i
++) {
7375 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
7378 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
7382 case TARGET_NR_flock
:
7383 /* NOTE: the flock constant seems to be the same for every
7385 ret
= get_errno(flock(arg1
, arg2
));
7387 case TARGET_NR_readv
:
7389 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
7391 ret
= get_errno(readv(arg1
, vec
, arg3
));
7392 unlock_iovec(vec
, arg2
, arg3
, 1);
7394 ret
= -host_to_target_errno(errno
);
7398 case TARGET_NR_writev
:
7400 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
7402 ret
= get_errno(writev(arg1
, vec
, arg3
));
7403 unlock_iovec(vec
, arg2
, arg3
, 0);
7405 ret
= -host_to_target_errno(errno
);
7409 case TARGET_NR_getsid
:
7410 ret
= get_errno(getsid(arg1
));
7412 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
7413 case TARGET_NR_fdatasync
:
7414 ret
= get_errno(fdatasync(arg1
));
7417 case TARGET_NR__sysctl
:
7418 /* We don't implement this, but ENOTDIR is always a safe
7420 ret
= -TARGET_ENOTDIR
;
7422 case TARGET_NR_sched_getaffinity
:
7424 unsigned int mask_size
;
7425 unsigned long *mask
;
7428 * sched_getaffinity needs multiples of ulong, so need to take
7429 * care of mismatches between target ulong and host ulong sizes.
7431 if (arg2
& (sizeof(abi_ulong
) - 1)) {
7432 ret
= -TARGET_EINVAL
;
7435 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
7437 mask
= alloca(mask_size
);
7438 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
7440 if (!is_error(ret
)) {
7441 if (copy_to_user(arg3
, mask
, ret
)) {
7447 case TARGET_NR_sched_setaffinity
:
7449 unsigned int mask_size
;
7450 unsigned long *mask
;
7453 * sched_setaffinity needs multiples of ulong, so need to take
7454 * care of mismatches between target ulong and host ulong sizes.
7456 if (arg2
& (sizeof(abi_ulong
) - 1)) {
7457 ret
= -TARGET_EINVAL
;
7460 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
7462 mask
= alloca(mask_size
);
7463 if (!lock_user_struct(VERIFY_READ
, p
, arg3
, 1)) {
7466 memcpy(mask
, p
, arg2
);
7467 unlock_user_struct(p
, arg2
, 0);
7469 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
7472 case TARGET_NR_sched_setparam
:
7474 struct sched_param
*target_schp
;
7475 struct sched_param schp
;
7477 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
7479 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
7480 unlock_user_struct(target_schp
, arg2
, 0);
7481 ret
= get_errno(sched_setparam(arg1
, &schp
));
7484 case TARGET_NR_sched_getparam
:
7486 struct sched_param
*target_schp
;
7487 struct sched_param schp
;
7488 ret
= get_errno(sched_getparam(arg1
, &schp
));
7489 if (!is_error(ret
)) {
7490 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
7492 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
7493 unlock_user_struct(target_schp
, arg2
, 1);
7497 case TARGET_NR_sched_setscheduler
:
7499 struct sched_param
*target_schp
;
7500 struct sched_param schp
;
7501 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
7503 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
7504 unlock_user_struct(target_schp
, arg3
, 0);
7505 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
7508 case TARGET_NR_sched_getscheduler
:
7509 ret
= get_errno(sched_getscheduler(arg1
));
7511 case TARGET_NR_sched_yield
:
7512 ret
= get_errno(sched_yield());
7514 case TARGET_NR_sched_get_priority_max
:
7515 ret
= get_errno(sched_get_priority_max(arg1
));
7517 case TARGET_NR_sched_get_priority_min
:
7518 ret
= get_errno(sched_get_priority_min(arg1
));
7520 case TARGET_NR_sched_rr_get_interval
:
7523 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
7524 if (!is_error(ret
)) {
7525 host_to_target_timespec(arg2
, &ts
);
7529 case TARGET_NR_nanosleep
:
7531 struct timespec req
, rem
;
7532 target_to_host_timespec(&req
, arg1
);
7533 ret
= get_errno(nanosleep(&req
, &rem
));
7534 if (is_error(ret
) && arg2
) {
7535 host_to_target_timespec(arg2
, &rem
);
7539 #ifdef TARGET_NR_query_module
7540 case TARGET_NR_query_module
:
7543 #ifdef TARGET_NR_nfsservctl
7544 case TARGET_NR_nfsservctl
:
7547 case TARGET_NR_prctl
:
7549 case PR_GET_PDEATHSIG
:
7552 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
7553 if (!is_error(ret
) && arg2
7554 && put_user_ual(deathsig
, arg2
)) {
7562 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
7566 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
7568 unlock_user(name
, arg2
, 16);
7573 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
7577 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
7579 unlock_user(name
, arg2
, 0);
7584 /* Most prctl options have no pointer arguments */
7585 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
7589 #ifdef TARGET_NR_arch_prctl
7590 case TARGET_NR_arch_prctl
:
7591 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
7592 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
7598 #ifdef TARGET_NR_pread64
7599 case TARGET_NR_pread64
:
7600 if (regpairs_aligned(cpu_env
)) {
7604 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
7606 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
7607 unlock_user(p
, arg2
, ret
);
7609 case TARGET_NR_pwrite64
:
7610 if (regpairs_aligned(cpu_env
)) {
7614 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
7616 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
7617 unlock_user(p
, arg2
, 0);
7620 case TARGET_NR_getcwd
:
7621 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
7623 ret
= get_errno(sys_getcwd1(p
, arg2
));
7624 unlock_user(p
, arg1
, ret
);
7626 case TARGET_NR_capget
:
7627 case TARGET_NR_capset
:
7629 struct target_user_cap_header
*target_header
;
7630 struct target_user_cap_data
*target_data
= NULL
;
7631 struct __user_cap_header_struct header
;
7632 struct __user_cap_data_struct data
[2];
7633 struct __user_cap_data_struct
*dataptr
= NULL
;
7634 int i
, target_datalen
;
7637 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
7640 header
.version
= tswap32(target_header
->version
);
7641 header
.pid
= tswap32(target_header
->pid
);
7643 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
7644 /* Version 2 and up takes pointer to two user_data structs */
7648 target_datalen
= sizeof(*target_data
) * data_items
;
7651 if (num
== TARGET_NR_capget
) {
7652 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
7654 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
7657 unlock_user_struct(target_header
, arg1
, 0);
7661 if (num
== TARGET_NR_capset
) {
7662 for (i
= 0; i
< data_items
; i
++) {
7663 data
[i
].effective
= tswap32(target_data
[i
].effective
);
7664 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
7665 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
7672 if (num
== TARGET_NR_capget
) {
7673 ret
= get_errno(capget(&header
, dataptr
));
7675 ret
= get_errno(capset(&header
, dataptr
));
7678 /* The kernel always updates version for both capget and capset */
7679 target_header
->version
= tswap32(header
.version
);
7680 unlock_user_struct(target_header
, arg1
, 1);
7683 if (num
== TARGET_NR_capget
) {
7684 for (i
= 0; i
< data_items
; i
++) {
7685 target_data
[i
].effective
= tswap32(data
[i
].effective
);
7686 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
7687 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
7689 unlock_user(target_data
, arg2
, target_datalen
);
7691 unlock_user(target_data
, arg2
, 0);
7696 case TARGET_NR_sigaltstack
:
7697 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
7698 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
7699 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
7700 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUArchState
*)cpu_env
));
7706 #ifdef CONFIG_SENDFILE
7707 case TARGET_NR_sendfile
:
7712 ret
= get_user_sal(off
, arg3
);
7713 if (is_error(ret
)) {
7718 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
7719 if (!is_error(ret
) && arg3
) {
7720 abi_long ret2
= put_user_sal(off
, arg3
);
7721 if (is_error(ret2
)) {
7727 #ifdef TARGET_NR_sendfile64
7728 case TARGET_NR_sendfile64
:
7733 ret
= get_user_s64(off
, arg3
);
7734 if (is_error(ret
)) {
7739 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
7740 if (!is_error(ret
) && arg3
) {
7741 abi_long ret2
= put_user_s64(off
, arg3
);
7742 if (is_error(ret2
)) {
7750 case TARGET_NR_sendfile
:
7751 #ifdef TARGET_NR_sendfile64
7752 case TARGET_NR_sendfile64
:
7757 #ifdef TARGET_NR_getpmsg
7758 case TARGET_NR_getpmsg
:
7761 #ifdef TARGET_NR_putpmsg
7762 case TARGET_NR_putpmsg
:
7765 #ifdef TARGET_NR_vfork
7766 case TARGET_NR_vfork
:
7767 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
7771 #ifdef TARGET_NR_ugetrlimit
7772 case TARGET_NR_ugetrlimit
:
7775 int resource
= target_to_host_resource(arg1
);
7776 ret
= get_errno(getrlimit(resource
, &rlim
));
7777 if (!is_error(ret
)) {
7778 struct target_rlimit
*target_rlim
;
7779 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
7781 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
7782 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
7783 unlock_user_struct(target_rlim
, arg2
, 1);
7788 #ifdef TARGET_NR_truncate64
7789 case TARGET_NR_truncate64
:
7790 if (!(p
= lock_user_string(arg1
)))
7792 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
7793 unlock_user(p
, arg1
, 0);
7796 #ifdef TARGET_NR_ftruncate64
7797 case TARGET_NR_ftruncate64
:
7798 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
7801 #ifdef TARGET_NR_stat64
7802 case TARGET_NR_stat64
:
7803 if (!(p
= lock_user_string(arg1
)))
7805 ret
= get_errno(stat(path(p
), &st
));
7806 unlock_user(p
, arg1
, 0);
7808 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
7811 #ifdef TARGET_NR_lstat64
7812 case TARGET_NR_lstat64
:
7813 if (!(p
= lock_user_string(arg1
)))
7815 ret
= get_errno(lstat(path(p
), &st
));
7816 unlock_user(p
, arg1
, 0);
7818 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
7821 #ifdef TARGET_NR_fstat64
7822 case TARGET_NR_fstat64
:
7823 ret
= get_errno(fstat(arg1
, &st
));
7825 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
7828 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
7829 #ifdef TARGET_NR_fstatat64
7830 case TARGET_NR_fstatat64
:
7832 #ifdef TARGET_NR_newfstatat
7833 case TARGET_NR_newfstatat
:
7835 if (!(p
= lock_user_string(arg2
)))
7837 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
7839 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
7842 case TARGET_NR_lchown
:
7843 if (!(p
= lock_user_string(arg1
)))
7845 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
7846 unlock_user(p
, arg1
, 0);
7848 #ifdef TARGET_NR_getuid
7849 case TARGET_NR_getuid
:
7850 ret
= get_errno(high2lowuid(getuid()));
7853 #ifdef TARGET_NR_getgid
7854 case TARGET_NR_getgid
:
7855 ret
= get_errno(high2lowgid(getgid()));
7858 #ifdef TARGET_NR_geteuid
7859 case TARGET_NR_geteuid
:
7860 ret
= get_errno(high2lowuid(geteuid()));
7863 #ifdef TARGET_NR_getegid
7864 case TARGET_NR_getegid
:
7865 ret
= get_errno(high2lowgid(getegid()));
7868 case TARGET_NR_setreuid
:
7869 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
7871 case TARGET_NR_setregid
:
7872 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
7874 case TARGET_NR_getgroups
:
7876 int gidsetsize
= arg1
;
7877 target_id
*target_grouplist
;
7881 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
7882 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
7883 if (gidsetsize
== 0)
7885 if (!is_error(ret
)) {
7886 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
7887 if (!target_grouplist
)
7889 for(i
= 0;i
< ret
; i
++)
7890 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
7891 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
7895 case TARGET_NR_setgroups
:
7897 int gidsetsize
= arg1
;
7898 target_id
*target_grouplist
;
7899 gid_t
*grouplist
= NULL
;
7902 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
7903 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
7904 if (!target_grouplist
) {
7905 ret
= -TARGET_EFAULT
;
7908 for (i
= 0; i
< gidsetsize
; i
++) {
7909 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
7911 unlock_user(target_grouplist
, arg2
, 0);
7913 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
7916 case TARGET_NR_fchown
:
7917 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
7919 #if defined(TARGET_NR_fchownat)
7920 case TARGET_NR_fchownat
:
7921 if (!(p
= lock_user_string(arg2
)))
7923 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
7924 low2highgid(arg4
), arg5
));
7925 unlock_user(p
, arg2
, 0);
7928 #ifdef TARGET_NR_setresuid
7929 case TARGET_NR_setresuid
:
7930 ret
= get_errno(setresuid(low2highuid(arg1
),
7932 low2highuid(arg3
)));
7935 #ifdef TARGET_NR_getresuid
7936 case TARGET_NR_getresuid
:
7938 uid_t ruid
, euid
, suid
;
7939 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
7940 if (!is_error(ret
)) {
7941 if (put_user_id(high2lowuid(ruid
), arg1
)
7942 || put_user_id(high2lowuid(euid
), arg2
)
7943 || put_user_id(high2lowuid(suid
), arg3
))
7949 #ifdef TARGET_NR_getresgid
7950 case TARGET_NR_setresgid
:
7951 ret
= get_errno(setresgid(low2highgid(arg1
),
7953 low2highgid(arg3
)));
7956 #ifdef TARGET_NR_getresgid
7957 case TARGET_NR_getresgid
:
7959 gid_t rgid
, egid
, sgid
;
7960 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
7961 if (!is_error(ret
)) {
7962 if (put_user_id(high2lowgid(rgid
), arg1
)
7963 || put_user_id(high2lowgid(egid
), arg2
)
7964 || put_user_id(high2lowgid(sgid
), arg3
))
7970 case TARGET_NR_chown
:
7971 if (!(p
= lock_user_string(arg1
)))
7973 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
7974 unlock_user(p
, arg1
, 0);
7976 case TARGET_NR_setuid
:
7977 ret
= get_errno(setuid(low2highuid(arg1
)));
7979 case TARGET_NR_setgid
:
7980 ret
= get_errno(setgid(low2highgid(arg1
)));
7982 case TARGET_NR_setfsuid
:
7983 ret
= get_errno(setfsuid(arg1
));
7985 case TARGET_NR_setfsgid
:
7986 ret
= get_errno(setfsgid(arg1
));
7989 #ifdef TARGET_NR_lchown32
7990 case TARGET_NR_lchown32
:
7991 if (!(p
= lock_user_string(arg1
)))
7993 ret
= get_errno(lchown(p
, arg2
, arg3
));
7994 unlock_user(p
, arg1
, 0);
7997 #ifdef TARGET_NR_getuid32
7998 case TARGET_NR_getuid32
:
7999 ret
= get_errno(getuid());
8003 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
8004 /* Alpha specific */
8005 case TARGET_NR_getxuid
:
8009 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
8011 ret
= get_errno(getuid());
8014 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
8015 /* Alpha specific */
8016 case TARGET_NR_getxgid
:
8020 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
8022 ret
= get_errno(getgid());
8025 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
8026 /* Alpha specific */
8027 case TARGET_NR_osf_getsysinfo
:
8028 ret
= -TARGET_EOPNOTSUPP
;
8030 case TARGET_GSI_IEEE_FP_CONTROL
:
8032 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
8034 /* Copied from linux ieee_fpcr_to_swcr. */
8035 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
8036 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
8037 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
8038 | SWCR_TRAP_ENABLE_DZE
8039 | SWCR_TRAP_ENABLE_OVF
);
8040 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
8041 | SWCR_TRAP_ENABLE_INE
);
8042 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
8043 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
8045 if (put_user_u64 (swcr
, arg2
))
8051 /* case GSI_IEEE_STATE_AT_SIGNAL:
8052 -- Not implemented in linux kernel.
8054 -- Retrieves current unaligned access state; not much used.
8056 -- Retrieves implver information; surely not used.
8058 -- Grabs a copy of the HWRPB; surely not used.
8063 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
8064 /* Alpha specific */
8065 case TARGET_NR_osf_setsysinfo
:
8066 ret
= -TARGET_EOPNOTSUPP
;
8068 case TARGET_SSI_IEEE_FP_CONTROL
:
8070 uint64_t swcr
, fpcr
, orig_fpcr
;
8072 if (get_user_u64 (swcr
, arg2
)) {
8075 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
8076 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
8078 /* Copied from linux ieee_swcr_to_fpcr. */
8079 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
8080 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
8081 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
8082 | SWCR_TRAP_ENABLE_DZE
8083 | SWCR_TRAP_ENABLE_OVF
)) << 48;
8084 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
8085 | SWCR_TRAP_ENABLE_INE
)) << 57;
8086 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
8087 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
8089 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
8094 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
8096 uint64_t exc
, fpcr
, orig_fpcr
;
8099 if (get_user_u64(exc
, arg2
)) {
8103 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
8105 /* We only add to the exception status here. */
8106 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
8108 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
8111 /* Old exceptions are not signaled. */
8112 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
8114 /* If any exceptions set by this call,
8115 and are unmasked, send a signal. */
8117 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
8118 si_code
= TARGET_FPE_FLTRES
;
8120 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
8121 si_code
= TARGET_FPE_FLTUND
;
8123 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
8124 si_code
= TARGET_FPE_FLTOVF
;
8126 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
8127 si_code
= TARGET_FPE_FLTDIV
;
8129 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
8130 si_code
= TARGET_FPE_FLTINV
;
8133 target_siginfo_t info
;
8134 info
.si_signo
= SIGFPE
;
8136 info
.si_code
= si_code
;
8137 info
._sifields
._sigfault
._addr
8138 = ((CPUArchState
*)cpu_env
)->pc
;
8139 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
8144 /* case SSI_NVPAIRS:
8145 -- Used with SSIN_UACPROC to enable unaligned accesses.
8146 case SSI_IEEE_STATE_AT_SIGNAL:
8147 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
8148 -- Not implemented in linux kernel
8153 #ifdef TARGET_NR_osf_sigprocmask
8154 /* Alpha specific. */
8155 case TARGET_NR_osf_sigprocmask
:
8159 sigset_t set
, oldset
;
8162 case TARGET_SIG_BLOCK
:
8165 case TARGET_SIG_UNBLOCK
:
8168 case TARGET_SIG_SETMASK
:
8172 ret
= -TARGET_EINVAL
;
8176 target_to_host_old_sigset(&set
, &mask
);
8177 do_sigprocmask(how
, &set
, &oldset
);
8178 host_to_target_old_sigset(&mask
, &oldset
);
8184 #ifdef TARGET_NR_getgid32
8185 case TARGET_NR_getgid32
:
8186 ret
= get_errno(getgid());
8189 #ifdef TARGET_NR_geteuid32
8190 case TARGET_NR_geteuid32
:
8191 ret
= get_errno(geteuid());
8194 #ifdef TARGET_NR_getegid32
8195 case TARGET_NR_getegid32
:
8196 ret
= get_errno(getegid());
8199 #ifdef TARGET_NR_setreuid32
8200 case TARGET_NR_setreuid32
:
8201 ret
= get_errno(setreuid(arg1
, arg2
));
8204 #ifdef TARGET_NR_setregid32
8205 case TARGET_NR_setregid32
:
8206 ret
= get_errno(setregid(arg1
, arg2
));
8209 #ifdef TARGET_NR_getgroups32
8210 case TARGET_NR_getgroups32
:
8212 int gidsetsize
= arg1
;
8213 uint32_t *target_grouplist
;
8217 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
8218 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
8219 if (gidsetsize
== 0)
8221 if (!is_error(ret
)) {
8222 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
8223 if (!target_grouplist
) {
8224 ret
= -TARGET_EFAULT
;
8227 for(i
= 0;i
< ret
; i
++)
8228 target_grouplist
[i
] = tswap32(grouplist
[i
]);
8229 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
8234 #ifdef TARGET_NR_setgroups32
8235 case TARGET_NR_setgroups32
:
8237 int gidsetsize
= arg1
;
8238 uint32_t *target_grouplist
;
8242 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
8243 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
8244 if (!target_grouplist
) {
8245 ret
= -TARGET_EFAULT
;
8248 for(i
= 0;i
< gidsetsize
; i
++)
8249 grouplist
[i
] = tswap32(target_grouplist
[i
]);
8250 unlock_user(target_grouplist
, arg2
, 0);
8251 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
8255 #ifdef TARGET_NR_fchown32
8256 case TARGET_NR_fchown32
:
8257 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
8260 #ifdef TARGET_NR_setresuid32
8261 case TARGET_NR_setresuid32
:
8262 ret
= get_errno(setresuid(arg1
, arg2
, arg3
));
8265 #ifdef TARGET_NR_getresuid32
8266 case TARGET_NR_getresuid32
:
8268 uid_t ruid
, euid
, suid
;
8269 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
8270 if (!is_error(ret
)) {
8271 if (put_user_u32(ruid
, arg1
)
8272 || put_user_u32(euid
, arg2
)
8273 || put_user_u32(suid
, arg3
))
8279 #ifdef TARGET_NR_setresgid32
8280 case TARGET_NR_setresgid32
:
8281 ret
= get_errno(setresgid(arg1
, arg2
, arg3
));
8284 #ifdef TARGET_NR_getresgid32
8285 case TARGET_NR_getresgid32
:
8287 gid_t rgid
, egid
, sgid
;
8288 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
8289 if (!is_error(ret
)) {
8290 if (put_user_u32(rgid
, arg1
)
8291 || put_user_u32(egid
, arg2
)
8292 || put_user_u32(sgid
, arg3
))
8298 #ifdef TARGET_NR_chown32
8299 case TARGET_NR_chown32
:
8300 if (!(p
= lock_user_string(arg1
)))
8302 ret
= get_errno(chown(p
, arg2
, arg3
));
8303 unlock_user(p
, arg1
, 0);
8306 #ifdef TARGET_NR_setuid32
8307 case TARGET_NR_setuid32
:
8308 ret
= get_errno(setuid(arg1
));
8311 #ifdef TARGET_NR_setgid32
8312 case TARGET_NR_setgid32
:
8313 ret
= get_errno(setgid(arg1
));
8316 #ifdef TARGET_NR_setfsuid32
8317 case TARGET_NR_setfsuid32
:
8318 ret
= get_errno(setfsuid(arg1
));
8321 #ifdef TARGET_NR_setfsgid32
8322 case TARGET_NR_setfsgid32
:
8323 ret
= get_errno(setfsgid(arg1
));
8327 case TARGET_NR_pivot_root
:
8329 #ifdef TARGET_NR_mincore
8330 case TARGET_NR_mincore
:
8333 ret
= -TARGET_EFAULT
;
8334 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
8336 if (!(p
= lock_user_string(arg3
)))
8338 ret
= get_errno(mincore(a
, arg2
, p
));
8339 unlock_user(p
, arg3
, ret
);
8341 unlock_user(a
, arg1
, 0);
8345 #ifdef TARGET_NR_arm_fadvise64_64
8346 case TARGET_NR_arm_fadvise64_64
:
8349 * arm_fadvise64_64 looks like fadvise64_64 but
8350 * with different argument order
8358 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
8359 #ifdef TARGET_NR_fadvise64_64
8360 case TARGET_NR_fadvise64_64
:
8362 #ifdef TARGET_NR_fadvise64
8363 case TARGET_NR_fadvise64
:
8367 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
8368 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
8369 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
8370 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
8374 ret
= -posix_fadvise(arg1
, arg2
, arg3
, arg4
);
8377 #ifdef TARGET_NR_madvise
8378 case TARGET_NR_madvise
:
8379 /* A straight passthrough may not be safe because qemu sometimes
8380 turns private file-backed mappings into anonymous mappings.
8381 This will break MADV_DONTNEED.
8382 This is a hint, so ignoring and returning success is ok. */
8386 #if TARGET_ABI_BITS == 32
8387 case TARGET_NR_fcntl64
:
8391 struct target_flock64
*target_fl
;
8393 struct target_eabi_flock64
*target_efl
;
8396 cmd
= target_to_host_fcntl_cmd(arg2
);
8397 if (cmd
== -TARGET_EINVAL
) {
8403 case TARGET_F_GETLK64
:
8405 if (((CPUARMState
*)cpu_env
)->eabi
) {
8406 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
8408 fl
.l_type
= tswap16(target_efl
->l_type
);
8409 fl
.l_whence
= tswap16(target_efl
->l_whence
);
8410 fl
.l_start
= tswap64(target_efl
->l_start
);
8411 fl
.l_len
= tswap64(target_efl
->l_len
);
8412 fl
.l_pid
= tswap32(target_efl
->l_pid
);
8413 unlock_user_struct(target_efl
, arg3
, 0);
8417 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
8419 fl
.l_type
= tswap16(target_fl
->l_type
);
8420 fl
.l_whence
= tswap16(target_fl
->l_whence
);
8421 fl
.l_start
= tswap64(target_fl
->l_start
);
8422 fl
.l_len
= tswap64(target_fl
->l_len
);
8423 fl
.l_pid
= tswap32(target_fl
->l_pid
);
8424 unlock_user_struct(target_fl
, arg3
, 0);
8426 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
8429 if (((CPUARMState
*)cpu_env
)->eabi
) {
8430 if (!lock_user_struct(VERIFY_WRITE
, target_efl
, arg3
, 0))
8432 target_efl
->l_type
= tswap16(fl
.l_type
);
8433 target_efl
->l_whence
= tswap16(fl
.l_whence
);
8434 target_efl
->l_start
= tswap64(fl
.l_start
);
8435 target_efl
->l_len
= tswap64(fl
.l_len
);
8436 target_efl
->l_pid
= tswap32(fl
.l_pid
);
8437 unlock_user_struct(target_efl
, arg3
, 1);
8441 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg3
, 0))
8443 target_fl
->l_type
= tswap16(fl
.l_type
);
8444 target_fl
->l_whence
= tswap16(fl
.l_whence
);
8445 target_fl
->l_start
= tswap64(fl
.l_start
);
8446 target_fl
->l_len
= tswap64(fl
.l_len
);
8447 target_fl
->l_pid
= tswap32(fl
.l_pid
);
8448 unlock_user_struct(target_fl
, arg3
, 1);
8453 case TARGET_F_SETLK64
:
8454 case TARGET_F_SETLKW64
:
8456 if (((CPUARMState
*)cpu_env
)->eabi
) {
8457 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
8459 fl
.l_type
= tswap16(target_efl
->l_type
);
8460 fl
.l_whence
= tswap16(target_efl
->l_whence
);
8461 fl
.l_start
= tswap64(target_efl
->l_start
);
8462 fl
.l_len
= tswap64(target_efl
->l_len
);
8463 fl
.l_pid
= tswap32(target_efl
->l_pid
);
8464 unlock_user_struct(target_efl
, arg3
, 0);
8468 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
8470 fl
.l_type
= tswap16(target_fl
->l_type
);
8471 fl
.l_whence
= tswap16(target_fl
->l_whence
);
8472 fl
.l_start
= tswap64(target_fl
->l_start
);
8473 fl
.l_len
= tswap64(target_fl
->l_len
);
8474 fl
.l_pid
= tswap32(target_fl
->l_pid
);
8475 unlock_user_struct(target_fl
, arg3
, 0);
8477 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
8480 ret
= do_fcntl(arg1
, arg2
, arg3
);
8486 #ifdef TARGET_NR_cacheflush
8487 case TARGET_NR_cacheflush
:
8488 /* self-modifying code is handled automatically, so nothing needed */
8492 #ifdef TARGET_NR_security
8493 case TARGET_NR_security
:
8496 #ifdef TARGET_NR_getpagesize
8497 case TARGET_NR_getpagesize
:
8498 ret
= TARGET_PAGE_SIZE
;
8501 case TARGET_NR_gettid
:
8502 ret
= get_errno(gettid());
8504 #ifdef TARGET_NR_readahead
8505 case TARGET_NR_readahead
:
8506 #if TARGET_ABI_BITS == 32
8507 if (regpairs_aligned(cpu_env
)) {
8512 ret
= get_errno(readahead(arg1
, ((off64_t
)arg3
<< 32) | arg2
, arg4
));
8514 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
8519 #ifdef TARGET_NR_setxattr
8520 case TARGET_NR_listxattr
:
8521 case TARGET_NR_llistxattr
:
8525 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8527 ret
= -TARGET_EFAULT
;
8531 p
= lock_user_string(arg1
);
8533 if (num
== TARGET_NR_listxattr
) {
8534 ret
= get_errno(listxattr(p
, b
, arg3
));
8536 ret
= get_errno(llistxattr(p
, b
, arg3
));
8539 ret
= -TARGET_EFAULT
;
8541 unlock_user(p
, arg1
, 0);
8542 unlock_user(b
, arg2
, arg3
);
8545 case TARGET_NR_flistxattr
:
8549 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8551 ret
= -TARGET_EFAULT
;
8555 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
8556 unlock_user(b
, arg2
, arg3
);
8559 case TARGET_NR_setxattr
:
8560 case TARGET_NR_lsetxattr
:
8562 void *p
, *n
, *v
= 0;
8564 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
8566 ret
= -TARGET_EFAULT
;
8570 p
= lock_user_string(arg1
);
8571 n
= lock_user_string(arg2
);
8573 if (num
== TARGET_NR_setxattr
) {
8574 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
8576 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
8579 ret
= -TARGET_EFAULT
;
8581 unlock_user(p
, arg1
, 0);
8582 unlock_user(n
, arg2
, 0);
8583 unlock_user(v
, arg3
, 0);
8586 case TARGET_NR_fsetxattr
:
8590 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
8592 ret
= -TARGET_EFAULT
;
8596 n
= lock_user_string(arg2
);
8598 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
8600 ret
= -TARGET_EFAULT
;
8602 unlock_user(n
, arg2
, 0);
8603 unlock_user(v
, arg3
, 0);
8606 case TARGET_NR_getxattr
:
8607 case TARGET_NR_lgetxattr
:
8609 void *p
, *n
, *v
= 0;
8611 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
8613 ret
= -TARGET_EFAULT
;
8617 p
= lock_user_string(arg1
);
8618 n
= lock_user_string(arg2
);
8620 if (num
== TARGET_NR_getxattr
) {
8621 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
8623 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
8626 ret
= -TARGET_EFAULT
;
8628 unlock_user(p
, arg1
, 0);
8629 unlock_user(n
, arg2
, 0);
8630 unlock_user(v
, arg3
, arg4
);
8633 case TARGET_NR_fgetxattr
:
8637 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
8639 ret
= -TARGET_EFAULT
;
8643 n
= lock_user_string(arg2
);
8645 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
8647 ret
= -TARGET_EFAULT
;
8649 unlock_user(n
, arg2
, 0);
8650 unlock_user(v
, arg3
, arg4
);
8653 case TARGET_NR_removexattr
:
8654 case TARGET_NR_lremovexattr
:
8657 p
= lock_user_string(arg1
);
8658 n
= lock_user_string(arg2
);
8660 if (num
== TARGET_NR_removexattr
) {
8661 ret
= get_errno(removexattr(p
, n
));
8663 ret
= get_errno(lremovexattr(p
, n
));
8666 ret
= -TARGET_EFAULT
;
8668 unlock_user(p
, arg1
, 0);
8669 unlock_user(n
, arg2
, 0);
8672 case TARGET_NR_fremovexattr
:
8675 n
= lock_user_string(arg2
);
8677 ret
= get_errno(fremovexattr(arg1
, n
));
8679 ret
= -TARGET_EFAULT
;
8681 unlock_user(n
, arg2
, 0);
8685 #endif /* CONFIG_ATTR */
8686 #ifdef TARGET_NR_set_thread_area
8687 case TARGET_NR_set_thread_area
:
8688 #if defined(TARGET_MIPS)
8689 ((CPUMIPSState
*) cpu_env
)->tls_value
= arg1
;
8692 #elif defined(TARGET_CRIS)
8694 ret
= -TARGET_EINVAL
;
8696 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
8700 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
8701 ret
= do_set_thread_area(cpu_env
, arg1
);
8703 #elif defined(TARGET_M68K)
8705 TaskState
*ts
= cpu
->opaque
;
8706 ts
->tp_value
= arg1
;
8711 goto unimplemented_nowarn
;
8714 #ifdef TARGET_NR_get_thread_area
8715 case TARGET_NR_get_thread_area
:
8716 #if defined(TARGET_I386) && defined(TARGET_ABI32)
8717 ret
= do_get_thread_area(cpu_env
, arg1
);
8719 #elif defined(TARGET_M68K)
8721 TaskState
*ts
= cpu
->opaque
;
8726 goto unimplemented_nowarn
;
8729 #ifdef TARGET_NR_getdomainname
8730 case TARGET_NR_getdomainname
:
8731 goto unimplemented_nowarn
;
8734 #ifdef TARGET_NR_clock_gettime
8735 case TARGET_NR_clock_gettime
:
8738 ret
= get_errno(clock_gettime(arg1
, &ts
));
8739 if (!is_error(ret
)) {
8740 host_to_target_timespec(arg2
, &ts
);
8745 #ifdef TARGET_NR_clock_getres
8746 case TARGET_NR_clock_getres
:
8749 ret
= get_errno(clock_getres(arg1
, &ts
));
8750 if (!is_error(ret
)) {
8751 host_to_target_timespec(arg2
, &ts
);
8756 #ifdef TARGET_NR_clock_nanosleep
8757 case TARGET_NR_clock_nanosleep
:
8760 target_to_host_timespec(&ts
, arg3
);
8761 ret
= get_errno(clock_nanosleep(arg1
, arg2
, &ts
, arg4
? &ts
: NULL
));
8763 host_to_target_timespec(arg4
, &ts
);
8768 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
8769 case TARGET_NR_set_tid_address
:
8770 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
8774 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
8775 case TARGET_NR_tkill
:
8776 ret
= get_errno(sys_tkill((int)arg1
, target_to_host_signal(arg2
)));
8780 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
8781 case TARGET_NR_tgkill
:
8782 ret
= get_errno(sys_tgkill((int)arg1
, (int)arg2
,
8783 target_to_host_signal(arg3
)));
8787 #ifdef TARGET_NR_set_robust_list
8788 case TARGET_NR_set_robust_list
:
8789 case TARGET_NR_get_robust_list
:
8790 /* The ABI for supporting robust futexes has userspace pass
8791 * the kernel a pointer to a linked list which is updated by
8792 * userspace after the syscall; the list is walked by the kernel
8793 * when the thread exits. Since the linked list in QEMU guest
8794 * memory isn't a valid linked list for the host and we have
8795 * no way to reliably intercept the thread-death event, we can't
8796 * support these. Silently return ENOSYS so that guest userspace
8797 * falls back to a non-robust futex implementation (which should
8798 * be OK except in the corner case of the guest crashing while
8799 * holding a mutex that is shared with another process via
8802 goto unimplemented_nowarn
;
8805 #if defined(TARGET_NR_utimensat)
8806 case TARGET_NR_utimensat
:
8808 struct timespec
*tsp
, ts
[2];
8812 target_to_host_timespec(ts
, arg3
);
8813 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
8817 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
8819 if (!(p
= lock_user_string(arg2
))) {
8820 ret
= -TARGET_EFAULT
;
8823 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
8824 unlock_user(p
, arg2
, 0);
8829 case TARGET_NR_futex
:
8830 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8832 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
8833 case TARGET_NR_inotify_init
:
8834 ret
= get_errno(sys_inotify_init());
8837 #ifdef CONFIG_INOTIFY1
8838 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
8839 case TARGET_NR_inotify_init1
:
8840 ret
= get_errno(sys_inotify_init1(arg1
));
8844 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
8845 case TARGET_NR_inotify_add_watch
:
8846 p
= lock_user_string(arg2
);
8847 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
8848 unlock_user(p
, arg2
, 0);
8851 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
8852 case TARGET_NR_inotify_rm_watch
:
8853 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
8857 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
8858 case TARGET_NR_mq_open
:
8860 struct mq_attr posix_mq_attr
;
8862 p
= lock_user_string(arg1
- 1);
8864 copy_from_user_mq_attr (&posix_mq_attr
, arg4
);
8865 ret
= get_errno(mq_open(p
, arg2
, arg3
, &posix_mq_attr
));
8866 unlock_user (p
, arg1
, 0);
8870 case TARGET_NR_mq_unlink
:
8871 p
= lock_user_string(arg1
- 1);
8872 ret
= get_errno(mq_unlink(p
));
8873 unlock_user (p
, arg1
, 0);
8876 case TARGET_NR_mq_timedsend
:
8880 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
8882 target_to_host_timespec(&ts
, arg5
);
8883 ret
= get_errno(mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
8884 host_to_target_timespec(arg5
, &ts
);
8887 ret
= get_errno(mq_send(arg1
, p
, arg3
, arg4
));
8888 unlock_user (p
, arg2
, arg3
);
8892 case TARGET_NR_mq_timedreceive
:
8897 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
8899 target_to_host_timespec(&ts
, arg5
);
8900 ret
= get_errno(mq_timedreceive(arg1
, p
, arg3
, &prio
, &ts
));
8901 host_to_target_timespec(arg5
, &ts
);
8904 ret
= get_errno(mq_receive(arg1
, p
, arg3
, &prio
));
8905 unlock_user (p
, arg2
, arg3
);
8907 put_user_u32(prio
, arg4
);
8911 /* Not implemented for now... */
8912 /* case TARGET_NR_mq_notify: */
8915 case TARGET_NR_mq_getsetattr
:
8917 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
8920 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
8921 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
8924 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
8925 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
8932 #ifdef CONFIG_SPLICE
8933 #ifdef TARGET_NR_tee
8936 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
8940 #ifdef TARGET_NR_splice
8941 case TARGET_NR_splice
:
8943 loff_t loff_in
, loff_out
;
8944 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
8946 get_user_u64(loff_in
, arg2
);
8947 ploff_in
= &loff_in
;
8950 get_user_u64(loff_out
, arg2
);
8951 ploff_out
= &loff_out
;
8953 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
8957 #ifdef TARGET_NR_vmsplice
8958 case TARGET_NR_vmsplice
:
8960 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
8962 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
8963 unlock_iovec(vec
, arg2
, arg3
, 0);
8965 ret
= -host_to_target_errno(errno
);
8970 #endif /* CONFIG_SPLICE */
8971 #ifdef CONFIG_EVENTFD
8972 #if defined(TARGET_NR_eventfd)
8973 case TARGET_NR_eventfd
:
8974 ret
= get_errno(eventfd(arg1
, 0));
8977 #if defined(TARGET_NR_eventfd2)
8978 case TARGET_NR_eventfd2
:
8980 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
8981 if (arg2
& TARGET_O_NONBLOCK
) {
8982 host_flags
|= O_NONBLOCK
;
8984 if (arg2
& TARGET_O_CLOEXEC
) {
8985 host_flags
|= O_CLOEXEC
;
8987 ret
= get_errno(eventfd(arg1
, host_flags
));
8991 #endif /* CONFIG_EVENTFD */
8992 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
8993 case TARGET_NR_fallocate
:
8994 #if TARGET_ABI_BITS == 32
8995 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
8996 target_offset64(arg5
, arg6
)));
8998 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
9002 #if defined(CONFIG_SYNC_FILE_RANGE)
9003 #if defined(TARGET_NR_sync_file_range)
9004 case TARGET_NR_sync_file_range
:
9005 #if TARGET_ABI_BITS == 32
9006 #if defined(TARGET_MIPS)
9007 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
9008 target_offset64(arg5
, arg6
), arg7
));
9010 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
9011 target_offset64(arg4
, arg5
), arg6
));
9012 #endif /* !TARGET_MIPS */
9014 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
9018 #if defined(TARGET_NR_sync_file_range2)
9019 case TARGET_NR_sync_file_range2
:
9020 /* This is like sync_file_range but the arguments are reordered */
9021 #if TARGET_ABI_BITS == 32
9022 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
9023 target_offset64(arg5
, arg6
), arg2
));
9025 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
9030 #if defined(CONFIG_EPOLL)
9031 #if defined(TARGET_NR_epoll_create)
9032 case TARGET_NR_epoll_create
:
9033 ret
= get_errno(epoll_create(arg1
));
9036 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
9037 case TARGET_NR_epoll_create1
:
9038 ret
= get_errno(epoll_create1(arg1
));
9041 #if defined(TARGET_NR_epoll_ctl)
9042 case TARGET_NR_epoll_ctl
:
9044 struct epoll_event ep
;
9045 struct epoll_event
*epp
= 0;
9047 struct target_epoll_event
*target_ep
;
9048 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
9051 ep
.events
= tswap32(target_ep
->events
);
9052 /* The epoll_data_t union is just opaque data to the kernel,
9053 * so we transfer all 64 bits across and need not worry what
9054 * actual data type it is.
9056 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
9057 unlock_user_struct(target_ep
, arg4
, 0);
9060 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
9065 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
9066 #define IMPLEMENT_EPOLL_PWAIT
9068 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
9069 #if defined(TARGET_NR_epoll_wait)
9070 case TARGET_NR_epoll_wait
:
9072 #if defined(IMPLEMENT_EPOLL_PWAIT)
9073 case TARGET_NR_epoll_pwait
:
9076 struct target_epoll_event
*target_ep
;
9077 struct epoll_event
*ep
;
9079 int maxevents
= arg3
;
9082 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
9083 maxevents
* sizeof(struct target_epoll_event
), 1);
9088 ep
= alloca(maxevents
* sizeof(struct epoll_event
));
9091 #if defined(IMPLEMENT_EPOLL_PWAIT)
9092 case TARGET_NR_epoll_pwait
:
9094 target_sigset_t
*target_set
;
9095 sigset_t _set
, *set
= &_set
;
9098 target_set
= lock_user(VERIFY_READ
, arg5
,
9099 sizeof(target_sigset_t
), 1);
9101 unlock_user(target_ep
, arg2
, 0);
9104 target_to_host_sigset(set
, target_set
);
9105 unlock_user(target_set
, arg5
, 0);
9110 ret
= get_errno(epoll_pwait(epfd
, ep
, maxevents
, timeout
, set
));
9114 #if defined(TARGET_NR_epoll_wait)
9115 case TARGET_NR_epoll_wait
:
9116 ret
= get_errno(epoll_wait(epfd
, ep
, maxevents
, timeout
));
9120 ret
= -TARGET_ENOSYS
;
9122 if (!is_error(ret
)) {
9124 for (i
= 0; i
< ret
; i
++) {
9125 target_ep
[i
].events
= tswap32(ep
[i
].events
);
9126 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
9129 unlock_user(target_ep
, arg2
, ret
* sizeof(struct target_epoll_event
));
9134 #ifdef TARGET_NR_prlimit64
9135 case TARGET_NR_prlimit64
:
9137 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
9138 struct target_rlimit64
*target_rnew
, *target_rold
;
9139 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
9141 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
9144 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
9145 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
9146 unlock_user_struct(target_rnew
, arg3
, 0);
9150 ret
= get_errno(sys_prlimit64(arg1
, arg2
, rnewp
, arg4
? &rold
: 0));
9151 if (!is_error(ret
) && arg4
) {
9152 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
9155 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
9156 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
9157 unlock_user_struct(target_rold
, arg4
, 1);
9162 #ifdef TARGET_NR_gethostname
9163 case TARGET_NR_gethostname
:
9165 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
9167 ret
= get_errno(gethostname(name
, arg2
));
9168 unlock_user(name
, arg1
, arg2
);
9170 ret
= -TARGET_EFAULT
;
9175 #ifdef TARGET_NR_atomic_cmpxchg_32
9176 case TARGET_NR_atomic_cmpxchg_32
:
9178 /* should use start_exclusive from main.c */
9179 abi_ulong mem_value
;
9180 if (get_user_u32(mem_value
, arg6
)) {
9181 target_siginfo_t info
;
9182 info
.si_signo
= SIGSEGV
;
9184 info
.si_code
= TARGET_SEGV_MAPERR
;
9185 info
._sifields
._sigfault
._addr
= arg6
;
9186 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
9190 if (mem_value
== arg2
)
9191 put_user_u32(arg1
, arg6
);
9196 #ifdef TARGET_NR_atomic_barrier
9197 case TARGET_NR_atomic_barrier
:
9199 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
9205 #ifdef TARGET_NR_timer_create
9206 case TARGET_NR_timer_create
:
9208 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
9210 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
9211 struct target_sigevent
*ptarget_sevp
;
9212 struct target_timer_t
*ptarget_timer
;
9215 int timer_index
= next_free_host_timer();
9217 if (timer_index
< 0) {
9218 ret
= -TARGET_EAGAIN
;
9220 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
9223 if (!lock_user_struct(VERIFY_READ
, ptarget_sevp
, arg2
, 1)) {
9227 host_sevp
.sigev_signo
= tswap32(ptarget_sevp
->sigev_signo
);
9228 host_sevp
.sigev_notify
= tswap32(ptarget_sevp
->sigev_notify
);
9230 phost_sevp
= &host_sevp
;
9233 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
9237 if (!lock_user_struct(VERIFY_WRITE
, ptarget_timer
, arg3
, 1)) {
9240 ptarget_timer
->ptr
= tswap32(0xcafe0000 | timer_index
);
9241 unlock_user_struct(ptarget_timer
, arg3
, 1);
9248 #ifdef TARGET_NR_timer_settime
9249 case TARGET_NR_timer_settime
:
9251 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
9252 * struct itimerspec * old_value */
9254 if (arg3
== 0 || arg1
< 0 || arg1
>= ARRAY_SIZE(g_posix_timers
)) {
9255 ret
= -TARGET_EINVAL
;
9257 timer_t htimer
= g_posix_timers
[arg1
];
9258 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
9260 target_to_host_itimerspec(&hspec_new
, arg3
);
9262 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
9263 host_to_target_itimerspec(arg2
, &hspec_old
);
9269 #ifdef TARGET_NR_timer_gettime
9270 case TARGET_NR_timer_gettime
:
9272 /* args: timer_t timerid, struct itimerspec *curr_value */
9275 return -TARGET_EFAULT
;
9276 } else if (arg1
< 0 || arg1
>= ARRAY_SIZE(g_posix_timers
)) {
9277 ret
= -TARGET_EINVAL
;
9279 timer_t htimer
= g_posix_timers
[arg1
];
9280 struct itimerspec hspec
;
9281 ret
= get_errno(timer_gettime(htimer
, &hspec
));
9283 if (host_to_target_itimerspec(arg2
, &hspec
)) {
9284 ret
= -TARGET_EFAULT
;
9291 #ifdef TARGET_NR_timer_getoverrun
9292 case TARGET_NR_timer_getoverrun
:
9294 /* args: timer_t timerid */
9296 if (arg1
< 0 || arg1
>= ARRAY_SIZE(g_posix_timers
)) {
9297 ret
= -TARGET_EINVAL
;
9299 timer_t htimer
= g_posix_timers
[arg1
];
9300 ret
= get_errno(timer_getoverrun(htimer
));
9306 #ifdef TARGET_NR_timer_delete
9307 case TARGET_NR_timer_delete
:
9309 /* args: timer_t timerid */
9311 if (arg1
< 0 || arg1
>= ARRAY_SIZE(g_posix_timers
)) {
9312 ret
= -TARGET_EINVAL
;
9314 timer_t htimer
= g_posix_timers
[arg1
];
9315 ret
= get_errno(timer_delete(htimer
));
9316 g_posix_timers
[arg1
] = 0;
9324 gemu_log("qemu: Unsupported syscall: %d\n", num
);
9325 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
9326 unimplemented_nowarn
:
9328 ret
= -TARGET_ENOSYS
;
9333 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
9336 print_syscall_ret(num
, ret
);
9339 ret
= -TARGET_EFAULT
;