4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
32 #include <sys/types.h>
38 #include <sys/mount.h>
40 #include <sys/fsuid.h>
41 #include <sys/personality.h>
42 #include <sys/prctl.h>
43 #include <sys/resource.h>
49 int __clone2(int (*fn
)(void *), void *child_stack_base
,
50 size_t stack_size
, int flags
, void *arg
, ...);
52 #include <sys/socket.h>
56 #include <sys/times.h>
59 #include <sys/statfs.h>
61 #include <sys/sysinfo.h>
62 #include <sys/utsname.h>
63 //#include <sys/user.h>
64 #include <netinet/ip.h>
65 #include <netinet/tcp.h>
66 #include <linux/wireless.h>
67 #include <linux/icmp.h>
68 #include "qemu-common.h"
73 #include <sys/eventfd.h>
76 #include <sys/epoll.h>
79 #include "qemu/xattr.h"
81 #ifdef CONFIG_SENDFILE
82 #include <sys/sendfile.h>
85 #define termios host_termios
86 #define winsize host_winsize
87 #define termio host_termio
88 #define sgttyb host_sgttyb /* same as target */
89 #define tchars host_tchars /* same as target */
90 #define ltchars host_ltchars /* same as target */
92 #include <linux/termios.h>
93 #include <linux/unistd.h>
94 #include <linux/utsname.h>
95 #include <linux/cdrom.h>
96 #include <linux/hdreg.h>
97 #include <linux/soundcard.h>
99 #include <linux/mtio.h>
100 #include <linux/fs.h>
101 #if defined(CONFIG_FIEMAP)
102 #include <linux/fiemap.h>
104 #include <linux/fb.h>
105 #include <linux/vt.h>
106 #include <linux/dm-ioctl.h>
107 #include <linux/reboot.h>
108 #include <linux/route.h>
109 #include <linux/filter.h>
110 #include <linux/blkpg.h>
111 #include "linux_loop.h"
112 #include "cpu-uname.h"
116 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
117 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
121 //#include <linux/msdos_fs.h>
122 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
123 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
/* Helper macros that generate a static wrapper function around the raw
 * syscall(2) entry point, for syscalls with 0..6 arguments. */
#define _syscall0(type,name)            \
static type name (void)                 \
{                                       \
    return syscall(__NR_##name);        \
}

#define _syscall1(type,name,type1,arg1)         \
static type name (type1 arg1)                   \
{                                               \
    return syscall(__NR_##name, arg1);          \
}

#define _syscall2(type,name,type1,arg1,type2,arg2)      \
static type name (type1 arg1,type2 arg2)                \
{                                                       \
    return syscall(__NR_##name, arg1, arg2);            \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)   \
static type name (type1 arg1,type2 arg2,type3 arg3)             \
{                                                               \
    return syscall(__NR_##name, arg1, arg2, arg3);              \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)  \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)            \
{                                                                         \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4);                  \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,  \
                  type5,arg5)                                             \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{                                                                         \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);            \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,  \
                  type5,arg5,type6,arg6)                                  \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6)                                             \
{                                                                         \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);      \
}
181 #define __NR_sys_uname __NR_uname
182 #define __NR_sys_getcwd1 __NR_getcwd
183 #define __NR_sys_getdents __NR_getdents
184 #define __NR_sys_getdents64 __NR_getdents64
185 #define __NR_sys_getpriority __NR_getpriority
186 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
187 #define __NR_sys_syslog __NR_syslog
188 #define __NR_sys_tgkill __NR_tgkill
189 #define __NR_sys_tkill __NR_tkill
190 #define __NR_sys_futex __NR_futex
191 #define __NR_sys_inotify_init __NR_inotify_init
192 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
193 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
195 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
197 #define __NR__llseek __NR_lseek
#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
   errno. */
static int gettid(void) {
    return -ENOSYS;
}
#endif
210 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
212 #if !defined(__NR_getdents) || \
213 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
214 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
216 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
217 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
218 loff_t
*, res
, uint
, wh
);
220 _syscall3(int,sys_rt_sigqueueinfo
,int,pid
,int,sig
,siginfo_t
*,uinfo
)
221 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
222 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
223 _syscall3(int,sys_tgkill
,int,tgid
,int,pid
,int,sig
)
225 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
226 _syscall2(int,sys_tkill
,int,tid
,int,sig
)
228 #ifdef __NR_exit_group
229 _syscall1(int,exit_group
,int,error_code
)
231 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
232 _syscall1(int,set_tid_address
,int *,tidptr
)
234 #if defined(TARGET_NR_futex) && defined(__NR_futex)
235 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
236 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
238 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
239 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
240 unsigned long *, user_mask_ptr
);
241 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
242 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
243 unsigned long *, user_mask_ptr
);
244 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
247 static bitmask_transtbl fcntl_flags_tbl
[] = {
248 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
249 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
250 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
251 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
252 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
253 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
254 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
255 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
256 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
257 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
258 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
259 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
260 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
261 #if defined(O_DIRECT)
262 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
264 #if defined(O_NOATIME)
265 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
267 #if defined(O_CLOEXEC)
268 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
271 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
273 /* Don't terminate the list prematurely on 64-bit host+guest. */
274 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
275 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
/* Copy one utsname field, guaranteeing NUL termination even when the
 * source fills all __NEW_UTS_LEN bytes (strncpy alone does not). */
#define COPY_UTSNAME_FIELD(dest, src) \
  do { \
      /* __NEW_UTS_LEN doesn't include terminating null */ \
      (void) strncpy((dest), (src), __NEW_UTS_LEN); \
      (dest)[__NEW_UTS_LEN] = '\0'; \
  } while (0)

/* Fill a kernel-layout new_utsname from the host's uname(2) data.
 * Returns 0 on success, -1 (with errno set by uname) on failure. */
static int sys_uname(struct new_utsname *buf)
{
  struct utsname uts_buf;

  if (uname(&uts_buf) < 0)
      return (-1);

  /*
   * Just in case these have some differences, we
   * translate utsname to new_utsname (which is the
   * struct linux kernel uses).
   */

  memset(buf, 0, sizeof(*buf));
  COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
  COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
  COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
  COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
  COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
#ifdef _GNU_SOURCE
  /* The glibc struct only has a domainname member with _GNU_SOURCE. */
  COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
#endif
  return (0);

#undef COPY_UTSNAME_FIELD
}
/* getcwd(2)-style helper: on success returns strlen(buf) + 1 (the byte
 * count including the NUL, as the kernel syscall does); on failure
 * returns -1 with errno set by getcwd(). */
static int sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }
  return strlen(buf)+1;
}
323 #ifdef TARGET_NR_openat
324 static int sys_openat(int dirfd
, const char *pathname
, int flags
, mode_t mode
)
327 * open(2) has extra parameter 'mode' when called with
330 if ((flags
& O_CREAT
) != 0) {
331 return (openat(dirfd
, pathname
, flags
, mode
));
333 return (openat(dirfd
, pathname
, flags
));
337 #ifdef TARGET_NR_utimensat
338 #ifdef CONFIG_UTIMENSAT
339 static int sys_utimensat(int dirfd
, const char *pathname
,
340 const struct timespec times
[2], int flags
)
342 if (pathname
== NULL
)
343 return futimens(dirfd
, times
);
345 return utimensat(dirfd
, pathname
, times
, flags
);
347 #elif defined(__NR_utimensat)
348 #define __NR_sys_utimensat __NR_utimensat
349 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
350 const struct timespec
*,tsp
,int,flags
)
352 static int sys_utimensat(int dirfd
, const char *pathname
,
353 const struct timespec times
[2], int flags
)
359 #endif /* TARGET_NR_utimensat */
361 #ifdef CONFIG_INOTIFY
362 #include <sys/inotify.h>
364 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
365 static int sys_inotify_init(void)
367 return (inotify_init());
370 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
371 static int sys_inotify_add_watch(int fd
,const char *pathname
, int32_t mask
)
373 return (inotify_add_watch(fd
, pathname
, mask
));
376 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
377 static int sys_inotify_rm_watch(int fd
, int32_t wd
)
379 return (inotify_rm_watch(fd
, wd
));
382 #ifdef CONFIG_INOTIFY1
383 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
384 static int sys_inotify_init1(int flags
)
386 return (inotify_init1(flags
));
391 /* Userspace can usually survive runtime without inotify */
392 #undef TARGET_NR_inotify_init
393 #undef TARGET_NR_inotify_init1
394 #undef TARGET_NR_inotify_add_watch
395 #undef TARGET_NR_inotify_rm_watch
396 #endif /* CONFIG_INOTIFY */
#if defined(TARGET_NR_ppoll)
#ifndef __NR_ppoll
/* Force a runtime ENOSYS when the host kernel lacks ppoll. */
# define __NR_ppoll -1
#endif
#define __NR_sys_ppoll __NR_ppoll
_syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
          struct timespec *, timeout, const __sigset_t *, sigmask,
          size_t, sigsetsize)
#endif

#if defined(TARGET_NR_pselect6)
#ifndef __NR_pselect6
# define __NR_pselect6 -1
#endif
#define __NR_sys_pselect6 __NR_pselect6
_syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
          fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
#endif

#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};

_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
433 #if defined(TARGET_NR_timer_create)
434 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
435 static timer_t g_posix_timers
[32] = { 0, } ;
437 static inline int next_free_host_timer(void)
440 /* FIXME: Does finding the next free slot require a lock? */
441 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
442 if (g_posix_timers
[k
] == 0) {
443 g_posix_timers
[k
] = (timer_t
) 1;
/* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
467 #define ERRNO_TABLE_SIZE 1200
469 /* target_to_host_errno_table[] is initialized from
470 * host_to_target_errno_table[] in syscall_init(). */
471 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
475 * This list is the union of errno values overridden in asm-<arch>/errno.h
476 * minus the errnos that are not actually generic to all archs.
478 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
479 [EIDRM
] = TARGET_EIDRM
,
480 [ECHRNG
] = TARGET_ECHRNG
,
481 [EL2NSYNC
] = TARGET_EL2NSYNC
,
482 [EL3HLT
] = TARGET_EL3HLT
,
483 [EL3RST
] = TARGET_EL3RST
,
484 [ELNRNG
] = TARGET_ELNRNG
,
485 [EUNATCH
] = TARGET_EUNATCH
,
486 [ENOCSI
] = TARGET_ENOCSI
,
487 [EL2HLT
] = TARGET_EL2HLT
,
488 [EDEADLK
] = TARGET_EDEADLK
,
489 [ENOLCK
] = TARGET_ENOLCK
,
490 [EBADE
] = TARGET_EBADE
,
491 [EBADR
] = TARGET_EBADR
,
492 [EXFULL
] = TARGET_EXFULL
,
493 [ENOANO
] = TARGET_ENOANO
,
494 [EBADRQC
] = TARGET_EBADRQC
,
495 [EBADSLT
] = TARGET_EBADSLT
,
496 [EBFONT
] = TARGET_EBFONT
,
497 [ENOSTR
] = TARGET_ENOSTR
,
498 [ENODATA
] = TARGET_ENODATA
,
499 [ETIME
] = TARGET_ETIME
,
500 [ENOSR
] = TARGET_ENOSR
,
501 [ENONET
] = TARGET_ENONET
,
502 [ENOPKG
] = TARGET_ENOPKG
,
503 [EREMOTE
] = TARGET_EREMOTE
,
504 [ENOLINK
] = TARGET_ENOLINK
,
505 [EADV
] = TARGET_EADV
,
506 [ESRMNT
] = TARGET_ESRMNT
,
507 [ECOMM
] = TARGET_ECOMM
,
508 [EPROTO
] = TARGET_EPROTO
,
509 [EDOTDOT
] = TARGET_EDOTDOT
,
510 [EMULTIHOP
] = TARGET_EMULTIHOP
,
511 [EBADMSG
] = TARGET_EBADMSG
,
512 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
513 [EOVERFLOW
] = TARGET_EOVERFLOW
,
514 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
515 [EBADFD
] = TARGET_EBADFD
,
516 [EREMCHG
] = TARGET_EREMCHG
,
517 [ELIBACC
] = TARGET_ELIBACC
,
518 [ELIBBAD
] = TARGET_ELIBBAD
,
519 [ELIBSCN
] = TARGET_ELIBSCN
,
520 [ELIBMAX
] = TARGET_ELIBMAX
,
521 [ELIBEXEC
] = TARGET_ELIBEXEC
,
522 [EILSEQ
] = TARGET_EILSEQ
,
523 [ENOSYS
] = TARGET_ENOSYS
,
524 [ELOOP
] = TARGET_ELOOP
,
525 [ERESTART
] = TARGET_ERESTART
,
526 [ESTRPIPE
] = TARGET_ESTRPIPE
,
527 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
528 [EUSERS
] = TARGET_EUSERS
,
529 [ENOTSOCK
] = TARGET_ENOTSOCK
,
530 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
531 [EMSGSIZE
] = TARGET_EMSGSIZE
,
532 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
533 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
534 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
535 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
536 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
537 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
538 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
539 [EADDRINUSE
] = TARGET_EADDRINUSE
,
540 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
541 [ENETDOWN
] = TARGET_ENETDOWN
,
542 [ENETUNREACH
] = TARGET_ENETUNREACH
,
543 [ENETRESET
] = TARGET_ENETRESET
,
544 [ECONNABORTED
] = TARGET_ECONNABORTED
,
545 [ECONNRESET
] = TARGET_ECONNRESET
,
546 [ENOBUFS
] = TARGET_ENOBUFS
,
547 [EISCONN
] = TARGET_EISCONN
,
548 [ENOTCONN
] = TARGET_ENOTCONN
,
549 [EUCLEAN
] = TARGET_EUCLEAN
,
550 [ENOTNAM
] = TARGET_ENOTNAM
,
551 [ENAVAIL
] = TARGET_ENAVAIL
,
552 [EISNAM
] = TARGET_EISNAM
,
553 [EREMOTEIO
] = TARGET_EREMOTEIO
,
554 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
555 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
556 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
557 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
558 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
559 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
560 [EALREADY
] = TARGET_EALREADY
,
561 [EINPROGRESS
] = TARGET_EINPROGRESS
,
562 [ESTALE
] = TARGET_ESTALE
,
563 [ECANCELED
] = TARGET_ECANCELED
,
564 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
565 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
567 [ENOKEY
] = TARGET_ENOKEY
,
570 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
573 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
576 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
579 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
581 #ifdef ENOTRECOVERABLE
582 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
586 static inline int host_to_target_errno(int err
)
588 if(host_to_target_errno_table
[err
])
589 return host_to_target_errno_table
[err
];
593 static inline int target_to_host_errno(int err
)
595 if (target_to_host_errno_table
[err
])
596 return target_to_host_errno_table
[err
];
600 static inline abi_long
get_errno(abi_long ret
)
603 return -host_to_target_errno(errno
);
608 static inline int is_error(abi_long ret
)
610 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
613 char *target_strerror(int err
)
615 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
618 return strerror(target_to_host_errno(err
));
621 static abi_ulong target_brk
;
622 static abi_ulong target_original_brk
;
623 static abi_ulong brk_page
;
625 void target_set_brk(abi_ulong new_brk
)
627 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
628 brk_page
= HOST_PAGE_ALIGN(target_brk
);
631 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
632 #define DEBUGF_BRK(message, args...)
634 /* do_brk() must return target values and target errnos. */
635 abi_long
do_brk(abi_ulong new_brk
)
637 abi_long mapped_addr
;
640 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
643 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
646 if (new_brk
< target_original_brk
) {
647 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
652 /* If the new brk is less than the highest page reserved to the
653 * target heap allocation, set it and we're almost done... */
654 if (new_brk
<= brk_page
) {
655 /* Heap contents are initialized to zero, as for anonymous
657 if (new_brk
> target_brk
) {
658 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
660 target_brk
= new_brk
;
661 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
665 /* We need to allocate more memory after the brk... Note that
666 * we don't use MAP_FIXED because that will map over the top of
667 * any existing mapping (like the one with the host libc or qemu
668 * itself); instead we treat "mapped but at wrong address" as
669 * a failure and unmap again.
671 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
672 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
673 PROT_READ
|PROT_WRITE
,
674 MAP_ANON
|MAP_PRIVATE
, 0, 0));
676 if (mapped_addr
== brk_page
) {
677 /* Heap contents are initialized to zero, as for anonymous
678 * mapped pages. Technically the new pages are already
679 * initialized to zero since they *are* anonymous mapped
680 * pages, however we have to take care with the contents that
681 * come from the remaining part of the previous page: it may
682 * contains garbage data due to a previous heap usage (grown
684 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
686 target_brk
= new_brk
;
687 brk_page
= HOST_PAGE_ALIGN(target_brk
);
688 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
691 } else if (mapped_addr
!= -1) {
692 /* Mapped but at wrong address, meaning there wasn't actually
693 * enough space for this brk.
695 target_munmap(mapped_addr
, new_alloc_size
);
697 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
700 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
703 #if defined(TARGET_ALPHA)
704 /* We (partially) emulate OSF/1 on Alpha, which requires we
705 return a proper errno, not an unchanged brk value. */
706 return -TARGET_ENOMEM
;
708 /* For everything else, return the previous break. */
712 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
713 abi_ulong target_fds_addr
,
717 abi_ulong b
, *target_fds
;
719 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
720 if (!(target_fds
= lock_user(VERIFY_READ
,
722 sizeof(abi_ulong
) * nw
,
724 return -TARGET_EFAULT
;
728 for (i
= 0; i
< nw
; i
++) {
729 /* grab the abi_ulong */
730 __get_user(b
, &target_fds
[i
]);
731 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
732 /* check the bit inside the abi_ulong */
739 unlock_user(target_fds
, target_fds_addr
, 0);
744 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
745 abi_ulong target_fds_addr
,
748 if (target_fds_addr
) {
749 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
750 return -TARGET_EFAULT
;
758 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
764 abi_ulong
*target_fds
;
766 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
767 if (!(target_fds
= lock_user(VERIFY_WRITE
,
769 sizeof(abi_ulong
) * nw
,
771 return -TARGET_EFAULT
;
774 for (i
= 0; i
< nw
; i
++) {
776 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
777 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
780 __put_user(v
, &target_fds
[i
]);
783 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
788 #if defined(__alpha__)
794 static inline abi_long
host_to_target_clock_t(long ticks
)
796 #if HOST_HZ == TARGET_HZ
799 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
803 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
804 const struct rusage
*rusage
)
806 struct target_rusage
*target_rusage
;
808 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
809 return -TARGET_EFAULT
;
810 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
811 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
812 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
813 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
814 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
815 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
816 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
817 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
818 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
819 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
820 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
821 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
822 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
823 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
824 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
825 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
826 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
827 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
828 unlock_user_struct(target_rusage
, target_addr
, 1);
833 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
835 abi_ulong target_rlim_swap
;
838 target_rlim_swap
= tswapal(target_rlim
);
839 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
840 return RLIM_INFINITY
;
842 result
= target_rlim_swap
;
843 if (target_rlim_swap
!= (rlim_t
)result
)
844 return RLIM_INFINITY
;
849 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
851 abi_ulong target_rlim_swap
;
854 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
855 target_rlim_swap
= TARGET_RLIM_INFINITY
;
857 target_rlim_swap
= rlim
;
858 result
= tswapal(target_rlim_swap
);
863 static inline int target_to_host_resource(int code
)
866 case TARGET_RLIMIT_AS
:
868 case TARGET_RLIMIT_CORE
:
870 case TARGET_RLIMIT_CPU
:
872 case TARGET_RLIMIT_DATA
:
874 case TARGET_RLIMIT_FSIZE
:
876 case TARGET_RLIMIT_LOCKS
:
878 case TARGET_RLIMIT_MEMLOCK
:
879 return RLIMIT_MEMLOCK
;
880 case TARGET_RLIMIT_MSGQUEUE
:
881 return RLIMIT_MSGQUEUE
;
882 case TARGET_RLIMIT_NICE
:
884 case TARGET_RLIMIT_NOFILE
:
885 return RLIMIT_NOFILE
;
886 case TARGET_RLIMIT_NPROC
:
888 case TARGET_RLIMIT_RSS
:
890 case TARGET_RLIMIT_RTPRIO
:
891 return RLIMIT_RTPRIO
;
892 case TARGET_RLIMIT_SIGPENDING
:
893 return RLIMIT_SIGPENDING
;
894 case TARGET_RLIMIT_STACK
:
901 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
902 abi_ulong target_tv_addr
)
904 struct target_timeval
*target_tv
;
906 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
907 return -TARGET_EFAULT
;
909 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
910 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
912 unlock_user_struct(target_tv
, target_tv_addr
, 0);
917 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
918 const struct timeval
*tv
)
920 struct target_timeval
*target_tv
;
922 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
923 return -TARGET_EFAULT
;
925 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
926 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
928 unlock_user_struct(target_tv
, target_tv_addr
, 1);
933 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
936 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
937 abi_ulong target_mq_attr_addr
)
939 struct target_mq_attr
*target_mq_attr
;
941 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
942 target_mq_attr_addr
, 1))
943 return -TARGET_EFAULT
;
945 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
946 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
947 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
948 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
950 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
955 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
956 const struct mq_attr
*attr
)
958 struct target_mq_attr
*target_mq_attr
;
960 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
961 target_mq_attr_addr
, 0))
962 return -TARGET_EFAULT
;
964 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
965 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
966 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
967 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
969 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
975 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
976 /* do_select() must return target values and target errnos. */
977 static abi_long
do_select(int n
,
978 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
979 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
981 fd_set rfds
, wfds
, efds
;
982 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
983 struct timeval tv
, *tv_ptr
;
986 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
990 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
994 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
999 if (target_tv_addr
) {
1000 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1001 return -TARGET_EFAULT
;
1007 ret
= get_errno(select(n
, rfds_ptr
, wfds_ptr
, efds_ptr
, tv_ptr
));
1009 if (!is_error(ret
)) {
1010 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1011 return -TARGET_EFAULT
;
1012 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1013 return -TARGET_EFAULT
;
1014 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1015 return -TARGET_EFAULT
;
1017 if (target_tv_addr
&& copy_to_user_timeval(target_tv_addr
, &tv
))
1018 return -TARGET_EFAULT
;
1025 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1028 return pipe2(host_pipe
, flags
);
1034 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1035 int flags
, int is_pipe2
)
1039 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1042 return get_errno(ret
);
1044 /* Several targets have special calling conventions for the original
1045 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1047 #if defined(TARGET_ALPHA)
1048 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1049 return host_pipe
[0];
1050 #elif defined(TARGET_MIPS)
1051 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1052 return host_pipe
[0];
1053 #elif defined(TARGET_SH4)
1054 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1055 return host_pipe
[0];
1056 #elif defined(TARGET_SPARC)
1057 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1058 return host_pipe
[0];
1062 if (put_user_s32(host_pipe
[0], pipedes
)
1063 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1064 return -TARGET_EFAULT
;
1065 return get_errno(ret
);
1068 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1069 abi_ulong target_addr
,
1072 struct target_ip_mreqn
*target_smreqn
;
1074 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1076 return -TARGET_EFAULT
;
1077 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1078 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1079 if (len
== sizeof(struct target_ip_mreqn
))
1080 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1081 unlock_user(target_smreqn
, target_addr
, 0);
1086 static inline abi_long
target_to_host_sockaddr(struct sockaddr
*addr
,
1087 abi_ulong target_addr
,
1090 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1091 sa_family_t sa_family
;
1092 struct target_sockaddr
*target_saddr
;
1094 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1096 return -TARGET_EFAULT
;
1098 sa_family
= tswap16(target_saddr
->sa_family
);
1100 /* Oops. The caller might send a incomplete sun_path; sun_path
1101 * must be terminated by \0 (see the manual page), but
1102 * unfortunately it is quite common to specify sockaddr_un
1103 * length as "strlen(x->sun_path)" while it should be
1104 * "strlen(...) + 1". We'll fix that here if needed.
1105 * Linux kernel has a similar feature.
1108 if (sa_family
== AF_UNIX
) {
1109 if (len
< unix_maxlen
&& len
> 0) {
1110 char *cp
= (char*)target_saddr
;
1112 if ( cp
[len
-1] && !cp
[len
] )
1115 if (len
> unix_maxlen
)
1119 memcpy(addr
, target_saddr
, len
);
1120 addr
->sa_family
= sa_family
;
1121 unlock_user(target_saddr
, target_addr
, 0);
1126 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1127 struct sockaddr
*addr
,
1130 struct target_sockaddr
*target_saddr
;
1132 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1134 return -TARGET_EFAULT
;
1135 memcpy(target_saddr
, addr
, len
);
1136 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1137 unlock_user(target_saddr
, target_addr
, len
);
1142 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1143 struct target_msghdr
*target_msgh
)
1145 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1146 abi_long msg_controllen
;
1147 abi_ulong target_cmsg_addr
;
1148 struct target_cmsghdr
*target_cmsg
;
1149 socklen_t space
= 0;
1151 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1152 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1154 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1155 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1157 return -TARGET_EFAULT
;
1159 while (cmsg
&& target_cmsg
) {
1160 void *data
= CMSG_DATA(cmsg
);
1161 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1163 int len
= tswapal(target_cmsg
->cmsg_len
)
1164 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
1166 space
+= CMSG_SPACE(len
);
1167 if (space
> msgh
->msg_controllen
) {
1168 space
-= CMSG_SPACE(len
);
1169 gemu_log("Host cmsg overflow\n");
1173 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1174 cmsg
->cmsg_level
= SOL_SOCKET
;
1176 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1178 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1179 cmsg
->cmsg_len
= CMSG_LEN(len
);
1181 if (cmsg
->cmsg_level
!= SOL_SOCKET
|| cmsg
->cmsg_type
!= SCM_RIGHTS
) {
1182 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1183 memcpy(data
, target_data
, len
);
1185 int *fd
= (int *)data
;
1186 int *target_fd
= (int *)target_data
;
1187 int i
, numfds
= len
/ sizeof(int);
1189 for (i
= 0; i
< numfds
; i
++)
1190 fd
[i
] = tswap32(target_fd
[i
]);
1193 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1194 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
);
1196 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1198 msgh
->msg_controllen
= space
;
1202 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1203 struct msghdr
*msgh
)
1205 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1206 abi_long msg_controllen
;
1207 abi_ulong target_cmsg_addr
;
1208 struct target_cmsghdr
*target_cmsg
;
1209 socklen_t space
= 0;
1211 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1212 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1214 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1215 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1217 return -TARGET_EFAULT
;
1219 while (cmsg
&& target_cmsg
) {
1220 void *data
= CMSG_DATA(cmsg
);
1221 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1223 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
1225 space
+= TARGET_CMSG_SPACE(len
);
1226 if (space
> msg_controllen
) {
1227 space
-= TARGET_CMSG_SPACE(len
);
1228 gemu_log("Target cmsg overflow\n");
1232 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1233 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1235 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1237 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1238 target_cmsg
->cmsg_len
= tswapal(TARGET_CMSG_LEN(len
));
1240 if ((cmsg
->cmsg_level
== SOL_SOCKET
) &&
1241 (cmsg
->cmsg_type
== SCM_RIGHTS
)) {
1242 int *fd
= (int *)data
;
1243 int *target_fd
= (int *)target_data
;
1244 int i
, numfds
= len
/ sizeof(int);
1246 for (i
= 0; i
< numfds
; i
++)
1247 target_fd
[i
] = tswap32(fd
[i
]);
1248 } else if ((cmsg
->cmsg_level
== SOL_SOCKET
) &&
1249 (cmsg
->cmsg_type
== SO_TIMESTAMP
) &&
1250 (len
== sizeof(struct timeval
))) {
1251 /* copy struct timeval to target */
1252 struct timeval
*tv
= (struct timeval
*)data
;
1253 struct target_timeval
*target_tv
=
1254 (struct target_timeval
*)target_data
;
1256 target_tv
->tv_sec
= tswapal(tv
->tv_sec
);
1257 target_tv
->tv_usec
= tswapal(tv
->tv_usec
);
1259 gemu_log("Unsupported ancillary data: %d/%d\n",
1260 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1261 memcpy(target_data
, data
, len
);
1264 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1265 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
);
1267 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1269 target_msgh
->msg_controllen
= tswapal(space
);
1273 /* do_setsockopt() Must return target values and target errnos. */
1274 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
1275 abi_ulong optval_addr
, socklen_t optlen
)
1279 struct ip_mreqn
*ip_mreq
;
1280 struct ip_mreq_source
*ip_mreq_source
;
1284 /* TCP options all take an 'int' value. */
1285 if (optlen
< sizeof(uint32_t))
1286 return -TARGET_EINVAL
;
1288 if (get_user_u32(val
, optval_addr
))
1289 return -TARGET_EFAULT
;
1290 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1297 case IP_ROUTER_ALERT
:
1301 case IP_MTU_DISCOVER
:
1307 case IP_MULTICAST_TTL
:
1308 case IP_MULTICAST_LOOP
:
1310 if (optlen
>= sizeof(uint32_t)) {
1311 if (get_user_u32(val
, optval_addr
))
1312 return -TARGET_EFAULT
;
1313 } else if (optlen
>= 1) {
1314 if (get_user_u8(val
, optval_addr
))
1315 return -TARGET_EFAULT
;
1317 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1319 case IP_ADD_MEMBERSHIP
:
1320 case IP_DROP_MEMBERSHIP
:
1321 if (optlen
< sizeof (struct target_ip_mreq
) ||
1322 optlen
> sizeof (struct target_ip_mreqn
))
1323 return -TARGET_EINVAL
;
1325 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
1326 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
1327 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
1330 case IP_BLOCK_SOURCE
:
1331 case IP_UNBLOCK_SOURCE
:
1332 case IP_ADD_SOURCE_MEMBERSHIP
:
1333 case IP_DROP_SOURCE_MEMBERSHIP
:
1334 if (optlen
!= sizeof (struct target_ip_mreq_source
))
1335 return -TARGET_EINVAL
;
1337 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
1338 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
1339 unlock_user (ip_mreq_source
, optval_addr
, 0);
1348 case IPV6_MTU_DISCOVER
:
1351 case IPV6_RECVPKTINFO
:
1353 if (optlen
< sizeof(uint32_t)) {
1354 return -TARGET_EINVAL
;
1356 if (get_user_u32(val
, optval_addr
)) {
1357 return -TARGET_EFAULT
;
1359 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1360 &val
, sizeof(val
)));
1369 /* struct icmp_filter takes an u32 value */
1370 if (optlen
< sizeof(uint32_t)) {
1371 return -TARGET_EINVAL
;
1374 if (get_user_u32(val
, optval_addr
)) {
1375 return -TARGET_EFAULT
;
1377 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1378 &val
, sizeof(val
)));
1385 case TARGET_SOL_SOCKET
:
1387 case TARGET_SO_RCVTIMEO
:
1391 optname
= SO_RCVTIMEO
;
1394 if (optlen
!= sizeof(struct target_timeval
)) {
1395 return -TARGET_EINVAL
;
1398 if (copy_from_user_timeval(&tv
, optval_addr
)) {
1399 return -TARGET_EFAULT
;
1402 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
1406 case TARGET_SO_SNDTIMEO
:
1407 optname
= SO_SNDTIMEO
;
1409 case TARGET_SO_ATTACH_FILTER
:
1411 struct target_sock_fprog
*tfprog
;
1412 struct target_sock_filter
*tfilter
;
1413 struct sock_fprog fprog
;
1414 struct sock_filter
*filter
;
1417 if (optlen
!= sizeof(*tfprog
)) {
1418 return -TARGET_EINVAL
;
1420 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
1421 return -TARGET_EFAULT
;
1423 if (!lock_user_struct(VERIFY_READ
, tfilter
,
1424 tswapal(tfprog
->filter
), 0)) {
1425 unlock_user_struct(tfprog
, optval_addr
, 1);
1426 return -TARGET_EFAULT
;
1429 fprog
.len
= tswap16(tfprog
->len
);
1430 filter
= malloc(fprog
.len
* sizeof(*filter
));
1431 if (filter
== NULL
) {
1432 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
1433 unlock_user_struct(tfprog
, optval_addr
, 1);
1434 return -TARGET_ENOMEM
;
1436 for (i
= 0; i
< fprog
.len
; i
++) {
1437 filter
[i
].code
= tswap16(tfilter
[i
].code
);
1438 filter
[i
].jt
= tfilter
[i
].jt
;
1439 filter
[i
].jf
= tfilter
[i
].jf
;
1440 filter
[i
].k
= tswap32(tfilter
[i
].k
);
1442 fprog
.filter
= filter
;
1444 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
1445 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
1448 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
1449 unlock_user_struct(tfprog
, optval_addr
, 1);
1452 /* Options with 'int' argument. */
1453 case TARGET_SO_DEBUG
:
1456 case TARGET_SO_REUSEADDR
:
1457 optname
= SO_REUSEADDR
;
1459 case TARGET_SO_TYPE
:
1462 case TARGET_SO_ERROR
:
1465 case TARGET_SO_DONTROUTE
:
1466 optname
= SO_DONTROUTE
;
1468 case TARGET_SO_BROADCAST
:
1469 optname
= SO_BROADCAST
;
1471 case TARGET_SO_SNDBUF
:
1472 optname
= SO_SNDBUF
;
1474 case TARGET_SO_RCVBUF
:
1475 optname
= SO_RCVBUF
;
1477 case TARGET_SO_KEEPALIVE
:
1478 optname
= SO_KEEPALIVE
;
1480 case TARGET_SO_OOBINLINE
:
1481 optname
= SO_OOBINLINE
;
1483 case TARGET_SO_NO_CHECK
:
1484 optname
= SO_NO_CHECK
;
1486 case TARGET_SO_PRIORITY
:
1487 optname
= SO_PRIORITY
;
1490 case TARGET_SO_BSDCOMPAT
:
1491 optname
= SO_BSDCOMPAT
;
1494 case TARGET_SO_PASSCRED
:
1495 optname
= SO_PASSCRED
;
1497 case TARGET_SO_TIMESTAMP
:
1498 optname
= SO_TIMESTAMP
;
1500 case TARGET_SO_RCVLOWAT
:
1501 optname
= SO_RCVLOWAT
;
1507 if (optlen
< sizeof(uint32_t))
1508 return -TARGET_EINVAL
;
1510 if (get_user_u32(val
, optval_addr
))
1511 return -TARGET_EFAULT
;
1512 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
1516 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
1517 ret
= -TARGET_ENOPROTOOPT
;
1522 /* do_getsockopt() Must return target values and target errnos. */
1523 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
1524 abi_ulong optval_addr
, abi_ulong optlen
)
1531 case TARGET_SOL_SOCKET
:
1534 /* These don't just return a single integer */
1535 case TARGET_SO_LINGER
:
1536 case TARGET_SO_RCVTIMEO
:
1537 case TARGET_SO_SNDTIMEO
:
1538 case TARGET_SO_PEERNAME
:
1540 case TARGET_SO_PEERCRED
: {
1543 struct target_ucred
*tcr
;
1545 if (get_user_u32(len
, optlen
)) {
1546 return -TARGET_EFAULT
;
1549 return -TARGET_EINVAL
;
1553 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
1561 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
1562 return -TARGET_EFAULT
;
1564 __put_user(cr
.pid
, &tcr
->pid
);
1565 __put_user(cr
.uid
, &tcr
->uid
);
1566 __put_user(cr
.gid
, &tcr
->gid
);
1567 unlock_user_struct(tcr
, optval_addr
, 1);
1568 if (put_user_u32(len
, optlen
)) {
1569 return -TARGET_EFAULT
;
1573 /* Options with 'int' argument. */
1574 case TARGET_SO_DEBUG
:
1577 case TARGET_SO_REUSEADDR
:
1578 optname
= SO_REUSEADDR
;
1580 case TARGET_SO_TYPE
:
1583 case TARGET_SO_ERROR
:
1586 case TARGET_SO_DONTROUTE
:
1587 optname
= SO_DONTROUTE
;
1589 case TARGET_SO_BROADCAST
:
1590 optname
= SO_BROADCAST
;
1592 case TARGET_SO_SNDBUF
:
1593 optname
= SO_SNDBUF
;
1595 case TARGET_SO_RCVBUF
:
1596 optname
= SO_RCVBUF
;
1598 case TARGET_SO_KEEPALIVE
:
1599 optname
= SO_KEEPALIVE
;
1601 case TARGET_SO_OOBINLINE
:
1602 optname
= SO_OOBINLINE
;
1604 case TARGET_SO_NO_CHECK
:
1605 optname
= SO_NO_CHECK
;
1607 case TARGET_SO_PRIORITY
:
1608 optname
= SO_PRIORITY
;
1611 case TARGET_SO_BSDCOMPAT
:
1612 optname
= SO_BSDCOMPAT
;
1615 case TARGET_SO_PASSCRED
:
1616 optname
= SO_PASSCRED
;
1618 case TARGET_SO_TIMESTAMP
:
1619 optname
= SO_TIMESTAMP
;
1621 case TARGET_SO_RCVLOWAT
:
1622 optname
= SO_RCVLOWAT
;
1629 /* TCP options all take an 'int' value. */
1631 if (get_user_u32(len
, optlen
))
1632 return -TARGET_EFAULT
;
1634 return -TARGET_EINVAL
;
1636 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1642 if (put_user_u32(val
, optval_addr
))
1643 return -TARGET_EFAULT
;
1645 if (put_user_u8(val
, optval_addr
))
1646 return -TARGET_EFAULT
;
1648 if (put_user_u32(len
, optlen
))
1649 return -TARGET_EFAULT
;
1656 case IP_ROUTER_ALERT
:
1660 case IP_MTU_DISCOVER
:
1666 case IP_MULTICAST_TTL
:
1667 case IP_MULTICAST_LOOP
:
1668 if (get_user_u32(len
, optlen
))
1669 return -TARGET_EFAULT
;
1671 return -TARGET_EINVAL
;
1673 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1676 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
1678 if (put_user_u32(len
, optlen
)
1679 || put_user_u8(val
, optval_addr
))
1680 return -TARGET_EFAULT
;
1682 if (len
> sizeof(int))
1684 if (put_user_u32(len
, optlen
)
1685 || put_user_u32(val
, optval_addr
))
1686 return -TARGET_EFAULT
;
1690 ret
= -TARGET_ENOPROTOOPT
;
1696 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1698 ret
= -TARGET_EOPNOTSUPP
;
1704 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
1705 int count
, int copy
)
1707 struct target_iovec
*target_vec
;
1709 abi_ulong total_len
, max_len
;
1717 if (count
< 0 || count
> IOV_MAX
) {
1722 vec
= calloc(count
, sizeof(struct iovec
));
1728 target_vec
= lock_user(VERIFY_READ
, target_addr
,
1729 count
* sizeof(struct target_iovec
), 1);
1730 if (target_vec
== NULL
) {
1735 /* ??? If host page size > target page size, this will result in a
1736 value larger than what we can actually support. */
1737 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
1740 for (i
= 0; i
< count
; i
++) {
1741 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
1742 abi_long len
= tswapal(target_vec
[i
].iov_len
);
1747 } else if (len
== 0) {
1748 /* Zero length pointer is ignored. */
1749 vec
[i
].iov_base
= 0;
1751 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
1752 if (!vec
[i
].iov_base
) {
1756 if (len
> max_len
- total_len
) {
1757 len
= max_len
- total_len
;
1760 vec
[i
].iov_len
= len
;
1764 unlock_user(target_vec
, target_addr
, 0);
1768 unlock_user(target_vec
, target_addr
, 0);
1775 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
1776 int count
, int copy
)
1778 struct target_iovec
*target_vec
;
1781 target_vec
= lock_user(VERIFY_READ
, target_addr
,
1782 count
* sizeof(struct target_iovec
), 1);
1784 for (i
= 0; i
< count
; i
++) {
1785 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
1786 abi_long len
= tswapal(target_vec
[i
].iov_base
);
1790 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
1792 unlock_user(target_vec
, target_addr
, 0);
1798 static inline int target_to_host_sock_type(int *type
)
1801 int target_type
= *type
;
1803 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
1804 case TARGET_SOCK_DGRAM
:
1805 host_type
= SOCK_DGRAM
;
1807 case TARGET_SOCK_STREAM
:
1808 host_type
= SOCK_STREAM
;
1811 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
1814 if (target_type
& TARGET_SOCK_CLOEXEC
) {
1815 #if defined(SOCK_CLOEXEC)
1816 host_type
|= SOCK_CLOEXEC
;
1818 return -TARGET_EINVAL
;
1821 if (target_type
& TARGET_SOCK_NONBLOCK
) {
1822 #if defined(SOCK_NONBLOCK)
1823 host_type
|= SOCK_NONBLOCK
;
1824 #elif !defined(O_NONBLOCK)
1825 return -TARGET_EINVAL
;
1832 /* Try to emulate socket type flags after socket creation. */
1833 static int sock_flags_fixup(int fd
, int target_type
)
1835 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
1836 if (target_type
& TARGET_SOCK_NONBLOCK
) {
1837 int flags
= fcntl(fd
, F_GETFL
);
1838 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
1840 return -TARGET_EINVAL
;
1847 /* do_socket() Must return target values and target errnos. */
1848 static abi_long
do_socket(int domain
, int type
, int protocol
)
1850 int target_type
= type
;
1853 ret
= target_to_host_sock_type(&type
);
1858 if (domain
== PF_NETLINK
)
1859 return -EAFNOSUPPORT
; /* do not NETLINK socket connections possible */
1860 ret
= get_errno(socket(domain
, type
, protocol
));
1862 ret
= sock_flags_fixup(ret
, target_type
);
1867 /* do_bind() Must return target values and target errnos. */
1868 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
1874 if ((int)addrlen
< 0) {
1875 return -TARGET_EINVAL
;
1878 addr
= alloca(addrlen
+1);
1880 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1884 return get_errno(bind(sockfd
, addr
, addrlen
));
1887 /* do_connect() Must return target values and target errnos. */
1888 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
1894 if ((int)addrlen
< 0) {
1895 return -TARGET_EINVAL
;
1898 addr
= alloca(addrlen
);
1900 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1904 return get_errno(connect(sockfd
, addr
, addrlen
));
1907 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
1908 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
1909 int flags
, int send
)
1915 abi_ulong target_vec
;
1917 if (msgp
->msg_name
) {
1918 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
1919 msg
.msg_name
= alloca(msg
.msg_namelen
);
1920 ret
= target_to_host_sockaddr(msg
.msg_name
, tswapal(msgp
->msg_name
),
1926 msg
.msg_name
= NULL
;
1927 msg
.msg_namelen
= 0;
1929 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
1930 msg
.msg_control
= alloca(msg
.msg_controllen
);
1931 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
1933 count
= tswapal(msgp
->msg_iovlen
);
1934 target_vec
= tswapal(msgp
->msg_iov
);
1935 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
1936 target_vec
, count
, send
);
1938 ret
= -host_to_target_errno(errno
);
1941 msg
.msg_iovlen
= count
;
1945 ret
= target_to_host_cmsg(&msg
, msgp
);
1947 ret
= get_errno(sendmsg(fd
, &msg
, flags
));
1949 ret
= get_errno(recvmsg(fd
, &msg
, flags
));
1950 if (!is_error(ret
)) {
1952 ret
= host_to_target_cmsg(msgp
, &msg
);
1953 if (!is_error(ret
)) {
1954 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
1955 if (msg
.msg_name
!= NULL
) {
1956 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
1957 msg
.msg_name
, msg
.msg_namelen
);
1969 unlock_iovec(vec
, target_vec
, count
, !send
);
1974 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
1975 int flags
, int send
)
1978 struct target_msghdr
*msgp
;
1980 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
1984 return -TARGET_EFAULT
;
1986 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
1987 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
1991 #ifdef TARGET_NR_sendmmsg
1992 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
1993 * so it might not have this *mmsg-specific flag either.
1995 #ifndef MSG_WAITFORONE
1996 #define MSG_WAITFORONE 0x10000
1999 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
2000 unsigned int vlen
, unsigned int flags
,
2003 struct target_mmsghdr
*mmsgp
;
2007 if (vlen
> UIO_MAXIOV
) {
2011 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
2013 return -TARGET_EFAULT
;
2016 for (i
= 0; i
< vlen
; i
++) {
2017 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
2018 if (is_error(ret
)) {
2021 mmsgp
[i
].msg_len
= tswap32(ret
);
2022 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2023 if (flags
& MSG_WAITFORONE
) {
2024 flags
|= MSG_DONTWAIT
;
2028 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
2030 /* Return number of datagrams sent if we sent any at all;
2031 * otherwise return the error.
2040 /* If we don't have a system accept4() then just call accept.
2041 * The callsites to do_accept4() will ensure that they don't
2042 * pass a non-zero flags argument in this config.
2044 #ifndef CONFIG_ACCEPT4
2045 static inline int accept4(int sockfd
, struct sockaddr
*addr
,
2046 socklen_t
*addrlen
, int flags
)
2049 return accept(sockfd
, addr
, addrlen
);
2053 /* do_accept4() Must return target values and target errnos. */
2054 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
2055 abi_ulong target_addrlen_addr
, int flags
)
2061 if (target_addr
== 0) {
2062 return get_errno(accept4(fd
, NULL
, NULL
, flags
));
2065 /* linux returns EINVAL if addrlen pointer is invalid */
2066 if (get_user_u32(addrlen
, target_addrlen_addr
))
2067 return -TARGET_EINVAL
;
2069 if ((int)addrlen
< 0) {
2070 return -TARGET_EINVAL
;
2073 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2074 return -TARGET_EINVAL
;
2076 addr
= alloca(addrlen
);
2078 ret
= get_errno(accept4(fd
, addr
, &addrlen
, flags
));
2079 if (!is_error(ret
)) {
2080 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2081 if (put_user_u32(addrlen
, target_addrlen_addr
))
2082 ret
= -TARGET_EFAULT
;
2087 /* do_getpeername() Must return target values and target errnos. */
2088 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
2089 abi_ulong target_addrlen_addr
)
2095 if (get_user_u32(addrlen
, target_addrlen_addr
))
2096 return -TARGET_EFAULT
;
2098 if ((int)addrlen
< 0) {
2099 return -TARGET_EINVAL
;
2102 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2103 return -TARGET_EFAULT
;
2105 addr
= alloca(addrlen
);
2107 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
2108 if (!is_error(ret
)) {
2109 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2110 if (put_user_u32(addrlen
, target_addrlen_addr
))
2111 ret
= -TARGET_EFAULT
;
2116 /* do_getsockname() Must return target values and target errnos. */
2117 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
2118 abi_ulong target_addrlen_addr
)
2124 if (get_user_u32(addrlen
, target_addrlen_addr
))
2125 return -TARGET_EFAULT
;
2127 if ((int)addrlen
< 0) {
2128 return -TARGET_EINVAL
;
2131 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2132 return -TARGET_EFAULT
;
2134 addr
= alloca(addrlen
);
2136 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
2137 if (!is_error(ret
)) {
2138 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2139 if (put_user_u32(addrlen
, target_addrlen_addr
))
2140 ret
= -TARGET_EFAULT
;
2145 /* do_socketpair() Must return target values and target errnos. */
2146 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
2147 abi_ulong target_tab_addr
)
2152 target_to_host_sock_type(&type
);
2154 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
2155 if (!is_error(ret
)) {
2156 if (put_user_s32(tab
[0], target_tab_addr
)
2157 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
2158 ret
= -TARGET_EFAULT
;
2163 /* do_sendto() Must return target values and target errnos. */
2164 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
2165 abi_ulong target_addr
, socklen_t addrlen
)
2171 if ((int)addrlen
< 0) {
2172 return -TARGET_EINVAL
;
2175 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
2177 return -TARGET_EFAULT
;
2179 addr
= alloca(addrlen
);
2180 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
2182 unlock_user(host_msg
, msg
, 0);
2185 ret
= get_errno(sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
2187 ret
= get_errno(send(fd
, host_msg
, len
, flags
));
2189 unlock_user(host_msg
, msg
, 0);
2193 /* do_recvfrom() Must return target values and target errnos. */
2194 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
2195 abi_ulong target_addr
,
2196 abi_ulong target_addrlen
)
2203 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
2205 return -TARGET_EFAULT
;
2207 if (get_user_u32(addrlen
, target_addrlen
)) {
2208 ret
= -TARGET_EFAULT
;
2211 if ((int)addrlen
< 0) {
2212 ret
= -TARGET_EINVAL
;
2215 addr
= alloca(addrlen
);
2216 ret
= get_errno(recvfrom(fd
, host_msg
, len
, flags
, addr
, &addrlen
));
2218 addr
= NULL
; /* To keep compiler quiet. */
2219 ret
= get_errno(qemu_recv(fd
, host_msg
, len
, flags
));
2221 if (!is_error(ret
)) {
2223 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2224 if (put_user_u32(addrlen
, target_addrlen
)) {
2225 ret
= -TARGET_EFAULT
;
2229 unlock_user(host_msg
, msg
, len
);
2232 unlock_user(host_msg
, msg
, 0);
2237 #ifdef TARGET_NR_socketcall
2238 /* do_socketcall() Must return target values and target errnos. */
2239 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
2241 static const unsigned ac
[] = { /* number of arguments per call */
2242 [SOCKOP_socket
] = 3, /* domain, type, protocol */
2243 [SOCKOP_bind
] = 3, /* sockfd, addr, addrlen */
2244 [SOCKOP_connect
] = 3, /* sockfd, addr, addrlen */
2245 [SOCKOP_listen
] = 2, /* sockfd, backlog */
2246 [SOCKOP_accept
] = 3, /* sockfd, addr, addrlen */
2247 [SOCKOP_accept4
] = 4, /* sockfd, addr, addrlen, flags */
2248 [SOCKOP_getsockname
] = 3, /* sockfd, addr, addrlen */
2249 [SOCKOP_getpeername
] = 3, /* sockfd, addr, addrlen */
2250 [SOCKOP_socketpair
] = 4, /* domain, type, protocol, tab */
2251 [SOCKOP_send
] = 4, /* sockfd, msg, len, flags */
2252 [SOCKOP_recv
] = 4, /* sockfd, msg, len, flags */
2253 [SOCKOP_sendto
] = 6, /* sockfd, msg, len, flags, addr, addrlen */
2254 [SOCKOP_recvfrom
] = 6, /* sockfd, msg, len, flags, addr, addrlen */
2255 [SOCKOP_shutdown
] = 2, /* sockfd, how */
2256 [SOCKOP_sendmsg
] = 3, /* sockfd, msg, flags */
2257 [SOCKOP_recvmsg
] = 3, /* sockfd, msg, flags */
2258 [SOCKOP_setsockopt
] = 5, /* sockfd, level, optname, optval, optlen */
2259 [SOCKOP_getsockopt
] = 5, /* sockfd, level, optname, optval, optlen */
2261 abi_long a
[6]; /* max 6 args */
2263 /* first, collect the arguments in a[] according to ac[] */
2264 if (num
>= 0 && num
< ARRAY_SIZE(ac
)) {
2266 assert(ARRAY_SIZE(a
) >= ac
[num
]); /* ensure we have space for args */
2267 for (i
= 0; i
< ac
[num
]; ++i
) {
2268 if (get_user_ual(a
[i
], vptr
+ i
* sizeof(abi_long
)) != 0) {
2269 return -TARGET_EFAULT
;
2274 /* now when we have the args, actually handle the call */
2276 case SOCKOP_socket
: /* domain, type, protocol */
2277 return do_socket(a
[0], a
[1], a
[2]);
2278 case SOCKOP_bind
: /* sockfd, addr, addrlen */
2279 return do_bind(a
[0], a
[1], a
[2]);
2280 case SOCKOP_connect
: /* sockfd, addr, addrlen */
2281 return do_connect(a
[0], a
[1], a
[2]);
2282 case SOCKOP_listen
: /* sockfd, backlog */
2283 return get_errno(listen(a
[0], a
[1]));
2284 case SOCKOP_accept
: /* sockfd, addr, addrlen */
2285 return do_accept4(a
[0], a
[1], a
[2], 0);
2286 case SOCKOP_accept4
: /* sockfd, addr, addrlen, flags */
2287 return do_accept4(a
[0], a
[1], a
[2], a
[3]);
2288 case SOCKOP_getsockname
: /* sockfd, addr, addrlen */
2289 return do_getsockname(a
[0], a
[1], a
[2]);
2290 case SOCKOP_getpeername
: /* sockfd, addr, addrlen */
2291 return do_getpeername(a
[0], a
[1], a
[2]);
2292 case SOCKOP_socketpair
: /* domain, type, protocol, tab */
2293 return do_socketpair(a
[0], a
[1], a
[2], a
[3]);
2294 case SOCKOP_send
: /* sockfd, msg, len, flags */
2295 return do_sendto(a
[0], a
[1], a
[2], a
[3], 0, 0);
2296 case SOCKOP_recv
: /* sockfd, msg, len, flags */
2297 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], 0, 0);
2298 case SOCKOP_sendto
: /* sockfd, msg, len, flags, addr, addrlen */
2299 return do_sendto(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
2300 case SOCKOP_recvfrom
: /* sockfd, msg, len, flags, addr, addrlen */
2301 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
2302 case SOCKOP_shutdown
: /* sockfd, how */
2303 return get_errno(shutdown(a
[0], a
[1]));
2304 case SOCKOP_sendmsg
: /* sockfd, msg, flags */
2305 return do_sendrecvmsg(a
[0], a
[1], a
[2], 1);
2306 case SOCKOP_recvmsg
: /* sockfd, msg, flags */
2307 return do_sendrecvmsg(a
[0], a
[1], a
[2], 0);
2308 case SOCKOP_setsockopt
: /* sockfd, level, optname, optval, optlen */
2309 return do_setsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
2310 case SOCKOP_getsockopt
: /* sockfd, level, optname, optval, optlen */
2311 return do_getsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
2313 gemu_log("Unsupported socketcall: %d\n", num
);
2314 return -TARGET_ENOSYS
;
2319 #define N_SHM_REGIONS 32
2321 static struct shm_region
{
2324 } shm_regions
[N_SHM_REGIONS
];
2326 struct target_semid_ds
2328 struct target_ipc_perm sem_perm
;
2329 abi_ulong sem_otime
;
2330 abi_ulong __unused1
;
2331 abi_ulong sem_ctime
;
2332 abi_ulong __unused2
;
2333 abi_ulong sem_nsems
;
2334 abi_ulong __unused3
;
2335 abi_ulong __unused4
;
2338 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
2339 abi_ulong target_addr
)
2341 struct target_ipc_perm
*target_ip
;
2342 struct target_semid_ds
*target_sd
;
2344 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2345 return -TARGET_EFAULT
;
2346 target_ip
= &(target_sd
->sem_perm
);
2347 host_ip
->__key
= tswap32(target_ip
->__key
);
2348 host_ip
->uid
= tswap32(target_ip
->uid
);
2349 host_ip
->gid
= tswap32(target_ip
->gid
);
2350 host_ip
->cuid
= tswap32(target_ip
->cuid
);
2351 host_ip
->cgid
= tswap32(target_ip
->cgid
);
2352 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2353 host_ip
->mode
= tswap32(target_ip
->mode
);
2355 host_ip
->mode
= tswap16(target_ip
->mode
);
2357 #if defined(TARGET_PPC)
2358 host_ip
->__seq
= tswap32(target_ip
->__seq
);
2360 host_ip
->__seq
= tswap16(target_ip
->__seq
);
2362 unlock_user_struct(target_sd
, target_addr
, 0);
2366 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
2367 struct ipc_perm
*host_ip
)
2369 struct target_ipc_perm
*target_ip
;
2370 struct target_semid_ds
*target_sd
;
2372 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2373 return -TARGET_EFAULT
;
2374 target_ip
= &(target_sd
->sem_perm
);
2375 target_ip
->__key
= tswap32(host_ip
->__key
);
2376 target_ip
->uid
= tswap32(host_ip
->uid
);
2377 target_ip
->gid
= tswap32(host_ip
->gid
);
2378 target_ip
->cuid
= tswap32(host_ip
->cuid
);
2379 target_ip
->cgid
= tswap32(host_ip
->cgid
);
2380 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2381 target_ip
->mode
= tswap32(host_ip
->mode
);
2383 target_ip
->mode
= tswap16(host_ip
->mode
);
2385 #if defined(TARGET_PPC)
2386 target_ip
->__seq
= tswap32(host_ip
->__seq
);
2388 target_ip
->__seq
= tswap16(host_ip
->__seq
);
2390 unlock_user_struct(target_sd
, target_addr
, 1);
2394 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
2395 abi_ulong target_addr
)
2397 struct target_semid_ds
*target_sd
;
2399 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2400 return -TARGET_EFAULT
;
2401 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
2402 return -TARGET_EFAULT
;
2403 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
2404 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
2405 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
2406 unlock_user_struct(target_sd
, target_addr
, 0);
2410 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
2411 struct semid_ds
*host_sd
)
2413 struct target_semid_ds
*target_sd
;
2415 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2416 return -TARGET_EFAULT
;
2417 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
2418 return -TARGET_EFAULT
;
2419 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
2420 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
2421 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
2422 unlock_user_struct(target_sd
, target_addr
, 1);
2426 struct target_seminfo
{
2439 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
2440 struct seminfo
*host_seminfo
)
2442 struct target_seminfo
*target_seminfo
;
2443 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
2444 return -TARGET_EFAULT
;
2445 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
2446 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
2447 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
2448 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
2449 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
2450 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
2451 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
2452 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
2453 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
2454 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
2455 unlock_user_struct(target_seminfo
, target_addr
, 1);
2461 struct semid_ds
*buf
;
2462 unsigned short *array
;
2463 struct seminfo
*__buf
;
2466 union target_semun
{
2473 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
2474 abi_ulong target_addr
)
2477 unsigned short *array
;
2479 struct semid_ds semid_ds
;
2482 semun
.buf
= &semid_ds
;
2484 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
2486 return get_errno(ret
);
2488 nsems
= semid_ds
.sem_nsems
;
2490 *host_array
= malloc(nsems
*sizeof(unsigned short));
2492 return -TARGET_ENOMEM
;
2494 array
= lock_user(VERIFY_READ
, target_addr
,
2495 nsems
*sizeof(unsigned short), 1);
2498 return -TARGET_EFAULT
;
2501 for(i
=0; i
<nsems
; i
++) {
2502 __get_user((*host_array
)[i
], &array
[i
]);
2504 unlock_user(array
, target_addr
, 0);
2509 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
2510 unsigned short **host_array
)
2513 unsigned short *array
;
2515 struct semid_ds semid_ds
;
2518 semun
.buf
= &semid_ds
;
2520 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
2522 return get_errno(ret
);
2524 nsems
= semid_ds
.sem_nsems
;
2526 array
= lock_user(VERIFY_WRITE
, target_addr
,
2527 nsems
*sizeof(unsigned short), 0);
2529 return -TARGET_EFAULT
;
2531 for(i
=0; i
<nsems
; i
++) {
2532 __put_user((*host_array
)[i
], &array
[i
]);
2535 unlock_user(array
, target_addr
, 1);
2540 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
2541 union target_semun target_su
)
2544 struct semid_ds dsarg
;
2545 unsigned short *array
= NULL
;
2546 struct seminfo seminfo
;
2547 abi_long ret
= -TARGET_EINVAL
;
2554 arg
.val
= tswap32(target_su
.val
);
2555 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2556 target_su
.val
= tswap32(arg
.val
);
2560 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
2564 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2565 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
2572 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
2576 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2577 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
2583 arg
.__buf
= &seminfo
;
2584 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2585 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
2593 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
2600 struct target_sembuf
{
2601 unsigned short sem_num
;
2606 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
2607 abi_ulong target_addr
,
2610 struct target_sembuf
*target_sembuf
;
2613 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
2614 nsops
*sizeof(struct target_sembuf
), 1);
2616 return -TARGET_EFAULT
;
2618 for(i
=0; i
<nsops
; i
++) {
2619 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
2620 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
2621 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
2624 unlock_user(target_sembuf
, target_addr
, 0);
2629 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
2631 struct sembuf sops
[nsops
];
2633 if (target_to_host_sembuf(sops
, ptr
, nsops
))
2634 return -TARGET_EFAULT
;
2636 return get_errno(semop(semid
, sops
, nsops
));
2639 struct target_msqid_ds
2641 struct target_ipc_perm msg_perm
;
2642 abi_ulong msg_stime
;
2643 #if TARGET_ABI_BITS == 32
2644 abi_ulong __unused1
;
2646 abi_ulong msg_rtime
;
2647 #if TARGET_ABI_BITS == 32
2648 abi_ulong __unused2
;
2650 abi_ulong msg_ctime
;
2651 #if TARGET_ABI_BITS == 32
2652 abi_ulong __unused3
;
2654 abi_ulong __msg_cbytes
;
2656 abi_ulong msg_qbytes
;
2657 abi_ulong msg_lspid
;
2658 abi_ulong msg_lrpid
;
2659 abi_ulong __unused4
;
2660 abi_ulong __unused5
;
2663 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
2664 abi_ulong target_addr
)
2666 struct target_msqid_ds
*target_md
;
2668 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
2669 return -TARGET_EFAULT
;
2670 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
2671 return -TARGET_EFAULT
;
2672 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
2673 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
2674 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
2675 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
2676 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
2677 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
2678 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
2679 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
2680 unlock_user_struct(target_md
, target_addr
, 0);
2684 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
2685 struct msqid_ds
*host_md
)
2687 struct target_msqid_ds
*target_md
;
2689 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
2690 return -TARGET_EFAULT
;
2691 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
2692 return -TARGET_EFAULT
;
2693 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
2694 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
2695 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
2696 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
2697 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
2698 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
2699 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
2700 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
2701 unlock_user_struct(target_md
, target_addr
, 1);
2705 struct target_msginfo
{
2713 unsigned short int msgseg
;
2716 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
2717 struct msginfo
*host_msginfo
)
2719 struct target_msginfo
*target_msginfo
;
2720 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
2721 return -TARGET_EFAULT
;
2722 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
2723 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
2724 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
2725 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
2726 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
2727 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
2728 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
2729 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
2730 unlock_user_struct(target_msginfo
, target_addr
, 1);
2734 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
2736 struct msqid_ds dsarg
;
2737 struct msginfo msginfo
;
2738 abi_long ret
= -TARGET_EINVAL
;
2746 if (target_to_host_msqid_ds(&dsarg
,ptr
))
2747 return -TARGET_EFAULT
;
2748 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
2749 if (host_to_target_msqid_ds(ptr
,&dsarg
))
2750 return -TARGET_EFAULT
;
2753 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
2757 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
2758 if (host_to_target_msginfo(ptr
, &msginfo
))
2759 return -TARGET_EFAULT
;
2766 struct target_msgbuf
{
2771 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
2772 unsigned int msgsz
, int msgflg
)
2774 struct target_msgbuf
*target_mb
;
2775 struct msgbuf
*host_mb
;
2778 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
2779 return -TARGET_EFAULT
;
2780 host_mb
= malloc(msgsz
+sizeof(long));
2781 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
2782 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
2783 ret
= get_errno(msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
2785 unlock_user_struct(target_mb
, msgp
, 0);
2790 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
2791 unsigned int msgsz
, abi_long msgtyp
,
2794 struct target_msgbuf
*target_mb
;
2796 struct msgbuf
*host_mb
;
2799 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
2800 return -TARGET_EFAULT
;
2802 host_mb
= g_malloc(msgsz
+sizeof(long));
2803 ret
= get_errno(msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
2806 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
2807 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
2808 if (!target_mtext
) {
2809 ret
= -TARGET_EFAULT
;
2812 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
2813 unlock_user(target_mtext
, target_mtext_addr
, ret
);
2816 target_mb
->mtype
= tswapal(host_mb
->mtype
);
2820 unlock_user_struct(target_mb
, msgp
, 1);
2825 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
2826 abi_ulong target_addr
)
2828 struct target_shmid_ds
*target_sd
;
2830 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2831 return -TARGET_EFAULT
;
2832 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
2833 return -TARGET_EFAULT
;
2834 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
2835 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
2836 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
2837 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
2838 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
2839 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
2840 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
2841 unlock_user_struct(target_sd
, target_addr
, 0);
2845 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
2846 struct shmid_ds
*host_sd
)
2848 struct target_shmid_ds
*target_sd
;
2850 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2851 return -TARGET_EFAULT
;
2852 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
2853 return -TARGET_EFAULT
;
2854 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
2855 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
2856 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
2857 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
2858 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
2859 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
2860 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
2861 unlock_user_struct(target_sd
, target_addr
, 1);
2865 struct target_shminfo
{
2873 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
2874 struct shminfo
*host_shminfo
)
2876 struct target_shminfo
*target_shminfo
;
2877 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
2878 return -TARGET_EFAULT
;
2879 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
2880 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
2881 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
2882 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
2883 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
2884 unlock_user_struct(target_shminfo
, target_addr
, 1);
2888 struct target_shm_info
{
2893 abi_ulong swap_attempts
;
2894 abi_ulong swap_successes
;
2897 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
2898 struct shm_info
*host_shm_info
)
2900 struct target_shm_info
*target_shm_info
;
2901 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
2902 return -TARGET_EFAULT
;
2903 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
2904 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
2905 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
2906 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
2907 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
2908 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
2909 unlock_user_struct(target_shm_info
, target_addr
, 1);
2913 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
2915 struct shmid_ds dsarg
;
2916 struct shminfo shminfo
;
2917 struct shm_info shm_info
;
2918 abi_long ret
= -TARGET_EINVAL
;
2926 if (target_to_host_shmid_ds(&dsarg
, buf
))
2927 return -TARGET_EFAULT
;
2928 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
2929 if (host_to_target_shmid_ds(buf
, &dsarg
))
2930 return -TARGET_EFAULT
;
2933 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
2934 if (host_to_target_shminfo(buf
, &shminfo
))
2935 return -TARGET_EFAULT
;
2938 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
2939 if (host_to_target_shm_info(buf
, &shm_info
))
2940 return -TARGET_EFAULT
;
2945 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
2952 static inline abi_ulong
do_shmat(int shmid
, abi_ulong shmaddr
, int shmflg
)
2956 struct shmid_ds shm_info
;
2959 /* find out the length of the shared memory segment */
2960 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
2961 if (is_error(ret
)) {
2962 /* can't get length, bail out */
2969 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
2971 abi_ulong mmap_start
;
2973 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
2975 if (mmap_start
== -1) {
2977 host_raddr
= (void *)-1;
2979 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
2982 if (host_raddr
== (void *)-1) {
2984 return get_errno((long)host_raddr
);
2986 raddr
=h2g((unsigned long)host_raddr
);
2988 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
2989 PAGE_VALID
| PAGE_READ
|
2990 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
2992 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
2993 if (shm_regions
[i
].start
== 0) {
2994 shm_regions
[i
].start
= raddr
;
2995 shm_regions
[i
].size
= shm_info
.shm_segsz
;
3005 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
3009 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
3010 if (shm_regions
[i
].start
== shmaddr
) {
3011 shm_regions
[i
].start
= 0;
3012 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
3017 return get_errno(shmdt(g2h(shmaddr
)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos.
 *
 * Demultiplexer for the legacy ipc(2) syscall: the low 16 bits of `call`
 * select the operation, the high 16 bits carry the interface version.
 */
static abi_long do_ipc(unsigned int call, int first,
                       int second, int third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl:
        ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
        break;

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
        {
            /* Old-style msgrcv passes msgp/msgtyp indirectly through a
             * kludge structure in guest memory. */
            struct target_ipc_kludge {
                abi_long msgp;
                abi_long msgtyp;
            } *tmp;

            if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                ret = -TARGET_EFAULT;
                break;
            }

            ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                            tswapal(tmp->msgtyp), third);

            unlock_user_struct(tmp, ptr, 0);
            break;
        }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr)) {
                return get_errno(raddr);
            }
            /* The attach address is returned through *third. */
            if (put_user_ual(raddr, third)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;

    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;

    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
3121 /* kernel structure types definitions */
3123 #define STRUCT(name, ...) STRUCT_ ## name,
3124 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3126 #include "syscall_types.h"
3129 #undef STRUCT_SPECIAL
3131 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3132 #define STRUCT_SPECIAL(name)
3133 #include "syscall_types.h"
3135 #undef STRUCT_SPECIAL
3137 typedef struct IOCTLEntry IOCTLEntry
;
3139 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3140 int fd
, abi_long cmd
, abi_long arg
);
3143 unsigned int target_cmd
;
3144 unsigned int host_cmd
;
3147 do_ioctl_fn
*do_ioctl
;
3148 const argtype arg_type
[5];
3151 #define IOC_R 0x0001
3152 #define IOC_W 0x0002
3153 #define IOC_RW (IOC_R | IOC_W)
3155 #define MAX_STRUCT_SIZE 4096
3157 #ifdef CONFIG_FIEMAP
3158 /* So fiemap access checks don't overflow on 32 bit systems.
3159 * This is very slightly smaller than the limit imposed by
3160 * the underlying kernel.
3162 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3163 / sizeof(struct fiemap_extent))
3165 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3166 int fd
, abi_long cmd
, abi_long arg
)
3168 /* The parameter for this ioctl is a struct fiemap followed
3169 * by an array of struct fiemap_extent whose size is set
3170 * in fiemap->fm_extent_count. The array is filled in by the
3173 int target_size_in
, target_size_out
;
3175 const argtype
*arg_type
= ie
->arg_type
;
3176 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
3179 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
3183 assert(arg_type
[0] == TYPE_PTR
);
3184 assert(ie
->access
== IOC_RW
);
3186 target_size_in
= thunk_type_size(arg_type
, 0);
3187 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
3189 return -TARGET_EFAULT
;
3191 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3192 unlock_user(argptr
, arg
, 0);
3193 fm
= (struct fiemap
*)buf_temp
;
3194 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
3195 return -TARGET_EINVAL
;
3198 outbufsz
= sizeof (*fm
) +
3199 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
3201 if (outbufsz
> MAX_STRUCT_SIZE
) {
3202 /* We can't fit all the extents into the fixed size buffer.
3203 * Allocate one that is large enough and use it instead.
3205 fm
= malloc(outbufsz
);
3207 return -TARGET_ENOMEM
;
3209 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
3212 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, fm
));
3213 if (!is_error(ret
)) {
3214 target_size_out
= target_size_in
;
3215 /* An extent_count of 0 means we were only counting the extents
3216 * so there are no structs to copy
3218 if (fm
->fm_extent_count
!= 0) {
3219 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
3221 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
3223 ret
= -TARGET_EFAULT
;
3225 /* Convert the struct fiemap */
3226 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
3227 if (fm
->fm_extent_count
!= 0) {
3228 p
= argptr
+ target_size_in
;
3229 /* ...and then all the struct fiemap_extents */
3230 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
3231 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
3236 unlock_user(argptr
, arg
, target_size_out
);
3246 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3247 int fd
, abi_long cmd
, abi_long arg
)
3249 const argtype
*arg_type
= ie
->arg_type
;
3253 struct ifconf
*host_ifconf
;
3255 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
3256 int target_ifreq_size
;
3261 abi_long target_ifc_buf
;
3265 assert(arg_type
[0] == TYPE_PTR
);
3266 assert(ie
->access
== IOC_RW
);
3269 target_size
= thunk_type_size(arg_type
, 0);
3271 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3273 return -TARGET_EFAULT
;
3274 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3275 unlock_user(argptr
, arg
, 0);
3277 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
3278 target_ifc_len
= host_ifconf
->ifc_len
;
3279 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
3281 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
3282 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
3283 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
3285 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
3286 if (outbufsz
> MAX_STRUCT_SIZE
) {
3287 /* We can't fit all the extents into the fixed size buffer.
3288 * Allocate one that is large enough and use it instead.
3290 host_ifconf
= malloc(outbufsz
);
3292 return -TARGET_ENOMEM
;
3294 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
3297 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
3299 host_ifconf
->ifc_len
= host_ifc_len
;
3300 host_ifconf
->ifc_buf
= host_ifc_buf
;
3302 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, host_ifconf
));
3303 if (!is_error(ret
)) {
3304 /* convert host ifc_len to target ifc_len */
3306 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
3307 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
3308 host_ifconf
->ifc_len
= target_ifc_len
;
3310 /* restore target ifc_buf */
3312 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
3314 /* copy struct ifconf to target user */
3316 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3318 return -TARGET_EFAULT
;
3319 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
3320 unlock_user(argptr
, arg
, target_size
);
3322 /* copy ifreq[] to target user */
3324 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
3325 for (i
= 0; i
< nb_ifreq
; i
++) {
3326 thunk_convert(argptr
+ i
* target_ifreq_size
,
3327 host_ifc_buf
+ i
* sizeof(struct ifreq
),
3328 ifreq_arg_type
, THUNK_TARGET
);
3330 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
3340 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
3341 abi_long cmd
, abi_long arg
)
3344 struct dm_ioctl
*host_dm
;
3345 abi_long guest_data
;
3346 uint32_t guest_data_size
;
3348 const argtype
*arg_type
= ie
->arg_type
;
3350 void *big_buf
= NULL
;
3354 target_size
= thunk_type_size(arg_type
, 0);
3355 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3357 ret
= -TARGET_EFAULT
;
3360 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3361 unlock_user(argptr
, arg
, 0);
3363 /* buf_temp is too small, so fetch things into a bigger buffer */
3364 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
3365 memcpy(big_buf
, buf_temp
, target_size
);
3369 guest_data
= arg
+ host_dm
->data_start
;
3370 if ((guest_data
- arg
) < 0) {
3374 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
3375 host_data
= (char*)host_dm
+ host_dm
->data_start
;
3377 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
3378 switch (ie
->host_cmd
) {
3380 case DM_LIST_DEVICES
:
3383 case DM_DEV_SUSPEND
:
3386 case DM_TABLE_STATUS
:
3387 case DM_TABLE_CLEAR
:
3389 case DM_LIST_VERSIONS
:
3393 case DM_DEV_SET_GEOMETRY
:
3394 /* data contains only strings */
3395 memcpy(host_data
, argptr
, guest_data_size
);
3398 memcpy(host_data
, argptr
, guest_data_size
);
3399 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
3403 void *gspec
= argptr
;
3404 void *cur_data
= host_data
;
3405 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
3406 int spec_size
= thunk_type_size(arg_type
, 0);
3409 for (i
= 0; i
< host_dm
->target_count
; i
++) {
3410 struct dm_target_spec
*spec
= cur_data
;
3414 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
3415 slen
= strlen((char*)gspec
+ spec_size
) + 1;
3417 spec
->next
= sizeof(*spec
) + slen
;
3418 strcpy((char*)&spec
[1], gspec
+ spec_size
);
3420 cur_data
+= spec
->next
;
3425 ret
= -TARGET_EINVAL
;
3428 unlock_user(argptr
, guest_data
, 0);
3430 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3431 if (!is_error(ret
)) {
3432 guest_data
= arg
+ host_dm
->data_start
;
3433 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
3434 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
3435 switch (ie
->host_cmd
) {
3440 case DM_DEV_SUSPEND
:
3443 case DM_TABLE_CLEAR
:
3445 case DM_DEV_SET_GEOMETRY
:
3446 /* no return data */
3448 case DM_LIST_DEVICES
:
3450 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
3451 uint32_t remaining_data
= guest_data_size
;
3452 void *cur_data
= argptr
;
3453 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
3454 int nl_size
= 12; /* can't use thunk_size due to alignment */
3457 uint32_t next
= nl
->next
;
3459 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
3461 if (remaining_data
< nl
->next
) {
3462 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3465 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
3466 strcpy(cur_data
+ nl_size
, nl
->name
);
3467 cur_data
+= nl
->next
;
3468 remaining_data
-= nl
->next
;
3472 nl
= (void*)nl
+ next
;
3477 case DM_TABLE_STATUS
:
3479 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
3480 void *cur_data
= argptr
;
3481 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
3482 int spec_size
= thunk_type_size(arg_type
, 0);
3485 for (i
= 0; i
< host_dm
->target_count
; i
++) {
3486 uint32_t next
= spec
->next
;
3487 int slen
= strlen((char*)&spec
[1]) + 1;
3488 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
3489 if (guest_data_size
< spec
->next
) {
3490 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3493 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
3494 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
3495 cur_data
= argptr
+ spec
->next
;
3496 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
3502 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
3503 int count
= *(uint32_t*)hdata
;
3504 uint64_t *hdev
= hdata
+ 8;
3505 uint64_t *gdev
= argptr
+ 8;
3508 *(uint32_t*)argptr
= tswap32(count
);
3509 for (i
= 0; i
< count
; i
++) {
3510 *gdev
= tswap64(*hdev
);
3516 case DM_LIST_VERSIONS
:
3518 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
3519 uint32_t remaining_data
= guest_data_size
;
3520 void *cur_data
= argptr
;
3521 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
3522 int vers_size
= thunk_type_size(arg_type
, 0);
3525 uint32_t next
= vers
->next
;
3527 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
3529 if (remaining_data
< vers
->next
) {
3530 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3533 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
3534 strcpy(cur_data
+ vers_size
, vers
->name
);
3535 cur_data
+= vers
->next
;
3536 remaining_data
-= vers
->next
;
3540 vers
= (void*)vers
+ next
;
3545 ret
= -TARGET_EINVAL
;
3548 unlock_user(argptr
, guest_data
, guest_data_size
);
3550 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3552 ret
= -TARGET_EFAULT
;
3555 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3556 unlock_user(argptr
, arg
, target_size
);
3563 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3564 int fd
, abi_long cmd
, abi_long arg
)
3566 const argtype
*arg_type
= ie
->arg_type
;
3567 const StructEntry
*se
;
3568 const argtype
*field_types
;
3569 const int *dst_offsets
, *src_offsets
;
3572 abi_ulong
*target_rt_dev_ptr
;
3573 unsigned long *host_rt_dev_ptr
;
3577 assert(ie
->access
== IOC_W
);
3578 assert(*arg_type
== TYPE_PTR
);
3580 assert(*arg_type
== TYPE_STRUCT
);
3581 target_size
= thunk_type_size(arg_type
, 0);
3582 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3584 return -TARGET_EFAULT
;
3587 assert(*arg_type
== (int)STRUCT_rtentry
);
3588 se
= struct_entries
+ *arg_type
++;
3589 assert(se
->convert
[0] == NULL
);
3590 /* convert struct here to be able to catch rt_dev string */
3591 field_types
= se
->field_types
;
3592 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
3593 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
3594 for (i
= 0; i
< se
->nb_fields
; i
++) {
3595 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
3596 assert(*field_types
== TYPE_PTRVOID
);
3597 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
3598 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
3599 if (*target_rt_dev_ptr
!= 0) {
3600 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
3601 tswapal(*target_rt_dev_ptr
));
3602 if (!*host_rt_dev_ptr
) {
3603 unlock_user(argptr
, arg
, 0);
3604 return -TARGET_EFAULT
;
3607 *host_rt_dev_ptr
= 0;
3612 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
3613 argptr
+ src_offsets
[i
],
3614 field_types
, THUNK_HOST
);
3616 unlock_user(argptr
, arg
, 0);
3618 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3619 if (*host_rt_dev_ptr
!= 0) {
3620 unlock_user((void *)*host_rt_dev_ptr
,
3621 *target_rt_dev_ptr
, 0);
3626 static IOCTLEntry ioctl_entries
[] = {
3627 #define IOCTL(cmd, access, ...) \
3628 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3629 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3630 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3635 /* ??? Implement proper locking for ioctls. */
3636 /* do_ioctl() Must return target values and target errnos. */
3637 static abi_long
do_ioctl(int fd
, abi_long cmd
, abi_long arg
)
3639 const IOCTLEntry
*ie
;
3640 const argtype
*arg_type
;
3642 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
3648 if (ie
->target_cmd
== 0) {
3649 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
3650 return -TARGET_ENOSYS
;
3652 if (ie
->target_cmd
== cmd
)
3656 arg_type
= ie
->arg_type
;
3658 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
3661 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
3664 switch(arg_type
[0]) {
3667 ret
= get_errno(ioctl(fd
, ie
->host_cmd
));
3672 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, arg
));
3676 target_size
= thunk_type_size(arg_type
, 0);
3677 switch(ie
->access
) {
3679 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3680 if (!is_error(ret
)) {
3681 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3683 return -TARGET_EFAULT
;
3684 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3685 unlock_user(argptr
, arg
, target_size
);
3689 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3691 return -TARGET_EFAULT
;
3692 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3693 unlock_user(argptr
, arg
, 0);
3694 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3698 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3700 return -TARGET_EFAULT
;
3701 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3702 unlock_user(argptr
, arg
, 0);
3703 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3704 if (!is_error(ret
)) {
3705 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3707 return -TARGET_EFAULT
;
3708 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3709 unlock_user(argptr
, arg
, target_size
);
3715 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3716 (long)cmd
, arg_type
[0]);
3717 ret
= -TARGET_ENOSYS
;
3723 static const bitmask_transtbl iflag_tbl
[] = {
3724 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
3725 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
3726 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
3727 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
3728 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
3729 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
3730 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
3731 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
3732 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
3733 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
3734 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
3735 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
3736 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
3737 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
3741 static const bitmask_transtbl oflag_tbl
[] = {
3742 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
3743 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
3744 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
3745 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
3746 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
3747 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
3748 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
3749 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
3750 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
3751 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
3752 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
3753 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
3754 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
3755 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
3756 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
3757 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
3758 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
3759 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
3760 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
3761 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
3762 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
3763 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
3764 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
3765 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
3769 static const bitmask_transtbl cflag_tbl
[] = {
3770 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
3771 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
3772 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
3773 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
3774 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
3775 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
3776 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
3777 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
3778 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
3779 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
3780 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
3781 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
3782 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
3783 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
3784 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
3785 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
3786 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
3787 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
3788 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
3789 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
3790 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
3791 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
3792 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
3793 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
3794 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
3795 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
3796 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
3797 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
3798 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
3799 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
3800 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
3804 static const bitmask_transtbl lflag_tbl
[] = {
3805 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
3806 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
3807 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
3808 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
3809 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
3810 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
3811 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
3812 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
3813 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
3814 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
3815 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
3816 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
3817 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
3818 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
3819 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
3823 static void target_to_host_termios (void *dst
, const void *src
)
3825 struct host_termios
*host
= dst
;
3826 const struct target_termios
*target
= src
;
3829 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
3831 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
3833 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
3835 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
3836 host
->c_line
= target
->c_line
;
3838 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
3839 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
3840 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
3841 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
3842 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
3843 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
3844 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
3845 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
3846 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
3847 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
3848 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
3849 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
3850 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
3851 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
3852 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
3853 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
3854 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
3855 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
3858 static void host_to_target_termios (void *dst
, const void *src
)
3860 struct target_termios
*target
= dst
;
3861 const struct host_termios
*host
= src
;
3864 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
3866 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
3868 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
3870 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
3871 target
->c_line
= host
->c_line
;
3873 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
3874 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
3875 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
3876 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
3877 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
3878 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
3879 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
3880 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
3881 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
3882 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
3883 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
3884 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
3885 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
3886 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
3887 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
3888 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
3889 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
3890 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
3893 static const StructEntry struct_termios_def
= {
3894 .convert
= { host_to_target_termios
, target_to_host_termios
},
3895 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
3896 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
3899 static bitmask_transtbl mmap_flags_tbl
[] = {
3900 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
3901 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
3902 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
3903 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
, MAP_ANONYMOUS
, MAP_ANONYMOUS
},
3904 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
, MAP_GROWSDOWN
, MAP_GROWSDOWN
},
3905 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
, MAP_DENYWRITE
, MAP_DENYWRITE
},
3906 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
, MAP_EXECUTABLE
, MAP_EXECUTABLE
},
3907 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
3911 #if defined(TARGET_I386)
3913 /* NOTE: there is really one LDT for all the threads */
3914 static uint8_t *ldt_table
;
3916 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
3923 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
3924 if (size
> bytecount
)
3926 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
3928 return -TARGET_EFAULT
;
3929 /* ??? Should this by byteswapped? */
3930 memcpy(p
, ldt_table
, size
);
3931 unlock_user(p
, ptr
, size
);
3935 /* XXX: add locking support */
3936 static abi_long
write_ldt(CPUX86State
*env
,
3937 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
3939 struct target_modify_ldt_ldt_s ldt_info
;
3940 struct target_modify_ldt_ldt_s
*target_ldt_info
;
3941 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
3942 int seg_not_present
, useable
, lm
;
3943 uint32_t *lp
, entry_1
, entry_2
;
3945 if (bytecount
!= sizeof(ldt_info
))
3946 return -TARGET_EINVAL
;
3947 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
3948 return -TARGET_EFAULT
;
3949 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
3950 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
3951 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
3952 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
3953 unlock_user_struct(target_ldt_info
, ptr
, 0);
3955 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
3956 return -TARGET_EINVAL
;
3957 seg_32bit
= ldt_info
.flags
& 1;
3958 contents
= (ldt_info
.flags
>> 1) & 3;
3959 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
3960 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
3961 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
3962 useable
= (ldt_info
.flags
>> 6) & 1;
3966 lm
= (ldt_info
.flags
>> 7) & 1;
3968 if (contents
== 3) {
3970 return -TARGET_EINVAL
;
3971 if (seg_not_present
== 0)
3972 return -TARGET_EINVAL
;
3974 /* allocate the LDT */
3976 env
->ldt
.base
= target_mmap(0,
3977 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
3978 PROT_READ
|PROT_WRITE
,
3979 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
3980 if (env
->ldt
.base
== -1)
3981 return -TARGET_ENOMEM
;
3982 memset(g2h(env
->ldt
.base
), 0,
3983 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
3984 env
->ldt
.limit
= 0xffff;
3985 ldt_table
= g2h(env
->ldt
.base
);
3988 /* NOTE: same code as Linux kernel */
3989 /* Allow LDTs to be cleared by the user. */
3990 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
3993 read_exec_only
== 1 &&
3995 limit_in_pages
== 0 &&
3996 seg_not_present
== 1 &&
4004 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
4005 (ldt_info
.limit
& 0x0ffff);
4006 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
4007 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
4008 (ldt_info
.limit
& 0xf0000) |
4009 ((read_exec_only
^ 1) << 9) |
4011 ((seg_not_present
^ 1) << 15) |
4013 (limit_in_pages
<< 23) |
4017 entry_2
|= (useable
<< 20);
4019 /* Install the new entry ... */
4021 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
4022 lp
[0] = tswap32(entry_1
);
4023 lp
[1] = tswap32(entry_2
);
4027 /* specific and weird i386 syscalls */
4028 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
4029 unsigned long bytecount
)
4035 ret
= read_ldt(ptr
, bytecount
);
4038 ret
= write_ldt(env
, ptr
, bytecount
, 1);
4041 ret
= write_ldt(env
, ptr
, bytecount
, 0);
4044 ret
= -TARGET_ENOSYS
;
4050 #if defined(TARGET_I386) && defined(TARGET_ABI32)
4051 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
4053 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
4054 struct target_modify_ldt_ldt_s ldt_info
;
4055 struct target_modify_ldt_ldt_s
*target_ldt_info
;
4056 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
4057 int seg_not_present
, useable
, lm
;
4058 uint32_t *lp
, entry_1
, entry_2
;
4061 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
4062 if (!target_ldt_info
)
4063 return -TARGET_EFAULT
;
4064 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
4065 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
4066 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
4067 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
4068 if (ldt_info
.entry_number
== -1) {
4069 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
4070 if (gdt_table
[i
] == 0) {
4071 ldt_info
.entry_number
= i
;
4072 target_ldt_info
->entry_number
= tswap32(i
);
4077 unlock_user_struct(target_ldt_info
, ptr
, 1);
4079 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
4080 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
4081 return -TARGET_EINVAL
;
4082 seg_32bit
= ldt_info
.flags
& 1;
4083 contents
= (ldt_info
.flags
>> 1) & 3;
4084 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
4085 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
4086 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
4087 useable
= (ldt_info
.flags
>> 6) & 1;
4091 lm
= (ldt_info
.flags
>> 7) & 1;
4094 if (contents
== 3) {
4095 if (seg_not_present
== 0)
4096 return -TARGET_EINVAL
;
4099 /* NOTE: same code as Linux kernel */
4100 /* Allow LDTs to be cleared by the user. */
4101 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
4102 if ((contents
== 0 &&
4103 read_exec_only
== 1 &&
4105 limit_in_pages
== 0 &&
4106 seg_not_present
== 1 &&
4114 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
4115 (ldt_info
.limit
& 0x0ffff);
4116 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
4117 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
4118 (ldt_info
.limit
& 0xf0000) |
4119 ((read_exec_only
^ 1) << 9) |
4121 ((seg_not_present
^ 1) << 15) |
4123 (limit_in_pages
<< 23) |
4128 /* Install the new entry ... */
4130 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
4131 lp
[0] = tswap32(entry_1
);
4132 lp
[1] = tswap32(entry_2
);
4136 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
4138 struct target_modify_ldt_ldt_s
*target_ldt_info
;
4139 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
4140 uint32_t base_addr
, limit
, flags
;
4141 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
4142 int seg_not_present
, useable
, lm
;
4143 uint32_t *lp
, entry_1
, entry_2
;
4145 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
4146 if (!target_ldt_info
)
4147 return -TARGET_EFAULT
;
4148 idx
= tswap32(target_ldt_info
->entry_number
);
4149 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
4150 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
4151 unlock_user_struct(target_ldt_info
, ptr
, 1);
4152 return -TARGET_EINVAL
;
4154 lp
= (uint32_t *)(gdt_table
+ idx
);
4155 entry_1
= tswap32(lp
[0]);
4156 entry_2
= tswap32(lp
[1]);
4158 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
4159 contents
= (entry_2
>> 10) & 3;
4160 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
4161 seg_32bit
= (entry_2
>> 22) & 1;
4162 limit_in_pages
= (entry_2
>> 23) & 1;
4163 useable
= (entry_2
>> 20) & 1;
4167 lm
= (entry_2
>> 21) & 1;
4169 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
4170 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
4171 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
4172 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
4173 base_addr
= (entry_1
>> 16) |
4174 (entry_2
& 0xff000000) |
4175 ((entry_2
& 0xff) << 16);
4176 target_ldt_info
->base_addr
= tswapal(base_addr
);
4177 target_ldt_info
->limit
= tswap32(limit
);
4178 target_ldt_info
->flags
= tswap32(flags
);
4179 unlock_user_struct(target_ldt_info
, ptr
, 1);
4182 #endif /* TARGET_I386 && TARGET_ABI32 */
4184 #ifndef TARGET_ABI32
4185 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
4192 case TARGET_ARCH_SET_GS
:
4193 case TARGET_ARCH_SET_FS
:
4194 if (code
== TARGET_ARCH_SET_GS
)
4198 cpu_x86_load_seg(env
, idx
, 0);
4199 env
->segs
[idx
].base
= addr
;
4201 case TARGET_ARCH_GET_GS
:
4202 case TARGET_ARCH_GET_FS
:
4203 if (code
== TARGET_ARCH_GET_GS
)
4207 val
= env
->segs
[idx
].base
;
4208 if (put_user(val
, addr
, abi_ulong
))
4209 ret
= -TARGET_EFAULT
;
4212 ret
= -TARGET_EINVAL
;
4219 #endif /* defined(TARGET_I386) */
4221 #define NEW_STACK_SIZE 0x40000
4224 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
4227 pthread_mutex_t mutex
;
4228 pthread_cond_t cond
;
4231 abi_ulong child_tidptr
;
4232 abi_ulong parent_tidptr
;
4236 static void *clone_func(void *arg
)
4238 new_thread_info
*info
= arg
;
4244 cpu
= ENV_GET_CPU(env
);
4246 ts
= (TaskState
*)env
->opaque
;
4247 info
->tid
= gettid();
4248 cpu
->host_tid
= info
->tid
;
4250 if (info
->child_tidptr
)
4251 put_user_u32(info
->tid
, info
->child_tidptr
);
4252 if (info
->parent_tidptr
)
4253 put_user_u32(info
->tid
, info
->parent_tidptr
);
4254 /* Enable signals. */
4255 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
4256 /* Signal to the parent that we're ready. */
4257 pthread_mutex_lock(&info
->mutex
);
4258 pthread_cond_broadcast(&info
->cond
);
4259 pthread_mutex_unlock(&info
->mutex
);
4260 /* Wait until the parent has finshed initializing the tls state. */
4261 pthread_mutex_lock(&clone_lock
);
4262 pthread_mutex_unlock(&clone_lock
);
4268 /* do_fork() Must return host values and target errnos (unlike most
4269 do_*() functions). */
4270 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
4271 abi_ulong parent_tidptr
, target_ulong newtls
,
4272 abi_ulong child_tidptr
)
4276 CPUArchState
*new_env
;
4277 unsigned int nptl_flags
;
4280 /* Emulate vfork() with fork() */
4281 if (flags
& CLONE_VFORK
)
4282 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
4284 if (flags
& CLONE_VM
) {
4285 TaskState
*parent_ts
= (TaskState
*)env
->opaque
;
4286 new_thread_info info
;
4287 pthread_attr_t attr
;
4289 ts
= g_malloc0(sizeof(TaskState
));
4290 init_task_state(ts
);
4291 /* we create a new CPU instance. */
4292 new_env
= cpu_copy(env
);
4293 /* Init regs that differ from the parent. */
4294 cpu_clone_regs(new_env
, newsp
);
4295 new_env
->opaque
= ts
;
4296 ts
->bprm
= parent_ts
->bprm
;
4297 ts
->info
= parent_ts
->info
;
4299 flags
&= ~CLONE_NPTL_FLAGS2
;
4301 if (nptl_flags
& CLONE_CHILD_CLEARTID
) {
4302 ts
->child_tidptr
= child_tidptr
;
4305 if (nptl_flags
& CLONE_SETTLS
)
4306 cpu_set_tls (new_env
, newtls
);
4308 /* Grab a mutex so that thread setup appears atomic. */
4309 pthread_mutex_lock(&clone_lock
);
4311 memset(&info
, 0, sizeof(info
));
4312 pthread_mutex_init(&info
.mutex
, NULL
);
4313 pthread_mutex_lock(&info
.mutex
);
4314 pthread_cond_init(&info
.cond
, NULL
);
4316 if (nptl_flags
& CLONE_CHILD_SETTID
)
4317 info
.child_tidptr
= child_tidptr
;
4318 if (nptl_flags
& CLONE_PARENT_SETTID
)
4319 info
.parent_tidptr
= parent_tidptr
;
4321 ret
= pthread_attr_init(&attr
);
4322 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
4323 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
4324 /* It is not safe to deliver signals until the child has finished
4325 initializing, so temporarily block all signals. */
4326 sigfillset(&sigmask
);
4327 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
4329 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
4330 /* TODO: Free new CPU state if thread creation failed. */
4332 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
4333 pthread_attr_destroy(&attr
);
4335 /* Wait for the child to initialize. */
4336 pthread_cond_wait(&info
.cond
, &info
.mutex
);
4338 if (flags
& CLONE_PARENT_SETTID
)
4339 put_user_u32(ret
, parent_tidptr
);
4343 pthread_mutex_unlock(&info
.mutex
);
4344 pthread_cond_destroy(&info
.cond
);
4345 pthread_mutex_destroy(&info
.mutex
);
4346 pthread_mutex_unlock(&clone_lock
);
4348 /* if no CLONE_VM, we consider it is a fork */
4349 if ((flags
& ~(CSIGNAL
| CLONE_NPTL_FLAGS2
)) != 0)
4354 /* Child Process. */
4355 cpu_clone_regs(env
, newsp
);
4357 /* There is a race condition here. The parent process could
4358 theoretically read the TID in the child process before the child
4359 tid is set. This would require using either ptrace
4360 (not implemented) or having *_tidptr to point at a shared memory
4361 mapping. We can't repeat the spinlock hack used above because
4362 the child process gets its own copy of the lock. */
4363 if (flags
& CLONE_CHILD_SETTID
)
4364 put_user_u32(gettid(), child_tidptr
);
4365 if (flags
& CLONE_PARENT_SETTID
)
4366 put_user_u32(gettid(), parent_tidptr
);
4367 ts
= (TaskState
*)env
->opaque
;
4368 if (flags
& CLONE_SETTLS
)
4369 cpu_set_tls (env
, newtls
);
4370 if (flags
& CLONE_CHILD_CLEARTID
)
4371 ts
->child_tidptr
= child_tidptr
;
4379 /* warning : doesn't handle linux specific flags... */
4380 static int target_to_host_fcntl_cmd(int cmd
)
4383 case TARGET_F_DUPFD
:
4384 case TARGET_F_GETFD
:
4385 case TARGET_F_SETFD
:
4386 case TARGET_F_GETFL
:
4387 case TARGET_F_SETFL
:
4389 case TARGET_F_GETLK
:
4391 case TARGET_F_SETLK
:
4393 case TARGET_F_SETLKW
:
4395 case TARGET_F_GETOWN
:
4397 case TARGET_F_SETOWN
:
4399 case TARGET_F_GETSIG
:
4401 case TARGET_F_SETSIG
:
4403 #if TARGET_ABI_BITS == 32
4404 case TARGET_F_GETLK64
:
4406 case TARGET_F_SETLK64
:
4408 case TARGET_F_SETLKW64
:
4411 case TARGET_F_SETLEASE
:
4413 case TARGET_F_GETLEASE
:
4415 #ifdef F_DUPFD_CLOEXEC
4416 case TARGET_F_DUPFD_CLOEXEC
:
4417 return F_DUPFD_CLOEXEC
;
4419 case TARGET_F_NOTIFY
:
4422 return -TARGET_EINVAL
;
4424 return -TARGET_EINVAL
;
4427 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
4428 static const bitmask_transtbl flock_tbl
[] = {
4429 TRANSTBL_CONVERT(F_RDLCK
),
4430 TRANSTBL_CONVERT(F_WRLCK
),
4431 TRANSTBL_CONVERT(F_UNLCK
),
4432 TRANSTBL_CONVERT(F_EXLCK
),
4433 TRANSTBL_CONVERT(F_SHLCK
),
4437 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
4440 struct target_flock
*target_fl
;
4441 struct flock64 fl64
;
4442 struct target_flock64
*target_fl64
;
4444 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
4446 if (host_cmd
== -TARGET_EINVAL
)
4450 case TARGET_F_GETLK
:
4451 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
4452 return -TARGET_EFAULT
;
4454 target_to_host_bitmask(tswap16(target_fl
->l_type
), flock_tbl
);
4455 fl
.l_whence
= tswap16(target_fl
->l_whence
);
4456 fl
.l_start
= tswapal(target_fl
->l_start
);
4457 fl
.l_len
= tswapal(target_fl
->l_len
);
4458 fl
.l_pid
= tswap32(target_fl
->l_pid
);
4459 unlock_user_struct(target_fl
, arg
, 0);
4460 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
4462 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg
, 0))
4463 return -TARGET_EFAULT
;
4465 host_to_target_bitmask(tswap16(fl
.l_type
), flock_tbl
);
4466 target_fl
->l_whence
= tswap16(fl
.l_whence
);
4467 target_fl
->l_start
= tswapal(fl
.l_start
);
4468 target_fl
->l_len
= tswapal(fl
.l_len
);
4469 target_fl
->l_pid
= tswap32(fl
.l_pid
);
4470 unlock_user_struct(target_fl
, arg
, 1);
4474 case TARGET_F_SETLK
:
4475 case TARGET_F_SETLKW
:
4476 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
4477 return -TARGET_EFAULT
;
4479 target_to_host_bitmask(tswap16(target_fl
->l_type
), flock_tbl
);
4480 fl
.l_whence
= tswap16(target_fl
->l_whence
);
4481 fl
.l_start
= tswapal(target_fl
->l_start
);
4482 fl
.l_len
= tswapal(target_fl
->l_len
);
4483 fl
.l_pid
= tswap32(target_fl
->l_pid
);
4484 unlock_user_struct(target_fl
, arg
, 0);
4485 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
4488 case TARGET_F_GETLK64
:
4489 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
4490 return -TARGET_EFAULT
;
4492 target_to_host_bitmask(tswap16(target_fl64
->l_type
), flock_tbl
) >> 1;
4493 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
4494 fl64
.l_start
= tswap64(target_fl64
->l_start
);
4495 fl64
.l_len
= tswap64(target_fl64
->l_len
);
4496 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
4497 unlock_user_struct(target_fl64
, arg
, 0);
4498 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
4500 if (!lock_user_struct(VERIFY_WRITE
, target_fl64
, arg
, 0))
4501 return -TARGET_EFAULT
;
4502 target_fl64
->l_type
=
4503 host_to_target_bitmask(tswap16(fl64
.l_type
), flock_tbl
) >> 1;
4504 target_fl64
->l_whence
= tswap16(fl64
.l_whence
);
4505 target_fl64
->l_start
= tswap64(fl64
.l_start
);
4506 target_fl64
->l_len
= tswap64(fl64
.l_len
);
4507 target_fl64
->l_pid
= tswap32(fl64
.l_pid
);
4508 unlock_user_struct(target_fl64
, arg
, 1);
4511 case TARGET_F_SETLK64
:
4512 case TARGET_F_SETLKW64
:
4513 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
4514 return -TARGET_EFAULT
;
4516 target_to_host_bitmask(tswap16(target_fl64
->l_type
), flock_tbl
) >> 1;
4517 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
4518 fl64
.l_start
= tswap64(target_fl64
->l_start
);
4519 fl64
.l_len
= tswap64(target_fl64
->l_len
);
4520 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
4521 unlock_user_struct(target_fl64
, arg
, 0);
4522 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
4525 case TARGET_F_GETFL
:
4526 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
4528 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
4532 case TARGET_F_SETFL
:
4533 ret
= get_errno(fcntl(fd
, host_cmd
, target_to_host_bitmask(arg
, fcntl_flags_tbl
)));
4536 case TARGET_F_SETOWN
:
4537 case TARGET_F_GETOWN
:
4538 case TARGET_F_SETSIG
:
4539 case TARGET_F_GETSIG
:
4540 case TARGET_F_SETLEASE
:
4541 case TARGET_F_GETLEASE
:
4542 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
4546 ret
= get_errno(fcntl(fd
, cmd
, arg
));
4554 static inline int high2lowuid(int uid
)
4562 static inline int high2lowgid(int gid
)
4570 static inline int low2highuid(int uid
)
4572 if ((int16_t)uid
== -1)
4578 static inline int low2highgid(int gid
)
4580 if ((int16_t)gid
== -1)
4585 static inline int tswapid(int id
)
4590 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
4592 #else /* !USE_UID16 */
4593 static inline int high2lowuid(int uid
)
4597 static inline int high2lowgid(int gid
)
4601 static inline int low2highuid(int uid
)
4605 static inline int low2highgid(int gid
)
4609 static inline int tswapid(int id
)
4614 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
4616 #endif /* USE_UID16 */
4618 void syscall_init(void)
4621 const argtype
*arg_type
;
4625 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4626 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4627 #include "syscall_types.h"
4629 #undef STRUCT_SPECIAL
4631 /* Build target_to_host_errno_table[] table from
4632 * host_to_target_errno_table[]. */
4633 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
4634 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
4637 /* we patch the ioctl size if necessary. We rely on the fact that
4638 no ioctl has all the bits at '1' in the size field */
4640 while (ie
->target_cmd
!= 0) {
4641 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
4642 TARGET_IOC_SIZEMASK
) {
4643 arg_type
= ie
->arg_type
;
4644 if (arg_type
[0] != TYPE_PTR
) {
4645 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
4650 size
= thunk_type_size(arg_type
, 0);
4651 ie
->target_cmd
= (ie
->target_cmd
&
4652 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
4653 (size
<< TARGET_IOC_SIZESHIFT
);
4656 /* automatic consistency check if same arch */
4657 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4658 (defined(__x86_64__) && defined(TARGET_X86_64))
4659 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
4660 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4661 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
4668 #if TARGET_ABI_BITS == 32
4669 static inline uint64_t target_offset64(uint32_t word0
, uint32_t word1
)
4671 #ifdef TARGET_WORDS_BIGENDIAN
4672 return ((uint64_t)word0
<< 32) | word1
;
4674 return ((uint64_t)word1
<< 32) | word0
;
4677 #else /* TARGET_ABI_BITS == 32 */
4678 static inline uint64_t target_offset64(uint64_t word0
, uint64_t word1
)
4682 #endif /* TARGET_ABI_BITS != 32 */
4684 #ifdef TARGET_NR_truncate64
4685 static inline abi_long
target_truncate64(void *cpu_env
, const char *arg1
,
4690 if (regpairs_aligned(cpu_env
)) {
4694 return get_errno(truncate64(arg1
, target_offset64(arg2
, arg3
)));
4698 #ifdef TARGET_NR_ftruncate64
4699 static inline abi_long
target_ftruncate64(void *cpu_env
, abi_long arg1
,
4704 if (regpairs_aligned(cpu_env
)) {
4708 return get_errno(ftruncate64(arg1
, target_offset64(arg2
, arg3
)));
4712 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
4713 abi_ulong target_addr
)
4715 struct target_timespec
*target_ts
;
4717 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
4718 return -TARGET_EFAULT
;
4719 host_ts
->tv_sec
= tswapal(target_ts
->tv_sec
);
4720 host_ts
->tv_nsec
= tswapal(target_ts
->tv_nsec
);
4721 unlock_user_struct(target_ts
, target_addr
, 0);
4725 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
4726 struct timespec
*host_ts
)
4728 struct target_timespec
*target_ts
;
4730 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
4731 return -TARGET_EFAULT
;
4732 target_ts
->tv_sec
= tswapal(host_ts
->tv_sec
);
4733 target_ts
->tv_nsec
= tswapal(host_ts
->tv_nsec
);
4734 unlock_user_struct(target_ts
, target_addr
, 1);
4738 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
4739 abi_ulong target_addr
)
4741 struct target_itimerspec
*target_itspec
;
4743 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
4744 return -TARGET_EFAULT
;
4747 host_itspec
->it_interval
.tv_sec
=
4748 tswapal(target_itspec
->it_interval
.tv_sec
);
4749 host_itspec
->it_interval
.tv_nsec
=
4750 tswapal(target_itspec
->it_interval
.tv_nsec
);
4751 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
4752 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
4754 unlock_user_struct(target_itspec
, target_addr
, 1);
4758 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
4759 struct itimerspec
*host_its
)
4761 struct target_itimerspec
*target_itspec
;
4763 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
4764 return -TARGET_EFAULT
;
4767 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
4768 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
4770 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
4771 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
4773 unlock_user_struct(target_itspec
, target_addr
, 0);
4777 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
4778 static inline abi_long
host_to_target_stat64(void *cpu_env
,
4779 abi_ulong target_addr
,
4780 struct stat
*host_st
)
4782 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
4783 if (((CPUARMState
*)cpu_env
)->eabi
) {
4784 struct target_eabi_stat64
*target_st
;
4786 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
4787 return -TARGET_EFAULT
;
4788 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
4789 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
4790 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
4791 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4792 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
4794 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
4795 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
4796 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
4797 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
4798 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
4799 __put_user(host_st
->st_size
, &target_st
->st_size
);
4800 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
4801 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
4802 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
4803 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
4804 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
4805 unlock_user_struct(target_st
, target_addr
, 1);
4809 #if defined(TARGET_HAS_STRUCT_STAT64)
4810 struct target_stat64
*target_st
;
4812 struct target_stat
*target_st
;
4815 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
4816 return -TARGET_EFAULT
;
4817 memset(target_st
, 0, sizeof(*target_st
));
4818 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
4819 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
4820 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4821 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
4823 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
4824 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
4825 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
4826 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
4827 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
4828 /* XXX: better use of kernel struct */
4829 __put_user(host_st
->st_size
, &target_st
->st_size
);
4830 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
4831 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
4832 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
4833 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
4834 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
4835 unlock_user_struct(target_st
, target_addr
, 1);
4842 /* ??? Using host futex calls even when target atomic operations
4843 are not really atomic probably breaks things. However implementing
4844 futexes locally would make futexes shared between multiple processes
4845 tricky. However they're probably useless because guest atomic
4846 operations won't work either. */
4847 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
4848 target_ulong uaddr2
, int val3
)
4850 struct timespec ts
, *pts
;
4853 /* ??? We assume FUTEX_* constants are the same on both host
4855 #ifdef FUTEX_CMD_MASK
4856 base_op
= op
& FUTEX_CMD_MASK
;
4862 case FUTEX_WAIT_BITSET
:
4865 target_to_host_timespec(pts
, timeout
);
4869 return get_errno(sys_futex(g2h(uaddr
), op
, tswap32(val
),
4872 return get_errno(sys_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
4874 return get_errno(sys_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
4876 case FUTEX_CMP_REQUEUE
:
4878 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4879 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4880 But the prototype takes a `struct timespec *'; insert casts
4881 to satisfy the compiler. We do not need to tswap TIMEOUT
4882 since it's not compared to guest memory. */
4883 pts
= (struct timespec
*)(uintptr_t) timeout
;
4884 return get_errno(sys_futex(g2h(uaddr
), op
, val
, pts
,
4886 (base_op
== FUTEX_CMP_REQUEUE
4890 return -TARGET_ENOSYS
;
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* The terminating signal sits in the low 7 bits; translate it
         * and keep the remaining bits (e.g. the core-dump flag) as-is. */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* The stop signal sits in bits 8-15; translate it and keep the
         * low byte (the 0x7f "stopped" marker) unchanged. */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    /* Exited normally (or unrecognised status): pass through untouched. */
    return status;
}
/* Convert a uname release string like "2.6.18" to an integer
 * of the form 0x020612. (Beware that 0x020612 is *not* 2.6.12.)
 */
static int relstr_to_int(const char *s)
{
    int result = 0;

    /* Fold the first three dot-separated numeric components into one
     * byte each, most significant component first.  Missing components
     * contribute zero; any non-digit suffix (e.g. "-generic") stops the
     * parse of that component. */
    for (int i = 0; i < 3; i++) {
        int part = 0;
        while (*s >= '0' && *s <= '9') {
            part = part * 10 + (*s - '0');
            s++;
        }
        result = (result << 8) + part;
        if (*s == '.') {
            s++;
        }
    }
    return result;
}
4931 int get_osversion(void)
4933 static int osversion
;
4934 struct new_utsname buf
;
4939 if (qemu_uname_release
&& *qemu_uname_release
) {
4940 s
= qemu_uname_release
;
4942 if (sys_uname(&buf
))
4946 osversion
= relstr_to_int(s
);
4950 void init_qemu_uname_release(void)
4952 /* Initialize qemu_uname_release for later use.
4953 * If the host kernel is too old and the user hasn't asked for
4954 * a specific fake version number, we might want to fake a minimum
4955 * target kernel version.
4957 #ifdef UNAME_MINIMUM_RELEASE
4958 struct new_utsname buf
;
4960 if (qemu_uname_release
&& *qemu_uname_release
) {
4964 if (sys_uname(&buf
)) {
4968 if (relstr_to_int(buf
.release
) < relstr_to_int(UNAME_MINIMUM_RELEASE
)) {
4969 qemu_uname_release
= UNAME_MINIMUM_RELEASE
;
4974 static int open_self_maps(void *cpu_env
, int fd
)
4976 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4977 TaskState
*ts
= ((CPUArchState
*)cpu_env
)->opaque
;
4984 fp
= fopen("/proc/self/maps", "r");
4989 while ((read
= getline(&line
, &len
, fp
)) != -1) {
4990 int fields
, dev_maj
, dev_min
, inode
;
4991 uint64_t min
, max
, offset
;
4992 char flag_r
, flag_w
, flag_x
, flag_p
;
4993 char path
[512] = "";
4994 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
4995 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
4996 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
4998 if ((fields
< 10) || (fields
> 11)) {
5001 if (!strncmp(path
, "[stack]", 7)) {
5004 if (h2g_valid(min
) && h2g_valid(max
)) {
5005 dprintf(fd
, TARGET_ABI_FMT_lx
"-" TARGET_ABI_FMT_lx
5006 " %c%c%c%c %08" PRIx64
" %02x:%02x %d %s%s\n",
5007 h2g(min
), h2g(max
), flag_r
, flag_w
,
5008 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
5009 path
[0] ? " " : "", path
);
5016 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
5017 dprintf(fd
, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n",
5018 (unsigned long long)ts
->info
->stack_limit
,
5019 (unsigned long long)(ts
->info
->start_stack
+
5020 (TARGET_PAGE_SIZE
- 1)) & TARGET_PAGE_MASK
,
5021 (unsigned long long)0);
5027 static int open_self_stat(void *cpu_env
, int fd
)
5029 TaskState
*ts
= ((CPUArchState
*)cpu_env
)->opaque
;
5030 abi_ulong start_stack
= ts
->info
->start_stack
;
5033 for (i
= 0; i
< 44; i
++) {
5041 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
5042 } else if (i
== 1) {
5044 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
5045 } else if (i
== 27) {
5048 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
5050 /* for the rest, there is MasterCard */
5051 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
5055 if (write(fd
, buf
, len
) != len
) {
5063 static int open_self_auxv(void *cpu_env
, int fd
)
5065 TaskState
*ts
= ((CPUArchState
*)cpu_env
)->opaque
;
5066 abi_ulong auxv
= ts
->info
->saved_auxv
;
5067 abi_ulong len
= ts
->info
->auxv_len
;
5071 * Auxiliary vector is stored in target process stack.
5072 * read in whole auxv vector and copy it to file
5074 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
5078 r
= write(fd
, ptr
, len
);
5085 lseek(fd
, 0, SEEK_SET
);
5086 unlock_user(ptr
, auxv
, len
);
/* Return nonzero if FILENAME names the /proc entry ENTRY of the current
 * process, reached either through "/proc/self/" or "/proc/<our pid>/".
 * Any other path — including another process's /proc directory — yields 0. */
static int is_proc_myself(const char *filename, const char *entry)
{
    static const char proc_prefix[] = "/proc/";

    if (strncmp(filename, proc_prefix, strlen(proc_prefix)) != 0) {
        return 0;
    }
    filename += strlen(proc_prefix);

    if (strncmp(filename, "self/", strlen("self/")) == 0) {
        filename += strlen("self/");
    } else if (*filename >= '1' && *filename <= '9') {
        /* Accept "/proc/<pid>/" as an alias for "/proc/self/" when the
         * pid is our own. */
        char myself[80];
        snprintf(myself, sizeof(myself), "%d/", getpid());
        if (strncmp(filename, myself, strlen(myself)) != 0) {
            return 0;
        }
        filename += strlen(myself);
    } else {
        return 0;
    }

    return strcmp(filename, entry) == 0;
}
5116 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Return nonzero if FILENAME is exactly the /proc path ENTRY (used for
 * byte-swapping host-endian /proc files on cross-endian targets). */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
5122 static int open_net_route(void *cpu_env
, int fd
)
5129 fp
= fopen("/proc/net/route", "r");
5136 read
= getline(&line
, &len
, fp
);
5137 dprintf(fd
, "%s", line
);
5141 while ((read
= getline(&line
, &len
, fp
)) != -1) {
5143 uint32_t dest
, gw
, mask
;
5144 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
5145 sscanf(line
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5146 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
5147 &mask
, &mtu
, &window
, &irtt
);
5148 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5149 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
5150 metric
, tswap32(mask
), mtu
, window
, irtt
);
5160 static int do_open(void *cpu_env
, const char *pathname
, int flags
, mode_t mode
)
5163 const char *filename
;
5164 int (*fill
)(void *cpu_env
, int fd
);
5165 int (*cmp
)(const char *s1
, const char *s2
);
5167 const struct fake_open
*fake_open
;
5168 static const struct fake_open fakes
[] = {
5169 { "maps", open_self_maps
, is_proc_myself
},
5170 { "stat", open_self_stat
, is_proc_myself
},
5171 { "auxv", open_self_auxv
, is_proc_myself
},
5172 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
5173 { "/proc/net/route", open_net_route
, is_proc
},
5175 { NULL
, NULL
, NULL
}
5178 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
5179 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
5184 if (fake_open
->filename
) {
5186 char filename
[PATH_MAX
];
5189 /* create temporary file to map stat to */
5190 tmpdir
= getenv("TMPDIR");
5193 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
5194 fd
= mkstemp(filename
);
5200 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
5204 lseek(fd
, 0, SEEK_SET
);
5209 return get_errno(open(path(pathname
), flags
, mode
));
5212 /* do_syscall() should always have a single exit point at the end so
5213 that actions, such as logging of syscall results, can be performed.
5214 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5215 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
5216 abi_long arg2
, abi_long arg3
, abi_long arg4
,
5217 abi_long arg5
, abi_long arg6
, abi_long arg7
,
5220 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
5227 gemu_log("syscall %d", num
);
5230 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
5233 case TARGET_NR_exit
:
5234 /* In old applications this may be used to implement _exit(2).
5235 However in threaded applictions it is used for thread termination,
5236 and _exit_group is used for application termination.
5237 Do thread termination if we have more then one thread. */
5238 /* FIXME: This probably breaks if a signal arrives. We should probably
5239 be disabling signals. */
5240 if (CPU_NEXT(first_cpu
)) {
5244 /* Remove the CPU from the list. */
5245 QTAILQ_REMOVE(&cpus
, cpu
, node
);
5247 ts
= ((CPUArchState
*)cpu_env
)->opaque
;
5248 if (ts
->child_tidptr
) {
5249 put_user_u32(0, ts
->child_tidptr
);
5250 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
5254 object_unref(OBJECT(ENV_GET_CPU(cpu_env
)));
5261 gdb_exit(cpu_env
, arg1
);
5263 ret
= 0; /* avoid warning */
5265 case TARGET_NR_read
:
5269 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
5271 ret
= get_errno(read(arg1
, p
, arg3
));
5272 unlock_user(p
, arg2
, ret
);
5275 case TARGET_NR_write
:
5276 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
5278 ret
= get_errno(write(arg1
, p
, arg3
));
5279 unlock_user(p
, arg2
, 0);
5281 case TARGET_NR_open
:
5282 if (!(p
= lock_user_string(arg1
)))
5284 ret
= get_errno(do_open(cpu_env
, p
,
5285 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
5287 unlock_user(p
, arg1
, 0);
5289 #if defined(TARGET_NR_openat) && defined(__NR_openat)
5290 case TARGET_NR_openat
:
5291 if (!(p
= lock_user_string(arg2
)))
5293 ret
= get_errno(sys_openat(arg1
,
5295 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
5297 unlock_user(p
, arg2
, 0);
5300 case TARGET_NR_close
:
5301 ret
= get_errno(close(arg1
));
5306 case TARGET_NR_fork
:
5307 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
5309 #ifdef TARGET_NR_waitpid
5310 case TARGET_NR_waitpid
:
5313 ret
= get_errno(waitpid(arg1
, &status
, arg3
));
5314 if (!is_error(ret
) && arg2
&& ret
5315 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
5320 #ifdef TARGET_NR_waitid
5321 case TARGET_NR_waitid
:
5325 ret
= get_errno(waitid(arg1
, arg2
, &info
, arg4
));
5326 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
5327 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
5329 host_to_target_siginfo(p
, &info
);
5330 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
5335 #ifdef TARGET_NR_creat /* not on alpha */
5336 case TARGET_NR_creat
:
5337 if (!(p
= lock_user_string(arg1
)))
5339 ret
= get_errno(creat(p
, arg2
));
5340 unlock_user(p
, arg1
, 0);
5343 case TARGET_NR_link
:
5346 p
= lock_user_string(arg1
);
5347 p2
= lock_user_string(arg2
);
5349 ret
= -TARGET_EFAULT
;
5351 ret
= get_errno(link(p
, p2
));
5352 unlock_user(p2
, arg2
, 0);
5353 unlock_user(p
, arg1
, 0);
5356 #if defined(TARGET_NR_linkat)
5357 case TARGET_NR_linkat
:
5362 p
= lock_user_string(arg2
);
5363 p2
= lock_user_string(arg4
);
5365 ret
= -TARGET_EFAULT
;
5367 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
5368 unlock_user(p
, arg2
, 0);
5369 unlock_user(p2
, arg4
, 0);
5373 case TARGET_NR_unlink
:
5374 if (!(p
= lock_user_string(arg1
)))
5376 ret
= get_errno(unlink(p
));
5377 unlock_user(p
, arg1
, 0);
5379 #if defined(TARGET_NR_unlinkat)
5380 case TARGET_NR_unlinkat
:
5381 if (!(p
= lock_user_string(arg2
)))
5383 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
5384 unlock_user(p
, arg2
, 0);
5387 case TARGET_NR_execve
:
5389 char **argp
, **envp
;
5392 abi_ulong guest_argp
;
5393 abi_ulong guest_envp
;
5400 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
5401 if (get_user_ual(addr
, gp
))
5409 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
5410 if (get_user_ual(addr
, gp
))
5417 argp
= alloca((argc
+ 1) * sizeof(void *));
5418 envp
= alloca((envc
+ 1) * sizeof(void *));
5420 for (gp
= guest_argp
, q
= argp
; gp
;
5421 gp
+= sizeof(abi_ulong
), q
++) {
5422 if (get_user_ual(addr
, gp
))
5426 if (!(*q
= lock_user_string(addr
)))
5428 total_size
+= strlen(*q
) + 1;
5432 for (gp
= guest_envp
, q
= envp
; gp
;
5433 gp
+= sizeof(abi_ulong
), q
++) {
5434 if (get_user_ual(addr
, gp
))
5438 if (!(*q
= lock_user_string(addr
)))
5440 total_size
+= strlen(*q
) + 1;
5444 /* This case will not be caught by the host's execve() if its
5445 page size is bigger than the target's. */
5446 if (total_size
> MAX_ARG_PAGES
* TARGET_PAGE_SIZE
) {
5447 ret
= -TARGET_E2BIG
;
5450 if (!(p
= lock_user_string(arg1
)))
5452 ret
= get_errno(execve(p
, argp
, envp
));
5453 unlock_user(p
, arg1
, 0);
5458 ret
= -TARGET_EFAULT
;
5461 for (gp
= guest_argp
, q
= argp
; *q
;
5462 gp
+= sizeof(abi_ulong
), q
++) {
5463 if (get_user_ual(addr
, gp
)
5466 unlock_user(*q
, addr
, 0);
5468 for (gp
= guest_envp
, q
= envp
; *q
;
5469 gp
+= sizeof(abi_ulong
), q
++) {
5470 if (get_user_ual(addr
, gp
)
5473 unlock_user(*q
, addr
, 0);
5477 case TARGET_NR_chdir
:
5478 if (!(p
= lock_user_string(arg1
)))
5480 ret
= get_errno(chdir(p
));
5481 unlock_user(p
, arg1
, 0);
5483 #ifdef TARGET_NR_time
5484 case TARGET_NR_time
:
5487 ret
= get_errno(time(&host_time
));
5490 && put_user_sal(host_time
, arg1
))
5495 case TARGET_NR_mknod
:
5496 if (!(p
= lock_user_string(arg1
)))
5498 ret
= get_errno(mknod(p
, arg2
, arg3
));
5499 unlock_user(p
, arg1
, 0);
5501 #if defined(TARGET_NR_mknodat)
5502 case TARGET_NR_mknodat
:
5503 if (!(p
= lock_user_string(arg2
)))
5505 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
5506 unlock_user(p
, arg2
, 0);
5509 case TARGET_NR_chmod
:
5510 if (!(p
= lock_user_string(arg1
)))
5512 ret
= get_errno(chmod(p
, arg2
));
5513 unlock_user(p
, arg1
, 0);
5515 #ifdef TARGET_NR_break
5516 case TARGET_NR_break
:
5519 #ifdef TARGET_NR_oldstat
5520 case TARGET_NR_oldstat
:
5523 case TARGET_NR_lseek
:
5524 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
5526 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5527 /* Alpha specific */
5528 case TARGET_NR_getxpid
:
5529 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
5530 ret
= get_errno(getpid());
5533 #ifdef TARGET_NR_getpid
5534 case TARGET_NR_getpid
:
5535 ret
= get_errno(getpid());
5538 case TARGET_NR_mount
:
5540 /* need to look at the data field */
5542 p
= lock_user_string(arg1
);
5543 p2
= lock_user_string(arg2
);
5544 p3
= lock_user_string(arg3
);
5545 if (!p
|| !p2
|| !p3
)
5546 ret
= -TARGET_EFAULT
;
5548 /* FIXME - arg5 should be locked, but it isn't clear how to
5549 * do that since it's not guaranteed to be a NULL-terminated
5553 ret
= get_errno(mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
));
5555 ret
= get_errno(mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
)));
5557 unlock_user(p
, arg1
, 0);
5558 unlock_user(p2
, arg2
, 0);
5559 unlock_user(p3
, arg3
, 0);
5562 #ifdef TARGET_NR_umount
5563 case TARGET_NR_umount
:
5564 if (!(p
= lock_user_string(arg1
)))
5566 ret
= get_errno(umount(p
));
5567 unlock_user(p
, arg1
, 0);
5570 #ifdef TARGET_NR_stime /* not on alpha */
5571 case TARGET_NR_stime
:
5574 if (get_user_sal(host_time
, arg1
))
5576 ret
= get_errno(stime(&host_time
));
5580 case TARGET_NR_ptrace
:
5582 #ifdef TARGET_NR_alarm /* not on alpha */
5583 case TARGET_NR_alarm
:
5587 #ifdef TARGET_NR_oldfstat
5588 case TARGET_NR_oldfstat
:
5591 #ifdef TARGET_NR_pause /* not on alpha */
5592 case TARGET_NR_pause
:
5593 ret
= get_errno(pause());
5596 #ifdef TARGET_NR_utime
5597 case TARGET_NR_utime
:
5599 struct utimbuf tbuf
, *host_tbuf
;
5600 struct target_utimbuf
*target_tbuf
;
5602 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
5604 tbuf
.actime
= tswapal(target_tbuf
->actime
);
5605 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
5606 unlock_user_struct(target_tbuf
, arg2
, 0);
5611 if (!(p
= lock_user_string(arg1
)))
5613 ret
= get_errno(utime(p
, host_tbuf
));
5614 unlock_user(p
, arg1
, 0);
5618 case TARGET_NR_utimes
:
5620 struct timeval
*tvp
, tv
[2];
5622 if (copy_from_user_timeval(&tv
[0], arg2
)
5623 || copy_from_user_timeval(&tv
[1],
5624 arg2
+ sizeof(struct target_timeval
)))
5630 if (!(p
= lock_user_string(arg1
)))
5632 ret
= get_errno(utimes(p
, tvp
));
5633 unlock_user(p
, arg1
, 0);
5636 #if defined(TARGET_NR_futimesat)
5637 case TARGET_NR_futimesat
:
5639 struct timeval
*tvp
, tv
[2];
5641 if (copy_from_user_timeval(&tv
[0], arg3
)
5642 || copy_from_user_timeval(&tv
[1],
5643 arg3
+ sizeof(struct target_timeval
)))
5649 if (!(p
= lock_user_string(arg2
)))
5651 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
5652 unlock_user(p
, arg2
, 0);
5656 #ifdef TARGET_NR_stty
5657 case TARGET_NR_stty
:
5660 #ifdef TARGET_NR_gtty
5661 case TARGET_NR_gtty
:
5664 case TARGET_NR_access
:
5665 if (!(p
= lock_user_string(arg1
)))
5667 ret
= get_errno(access(path(p
), arg2
));
5668 unlock_user(p
, arg1
, 0);
5670 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5671 case TARGET_NR_faccessat
:
5672 if (!(p
= lock_user_string(arg2
)))
5674 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
5675 unlock_user(p
, arg2
, 0);
5678 #ifdef TARGET_NR_nice /* not on alpha */
5679 case TARGET_NR_nice
:
5680 ret
= get_errno(nice(arg1
));
5683 #ifdef TARGET_NR_ftime
5684 case TARGET_NR_ftime
:
5687 case TARGET_NR_sync
:
5691 case TARGET_NR_kill
:
5692 ret
= get_errno(kill(arg1
, target_to_host_signal(arg2
)));
5694 case TARGET_NR_rename
:
5697 p
= lock_user_string(arg1
);
5698 p2
= lock_user_string(arg2
);
5700 ret
= -TARGET_EFAULT
;
5702 ret
= get_errno(rename(p
, p2
));
5703 unlock_user(p2
, arg2
, 0);
5704 unlock_user(p
, arg1
, 0);
5707 #if defined(TARGET_NR_renameat)
5708 case TARGET_NR_renameat
:
5711 p
= lock_user_string(arg2
);
5712 p2
= lock_user_string(arg4
);
5714 ret
= -TARGET_EFAULT
;
5716 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
5717 unlock_user(p2
, arg4
, 0);
5718 unlock_user(p
, arg2
, 0);
5722 case TARGET_NR_mkdir
:
5723 if (!(p
= lock_user_string(arg1
)))
5725 ret
= get_errno(mkdir(p
, arg2
));
5726 unlock_user(p
, arg1
, 0);
5728 #if defined(TARGET_NR_mkdirat)
5729 case TARGET_NR_mkdirat
:
5730 if (!(p
= lock_user_string(arg2
)))
5732 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
5733 unlock_user(p
, arg2
, 0);
5736 case TARGET_NR_rmdir
:
5737 if (!(p
= lock_user_string(arg1
)))
5739 ret
= get_errno(rmdir(p
));
5740 unlock_user(p
, arg1
, 0);
5743 ret
= get_errno(dup(arg1
));
5745 case TARGET_NR_pipe
:
5746 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
5748 #ifdef TARGET_NR_pipe2
5749 case TARGET_NR_pipe2
:
5750 ret
= do_pipe(cpu_env
, arg1
,
5751 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
5754 case TARGET_NR_times
:
5756 struct target_tms
*tmsp
;
5758 ret
= get_errno(times(&tms
));
5760 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
5763 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
5764 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
5765 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
5766 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
5769 ret
= host_to_target_clock_t(ret
);
5772 #ifdef TARGET_NR_prof
5773 case TARGET_NR_prof
:
5776 #ifdef TARGET_NR_signal
5777 case TARGET_NR_signal
:
5780 case TARGET_NR_acct
:
5782 ret
= get_errno(acct(NULL
));
5784 if (!(p
= lock_user_string(arg1
)))
5786 ret
= get_errno(acct(path(p
)));
5787 unlock_user(p
, arg1
, 0);
5790 #ifdef TARGET_NR_umount2
5791 case TARGET_NR_umount2
:
5792 if (!(p
= lock_user_string(arg1
)))
5794 ret
= get_errno(umount2(p
, arg2
));
5795 unlock_user(p
, arg1
, 0);
5798 #ifdef TARGET_NR_lock
5799 case TARGET_NR_lock
:
5802 case TARGET_NR_ioctl
:
5803 ret
= do_ioctl(arg1
, arg2
, arg3
);
5805 case TARGET_NR_fcntl
:
5806 ret
= do_fcntl(arg1
, arg2
, arg3
);
5808 #ifdef TARGET_NR_mpx
5812 case TARGET_NR_setpgid
:
5813 ret
= get_errno(setpgid(arg1
, arg2
));
5815 #ifdef TARGET_NR_ulimit
5816 case TARGET_NR_ulimit
:
5819 #ifdef TARGET_NR_oldolduname
5820 case TARGET_NR_oldolduname
:
5823 case TARGET_NR_umask
:
5824 ret
= get_errno(umask(arg1
));
5826 case TARGET_NR_chroot
:
5827 if (!(p
= lock_user_string(arg1
)))
5829 ret
= get_errno(chroot(p
));
5830 unlock_user(p
, arg1
, 0);
5832 case TARGET_NR_ustat
:
5834 case TARGET_NR_dup2
:
5835 ret
= get_errno(dup2(arg1
, arg2
));
5837 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5838 case TARGET_NR_dup3
:
5839 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
5842 #ifdef TARGET_NR_getppid /* not on alpha */
5843 case TARGET_NR_getppid
:
5844 ret
= get_errno(getppid());
5847 case TARGET_NR_getpgrp
:
5848 ret
= get_errno(getpgrp());
5850 case TARGET_NR_setsid
:
5851 ret
= get_errno(setsid());
5853 #ifdef TARGET_NR_sigaction
5854 case TARGET_NR_sigaction
:
5856 #if defined(TARGET_ALPHA)
5857 struct target_sigaction act
, oact
, *pact
= 0;
5858 struct target_old_sigaction
*old_act
;
5860 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
5862 act
._sa_handler
= old_act
->_sa_handler
;
5863 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
5864 act
.sa_flags
= old_act
->sa_flags
;
5865 act
.sa_restorer
= 0;
5866 unlock_user_struct(old_act
, arg2
, 0);
5869 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5870 if (!is_error(ret
) && arg3
) {
5871 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
5873 old_act
->_sa_handler
= oact
._sa_handler
;
5874 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
5875 old_act
->sa_flags
= oact
.sa_flags
;
5876 unlock_user_struct(old_act
, arg3
, 1);
5878 #elif defined(TARGET_MIPS)
5879 struct target_sigaction act
, oact
, *pact
, *old_act
;
5882 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
5884 act
._sa_handler
= old_act
->_sa_handler
;
5885 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
5886 act
.sa_flags
= old_act
->sa_flags
;
5887 unlock_user_struct(old_act
, arg2
, 0);
5893 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5895 if (!is_error(ret
) && arg3
) {
5896 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
5898 old_act
->_sa_handler
= oact
._sa_handler
;
5899 old_act
->sa_flags
= oact
.sa_flags
;
5900 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
5901 old_act
->sa_mask
.sig
[1] = 0;
5902 old_act
->sa_mask
.sig
[2] = 0;
5903 old_act
->sa_mask
.sig
[3] = 0;
5904 unlock_user_struct(old_act
, arg3
, 1);
5907 struct target_old_sigaction
*old_act
;
5908 struct target_sigaction act
, oact
, *pact
;
5910 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
5912 act
._sa_handler
= old_act
->_sa_handler
;
5913 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
5914 act
.sa_flags
= old_act
->sa_flags
;
5915 act
.sa_restorer
= old_act
->sa_restorer
;
5916 unlock_user_struct(old_act
, arg2
, 0);
5921 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5922 if (!is_error(ret
) && arg3
) {
5923 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
5925 old_act
->_sa_handler
= oact
._sa_handler
;
5926 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
5927 old_act
->sa_flags
= oact
.sa_flags
;
5928 old_act
->sa_restorer
= oact
.sa_restorer
;
5929 unlock_user_struct(old_act
, arg3
, 1);
5935 case TARGET_NR_rt_sigaction
:
5937 #if defined(TARGET_ALPHA)
5938 struct target_sigaction act
, oact
, *pact
= 0;
5939 struct target_rt_sigaction
*rt_act
;
5940 /* ??? arg4 == sizeof(sigset_t). */
5942 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
5944 act
._sa_handler
= rt_act
->_sa_handler
;
5945 act
.sa_mask
= rt_act
->sa_mask
;
5946 act
.sa_flags
= rt_act
->sa_flags
;
5947 act
.sa_restorer
= arg5
;
5948 unlock_user_struct(rt_act
, arg2
, 0);
5951 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5952 if (!is_error(ret
) && arg3
) {
5953 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
5955 rt_act
->_sa_handler
= oact
._sa_handler
;
5956 rt_act
->sa_mask
= oact
.sa_mask
;
5957 rt_act
->sa_flags
= oact
.sa_flags
;
5958 unlock_user_struct(rt_act
, arg3
, 1);
5961 struct target_sigaction
*act
;
5962 struct target_sigaction
*oact
;
5965 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
5970 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
5971 ret
= -TARGET_EFAULT
;
5972 goto rt_sigaction_fail
;
5976 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
5979 unlock_user_struct(act
, arg2
, 0);
5981 unlock_user_struct(oact
, arg3
, 1);
5985 #ifdef TARGET_NR_sgetmask /* not on alpha */
5986 case TARGET_NR_sgetmask
:
5989 abi_ulong target_set
;
5990 sigprocmask(0, NULL
, &cur_set
);
5991 host_to_target_old_sigset(&target_set
, &cur_set
);
5996 #ifdef TARGET_NR_ssetmask /* not on alpha */
5997 case TARGET_NR_ssetmask
:
5999 sigset_t set
, oset
, cur_set
;
6000 abi_ulong target_set
= arg1
;
6001 sigprocmask(0, NULL
, &cur_set
);
6002 target_to_host_old_sigset(&set
, &target_set
);
6003 sigorset(&set
, &set
, &cur_set
);
6004 sigprocmask(SIG_SETMASK
, &set
, &oset
);
6005 host_to_target_old_sigset(&target_set
, &oset
);
6010 #ifdef TARGET_NR_sigprocmask
6011 case TARGET_NR_sigprocmask
:
6013 #if defined(TARGET_ALPHA)
6014 sigset_t set
, oldset
;
6019 case TARGET_SIG_BLOCK
:
6022 case TARGET_SIG_UNBLOCK
:
6025 case TARGET_SIG_SETMASK
:
6029 ret
= -TARGET_EINVAL
;
6033 target_to_host_old_sigset(&set
, &mask
);
6035 ret
= get_errno(sigprocmask(how
, &set
, &oldset
));
6036 if (!is_error(ret
)) {
6037 host_to_target_old_sigset(&mask
, &oldset
);
6039 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
6042 sigset_t set
, oldset
, *set_ptr
;
6047 case TARGET_SIG_BLOCK
:
6050 case TARGET_SIG_UNBLOCK
:
6053 case TARGET_SIG_SETMASK
:
6057 ret
= -TARGET_EINVAL
;
6060 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
6062 target_to_host_old_sigset(&set
, p
);
6063 unlock_user(p
, arg2
, 0);
6069 ret
= get_errno(sigprocmask(how
, set_ptr
, &oldset
));
6070 if (!is_error(ret
) && arg3
) {
6071 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
6073 host_to_target_old_sigset(p
, &oldset
);
6074 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
6080 case TARGET_NR_rt_sigprocmask
:
6083 sigset_t set
, oldset
, *set_ptr
;
6087 case TARGET_SIG_BLOCK
:
6090 case TARGET_SIG_UNBLOCK
:
6093 case TARGET_SIG_SETMASK
:
6097 ret
= -TARGET_EINVAL
;
6100 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
6102 target_to_host_sigset(&set
, p
);
6103 unlock_user(p
, arg2
, 0);
6109 ret
= get_errno(sigprocmask(how
, set_ptr
, &oldset
));
6110 if (!is_error(ret
) && arg3
) {
6111 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
6113 host_to_target_sigset(p
, &oldset
);
6114 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
6118 #ifdef TARGET_NR_sigpending
6119 case TARGET_NR_sigpending
:
6122 ret
= get_errno(sigpending(&set
));
6123 if (!is_error(ret
)) {
6124 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
6126 host_to_target_old_sigset(p
, &set
);
6127 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
6132 case TARGET_NR_rt_sigpending
:
6135 ret
= get_errno(sigpending(&set
));
6136 if (!is_error(ret
)) {
6137 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
6139 host_to_target_sigset(p
, &set
);
6140 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
6144 #ifdef TARGET_NR_sigsuspend
6145 case TARGET_NR_sigsuspend
:
6148 #if defined(TARGET_ALPHA)
6149 abi_ulong mask
= arg1
;
6150 target_to_host_old_sigset(&set
, &mask
);
6152 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
6154 target_to_host_old_sigset(&set
, p
);
6155 unlock_user(p
, arg1
, 0);
6157 ret
= get_errno(sigsuspend(&set
));
6161 case TARGET_NR_rt_sigsuspend
:
6164 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
6166 target_to_host_sigset(&set
, p
);
6167 unlock_user(p
, arg1
, 0);
6168 ret
= get_errno(sigsuspend(&set
));
6171 case TARGET_NR_rt_sigtimedwait
:
6174 struct timespec uts
, *puts
;
6177 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
6179 target_to_host_sigset(&set
, p
);
6180 unlock_user(p
, arg1
, 0);
6183 target_to_host_timespec(puts
, arg3
);
6187 ret
= get_errno(sigtimedwait(&set
, &uinfo
, puts
));
6188 if (!is_error(ret
)) {
6190 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
6195 host_to_target_siginfo(p
, &uinfo
);
6196 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
6198 ret
= host_to_target_signal(ret
);
6202 case TARGET_NR_rt_sigqueueinfo
:
6205 if (!(p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_sigset_t
), 1)))
6207 target_to_host_siginfo(&uinfo
, p
);
6208 unlock_user(p
, arg1
, 0);
6209 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
6212 #ifdef TARGET_NR_sigreturn
6213 case TARGET_NR_sigreturn
:
6214 /* NOTE: ret is eax, so not transcoding must be done */
6215 ret
= do_sigreturn(cpu_env
);
6218 case TARGET_NR_rt_sigreturn
:
6219 /* NOTE: ret is eax, so not transcoding must be done */
6220 ret
= do_rt_sigreturn(cpu_env
);
6222 case TARGET_NR_sethostname
:
6223 if (!(p
= lock_user_string(arg1
)))
6225 ret
= get_errno(sethostname(p
, arg2
));
6226 unlock_user(p
, arg1
, 0);
6228 case TARGET_NR_setrlimit
:
6230 int resource
= target_to_host_resource(arg1
);
6231 struct target_rlimit
*target_rlim
;
6233 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
6235 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
6236 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
6237 unlock_user_struct(target_rlim
, arg2
, 0);
6238 ret
= get_errno(setrlimit(resource
, &rlim
));
6241 case TARGET_NR_getrlimit
:
6243 int resource
= target_to_host_resource(arg1
);
6244 struct target_rlimit
*target_rlim
;
6247 ret
= get_errno(getrlimit(resource
, &rlim
));
6248 if (!is_error(ret
)) {
6249 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
6251 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
6252 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
6253 unlock_user_struct(target_rlim
, arg2
, 1);
6257 case TARGET_NR_getrusage
:
6259 struct rusage rusage
;
6260 ret
= get_errno(getrusage(arg1
, &rusage
));
6261 if (!is_error(ret
)) {
6262 host_to_target_rusage(arg2
, &rusage
);
6266 case TARGET_NR_gettimeofday
:
6269 ret
= get_errno(gettimeofday(&tv
, NULL
));
6270 if (!is_error(ret
)) {
6271 if (copy_to_user_timeval(arg1
, &tv
))
6276 case TARGET_NR_settimeofday
:
6279 if (copy_from_user_timeval(&tv
, arg1
))
6281 ret
= get_errno(settimeofday(&tv
, NULL
));
6284 #if defined(TARGET_NR_select)
6285 case TARGET_NR_select
:
6286 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
6287 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
6290 struct target_sel_arg_struct
*sel
;
6291 abi_ulong inp
, outp
, exp
, tvp
;
6294 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1))
6296 nsel
= tswapal(sel
->n
);
6297 inp
= tswapal(sel
->inp
);
6298 outp
= tswapal(sel
->outp
);
6299 exp
= tswapal(sel
->exp
);
6300 tvp
= tswapal(sel
->tvp
);
6301 unlock_user_struct(sel
, arg1
, 0);
6302 ret
= do_select(nsel
, inp
, outp
, exp
, tvp
);
6307 #ifdef TARGET_NR_pselect6
6308 case TARGET_NR_pselect6
:
6310 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
6311 fd_set rfds
, wfds
, efds
;
6312 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
6313 struct timespec ts
, *ts_ptr
;
6316 * The 6th arg is actually two args smashed together,
6317 * so we cannot use the C library.
6325 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
6326 target_sigset_t
*target_sigset
;
6334 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
6338 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
6342 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
6348 * This takes a timespec, and not a timeval, so we cannot
6349 * use the do_select() helper ...
6352 if (target_to_host_timespec(&ts
, ts_addr
)) {
6360 /* Extract the two packed args for the sigset */
6363 sig
.size
= _NSIG
/ 8;
6365 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
6369 arg_sigset
= tswapal(arg7
[0]);
6370 arg_sigsize
= tswapal(arg7
[1]);
6371 unlock_user(arg7
, arg6
, 0);
6375 if (arg_sigsize
!= sizeof(*target_sigset
)) {
6376 /* Like the kernel, we enforce correct size sigsets */
6377 ret
= -TARGET_EINVAL
;
6380 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
6381 sizeof(*target_sigset
), 1);
6382 if (!target_sigset
) {
6385 target_to_host_sigset(&set
, target_sigset
);
6386 unlock_user(target_sigset
, arg_sigset
, 0);
6394 ret
= get_errno(sys_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
6397 if (!is_error(ret
)) {
6398 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
6400 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
6402 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
6405 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
6411 case TARGET_NR_symlink
:
6414 p
= lock_user_string(arg1
);
6415 p2
= lock_user_string(arg2
);
6417 ret
= -TARGET_EFAULT
;
6419 ret
= get_errno(symlink(p
, p2
));
6420 unlock_user(p2
, arg2
, 0);
6421 unlock_user(p
, arg1
, 0);
6424 #if defined(TARGET_NR_symlinkat)
6425 case TARGET_NR_symlinkat
:
6428 p
= lock_user_string(arg1
);
6429 p2
= lock_user_string(arg3
);
6431 ret
= -TARGET_EFAULT
;
6433 ret
= get_errno(symlinkat(p
, arg2
, p2
));
6434 unlock_user(p2
, arg3
, 0);
6435 unlock_user(p
, arg1
, 0);
6439 #ifdef TARGET_NR_oldlstat
6440 case TARGET_NR_oldlstat
:
6443 case TARGET_NR_readlink
:
6446 p
= lock_user_string(arg1
);
6447 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
6449 ret
= -TARGET_EFAULT
;
6450 } else if (is_proc_myself((const char *)p
, "exe")) {
6451 char real
[PATH_MAX
], *temp
;
6452 temp
= realpath(exec_path
, real
);
6453 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
6454 snprintf((char *)p2
, arg3
, "%s", real
);
6456 ret
= get_errno(readlink(path(p
), p2
, arg3
));
6458 unlock_user(p2
, arg2
, ret
);
6459 unlock_user(p
, arg1
, 0);
6462 #if defined(TARGET_NR_readlinkat)
6463 case TARGET_NR_readlinkat
:
6466 p
= lock_user_string(arg2
);
6467 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
6469 ret
= -TARGET_EFAULT
;
6470 } else if (is_proc_myself((const char *)p
, "exe")) {
6471 char real
[PATH_MAX
], *temp
;
6472 temp
= realpath(exec_path
, real
);
6473 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
6474 snprintf((char *)p2
, arg4
, "%s", real
);
6476 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
6478 unlock_user(p2
, arg3
, ret
);
6479 unlock_user(p
, arg2
, 0);
6483 #ifdef TARGET_NR_uselib
6484 case TARGET_NR_uselib
:
6487 #ifdef TARGET_NR_swapon
6488 case TARGET_NR_swapon
:
6489 if (!(p
= lock_user_string(arg1
)))
6491 ret
= get_errno(swapon(p
, arg2
));
6492 unlock_user(p
, arg1
, 0);
6495 case TARGET_NR_reboot
:
6496 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
6497 /* arg4 must be ignored in all other cases */
6498 p
= lock_user_string(arg4
);
6502 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
6503 unlock_user(p
, arg4
, 0);
6505 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
6508 #ifdef TARGET_NR_readdir
6509 case TARGET_NR_readdir
:
6512 #ifdef TARGET_NR_mmap
6513 case TARGET_NR_mmap
:
6514 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6515 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
6516 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
6517 || defined(TARGET_S390X)
6520 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
6521 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
6529 unlock_user(v
, arg1
, 0);
6530 ret
= get_errno(target_mmap(v1
, v2
, v3
,
6531 target_to_host_bitmask(v4
, mmap_flags_tbl
),
6535 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
6536 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
6542 #ifdef TARGET_NR_mmap2
6543 case TARGET_NR_mmap2
:
6545 #define MMAP_SHIFT 12
6547 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
6548 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
6550 arg6
<< MMAP_SHIFT
));
6553 case TARGET_NR_munmap
:
6554 ret
= get_errno(target_munmap(arg1
, arg2
));
6556 case TARGET_NR_mprotect
:
6558 TaskState
*ts
= ((CPUArchState
*)cpu_env
)->opaque
;
6559 /* Special hack to detect libc making the stack executable. */
6560 if ((arg3
& PROT_GROWSDOWN
)
6561 && arg1
>= ts
->info
->stack_limit
6562 && arg1
<= ts
->info
->start_stack
) {
6563 arg3
&= ~PROT_GROWSDOWN
;
6564 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
6565 arg1
= ts
->info
->stack_limit
;
6568 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
6570 #ifdef TARGET_NR_mremap
6571 case TARGET_NR_mremap
:
6572 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
6575 /* ??? msync/mlock/munlock are broken for softmmu. */
6576 #ifdef TARGET_NR_msync
6577 case TARGET_NR_msync
:
6578 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
6581 #ifdef TARGET_NR_mlock
6582 case TARGET_NR_mlock
:
6583 ret
= get_errno(mlock(g2h(arg1
), arg2
));
6586 #ifdef TARGET_NR_munlock
6587 case TARGET_NR_munlock
:
6588 ret
= get_errno(munlock(g2h(arg1
), arg2
));
6591 #ifdef TARGET_NR_mlockall
6592 case TARGET_NR_mlockall
:
6593 ret
= get_errno(mlockall(arg1
));
6596 #ifdef TARGET_NR_munlockall
6597 case TARGET_NR_munlockall
:
6598 ret
= get_errno(munlockall());
6601 case TARGET_NR_truncate
:
6602 if (!(p
= lock_user_string(arg1
)))
6604 ret
= get_errno(truncate(p
, arg2
));
6605 unlock_user(p
, arg1
, 0);
6607 case TARGET_NR_ftruncate
:
6608 ret
= get_errno(ftruncate(arg1
, arg2
));
6610 case TARGET_NR_fchmod
:
6611 ret
= get_errno(fchmod(arg1
, arg2
));
6613 #if defined(TARGET_NR_fchmodat)
6614 case TARGET_NR_fchmodat
:
6615 if (!(p
= lock_user_string(arg2
)))
6617 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
6618 unlock_user(p
, arg2
, 0);
6621 case TARGET_NR_getpriority
:
6622 /* Note that negative values are valid for getpriority, so we must
6623 differentiate based on errno settings. */
6625 ret
= getpriority(arg1
, arg2
);
6626 if (ret
== -1 && errno
!= 0) {
6627 ret
= -host_to_target_errno(errno
);
6631 /* Return value is the unbiased priority. Signal no error. */
6632 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
6634 /* Return value is a biased priority to avoid negative numbers. */
6638 case TARGET_NR_setpriority
:
6639 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
6641 #ifdef TARGET_NR_profil
6642 case TARGET_NR_profil
:
6645 case TARGET_NR_statfs
:
6646 if (!(p
= lock_user_string(arg1
)))
6648 ret
= get_errno(statfs(path(p
), &stfs
));
6649 unlock_user(p
, arg1
, 0);
6651 if (!is_error(ret
)) {
6652 struct target_statfs
*target_stfs
;
6654 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
6656 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
6657 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
6658 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
6659 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
6660 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
6661 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
6662 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
6663 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
6664 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
6665 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
6666 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
6667 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
6668 unlock_user_struct(target_stfs
, arg2
, 1);
6671 case TARGET_NR_fstatfs
:
6672 ret
= get_errno(fstatfs(arg1
, &stfs
));
6673 goto convert_statfs
;
6674 #ifdef TARGET_NR_statfs64
6675 case TARGET_NR_statfs64
:
6676 if (!(p
= lock_user_string(arg1
)))
6678 ret
= get_errno(statfs(path(p
), &stfs
));
6679 unlock_user(p
, arg1
, 0);
6681 if (!is_error(ret
)) {
6682 struct target_statfs64
*target_stfs
;
6684 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
6686 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
6687 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
6688 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
6689 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
6690 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
6691 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
6692 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
6693 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
6694 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
6695 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
6696 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
6697 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
6698 unlock_user_struct(target_stfs
, arg3
, 1);
6701 case TARGET_NR_fstatfs64
:
6702 ret
= get_errno(fstatfs(arg1
, &stfs
));
6703 goto convert_statfs64
;
6705 #ifdef TARGET_NR_ioperm
6706 case TARGET_NR_ioperm
:
6709 #ifdef TARGET_NR_socketcall
6710 case TARGET_NR_socketcall
:
6711 ret
= do_socketcall(arg1
, arg2
);
6714 #ifdef TARGET_NR_accept
6715 case TARGET_NR_accept
:
6716 ret
= do_accept4(arg1
, arg2
, arg3
, 0);
6719 #ifdef TARGET_NR_accept4
6720 case TARGET_NR_accept4
:
6721 #ifdef CONFIG_ACCEPT4
6722 ret
= do_accept4(arg1
, arg2
, arg3
, arg4
);
6728 #ifdef TARGET_NR_bind
6729 case TARGET_NR_bind
:
6730 ret
= do_bind(arg1
, arg2
, arg3
);
6733 #ifdef TARGET_NR_connect
6734 case TARGET_NR_connect
:
6735 ret
= do_connect(arg1
, arg2
, arg3
);
6738 #ifdef TARGET_NR_getpeername
6739 case TARGET_NR_getpeername
:
6740 ret
= do_getpeername(arg1
, arg2
, arg3
);
6743 #ifdef TARGET_NR_getsockname
6744 case TARGET_NR_getsockname
:
6745 ret
= do_getsockname(arg1
, arg2
, arg3
);
6748 #ifdef TARGET_NR_getsockopt
6749 case TARGET_NR_getsockopt
:
6750 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
6753 #ifdef TARGET_NR_listen
6754 case TARGET_NR_listen
:
6755 ret
= get_errno(listen(arg1
, arg2
));
6758 #ifdef TARGET_NR_recv
6759 case TARGET_NR_recv
:
6760 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
6763 #ifdef TARGET_NR_recvfrom
6764 case TARGET_NR_recvfrom
:
6765 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6768 #ifdef TARGET_NR_recvmsg
6769 case TARGET_NR_recvmsg
:
6770 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
6773 #ifdef TARGET_NR_send
6774 case TARGET_NR_send
:
6775 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
6778 #ifdef TARGET_NR_sendmsg
6779 case TARGET_NR_sendmsg
:
6780 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
6783 #ifdef TARGET_NR_sendmmsg
6784 case TARGET_NR_sendmmsg
:
6785 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
6787 case TARGET_NR_recvmmsg
:
6788 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
6791 #ifdef TARGET_NR_sendto
6792 case TARGET_NR_sendto
:
6793 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6796 #ifdef TARGET_NR_shutdown
6797 case TARGET_NR_shutdown
:
6798 ret
= get_errno(shutdown(arg1
, arg2
));
6801 #ifdef TARGET_NR_socket
6802 case TARGET_NR_socket
:
6803 ret
= do_socket(arg1
, arg2
, arg3
);
6806 #ifdef TARGET_NR_socketpair
6807 case TARGET_NR_socketpair
:
6808 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
6811 #ifdef TARGET_NR_setsockopt
6812 case TARGET_NR_setsockopt
:
6813 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
6817 case TARGET_NR_syslog
:
6818 if (!(p
= lock_user_string(arg2
)))
6820 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
6821 unlock_user(p
, arg2
, 0);
6824 case TARGET_NR_setitimer
:
6826 struct itimerval value
, ovalue
, *pvalue
;
6830 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
6831 || copy_from_user_timeval(&pvalue
->it_value
,
6832 arg2
+ sizeof(struct target_timeval
)))
6837 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
6838 if (!is_error(ret
) && arg3
) {
6839 if (copy_to_user_timeval(arg3
,
6840 &ovalue
.it_interval
)
6841 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
6847 case TARGET_NR_getitimer
:
6849 struct itimerval value
;
6851 ret
= get_errno(getitimer(arg1
, &value
));
6852 if (!is_error(ret
) && arg2
) {
6853 if (copy_to_user_timeval(arg2
,
6855 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
6861 case TARGET_NR_stat
:
6862 if (!(p
= lock_user_string(arg1
)))
6864 ret
= get_errno(stat(path(p
), &st
));
6865 unlock_user(p
, arg1
, 0);
6867 case TARGET_NR_lstat
:
6868 if (!(p
= lock_user_string(arg1
)))
6870 ret
= get_errno(lstat(path(p
), &st
));
6871 unlock_user(p
, arg1
, 0);
6873 case TARGET_NR_fstat
:
6875 ret
= get_errno(fstat(arg1
, &st
));
6877 if (!is_error(ret
)) {
6878 struct target_stat
*target_st
;
6880 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
6882 memset(target_st
, 0, sizeof(*target_st
));
6883 __put_user(st
.st_dev
, &target_st
->st_dev
);
6884 __put_user(st
.st_ino
, &target_st
->st_ino
);
6885 __put_user(st
.st_mode
, &target_st
->st_mode
);
6886 __put_user(st
.st_uid
, &target_st
->st_uid
);
6887 __put_user(st
.st_gid
, &target_st
->st_gid
);
6888 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
6889 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
6890 __put_user(st
.st_size
, &target_st
->st_size
);
6891 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
6892 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
6893 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
6894 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
6895 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
6896 unlock_user_struct(target_st
, arg2
, 1);
6900 #ifdef TARGET_NR_olduname
6901 case TARGET_NR_olduname
:
6904 #ifdef TARGET_NR_iopl
6905 case TARGET_NR_iopl
:
6908 case TARGET_NR_vhangup
:
6909 ret
= get_errno(vhangup());
6911 #ifdef TARGET_NR_idle
6912 case TARGET_NR_idle
:
6915 #ifdef TARGET_NR_syscall
6916 case TARGET_NR_syscall
:
6917 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
6918 arg6
, arg7
, arg8
, 0);
6921 case TARGET_NR_wait4
:
6924 abi_long status_ptr
= arg2
;
6925 struct rusage rusage
, *rusage_ptr
;
6926 abi_ulong target_rusage
= arg4
;
6928 rusage_ptr
= &rusage
;
6931 ret
= get_errno(wait4(arg1
, &status
, arg3
, rusage_ptr
));
6932 if (!is_error(ret
)) {
6933 if (status_ptr
&& ret
) {
6934 status
= host_to_target_waitstatus(status
);
6935 if (put_user_s32(status
, status_ptr
))
6939 host_to_target_rusage(target_rusage
, &rusage
);
6943 #ifdef TARGET_NR_swapoff
6944 case TARGET_NR_swapoff
:
6945 if (!(p
= lock_user_string(arg1
)))
6947 ret
= get_errno(swapoff(p
));
6948 unlock_user(p
, arg1
, 0);
6951 case TARGET_NR_sysinfo
:
6953 struct target_sysinfo
*target_value
;
6954 struct sysinfo value
;
6955 ret
= get_errno(sysinfo(&value
));
6956 if (!is_error(ret
) && arg1
)
6958 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
6960 __put_user(value
.uptime
, &target_value
->uptime
);
6961 __put_user(value
.loads
[0], &target_value
->loads
[0]);
6962 __put_user(value
.loads
[1], &target_value
->loads
[1]);
6963 __put_user(value
.loads
[2], &target_value
->loads
[2]);
6964 __put_user(value
.totalram
, &target_value
->totalram
);
6965 __put_user(value
.freeram
, &target_value
->freeram
);
6966 __put_user(value
.sharedram
, &target_value
->sharedram
);
6967 __put_user(value
.bufferram
, &target_value
->bufferram
);
6968 __put_user(value
.totalswap
, &target_value
->totalswap
);
6969 __put_user(value
.freeswap
, &target_value
->freeswap
);
6970 __put_user(value
.procs
, &target_value
->procs
);
6971 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
6972 __put_user(value
.freehigh
, &target_value
->freehigh
);
6973 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
6974 unlock_user_struct(target_value
, arg1
, 1);
6978 #ifdef TARGET_NR_ipc
6980 ret
= do_ipc(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6983 #ifdef TARGET_NR_semget
6984 case TARGET_NR_semget
:
6985 ret
= get_errno(semget(arg1
, arg2
, arg3
));
6988 #ifdef TARGET_NR_semop
6989 case TARGET_NR_semop
:
6990 ret
= do_semop(arg1
, arg2
, arg3
);
6993 #ifdef TARGET_NR_semctl
6994 case TARGET_NR_semctl
:
6995 ret
= do_semctl(arg1
, arg2
, arg3
, (union target_semun
)(abi_ulong
)arg4
);
6998 #ifdef TARGET_NR_msgctl
6999 case TARGET_NR_msgctl
:
7000 ret
= do_msgctl(arg1
, arg2
, arg3
);
7003 #ifdef TARGET_NR_msgget
7004 case TARGET_NR_msgget
:
7005 ret
= get_errno(msgget(arg1
, arg2
));
7008 #ifdef TARGET_NR_msgrcv
7009 case TARGET_NR_msgrcv
:
7010 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
7013 #ifdef TARGET_NR_msgsnd
7014 case TARGET_NR_msgsnd
:
7015 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
7018 #ifdef TARGET_NR_shmget
7019 case TARGET_NR_shmget
:
7020 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
7023 #ifdef TARGET_NR_shmctl
7024 case TARGET_NR_shmctl
:
7025 ret
= do_shmctl(arg1
, arg2
, arg3
);
7028 #ifdef TARGET_NR_shmat
7029 case TARGET_NR_shmat
:
7030 ret
= do_shmat(arg1
, arg2
, arg3
);
7033 #ifdef TARGET_NR_shmdt
7034 case TARGET_NR_shmdt
:
7035 ret
= do_shmdt(arg1
);
7038 case TARGET_NR_fsync
:
7039 ret
= get_errno(fsync(arg1
));
7041 case TARGET_NR_clone
:
7042 /* Linux manages to have three different orderings for its
7043 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
7044 * match the kernel's CONFIG_CLONE_* settings.
7045 * Microblaze is further special in that it uses a sixth
7046 * implicit argument to clone for the TLS pointer.
7048 #if defined(TARGET_MICROBLAZE)
7049 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
7050 #elif defined(TARGET_CLONE_BACKWARDS)
7051 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
7052 #elif defined(TARGET_CLONE_BACKWARDS2)
7053 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
7055 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
7058 #ifdef __NR_exit_group
7059 /* new thread calls */
7060 case TARGET_NR_exit_group
:
7064 gdb_exit(cpu_env
, arg1
);
7065 ret
= get_errno(exit_group(arg1
));
7068 case TARGET_NR_setdomainname
:
7069 if (!(p
= lock_user_string(arg1
)))
7071 ret
= get_errno(setdomainname(p
, arg2
));
7072 unlock_user(p
, arg1
, 0);
7074 case TARGET_NR_uname
:
7075 /* no need to transcode because we use the linux syscall */
7077 struct new_utsname
* buf
;
7079 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
7081 ret
= get_errno(sys_uname(buf
));
7082 if (!is_error(ret
)) {
7083 /* Overwrite the native machine name with whatever is being
7085 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
7086 /* Allow the user to override the reported release. */
7087 if (qemu_uname_release
&& *qemu_uname_release
)
7088 strcpy (buf
->release
, qemu_uname_release
);
7090 unlock_user_struct(buf
, arg1
, 1);
7094 case TARGET_NR_modify_ldt
:
7095 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
7097 #if !defined(TARGET_X86_64)
7098 case TARGET_NR_vm86old
:
7100 case TARGET_NR_vm86
:
7101 ret
= do_vm86(cpu_env
, arg1
, arg2
);
7105 case TARGET_NR_adjtimex
:
7107 #ifdef TARGET_NR_create_module
7108 case TARGET_NR_create_module
:
7110 case TARGET_NR_init_module
:
7111 case TARGET_NR_delete_module
:
7112 #ifdef TARGET_NR_get_kernel_syms
7113 case TARGET_NR_get_kernel_syms
:
7116 case TARGET_NR_quotactl
:
7118 case TARGET_NR_getpgid
:
7119 ret
= get_errno(getpgid(arg1
));
7121 case TARGET_NR_fchdir
:
7122 ret
= get_errno(fchdir(arg1
));
7124 #ifdef TARGET_NR_bdflush /* not on x86_64 */
7125 case TARGET_NR_bdflush
:
7128 #ifdef TARGET_NR_sysfs
7129 case TARGET_NR_sysfs
:
7132 case TARGET_NR_personality
:
7133 ret
= get_errno(personality(arg1
));
7135 #ifdef TARGET_NR_afs_syscall
7136 case TARGET_NR_afs_syscall
:
7139 #ifdef TARGET_NR__llseek /* Not on alpha */
7140 case TARGET_NR__llseek
:
7143 #if !defined(__NR_llseek)
7144 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | arg3
, arg5
);
7146 ret
= get_errno(res
);
7151 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
7153 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
7159 case TARGET_NR_getdents
:
7160 #ifdef __NR_getdents
7161 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
7163 struct target_dirent
*target_dirp
;
7164 struct linux_dirent
*dirp
;
7165 abi_long count
= arg3
;
7167 dirp
= malloc(count
);
7169 ret
= -TARGET_ENOMEM
;
7173 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
7174 if (!is_error(ret
)) {
7175 struct linux_dirent
*de
;
7176 struct target_dirent
*tde
;
7178 int reclen
, treclen
;
7179 int count1
, tnamelen
;
7183 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
7187 reclen
= de
->d_reclen
;
7188 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
7189 assert(tnamelen
>= 0);
7190 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
7191 assert(count1
+ treclen
<= count
);
7192 tde
->d_reclen
= tswap16(treclen
);
7193 tde
->d_ino
= tswapal(de
->d_ino
);
7194 tde
->d_off
= tswapal(de
->d_off
);
7195 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
7196 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
7198 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
7202 unlock_user(target_dirp
, arg2
, ret
);
7208 struct linux_dirent
*dirp
;
7209 abi_long count
= arg3
;
7211 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
7213 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
7214 if (!is_error(ret
)) {
7215 struct linux_dirent
*de
;
7220 reclen
= de
->d_reclen
;
7223 de
->d_reclen
= tswap16(reclen
);
7224 tswapls(&de
->d_ino
);
7225 tswapls(&de
->d_off
);
7226 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
7230 unlock_user(dirp
, arg2
, ret
);
7234 /* Implement getdents in terms of getdents64 */
7236 struct linux_dirent64
*dirp
;
7237 abi_long count
= arg3
;
7239 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
7243 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
7244 if (!is_error(ret
)) {
7245 /* Convert the dirent64 structs to target dirent. We do this
7246 * in-place, since we can guarantee that a target_dirent is no
7247 * larger than a dirent64; however this means we have to be
7248 * careful to read everything before writing in the new format.
7250 struct linux_dirent64
*de
;
7251 struct target_dirent
*tde
;
7256 tde
= (struct target_dirent
*)dirp
;
7258 int namelen
, treclen
;
7259 int reclen
= de
->d_reclen
;
7260 uint64_t ino
= de
->d_ino
;
7261 int64_t off
= de
->d_off
;
7262 uint8_t type
= de
->d_type
;
7264 namelen
= strlen(de
->d_name
);
7265 treclen
= offsetof(struct target_dirent
, d_name
)
7267 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
7269 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
7270 tde
->d_ino
= tswapal(ino
);
7271 tde
->d_off
= tswapal(off
);
7272 tde
->d_reclen
= tswap16(treclen
);
7273 /* The target_dirent type is in what was formerly a padding
7274 * byte at the end of the structure:
7276 *(((char *)tde
) + treclen
- 1) = type
;
7278 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
7279 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
7285 unlock_user(dirp
, arg2
, ret
);
7289 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
7290 case TARGET_NR_getdents64
:
7292 struct linux_dirent64
*dirp
;
7293 abi_long count
= arg3
;
7294 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
7296 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
7297 if (!is_error(ret
)) {
7298 struct linux_dirent64
*de
;
7303 reclen
= de
->d_reclen
;
7306 de
->d_reclen
= tswap16(reclen
);
7307 tswap64s((uint64_t *)&de
->d_ino
);
7308 tswap64s((uint64_t *)&de
->d_off
);
7309 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
7313 unlock_user(dirp
, arg2
, ret
);
7316 #endif /* TARGET_NR_getdents64 */
7317 #if defined(TARGET_NR__newselect)
7318 case TARGET_NR__newselect
:
7319 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
7322 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
7323 # ifdef TARGET_NR_poll
7324 case TARGET_NR_poll
:
7326 # ifdef TARGET_NR_ppoll
7327 case TARGET_NR_ppoll
:
7330 struct target_pollfd
*target_pfd
;
7331 unsigned int nfds
= arg2
;
7336 target_pfd
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_pollfd
) * nfds
, 1);
7340 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
7341 for(i
= 0; i
< nfds
; i
++) {
7342 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
7343 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
7346 # ifdef TARGET_NR_ppoll
7347 if (num
== TARGET_NR_ppoll
) {
7348 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
7349 target_sigset_t
*target_set
;
7350 sigset_t _set
, *set
= &_set
;
7353 if (target_to_host_timespec(timeout_ts
, arg3
)) {
7354 unlock_user(target_pfd
, arg1
, 0);
7362 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
7364 unlock_user(target_pfd
, arg1
, 0);
7367 target_to_host_sigset(set
, target_set
);
7372 ret
= get_errno(sys_ppoll(pfd
, nfds
, timeout_ts
, set
, _NSIG
/8));
7374 if (!is_error(ret
) && arg3
) {
7375 host_to_target_timespec(arg3
, timeout_ts
);
7378 unlock_user(target_set
, arg4
, 0);
7382 ret
= get_errno(poll(pfd
, nfds
, timeout
));
7384 if (!is_error(ret
)) {
7385 for(i
= 0; i
< nfds
; i
++) {
7386 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
7389 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
7393 case TARGET_NR_flock
:
7394 /* NOTE: the flock constant seems to be the same for every
7396 ret
= get_errno(flock(arg1
, arg2
));
7398 case TARGET_NR_readv
:
7400 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
7402 ret
= get_errno(readv(arg1
, vec
, arg3
));
7403 unlock_iovec(vec
, arg2
, arg3
, 1);
7405 ret
= -host_to_target_errno(errno
);
7409 case TARGET_NR_writev
:
7411 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
7413 ret
= get_errno(writev(arg1
, vec
, arg3
));
7414 unlock_iovec(vec
, arg2
, arg3
, 0);
7416 ret
= -host_to_target_errno(errno
);
7420 case TARGET_NR_getsid
:
7421 ret
= get_errno(getsid(arg1
));
7423 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
7424 case TARGET_NR_fdatasync
:
7425 ret
= get_errno(fdatasync(arg1
));
7428 case TARGET_NR__sysctl
:
7429 /* We don't implement this, but ENOTDIR is always a safe
7431 ret
= -TARGET_ENOTDIR
;
7433 case TARGET_NR_sched_getaffinity
:
7435 unsigned int mask_size
;
7436 unsigned long *mask
;
7439 * sched_getaffinity needs multiples of ulong, so need to take
7440 * care of mismatches between target ulong and host ulong sizes.
7442 if (arg2
& (sizeof(abi_ulong
) - 1)) {
7443 ret
= -TARGET_EINVAL
;
7446 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
7448 mask
= alloca(mask_size
);
7449 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
7451 if (!is_error(ret
)) {
7452 if (copy_to_user(arg3
, mask
, ret
)) {
7458 case TARGET_NR_sched_setaffinity
:
7460 unsigned int mask_size
;
7461 unsigned long *mask
;
7464 * sched_setaffinity needs multiples of ulong, so need to take
7465 * care of mismatches between target ulong and host ulong sizes.
7467 if (arg2
& (sizeof(abi_ulong
) - 1)) {
7468 ret
= -TARGET_EINVAL
;
7471 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
7473 mask
= alloca(mask_size
);
7474 if (!lock_user_struct(VERIFY_READ
, p
, arg3
, 1)) {
7477 memcpy(mask
, p
, arg2
);
7478 unlock_user_struct(p
, arg2
, 0);
7480 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
7483 case TARGET_NR_sched_setparam
:
7485 struct sched_param
*target_schp
;
7486 struct sched_param schp
;
7488 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
7490 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
7491 unlock_user_struct(target_schp
, arg2
, 0);
7492 ret
= get_errno(sched_setparam(arg1
, &schp
));
7495 case TARGET_NR_sched_getparam
:
7497 struct sched_param
*target_schp
;
7498 struct sched_param schp
;
7499 ret
= get_errno(sched_getparam(arg1
, &schp
));
7500 if (!is_error(ret
)) {
7501 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
7503 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
7504 unlock_user_struct(target_schp
, arg2
, 1);
7508 case TARGET_NR_sched_setscheduler
:
7510 struct sched_param
*target_schp
;
7511 struct sched_param schp
;
7512 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
7514 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
7515 unlock_user_struct(target_schp
, arg3
, 0);
7516 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
7519 case TARGET_NR_sched_getscheduler
:
7520 ret
= get_errno(sched_getscheduler(arg1
));
7522 case TARGET_NR_sched_yield
:
7523 ret
= get_errno(sched_yield());
7525 case TARGET_NR_sched_get_priority_max
:
7526 ret
= get_errno(sched_get_priority_max(arg1
));
7528 case TARGET_NR_sched_get_priority_min
:
7529 ret
= get_errno(sched_get_priority_min(arg1
));
7531 case TARGET_NR_sched_rr_get_interval
:
7534 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
7535 if (!is_error(ret
)) {
7536 host_to_target_timespec(arg2
, &ts
);
7540 case TARGET_NR_nanosleep
:
7542 struct timespec req
, rem
;
7543 target_to_host_timespec(&req
, arg1
);
7544 ret
= get_errno(nanosleep(&req
, &rem
));
7545 if (is_error(ret
) && arg2
) {
7546 host_to_target_timespec(arg2
, &rem
);
7550 #ifdef TARGET_NR_query_module
7551 case TARGET_NR_query_module
:
7554 #ifdef TARGET_NR_nfsservctl
7555 case TARGET_NR_nfsservctl
:
7558 case TARGET_NR_prctl
:
7560 case PR_GET_PDEATHSIG
:
7563 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
7564 if (!is_error(ret
) && arg2
7565 && put_user_ual(deathsig
, arg2
)) {
7573 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
7577 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
7579 unlock_user(name
, arg2
, 16);
7584 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
7588 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
7590 unlock_user(name
, arg2
, 0);
7595 /* Most prctl options have no pointer arguments */
7596 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
7600 #ifdef TARGET_NR_arch_prctl
7601 case TARGET_NR_arch_prctl
:
7602 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
7603 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
7609 #ifdef TARGET_NR_pread64
7610 case TARGET_NR_pread64
:
7611 if (regpairs_aligned(cpu_env
)) {
7615 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
7617 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
7618 unlock_user(p
, arg2
, ret
);
7620 case TARGET_NR_pwrite64
:
7621 if (regpairs_aligned(cpu_env
)) {
7625 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
7627 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
7628 unlock_user(p
, arg2
, 0);
7631 case TARGET_NR_getcwd
:
7632 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
7634 ret
= get_errno(sys_getcwd1(p
, arg2
));
7635 unlock_user(p
, arg1
, ret
);
7637 case TARGET_NR_capget
:
7639 case TARGET_NR_capset
:
7641 case TARGET_NR_sigaltstack
:
7642 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
7643 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
7644 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
7645 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUArchState
*)cpu_env
));
7651 #ifdef CONFIG_SENDFILE
7652 case TARGET_NR_sendfile
:
7657 ret
= get_user_sal(off
, arg3
);
7658 if (is_error(ret
)) {
7663 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
7664 if (!is_error(ret
) && arg3
) {
7665 abi_long ret2
= put_user_sal(off
, arg3
);
7666 if (is_error(ret2
)) {
7672 #ifdef TARGET_NR_sendfile64
7673 case TARGET_NR_sendfile64
:
7678 ret
= get_user_s64(off
, arg3
);
7679 if (is_error(ret
)) {
7684 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
7685 if (!is_error(ret
) && arg3
) {
7686 abi_long ret2
= put_user_s64(off
, arg3
);
7687 if (is_error(ret2
)) {
7695 case TARGET_NR_sendfile
:
7696 #ifdef TARGET_NR_sendfile64
7697 case TARGET_NR_sendfile64
:
7702 #ifdef TARGET_NR_getpmsg
7703 case TARGET_NR_getpmsg
:
7706 #ifdef TARGET_NR_putpmsg
7707 case TARGET_NR_putpmsg
:
7710 #ifdef TARGET_NR_vfork
7711 case TARGET_NR_vfork
:
7712 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
7716 #ifdef TARGET_NR_ugetrlimit
7717 case TARGET_NR_ugetrlimit
:
7720 int resource
= target_to_host_resource(arg1
);
7721 ret
= get_errno(getrlimit(resource
, &rlim
));
7722 if (!is_error(ret
)) {
7723 struct target_rlimit
*target_rlim
;
7724 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
7726 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
7727 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
7728 unlock_user_struct(target_rlim
, arg2
, 1);
7733 #ifdef TARGET_NR_truncate64
7734 case TARGET_NR_truncate64
:
7735 if (!(p
= lock_user_string(arg1
)))
7737 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
7738 unlock_user(p
, arg1
, 0);
7741 #ifdef TARGET_NR_ftruncate64
7742 case TARGET_NR_ftruncate64
:
7743 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
7746 #ifdef TARGET_NR_stat64
7747 case TARGET_NR_stat64
:
7748 if (!(p
= lock_user_string(arg1
)))
7750 ret
= get_errno(stat(path(p
), &st
));
7751 unlock_user(p
, arg1
, 0);
7753 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
7756 #ifdef TARGET_NR_lstat64
7757 case TARGET_NR_lstat64
:
7758 if (!(p
= lock_user_string(arg1
)))
7760 ret
= get_errno(lstat(path(p
), &st
));
7761 unlock_user(p
, arg1
, 0);
7763 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
7766 #ifdef TARGET_NR_fstat64
7767 case TARGET_NR_fstat64
:
7768 ret
= get_errno(fstat(arg1
, &st
));
7770 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
7773 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
7774 #ifdef TARGET_NR_fstatat64
7775 case TARGET_NR_fstatat64
:
7777 #ifdef TARGET_NR_newfstatat
7778 case TARGET_NR_newfstatat
:
7780 if (!(p
= lock_user_string(arg2
)))
7782 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
7784 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
7787 case TARGET_NR_lchown
:
7788 if (!(p
= lock_user_string(arg1
)))
7790 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
7791 unlock_user(p
, arg1
, 0);
7793 #ifdef TARGET_NR_getuid
7794 case TARGET_NR_getuid
:
7795 ret
= get_errno(high2lowuid(getuid()));
7798 #ifdef TARGET_NR_getgid
7799 case TARGET_NR_getgid
:
7800 ret
= get_errno(high2lowgid(getgid()));
7803 #ifdef TARGET_NR_geteuid
7804 case TARGET_NR_geteuid
:
7805 ret
= get_errno(high2lowuid(geteuid()));
7808 #ifdef TARGET_NR_getegid
7809 case TARGET_NR_getegid
:
7810 ret
= get_errno(high2lowgid(getegid()));
7813 case TARGET_NR_setreuid
:
7814 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
7816 case TARGET_NR_setregid
:
7817 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
7819 case TARGET_NR_getgroups
:
7821 int gidsetsize
= arg1
;
7822 target_id
*target_grouplist
;
7826 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
7827 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
7828 if (gidsetsize
== 0)
7830 if (!is_error(ret
)) {
7831 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
7832 if (!target_grouplist
)
7834 for(i
= 0;i
< ret
; i
++)
7835 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
7836 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
7840 case TARGET_NR_setgroups
:
7842 int gidsetsize
= arg1
;
7843 target_id
*target_grouplist
;
7844 gid_t
*grouplist
= NULL
;
7847 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
7848 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
7849 if (!target_grouplist
) {
7850 ret
= -TARGET_EFAULT
;
7853 for (i
= 0; i
< gidsetsize
; i
++) {
7854 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
7856 unlock_user(target_grouplist
, arg2
, 0);
7858 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
7861 case TARGET_NR_fchown
:
7862 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
7864 #if defined(TARGET_NR_fchownat)
7865 case TARGET_NR_fchownat
:
7866 if (!(p
= lock_user_string(arg2
)))
7868 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
7869 low2highgid(arg4
), arg5
));
7870 unlock_user(p
, arg2
, 0);
7873 #ifdef TARGET_NR_setresuid
7874 case TARGET_NR_setresuid
:
7875 ret
= get_errno(setresuid(low2highuid(arg1
),
7877 low2highuid(arg3
)));
7880 #ifdef TARGET_NR_getresuid
7881 case TARGET_NR_getresuid
:
7883 uid_t ruid
, euid
, suid
;
7884 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
7885 if (!is_error(ret
)) {
7886 if (put_user_id(high2lowuid(ruid
), arg1
)
7887 || put_user_id(high2lowuid(euid
), arg2
)
7888 || put_user_id(high2lowuid(suid
), arg3
))
7894 #ifdef TARGET_NR_getresgid
7895 case TARGET_NR_setresgid
:
7896 ret
= get_errno(setresgid(low2highgid(arg1
),
7898 low2highgid(arg3
)));
7901 #ifdef TARGET_NR_getresgid
7902 case TARGET_NR_getresgid
:
7904 gid_t rgid
, egid
, sgid
;
7905 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
7906 if (!is_error(ret
)) {
7907 if (put_user_id(high2lowgid(rgid
), arg1
)
7908 || put_user_id(high2lowgid(egid
), arg2
)
7909 || put_user_id(high2lowgid(sgid
), arg3
))
7915 case TARGET_NR_chown
:
7916 if (!(p
= lock_user_string(arg1
)))
7918 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
7919 unlock_user(p
, arg1
, 0);
7921 case TARGET_NR_setuid
:
7922 ret
= get_errno(setuid(low2highuid(arg1
)));
7924 case TARGET_NR_setgid
:
7925 ret
= get_errno(setgid(low2highgid(arg1
)));
7927 case TARGET_NR_setfsuid
:
7928 ret
= get_errno(setfsuid(arg1
));
7930 case TARGET_NR_setfsgid
:
7931 ret
= get_errno(setfsgid(arg1
));
7934 #ifdef TARGET_NR_lchown32
7935 case TARGET_NR_lchown32
:
7936 if (!(p
= lock_user_string(arg1
)))
7938 ret
= get_errno(lchown(p
, arg2
, arg3
));
7939 unlock_user(p
, arg1
, 0);
7942 #ifdef TARGET_NR_getuid32
7943 case TARGET_NR_getuid32
:
7944 ret
= get_errno(getuid());
7948 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
7949 /* Alpha specific */
7950 case TARGET_NR_getxuid
:
7954 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
7956 ret
= get_errno(getuid());
7959 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
7960 /* Alpha specific */
7961 case TARGET_NR_getxgid
:
7965 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
7967 ret
= get_errno(getgid());
7970 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
7971 /* Alpha specific */
7972 case TARGET_NR_osf_getsysinfo
:
7973 ret
= -TARGET_EOPNOTSUPP
;
7975 case TARGET_GSI_IEEE_FP_CONTROL
:
7977 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
7979 /* Copied from linux ieee_fpcr_to_swcr. */
7980 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
7981 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
7982 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
7983 | SWCR_TRAP_ENABLE_DZE
7984 | SWCR_TRAP_ENABLE_OVF
);
7985 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
7986 | SWCR_TRAP_ENABLE_INE
);
7987 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
7988 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
7990 if (put_user_u64 (swcr
, arg2
))
7996 /* case GSI_IEEE_STATE_AT_SIGNAL:
7997 -- Not implemented in linux kernel.
7999 -- Retrieves current unaligned access state; not much used.
8001 -- Retrieves implver information; surely not used.
8003 -- Grabs a copy of the HWRPB; surely not used.
8008 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
8009 /* Alpha specific */
8010 case TARGET_NR_osf_setsysinfo
:
8011 ret
= -TARGET_EOPNOTSUPP
;
8013 case TARGET_SSI_IEEE_FP_CONTROL
:
8015 uint64_t swcr
, fpcr
, orig_fpcr
;
8017 if (get_user_u64 (swcr
, arg2
)) {
8020 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
8021 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
8023 /* Copied from linux ieee_swcr_to_fpcr. */
8024 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
8025 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
8026 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
8027 | SWCR_TRAP_ENABLE_DZE
8028 | SWCR_TRAP_ENABLE_OVF
)) << 48;
8029 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
8030 | SWCR_TRAP_ENABLE_INE
)) << 57;
8031 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
8032 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
8034 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
8039 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
8041 uint64_t exc
, fpcr
, orig_fpcr
;
8044 if (get_user_u64(exc
, arg2
)) {
8048 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
8050 /* We only add to the exception status here. */
8051 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
8053 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
8056 /* Old exceptions are not signaled. */
8057 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
8059 /* If any exceptions set by this call,
8060 and are unmasked, send a signal. */
8062 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
8063 si_code
= TARGET_FPE_FLTRES
;
8065 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
8066 si_code
= TARGET_FPE_FLTUND
;
8068 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
8069 si_code
= TARGET_FPE_FLTOVF
;
8071 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
8072 si_code
= TARGET_FPE_FLTDIV
;
8074 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
8075 si_code
= TARGET_FPE_FLTINV
;
8078 target_siginfo_t info
;
8079 info
.si_signo
= SIGFPE
;
8081 info
.si_code
= si_code
;
8082 info
._sifields
._sigfault
._addr
8083 = ((CPUArchState
*)cpu_env
)->pc
;
8084 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
8089 /* case SSI_NVPAIRS:
8090 -- Used with SSIN_UACPROC to enable unaligned accesses.
8091 case SSI_IEEE_STATE_AT_SIGNAL:
8092 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
8093 -- Not implemented in linux kernel
8098 #ifdef TARGET_NR_osf_sigprocmask
8099 /* Alpha specific. */
8100 case TARGET_NR_osf_sigprocmask
:
8104 sigset_t set
, oldset
;
8107 case TARGET_SIG_BLOCK
:
8110 case TARGET_SIG_UNBLOCK
:
8113 case TARGET_SIG_SETMASK
:
8117 ret
= -TARGET_EINVAL
;
8121 target_to_host_old_sigset(&set
, &mask
);
8122 sigprocmask(how
, &set
, &oldset
);
8123 host_to_target_old_sigset(&mask
, &oldset
);
8129 #ifdef TARGET_NR_getgid32
8130 case TARGET_NR_getgid32
:
8131 ret
= get_errno(getgid());
8134 #ifdef TARGET_NR_geteuid32
8135 case TARGET_NR_geteuid32
:
8136 ret
= get_errno(geteuid());
8139 #ifdef TARGET_NR_getegid32
8140 case TARGET_NR_getegid32
:
8141 ret
= get_errno(getegid());
8144 #ifdef TARGET_NR_setreuid32
8145 case TARGET_NR_setreuid32
:
8146 ret
= get_errno(setreuid(arg1
, arg2
));
8149 #ifdef TARGET_NR_setregid32
8150 case TARGET_NR_setregid32
:
8151 ret
= get_errno(setregid(arg1
, arg2
));
8154 #ifdef TARGET_NR_getgroups32
8155 case TARGET_NR_getgroups32
:
8157 int gidsetsize
= arg1
;
8158 uint32_t *target_grouplist
;
8162 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
8163 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
8164 if (gidsetsize
== 0)
8166 if (!is_error(ret
)) {
8167 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
8168 if (!target_grouplist
) {
8169 ret
= -TARGET_EFAULT
;
8172 for(i
= 0;i
< ret
; i
++)
8173 target_grouplist
[i
] = tswap32(grouplist
[i
]);
8174 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
8179 #ifdef TARGET_NR_setgroups32
8180 case TARGET_NR_setgroups32
:
8182 int gidsetsize
= arg1
;
8183 uint32_t *target_grouplist
;
8187 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
8188 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
8189 if (!target_grouplist
) {
8190 ret
= -TARGET_EFAULT
;
8193 for(i
= 0;i
< gidsetsize
; i
++)
8194 grouplist
[i
] = tswap32(target_grouplist
[i
]);
8195 unlock_user(target_grouplist
, arg2
, 0);
8196 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
8200 #ifdef TARGET_NR_fchown32
8201 case TARGET_NR_fchown32
:
8202 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
8205 #ifdef TARGET_NR_setresuid32
8206 case TARGET_NR_setresuid32
:
8207 ret
= get_errno(setresuid(arg1
, arg2
, arg3
));
8210 #ifdef TARGET_NR_getresuid32
8211 case TARGET_NR_getresuid32
:
8213 uid_t ruid
, euid
, suid
;
8214 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
8215 if (!is_error(ret
)) {
8216 if (put_user_u32(ruid
, arg1
)
8217 || put_user_u32(euid
, arg2
)
8218 || put_user_u32(suid
, arg3
))
8224 #ifdef TARGET_NR_setresgid32
8225 case TARGET_NR_setresgid32
:
8226 ret
= get_errno(setresgid(arg1
, arg2
, arg3
));
8229 #ifdef TARGET_NR_getresgid32
8230 case TARGET_NR_getresgid32
:
8232 gid_t rgid
, egid
, sgid
;
8233 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
8234 if (!is_error(ret
)) {
8235 if (put_user_u32(rgid
, arg1
)
8236 || put_user_u32(egid
, arg2
)
8237 || put_user_u32(sgid
, arg3
))
8243 #ifdef TARGET_NR_chown32
8244 case TARGET_NR_chown32
:
8245 if (!(p
= lock_user_string(arg1
)))
8247 ret
= get_errno(chown(p
, arg2
, arg3
));
8248 unlock_user(p
, arg1
, 0);
8251 #ifdef TARGET_NR_setuid32
8252 case TARGET_NR_setuid32
:
8253 ret
= get_errno(setuid(arg1
));
8256 #ifdef TARGET_NR_setgid32
8257 case TARGET_NR_setgid32
:
8258 ret
= get_errno(setgid(arg1
));
8261 #ifdef TARGET_NR_setfsuid32
8262 case TARGET_NR_setfsuid32
:
8263 ret
= get_errno(setfsuid(arg1
));
8266 #ifdef TARGET_NR_setfsgid32
8267 case TARGET_NR_setfsgid32
:
8268 ret
= get_errno(setfsgid(arg1
));
8272 case TARGET_NR_pivot_root
:
8274 #ifdef TARGET_NR_mincore
8275 case TARGET_NR_mincore
:
8278 ret
= -TARGET_EFAULT
;
8279 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
8281 if (!(p
= lock_user_string(arg3
)))
8283 ret
= get_errno(mincore(a
, arg2
, p
));
8284 unlock_user(p
, arg3
, ret
);
8286 unlock_user(a
, arg1
, 0);
8290 #ifdef TARGET_NR_arm_fadvise64_64
8291 case TARGET_NR_arm_fadvise64_64
:
8294 * arm_fadvise64_64 looks like fadvise64_64 but
8295 * with different argument order
8303 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
8304 #ifdef TARGET_NR_fadvise64_64
8305 case TARGET_NR_fadvise64_64
:
8307 #ifdef TARGET_NR_fadvise64
8308 case TARGET_NR_fadvise64
:
8312 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
8313 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
8314 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
8315 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
8319 ret
= -posix_fadvise(arg1
, arg2
, arg3
, arg4
);
8322 #ifdef TARGET_NR_madvise
8323 case TARGET_NR_madvise
:
8324 /* A straight passthrough may not be safe because qemu sometimes
8325 turns private file-backed mappings into anonymous mappings.
8326 This will break MADV_DONTNEED.
8327 This is a hint, so ignoring and returning success is ok. */
8331 #if TARGET_ABI_BITS == 32
8332 case TARGET_NR_fcntl64
:
8336 struct target_flock64
*target_fl
;
8338 struct target_eabi_flock64
*target_efl
;
8341 cmd
= target_to_host_fcntl_cmd(arg2
);
8342 if (cmd
== -TARGET_EINVAL
) {
8348 case TARGET_F_GETLK64
:
8350 if (((CPUARMState
*)cpu_env
)->eabi
) {
8351 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
8353 fl
.l_type
= tswap16(target_efl
->l_type
);
8354 fl
.l_whence
= tswap16(target_efl
->l_whence
);
8355 fl
.l_start
= tswap64(target_efl
->l_start
);
8356 fl
.l_len
= tswap64(target_efl
->l_len
);
8357 fl
.l_pid
= tswap32(target_efl
->l_pid
);
8358 unlock_user_struct(target_efl
, arg3
, 0);
8362 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
8364 fl
.l_type
= tswap16(target_fl
->l_type
);
8365 fl
.l_whence
= tswap16(target_fl
->l_whence
);
8366 fl
.l_start
= tswap64(target_fl
->l_start
);
8367 fl
.l_len
= tswap64(target_fl
->l_len
);
8368 fl
.l_pid
= tswap32(target_fl
->l_pid
);
8369 unlock_user_struct(target_fl
, arg3
, 0);
8371 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
8374 if (((CPUARMState
*)cpu_env
)->eabi
) {
8375 if (!lock_user_struct(VERIFY_WRITE
, target_efl
, arg3
, 0))
8377 target_efl
->l_type
= tswap16(fl
.l_type
);
8378 target_efl
->l_whence
= tswap16(fl
.l_whence
);
8379 target_efl
->l_start
= tswap64(fl
.l_start
);
8380 target_efl
->l_len
= tswap64(fl
.l_len
);
8381 target_efl
->l_pid
= tswap32(fl
.l_pid
);
8382 unlock_user_struct(target_efl
, arg3
, 1);
8386 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg3
, 0))
8388 target_fl
->l_type
= tswap16(fl
.l_type
);
8389 target_fl
->l_whence
= tswap16(fl
.l_whence
);
8390 target_fl
->l_start
= tswap64(fl
.l_start
);
8391 target_fl
->l_len
= tswap64(fl
.l_len
);
8392 target_fl
->l_pid
= tswap32(fl
.l_pid
);
8393 unlock_user_struct(target_fl
, arg3
, 1);
8398 case TARGET_F_SETLK64
:
8399 case TARGET_F_SETLKW64
:
8401 if (((CPUARMState
*)cpu_env
)->eabi
) {
8402 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
8404 fl
.l_type
= tswap16(target_efl
->l_type
);
8405 fl
.l_whence
= tswap16(target_efl
->l_whence
);
8406 fl
.l_start
= tswap64(target_efl
->l_start
);
8407 fl
.l_len
= tswap64(target_efl
->l_len
);
8408 fl
.l_pid
= tswap32(target_efl
->l_pid
);
8409 unlock_user_struct(target_efl
, arg3
, 0);
8413 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
8415 fl
.l_type
= tswap16(target_fl
->l_type
);
8416 fl
.l_whence
= tswap16(target_fl
->l_whence
);
8417 fl
.l_start
= tswap64(target_fl
->l_start
);
8418 fl
.l_len
= tswap64(target_fl
->l_len
);
8419 fl
.l_pid
= tswap32(target_fl
->l_pid
);
8420 unlock_user_struct(target_fl
, arg3
, 0);
8422 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
8425 ret
= do_fcntl(arg1
, arg2
, arg3
);
8431 #ifdef TARGET_NR_cacheflush
8432 case TARGET_NR_cacheflush
:
8433 /* self-modifying code is handled automatically, so nothing needed */
8437 #ifdef TARGET_NR_security
8438 case TARGET_NR_security
:
8441 #ifdef TARGET_NR_getpagesize
8442 case TARGET_NR_getpagesize
:
8443 ret
= TARGET_PAGE_SIZE
;
8446 case TARGET_NR_gettid
:
8447 ret
= get_errno(gettid());
8449 #ifdef TARGET_NR_readahead
8450 case TARGET_NR_readahead
:
8451 #if TARGET_ABI_BITS == 32
8452 if (regpairs_aligned(cpu_env
)) {
8457 ret
= get_errno(readahead(arg1
, ((off64_t
)arg3
<< 32) | arg2
, arg4
));
8459 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
8464 #ifdef TARGET_NR_setxattr
8465 case TARGET_NR_listxattr
:
8466 case TARGET_NR_llistxattr
:
8470 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8472 ret
= -TARGET_EFAULT
;
8476 p
= lock_user_string(arg1
);
8478 if (num
== TARGET_NR_listxattr
) {
8479 ret
= get_errno(listxattr(p
, b
, arg3
));
8481 ret
= get_errno(llistxattr(p
, b
, arg3
));
8484 ret
= -TARGET_EFAULT
;
8486 unlock_user(p
, arg1
, 0);
8487 unlock_user(b
, arg2
, arg3
);
8490 case TARGET_NR_flistxattr
:
8494 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8496 ret
= -TARGET_EFAULT
;
8500 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
8501 unlock_user(b
, arg2
, arg3
);
8504 case TARGET_NR_setxattr
:
8505 case TARGET_NR_lsetxattr
:
8507 void *p
, *n
, *v
= 0;
8509 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
8511 ret
= -TARGET_EFAULT
;
8515 p
= lock_user_string(arg1
);
8516 n
= lock_user_string(arg2
);
8518 if (num
== TARGET_NR_setxattr
) {
8519 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
8521 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
8524 ret
= -TARGET_EFAULT
;
8526 unlock_user(p
, arg1
, 0);
8527 unlock_user(n
, arg2
, 0);
8528 unlock_user(v
, arg3
, 0);
8531 case TARGET_NR_fsetxattr
:
8535 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
8537 ret
= -TARGET_EFAULT
;
8541 n
= lock_user_string(arg2
);
8543 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
8545 ret
= -TARGET_EFAULT
;
8547 unlock_user(n
, arg2
, 0);
8548 unlock_user(v
, arg3
, 0);
8551 case TARGET_NR_getxattr
:
8552 case TARGET_NR_lgetxattr
:
8554 void *p
, *n
, *v
= 0;
8556 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
8558 ret
= -TARGET_EFAULT
;
8562 p
= lock_user_string(arg1
);
8563 n
= lock_user_string(arg2
);
8565 if (num
== TARGET_NR_getxattr
) {
8566 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
8568 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
8571 ret
= -TARGET_EFAULT
;
8573 unlock_user(p
, arg1
, 0);
8574 unlock_user(n
, arg2
, 0);
8575 unlock_user(v
, arg3
, arg4
);
8578 case TARGET_NR_fgetxattr
:
8582 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
8584 ret
= -TARGET_EFAULT
;
8588 n
= lock_user_string(arg2
);
8590 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
8592 ret
= -TARGET_EFAULT
;
8594 unlock_user(n
, arg2
, 0);
8595 unlock_user(v
, arg3
, arg4
);
8598 case TARGET_NR_removexattr
:
8599 case TARGET_NR_lremovexattr
:
8602 p
= lock_user_string(arg1
);
8603 n
= lock_user_string(arg2
);
8605 if (num
== TARGET_NR_removexattr
) {
8606 ret
= get_errno(removexattr(p
, n
));
8608 ret
= get_errno(lremovexattr(p
, n
));
8611 ret
= -TARGET_EFAULT
;
8613 unlock_user(p
, arg1
, 0);
8614 unlock_user(n
, arg2
, 0);
8617 case TARGET_NR_fremovexattr
:
8620 n
= lock_user_string(arg2
);
8622 ret
= get_errno(fremovexattr(arg1
, n
));
8624 ret
= -TARGET_EFAULT
;
8626 unlock_user(n
, arg2
, 0);
8630 #endif /* CONFIG_ATTR */
8631 #ifdef TARGET_NR_set_thread_area
8632 case TARGET_NR_set_thread_area
:
8633 #if defined(TARGET_MIPS)
8634 ((CPUMIPSState
*) cpu_env
)->tls_value
= arg1
;
8637 #elif defined(TARGET_CRIS)
8639 ret
= -TARGET_EINVAL
;
8641 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
8645 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
8646 ret
= do_set_thread_area(cpu_env
, arg1
);
8648 #elif defined(TARGET_M68K)
8650 TaskState
*ts
= ((CPUArchState
*)cpu_env
)->opaque
;
8651 ts
->tp_value
= arg1
;
8656 goto unimplemented_nowarn
;
8659 #ifdef TARGET_NR_get_thread_area
8660 case TARGET_NR_get_thread_area
:
8661 #if defined(TARGET_I386) && defined(TARGET_ABI32)
8662 ret
= do_get_thread_area(cpu_env
, arg1
);
8664 #elif defined(TARGET_M68K)
8666 TaskState
*ts
= ((CPUArchState
*)cpu_env
)->opaque
;
8671 goto unimplemented_nowarn
;
8674 #ifdef TARGET_NR_getdomainname
8675 case TARGET_NR_getdomainname
:
8676 goto unimplemented_nowarn
;
8679 #ifdef TARGET_NR_clock_gettime
8680 case TARGET_NR_clock_gettime
:
8683 ret
= get_errno(clock_gettime(arg1
, &ts
));
8684 if (!is_error(ret
)) {
8685 host_to_target_timespec(arg2
, &ts
);
8690 #ifdef TARGET_NR_clock_getres
8691 case TARGET_NR_clock_getres
:
8694 ret
= get_errno(clock_getres(arg1
, &ts
));
8695 if (!is_error(ret
)) {
8696 host_to_target_timespec(arg2
, &ts
);
8701 #ifdef TARGET_NR_clock_nanosleep
8702 case TARGET_NR_clock_nanosleep
:
8705 target_to_host_timespec(&ts
, arg3
);
8706 ret
= get_errno(clock_nanosleep(arg1
, arg2
, &ts
, arg4
? &ts
: NULL
));
8708 host_to_target_timespec(arg4
, &ts
);
8713 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
8714 case TARGET_NR_set_tid_address
:
8715 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
8719 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
8720 case TARGET_NR_tkill
:
8721 ret
= get_errno(sys_tkill((int)arg1
, target_to_host_signal(arg2
)));
8725 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
8726 case TARGET_NR_tgkill
:
8727 ret
= get_errno(sys_tgkill((int)arg1
, (int)arg2
,
8728 target_to_host_signal(arg3
)));
8732 #ifdef TARGET_NR_set_robust_list
8733 case TARGET_NR_set_robust_list
:
8734 case TARGET_NR_get_robust_list
:
8735 /* The ABI for supporting robust futexes has userspace pass
8736 * the kernel a pointer to a linked list which is updated by
8737 * userspace after the syscall; the list is walked by the kernel
8738 * when the thread exits. Since the linked list in QEMU guest
8739 * memory isn't a valid linked list for the host and we have
8740 * no way to reliably intercept the thread-death event, we can't
8741 * support these. Silently return ENOSYS so that guest userspace
8742 * falls back to a non-robust futex implementation (which should
8743 * be OK except in the corner case of the guest crashing while
8744 * holding a mutex that is shared with another process via
8747 goto unimplemented_nowarn
;
8750 #if defined(TARGET_NR_utimensat)
8751 case TARGET_NR_utimensat
:
8753 struct timespec
*tsp
, ts
[2];
8757 target_to_host_timespec(ts
, arg3
);
8758 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
8762 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
8764 if (!(p
= lock_user_string(arg2
))) {
8765 ret
= -TARGET_EFAULT
;
8768 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
8769 unlock_user(p
, arg2
, 0);
8774 case TARGET_NR_futex
:
8775 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8777 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
8778 case TARGET_NR_inotify_init
:
8779 ret
= get_errno(sys_inotify_init());
8782 #ifdef CONFIG_INOTIFY1
8783 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
8784 case TARGET_NR_inotify_init1
:
8785 ret
= get_errno(sys_inotify_init1(arg1
));
8789 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
8790 case TARGET_NR_inotify_add_watch
:
8791 p
= lock_user_string(arg2
);
8792 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
8793 unlock_user(p
, arg2
, 0);
8796 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
8797 case TARGET_NR_inotify_rm_watch
:
8798 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
8802 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
8803 case TARGET_NR_mq_open
:
8805 struct mq_attr posix_mq_attr
;
8807 p
= lock_user_string(arg1
- 1);
8809 copy_from_user_mq_attr (&posix_mq_attr
, arg4
);
8810 ret
= get_errno(mq_open(p
, arg2
, arg3
, &posix_mq_attr
));
8811 unlock_user (p
, arg1
, 0);
8815 case TARGET_NR_mq_unlink
:
8816 p
= lock_user_string(arg1
- 1);
8817 ret
= get_errno(mq_unlink(p
));
8818 unlock_user (p
, arg1
, 0);
8821 case TARGET_NR_mq_timedsend
:
8825 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
8827 target_to_host_timespec(&ts
, arg5
);
8828 ret
= get_errno(mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
8829 host_to_target_timespec(arg5
, &ts
);
8832 ret
= get_errno(mq_send(arg1
, p
, arg3
, arg4
));
8833 unlock_user (p
, arg2
, arg3
);
8837 case TARGET_NR_mq_timedreceive
:
8842 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
8844 target_to_host_timespec(&ts
, arg5
);
8845 ret
= get_errno(mq_timedreceive(arg1
, p
, arg3
, &prio
, &ts
));
8846 host_to_target_timespec(arg5
, &ts
);
8849 ret
= get_errno(mq_receive(arg1
, p
, arg3
, &prio
));
8850 unlock_user (p
, arg2
, arg3
);
8852 put_user_u32(prio
, arg4
);
8856 /* Not implemented for now... */
8857 /* case TARGET_NR_mq_notify: */
8860 case TARGET_NR_mq_getsetattr
:
8862 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
8865 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
8866 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
8869 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
8870 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
8877 #ifdef CONFIG_SPLICE
8878 #ifdef TARGET_NR_tee
8881 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
8885 #ifdef TARGET_NR_splice
8886 case TARGET_NR_splice
:
8888 loff_t loff_in
, loff_out
;
8889 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
8891 get_user_u64(loff_in
, arg2
);
8892 ploff_in
= &loff_in
;
8895 get_user_u64(loff_out
, arg2
);
8896 ploff_out
= &loff_out
;
8898 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
8902 #ifdef TARGET_NR_vmsplice
8903 case TARGET_NR_vmsplice
:
8905 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
8907 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
8908 unlock_iovec(vec
, arg2
, arg3
, 0);
8910 ret
= -host_to_target_errno(errno
);
8915 #endif /* CONFIG_SPLICE */
8916 #ifdef CONFIG_EVENTFD
8917 #if defined(TARGET_NR_eventfd)
8918 case TARGET_NR_eventfd
:
8919 ret
= get_errno(eventfd(arg1
, 0));
8922 #if defined(TARGET_NR_eventfd2)
8923 case TARGET_NR_eventfd2
:
8925 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
8926 if (arg2
& TARGET_O_NONBLOCK
) {
8927 host_flags
|= O_NONBLOCK
;
8929 if (arg2
& TARGET_O_CLOEXEC
) {
8930 host_flags
|= O_CLOEXEC
;
8932 ret
= get_errno(eventfd(arg1
, host_flags
));
8936 #endif /* CONFIG_EVENTFD */
8937 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
8938 case TARGET_NR_fallocate
:
8939 #if TARGET_ABI_BITS == 32
8940 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
8941 target_offset64(arg5
, arg6
)));
8943 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
8947 #if defined(CONFIG_SYNC_FILE_RANGE)
8948 #if defined(TARGET_NR_sync_file_range)
8949 case TARGET_NR_sync_file_range
:
8950 #if TARGET_ABI_BITS == 32
8951 #if defined(TARGET_MIPS)
8952 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
8953 target_offset64(arg5
, arg6
), arg7
));
8955 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
8956 target_offset64(arg4
, arg5
), arg6
));
8957 #endif /* !TARGET_MIPS */
8959 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
8963 #if defined(TARGET_NR_sync_file_range2)
8964 case TARGET_NR_sync_file_range2
:
8965 /* This is like sync_file_range but the arguments are reordered */
8966 #if TARGET_ABI_BITS == 32
8967 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
8968 target_offset64(arg5
, arg6
), arg2
));
8970 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
8975 #if defined(CONFIG_EPOLL)
8976 #if defined(TARGET_NR_epoll_create)
8977 case TARGET_NR_epoll_create
:
8978 ret
= get_errno(epoll_create(arg1
));
8981 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
8982 case TARGET_NR_epoll_create1
:
8983 ret
= get_errno(epoll_create1(arg1
));
8986 #if defined(TARGET_NR_epoll_ctl)
8987 case TARGET_NR_epoll_ctl
:
8989 struct epoll_event ep
;
8990 struct epoll_event
*epp
= 0;
8992 struct target_epoll_event
*target_ep
;
8993 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
8996 ep
.events
= tswap32(target_ep
->events
);
8997 /* The epoll_data_t union is just opaque data to the kernel,
8998 * so we transfer all 64 bits across and need not worry what
8999 * actual data type it is.
9001 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
9002 unlock_user_struct(target_ep
, arg4
, 0);
9005 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
9010 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
9011 #define IMPLEMENT_EPOLL_PWAIT
9013 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
9014 #if defined(TARGET_NR_epoll_wait)
9015 case TARGET_NR_epoll_wait
:
9017 #if defined(IMPLEMENT_EPOLL_PWAIT)
9018 case TARGET_NR_epoll_pwait
:
9021 struct target_epoll_event
*target_ep
;
9022 struct epoll_event
*ep
;
9024 int maxevents
= arg3
;
9027 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
9028 maxevents
* sizeof(struct target_epoll_event
), 1);
9033 ep
= alloca(maxevents
* sizeof(struct epoll_event
));
9036 #if defined(IMPLEMENT_EPOLL_PWAIT)
9037 case TARGET_NR_epoll_pwait
:
9039 target_sigset_t
*target_set
;
9040 sigset_t _set
, *set
= &_set
;
9043 target_set
= lock_user(VERIFY_READ
, arg5
,
9044 sizeof(target_sigset_t
), 1);
9046 unlock_user(target_ep
, arg2
, 0);
9049 target_to_host_sigset(set
, target_set
);
9050 unlock_user(target_set
, arg5
, 0);
9055 ret
= get_errno(epoll_pwait(epfd
, ep
, maxevents
, timeout
, set
));
9059 #if defined(TARGET_NR_epoll_wait)
9060 case TARGET_NR_epoll_wait
:
9061 ret
= get_errno(epoll_wait(epfd
, ep
, maxevents
, timeout
));
9065 ret
= -TARGET_ENOSYS
;
9067 if (!is_error(ret
)) {
9069 for (i
= 0; i
< ret
; i
++) {
9070 target_ep
[i
].events
= tswap32(ep
[i
].events
);
9071 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
9074 unlock_user(target_ep
, arg2
, ret
* sizeof(struct target_epoll_event
));
9079 #ifdef TARGET_NR_prlimit64
9080 case TARGET_NR_prlimit64
:
9082 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
9083 struct target_rlimit64
*target_rnew
, *target_rold
;
9084 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
9086 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
9089 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
9090 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
9091 unlock_user_struct(target_rnew
, arg3
, 0);
9095 ret
= get_errno(sys_prlimit64(arg1
, arg2
, rnewp
, arg4
? &rold
: 0));
9096 if (!is_error(ret
) && arg4
) {
9097 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
9100 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
9101 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
9102 unlock_user_struct(target_rold
, arg4
, 1);
9107 #ifdef TARGET_NR_gethostname
9108 case TARGET_NR_gethostname
:
9110 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
9112 ret
= get_errno(gethostname(name
, arg2
));
9113 unlock_user(name
, arg1
, arg2
);
9115 ret
= -TARGET_EFAULT
;
9120 #ifdef TARGET_NR_atomic_cmpxchg_32
9121 case TARGET_NR_atomic_cmpxchg_32
:
9123 /* should use start_exclusive from main.c */
9124 abi_ulong mem_value
;
9125 if (get_user_u32(mem_value
, arg6
)) {
9126 target_siginfo_t info
;
9127 info
.si_signo
= SIGSEGV
;
9129 info
.si_code
= TARGET_SEGV_MAPERR
;
9130 info
._sifields
._sigfault
._addr
= arg6
;
9131 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
9135 if (mem_value
== arg2
)
9136 put_user_u32(arg1
, arg6
);
9141 #ifdef TARGET_NR_atomic_barrier
9142 case TARGET_NR_atomic_barrier
:
9144 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
9149 #ifdef TARGET_NR_timer_create
9150 case TARGET_NR_timer_create
:
9152 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
9154 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
9155 struct target_sigevent
*ptarget_sevp
;
9156 struct target_timer_t
*ptarget_timer
;
9159 int timer_index
= next_free_host_timer();
9161 if (timer_index
< 0) {
9162 ret
= -TARGET_EAGAIN
;
9164 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
9167 if (!lock_user_struct(VERIFY_READ
, ptarget_sevp
, arg2
, 1)) {
9171 host_sevp
.sigev_signo
= tswap32(ptarget_sevp
->sigev_signo
);
9172 host_sevp
.sigev_notify
= tswap32(ptarget_sevp
->sigev_notify
);
9174 phost_sevp
= &host_sevp
;
9177 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
9181 if (!lock_user_struct(VERIFY_WRITE
, ptarget_timer
, arg3
, 1)) {
9184 ptarget_timer
->ptr
= tswap32(0xcafe0000 | timer_index
);
9185 unlock_user_struct(ptarget_timer
, arg3
, 1);
9192 #ifdef TARGET_NR_timer_settime
9193 case TARGET_NR_timer_settime
:
9195 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
9196 * struct itimerspec * old_value */
9198 if (arg3
== 0 || arg1
< 0 || arg1
>= ARRAY_SIZE(g_posix_timers
)) {
9199 ret
= -TARGET_EINVAL
;
9201 timer_t htimer
= g_posix_timers
[arg1
];
9202 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
9204 target_to_host_itimerspec(&hspec_new
, arg3
);
9206 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
9207 host_to_target_itimerspec(arg2
, &hspec_old
);
9213 #ifdef TARGET_NR_timer_gettime
9214 case TARGET_NR_timer_gettime
:
9216 /* args: timer_t timerid, struct itimerspec *curr_value */
9219 return -TARGET_EFAULT
;
9220 } else if (arg1
< 0 || arg1
>= ARRAY_SIZE(g_posix_timers
)) {
9221 ret
= -TARGET_EINVAL
;
9223 timer_t htimer
= g_posix_timers
[arg1
];
9224 struct itimerspec hspec
;
9225 ret
= get_errno(timer_gettime(htimer
, &hspec
));
9227 if (host_to_target_itimerspec(arg2
, &hspec
)) {
9228 ret
= -TARGET_EFAULT
;
9235 #ifdef TARGET_NR_timer_getoverrun
9236 case TARGET_NR_timer_getoverrun
:
9238 /* args: timer_t timerid */
9240 if (arg1
< 0 || arg1
>= ARRAY_SIZE(g_posix_timers
)) {
9241 ret
= -TARGET_EINVAL
;
9243 timer_t htimer
= g_posix_timers
[arg1
];
9244 ret
= get_errno(timer_getoverrun(htimer
));
9250 #ifdef TARGET_NR_timer_delete
9251 case TARGET_NR_timer_delete
:
9253 /* args: timer_t timerid */
9255 if (arg1
< 0 || arg1
>= ARRAY_SIZE(g_posix_timers
)) {
9256 ret
= -TARGET_EINVAL
;
9258 timer_t htimer
= g_posix_timers
[arg1
];
9259 ret
= get_errno(timer_delete(htimer
));
9260 g_posix_timers
[arg1
] = 0;
9268 gemu_log("qemu: Unsupported syscall: %d\n", num
);
9269 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
9270 unimplemented_nowarn
:
9272 ret
= -TARGET_ENOSYS
;
9277 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
9280 print_syscall_ret(num
, ret
);
9283 ret
= -TARGET_EFAULT
;