4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
29 #include <sys/mount.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
36 #include <linux/capability.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
43 #include <sys/times.h>
46 #include <sys/statfs.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
58 #include "qemu-common.h"
60 #include <sys/timerfd.h>
66 #include <sys/eventfd.h>
69 #include <sys/epoll.h>
72 #include "qemu/xattr.h"
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
91 #include <linux/mtio.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
97 #if defined(CONFIG_USBFS)
98 #include <linux/usbdevice_fs.h>
99 #include <linux/usb/ch9.h>
101 #include <linux/vt.h>
102 #include <linux/dm-ioctl.h>
103 #include <linux/reboot.h>
104 #include <linux/route.h>
105 #include <linux/filter.h>
106 #include <linux/blkpg.h>
107 #include <netpacket/packet.h>
108 #include <linux/netlink.h>
109 #include "linux_loop.h"
113 #include "fd-trans.h"
#ifndef CLONE_IO
#define CLONE_IO                0x80000000      /* Clone io context */
#endif

/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */
/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                     \
    (CLONE_DETACHED | CLONE_IO)

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS               \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS                             \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

#define CLONE_INVALID_FORK_FLAGS                                        \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

#define CLONE_INVALID_THREAD_FLAGS                                      \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
       CLONE_IGNORED_FLAGS))

/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */

/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH  _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
/* Helper macros generating a static host-syscall wrapper function NAME
 * taking 0..6 arguments; each wrapper invokes syscall(__NR_NAME, ...)
 * directly, bypassing any libc wrapper.  The raw host return value is
 * passed back to the caller (errno conversion is done via get_errno()).
 */
#define _syscall0(type,name)            \
static type name (void)                 \
{                                       \
    return syscall(__NR_##name);        \
}

#define _syscall1(type,name,type1,arg1)         \
static type name (type1 arg1)                   \
{                                               \
    return syscall(__NR_##name, arg1);          \
}

#define _syscall2(type,name,type1,arg1,type2,arg2)      \
static type name (type1 arg1,type2 arg2)                \
{                                                       \
    return syscall(__NR_##name, arg1, arg2);            \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)   \
static type name (type1 arg1,type2 arg2,type3 arg3)             \
{                                                               \
    return syscall(__NR_##name, arg1, arg2, arg3);              \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)           \
{                                                                        \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4);                 \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5)                                            \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{                                                                        \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);           \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6)                                 \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6)                                            \
{                                                                        \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);     \
}
230 #define __NR_sys_uname __NR_uname
231 #define __NR_sys_getcwd1 __NR_getcwd
232 #define __NR_sys_getdents __NR_getdents
233 #define __NR_sys_getdents64 __NR_getdents64
234 #define __NR_sys_getpriority __NR_getpriority
235 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
236 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
237 #define __NR_sys_syslog __NR_syslog
238 #define __NR_sys_futex __NR_futex
239 #define __NR_sys_inotify_init __NR_inotify_init
240 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
241 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
243 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
244 #define __NR__llseek __NR_lseek
247 /* Newer kernel ports have llseek() instead of _llseek() */
248 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
249 #define TARGET_NR__llseek TARGET_NR_llseek
253 _syscall0(int, gettid
)
255 /* This is a replacement for the host gettid() and must return a host
257 static int gettid(void) {
262 /* For the 64-bit guest on 32-bit host case we must emulate
263 * getdents using getdents64, because otherwise the host
264 * might hand us back more dirent records than we can fit
265 * into the guest buffer after structure format conversion.
266 * Otherwise we emulate getdents with getdents if the host has it.
268 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
269 #define EMULATE_GETDENTS_WITH_GETDENTS
272 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
273 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
275 #if (defined(TARGET_NR_getdents) && \
276 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
277 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
278 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
280 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
281 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
282 loff_t
*, res
, uint
, wh
);
284 _syscall3(int, sys_rt_sigqueueinfo
, pid_t
, pid
, int, sig
, siginfo_t
*, uinfo
)
285 _syscall4(int, sys_rt_tgsigqueueinfo
, pid_t
, pid
, pid_t
, tid
, int, sig
,
287 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
288 #ifdef __NR_exit_group
289 _syscall1(int,exit_group
,int,error_code
)
291 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
292 _syscall1(int,set_tid_address
,int *,tidptr
)
294 #if defined(TARGET_NR_futex) && defined(__NR_futex)
295 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
296 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
298 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
299 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
300 unsigned long *, user_mask_ptr
);
301 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
302 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
303 unsigned long *, user_mask_ptr
);
304 #define __NR_sys_getcpu __NR_getcpu
305 _syscall3(int, sys_getcpu
, unsigned *, cpu
, unsigned *, node
, void *, tcache
);
306 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
308 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
309 struct __user_cap_data_struct
*, data
);
310 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
311 struct __user_cap_data_struct
*, data
);
312 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
313 _syscall2(int, ioprio_get
, int, which
, int, who
)
315 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
316 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
318 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
319 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
322 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
323 _syscall5(int, kcmp
, pid_t
, pid1
, pid_t
, pid2
, int, type
,
324 unsigned long, idx1
, unsigned long, idx2
)
327 static bitmask_transtbl fcntl_flags_tbl
[] = {
328 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
329 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
330 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
331 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
332 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
333 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
334 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
335 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
336 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
337 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
338 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
339 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
340 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
341 #if defined(O_DIRECT)
342 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
344 #if defined(O_NOATIME)
345 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
347 #if defined(O_CLOEXEC)
348 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
351 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
353 #if defined(O_TMPFILE)
354 { TARGET_O_TMPFILE
, TARGET_O_TMPFILE
, O_TMPFILE
, O_TMPFILE
},
356 /* Don't terminate the list prematurely on 64-bit host+guest. */
357 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
358 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
/* getcwd() wrapper matching the kernel's getcwd syscall contract:
 * returns the string length including the terminating NUL on success,
 * or -1 with errno set on failure.
 */
static int sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }
  return strlen(buf)+1;
}
#ifdef TARGET_NR_utimensat
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
/* Host has no utimensat syscall: fail with ENOSYS so the guest sees
 * the same error an old kernel would report.
 */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
/* Host has no renameat2 syscall: fall back to plain renameat() when no
 * flags are requested, otherwise fail with ENOSYS.
 */
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

/* Thin wrappers so the syscall dispatcher can call the host's inotify
 * facility through a uniform sys_* name.
 */
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};

_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maxiumum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

/* Find a free slot in g_posix_timers[], claim it with a dummy non-zero
 * value and return its index, or -1 if every slot is in use.
 */
static inline int next_free_host_timer(void)
{
    int k ;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
/* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env, int num)
{
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_SH4)
/* SH4 doesn't align register pairs, except for p{read,write}64 */
static inline int regpairs_aligned(void *cpu_env, int num)
{
    switch (num) {
    case TARGET_NR_pread64:
    case TARGET_NR_pwrite64:
        return 1;

    default:
        return 0;
    }
}
#elif defined(TARGET_XTENSA)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
#endif
508 #define ERRNO_TABLE_SIZE 1200
510 /* target_to_host_errno_table[] is initialized from
511 * host_to_target_errno_table[] in syscall_init(). */
512 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
516 * This list is the union of errno values overridden in asm-<arch>/errno.h
517 * minus the errnos that are not actually generic to all archs.
519 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
520 [EAGAIN
] = TARGET_EAGAIN
,
521 [EIDRM
] = TARGET_EIDRM
,
522 [ECHRNG
] = TARGET_ECHRNG
,
523 [EL2NSYNC
] = TARGET_EL2NSYNC
,
524 [EL3HLT
] = TARGET_EL3HLT
,
525 [EL3RST
] = TARGET_EL3RST
,
526 [ELNRNG
] = TARGET_ELNRNG
,
527 [EUNATCH
] = TARGET_EUNATCH
,
528 [ENOCSI
] = TARGET_ENOCSI
,
529 [EL2HLT
] = TARGET_EL2HLT
,
530 [EDEADLK
] = TARGET_EDEADLK
,
531 [ENOLCK
] = TARGET_ENOLCK
,
532 [EBADE
] = TARGET_EBADE
,
533 [EBADR
] = TARGET_EBADR
,
534 [EXFULL
] = TARGET_EXFULL
,
535 [ENOANO
] = TARGET_ENOANO
,
536 [EBADRQC
] = TARGET_EBADRQC
,
537 [EBADSLT
] = TARGET_EBADSLT
,
538 [EBFONT
] = TARGET_EBFONT
,
539 [ENOSTR
] = TARGET_ENOSTR
,
540 [ENODATA
] = TARGET_ENODATA
,
541 [ETIME
] = TARGET_ETIME
,
542 [ENOSR
] = TARGET_ENOSR
,
543 [ENONET
] = TARGET_ENONET
,
544 [ENOPKG
] = TARGET_ENOPKG
,
545 [EREMOTE
] = TARGET_EREMOTE
,
546 [ENOLINK
] = TARGET_ENOLINK
,
547 [EADV
] = TARGET_EADV
,
548 [ESRMNT
] = TARGET_ESRMNT
,
549 [ECOMM
] = TARGET_ECOMM
,
550 [EPROTO
] = TARGET_EPROTO
,
551 [EDOTDOT
] = TARGET_EDOTDOT
,
552 [EMULTIHOP
] = TARGET_EMULTIHOP
,
553 [EBADMSG
] = TARGET_EBADMSG
,
554 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
555 [EOVERFLOW
] = TARGET_EOVERFLOW
,
556 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
557 [EBADFD
] = TARGET_EBADFD
,
558 [EREMCHG
] = TARGET_EREMCHG
,
559 [ELIBACC
] = TARGET_ELIBACC
,
560 [ELIBBAD
] = TARGET_ELIBBAD
,
561 [ELIBSCN
] = TARGET_ELIBSCN
,
562 [ELIBMAX
] = TARGET_ELIBMAX
,
563 [ELIBEXEC
] = TARGET_ELIBEXEC
,
564 [EILSEQ
] = TARGET_EILSEQ
,
565 [ENOSYS
] = TARGET_ENOSYS
,
566 [ELOOP
] = TARGET_ELOOP
,
567 [ERESTART
] = TARGET_ERESTART
,
568 [ESTRPIPE
] = TARGET_ESTRPIPE
,
569 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
570 [EUSERS
] = TARGET_EUSERS
,
571 [ENOTSOCK
] = TARGET_ENOTSOCK
,
572 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
573 [EMSGSIZE
] = TARGET_EMSGSIZE
,
574 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
575 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
576 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
577 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
578 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
579 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
580 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
581 [EADDRINUSE
] = TARGET_EADDRINUSE
,
582 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
583 [ENETDOWN
] = TARGET_ENETDOWN
,
584 [ENETUNREACH
] = TARGET_ENETUNREACH
,
585 [ENETRESET
] = TARGET_ENETRESET
,
586 [ECONNABORTED
] = TARGET_ECONNABORTED
,
587 [ECONNRESET
] = TARGET_ECONNRESET
,
588 [ENOBUFS
] = TARGET_ENOBUFS
,
589 [EISCONN
] = TARGET_EISCONN
,
590 [ENOTCONN
] = TARGET_ENOTCONN
,
591 [EUCLEAN
] = TARGET_EUCLEAN
,
592 [ENOTNAM
] = TARGET_ENOTNAM
,
593 [ENAVAIL
] = TARGET_ENAVAIL
,
594 [EISNAM
] = TARGET_EISNAM
,
595 [EREMOTEIO
] = TARGET_EREMOTEIO
,
596 [EDQUOT
] = TARGET_EDQUOT
,
597 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
598 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
599 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
600 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
601 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
602 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
603 [EALREADY
] = TARGET_EALREADY
,
604 [EINPROGRESS
] = TARGET_EINPROGRESS
,
605 [ESTALE
] = TARGET_ESTALE
,
606 [ECANCELED
] = TARGET_ECANCELED
,
607 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
608 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
610 [ENOKEY
] = TARGET_ENOKEY
,
613 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
616 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
619 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
622 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
624 #ifdef ENOTRECOVERABLE
625 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
628 [ENOMSG
] = TARGET_ENOMSG
,
631 [ERFKILL
] = TARGET_ERFKILL
,
634 [EHWPOISON
] = TARGET_EHWPOISON
,
638 static inline int host_to_target_errno(int err
)
640 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
641 host_to_target_errno_table
[err
]) {
642 return host_to_target_errno_table
[err
];
647 static inline int target_to_host_errno(int err
)
649 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
650 target_to_host_errno_table
[err
]) {
651 return target_to_host_errno_table
[err
];
656 static inline abi_long
get_errno(abi_long ret
)
659 return -host_to_target_errno(errno
);
664 const char *target_strerror(int err
)
666 if (err
== TARGET_ERESTARTSYS
) {
667 return "To be restarted";
669 if (err
== TARGET_QEMU_ESIGRETURN
) {
670 return "Successful exit from sigreturn";
673 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
676 return strerror(target_to_host_errno(err
));
/* Helper macros generating safe_NAME() wrappers around the project's
 * safe_syscall() primitive, which guarantees correct interaction with
 * guest signal handling around blocking syscalls.
 */
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
726 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
727 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
728 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
729 int, flags
, mode_t
, mode
)
730 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
731 struct rusage
*, rusage
)
732 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
733 int, options
, struct rusage
*, rusage
)
734 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
735 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
736 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
737 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
738 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
740 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
741 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
743 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
744 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
745 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
746 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
747 safe_syscall2(int, tkill
, int, tid
, int, sig
)
748 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
749 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
750 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
751 safe_syscall5(ssize_t
, preadv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
752 unsigned long, pos_l
, unsigned long, pos_h
)
753 safe_syscall5(ssize_t
, pwritev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
754 unsigned long, pos_l
, unsigned long, pos_h
)
755 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
757 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
758 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
759 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
760 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
761 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
762 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
763 safe_syscall2(int, flock
, int, fd
, int, operation
)
764 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
765 const struct timespec
*, uts
, size_t, sigsetsize
)
766 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
768 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
769 struct timespec
*, rem
)
770 #ifdef TARGET_NR_clock_nanosleep
771 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
772 const struct timespec
*, req
, struct timespec
*, rem
)
775 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
777 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
778 long, msgtype
, int, flags
)
779 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
780 unsigned, nsops
, const struct timespec
*, timeout
)
782 /* This host kernel architecture uses a single ipc syscall; fake up
783 * wrappers for the sub-operations to hide this implementation detail.
784 * Annoyingly we can't include linux/ipc.h to get the constant definitions
785 * for the call parameter because some structs in there conflict with the
786 * sys/ipc.h ones. So we just define them here, and rely on them being
787 * the same for all host architectures.
789 #define Q_SEMTIMEDOP 4
792 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
794 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
795 void *, ptr
, long, fifth
)
796 static int safe_msgsnd(int msgid
, const void *msgp
, size_t sz
, int flags
)
798 return safe_ipc(Q_IPCCALL(0, Q_MSGSND
), msgid
, sz
, flags
, (void *)msgp
, 0);
800 static int safe_msgrcv(int msgid
, void *msgp
, size_t sz
, long type
, int flags
)
802 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV
), msgid
, sz
, flags
, msgp
, type
);
804 static int safe_semtimedop(int semid
, struct sembuf
*tsops
, unsigned nsops
,
805 const struct timespec
*timeout
)
807 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP
), semid
, nsops
, 0, tsops
,
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
833 static inline int host_to_target_sock_type(int host_type
)
837 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
839 target_type
= TARGET_SOCK_DGRAM
;
842 target_type
= TARGET_SOCK_STREAM
;
845 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
849 #if defined(SOCK_CLOEXEC)
850 if (host_type
& SOCK_CLOEXEC
) {
851 target_type
|= TARGET_SOCK_CLOEXEC
;
855 #if defined(SOCK_NONBLOCK)
856 if (host_type
& SOCK_NONBLOCK
) {
857 target_type
|= TARGET_SOCK_NONBLOCK
;
864 static abi_ulong target_brk
;
865 static abi_ulong target_original_brk
;
866 static abi_ulong brk_page
;
868 void target_set_brk(abi_ulong new_brk
)
870 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
871 brk_page
= HOST_PAGE_ALIGN(target_brk
);
874 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
875 #define DEBUGF_BRK(message, args...)
877 /* do_brk() must return target values and target errnos. */
878 abi_long
do_brk(abi_ulong new_brk
)
880 abi_long mapped_addr
;
881 abi_ulong new_alloc_size
;
883 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
886 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
889 if (new_brk
< target_original_brk
) {
890 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
895 /* If the new brk is less than the highest page reserved to the
896 * target heap allocation, set it and we're almost done... */
897 if (new_brk
<= brk_page
) {
898 /* Heap contents are initialized to zero, as for anonymous
900 if (new_brk
> target_brk
) {
901 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
903 target_brk
= new_brk
;
904 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
908 /* We need to allocate more memory after the brk... Note that
909 * we don't use MAP_FIXED because that will map over the top of
910 * any existing mapping (like the one with the host libc or qemu
911 * itself); instead we treat "mapped but at wrong address" as
912 * a failure and unmap again.
914 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
915 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
916 PROT_READ
|PROT_WRITE
,
917 MAP_ANON
|MAP_PRIVATE
, 0, 0));
919 if (mapped_addr
== brk_page
) {
920 /* Heap contents are initialized to zero, as for anonymous
921 * mapped pages. Technically the new pages are already
922 * initialized to zero since they *are* anonymous mapped
923 * pages, however we have to take care with the contents that
924 * come from the remaining part of the previous page: it may
925 * contains garbage data due to a previous heap usage (grown
927 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
929 target_brk
= new_brk
;
930 brk_page
= HOST_PAGE_ALIGN(target_brk
);
931 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
934 } else if (mapped_addr
!= -1) {
935 /* Mapped but at wrong address, meaning there wasn't actually
936 * enough space for this brk.
938 target_munmap(mapped_addr
, new_alloc_size
);
940 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
943 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
946 #if defined(TARGET_ALPHA)
947 /* We (partially) emulate OSF/1 on Alpha, which requires we
948 return a proper errno, not an unchanged brk value. */
949 return -TARGET_ENOMEM
;
951 /* For everything else, return the previous break. */
955 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
956 abi_ulong target_fds_addr
,
960 abi_ulong b
, *target_fds
;
962 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
963 if (!(target_fds
= lock_user(VERIFY_READ
,
965 sizeof(abi_ulong
) * nw
,
967 return -TARGET_EFAULT
;
971 for (i
= 0; i
< nw
; i
++) {
972 /* grab the abi_ulong */
973 __get_user(b
, &target_fds
[i
]);
974 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
975 /* check the bit inside the abi_ulong */
982 unlock_user(target_fds
, target_fds_addr
, 0);
987 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
988 abi_ulong target_fds_addr
,
991 if (target_fds_addr
) {
992 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
993 return -TARGET_EFAULT
;
1001 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
1007 abi_ulong
*target_fds
;
1009 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1010 if (!(target_fds
= lock_user(VERIFY_WRITE
,
1012 sizeof(abi_ulong
) * nw
,
1014 return -TARGET_EFAULT
;
1017 for (i
= 0; i
< nw
; i
++) {
1019 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1020 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
1023 __put_user(v
, &target_fds
[i
]);
1026 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
1031 #if defined(__alpha__)
1032 #define HOST_HZ 1024
1037 static inline abi_long
host_to_target_clock_t(long ticks
)
1039 #if HOST_HZ == TARGET_HZ
1042 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
1046 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
1047 const struct rusage
*rusage
)
1049 struct target_rusage
*target_rusage
;
1051 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
1052 return -TARGET_EFAULT
;
1053 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
1054 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
1055 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
1056 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
1057 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
1058 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
1059 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
1060 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
1061 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
1062 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
1063 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1064 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1065 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1066 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1067 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1068 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1069 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1070 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1071 unlock_user_struct(target_rusage
, target_addr
, 1);
1076 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1078 abi_ulong target_rlim_swap
;
1081 target_rlim_swap
= tswapal(target_rlim
);
1082 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1083 return RLIM_INFINITY
;
1085 result
= target_rlim_swap
;
1086 if (target_rlim_swap
!= (rlim_t
)result
)
1087 return RLIM_INFINITY
;
1092 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1094 abi_ulong target_rlim_swap
;
1097 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1098 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1100 target_rlim_swap
= rlim
;
1101 result
= tswapal(target_rlim_swap
);
1106 static inline int target_to_host_resource(int code
)
1109 case TARGET_RLIMIT_AS
:
1111 case TARGET_RLIMIT_CORE
:
1113 case TARGET_RLIMIT_CPU
:
1115 case TARGET_RLIMIT_DATA
:
1117 case TARGET_RLIMIT_FSIZE
:
1118 return RLIMIT_FSIZE
;
1119 case TARGET_RLIMIT_LOCKS
:
1120 return RLIMIT_LOCKS
;
1121 case TARGET_RLIMIT_MEMLOCK
:
1122 return RLIMIT_MEMLOCK
;
1123 case TARGET_RLIMIT_MSGQUEUE
:
1124 return RLIMIT_MSGQUEUE
;
1125 case TARGET_RLIMIT_NICE
:
1127 case TARGET_RLIMIT_NOFILE
:
1128 return RLIMIT_NOFILE
;
1129 case TARGET_RLIMIT_NPROC
:
1130 return RLIMIT_NPROC
;
1131 case TARGET_RLIMIT_RSS
:
1133 case TARGET_RLIMIT_RTPRIO
:
1134 return RLIMIT_RTPRIO
;
1135 case TARGET_RLIMIT_SIGPENDING
:
1136 return RLIMIT_SIGPENDING
;
1137 case TARGET_RLIMIT_STACK
:
1138 return RLIMIT_STACK
;
1144 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1145 abi_ulong target_tv_addr
)
1147 struct target_timeval
*target_tv
;
1149 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
1150 return -TARGET_EFAULT
;
1152 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1153 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1155 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1160 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1161 const struct timeval
*tv
)
1163 struct target_timeval
*target_tv
;
1165 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
1166 return -TARGET_EFAULT
;
1168 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1169 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1171 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1176 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1177 abi_ulong target_tz_addr
)
1179 struct target_timezone
*target_tz
;
1181 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1182 return -TARGET_EFAULT
;
1185 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1186 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1188 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1193 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1196 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1197 abi_ulong target_mq_attr_addr
)
1199 struct target_mq_attr
*target_mq_attr
;
1201 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1202 target_mq_attr_addr
, 1))
1203 return -TARGET_EFAULT
;
1205 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1206 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1207 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1208 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1210 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1215 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1216 const struct mq_attr
*attr
)
1218 struct target_mq_attr
*target_mq_attr
;
1220 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1221 target_mq_attr_addr
, 0))
1222 return -TARGET_EFAULT
;
1224 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1225 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1226 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1227 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1229 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1235 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1236 /* do_select() must return target values and target errnos. */
1237 static abi_long
do_select(int n
,
1238 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1239 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1241 fd_set rfds
, wfds
, efds
;
1242 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1244 struct timespec ts
, *ts_ptr
;
1247 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1251 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1255 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1260 if (target_tv_addr
) {
1261 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1262 return -TARGET_EFAULT
;
1263 ts
.tv_sec
= tv
.tv_sec
;
1264 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1270 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1273 if (!is_error(ret
)) {
1274 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1275 return -TARGET_EFAULT
;
1276 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1277 return -TARGET_EFAULT
;
1278 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1279 return -TARGET_EFAULT
;
1281 if (target_tv_addr
) {
1282 tv
.tv_sec
= ts
.tv_sec
;
1283 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1284 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1285 return -TARGET_EFAULT
;
1293 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1294 static abi_long
do_old_select(abi_ulong arg1
)
1296 struct target_sel_arg_struct
*sel
;
1297 abi_ulong inp
, outp
, exp
, tvp
;
1300 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1)) {
1301 return -TARGET_EFAULT
;
1304 nsel
= tswapal(sel
->n
);
1305 inp
= tswapal(sel
->inp
);
1306 outp
= tswapal(sel
->outp
);
1307 exp
= tswapal(sel
->exp
);
1308 tvp
= tswapal(sel
->tvp
);
1310 unlock_user_struct(sel
, arg1
, 0);
1312 return do_select(nsel
, inp
, outp
, exp
, tvp
);
1317 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1320 return pipe2(host_pipe
, flags
);
1326 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1327 int flags
, int is_pipe2
)
1331 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1334 return get_errno(ret
);
1336 /* Several targets have special calling conventions for the original
1337 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1339 #if defined(TARGET_ALPHA)
1340 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1341 return host_pipe
[0];
1342 #elif defined(TARGET_MIPS)
1343 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1344 return host_pipe
[0];
1345 #elif defined(TARGET_SH4)
1346 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1347 return host_pipe
[0];
1348 #elif defined(TARGET_SPARC)
1349 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1350 return host_pipe
[0];
1354 if (put_user_s32(host_pipe
[0], pipedes
)
1355 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1356 return -TARGET_EFAULT
;
1357 return get_errno(ret
);
1360 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1361 abi_ulong target_addr
,
1364 struct target_ip_mreqn
*target_smreqn
;
1366 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1368 return -TARGET_EFAULT
;
1369 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1370 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1371 if (len
== sizeof(struct target_ip_mreqn
))
1372 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1373 unlock_user(target_smreqn
, target_addr
, 0);
1378 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1379 abi_ulong target_addr
,
1382 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1383 sa_family_t sa_family
;
1384 struct target_sockaddr
*target_saddr
;
1386 if (fd_trans_target_to_host_addr(fd
)) {
1387 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1390 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1392 return -TARGET_EFAULT
;
1394 sa_family
= tswap16(target_saddr
->sa_family
);
1396 /* Oops. The caller might send a incomplete sun_path; sun_path
1397 * must be terminated by \0 (see the manual page), but
1398 * unfortunately it is quite common to specify sockaddr_un
1399 * length as "strlen(x->sun_path)" while it should be
1400 * "strlen(...) + 1". We'll fix that here if needed.
1401 * Linux kernel has a similar feature.
1404 if (sa_family
== AF_UNIX
) {
1405 if (len
< unix_maxlen
&& len
> 0) {
1406 char *cp
= (char*)target_saddr
;
1408 if ( cp
[len
-1] && !cp
[len
] )
1411 if (len
> unix_maxlen
)
1415 memcpy(addr
, target_saddr
, len
);
1416 addr
->sa_family
= sa_family
;
1417 if (sa_family
== AF_NETLINK
) {
1418 struct sockaddr_nl
*nladdr
;
1420 nladdr
= (struct sockaddr_nl
*)addr
;
1421 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1422 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1423 } else if (sa_family
== AF_PACKET
) {
1424 struct target_sockaddr_ll
*lladdr
;
1426 lladdr
= (struct target_sockaddr_ll
*)addr
;
1427 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1428 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1430 unlock_user(target_saddr
, target_addr
, 0);
1435 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1436 struct sockaddr
*addr
,
1439 struct target_sockaddr
*target_saddr
;
1446 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1448 return -TARGET_EFAULT
;
1449 memcpy(target_saddr
, addr
, len
);
1450 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1451 sizeof(target_saddr
->sa_family
)) {
1452 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1454 if (addr
->sa_family
== AF_NETLINK
&& len
>= sizeof(struct sockaddr_nl
)) {
1455 struct sockaddr_nl
*target_nl
= (struct sockaddr_nl
*)target_saddr
;
1456 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1457 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1458 } else if (addr
->sa_family
== AF_PACKET
) {
1459 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1460 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1461 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1462 } else if (addr
->sa_family
== AF_INET6
&&
1463 len
>= sizeof(struct target_sockaddr_in6
)) {
1464 struct target_sockaddr_in6
*target_in6
=
1465 (struct target_sockaddr_in6
*)target_saddr
;
1466 target_in6
->sin6_scope_id
= tswap16(target_in6
->sin6_scope_id
);
1468 unlock_user(target_saddr
, target_addr
, len
);
1473 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1474 struct target_msghdr
*target_msgh
)
1476 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1477 abi_long msg_controllen
;
1478 abi_ulong target_cmsg_addr
;
1479 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1480 socklen_t space
= 0;
1482 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1483 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1485 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1486 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1487 target_cmsg_start
= target_cmsg
;
1489 return -TARGET_EFAULT
;
1491 while (cmsg
&& target_cmsg
) {
1492 void *data
= CMSG_DATA(cmsg
);
1493 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1495 int len
= tswapal(target_cmsg
->cmsg_len
)
1496 - sizeof(struct target_cmsghdr
);
1498 space
+= CMSG_SPACE(len
);
1499 if (space
> msgh
->msg_controllen
) {
1500 space
-= CMSG_SPACE(len
);
1501 /* This is a QEMU bug, since we allocated the payload
1502 * area ourselves (unlike overflow in host-to-target
1503 * conversion, which is just the guest giving us a buffer
1504 * that's too small). It can't happen for the payload types
1505 * we currently support; if it becomes an issue in future
1506 * we would need to improve our allocation strategy to
1507 * something more intelligent than "twice the size of the
1508 * target buffer we're reading from".
1510 gemu_log("Host cmsg overflow\n");
1514 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1515 cmsg
->cmsg_level
= SOL_SOCKET
;
1517 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1519 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1520 cmsg
->cmsg_len
= CMSG_LEN(len
);
1522 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1523 int *fd
= (int *)data
;
1524 int *target_fd
= (int *)target_data
;
1525 int i
, numfds
= len
/ sizeof(int);
1527 for (i
= 0; i
< numfds
; i
++) {
1528 __get_user(fd
[i
], target_fd
+ i
);
1530 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1531 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1532 struct ucred
*cred
= (struct ucred
*)data
;
1533 struct target_ucred
*target_cred
=
1534 (struct target_ucred
*)target_data
;
1536 __get_user(cred
->pid
, &target_cred
->pid
);
1537 __get_user(cred
->uid
, &target_cred
->uid
);
1538 __get_user(cred
->gid
, &target_cred
->gid
);
1540 gemu_log("Unsupported ancillary data: %d/%d\n",
1541 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1542 memcpy(data
, target_data
, len
);
1545 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1546 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1549 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1551 msgh
->msg_controllen
= space
;
1555 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1556 struct msghdr
*msgh
)
1558 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1559 abi_long msg_controllen
;
1560 abi_ulong target_cmsg_addr
;
1561 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1562 socklen_t space
= 0;
1564 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1565 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1567 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1568 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1569 target_cmsg_start
= target_cmsg
;
1571 return -TARGET_EFAULT
;
1573 while (cmsg
&& target_cmsg
) {
1574 void *data
= CMSG_DATA(cmsg
);
1575 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1577 int len
= cmsg
->cmsg_len
- sizeof(struct cmsghdr
);
1578 int tgt_len
, tgt_space
;
1580 /* We never copy a half-header but may copy half-data;
1581 * this is Linux's behaviour in put_cmsg(). Note that
1582 * truncation here is a guest problem (which we report
1583 * to the guest via the CTRUNC bit), unlike truncation
1584 * in target_to_host_cmsg, which is a QEMU bug.
1586 if (msg_controllen
< sizeof(struct target_cmsghdr
)) {
1587 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1591 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1592 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1594 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1596 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1598 /* Payload types which need a different size of payload on
1599 * the target must adjust tgt_len here.
1602 switch (cmsg
->cmsg_level
) {
1604 switch (cmsg
->cmsg_type
) {
1606 tgt_len
= sizeof(struct target_timeval
);
1616 if (msg_controllen
< TARGET_CMSG_LEN(tgt_len
)) {
1617 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1618 tgt_len
= msg_controllen
- sizeof(struct target_cmsghdr
);
1621 /* We must now copy-and-convert len bytes of payload
1622 * into tgt_len bytes of destination space. Bear in mind
1623 * that in both source and destination we may be dealing
1624 * with a truncated value!
1626 switch (cmsg
->cmsg_level
) {
1628 switch (cmsg
->cmsg_type
) {
1631 int *fd
= (int *)data
;
1632 int *target_fd
= (int *)target_data
;
1633 int i
, numfds
= tgt_len
/ sizeof(int);
1635 for (i
= 0; i
< numfds
; i
++) {
1636 __put_user(fd
[i
], target_fd
+ i
);
1642 struct timeval
*tv
= (struct timeval
*)data
;
1643 struct target_timeval
*target_tv
=
1644 (struct target_timeval
*)target_data
;
1646 if (len
!= sizeof(struct timeval
) ||
1647 tgt_len
!= sizeof(struct target_timeval
)) {
1651 /* copy struct timeval to target */
1652 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1653 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1656 case SCM_CREDENTIALS
:
1658 struct ucred
*cred
= (struct ucred
*)data
;
1659 struct target_ucred
*target_cred
=
1660 (struct target_ucred
*)target_data
;
1662 __put_user(cred
->pid
, &target_cred
->pid
);
1663 __put_user(cred
->uid
, &target_cred
->uid
);
1664 __put_user(cred
->gid
, &target_cred
->gid
);
1673 switch (cmsg
->cmsg_type
) {
1676 uint32_t *v
= (uint32_t *)data
;
1677 uint32_t *t_int
= (uint32_t *)target_data
;
1679 if (len
!= sizeof(uint32_t) ||
1680 tgt_len
!= sizeof(uint32_t)) {
1683 __put_user(*v
, t_int
);
1689 struct sock_extended_err ee
;
1690 struct sockaddr_in offender
;
1692 struct errhdr_t
*errh
= (struct errhdr_t
*)data
;
1693 struct errhdr_t
*target_errh
=
1694 (struct errhdr_t
*)target_data
;
1696 if (len
!= sizeof(struct errhdr_t
) ||
1697 tgt_len
!= sizeof(struct errhdr_t
)) {
1700 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1701 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1702 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1703 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1704 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1705 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1706 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1707 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1708 (void *) &errh
->offender
, sizeof(errh
->offender
));
1717 switch (cmsg
->cmsg_type
) {
1720 uint32_t *v
= (uint32_t *)data
;
1721 uint32_t *t_int
= (uint32_t *)target_data
;
1723 if (len
!= sizeof(uint32_t) ||
1724 tgt_len
!= sizeof(uint32_t)) {
1727 __put_user(*v
, t_int
);
1733 struct sock_extended_err ee
;
1734 struct sockaddr_in6 offender
;
1736 struct errhdr6_t
*errh
= (struct errhdr6_t
*)data
;
1737 struct errhdr6_t
*target_errh
=
1738 (struct errhdr6_t
*)target_data
;
1740 if (len
!= sizeof(struct errhdr6_t
) ||
1741 tgt_len
!= sizeof(struct errhdr6_t
)) {
1744 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1745 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1746 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1747 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1748 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1749 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1750 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1751 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1752 (void *) &errh
->offender
, sizeof(errh
->offender
));
1762 gemu_log("Unsupported ancillary data: %d/%d\n",
1763 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1764 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1765 if (tgt_len
> len
) {
1766 memset(target_data
+ len
, 0, tgt_len
- len
);
1770 target_cmsg
->cmsg_len
= tswapal(TARGET_CMSG_LEN(tgt_len
));
1771 tgt_space
= TARGET_CMSG_SPACE(tgt_len
);
1772 if (msg_controllen
< tgt_space
) {
1773 tgt_space
= msg_controllen
;
1775 msg_controllen
-= tgt_space
;
1777 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1778 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1781 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1783 target_msgh
->msg_controllen
= tswapal(space
);
1787 /* do_setsockopt() Must return target values and target errnos. */
1788 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
1789 abi_ulong optval_addr
, socklen_t optlen
)
1793 struct ip_mreqn
*ip_mreq
;
1794 struct ip_mreq_source
*ip_mreq_source
;
1798 /* TCP options all take an 'int' value. */
1799 if (optlen
< sizeof(uint32_t))
1800 return -TARGET_EINVAL
;
1802 if (get_user_u32(val
, optval_addr
))
1803 return -TARGET_EFAULT
;
1804 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1811 case IP_ROUTER_ALERT
:
1815 case IP_MTU_DISCOVER
:
1822 case IP_MULTICAST_TTL
:
1823 case IP_MULTICAST_LOOP
:
1825 if (optlen
>= sizeof(uint32_t)) {
1826 if (get_user_u32(val
, optval_addr
))
1827 return -TARGET_EFAULT
;
1828 } else if (optlen
>= 1) {
1829 if (get_user_u8(val
, optval_addr
))
1830 return -TARGET_EFAULT
;
1832 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1834 case IP_ADD_MEMBERSHIP
:
1835 case IP_DROP_MEMBERSHIP
:
1836 if (optlen
< sizeof (struct target_ip_mreq
) ||
1837 optlen
> sizeof (struct target_ip_mreqn
))
1838 return -TARGET_EINVAL
;
1840 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
1841 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
1842 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
1845 case IP_BLOCK_SOURCE
:
1846 case IP_UNBLOCK_SOURCE
:
1847 case IP_ADD_SOURCE_MEMBERSHIP
:
1848 case IP_DROP_SOURCE_MEMBERSHIP
:
1849 if (optlen
!= sizeof (struct target_ip_mreq_source
))
1850 return -TARGET_EINVAL
;
1852 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
1853 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
1854 unlock_user (ip_mreq_source
, optval_addr
, 0);
1863 case IPV6_MTU_DISCOVER
:
1866 case IPV6_RECVPKTINFO
:
1867 case IPV6_UNICAST_HOPS
:
1868 case IPV6_MULTICAST_HOPS
:
1869 case IPV6_MULTICAST_LOOP
:
1871 case IPV6_RECVHOPLIMIT
:
1872 case IPV6_2292HOPLIMIT
:
1875 if (optlen
< sizeof(uint32_t)) {
1876 return -TARGET_EINVAL
;
1878 if (get_user_u32(val
, optval_addr
)) {
1879 return -TARGET_EFAULT
;
1881 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1882 &val
, sizeof(val
)));
1886 struct in6_pktinfo pki
;
1888 if (optlen
< sizeof(pki
)) {
1889 return -TARGET_EINVAL
;
1892 if (copy_from_user(&pki
, optval_addr
, sizeof(pki
))) {
1893 return -TARGET_EFAULT
;
1896 pki
.ipi6_ifindex
= tswap32(pki
.ipi6_ifindex
);
1898 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1899 &pki
, sizeof(pki
)));
1910 struct icmp6_filter icmp6f
;
1912 if (optlen
> sizeof(icmp6f
)) {
1913 optlen
= sizeof(icmp6f
);
1916 if (copy_from_user(&icmp6f
, optval_addr
, optlen
)) {
1917 return -TARGET_EFAULT
;
1920 for (val
= 0; val
< 8; val
++) {
1921 icmp6f
.data
[val
] = tswap32(icmp6f
.data
[val
]);
1924 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1936 /* those take an u32 value */
1937 if (optlen
< sizeof(uint32_t)) {
1938 return -TARGET_EINVAL
;
1941 if (get_user_u32(val
, optval_addr
)) {
1942 return -TARGET_EFAULT
;
1944 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1945 &val
, sizeof(val
)));
1952 case TARGET_SOL_SOCKET
:
1954 case TARGET_SO_RCVTIMEO
:
1958 optname
= SO_RCVTIMEO
;
1961 if (optlen
!= sizeof(struct target_timeval
)) {
1962 return -TARGET_EINVAL
;
1965 if (copy_from_user_timeval(&tv
, optval_addr
)) {
1966 return -TARGET_EFAULT
;
1969 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
1973 case TARGET_SO_SNDTIMEO
:
1974 optname
= SO_SNDTIMEO
;
1976 case TARGET_SO_ATTACH_FILTER
:
1978 struct target_sock_fprog
*tfprog
;
1979 struct target_sock_filter
*tfilter
;
1980 struct sock_fprog fprog
;
1981 struct sock_filter
*filter
;
1984 if (optlen
!= sizeof(*tfprog
)) {
1985 return -TARGET_EINVAL
;
1987 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
1988 return -TARGET_EFAULT
;
1990 if (!lock_user_struct(VERIFY_READ
, tfilter
,
1991 tswapal(tfprog
->filter
), 0)) {
1992 unlock_user_struct(tfprog
, optval_addr
, 1);
1993 return -TARGET_EFAULT
;
1996 fprog
.len
= tswap16(tfprog
->len
);
1997 filter
= g_try_new(struct sock_filter
, fprog
.len
);
1998 if (filter
== NULL
) {
1999 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2000 unlock_user_struct(tfprog
, optval_addr
, 1);
2001 return -TARGET_ENOMEM
;
2003 for (i
= 0; i
< fprog
.len
; i
++) {
2004 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2005 filter
[i
].jt
= tfilter
[i
].jt
;
2006 filter
[i
].jf
= tfilter
[i
].jf
;
2007 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2009 fprog
.filter
= filter
;
2011 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2012 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2015 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2016 unlock_user_struct(tfprog
, optval_addr
, 1);
2019 case TARGET_SO_BINDTODEVICE
:
2021 char *dev_ifname
, *addr_ifname
;
2023 if (optlen
> IFNAMSIZ
- 1) {
2024 optlen
= IFNAMSIZ
- 1;
2026 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2028 return -TARGET_EFAULT
;
2030 optname
= SO_BINDTODEVICE
;
2031 addr_ifname
= alloca(IFNAMSIZ
);
2032 memcpy(addr_ifname
, dev_ifname
, optlen
);
2033 addr_ifname
[optlen
] = 0;
2034 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2035 addr_ifname
, optlen
));
2036 unlock_user (dev_ifname
, optval_addr
, 0);
2039 case TARGET_SO_LINGER
:
2042 struct target_linger
*tlg
;
2044 if (optlen
!= sizeof(struct target_linger
)) {
2045 return -TARGET_EINVAL
;
2047 if (!lock_user_struct(VERIFY_READ
, tlg
, optval_addr
, 1)) {
2048 return -TARGET_EFAULT
;
2050 __get_user(lg
.l_onoff
, &tlg
->l_onoff
);
2051 __get_user(lg
.l_linger
, &tlg
->l_linger
);
2052 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, SO_LINGER
,
2054 unlock_user_struct(tlg
, optval_addr
, 0);
2057 /* Options with 'int' argument. */
2058 case TARGET_SO_DEBUG
:
2061 case TARGET_SO_REUSEADDR
:
2062 optname
= SO_REUSEADDR
;
2065 case TARGET_SO_REUSEPORT
:
2066 optname
= SO_REUSEPORT
;
2069 case TARGET_SO_TYPE
:
2072 case TARGET_SO_ERROR
:
2075 case TARGET_SO_DONTROUTE
:
2076 optname
= SO_DONTROUTE
;
2078 case TARGET_SO_BROADCAST
:
2079 optname
= SO_BROADCAST
;
2081 case TARGET_SO_SNDBUF
:
2082 optname
= SO_SNDBUF
;
2084 case TARGET_SO_SNDBUFFORCE
:
2085 optname
= SO_SNDBUFFORCE
;
2087 case TARGET_SO_RCVBUF
:
2088 optname
= SO_RCVBUF
;
2090 case TARGET_SO_RCVBUFFORCE
:
2091 optname
= SO_RCVBUFFORCE
;
2093 case TARGET_SO_KEEPALIVE
:
2094 optname
= SO_KEEPALIVE
;
2096 case TARGET_SO_OOBINLINE
:
2097 optname
= SO_OOBINLINE
;
2099 case TARGET_SO_NO_CHECK
:
2100 optname
= SO_NO_CHECK
;
2102 case TARGET_SO_PRIORITY
:
2103 optname
= SO_PRIORITY
;
2106 case TARGET_SO_BSDCOMPAT
:
2107 optname
= SO_BSDCOMPAT
;
2110 case TARGET_SO_PASSCRED
:
2111 optname
= SO_PASSCRED
;
2113 case TARGET_SO_PASSSEC
:
2114 optname
= SO_PASSSEC
;
2116 case TARGET_SO_TIMESTAMP
:
2117 optname
= SO_TIMESTAMP
;
2119 case TARGET_SO_RCVLOWAT
:
2120 optname
= SO_RCVLOWAT
;
2125 if (optlen
< sizeof(uint32_t))
2126 return -TARGET_EINVAL
;
2128 if (get_user_u32(val
, optval_addr
))
2129 return -TARGET_EFAULT
;
2130 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
2134 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
2135 ret
= -TARGET_ENOPROTOOPT
;
2140 /* do_getsockopt() Must return target values and target errnos. */
2141 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
2142 abi_ulong optval_addr
, abi_ulong optlen
)
2149 case TARGET_SOL_SOCKET
:
2152 /* These don't just return a single integer */
2153 case TARGET_SO_RCVTIMEO
:
2154 case TARGET_SO_SNDTIMEO
:
2155 case TARGET_SO_PEERNAME
:
2157 case TARGET_SO_PEERCRED
: {
2160 struct target_ucred
*tcr
;
2162 if (get_user_u32(len
, optlen
)) {
2163 return -TARGET_EFAULT
;
2166 return -TARGET_EINVAL
;
2170 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
2178 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
2179 return -TARGET_EFAULT
;
2181 __put_user(cr
.pid
, &tcr
->pid
);
2182 __put_user(cr
.uid
, &tcr
->uid
);
2183 __put_user(cr
.gid
, &tcr
->gid
);
2184 unlock_user_struct(tcr
, optval_addr
, 1);
2185 if (put_user_u32(len
, optlen
)) {
2186 return -TARGET_EFAULT
;
2190 case TARGET_SO_LINGER
:
2194 struct target_linger
*tlg
;
2196 if (get_user_u32(len
, optlen
)) {
2197 return -TARGET_EFAULT
;
2200 return -TARGET_EINVAL
;
2204 ret
= get_errno(getsockopt(sockfd
, level
, SO_LINGER
,
2212 if (!lock_user_struct(VERIFY_WRITE
, tlg
, optval_addr
, 0)) {
2213 return -TARGET_EFAULT
;
2215 __put_user(lg
.l_onoff
, &tlg
->l_onoff
);
2216 __put_user(lg
.l_linger
, &tlg
->l_linger
);
2217 unlock_user_struct(tlg
, optval_addr
, 1);
2218 if (put_user_u32(len
, optlen
)) {
2219 return -TARGET_EFAULT
;
2223 /* Options with 'int' argument. */
2224 case TARGET_SO_DEBUG
:
2227 case TARGET_SO_REUSEADDR
:
2228 optname
= SO_REUSEADDR
;
2231 case TARGET_SO_REUSEPORT
:
2232 optname
= SO_REUSEPORT
;
2235 case TARGET_SO_TYPE
:
2238 case TARGET_SO_ERROR
:
2241 case TARGET_SO_DONTROUTE
:
2242 optname
= SO_DONTROUTE
;
2244 case TARGET_SO_BROADCAST
:
2245 optname
= SO_BROADCAST
;
2247 case TARGET_SO_SNDBUF
:
2248 optname
= SO_SNDBUF
;
2250 case TARGET_SO_RCVBUF
:
2251 optname
= SO_RCVBUF
;
2253 case TARGET_SO_KEEPALIVE
:
2254 optname
= SO_KEEPALIVE
;
2256 case TARGET_SO_OOBINLINE
:
2257 optname
= SO_OOBINLINE
;
2259 case TARGET_SO_NO_CHECK
:
2260 optname
= SO_NO_CHECK
;
2262 case TARGET_SO_PRIORITY
:
2263 optname
= SO_PRIORITY
;
2266 case TARGET_SO_BSDCOMPAT
:
2267 optname
= SO_BSDCOMPAT
;
2270 case TARGET_SO_PASSCRED
:
2271 optname
= SO_PASSCRED
;
2273 case TARGET_SO_TIMESTAMP
:
2274 optname
= SO_TIMESTAMP
;
2276 case TARGET_SO_RCVLOWAT
:
2277 optname
= SO_RCVLOWAT
;
2279 case TARGET_SO_ACCEPTCONN
:
2280 optname
= SO_ACCEPTCONN
;
2287 /* TCP options all take an 'int' value. */
2289 if (get_user_u32(len
, optlen
))
2290 return -TARGET_EFAULT
;
2292 return -TARGET_EINVAL
;
2294 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2297 if (optname
== SO_TYPE
) {
2298 val
= host_to_target_sock_type(val
);
2303 if (put_user_u32(val
, optval_addr
))
2304 return -TARGET_EFAULT
;
2306 if (put_user_u8(val
, optval_addr
))
2307 return -TARGET_EFAULT
;
2309 if (put_user_u32(len
, optlen
))
2310 return -TARGET_EFAULT
;
2317 case IP_ROUTER_ALERT
:
2321 case IP_MTU_DISCOVER
:
2327 case IP_MULTICAST_TTL
:
2328 case IP_MULTICAST_LOOP
:
2329 if (get_user_u32(len
, optlen
))
2330 return -TARGET_EFAULT
;
2332 return -TARGET_EINVAL
;
2334 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2337 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2339 if (put_user_u32(len
, optlen
)
2340 || put_user_u8(val
, optval_addr
))
2341 return -TARGET_EFAULT
;
2343 if (len
> sizeof(int))
2345 if (put_user_u32(len
, optlen
)
2346 || put_user_u32(val
, optval_addr
))
2347 return -TARGET_EFAULT
;
2351 ret
= -TARGET_ENOPROTOOPT
;
2357 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2359 ret
= -TARGET_EOPNOTSUPP
;
2365 /* Convert target low/high pair representing file offset into the host
2366 * low/high pair. This function doesn't handle offsets bigger than 64 bits
2367 * as the kernel doesn't handle them either.
2369 static void target_to_host_low_high(abi_ulong tlow
,
2371 unsigned long *hlow
,
2372 unsigned long *hhigh
)
2374 uint64_t off
= tlow
|
2375 ((unsigned long long)thigh
<< TARGET_LONG_BITS
/ 2) <<
2376 TARGET_LONG_BITS
/ 2;
2379 *hhigh
= (off
>> HOST_LONG_BITS
/ 2) >> HOST_LONG_BITS
/ 2;
2382 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
2383 abi_ulong count
, int copy
)
2385 struct target_iovec
*target_vec
;
2387 abi_ulong total_len
, max_len
;
2390 bool bad_address
= false;
2396 if (count
> IOV_MAX
) {
2401 vec
= g_try_new0(struct iovec
, count
);
2407 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2408 count
* sizeof(struct target_iovec
), 1);
2409 if (target_vec
== NULL
) {
2414 /* ??? If host page size > target page size, this will result in a
2415 value larger than what we can actually support. */
2416 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
2419 for (i
= 0; i
< count
; i
++) {
2420 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2421 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2426 } else if (len
== 0) {
2427 /* Zero length pointer is ignored. */
2428 vec
[i
].iov_base
= 0;
2430 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
2431 /* If the first buffer pointer is bad, this is a fault. But
2432 * subsequent bad buffers will result in a partial write; this
2433 * is realized by filling the vector with null pointers and
2435 if (!vec
[i
].iov_base
) {
2446 if (len
> max_len
- total_len
) {
2447 len
= max_len
- total_len
;
2450 vec
[i
].iov_len
= len
;
2454 unlock_user(target_vec
, target_addr
, 0);
2459 if (tswapal(target_vec
[i
].iov_len
) > 0) {
2460 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
2463 unlock_user(target_vec
, target_addr
, 0);
2470 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
2471 abi_ulong count
, int copy
)
2473 struct target_iovec
*target_vec
;
2476 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2477 count
* sizeof(struct target_iovec
), 1);
2479 for (i
= 0; i
< count
; i
++) {
2480 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2481 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2485 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
2487 unlock_user(target_vec
, target_addr
, 0);
2493 static inline int target_to_host_sock_type(int *type
)
2496 int target_type
= *type
;
2498 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
2499 case TARGET_SOCK_DGRAM
:
2500 host_type
= SOCK_DGRAM
;
2502 case TARGET_SOCK_STREAM
:
2503 host_type
= SOCK_STREAM
;
2506 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
2509 if (target_type
& TARGET_SOCK_CLOEXEC
) {
2510 #if defined(SOCK_CLOEXEC)
2511 host_type
|= SOCK_CLOEXEC
;
2513 return -TARGET_EINVAL
;
2516 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2517 #if defined(SOCK_NONBLOCK)
2518 host_type
|= SOCK_NONBLOCK
;
2519 #elif !defined(O_NONBLOCK)
2520 return -TARGET_EINVAL
;
2527 /* Try to emulate socket type flags after socket creation. */
2528 static int sock_flags_fixup(int fd
, int target_type
)
2530 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2531 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2532 int flags
= fcntl(fd
, F_GETFL
);
2533 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
2535 return -TARGET_EINVAL
;
2542 /* do_socket() Must return target values and target errnos. */
2543 static abi_long
do_socket(int domain
, int type
, int protocol
)
2545 int target_type
= type
;
2548 ret
= target_to_host_sock_type(&type
);
2553 if (domain
== PF_NETLINK
&& !(
2554 #ifdef CONFIG_RTNETLINK
2555 protocol
== NETLINK_ROUTE
||
2557 protocol
== NETLINK_KOBJECT_UEVENT
||
2558 protocol
== NETLINK_AUDIT
)) {
2559 return -EPFNOSUPPORT
;
2562 if (domain
== AF_PACKET
||
2563 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
2564 protocol
= tswap16(protocol
);
2567 ret
= get_errno(socket(domain
, type
, protocol
));
2569 ret
= sock_flags_fixup(ret
, target_type
);
2570 if (type
== SOCK_PACKET
) {
2571 /* Manage an obsolete case :
2572 * if socket type is SOCK_PACKET, bind by name
2574 fd_trans_register(ret
, &target_packet_trans
);
2575 } else if (domain
== PF_NETLINK
) {
2577 #ifdef CONFIG_RTNETLINK
2579 fd_trans_register(ret
, &target_netlink_route_trans
);
2582 case NETLINK_KOBJECT_UEVENT
:
2583 /* nothing to do: messages are strings */
2586 fd_trans_register(ret
, &target_netlink_audit_trans
);
2589 g_assert_not_reached();
2596 /* do_bind() Must return target values and target errnos. */
2597 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
2603 if ((int)addrlen
< 0) {
2604 return -TARGET_EINVAL
;
2607 addr
= alloca(addrlen
+1);
2609 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
2613 return get_errno(bind(sockfd
, addr
, addrlen
));
2616 /* do_connect() Must return target values and target errnos. */
2617 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
2623 if ((int)addrlen
< 0) {
2624 return -TARGET_EINVAL
;
2627 addr
= alloca(addrlen
+1);
2629 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
2633 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
2636 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2637 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
2638 int flags
, int send
)
2644 abi_ulong target_vec
;
2646 if (msgp
->msg_name
) {
2647 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
2648 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
2649 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
2650 tswapal(msgp
->msg_name
),
2652 if (ret
== -TARGET_EFAULT
) {
2653 /* For connected sockets msg_name and msg_namelen must
2654 * be ignored, so returning EFAULT immediately is wrong.
2655 * Instead, pass a bad msg_name to the host kernel, and
2656 * let it decide whether to return EFAULT or not.
2658 msg
.msg_name
= (void *)-1;
2663 msg
.msg_name
= NULL
;
2664 msg
.msg_namelen
= 0;
2666 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
2667 msg
.msg_control
= alloca(msg
.msg_controllen
);
2668 memset(msg
.msg_control
, 0, msg
.msg_controllen
);
2670 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
2672 count
= tswapal(msgp
->msg_iovlen
);
2673 target_vec
= tswapal(msgp
->msg_iov
);
2675 if (count
> IOV_MAX
) {
2676 /* sendrcvmsg returns a different errno for this condition than
2677 * readv/writev, so we must catch it here before lock_iovec() does.
2679 ret
= -TARGET_EMSGSIZE
;
2683 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
2684 target_vec
, count
, send
);
2686 ret
= -host_to_target_errno(errno
);
2689 msg
.msg_iovlen
= count
;
2693 if (fd_trans_target_to_host_data(fd
)) {
2696 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
2697 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
2698 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
2699 msg
.msg_iov
->iov_len
);
2701 msg
.msg_iov
->iov_base
= host_msg
;
2702 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
2706 ret
= target_to_host_cmsg(&msg
, msgp
);
2708 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
2712 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
2713 if (!is_error(ret
)) {
2715 if (fd_trans_host_to_target_data(fd
)) {
2716 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
2717 MIN(msg
.msg_iov
->iov_len
, len
));
2719 ret
= host_to_target_cmsg(msgp
, &msg
);
2721 if (!is_error(ret
)) {
2722 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
2723 if (msg
.msg_name
!= NULL
&& msg
.msg_name
!= (void *)-1) {
2724 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
2725 msg
.msg_name
, msg
.msg_namelen
);
2737 unlock_iovec(vec
, target_vec
, count
, !send
);
2742 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
2743 int flags
, int send
)
2746 struct target_msghdr
*msgp
;
2748 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
2752 return -TARGET_EFAULT
;
2754 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
2755 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
2759 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2760 * so it might not have this *mmsg-specific flag either.
2762 #ifndef MSG_WAITFORONE
2763 #define MSG_WAITFORONE 0x10000
2766 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
2767 unsigned int vlen
, unsigned int flags
,
2770 struct target_mmsghdr
*mmsgp
;
2774 if (vlen
> UIO_MAXIOV
) {
2778 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
2780 return -TARGET_EFAULT
;
2783 for (i
= 0; i
< vlen
; i
++) {
2784 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
2785 if (is_error(ret
)) {
2788 mmsgp
[i
].msg_len
= tswap32(ret
);
2789 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2790 if (flags
& MSG_WAITFORONE
) {
2791 flags
|= MSG_DONTWAIT
;
2795 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
2797 /* Return number of datagrams sent if we sent any at all;
2798 * otherwise return the error.
2806 /* do_accept4() Must return target values and target errnos. */
2807 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
2808 abi_ulong target_addrlen_addr
, int flags
)
2815 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
2817 if (target_addr
== 0) {
2818 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
2821 /* linux returns EINVAL if addrlen pointer is invalid */
2822 if (get_user_u32(addrlen
, target_addrlen_addr
))
2823 return -TARGET_EINVAL
;
2825 if ((int)addrlen
< 0) {
2826 return -TARGET_EINVAL
;
2829 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2830 return -TARGET_EINVAL
;
2832 addr
= alloca(addrlen
);
2834 ret
= get_errno(safe_accept4(fd
, addr
, &addrlen
, host_flags
));
2835 if (!is_error(ret
)) {
2836 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2837 if (put_user_u32(addrlen
, target_addrlen_addr
))
2838 ret
= -TARGET_EFAULT
;
2843 /* do_getpeername() Must return target values and target errnos. */
2844 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
2845 abi_ulong target_addrlen_addr
)
2851 if (get_user_u32(addrlen
, target_addrlen_addr
))
2852 return -TARGET_EFAULT
;
2854 if ((int)addrlen
< 0) {
2855 return -TARGET_EINVAL
;
2858 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2859 return -TARGET_EFAULT
;
2861 addr
= alloca(addrlen
);
2863 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
2864 if (!is_error(ret
)) {
2865 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2866 if (put_user_u32(addrlen
, target_addrlen_addr
))
2867 ret
= -TARGET_EFAULT
;
2872 /* do_getsockname() Must return target values and target errnos. */
2873 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
2874 abi_ulong target_addrlen_addr
)
2880 if (get_user_u32(addrlen
, target_addrlen_addr
))
2881 return -TARGET_EFAULT
;
2883 if ((int)addrlen
< 0) {
2884 return -TARGET_EINVAL
;
2887 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2888 return -TARGET_EFAULT
;
2890 addr
= alloca(addrlen
);
2892 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
2893 if (!is_error(ret
)) {
2894 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2895 if (put_user_u32(addrlen
, target_addrlen_addr
))
2896 ret
= -TARGET_EFAULT
;
2901 /* do_socketpair() Must return target values and target errnos. */
2902 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
2903 abi_ulong target_tab_addr
)
2908 target_to_host_sock_type(&type
);
2910 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
2911 if (!is_error(ret
)) {
2912 if (put_user_s32(tab
[0], target_tab_addr
)
2913 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
2914 ret
= -TARGET_EFAULT
;
2919 /* do_sendto() Must return target values and target errnos. */
2920 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
2921 abi_ulong target_addr
, socklen_t addrlen
)
2925 void *copy_msg
= NULL
;
2928 if ((int)addrlen
< 0) {
2929 return -TARGET_EINVAL
;
2932 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
2934 return -TARGET_EFAULT
;
2935 if (fd_trans_target_to_host_data(fd
)) {
2936 copy_msg
= host_msg
;
2937 host_msg
= g_malloc(len
);
2938 memcpy(host_msg
, copy_msg
, len
);
2939 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
2945 addr
= alloca(addrlen
+1);
2946 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
2950 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
2952 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
2957 host_msg
= copy_msg
;
2959 unlock_user(host_msg
, msg
, 0);
2963 /* do_recvfrom() Must return target values and target errnos. */
2964 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
2965 abi_ulong target_addr
,
2966 abi_ulong target_addrlen
)
2973 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
2975 return -TARGET_EFAULT
;
2977 if (get_user_u32(addrlen
, target_addrlen
)) {
2978 ret
= -TARGET_EFAULT
;
2981 if ((int)addrlen
< 0) {
2982 ret
= -TARGET_EINVAL
;
2985 addr
= alloca(addrlen
);
2986 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
2989 addr
= NULL
; /* To keep compiler quiet. */
2990 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
2992 if (!is_error(ret
)) {
2993 if (fd_trans_host_to_target_data(fd
)) {
2995 trans
= fd_trans_host_to_target_data(fd
)(host_msg
, MIN(ret
, len
));
2996 if (is_error(trans
)) {
3002 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3003 if (put_user_u32(addrlen
, target_addrlen
)) {
3004 ret
= -TARGET_EFAULT
;
3008 unlock_user(host_msg
, msg
, len
);
3011 unlock_user(host_msg
, msg
, 0);
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
3109 #define N_SHM_REGIONS 32
3111 static struct shm_region
{
3115 } shm_regions
[N_SHM_REGIONS
];
3117 #ifndef TARGET_SEMID64_DS
3118 /* asm-generic version of this struct */
3119 struct target_semid64_ds
3121 struct target_ipc_perm sem_perm
;
3122 abi_ulong sem_otime
;
3123 #if TARGET_ABI_BITS == 32
3124 abi_ulong __unused1
;
3126 abi_ulong sem_ctime
;
3127 #if TARGET_ABI_BITS == 32
3128 abi_ulong __unused2
;
3130 abi_ulong sem_nsems
;
3131 abi_ulong __unused3
;
3132 abi_ulong __unused4
;
3136 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
3137 abi_ulong target_addr
)
3139 struct target_ipc_perm
*target_ip
;
3140 struct target_semid64_ds
*target_sd
;
3142 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3143 return -TARGET_EFAULT
;
3144 target_ip
= &(target_sd
->sem_perm
);
3145 host_ip
->__key
= tswap32(target_ip
->__key
);
3146 host_ip
->uid
= tswap32(target_ip
->uid
);
3147 host_ip
->gid
= tswap32(target_ip
->gid
);
3148 host_ip
->cuid
= tswap32(target_ip
->cuid
);
3149 host_ip
->cgid
= tswap32(target_ip
->cgid
);
3150 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3151 host_ip
->mode
= tswap32(target_ip
->mode
);
3153 host_ip
->mode
= tswap16(target_ip
->mode
);
3155 #if defined(TARGET_PPC)
3156 host_ip
->__seq
= tswap32(target_ip
->__seq
);
3158 host_ip
->__seq
= tswap16(target_ip
->__seq
);
3160 unlock_user_struct(target_sd
, target_addr
, 0);
3164 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
3165 struct ipc_perm
*host_ip
)
3167 struct target_ipc_perm
*target_ip
;
3168 struct target_semid64_ds
*target_sd
;
3170 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3171 return -TARGET_EFAULT
;
3172 target_ip
= &(target_sd
->sem_perm
);
3173 target_ip
->__key
= tswap32(host_ip
->__key
);
3174 target_ip
->uid
= tswap32(host_ip
->uid
);
3175 target_ip
->gid
= tswap32(host_ip
->gid
);
3176 target_ip
->cuid
= tswap32(host_ip
->cuid
);
3177 target_ip
->cgid
= tswap32(host_ip
->cgid
);
3178 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3179 target_ip
->mode
= tswap32(host_ip
->mode
);
3181 target_ip
->mode
= tswap16(host_ip
->mode
);
3183 #if defined(TARGET_PPC)
3184 target_ip
->__seq
= tswap32(host_ip
->__seq
);
3186 target_ip
->__seq
= tswap16(host_ip
->__seq
);
3188 unlock_user_struct(target_sd
, target_addr
, 1);
3192 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
3193 abi_ulong target_addr
)
3195 struct target_semid64_ds
*target_sd
;
3197 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3198 return -TARGET_EFAULT
;
3199 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
3200 return -TARGET_EFAULT
;
3201 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
3202 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
3203 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
3204 unlock_user_struct(target_sd
, target_addr
, 0);
3208 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
3209 struct semid_ds
*host_sd
)
3211 struct target_semid64_ds
*target_sd
;
3213 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3214 return -TARGET_EFAULT
;
3215 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
3216 return -TARGET_EFAULT
;
3217 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
3218 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
3219 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
3220 unlock_user_struct(target_sd
, target_addr
, 1);
/* Guest layout of struct seminfo (semctl IPC_INFO/SEM_INFO).
 * NOTE(review): field list reconstructed from the accessors below —
 * confirm against the original. */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
3237 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
3238 struct seminfo
*host_seminfo
)
3240 struct target_seminfo
*target_seminfo
;
3241 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
3242 return -TARGET_EFAULT
;
3243 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
3244 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
3245 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
3246 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
3247 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
3248 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
3249 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
3250 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
3251 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
3252 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
3253 unlock_user_struct(target_seminfo
, target_addr
, 1);
3259 struct semid_ds
*buf
;
3260 unsigned short *array
;
3261 struct seminfo
*__buf
;
3264 union target_semun
{
3271 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
3272 abi_ulong target_addr
)
3275 unsigned short *array
;
3277 struct semid_ds semid_ds
;
3280 semun
.buf
= &semid_ds
;
3282 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3284 return get_errno(ret
);
3286 nsems
= semid_ds
.sem_nsems
;
3288 *host_array
= g_try_new(unsigned short, nsems
);
3290 return -TARGET_ENOMEM
;
3292 array
= lock_user(VERIFY_READ
, target_addr
,
3293 nsems
*sizeof(unsigned short), 1);
3295 g_free(*host_array
);
3296 return -TARGET_EFAULT
;
3299 for(i
=0; i
<nsems
; i
++) {
3300 __get_user((*host_array
)[i
], &array
[i
]);
3302 unlock_user(array
, target_addr
, 0);
3307 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
3308 unsigned short **host_array
)
3311 unsigned short *array
;
3313 struct semid_ds semid_ds
;
3316 semun
.buf
= &semid_ds
;
3318 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3320 return get_errno(ret
);
3322 nsems
= semid_ds
.sem_nsems
;
3324 array
= lock_user(VERIFY_WRITE
, target_addr
,
3325 nsems
*sizeof(unsigned short), 0);
3327 return -TARGET_EFAULT
;
3329 for(i
=0; i
<nsems
; i
++) {
3330 __put_user((*host_array
)[i
], &array
[i
]);
3332 g_free(*host_array
);
3333 unlock_user(array
, target_addr
, 1);
3338 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
3339 abi_ulong target_arg
)
3341 union target_semun target_su
= { .buf
= target_arg
};
3343 struct semid_ds dsarg
;
3344 unsigned short *array
= NULL
;
3345 struct seminfo seminfo
;
3346 abi_long ret
= -TARGET_EINVAL
;
3353 /* In 64 bit cross-endian situations, we will erroneously pick up
3354 * the wrong half of the union for the "val" element. To rectify
3355 * this, the entire 8-byte structure is byteswapped, followed by
3356 * a swap of the 4 byte val field. In other cases, the data is
3357 * already in proper host byte order. */
3358 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
3359 target_su
.buf
= tswapal(target_su
.buf
);
3360 arg
.val
= tswap32(target_su
.val
);
3362 arg
.val
= target_su
.val
;
3364 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3368 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
3372 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3373 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
3380 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
3384 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3385 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
3391 arg
.__buf
= &seminfo
;
3392 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3393 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
3401 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
/* Guest layout of struct sembuf (semop operations).
 * NOTE(review): sem_op/sem_flg reconstructed — confirm against original. */
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};
3414 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
3415 abi_ulong target_addr
,
3418 struct target_sembuf
*target_sembuf
;
3421 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
3422 nsops
*sizeof(struct target_sembuf
), 1);
3424 return -TARGET_EFAULT
;
3426 for(i
=0; i
<nsops
; i
++) {
3427 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
3428 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
3429 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
3432 unlock_user(target_sembuf
, target_addr
, 0);
3437 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
3439 struct sembuf sops
[nsops
];
3441 if (target_to_host_sembuf(sops
, ptr
, nsops
))
3442 return -TARGET_EFAULT
;
3444 return get_errno(safe_semtimedop(semid
, sops
, nsops
, NULL
));
3447 struct target_msqid_ds
3449 struct target_ipc_perm msg_perm
;
3450 abi_ulong msg_stime
;
3451 #if TARGET_ABI_BITS == 32
3452 abi_ulong __unused1
;
3454 abi_ulong msg_rtime
;
3455 #if TARGET_ABI_BITS == 32
3456 abi_ulong __unused2
;
3458 abi_ulong msg_ctime
;
3459 #if TARGET_ABI_BITS == 32
3460 abi_ulong __unused3
;
3462 abi_ulong __msg_cbytes
;
3464 abi_ulong msg_qbytes
;
3465 abi_ulong msg_lspid
;
3466 abi_ulong msg_lrpid
;
3467 abi_ulong __unused4
;
3468 abi_ulong __unused5
;
3471 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
3472 abi_ulong target_addr
)
3474 struct target_msqid_ds
*target_md
;
3476 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
3477 return -TARGET_EFAULT
;
3478 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
3479 return -TARGET_EFAULT
;
3480 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
3481 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
3482 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
3483 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
3484 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
3485 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
3486 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
3487 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
3488 unlock_user_struct(target_md
, target_addr
, 0);
3492 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
3493 struct msqid_ds
*host_md
)
3495 struct target_msqid_ds
*target_md
;
3497 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
3498 return -TARGET_EFAULT
;
3499 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
3500 return -TARGET_EFAULT
;
3501 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
3502 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
3503 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
3504 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
3505 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
3506 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
3507 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
3508 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
3509 unlock_user_struct(target_md
, target_addr
, 1);
/* Guest layout of struct msginfo (msgctl IPC_INFO/MSG_INFO).
 * NOTE(review): int fields reconstructed from the accessors below —
 * confirm against original. */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
3524 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
3525 struct msginfo
*host_msginfo
)
3527 struct target_msginfo
*target_msginfo
;
3528 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
3529 return -TARGET_EFAULT
;
3530 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
3531 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
3532 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
3533 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
3534 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
3535 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
3536 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
3537 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
3538 unlock_user_struct(target_msginfo
, target_addr
, 1);
3542 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
3544 struct msqid_ds dsarg
;
3545 struct msginfo msginfo
;
3546 abi_long ret
= -TARGET_EINVAL
;
3554 if (target_to_host_msqid_ds(&dsarg
,ptr
))
3555 return -TARGET_EFAULT
;
3556 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
3557 if (host_to_target_msqid_ds(ptr
,&dsarg
))
3558 return -TARGET_EFAULT
;
3561 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
3565 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
3566 if (host_to_target_msginfo(ptr
, &msginfo
))
3567 return -TARGET_EFAULT
;
3574 struct target_msgbuf
{
3579 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
3580 ssize_t msgsz
, int msgflg
)
3582 struct target_msgbuf
*target_mb
;
3583 struct msgbuf
*host_mb
;
3587 return -TARGET_EINVAL
;
3590 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
3591 return -TARGET_EFAULT
;
3592 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
3594 unlock_user_struct(target_mb
, msgp
, 0);
3595 return -TARGET_ENOMEM
;
3597 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
3598 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
3599 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
3601 unlock_user_struct(target_mb
, msgp
, 0);
3606 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
3607 ssize_t msgsz
, abi_long msgtyp
,
3610 struct target_msgbuf
*target_mb
;
3612 struct msgbuf
*host_mb
;
3616 return -TARGET_EINVAL
;
3619 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
3620 return -TARGET_EFAULT
;
3622 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
3624 ret
= -TARGET_ENOMEM
;
3627 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
3630 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
3631 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
3632 if (!target_mtext
) {
3633 ret
= -TARGET_EFAULT
;
3636 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
3637 unlock_user(target_mtext
, target_mtext_addr
, ret
);
3640 target_mb
->mtype
= tswapal(host_mb
->mtype
);
3644 unlock_user_struct(target_mb
, msgp
, 1);
3649 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
3650 abi_ulong target_addr
)
3652 struct target_shmid_ds
*target_sd
;
3654 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3655 return -TARGET_EFAULT
;
3656 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
3657 return -TARGET_EFAULT
;
3658 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
3659 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
3660 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
3661 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
3662 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
3663 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
3664 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
3665 unlock_user_struct(target_sd
, target_addr
, 0);
3669 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
3670 struct shmid_ds
*host_sd
)
3672 struct target_shmid_ds
*target_sd
;
3674 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3675 return -TARGET_EFAULT
;
3676 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
3677 return -TARGET_EFAULT
;
3678 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
3679 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
3680 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
3681 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
3682 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
3683 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
3684 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
3685 unlock_user_struct(target_sd
, target_addr
, 1);
3689 struct target_shminfo
{
3697 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
3698 struct shminfo
*host_shminfo
)
3700 struct target_shminfo
*target_shminfo
;
3701 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
3702 return -TARGET_EFAULT
;
3703 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
3704 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
3705 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
3706 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
3707 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
3708 unlock_user_struct(target_shminfo
, target_addr
, 1);
3712 struct target_shm_info
{
3717 abi_ulong swap_attempts
;
3718 abi_ulong swap_successes
;
3721 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
3722 struct shm_info
*host_shm_info
)
3724 struct target_shm_info
*target_shm_info
;
3725 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
3726 return -TARGET_EFAULT
;
3727 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
3728 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
3729 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
3730 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
3731 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
3732 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
3733 unlock_user_struct(target_shm_info
, target_addr
, 1);
3737 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
3739 struct shmid_ds dsarg
;
3740 struct shminfo shminfo
;
3741 struct shm_info shm_info
;
3742 abi_long ret
= -TARGET_EINVAL
;
3750 if (target_to_host_shmid_ds(&dsarg
, buf
))
3751 return -TARGET_EFAULT
;
3752 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
3753 if (host_to_target_shmid_ds(buf
, &dsarg
))
3754 return -TARGET_EFAULT
;
3757 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
3758 if (host_to_target_shminfo(buf
, &shminfo
))
3759 return -TARGET_EFAULT
;
3762 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
3763 if (host_to_target_shm_info(buf
, &shm_info
))
3764 return -TARGET_EFAULT
;
3769 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
3776 #ifndef TARGET_FORCE_SHMLBA
3777 /* For most architectures, SHMLBA is the same as the page size;
3778 * some architectures have larger values, in which case they should
3779 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
3780 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
3781 * and defining its own value for SHMLBA.
3783 * The kernel also permits SHMLBA to be set by the architecture to a
3784 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
3785 * this means that addresses are rounded to the large size if
3786 * SHM_RND is set but addresses not aligned to that size are not rejected
3787 * as long as they are at least page-aligned. Since the only architecture
3788 * which uses this is ia64 this code doesn't provide for that oddity.
3790 static inline abi_ulong
target_shmlba(CPUArchState
*cpu_env
)
3792 return TARGET_PAGE_SIZE
;
3796 static inline abi_ulong
do_shmat(CPUArchState
*cpu_env
,
3797 int shmid
, abi_ulong shmaddr
, int shmflg
)
3801 struct shmid_ds shm_info
;
3805 /* find out the length of the shared memory segment */
3806 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
3807 if (is_error(ret
)) {
3808 /* can't get length, bail out */
3812 shmlba
= target_shmlba(cpu_env
);
3814 if (shmaddr
& (shmlba
- 1)) {
3815 if (shmflg
& SHM_RND
) {
3816 shmaddr
&= ~(shmlba
- 1);
3818 return -TARGET_EINVAL
;
3821 if (!guest_range_valid(shmaddr
, shm_info
.shm_segsz
)) {
3822 return -TARGET_EINVAL
;
3828 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
3830 abi_ulong mmap_start
;
3832 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
3834 if (mmap_start
== -1) {
3836 host_raddr
= (void *)-1;
3838 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
3841 if (host_raddr
== (void *)-1) {
3843 return get_errno((long)host_raddr
);
3845 raddr
=h2g((unsigned long)host_raddr
);
3847 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
3848 PAGE_VALID
| PAGE_READ
|
3849 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
3851 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
3852 if (!shm_regions
[i
].in_use
) {
3853 shm_regions
[i
].in_use
= true;
3854 shm_regions
[i
].start
= raddr
;
3855 shm_regions
[i
].size
= shm_info
.shm_segsz
;
3865 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
3872 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
3873 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
3874 shm_regions
[i
].in_use
= false;
3875 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
3879 rv
= get_errno(shmdt(g2h(shmaddr
)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings. */
/* do_ipc() must return target values and target errnos. */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old-style msgrcv packs msgp and msgtyp in one struct. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                                tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
3993 /* kernel structure types definitions */
3995 #define STRUCT(name, ...) STRUCT_ ## name,
3996 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3998 #include "syscall_types.h"
4002 #undef STRUCT_SPECIAL
4004 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4005 #define STRUCT_SPECIAL(name)
4006 #include "syscall_types.h"
4008 #undef STRUCT_SPECIAL
4010 typedef struct IOCTLEntry IOCTLEntry
;
4012 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4013 int fd
, int cmd
, abi_long arg
);
4017 unsigned int host_cmd
;
4020 do_ioctl_fn
*do_ioctl
;
4021 const argtype arg_type
[5];
4024 #define IOC_R 0x0001
4025 #define IOC_W 0x0002
4026 #define IOC_RW (IOC_R | IOC_W)
4028 #define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof(*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
4119 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4120 int fd
, int cmd
, abi_long arg
)
4122 const argtype
*arg_type
= ie
->arg_type
;
4126 struct ifconf
*host_ifconf
;
4128 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
4129 int target_ifreq_size
;
4134 abi_long target_ifc_buf
;
4138 assert(arg_type
[0] == TYPE_PTR
);
4139 assert(ie
->access
== IOC_RW
);
4142 target_size
= thunk_type_size(arg_type
, 0);
4144 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4146 return -TARGET_EFAULT
;
4147 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4148 unlock_user(argptr
, arg
, 0);
4150 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
4151 target_ifc_len
= host_ifconf
->ifc_len
;
4152 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
4154 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
4155 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
4156 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
4158 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
4159 if (outbufsz
> MAX_STRUCT_SIZE
) {
4160 /* We can't fit all the extents into the fixed size buffer.
4161 * Allocate one that is large enough and use it instead.
4163 host_ifconf
= malloc(outbufsz
);
4165 return -TARGET_ENOMEM
;
4167 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
4170 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
4172 host_ifconf
->ifc_len
= host_ifc_len
;
4173 host_ifconf
->ifc_buf
= host_ifc_buf
;
4175 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
4176 if (!is_error(ret
)) {
4177 /* convert host ifc_len to target ifc_len */
4179 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
4180 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
4181 host_ifconf
->ifc_len
= target_ifc_len
;
4183 /* restore target ifc_buf */
4185 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
4187 /* copy struct ifconf to target user */
4189 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4191 return -TARGET_EFAULT
;
4192 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
4193 unlock_user(argptr
, arg
, target_size
);
4195 /* copy ifreq[] to target user */
4197 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
4198 for (i
= 0; i
< nb_ifreq
; i
++) {
4199 thunk_convert(argptr
+ i
* target_ifreq_size
,
4200 host_ifc_buf
+ i
* sizeof(struct ifreq
),
4201 ifreq_arg_type
, THUNK_TARGET
);
4203 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
4213 #if defined(CONFIG_USBFS)
4214 #if HOST_LONG_BITS > 64
4215 #error USBDEVFS thunks do not support >64 bit hosts yet.
4218 uint64_t target_urb_adr
;
4219 uint64_t target_buf_adr
;
4220 char *target_buf_ptr
;
4221 struct usbdevfs_urb host_urb
;
4224 static GHashTable
*usbdevfs_urb_hashtable(void)
4226 static GHashTable
*urb_hashtable
;
4228 if (!urb_hashtable
) {
4229 urb_hashtable
= g_hash_table_new(g_int64_hash
, g_int64_equal
);
4231 return urb_hashtable
;
4234 static void urb_hashtable_insert(struct live_urb
*urb
)
4236 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4237 g_hash_table_insert(urb_hashtable
, urb
, urb
);
4240 static struct live_urb
*urb_hashtable_lookup(uint64_t target_urb_adr
)
4242 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4243 return g_hash_table_lookup(urb_hashtable
, &target_urb_adr
);
4246 static void urb_hashtable_remove(struct live_urb
*urb
)
4248 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4249 g_hash_table_remove(urb_hashtable
, urb
);
4253 do_ioctl_usbdevfs_reapurb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4254 int fd
, int cmd
, abi_long arg
)
4256 const argtype usbfsurb_arg_type
[] = { MK_STRUCT(STRUCT_usbdevfs_urb
) };
4257 const argtype ptrvoid_arg_type
[] = { TYPE_PTRVOID
, 0, 0 };
4258 struct live_urb
*lurb
;
4262 uintptr_t target_urb_adr
;
4265 target_size
= thunk_type_size(usbfsurb_arg_type
, THUNK_TARGET
);
4267 memset(buf_temp
, 0, sizeof(uint64_t));
4268 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4269 if (is_error(ret
)) {
4273 memcpy(&hurb
, buf_temp
, sizeof(uint64_t));
4274 lurb
= (void *)((uintptr_t)hurb
- offsetof(struct live_urb
, host_urb
));
4275 if (!lurb
->target_urb_adr
) {
4276 return -TARGET_EFAULT
;
4278 urb_hashtable_remove(lurb
);
4279 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
,
4280 lurb
->host_urb
.buffer_length
);
4281 lurb
->target_buf_ptr
= NULL
;
4283 /* restore the guest buffer pointer */
4284 lurb
->host_urb
.buffer
= (void *)(uintptr_t)lurb
->target_buf_adr
;
4286 /* update the guest urb struct */
4287 argptr
= lock_user(VERIFY_WRITE
, lurb
->target_urb_adr
, target_size
, 0);
4290 return -TARGET_EFAULT
;
4292 thunk_convert(argptr
, &lurb
->host_urb
, usbfsurb_arg_type
, THUNK_TARGET
);
4293 unlock_user(argptr
, lurb
->target_urb_adr
, target_size
);
4295 target_size
= thunk_type_size(ptrvoid_arg_type
, THUNK_TARGET
);
4296 /* write back the urb handle */
4297 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4300 return -TARGET_EFAULT
;
4303 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4304 target_urb_adr
= lurb
->target_urb_adr
;
4305 thunk_convert(argptr
, &target_urb_adr
, ptrvoid_arg_type
, THUNK_TARGET
);
4306 unlock_user(argptr
, arg
, target_size
);
4313 do_ioctl_usbdevfs_discardurb(const IOCTLEntry
*ie
,
4314 uint8_t *buf_temp
__attribute__((unused
)),
4315 int fd
, int cmd
, abi_long arg
)
4317 struct live_urb
*lurb
;
4319 /* map target address back to host URB with metadata. */
4320 lurb
= urb_hashtable_lookup(arg
);
4322 return -TARGET_EFAULT
;
4324 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
4328 do_ioctl_usbdevfs_submiturb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4329 int fd
, int cmd
, abi_long arg
)
4331 const argtype
*arg_type
= ie
->arg_type
;
4336 struct live_urb
*lurb
;
4339 * each submitted URB needs to map to a unique ID for the
4340 * kernel, and that unique ID needs to be a pointer to
4341 * host memory. hence, we need to malloc for each URB.
4342 * isochronous transfers have a variable length struct.
4345 target_size
= thunk_type_size(arg_type
, THUNK_TARGET
);
4347 /* construct host copy of urb and metadata */
4348 lurb
= g_try_malloc0(sizeof(struct live_urb
));
4350 return -TARGET_ENOMEM
;
4353 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4356 return -TARGET_EFAULT
;
4358 thunk_convert(&lurb
->host_urb
, argptr
, arg_type
, THUNK_HOST
);
4359 unlock_user(argptr
, arg
, 0);
4361 lurb
->target_urb_adr
= arg
;
4362 lurb
->target_buf_adr
= (uintptr_t)lurb
->host_urb
.buffer
;
4364 /* buffer space used depends on endpoint type so lock the entire buffer */
4365 /* control type urbs should check the buffer contents for true direction */
4366 rw_dir
= lurb
->host_urb
.endpoint
& USB_DIR_IN
? VERIFY_WRITE
: VERIFY_READ
;
4367 lurb
->target_buf_ptr
= lock_user(rw_dir
, lurb
->target_buf_adr
,
4368 lurb
->host_urb
.buffer_length
, 1);
4369 if (lurb
->target_buf_ptr
== NULL
) {
4371 return -TARGET_EFAULT
;
4374 /* update buffer pointer in host copy */
4375 lurb
->host_urb
.buffer
= lurb
->target_buf_ptr
;
4377 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
4378 if (is_error(ret
)) {
4379 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
, 0);
4382 urb_hashtable_insert(lurb
);
4387 #endif /* CONFIG_USBFS */
4389 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
4390 int cmd
, abi_long arg
)
4393 struct dm_ioctl
*host_dm
;
4394 abi_long guest_data
;
4395 uint32_t guest_data_size
;
4397 const argtype
*arg_type
= ie
->arg_type
;
4399 void *big_buf
= NULL
;
4403 target_size
= thunk_type_size(arg_type
, 0);
4404 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4406 ret
= -TARGET_EFAULT
;
4409 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4410 unlock_user(argptr
, arg
, 0);
4412 /* buf_temp is too small, so fetch things into a bigger buffer */
4413 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
4414 memcpy(big_buf
, buf_temp
, target_size
);
4418 guest_data
= arg
+ host_dm
->data_start
;
4419 if ((guest_data
- arg
) < 0) {
4420 ret
= -TARGET_EINVAL
;
4423 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
4424 host_data
= (char*)host_dm
+ host_dm
->data_start
;
4426 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
4428 ret
= -TARGET_EFAULT
;
4432 switch (ie
->host_cmd
) {
4434 case DM_LIST_DEVICES
:
4437 case DM_DEV_SUSPEND
:
4440 case DM_TABLE_STATUS
:
4441 case DM_TABLE_CLEAR
:
4443 case DM_LIST_VERSIONS
:
4447 case DM_DEV_SET_GEOMETRY
:
4448 /* data contains only strings */
4449 memcpy(host_data
, argptr
, guest_data_size
);
4452 memcpy(host_data
, argptr
, guest_data_size
);
4453 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
4457 void *gspec
= argptr
;
4458 void *cur_data
= host_data
;
4459 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
4460 int spec_size
= thunk_type_size(arg_type
, 0);
4463 for (i
= 0; i
< host_dm
->target_count
; i
++) {
4464 struct dm_target_spec
*spec
= cur_data
;
4468 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
4469 slen
= strlen((char*)gspec
+ spec_size
) + 1;
4471 spec
->next
= sizeof(*spec
) + slen
;
4472 strcpy((char*)&spec
[1], gspec
+ spec_size
);
4474 cur_data
+= spec
->next
;
4479 ret
= -TARGET_EINVAL
;
4480 unlock_user(argptr
, guest_data
, 0);
4483 unlock_user(argptr
, guest_data
, 0);
4485 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4486 if (!is_error(ret
)) {
4487 guest_data
= arg
+ host_dm
->data_start
;
4488 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
4489 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
4490 switch (ie
->host_cmd
) {
4495 case DM_DEV_SUSPEND
:
4498 case DM_TABLE_CLEAR
:
4500 case DM_DEV_SET_GEOMETRY
:
4501 /* no return data */
4503 case DM_LIST_DEVICES
:
4505 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
4506 uint32_t remaining_data
= guest_data_size
;
4507 void *cur_data
= argptr
;
4508 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
4509 int nl_size
= 12; /* can't use thunk_size due to alignment */
4512 uint32_t next
= nl
->next
;
4514 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
4516 if (remaining_data
< nl
->next
) {
4517 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
4520 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
4521 strcpy(cur_data
+ nl_size
, nl
->name
);
4522 cur_data
+= nl
->next
;
4523 remaining_data
-= nl
->next
;
4527 nl
= (void*)nl
+ next
;
4532 case DM_TABLE_STATUS
:
4534 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
4535 void *cur_data
= argptr
;
4536 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
4537 int spec_size
= thunk_type_size(arg_type
, 0);
4540 for (i
= 0; i
< host_dm
->target_count
; i
++) {
4541 uint32_t next
= spec
->next
;
4542 int slen
= strlen((char*)&spec
[1]) + 1;
4543 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
4544 if (guest_data_size
< spec
->next
) {
4545 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
4548 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
4549 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
4550 cur_data
= argptr
+ spec
->next
;
4551 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
4557 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
4558 int count
= *(uint32_t*)hdata
;
4559 uint64_t *hdev
= hdata
+ 8;
4560 uint64_t *gdev
= argptr
+ 8;
4563 *(uint32_t*)argptr
= tswap32(count
);
4564 for (i
= 0; i
< count
; i
++) {
4565 *gdev
= tswap64(*hdev
);
4571 case DM_LIST_VERSIONS
:
4573 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
4574 uint32_t remaining_data
= guest_data_size
;
4575 void *cur_data
= argptr
;
4576 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
4577 int vers_size
= thunk_type_size(arg_type
, 0);
4580 uint32_t next
= vers
->next
;
4582 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
4584 if (remaining_data
< vers
->next
) {
4585 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
4588 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
4589 strcpy(cur_data
+ vers_size
, vers
->name
);
4590 cur_data
+= vers
->next
;
4591 remaining_data
-= vers
->next
;
4595 vers
= (void*)vers
+ next
;
4600 unlock_user(argptr
, guest_data
, 0);
4601 ret
= -TARGET_EINVAL
;
4604 unlock_user(argptr
, guest_data
, guest_data_size
);
4606 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4608 ret
= -TARGET_EFAULT
;
4611 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
4612 unlock_user(argptr
, arg
, target_size
);
4619 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
4620 int cmd
, abi_long arg
)
4624 const argtype
*arg_type
= ie
->arg_type
;
4625 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
4628 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
4629 struct blkpg_partition host_part
;
4631 /* Read and convert blkpg */
4633 target_size
= thunk_type_size(arg_type
, 0);
4634 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4636 ret
= -TARGET_EFAULT
;
4639 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4640 unlock_user(argptr
, arg
, 0);
4642 switch (host_blkpg
->op
) {
4643 case BLKPG_ADD_PARTITION
:
4644 case BLKPG_DEL_PARTITION
:
4645 /* payload is struct blkpg_partition */
4648 /* Unknown opcode */
4649 ret
= -TARGET_EINVAL
;
4653 /* Read and convert blkpg->data */
4654 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
4655 target_size
= thunk_type_size(part_arg_type
, 0);
4656 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4658 ret
= -TARGET_EFAULT
;
4661 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
4662 unlock_user(argptr
, arg
, 0);
4664 /* Swizzle the data pointer to our local copy and call! */
4665 host_blkpg
->data
= &host_part
;
4666 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
4672 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4673 int fd
, int cmd
, abi_long arg
)
4675 const argtype
*arg_type
= ie
->arg_type
;
4676 const StructEntry
*se
;
4677 const argtype
*field_types
;
4678 const int *dst_offsets
, *src_offsets
;
4681 abi_ulong
*target_rt_dev_ptr
;
4682 unsigned long *host_rt_dev_ptr
;
4686 assert(ie
->access
== IOC_W
);
4687 assert(*arg_type
== TYPE_PTR
);
4689 assert(*arg_type
== TYPE_STRUCT
);
4690 target_size
= thunk_type_size(arg_type
, 0);
4691 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4693 return -TARGET_EFAULT
;
4696 assert(*arg_type
== (int)STRUCT_rtentry
);
4697 se
= struct_entries
+ *arg_type
++;
4698 assert(se
->convert
[0] == NULL
);
4699 /* convert struct here to be able to catch rt_dev string */
4700 field_types
= se
->field_types
;
4701 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
4702 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
4703 for (i
= 0; i
< se
->nb_fields
; i
++) {
4704 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
4705 assert(*field_types
== TYPE_PTRVOID
);
4706 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
4707 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
4708 if (*target_rt_dev_ptr
!= 0) {
4709 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
4710 tswapal(*target_rt_dev_ptr
));
4711 if (!*host_rt_dev_ptr
) {
4712 unlock_user(argptr
, arg
, 0);
4713 return -TARGET_EFAULT
;
4716 *host_rt_dev_ptr
= 0;
4721 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
4722 argptr
+ src_offsets
[i
],
4723 field_types
, THUNK_HOST
);
4725 unlock_user(argptr
, arg
, 0);
4727 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4728 if (*host_rt_dev_ptr
!= 0) {
4729 unlock_user((void *)*host_rt_dev_ptr
,
4730 *target_rt_dev_ptr
, 0);
4735 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4736 int fd
, int cmd
, abi_long arg
)
4738 int sig
= target_to_host_signal(arg
);
4739 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
4743 static abi_long
do_ioctl_tiocgptpeer(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4744 int fd
, int cmd
, abi_long arg
)
4746 int flags
= target_to_host_bitmask(arg
, fcntl_flags_tbl
);
4747 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, flags
));
4751 static IOCTLEntry ioctl_entries
[] = {
4752 #define IOCTL(cmd, access, ...) \
4753 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
4754 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
4755 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
4756 #define IOCTL_IGNORE(cmd) \
4757 { TARGET_ ## cmd, 0, #cmd },
4762 /* ??? Implement proper locking for ioctls. */
4763 /* do_ioctl() Must return target values and target errnos. */
4764 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
4766 const IOCTLEntry
*ie
;
4767 const argtype
*arg_type
;
4769 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
4775 if (ie
->target_cmd
== 0) {
4776 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
4777 return -TARGET_ENOSYS
;
4779 if (ie
->target_cmd
== cmd
)
4783 arg_type
= ie
->arg_type
;
4785 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
4786 } else if (!ie
->host_cmd
) {
4787 /* Some architectures define BSD ioctls in their headers
4788 that are not implemented in Linux. */
4789 return -TARGET_ENOSYS
;
4792 switch(arg_type
[0]) {
4795 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
4799 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
4803 target_size
= thunk_type_size(arg_type
, 0);
4804 switch(ie
->access
) {
4806 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4807 if (!is_error(ret
)) {
4808 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4810 return -TARGET_EFAULT
;
4811 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
4812 unlock_user(argptr
, arg
, target_size
);
4816 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4818 return -TARGET_EFAULT
;
4819 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4820 unlock_user(argptr
, arg
, 0);
4821 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4825 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4827 return -TARGET_EFAULT
;
4828 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4829 unlock_user(argptr
, arg
, 0);
4830 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4831 if (!is_error(ret
)) {
4832 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4834 return -TARGET_EFAULT
;
4835 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
4836 unlock_user(argptr
, arg
, target_size
);
4842 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
4843 (long)cmd
, arg_type
[0]);
4844 ret
= -TARGET_ENOSYS
;
4850 static const bitmask_transtbl iflag_tbl
[] = {
4851 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
4852 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
4853 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
4854 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
4855 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
4856 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
4857 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
4858 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
4859 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
4860 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
4861 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
4862 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
4863 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
4864 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
4868 static const bitmask_transtbl oflag_tbl
[] = {
4869 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
4870 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
4871 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
4872 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
4873 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
4874 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
4875 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
4876 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
4877 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
4878 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
4879 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
4880 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
4881 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
4882 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
4883 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
4884 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
4885 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
4886 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
4887 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
4888 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
4889 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
4890 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
4891 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
4892 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
4896 static const bitmask_transtbl cflag_tbl
[] = {
4897 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
4898 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
4899 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
4900 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
4901 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
4902 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
4903 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
4904 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
4905 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
4906 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
4907 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
4908 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
4909 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
4910 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
4911 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
4912 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
4913 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
4914 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
4915 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
4916 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
4917 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
4918 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
4919 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
4920 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
4921 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
4922 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
4923 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
4924 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
4925 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
4926 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
4927 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
4931 static const bitmask_transtbl lflag_tbl
[] = {
4932 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
4933 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
4934 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
4935 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
4936 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
4937 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
4938 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
4939 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
4940 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
4941 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
4942 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
4943 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
4944 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
4945 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
4946 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
4950 static void target_to_host_termios (void *dst
, const void *src
)
4952 struct host_termios
*host
= dst
;
4953 const struct target_termios
*target
= src
;
4956 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
4958 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
4960 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
4962 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
4963 host
->c_line
= target
->c_line
;
4965 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
4966 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
4967 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
4968 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
4969 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
4970 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
4971 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
4972 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
4973 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
4974 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
4975 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
4976 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
4977 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
4978 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
4979 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
4980 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
4981 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
4982 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
4985 static void host_to_target_termios (void *dst
, const void *src
)
4987 struct target_termios
*target
= dst
;
4988 const struct host_termios
*host
= src
;
4991 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
4993 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
4995 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
4997 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
4998 target
->c_line
= host
->c_line
;
5000 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
5001 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
5002 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
5003 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
5004 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
5005 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
5006 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
5007 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
5008 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
5009 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
5010 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
5011 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
5012 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
5013 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
5014 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
5015 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
5016 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
5017 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
5020 static const StructEntry struct_termios_def
= {
5021 .convert
= { host_to_target_termios
, target_to_host_termios
},
5022 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
5023 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
5026 static bitmask_transtbl mmap_flags_tbl
[] = {
5027 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
5028 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
5029 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
5030 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
,
5031 MAP_ANONYMOUS
, MAP_ANONYMOUS
},
5032 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
,
5033 MAP_GROWSDOWN
, MAP_GROWSDOWN
},
5034 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
,
5035 MAP_DENYWRITE
, MAP_DENYWRITE
},
5036 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
,
5037 MAP_EXECUTABLE
, MAP_EXECUTABLE
},
5038 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
5039 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
,
5040 MAP_NORESERVE
, MAP_NORESERVE
},
5041 { TARGET_MAP_HUGETLB
, TARGET_MAP_HUGETLB
, MAP_HUGETLB
, MAP_HUGETLB
},
5042 /* MAP_STACK had been ignored by the kernel for quite some time.
5043 Recognize it for the target insofar as we do not want to pass
5044 it through to the host. */
5045 { TARGET_MAP_STACK
, TARGET_MAP_STACK
, 0, 0 },
5049 #if defined(TARGET_I386)
5051 /* NOTE: there is really one LDT for all the threads */
5052 static uint8_t *ldt_table
;
5054 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
5061 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
5062 if (size
> bytecount
)
5064 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
5066 return -TARGET_EFAULT
;
5067 /* ??? Should this by byteswapped? */
5068 memcpy(p
, ldt_table
, size
);
5069 unlock_user(p
, ptr
, size
);
5073 /* XXX: add locking support */
5074 static abi_long
write_ldt(CPUX86State
*env
,
5075 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
5077 struct target_modify_ldt_ldt_s ldt_info
;
5078 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5079 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5080 int seg_not_present
, useable
, lm
;
5081 uint32_t *lp
, entry_1
, entry_2
;
5083 if (bytecount
!= sizeof(ldt_info
))
5084 return -TARGET_EINVAL
;
5085 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
5086 return -TARGET_EFAULT
;
5087 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5088 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5089 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5090 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5091 unlock_user_struct(target_ldt_info
, ptr
, 0);
5093 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
5094 return -TARGET_EINVAL
;
5095 seg_32bit
= ldt_info
.flags
& 1;
5096 contents
= (ldt_info
.flags
>> 1) & 3;
5097 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5098 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5099 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5100 useable
= (ldt_info
.flags
>> 6) & 1;
5104 lm
= (ldt_info
.flags
>> 7) & 1;
5106 if (contents
== 3) {
5108 return -TARGET_EINVAL
;
5109 if (seg_not_present
== 0)
5110 return -TARGET_EINVAL
;
5112 /* allocate the LDT */
5114 env
->ldt
.base
= target_mmap(0,
5115 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
5116 PROT_READ
|PROT_WRITE
,
5117 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
5118 if (env
->ldt
.base
== -1)
5119 return -TARGET_ENOMEM
;
5120 memset(g2h(env
->ldt
.base
), 0,
5121 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
5122 env
->ldt
.limit
= 0xffff;
5123 ldt_table
= g2h(env
->ldt
.base
);
5126 /* NOTE: same code as Linux kernel */
5127 /* Allow LDTs to be cleared by the user. */
5128 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5131 read_exec_only
== 1 &&
5133 limit_in_pages
== 0 &&
5134 seg_not_present
== 1 &&
5142 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5143 (ldt_info
.limit
& 0x0ffff);
5144 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5145 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5146 (ldt_info
.limit
& 0xf0000) |
5147 ((read_exec_only
^ 1) << 9) |
5149 ((seg_not_present
^ 1) << 15) |
5151 (limit_in_pages
<< 23) |
5155 entry_2
|= (useable
<< 20);
5157 /* Install the new entry ... */
5159 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
5160 lp
[0] = tswap32(entry_1
);
5161 lp
[1] = tswap32(entry_2
);
5165 /* specific and weird i386 syscalls */
5166 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
5167 unsigned long bytecount
)
5173 ret
= read_ldt(ptr
, bytecount
);
5176 ret
= write_ldt(env
, ptr
, bytecount
, 1);
5179 ret
= write_ldt(env
, ptr
, bytecount
, 0);
5182 ret
= -TARGET_ENOSYS
;
5188 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5189 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5191 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5192 struct target_modify_ldt_ldt_s ldt_info
;
5193 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5194 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5195 int seg_not_present
, useable
, lm
;
5196 uint32_t *lp
, entry_1
, entry_2
;
5199 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5200 if (!target_ldt_info
)
5201 return -TARGET_EFAULT
;
5202 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5203 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5204 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5205 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5206 if (ldt_info
.entry_number
== -1) {
5207 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
5208 if (gdt_table
[i
] == 0) {
5209 ldt_info
.entry_number
= i
;
5210 target_ldt_info
->entry_number
= tswap32(i
);
5215 unlock_user_struct(target_ldt_info
, ptr
, 1);
5217 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
5218 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
5219 return -TARGET_EINVAL
;
5220 seg_32bit
= ldt_info
.flags
& 1;
5221 contents
= (ldt_info
.flags
>> 1) & 3;
5222 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5223 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5224 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5225 useable
= (ldt_info
.flags
>> 6) & 1;
5229 lm
= (ldt_info
.flags
>> 7) & 1;
5232 if (contents
== 3) {
5233 if (seg_not_present
== 0)
5234 return -TARGET_EINVAL
;
5237 /* NOTE: same code as Linux kernel */
5238 /* Allow LDTs to be cleared by the user. */
5239 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5240 if ((contents
== 0 &&
5241 read_exec_only
== 1 &&
5243 limit_in_pages
== 0 &&
5244 seg_not_present
== 1 &&
5252 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5253 (ldt_info
.limit
& 0x0ffff);
5254 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5255 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5256 (ldt_info
.limit
& 0xf0000) |
5257 ((read_exec_only
^ 1) << 9) |
5259 ((seg_not_present
^ 1) << 15) |
5261 (limit_in_pages
<< 23) |
5266 /* Install the new entry ... */
5268 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
5269 lp
[0] = tswap32(entry_1
);
5270 lp
[1] = tswap32(entry_2
);
5274 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5276 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5277 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5278 uint32_t base_addr
, limit
, flags
;
5279 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
5280 int seg_not_present
, useable
, lm
;
5281 uint32_t *lp
, entry_1
, entry_2
;
5283 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5284 if (!target_ldt_info
)
5285 return -TARGET_EFAULT
;
5286 idx
= tswap32(target_ldt_info
->entry_number
);
5287 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
5288 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
5289 unlock_user_struct(target_ldt_info
, ptr
, 1);
5290 return -TARGET_EINVAL
;
5292 lp
= (uint32_t *)(gdt_table
+ idx
);
5293 entry_1
= tswap32(lp
[0]);
5294 entry_2
= tswap32(lp
[1]);
5296 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
5297 contents
= (entry_2
>> 10) & 3;
5298 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
5299 seg_32bit
= (entry_2
>> 22) & 1;
5300 limit_in_pages
= (entry_2
>> 23) & 1;
5301 useable
= (entry_2
>> 20) & 1;
5305 lm
= (entry_2
>> 21) & 1;
5307 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
5308 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
5309 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
5310 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
5311 base_addr
= (entry_1
>> 16) |
5312 (entry_2
& 0xff000000) |
5313 ((entry_2
& 0xff) << 16);
5314 target_ldt_info
->base_addr
= tswapal(base_addr
);
5315 target_ldt_info
->limit
= tswap32(limit
);
5316 target_ldt_info
->flags
= tswap32(flags
);
5317 unlock_user_struct(target_ldt_info
, ptr
, 1);
5320 #endif /* TARGET_I386 && TARGET_ABI32 */
5322 #ifndef TARGET_ABI32
5323 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
5330 case TARGET_ARCH_SET_GS
:
5331 case TARGET_ARCH_SET_FS
:
5332 if (code
== TARGET_ARCH_SET_GS
)
5336 cpu_x86_load_seg(env
, idx
, 0);
5337 env
->segs
[idx
].base
= addr
;
5339 case TARGET_ARCH_GET_GS
:
5340 case TARGET_ARCH_GET_FS
:
5341 if (code
== TARGET_ARCH_GET_GS
)
5345 val
= env
->segs
[idx
].base
;
5346 if (put_user(val
, addr
, abi_ulong
))
5347 ret
= -TARGET_EFAULT
;
5350 ret
= -TARGET_EINVAL
;
5357 #endif /* defined(TARGET_I386) */
5359 #define NEW_STACK_SIZE 0x40000
5362 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
5365 pthread_mutex_t mutex
;
5366 pthread_cond_t cond
;
5369 abi_ulong child_tidptr
;
5370 abi_ulong parent_tidptr
;
5374 static void *clone_func(void *arg
)
5376 new_thread_info
*info
= arg
;
5381 rcu_register_thread();
5382 tcg_register_thread();
5384 cpu
= ENV_GET_CPU(env
);
5386 ts
= (TaskState
*)cpu
->opaque
;
5387 info
->tid
= gettid();
5389 if (info
->child_tidptr
)
5390 put_user_u32(info
->tid
, info
->child_tidptr
);
5391 if (info
->parent_tidptr
)
5392 put_user_u32(info
->tid
, info
->parent_tidptr
);
5393 /* Enable signals. */
5394 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
5395 /* Signal to the parent that we're ready. */
5396 pthread_mutex_lock(&info
->mutex
);
5397 pthread_cond_broadcast(&info
->cond
);
5398 pthread_mutex_unlock(&info
->mutex
);
5399 /* Wait until the parent has finished initializing the tls state. */
5400 pthread_mutex_lock(&clone_lock
);
5401 pthread_mutex_unlock(&clone_lock
);
5407 /* do_fork() Must return host values and target errnos (unlike most
5408 do_*() functions). */
5409 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
5410 abi_ulong parent_tidptr
, target_ulong newtls
,
5411 abi_ulong child_tidptr
)
5413 CPUState
*cpu
= ENV_GET_CPU(env
);
5417 CPUArchState
*new_env
;
5420 flags
&= ~CLONE_IGNORED_FLAGS
;
5422 /* Emulate vfork() with fork() */
5423 if (flags
& CLONE_VFORK
)
5424 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
5426 if (flags
& CLONE_VM
) {
5427 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
5428 new_thread_info info
;
5429 pthread_attr_t attr
;
5431 if (((flags
& CLONE_THREAD_FLAGS
) != CLONE_THREAD_FLAGS
) ||
5432 (flags
& CLONE_INVALID_THREAD_FLAGS
)) {
5433 return -TARGET_EINVAL
;
5436 ts
= g_new0(TaskState
, 1);
5437 init_task_state(ts
);
5439 /* Grab a mutex so that thread setup appears atomic. */
5440 pthread_mutex_lock(&clone_lock
);
5442 /* we create a new CPU instance. */
5443 new_env
= cpu_copy(env
);
5444 /* Init regs that differ from the parent. */
5445 cpu_clone_regs(new_env
, newsp
);
5446 new_cpu
= ENV_GET_CPU(new_env
);
5447 new_cpu
->opaque
= ts
;
5448 ts
->bprm
= parent_ts
->bprm
;
5449 ts
->info
= parent_ts
->info
;
5450 ts
->signal_mask
= parent_ts
->signal_mask
;
5452 if (flags
& CLONE_CHILD_CLEARTID
) {
5453 ts
->child_tidptr
= child_tidptr
;
5456 if (flags
& CLONE_SETTLS
) {
5457 cpu_set_tls (new_env
, newtls
);
5460 memset(&info
, 0, sizeof(info
));
5461 pthread_mutex_init(&info
.mutex
, NULL
);
5462 pthread_mutex_lock(&info
.mutex
);
5463 pthread_cond_init(&info
.cond
, NULL
);
5465 if (flags
& CLONE_CHILD_SETTID
) {
5466 info
.child_tidptr
= child_tidptr
;
5468 if (flags
& CLONE_PARENT_SETTID
) {
5469 info
.parent_tidptr
= parent_tidptr
;
5472 ret
= pthread_attr_init(&attr
);
5473 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
5474 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
5475 /* It is not safe to deliver signals until the child has finished
5476 initializing, so temporarily block all signals. */
5477 sigfillset(&sigmask
);
5478 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
5480 /* If this is our first additional thread, we need to ensure we
5481 * generate code for parallel execution and flush old translations.
5483 if (!parallel_cpus
) {
5484 parallel_cpus
= true;
5488 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
5489 /* TODO: Free new CPU state if thread creation failed. */
5491 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
5492 pthread_attr_destroy(&attr
);
5494 /* Wait for the child to initialize. */
5495 pthread_cond_wait(&info
.cond
, &info
.mutex
);
5500 pthread_mutex_unlock(&info
.mutex
);
5501 pthread_cond_destroy(&info
.cond
);
5502 pthread_mutex_destroy(&info
.mutex
);
5503 pthread_mutex_unlock(&clone_lock
);
5505 /* if no CLONE_VM, we consider it is a fork */
5506 if (flags
& CLONE_INVALID_FORK_FLAGS
) {
5507 return -TARGET_EINVAL
;
5510 /* We can't support custom termination signals */
5511 if ((flags
& CSIGNAL
) != TARGET_SIGCHLD
) {
5512 return -TARGET_EINVAL
;
5515 if (block_signals()) {
5516 return -TARGET_ERESTARTSYS
;
5522 /* Child Process. */
5523 cpu_clone_regs(env
, newsp
);
5525 /* There is a race condition here. The parent process could
5526 theoretically read the TID in the child process before the child
5527 tid is set. This would require using either ptrace
5528 (not implemented) or having *_tidptr to point at a shared memory
5529 mapping. We can't repeat the spinlock hack used above because
5530 the child process gets its own copy of the lock. */
5531 if (flags
& CLONE_CHILD_SETTID
)
5532 put_user_u32(gettid(), child_tidptr
);
5533 if (flags
& CLONE_PARENT_SETTID
)
5534 put_user_u32(gettid(), parent_tidptr
);
5535 ts
= (TaskState
*)cpu
->opaque
;
5536 if (flags
& CLONE_SETTLS
)
5537 cpu_set_tls (env
, newtls
);
5538 if (flags
& CLONE_CHILD_CLEARTID
)
5539 ts
->child_tidptr
= child_tidptr
;
5547 /* warning : doesn't handle linux specific flags... */
5548 static int target_to_host_fcntl_cmd(int cmd
)
5553 case TARGET_F_DUPFD
:
5554 case TARGET_F_GETFD
:
5555 case TARGET_F_SETFD
:
5556 case TARGET_F_GETFL
:
5557 case TARGET_F_SETFL
:
5560 case TARGET_F_GETLK
:
5563 case TARGET_F_SETLK
:
5566 case TARGET_F_SETLKW
:
5569 case TARGET_F_GETOWN
:
5572 case TARGET_F_SETOWN
:
5575 case TARGET_F_GETSIG
:
5578 case TARGET_F_SETSIG
:
5581 #if TARGET_ABI_BITS == 32
5582 case TARGET_F_GETLK64
:
5585 case TARGET_F_SETLK64
:
5588 case TARGET_F_SETLKW64
:
5592 case TARGET_F_SETLEASE
:
5595 case TARGET_F_GETLEASE
:
5598 #ifdef F_DUPFD_CLOEXEC
5599 case TARGET_F_DUPFD_CLOEXEC
:
5600 ret
= F_DUPFD_CLOEXEC
;
5603 case TARGET_F_NOTIFY
:
5607 case TARGET_F_GETOWN_EX
:
5612 case TARGET_F_SETOWN_EX
:
5617 case TARGET_F_SETPIPE_SZ
:
5620 case TARGET_F_GETPIPE_SZ
:
5625 ret
= -TARGET_EINVAL
;
5629 #if defined(__powerpc64__)
5630 /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
5631 * is not supported by kernel. The glibc fcntl call actually adjusts
5632 * them to 5, 6 and 7 before making the syscall(). Since we make the
5633 * syscall directly, adjust to what is supported by the kernel.
5635 if (ret
>= F_GETLK64
&& ret
<= F_SETLKW64
) {
5636 ret
-= F_GETLK64
- 5;
5643 #define FLOCK_TRANSTBL \
5645 TRANSTBL_CONVERT(F_RDLCK); \
5646 TRANSTBL_CONVERT(F_WRLCK); \
5647 TRANSTBL_CONVERT(F_UNLCK); \
5648 TRANSTBL_CONVERT(F_EXLCK); \
5649 TRANSTBL_CONVERT(F_SHLCK); \
5652 static int target_to_host_flock(int type
)
5654 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
5656 #undef TRANSTBL_CONVERT
5657 return -TARGET_EINVAL
;
5660 static int host_to_target_flock(int type
)
5662 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
5664 #undef TRANSTBL_CONVERT
5665 /* if we don't know how to convert the value coming
5666 * from the host we copy to the target field as-is
5671 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
5672 abi_ulong target_flock_addr
)
5674 struct target_flock
*target_fl
;
5677 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
5678 return -TARGET_EFAULT
;
5681 __get_user(l_type
, &target_fl
->l_type
);
5682 l_type
= target_to_host_flock(l_type
);
5686 fl
->l_type
= l_type
;
5687 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
5688 __get_user(fl
->l_start
, &target_fl
->l_start
);
5689 __get_user(fl
->l_len
, &target_fl
->l_len
);
5690 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
5691 unlock_user_struct(target_fl
, target_flock_addr
, 0);
5695 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
5696 const struct flock64
*fl
)
5698 struct target_flock
*target_fl
;
5701 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
5702 return -TARGET_EFAULT
;
5705 l_type
= host_to_target_flock(fl
->l_type
);
5706 __put_user(l_type
, &target_fl
->l_type
);
5707 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
5708 __put_user(fl
->l_start
, &target_fl
->l_start
);
5709 __put_user(fl
->l_len
, &target_fl
->l_len
);
5710 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
5711 unlock_user_struct(target_fl
, target_flock_addr
, 1);
5715 typedef abi_long
from_flock64_fn(struct flock64
*fl
, abi_ulong target_addr
);
5716 typedef abi_long
to_flock64_fn(abi_ulong target_addr
, const struct flock64
*fl
);
5718 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
5719 static inline abi_long
copy_from_user_oabi_flock64(struct flock64
*fl
,
5720 abi_ulong target_flock_addr
)
5722 struct target_oabi_flock64
*target_fl
;
5725 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
5726 return -TARGET_EFAULT
;
5729 __get_user(l_type
, &target_fl
->l_type
);
5730 l_type
= target_to_host_flock(l_type
);
5734 fl
->l_type
= l_type
;
5735 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
5736 __get_user(fl
->l_start
, &target_fl
->l_start
);
5737 __get_user(fl
->l_len
, &target_fl
->l_len
);
5738 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
5739 unlock_user_struct(target_fl
, target_flock_addr
, 0);
5743 static inline abi_long
copy_to_user_oabi_flock64(abi_ulong target_flock_addr
,
5744 const struct flock64
*fl
)
5746 struct target_oabi_flock64
*target_fl
;
5749 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
5750 return -TARGET_EFAULT
;
5753 l_type
= host_to_target_flock(fl
->l_type
);
5754 __put_user(l_type
, &target_fl
->l_type
);
5755 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
5756 __put_user(fl
->l_start
, &target_fl
->l_start
);
5757 __put_user(fl
->l_len
, &target_fl
->l_len
);
5758 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
5759 unlock_user_struct(target_fl
, target_flock_addr
, 1);
5764 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
5765 abi_ulong target_flock_addr
)
5767 struct target_flock64
*target_fl
;
5770 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
5771 return -TARGET_EFAULT
;
5774 __get_user(l_type
, &target_fl
->l_type
);
5775 l_type
= target_to_host_flock(l_type
);
5779 fl
->l_type
= l_type
;
5780 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
5781 __get_user(fl
->l_start
, &target_fl
->l_start
);
5782 __get_user(fl
->l_len
, &target_fl
->l_len
);
5783 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
5784 unlock_user_struct(target_fl
, target_flock_addr
, 0);
5788 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
5789 const struct flock64
*fl
)
5791 struct target_flock64
*target_fl
;
5794 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
5795 return -TARGET_EFAULT
;
5798 l_type
= host_to_target_flock(fl
->l_type
);
5799 __put_user(l_type
, &target_fl
->l_type
);
5800 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
5801 __put_user(fl
->l_start
, &target_fl
->l_start
);
5802 __put_user(fl
->l_len
, &target_fl
->l_len
);
5803 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
5804 unlock_user_struct(target_fl
, target_flock_addr
, 1);
5808 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
5810 struct flock64 fl64
;
5812 struct f_owner_ex fox
;
5813 struct target_f_owner_ex
*target_fox
;
5816 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
5818 if (host_cmd
== -TARGET_EINVAL
)
5822 case TARGET_F_GETLK
:
5823 ret
= copy_from_user_flock(&fl64
, arg
);
5827 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
5829 ret
= copy_to_user_flock(arg
, &fl64
);
5833 case TARGET_F_SETLK
:
5834 case TARGET_F_SETLKW
:
5835 ret
= copy_from_user_flock(&fl64
, arg
);
5839 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
5842 case TARGET_F_GETLK64
:
5843 ret
= copy_from_user_flock64(&fl64
, arg
);
5847 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
5849 ret
= copy_to_user_flock64(arg
, &fl64
);
5852 case TARGET_F_SETLK64
:
5853 case TARGET_F_SETLKW64
:
5854 ret
= copy_from_user_flock64(&fl64
, arg
);
5858 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
5861 case TARGET_F_GETFL
:
5862 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
5864 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
5868 case TARGET_F_SETFL
:
5869 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
5870 target_to_host_bitmask(arg
,
5875 case TARGET_F_GETOWN_EX
:
5876 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
5878 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
5879 return -TARGET_EFAULT
;
5880 target_fox
->type
= tswap32(fox
.type
);
5881 target_fox
->pid
= tswap32(fox
.pid
);
5882 unlock_user_struct(target_fox
, arg
, 1);
5888 case TARGET_F_SETOWN_EX
:
5889 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
5890 return -TARGET_EFAULT
;
5891 fox
.type
= tswap32(target_fox
->type
);
5892 fox
.pid
= tswap32(target_fox
->pid
);
5893 unlock_user_struct(target_fox
, arg
, 0);
5894 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
5898 case TARGET_F_SETOWN
:
5899 case TARGET_F_GETOWN
:
5900 case TARGET_F_SETSIG
:
5901 case TARGET_F_GETSIG
:
5902 case TARGET_F_SETLEASE
:
5903 case TARGET_F_GETLEASE
:
5904 case TARGET_F_SETPIPE_SZ
:
5905 case TARGET_F_GETPIPE_SZ
:
5906 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
5910 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
5918 static inline int high2lowuid(int uid
)
5926 static inline int high2lowgid(int gid
)
5934 static inline int low2highuid(int uid
)
5936 if ((int16_t)uid
== -1)
5942 static inline int low2highgid(int gid
)
5944 if ((int16_t)gid
== -1)
5949 static inline int tswapid(int id
)
5954 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
5956 #else /* !USE_UID16 */
5957 static inline int high2lowuid(int uid
)
5961 static inline int high2lowgid(int gid
)
5965 static inline int low2highuid(int uid
)
5969 static inline int low2highgid(int gid
)
5973 static inline int tswapid(int id
)
5978 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
5980 #endif /* USE_UID16 */
5982 /* We must do direct syscalls for setting UID/GID, because we want to
5983 * implement the Linux system call semantics of "change only for this thread",
5984 * not the libc/POSIX semantics of "change for all threads in process".
5985 * (See http://ewontfix.com/17/ for more details.)
5986 * We use the 32-bit version of the syscalls if present; if it is not
5987 * then either the host architecture supports 32-bit UIDs natively with
5988 * the standard syscall, or the 16-bit UID is the best we can do.
5990 #ifdef __NR_setuid32
5991 #define __NR_sys_setuid __NR_setuid32
5993 #define __NR_sys_setuid __NR_setuid
5995 #ifdef __NR_setgid32
5996 #define __NR_sys_setgid __NR_setgid32
5998 #define __NR_sys_setgid __NR_setgid
6000 #ifdef __NR_setresuid32
6001 #define __NR_sys_setresuid __NR_setresuid32
6003 #define __NR_sys_setresuid __NR_setresuid
6005 #ifdef __NR_setresgid32
6006 #define __NR_sys_setresgid __NR_setresgid32
6008 #define __NR_sys_setresgid __NR_setresgid
6011 _syscall1(int, sys_setuid
, uid_t
, uid
)
6012 _syscall1(int, sys_setgid
, gid_t
, gid
)
6013 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
6014 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
6016 void syscall_init(void)
6019 const argtype
*arg_type
;
6023 thunk_init(STRUCT_MAX
);
6025 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6026 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6027 #include "syscall_types.h"
6029 #undef STRUCT_SPECIAL
6031 /* Build target_to_host_errno_table[] table from
6032 * host_to_target_errno_table[]. */
6033 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
6034 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
6037 /* we patch the ioctl size if necessary. We rely on the fact that
6038 no ioctl has all the bits at '1' in the size field */
6040 while (ie
->target_cmd
!= 0) {
6041 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
6042 TARGET_IOC_SIZEMASK
) {
6043 arg_type
= ie
->arg_type
;
6044 if (arg_type
[0] != TYPE_PTR
) {
6045 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
6050 size
= thunk_type_size(arg_type
, 0);
6051 ie
->target_cmd
= (ie
->target_cmd
&
6052 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
6053 (size
<< TARGET_IOC_SIZESHIFT
);
6056 /* automatic consistency check if same arch */
6057 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6058 (defined(__x86_64__) && defined(TARGET_X86_64))
6059 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
6060 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6061 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
6068 #if TARGET_ABI_BITS == 32
6069 static inline uint64_t target_offset64(uint32_t word0
, uint32_t word1
)
6071 #ifdef TARGET_WORDS_BIGENDIAN
6072 return ((uint64_t)word0
<< 32) | word1
;
6074 return ((uint64_t)word1
<< 32) | word0
;
6077 #else /* TARGET_ABI_BITS == 32 */
6078 static inline uint64_t target_offset64(uint64_t word0
, uint64_t word1
)
6082 #endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/*
 * truncate64: the 64-bit length is passed split across two guest
 * registers.  ABIs that require aligned register pairs shift the pair
 * up by one register, so start at arg3/arg4 in that case.
 */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/*
 * ftruncate64: same register-pair handling as target_truncate64 above,
 * with a file descriptor instead of a pathname.
 */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
6112 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
6113 abi_ulong target_addr
)
6115 struct target_timespec
*target_ts
;
6117 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
6118 return -TARGET_EFAULT
;
6119 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6120 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6121 unlock_user_struct(target_ts
, target_addr
, 0);
6125 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
6126 struct timespec
*host_ts
)
6128 struct target_timespec
*target_ts
;
6130 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
6131 return -TARGET_EFAULT
;
6132 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6133 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6134 unlock_user_struct(target_ts
, target_addr
, 1);
6138 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
6139 abi_ulong target_addr
)
6141 struct target_itimerspec
*target_itspec
;
6143 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
6144 return -TARGET_EFAULT
;
6147 host_itspec
->it_interval
.tv_sec
=
6148 tswapal(target_itspec
->it_interval
.tv_sec
);
6149 host_itspec
->it_interval
.tv_nsec
=
6150 tswapal(target_itspec
->it_interval
.tv_nsec
);
6151 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
6152 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
6154 unlock_user_struct(target_itspec
, target_addr
, 1);
6158 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
6159 struct itimerspec
*host_its
)
6161 struct target_itimerspec
*target_itspec
;
6163 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
6164 return -TARGET_EFAULT
;
6167 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
6168 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
6170 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
6171 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
6173 unlock_user_struct(target_itspec
, target_addr
, 0);
6177 static inline abi_long
target_to_host_timex(struct timex
*host_tx
,
6178 abi_long target_addr
)
6180 struct target_timex
*target_tx
;
6182 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
6183 return -TARGET_EFAULT
;
6186 __get_user(host_tx
->modes
, &target_tx
->modes
);
6187 __get_user(host_tx
->offset
, &target_tx
->offset
);
6188 __get_user(host_tx
->freq
, &target_tx
->freq
);
6189 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6190 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
6191 __get_user(host_tx
->status
, &target_tx
->status
);
6192 __get_user(host_tx
->constant
, &target_tx
->constant
);
6193 __get_user(host_tx
->precision
, &target_tx
->precision
);
6194 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
6195 __get_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
6196 __get_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
6197 __get_user(host_tx
->tick
, &target_tx
->tick
);
6198 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
6199 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
6200 __get_user(host_tx
->shift
, &target_tx
->shift
);
6201 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
6202 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
6203 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
6204 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
6205 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
6206 __get_user(host_tx
->tai
, &target_tx
->tai
);
6208 unlock_user_struct(target_tx
, target_addr
, 0);
6212 static inline abi_long
host_to_target_timex(abi_long target_addr
,
6213 struct timex
*host_tx
)
6215 struct target_timex
*target_tx
;
6217 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
6218 return -TARGET_EFAULT
;
6221 __put_user(host_tx
->modes
, &target_tx
->modes
);
6222 __put_user(host_tx
->offset
, &target_tx
->offset
);
6223 __put_user(host_tx
->freq
, &target_tx
->freq
);
6224 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6225 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
6226 __put_user(host_tx
->status
, &target_tx
->status
);
6227 __put_user(host_tx
->constant
, &target_tx
->constant
);
6228 __put_user(host_tx
->precision
, &target_tx
->precision
);
6229 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
6230 __put_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
6231 __put_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
6232 __put_user(host_tx
->tick
, &target_tx
->tick
);
6233 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
6234 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
6235 __put_user(host_tx
->shift
, &target_tx
->shift
);
6236 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
6237 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
6238 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
6239 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
6240 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
6241 __put_user(host_tx
->tai
, &target_tx
->tai
);
6243 unlock_user_struct(target_tx
, target_addr
, 1);
6248 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
6249 abi_ulong target_addr
)
6251 struct target_sigevent
*target_sevp
;
6253 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
6254 return -TARGET_EFAULT
;
6257 /* This union is awkward on 64 bit systems because it has a 32 bit
6258 * integer and a pointer in it; we follow the conversion approach
6259 * used for handling sigval types in signal.c so the guest should get
6260 * the correct value back even if we did a 64 bit byteswap and it's
6261 * using the 32 bit integer.
6263 host_sevp
->sigev_value
.sival_ptr
=
6264 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
6265 host_sevp
->sigev_signo
=
6266 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
6267 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
6268 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
6270 unlock_user_struct(target_sevp
, target_addr
, 1);
#if defined(TARGET_NR_mlockall)
/* Translate the guest's mlockall() flag bits into the host MCL_* bits. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }

    return result;
}
#endif
#if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
     defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
     defined(TARGET_NR_newfstatat))
/*
 * Write *host_st out as the guest's 64-bit stat structure at target_addr.
 * ARM/ABI32 EABI guests use their own layout (target_eabi_stat64); all
 * other guests use target_stat64 when available, else target_stat.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 * NOTE(review): the preprocessor scaffolding (#else/#endif/returns) was
 * elided in this extraction and has been reconstructed — verify against
 * the upstream file.
 */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif
6356 /* ??? Using host futex calls even when target atomic operations
6357 are not really atomic probably breaks things. However implementing
6358 futexes locally would make futexes shared between multiple processes
6359 tricky. However they're probably useless because guest atomic
6360 operations won't work either. */
6361 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
6362 target_ulong uaddr2
, int val3
)
6364 struct timespec ts
, *pts
;
6367 /* ??? We assume FUTEX_* constants are the same on both host
6369 #ifdef FUTEX_CMD_MASK
6370 base_op
= op
& FUTEX_CMD_MASK
;
6376 case FUTEX_WAIT_BITSET
:
6379 target_to_host_timespec(pts
, timeout
);
6383 return get_errno(safe_futex(g2h(uaddr
), op
, tswap32(val
),
6386 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
6388 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
6390 case FUTEX_CMP_REQUEUE
:
6392 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6393 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6394 But the prototype takes a `struct timespec *'; insert casts
6395 to satisfy the compiler. We do not need to tswap TIMEOUT
6396 since it's not compared to guest memory. */
6397 pts
= (struct timespec
*)(uintptr_t) timeout
;
6398 return get_errno(safe_futex(g2h(uaddr
), op
, val
, pts
,
6400 (base_op
== FUTEX_CMP_REQUEUE
6404 return -TARGET_ENOSYS
;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate name_to_handle_at(2): read the guest's handle_bytes, call the
 * host syscall into a scratch file_handle, then copy the (opaque) handle
 * back to guest memory with the two header fields byte-swapped.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate open_by_handle_at(2): duplicate the guest's file_handle into a
 * host copy with the header fields byte-swapped, then call the host
 * syscall with the translated open flags.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                                      target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
#if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
/*
 * Emulate signalfd4(2): translate the guest sigset and flags, create the
 * host signalfd, and register the fd translator so siginfo structures
 * read from it get converted back to the guest layout.
 */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    abi_long ret;
    target_sigset_t *target_mask;
    sigset_t host_mask;

    /* Reject flag bits the guest ABI does not define for signalfd4. */
    if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
#endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Termination by signal: remap the low 7 signal bits only. */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* Stopped: the stop signal lives in bits 8-15. */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
6540 static int open_self_cmdline(void *cpu_env
, int fd
)
6542 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
6543 struct linux_binprm
*bprm
= ((TaskState
*)cpu
->opaque
)->bprm
;
6546 for (i
= 0; i
< bprm
->argc
; i
++) {
6547 size_t len
= strlen(bprm
->argv
[i
]) + 1;
6549 if (write(fd
, bprm
->argv
[i
], len
) != len
) {
6557 static int open_self_maps(void *cpu_env
, int fd
)
6559 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
6560 TaskState
*ts
= cpu
->opaque
;
6566 fp
= fopen("/proc/self/maps", "r");
6571 while ((read
= getline(&line
, &len
, fp
)) != -1) {
6572 int fields
, dev_maj
, dev_min
, inode
;
6573 uint64_t min
, max
, offset
;
6574 char flag_r
, flag_w
, flag_x
, flag_p
;
6575 char path
[512] = "";
6576 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
6577 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
6578 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
6580 if ((fields
< 10) || (fields
> 11)) {
6583 if (h2g_valid(min
)) {
6584 int flags
= page_get_flags(h2g(min
));
6585 max
= h2g_valid(max
- 1) ? max
: (uintptr_t)g2h(GUEST_ADDR_MAX
) + 1;
6586 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
6589 if (h2g(min
) == ts
->info
->stack_limit
) {
6590 pstrcpy(path
, sizeof(path
), " [stack]");
6592 dprintf(fd
, TARGET_ABI_FMT_ptr
"-" TARGET_ABI_FMT_ptr
6593 " %c%c%c%c %08" PRIx64
" %02x:%02x %d %s%s\n",
6594 h2g(min
), h2g(max
- 1) + 1, flag_r
, flag_w
,
6595 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
6596 path
[0] ? " " : "", path
);
6606 static int open_self_stat(void *cpu_env
, int fd
)
6608 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
6609 TaskState
*ts
= cpu
->opaque
;
6610 abi_ulong start_stack
= ts
->info
->start_stack
;
6613 for (i
= 0; i
< 44; i
++) {
6621 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
6622 } else if (i
== 1) {
6624 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
6625 } else if (i
== 27) {
6628 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
6630 /* for the rest, there is MasterCard */
6631 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
6635 if (write(fd
, buf
, len
) != len
) {
6643 static int open_self_auxv(void *cpu_env
, int fd
)
6645 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
6646 TaskState
*ts
= cpu
->opaque
;
6647 abi_ulong auxv
= ts
->info
->saved_auxv
;
6648 abi_ulong len
= ts
->info
->auxv_len
;
6652 * Auxiliary vector is stored in target process stack.
6653 * read in whole auxv vector and copy it to file
6655 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
6659 r
= write(fd
, ptr
, len
);
6666 lseek(fd
, 0, SEEK_SET
);
6667 unlock_user(ptr
, auxv
, len
);
/*
 * Return 1 if filename names the given /proc entry for the current
 * process — either "/proc/self/<entry>" or "/proc/<our pid>/<entry>" —
 * and 0 otherwise.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    const char *rest = filename;
    size_t n;

    /* Must live under /proc/. */
    n = strlen("/proc/");
    if (strncmp(rest, "/proc/", n) != 0) {
        return 0;
    }
    rest += n;

    /* Must refer to this process, as "self/" or "<pid>/". */
    n = strlen("self/");
    if (strncmp(rest, "self/", n) == 0) {
        rest += n;
    } else if (*rest >= '1' && *rest <= '9') {
        char myself[80];

        snprintf(myself, sizeof(myself), "%d/", getpid());
        n = strlen(myself);
        if (strncmp(rest, myself, n) != 0) {
            return 0;
        }
        rest += n;
    } else {
        return 0;
    }

    /* The remaining path component must match exactly. */
    return strcmp(rest, entry) == 0;
}
6697 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-match variant used for absolute /proc paths (byte-swapped hosts). */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
6703 static int open_net_route(void *cpu_env
, int fd
)
6710 fp
= fopen("/proc/net/route", "r");
6717 read
= getline(&line
, &len
, fp
);
6718 dprintf(fd
, "%s", line
);
6722 while ((read
= getline(&line
, &len
, fp
)) != -1) {
6724 uint32_t dest
, gw
, mask
;
6725 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
6726 sscanf(line
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6727 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
6728 &mask
, &mtu
, &window
, &irtt
);
6729 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6730 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
6731 metric
, tswap32(mask
), mtu
, window
, irtt
);
6741 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
6744 const char *filename
;
6745 int (*fill
)(void *cpu_env
, int fd
);
6746 int (*cmp
)(const char *s1
, const char *s2
);
6748 const struct fake_open
*fake_open
;
6749 static const struct fake_open fakes
[] = {
6750 { "maps", open_self_maps
, is_proc_myself
},
6751 { "stat", open_self_stat
, is_proc_myself
},
6752 { "auxv", open_self_auxv
, is_proc_myself
},
6753 { "cmdline", open_self_cmdline
, is_proc_myself
},
6754 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6755 { "/proc/net/route", open_net_route
, is_proc
},
6757 { NULL
, NULL
, NULL
}
6760 if (is_proc_myself(pathname
, "exe")) {
6761 int execfd
= qemu_getauxval(AT_EXECFD
);
6762 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
6765 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
6766 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
6771 if (fake_open
->filename
) {
6773 char filename
[PATH_MAX
];
6776 /* create temporary file to map stat to */
6777 tmpdir
= getenv("TMPDIR");
6780 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
6781 fd
= mkstemp(filename
);
6787 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
6793 lseek(fd
, 0, SEEK_SET
);
6798 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
6801 #define TIMER_MAGIC 0x0caf0000
6802 #define TIMER_MAGIC_MASK 0xffff0000
6804 /* Convert QEMU provided timer ID back to internal 16bit index format */
6805 static target_timer_t
get_timer_id(abi_long arg
)
6807 target_timer_t timerid
= arg
;
6809 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
6810 return -TARGET_EINVAL
;
6815 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
6816 return -TARGET_EINVAL
;
6822 static int target_to_host_cpu_mask(unsigned long *host_mask
,
6824 abi_ulong target_addr
,
6827 unsigned target_bits
= sizeof(abi_ulong
) * 8;
6828 unsigned host_bits
= sizeof(*host_mask
) * 8;
6829 abi_ulong
*target_mask
;
6832 assert(host_size
>= target_size
);
6834 target_mask
= lock_user(VERIFY_READ
, target_addr
, target_size
, 1);
6836 return -TARGET_EFAULT
;
6838 memset(host_mask
, 0, host_size
);
6840 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
6841 unsigned bit
= i
* target_bits
;
6844 __get_user(val
, &target_mask
[i
]);
6845 for (j
= 0; j
< target_bits
; j
++, bit
++) {
6846 if (val
& (1UL << j
)) {
6847 host_mask
[bit
/ host_bits
] |= 1UL << (bit
% host_bits
);
6852 unlock_user(target_mask
, target_addr
, 0);
6856 static int host_to_target_cpu_mask(const unsigned long *host_mask
,
6858 abi_ulong target_addr
,
6861 unsigned target_bits
= sizeof(abi_ulong
) * 8;
6862 unsigned host_bits
= sizeof(*host_mask
) * 8;
6863 abi_ulong
*target_mask
;
6866 assert(host_size
>= target_size
);
6868 target_mask
= lock_user(VERIFY_WRITE
, target_addr
, target_size
, 0);
6870 return -TARGET_EFAULT
;
6873 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
6874 unsigned bit
= i
* target_bits
;
6877 for (j
= 0; j
< target_bits
; j
++, bit
++) {
6878 if (host_mask
[bit
/ host_bits
] & (1UL << (bit
% host_bits
))) {
6882 __put_user(val
, &target_mask
[i
]);
6885 unlock_user(target_mask
, target_addr
, target_size
);
6889 /* This is an internal helper for do_syscall so that it is easier
6890 * to have a single return point, so that actions, such as logging
6891 * of syscall results, can be performed.
6892 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
6894 static abi_long
do_syscall1(void *cpu_env
, int num
, abi_long arg1
,
6895 abi_long arg2
, abi_long arg3
, abi_long arg4
,
6896 abi_long arg5
, abi_long arg6
, abi_long arg7
,
6899 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
6901 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
6902 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
6903 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64)
6906 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
6907 || defined(TARGET_NR_fstatfs)
6913 case TARGET_NR_exit
:
6914 /* In old applications this may be used to implement _exit(2).
6915 However in threaded applictions it is used for thread termination,
6916 and _exit_group is used for application termination.
6917 Do thread termination if we have more then one thread. */
6919 if (block_signals()) {
6920 return -TARGET_ERESTARTSYS
;
6925 if (CPU_NEXT(first_cpu
)) {
6928 /* Remove the CPU from the list. */
6929 QTAILQ_REMOVE_RCU(&cpus
, cpu
, node
);
6934 if (ts
->child_tidptr
) {
6935 put_user_u32(0, ts
->child_tidptr
);
6936 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
6940 object_unref(OBJECT(cpu
));
6942 rcu_unregister_thread();
6947 preexit_cleanup(cpu_env
, arg1
);
6949 return 0; /* avoid warning */
6950 case TARGET_NR_read
:
6954 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
6955 return -TARGET_EFAULT
;
6956 ret
= get_errno(safe_read(arg1
, p
, arg3
));
6958 fd_trans_host_to_target_data(arg1
)) {
6959 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
6961 unlock_user(p
, arg2
, ret
);
6964 case TARGET_NR_write
:
6965 if (arg2
== 0 && arg3
== 0) {
6966 return get_errno(safe_write(arg1
, 0, 0));
6968 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
6969 return -TARGET_EFAULT
;
6970 if (fd_trans_target_to_host_data(arg1
)) {
6971 void *copy
= g_malloc(arg3
);
6972 memcpy(copy
, p
, arg3
);
6973 ret
= fd_trans_target_to_host_data(arg1
)(copy
, arg3
);
6975 ret
= get_errno(safe_write(arg1
, copy
, ret
));
6979 ret
= get_errno(safe_write(arg1
, p
, arg3
));
6981 unlock_user(p
, arg2
, 0);
6984 #ifdef TARGET_NR_open
6985 case TARGET_NR_open
:
6986 if (!(p
= lock_user_string(arg1
)))
6987 return -TARGET_EFAULT
;
6988 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
6989 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
6991 fd_trans_unregister(ret
);
6992 unlock_user(p
, arg1
, 0);
6995 case TARGET_NR_openat
:
6996 if (!(p
= lock_user_string(arg2
)))
6997 return -TARGET_EFAULT
;
6998 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
6999 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
7001 fd_trans_unregister(ret
);
7002 unlock_user(p
, arg2
, 0);
7004 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7005 case TARGET_NR_name_to_handle_at
:
7006 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
7009 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7010 case TARGET_NR_open_by_handle_at
:
7011 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
7012 fd_trans_unregister(ret
);
7015 case TARGET_NR_close
:
7016 fd_trans_unregister(arg1
);
7017 return get_errno(close(arg1
));
7020 return do_brk(arg1
);
7021 #ifdef TARGET_NR_fork
7022 case TARGET_NR_fork
:
7023 return get_errno(do_fork(cpu_env
, TARGET_SIGCHLD
, 0, 0, 0, 0));
7025 #ifdef TARGET_NR_waitpid
7026 case TARGET_NR_waitpid
:
7029 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
7030 if (!is_error(ret
) && arg2
&& ret
7031 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
7032 return -TARGET_EFAULT
;
7036 #ifdef TARGET_NR_waitid
7037 case TARGET_NR_waitid
:
7041 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
7042 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
7043 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
7044 return -TARGET_EFAULT
;
7045 host_to_target_siginfo(p
, &info
);
7046 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
7051 #ifdef TARGET_NR_creat /* not on alpha */
7052 case TARGET_NR_creat
:
7053 if (!(p
= lock_user_string(arg1
)))
7054 return -TARGET_EFAULT
;
7055 ret
= get_errno(creat(p
, arg2
));
7056 fd_trans_unregister(ret
);
7057 unlock_user(p
, arg1
, 0);
7060 #ifdef TARGET_NR_link
7061 case TARGET_NR_link
:
7064 p
= lock_user_string(arg1
);
7065 p2
= lock_user_string(arg2
);
7067 ret
= -TARGET_EFAULT
;
7069 ret
= get_errno(link(p
, p2
));
7070 unlock_user(p2
, arg2
, 0);
7071 unlock_user(p
, arg1
, 0);
7075 #if defined(TARGET_NR_linkat)
7076 case TARGET_NR_linkat
:
7080 return -TARGET_EFAULT
;
7081 p
= lock_user_string(arg2
);
7082 p2
= lock_user_string(arg4
);
7084 ret
= -TARGET_EFAULT
;
7086 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
7087 unlock_user(p
, arg2
, 0);
7088 unlock_user(p2
, arg4
, 0);
7092 #ifdef TARGET_NR_unlink
7093 case TARGET_NR_unlink
:
7094 if (!(p
= lock_user_string(arg1
)))
7095 return -TARGET_EFAULT
;
7096 ret
= get_errno(unlink(p
));
7097 unlock_user(p
, arg1
, 0);
7100 #if defined(TARGET_NR_unlinkat)
7101 case TARGET_NR_unlinkat
:
7102 if (!(p
= lock_user_string(arg2
)))
7103 return -TARGET_EFAULT
;
7104 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
7105 unlock_user(p
, arg2
, 0);
7108 case TARGET_NR_execve
:
7110 char **argp
, **envp
;
7113 abi_ulong guest_argp
;
7114 abi_ulong guest_envp
;
7121 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
7122 if (get_user_ual(addr
, gp
))
7123 return -TARGET_EFAULT
;
7130 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
7131 if (get_user_ual(addr
, gp
))
7132 return -TARGET_EFAULT
;
7138 argp
= g_new0(char *, argc
+ 1);
7139 envp
= g_new0(char *, envc
+ 1);
7141 for (gp
= guest_argp
, q
= argp
; gp
;
7142 gp
+= sizeof(abi_ulong
), q
++) {
7143 if (get_user_ual(addr
, gp
))
7147 if (!(*q
= lock_user_string(addr
)))
7149 total_size
+= strlen(*q
) + 1;
7153 for (gp
= guest_envp
, q
= envp
; gp
;
7154 gp
+= sizeof(abi_ulong
), q
++) {
7155 if (get_user_ual(addr
, gp
))
7159 if (!(*q
= lock_user_string(addr
)))
7161 total_size
+= strlen(*q
) + 1;
7165 if (!(p
= lock_user_string(arg1
)))
7167 /* Although execve() is not an interruptible syscall it is
7168 * a special case where we must use the safe_syscall wrapper:
7169 * if we allow a signal to happen before we make the host
7170 * syscall then we will 'lose' it, because at the point of
7171 * execve the process leaves QEMU's control. So we use the
7172 * safe syscall wrapper to ensure that we either take the
7173 * signal as a guest signal, or else it does not happen
7174 * before the execve completes and makes it the other
7175 * program's problem.
7177 ret
= get_errno(safe_execve(p
, argp
, envp
));
7178 unlock_user(p
, arg1
, 0);
7183 ret
= -TARGET_EFAULT
;
7186 for (gp
= guest_argp
, q
= argp
; *q
;
7187 gp
+= sizeof(abi_ulong
), q
++) {
7188 if (get_user_ual(addr
, gp
)
7191 unlock_user(*q
, addr
, 0);
7193 for (gp
= guest_envp
, q
= envp
; *q
;
7194 gp
+= sizeof(abi_ulong
), q
++) {
7195 if (get_user_ual(addr
, gp
)
7198 unlock_user(*q
, addr
, 0);
7205 case TARGET_NR_chdir
:
7206 if (!(p
= lock_user_string(arg1
)))
7207 return -TARGET_EFAULT
;
7208 ret
= get_errno(chdir(p
));
7209 unlock_user(p
, arg1
, 0);
7211 #ifdef TARGET_NR_time
7212 case TARGET_NR_time
:
7215 ret
= get_errno(time(&host_time
));
7218 && put_user_sal(host_time
, arg1
))
7219 return -TARGET_EFAULT
;
7223 #ifdef TARGET_NR_mknod
7224 case TARGET_NR_mknod
:
7225 if (!(p
= lock_user_string(arg1
)))
7226 return -TARGET_EFAULT
;
7227 ret
= get_errno(mknod(p
, arg2
, arg3
));
7228 unlock_user(p
, arg1
, 0);
7231 #if defined(TARGET_NR_mknodat)
7232 case TARGET_NR_mknodat
:
7233 if (!(p
= lock_user_string(arg2
)))
7234 return -TARGET_EFAULT
;
7235 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
7236 unlock_user(p
, arg2
, 0);
7239 #ifdef TARGET_NR_chmod
7240 case TARGET_NR_chmod
:
7241 if (!(p
= lock_user_string(arg1
)))
7242 return -TARGET_EFAULT
;
7243 ret
= get_errno(chmod(p
, arg2
));
7244 unlock_user(p
, arg1
, 0);
7247 #ifdef TARGET_NR_lseek
7248 case TARGET_NR_lseek
:
7249 return get_errno(lseek(arg1
, arg2
, arg3
));
7251 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7252 /* Alpha specific */
7253 case TARGET_NR_getxpid
:
7254 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
7255 return get_errno(getpid());
7257 #ifdef TARGET_NR_getpid
7258 case TARGET_NR_getpid
:
7259 return get_errno(getpid());
7261 case TARGET_NR_mount
:
7263 /* need to look at the data field */
7267 p
= lock_user_string(arg1
);
7269 return -TARGET_EFAULT
;
7275 p2
= lock_user_string(arg2
);
7278 unlock_user(p
, arg1
, 0);
7280 return -TARGET_EFAULT
;
7284 p3
= lock_user_string(arg3
);
7287 unlock_user(p
, arg1
, 0);
7289 unlock_user(p2
, arg2
, 0);
7290 return -TARGET_EFAULT
;
7296 /* FIXME - arg5 should be locked, but it isn't clear how to
7297 * do that since it's not guaranteed to be a NULL-terminated
7301 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
7303 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
7305 ret
= get_errno(ret
);
7308 unlock_user(p
, arg1
, 0);
7310 unlock_user(p2
, arg2
, 0);
7312 unlock_user(p3
, arg3
, 0);
7316 #ifdef TARGET_NR_umount
7317 case TARGET_NR_umount
:
7318 if (!(p
= lock_user_string(arg1
)))
7319 return -TARGET_EFAULT
;
7320 ret
= get_errno(umount(p
));
7321 unlock_user(p
, arg1
, 0);
7324 #ifdef TARGET_NR_stime /* not on alpha */
7325 case TARGET_NR_stime
:
7328 if (get_user_sal(host_time
, arg1
))
7329 return -TARGET_EFAULT
;
7330 return get_errno(stime(&host_time
));
7333 #ifdef TARGET_NR_alarm /* not on alpha */
7334 case TARGET_NR_alarm
:
7337 #ifdef TARGET_NR_pause /* not on alpha */
7338 case TARGET_NR_pause
:
7339 if (!block_signals()) {
7340 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
7342 return -TARGET_EINTR
;
7344 #ifdef TARGET_NR_utime
7345 case TARGET_NR_utime
:
7347 struct utimbuf tbuf
, *host_tbuf
;
7348 struct target_utimbuf
*target_tbuf
;
7350 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
7351 return -TARGET_EFAULT
;
7352 tbuf
.actime
= tswapal(target_tbuf
->actime
);
7353 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
7354 unlock_user_struct(target_tbuf
, arg2
, 0);
7359 if (!(p
= lock_user_string(arg1
)))
7360 return -TARGET_EFAULT
;
7361 ret
= get_errno(utime(p
, host_tbuf
));
7362 unlock_user(p
, arg1
, 0);
7366 #ifdef TARGET_NR_utimes
7367 case TARGET_NR_utimes
:
7369 struct timeval
*tvp
, tv
[2];
7371 if (copy_from_user_timeval(&tv
[0], arg2
)
7372 || copy_from_user_timeval(&tv
[1],
7373 arg2
+ sizeof(struct target_timeval
)))
7374 return -TARGET_EFAULT
;
7379 if (!(p
= lock_user_string(arg1
)))
7380 return -TARGET_EFAULT
;
7381 ret
= get_errno(utimes(p
, tvp
));
7382 unlock_user(p
, arg1
, 0);
7386 #if defined(TARGET_NR_futimesat)
7387 case TARGET_NR_futimesat
:
7389 struct timeval
*tvp
, tv
[2];
7391 if (copy_from_user_timeval(&tv
[0], arg3
)
7392 || copy_from_user_timeval(&tv
[1],
7393 arg3
+ sizeof(struct target_timeval
)))
7394 return -TARGET_EFAULT
;
7399 if (!(p
= lock_user_string(arg2
))) {
7400 return -TARGET_EFAULT
;
7402 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
7403 unlock_user(p
, arg2
, 0);
7407 #ifdef TARGET_NR_access
7408 case TARGET_NR_access
:
7409 if (!(p
= lock_user_string(arg1
))) {
7410 return -TARGET_EFAULT
;
7412 ret
= get_errno(access(path(p
), arg2
));
7413 unlock_user(p
, arg1
, 0);
7416 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7417 case TARGET_NR_faccessat
:
7418 if (!(p
= lock_user_string(arg2
))) {
7419 return -TARGET_EFAULT
;
7421 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
7422 unlock_user(p
, arg2
, 0);
7425 #ifdef TARGET_NR_nice /* not on alpha */
7426 case TARGET_NR_nice
:
7427 return get_errno(nice(arg1
));
7429 case TARGET_NR_sync
:
7432 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7433 case TARGET_NR_syncfs
:
7434 return get_errno(syncfs(arg1
));
7436 case TARGET_NR_kill
:
7437 return get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
7438 #ifdef TARGET_NR_rename
7439 case TARGET_NR_rename
:
7442 p
= lock_user_string(arg1
);
7443 p2
= lock_user_string(arg2
);
7445 ret
= -TARGET_EFAULT
;
7447 ret
= get_errno(rename(p
, p2
));
7448 unlock_user(p2
, arg2
, 0);
7449 unlock_user(p
, arg1
, 0);
7453 #if defined(TARGET_NR_renameat)
7454 case TARGET_NR_renameat
:
7457 p
= lock_user_string(arg2
);
7458 p2
= lock_user_string(arg4
);
7460 ret
= -TARGET_EFAULT
;
7462 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
7463 unlock_user(p2
, arg4
, 0);
7464 unlock_user(p
, arg2
, 0);
7468 #if defined(TARGET_NR_renameat2)
7469 case TARGET_NR_renameat2
:
7472 p
= lock_user_string(arg2
);
7473 p2
= lock_user_string(arg4
);
7475 ret
= -TARGET_EFAULT
;
7477 ret
= get_errno(sys_renameat2(arg1
, p
, arg3
, p2
, arg5
));
7479 unlock_user(p2
, arg4
, 0);
7480 unlock_user(p
, arg2
, 0);
7484 #ifdef TARGET_NR_mkdir
7485 case TARGET_NR_mkdir
:
7486 if (!(p
= lock_user_string(arg1
)))
7487 return -TARGET_EFAULT
;
7488 ret
= get_errno(mkdir(p
, arg2
));
7489 unlock_user(p
, arg1
, 0);
7492 #if defined(TARGET_NR_mkdirat)
7493 case TARGET_NR_mkdirat
:
7494 if (!(p
= lock_user_string(arg2
)))
7495 return -TARGET_EFAULT
;
7496 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
7497 unlock_user(p
, arg2
, 0);
7500 #ifdef TARGET_NR_rmdir
7501 case TARGET_NR_rmdir
:
7502 if (!(p
= lock_user_string(arg1
)))
7503 return -TARGET_EFAULT
;
7504 ret
= get_errno(rmdir(p
));
7505 unlock_user(p
, arg1
, 0);
7509 ret
= get_errno(dup(arg1
));
7511 fd_trans_dup(arg1
, ret
);
7514 #ifdef TARGET_NR_pipe
7515 case TARGET_NR_pipe
:
7516 return do_pipe(cpu_env
, arg1
, 0, 0);
7518 #ifdef TARGET_NR_pipe2
7519 case TARGET_NR_pipe2
:
7520 return do_pipe(cpu_env
, arg1
,
7521 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
7523 case TARGET_NR_times
:
7525 struct target_tms
*tmsp
;
7527 ret
= get_errno(times(&tms
));
7529 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
7531 return -TARGET_EFAULT
;
7532 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
7533 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
7534 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
7535 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
7538 ret
= host_to_target_clock_t(ret
);
7541 case TARGET_NR_acct
:
7543 ret
= get_errno(acct(NULL
));
7545 if (!(p
= lock_user_string(arg1
))) {
7546 return -TARGET_EFAULT
;
7548 ret
= get_errno(acct(path(p
)));
7549 unlock_user(p
, arg1
, 0);
7552 #ifdef TARGET_NR_umount2
7553 case TARGET_NR_umount2
:
7554 if (!(p
= lock_user_string(arg1
)))
7555 return -TARGET_EFAULT
;
7556 ret
= get_errno(umount2(p
, arg2
));
7557 unlock_user(p
, arg1
, 0);
7560 case TARGET_NR_ioctl
:
7561 return do_ioctl(arg1
, arg2
, arg3
);
7562 #ifdef TARGET_NR_fcntl
7563 case TARGET_NR_fcntl
:
7564 return do_fcntl(arg1
, arg2
, arg3
);
7566 case TARGET_NR_setpgid
:
7567 return get_errno(setpgid(arg1
, arg2
));
7568 case TARGET_NR_umask
:
7569 return get_errno(umask(arg1
));
7570 case TARGET_NR_chroot
:
7571 if (!(p
= lock_user_string(arg1
)))
7572 return -TARGET_EFAULT
;
7573 ret
= get_errno(chroot(p
));
7574 unlock_user(p
, arg1
, 0);
7576 #ifdef TARGET_NR_dup2
7577 case TARGET_NR_dup2
:
7578 ret
= get_errno(dup2(arg1
, arg2
));
7580 fd_trans_dup(arg1
, arg2
);
7584 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7585 case TARGET_NR_dup3
:
7589 if ((arg3
& ~TARGET_O_CLOEXEC
) != 0) {
7592 host_flags
= target_to_host_bitmask(arg3
, fcntl_flags_tbl
);
7593 ret
= get_errno(dup3(arg1
, arg2
, host_flags
));
7595 fd_trans_dup(arg1
, arg2
);
7600 #ifdef TARGET_NR_getppid /* not on alpha */
7601 case TARGET_NR_getppid
:
7602 return get_errno(getppid());
7604 #ifdef TARGET_NR_getpgrp
7605 case TARGET_NR_getpgrp
:
7606 return get_errno(getpgrp());
7608 case TARGET_NR_setsid
:
7609 return get_errno(setsid());
7610 #ifdef TARGET_NR_sigaction
7611 case TARGET_NR_sigaction
:
7613 #if defined(TARGET_ALPHA)
7614 struct target_sigaction act
, oact
, *pact
= 0;
7615 struct target_old_sigaction
*old_act
;
7617 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
7618 return -TARGET_EFAULT
;
7619 act
._sa_handler
= old_act
->_sa_handler
;
7620 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
7621 act
.sa_flags
= old_act
->sa_flags
;
7622 act
.sa_restorer
= 0;
7623 unlock_user_struct(old_act
, arg2
, 0);
7626 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7627 if (!is_error(ret
) && arg3
) {
7628 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
7629 return -TARGET_EFAULT
;
7630 old_act
->_sa_handler
= oact
._sa_handler
;
7631 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
7632 old_act
->sa_flags
= oact
.sa_flags
;
7633 unlock_user_struct(old_act
, arg3
, 1);
7635 #elif defined(TARGET_MIPS)
7636 struct target_sigaction act
, oact
, *pact
, *old_act
;
7639 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
7640 return -TARGET_EFAULT
;
7641 act
._sa_handler
= old_act
->_sa_handler
;
7642 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
7643 act
.sa_flags
= old_act
->sa_flags
;
7644 unlock_user_struct(old_act
, arg2
, 0);
7650 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7652 if (!is_error(ret
) && arg3
) {
7653 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
7654 return -TARGET_EFAULT
;
7655 old_act
->_sa_handler
= oact
._sa_handler
;
7656 old_act
->sa_flags
= oact
.sa_flags
;
7657 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
7658 old_act
->sa_mask
.sig
[1] = 0;
7659 old_act
->sa_mask
.sig
[2] = 0;
7660 old_act
->sa_mask
.sig
[3] = 0;
7661 unlock_user_struct(old_act
, arg3
, 1);
7664 struct target_old_sigaction
*old_act
;
7665 struct target_sigaction act
, oact
, *pact
;
7667 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
7668 return -TARGET_EFAULT
;
7669 act
._sa_handler
= old_act
->_sa_handler
;
7670 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
7671 act
.sa_flags
= old_act
->sa_flags
;
7672 act
.sa_restorer
= old_act
->sa_restorer
;
7673 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7674 act
.ka_restorer
= 0;
7676 unlock_user_struct(old_act
, arg2
, 0);
7681 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7682 if (!is_error(ret
) && arg3
) {
7683 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
7684 return -TARGET_EFAULT
;
7685 old_act
->_sa_handler
= oact
._sa_handler
;
7686 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
7687 old_act
->sa_flags
= oact
.sa_flags
;
7688 old_act
->sa_restorer
= oact
.sa_restorer
;
7689 unlock_user_struct(old_act
, arg3
, 1);
7695 case TARGET_NR_rt_sigaction
:
7697 #if defined(TARGET_ALPHA)
7698 /* For Alpha and SPARC this is a 5 argument syscall, with
7699 * a 'restorer' parameter which must be copied into the
7700 * sa_restorer field of the sigaction struct.
7701 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
7702 * and arg5 is the sigsetsize.
7703 * Alpha also has a separate rt_sigaction struct that it uses
7704 * here; SPARC uses the usual sigaction struct.
7706 struct target_rt_sigaction
*rt_act
;
7707 struct target_sigaction act
, oact
, *pact
= 0;
7709 if (arg4
!= sizeof(target_sigset_t
)) {
7710 return -TARGET_EINVAL
;
7713 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
7714 return -TARGET_EFAULT
;
7715 act
._sa_handler
= rt_act
->_sa_handler
;
7716 act
.sa_mask
= rt_act
->sa_mask
;
7717 act
.sa_flags
= rt_act
->sa_flags
;
7718 act
.sa_restorer
= arg5
;
7719 unlock_user_struct(rt_act
, arg2
, 0);
7722 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7723 if (!is_error(ret
) && arg3
) {
7724 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
7725 return -TARGET_EFAULT
;
7726 rt_act
->_sa_handler
= oact
._sa_handler
;
7727 rt_act
->sa_mask
= oact
.sa_mask
;
7728 rt_act
->sa_flags
= oact
.sa_flags
;
7729 unlock_user_struct(rt_act
, arg3
, 1);
7733 target_ulong restorer
= arg4
;
7734 target_ulong sigsetsize
= arg5
;
7736 target_ulong sigsetsize
= arg4
;
7738 struct target_sigaction
*act
;
7739 struct target_sigaction
*oact
;
7741 if (sigsetsize
!= sizeof(target_sigset_t
)) {
7742 return -TARGET_EINVAL
;
7745 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1)) {
7746 return -TARGET_EFAULT
;
7748 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7749 act
->ka_restorer
= restorer
;
7755 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
7756 ret
= -TARGET_EFAULT
;
7757 goto rt_sigaction_fail
;
7761 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
7764 unlock_user_struct(act
, arg2
, 0);
7766 unlock_user_struct(oact
, arg3
, 1);
7770 #ifdef TARGET_NR_sgetmask /* not on alpha */
7771 case TARGET_NR_sgetmask
:
7774 abi_ulong target_set
;
7775 ret
= do_sigprocmask(0, NULL
, &cur_set
);
7777 host_to_target_old_sigset(&target_set
, &cur_set
);
7783 #ifdef TARGET_NR_ssetmask /* not on alpha */
7784 case TARGET_NR_ssetmask
:
7787 abi_ulong target_set
= arg1
;
7788 target_to_host_old_sigset(&set
, &target_set
);
7789 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
7791 host_to_target_old_sigset(&target_set
, &oset
);
7797 #ifdef TARGET_NR_sigprocmask
7798 case TARGET_NR_sigprocmask
:
7800 #if defined(TARGET_ALPHA)
7801 sigset_t set
, oldset
;
7806 case TARGET_SIG_BLOCK
:
7809 case TARGET_SIG_UNBLOCK
:
7812 case TARGET_SIG_SETMASK
:
7816 return -TARGET_EINVAL
;
7819 target_to_host_old_sigset(&set
, &mask
);
7821 ret
= do_sigprocmask(how
, &set
, &oldset
);
7822 if (!is_error(ret
)) {
7823 host_to_target_old_sigset(&mask
, &oldset
);
7825 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
7828 sigset_t set
, oldset
, *set_ptr
;
7833 case TARGET_SIG_BLOCK
:
7836 case TARGET_SIG_UNBLOCK
:
7839 case TARGET_SIG_SETMASK
:
7843 return -TARGET_EINVAL
;
7845 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
7846 return -TARGET_EFAULT
;
7847 target_to_host_old_sigset(&set
, p
);
7848 unlock_user(p
, arg2
, 0);
7854 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
7855 if (!is_error(ret
) && arg3
) {
7856 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
7857 return -TARGET_EFAULT
;
7858 host_to_target_old_sigset(p
, &oldset
);
7859 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
7865 case TARGET_NR_rt_sigprocmask
:
7868 sigset_t set
, oldset
, *set_ptr
;
7870 if (arg4
!= sizeof(target_sigset_t
)) {
7871 return -TARGET_EINVAL
;
7876 case TARGET_SIG_BLOCK
:
7879 case TARGET_SIG_UNBLOCK
:
7882 case TARGET_SIG_SETMASK
:
7886 return -TARGET_EINVAL
;
7888 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
7889 return -TARGET_EFAULT
;
7890 target_to_host_sigset(&set
, p
);
7891 unlock_user(p
, arg2
, 0);
7897 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
7898 if (!is_error(ret
) && arg3
) {
7899 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
7900 return -TARGET_EFAULT
;
7901 host_to_target_sigset(p
, &oldset
);
7902 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
7906 #ifdef TARGET_NR_sigpending
7907 case TARGET_NR_sigpending
:
7910 ret
= get_errno(sigpending(&set
));
7911 if (!is_error(ret
)) {
7912 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
7913 return -TARGET_EFAULT
;
7914 host_to_target_old_sigset(p
, &set
);
7915 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
7920 case TARGET_NR_rt_sigpending
:
7924 /* Yes, this check is >, not != like most. We follow the kernel's
7925 * logic and it does it like this because it implements
7926 * NR_sigpending through the same code path, and in that case
7927 * the old_sigset_t is smaller in size.
7929 if (arg2
> sizeof(target_sigset_t
)) {
7930 return -TARGET_EINVAL
;
7933 ret
= get_errno(sigpending(&set
));
7934 if (!is_error(ret
)) {
7935 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
7936 return -TARGET_EFAULT
;
7937 host_to_target_sigset(p
, &set
);
7938 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
7942 #ifdef TARGET_NR_sigsuspend
7943 case TARGET_NR_sigsuspend
:
7945 TaskState
*ts
= cpu
->opaque
;
7946 #if defined(TARGET_ALPHA)
7947 abi_ulong mask
= arg1
;
7948 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
7950 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
7951 return -TARGET_EFAULT
;
7952 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
7953 unlock_user(p
, arg1
, 0);
7955 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
7957 if (ret
!= -TARGET_ERESTARTSYS
) {
7958 ts
->in_sigsuspend
= 1;
7963 case TARGET_NR_rt_sigsuspend
:
7965 TaskState
*ts
= cpu
->opaque
;
7967 if (arg2
!= sizeof(target_sigset_t
)) {
7968 return -TARGET_EINVAL
;
7970 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
7971 return -TARGET_EFAULT
;
7972 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
7973 unlock_user(p
, arg1
, 0);
7974 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
7976 if (ret
!= -TARGET_ERESTARTSYS
) {
7977 ts
->in_sigsuspend
= 1;
7981 case TARGET_NR_rt_sigtimedwait
:
7984 struct timespec uts
, *puts
;
7987 if (arg4
!= sizeof(target_sigset_t
)) {
7988 return -TARGET_EINVAL
;
7991 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
7992 return -TARGET_EFAULT
;
7993 target_to_host_sigset(&set
, p
);
7994 unlock_user(p
, arg1
, 0);
7997 target_to_host_timespec(puts
, arg3
);
8001 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
8003 if (!is_error(ret
)) {
8005 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
8008 return -TARGET_EFAULT
;
8010 host_to_target_siginfo(p
, &uinfo
);
8011 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
8013 ret
= host_to_target_signal(ret
);
8017 case TARGET_NR_rt_sigqueueinfo
:
8021 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
8023 return -TARGET_EFAULT
;
8025 target_to_host_siginfo(&uinfo
, p
);
8026 unlock_user(p
, arg3
, 0);
8027 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
8030 case TARGET_NR_rt_tgsigqueueinfo
:
8034 p
= lock_user(VERIFY_READ
, arg4
, sizeof(target_siginfo_t
), 1);
8036 return -TARGET_EFAULT
;
8038 target_to_host_siginfo(&uinfo
, p
);
8039 unlock_user(p
, arg4
, 0);
8040 ret
= get_errno(sys_rt_tgsigqueueinfo(arg1
, arg2
, arg3
, &uinfo
));
8043 #ifdef TARGET_NR_sigreturn
8044 case TARGET_NR_sigreturn
:
8045 if (block_signals()) {
8046 return -TARGET_ERESTARTSYS
;
8048 return do_sigreturn(cpu_env
);
8050 case TARGET_NR_rt_sigreturn
:
8051 if (block_signals()) {
8052 return -TARGET_ERESTARTSYS
;
8054 return do_rt_sigreturn(cpu_env
);
8055 case TARGET_NR_sethostname
:
8056 if (!(p
= lock_user_string(arg1
)))
8057 return -TARGET_EFAULT
;
8058 ret
= get_errno(sethostname(p
, arg2
));
8059 unlock_user(p
, arg1
, 0);
8061 #ifdef TARGET_NR_setrlimit
8062 case TARGET_NR_setrlimit
:
8064 int resource
= target_to_host_resource(arg1
);
8065 struct target_rlimit
*target_rlim
;
8067 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
8068 return -TARGET_EFAULT
;
8069 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
8070 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
8071 unlock_user_struct(target_rlim
, arg2
, 0);
8073 * If we just passed through resource limit settings for memory then
8074 * they would also apply to QEMU's own allocations, and QEMU will
8075 * crash or hang or die if its allocations fail. Ideally we would
8076 * track the guest allocations in QEMU and apply the limits ourselves.
8077 * For now, just tell the guest the call succeeded but don't actually
8080 if (resource
!= RLIMIT_AS
&&
8081 resource
!= RLIMIT_DATA
&&
8082 resource
!= RLIMIT_STACK
) {
8083 return get_errno(setrlimit(resource
, &rlim
));
8089 #ifdef TARGET_NR_getrlimit
8090 case TARGET_NR_getrlimit
:
8092 int resource
= target_to_host_resource(arg1
);
8093 struct target_rlimit
*target_rlim
;
8096 ret
= get_errno(getrlimit(resource
, &rlim
));
8097 if (!is_error(ret
)) {
8098 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
8099 return -TARGET_EFAULT
;
8100 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
8101 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
8102 unlock_user_struct(target_rlim
, arg2
, 1);
8107 case TARGET_NR_getrusage
:
8109 struct rusage rusage
;
8110 ret
= get_errno(getrusage(arg1
, &rusage
));
8111 if (!is_error(ret
)) {
8112 ret
= host_to_target_rusage(arg2
, &rusage
);
8116 case TARGET_NR_gettimeofday
:
8119 ret
= get_errno(gettimeofday(&tv
, NULL
));
8120 if (!is_error(ret
)) {
8121 if (copy_to_user_timeval(arg1
, &tv
))
8122 return -TARGET_EFAULT
;
8126 case TARGET_NR_settimeofday
:
8128 struct timeval tv
, *ptv
= NULL
;
8129 struct timezone tz
, *ptz
= NULL
;
8132 if (copy_from_user_timeval(&tv
, arg1
)) {
8133 return -TARGET_EFAULT
;
8139 if (copy_from_user_timezone(&tz
, arg2
)) {
8140 return -TARGET_EFAULT
;
8145 return get_errno(settimeofday(ptv
, ptz
));
8147 #if defined(TARGET_NR_select)
8148 case TARGET_NR_select
:
8149 #if defined(TARGET_WANT_NI_OLD_SELECT)
8150 /* some architectures used to have old_select here
8151 * but now ENOSYS it.
8153 ret
= -TARGET_ENOSYS
;
8154 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8155 ret
= do_old_select(arg1
);
8157 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
8161 #ifdef TARGET_NR_pselect6
8162 case TARGET_NR_pselect6
:
8164 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
8165 fd_set rfds
, wfds
, efds
;
8166 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
8167 struct timespec ts
, *ts_ptr
;
8170 * The 6th arg is actually two args smashed together,
8171 * so we cannot use the C library.
8179 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
8180 target_sigset_t
*target_sigset
;
8188 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
8192 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
8196 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
8202 * This takes a timespec, and not a timeval, so we cannot
8203 * use the do_select() helper ...
8206 if (target_to_host_timespec(&ts
, ts_addr
)) {
8207 return -TARGET_EFAULT
;
8214 /* Extract the two packed args for the sigset */
8217 sig
.size
= SIGSET_T_SIZE
;
8219 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
8221 return -TARGET_EFAULT
;
8223 arg_sigset
= tswapal(arg7
[0]);
8224 arg_sigsize
= tswapal(arg7
[1]);
8225 unlock_user(arg7
, arg6
, 0);
8229 if (arg_sigsize
!= sizeof(*target_sigset
)) {
8230 /* Like the kernel, we enforce correct size sigsets */
8231 return -TARGET_EINVAL
;
8233 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
8234 sizeof(*target_sigset
), 1);
8235 if (!target_sigset
) {
8236 return -TARGET_EFAULT
;
8238 target_to_host_sigset(&set
, target_sigset
);
8239 unlock_user(target_sigset
, arg_sigset
, 0);
8247 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
8250 if (!is_error(ret
)) {
8251 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
8252 return -TARGET_EFAULT
;
8253 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
8254 return -TARGET_EFAULT
;
8255 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
8256 return -TARGET_EFAULT
;
8258 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
8259 return -TARGET_EFAULT
;
8264 #ifdef TARGET_NR_symlink
8265 case TARGET_NR_symlink
:
8268 p
= lock_user_string(arg1
);
8269 p2
= lock_user_string(arg2
);
8271 ret
= -TARGET_EFAULT
;
8273 ret
= get_errno(symlink(p
, p2
));
8274 unlock_user(p2
, arg2
, 0);
8275 unlock_user(p
, arg1
, 0);
8279 #if defined(TARGET_NR_symlinkat)
8280 case TARGET_NR_symlinkat
:
8283 p
= lock_user_string(arg1
);
8284 p2
= lock_user_string(arg3
);
8286 ret
= -TARGET_EFAULT
;
8288 ret
= get_errno(symlinkat(p
, arg2
, p2
));
8289 unlock_user(p2
, arg3
, 0);
8290 unlock_user(p
, arg1
, 0);
8294 #ifdef TARGET_NR_readlink
8295 case TARGET_NR_readlink
:
8298 p
= lock_user_string(arg1
);
8299 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8301 ret
= -TARGET_EFAULT
;
8303 /* Short circuit this for the magic exe check. */
8304 ret
= -TARGET_EINVAL
;
8305 } else if (is_proc_myself((const char *)p
, "exe")) {
8306 char real
[PATH_MAX
], *temp
;
8307 temp
= realpath(exec_path
, real
);
8308 /* Return value is # of bytes that we wrote to the buffer. */
8310 ret
= get_errno(-1);
8312 /* Don't worry about sign mismatch as earlier mapping
8313 * logic would have thrown a bad address error. */
8314 ret
= MIN(strlen(real
), arg3
);
8315 /* We cannot NUL terminate the string. */
8316 memcpy(p2
, real
, ret
);
8319 ret
= get_errno(readlink(path(p
), p2
, arg3
));
8321 unlock_user(p2
, arg2
, ret
);
8322 unlock_user(p
, arg1
, 0);
8326 #if defined(TARGET_NR_readlinkat)
8327 case TARGET_NR_readlinkat
:
8330 p
= lock_user_string(arg2
);
8331 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
8333 ret
= -TARGET_EFAULT
;
8334 } else if (is_proc_myself((const char *)p
, "exe")) {
8335 char real
[PATH_MAX
], *temp
;
8336 temp
= realpath(exec_path
, real
);
8337 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
8338 snprintf((char *)p2
, arg4
, "%s", real
);
8340 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
8342 unlock_user(p2
, arg3
, ret
);
8343 unlock_user(p
, arg2
, 0);
8347 #ifdef TARGET_NR_swapon
8348 case TARGET_NR_swapon
:
8349 if (!(p
= lock_user_string(arg1
)))
8350 return -TARGET_EFAULT
;
8351 ret
= get_errno(swapon(p
, arg2
));
8352 unlock_user(p
, arg1
, 0);
8355 case TARGET_NR_reboot
:
8356 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
8357 /* arg4 must be ignored in all other cases */
8358 p
= lock_user_string(arg4
);
8360 return -TARGET_EFAULT
;
8362 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
8363 unlock_user(p
, arg4
, 0);
8365 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
8368 #ifdef TARGET_NR_mmap
8369 case TARGET_NR_mmap
:
8370 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8371 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8372 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8373 || defined(TARGET_S390X)
8376 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
8377 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
8378 return -TARGET_EFAULT
;
8385 unlock_user(v
, arg1
, 0);
8386 ret
= get_errno(target_mmap(v1
, v2
, v3
,
8387 target_to_host_bitmask(v4
, mmap_flags_tbl
),
8391 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
8392 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
8398 #ifdef TARGET_NR_mmap2
8399 case TARGET_NR_mmap2
:
8401 #define MMAP_SHIFT 12
8403 ret
= target_mmap(arg1
, arg2
, arg3
,
8404 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
8405 arg5
, arg6
<< MMAP_SHIFT
);
8406 return get_errno(ret
);
8408 case TARGET_NR_munmap
:
8409 return get_errno(target_munmap(arg1
, arg2
));
8410 case TARGET_NR_mprotect
:
8412 TaskState
*ts
= cpu
->opaque
;
8413 /* Special hack to detect libc making the stack executable. */
8414 if ((arg3
& PROT_GROWSDOWN
)
8415 && arg1
>= ts
->info
->stack_limit
8416 && arg1
<= ts
->info
->start_stack
) {
8417 arg3
&= ~PROT_GROWSDOWN
;
8418 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
8419 arg1
= ts
->info
->stack_limit
;
8422 return get_errno(target_mprotect(arg1
, arg2
, arg3
));
8423 #ifdef TARGET_NR_mremap
8424 case TARGET_NR_mremap
:
8425 return get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
8427 /* ??? msync/mlock/munlock are broken for softmmu. */
8428 #ifdef TARGET_NR_msync
8429 case TARGET_NR_msync
:
8430 return get_errno(msync(g2h(arg1
), arg2
, arg3
));
8432 #ifdef TARGET_NR_mlock
8433 case TARGET_NR_mlock
:
8434 return get_errno(mlock(g2h(arg1
), arg2
));
8436 #ifdef TARGET_NR_munlock
8437 case TARGET_NR_munlock
:
8438 return get_errno(munlock(g2h(arg1
), arg2
));
8440 #ifdef TARGET_NR_mlockall
8441 case TARGET_NR_mlockall
:
8442 return get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
8444 #ifdef TARGET_NR_munlockall
8445 case TARGET_NR_munlockall
:
8446 return get_errno(munlockall());
8448 #ifdef TARGET_NR_truncate
8449 case TARGET_NR_truncate
:
8450 if (!(p
= lock_user_string(arg1
)))
8451 return -TARGET_EFAULT
;
8452 ret
= get_errno(truncate(p
, arg2
));
8453 unlock_user(p
, arg1
, 0);
8456 #ifdef TARGET_NR_ftruncate
8457 case TARGET_NR_ftruncate
:
8458 return get_errno(ftruncate(arg1
, arg2
));
8460 case TARGET_NR_fchmod
:
8461 return get_errno(fchmod(arg1
, arg2
));
8462 #if defined(TARGET_NR_fchmodat)
8463 case TARGET_NR_fchmodat
:
8464 if (!(p
= lock_user_string(arg2
)))
8465 return -TARGET_EFAULT
;
8466 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
8467 unlock_user(p
, arg2
, 0);
8470 case TARGET_NR_getpriority
:
8471 /* Note that negative values are valid for getpriority, so we must
8472 differentiate based on errno settings. */
8474 ret
= getpriority(arg1
, arg2
);
8475 if (ret
== -1 && errno
!= 0) {
8476 return -host_to_target_errno(errno
);
8479 /* Return value is the unbiased priority. Signal no error. */
8480 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
8482 /* Return value is a biased priority to avoid negative numbers. */
8486 case TARGET_NR_setpriority
:
8487 return get_errno(setpriority(arg1
, arg2
, arg3
));
8488 #ifdef TARGET_NR_statfs
8489 case TARGET_NR_statfs
:
8490 if (!(p
= lock_user_string(arg1
))) {
8491 return -TARGET_EFAULT
;
8493 ret
= get_errno(statfs(path(p
), &stfs
));
8494 unlock_user(p
, arg1
, 0);
8496 if (!is_error(ret
)) {
8497 struct target_statfs
*target_stfs
;
8499 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
8500 return -TARGET_EFAULT
;
8501 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
8502 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
8503 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
8504 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
8505 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
8506 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
8507 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
8508 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
8509 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
8510 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
8511 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
8512 #ifdef _STATFS_F_FLAGS
8513 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
8515 __put_user(0, &target_stfs
->f_flags
);
8517 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
8518 unlock_user_struct(target_stfs
, arg2
, 1);
8522 #ifdef TARGET_NR_fstatfs
8523 case TARGET_NR_fstatfs
:
8524 ret
= get_errno(fstatfs(arg1
, &stfs
));
8525 goto convert_statfs
;
8527 #ifdef TARGET_NR_statfs64
8528 case TARGET_NR_statfs64
:
8529 if (!(p
= lock_user_string(arg1
))) {
8530 return -TARGET_EFAULT
;
8532 ret
= get_errno(statfs(path(p
), &stfs
));
8533 unlock_user(p
, arg1
, 0);
8535 if (!is_error(ret
)) {
8536 struct target_statfs64
*target_stfs
;
8538 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
8539 return -TARGET_EFAULT
;
8540 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
8541 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
8542 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
8543 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
8544 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
8545 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
8546 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
8547 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
8548 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
8549 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
8550 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
8551 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
8552 unlock_user_struct(target_stfs
, arg3
, 1);
8555 case TARGET_NR_fstatfs64
:
8556 ret
= get_errno(fstatfs(arg1
, &stfs
));
8557 goto convert_statfs64
;
8559 #ifdef TARGET_NR_socketcall
8560 case TARGET_NR_socketcall
:
8561 return do_socketcall(arg1
, arg2
);
8563 #ifdef TARGET_NR_accept
8564 case TARGET_NR_accept
:
8565 return do_accept4(arg1
, arg2
, arg3
, 0);
8567 #ifdef TARGET_NR_accept4
8568 case TARGET_NR_accept4
:
8569 return do_accept4(arg1
, arg2
, arg3
, arg4
);
8571 #ifdef TARGET_NR_bind
8572 case TARGET_NR_bind
:
8573 return do_bind(arg1
, arg2
, arg3
);
8575 #ifdef TARGET_NR_connect
8576 case TARGET_NR_connect
:
8577 return do_connect(arg1
, arg2
, arg3
);
8579 #ifdef TARGET_NR_getpeername
8580 case TARGET_NR_getpeername
:
8581 return do_getpeername(arg1
, arg2
, arg3
);
8583 #ifdef TARGET_NR_getsockname
8584 case TARGET_NR_getsockname
:
8585 return do_getsockname(arg1
, arg2
, arg3
);
8587 #ifdef TARGET_NR_getsockopt
8588 case TARGET_NR_getsockopt
:
8589 return do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
8591 #ifdef TARGET_NR_listen
8592 case TARGET_NR_listen
:
8593 return get_errno(listen(arg1
, arg2
));
8595 #ifdef TARGET_NR_recv
8596 case TARGET_NR_recv
:
8597 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
8599 #ifdef TARGET_NR_recvfrom
8600 case TARGET_NR_recvfrom
:
8601 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8603 #ifdef TARGET_NR_recvmsg
8604 case TARGET_NR_recvmsg
:
8605 return do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
8607 #ifdef TARGET_NR_send
8608 case TARGET_NR_send
:
8609 return do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
8611 #ifdef TARGET_NR_sendmsg
8612 case TARGET_NR_sendmsg
:
8613 return do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
8615 #ifdef TARGET_NR_sendmmsg
8616 case TARGET_NR_sendmmsg
:
8617 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
8618 case TARGET_NR_recvmmsg
:
8619 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
8621 #ifdef TARGET_NR_sendto
8622 case TARGET_NR_sendto
:
8623 return do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8625 #ifdef TARGET_NR_shutdown
8626 case TARGET_NR_shutdown
:
8627 return get_errno(shutdown(arg1
, arg2
));
8629 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8630 case TARGET_NR_getrandom
:
8631 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
8633 return -TARGET_EFAULT
;
8635 ret
= get_errno(getrandom(p
, arg2
, arg3
));
8636 unlock_user(p
, arg1
, ret
);
8639 #ifdef TARGET_NR_socket
8640 case TARGET_NR_socket
:
8641 return do_socket(arg1
, arg2
, arg3
);
8643 #ifdef TARGET_NR_socketpair
8644 case TARGET_NR_socketpair
:
8645 return do_socketpair(arg1
, arg2
, arg3
, arg4
);
8647 #ifdef TARGET_NR_setsockopt
8648 case TARGET_NR_setsockopt
:
8649 return do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
8651 #if defined(TARGET_NR_syslog)
8652 case TARGET_NR_syslog
:
8657 case TARGET_SYSLOG_ACTION_CLOSE
: /* Close log */
8658 case TARGET_SYSLOG_ACTION_OPEN
: /* Open log */
8659 case TARGET_SYSLOG_ACTION_CLEAR
: /* Clear ring buffer */
8660 case TARGET_SYSLOG_ACTION_CONSOLE_OFF
: /* Disable logging */
8661 case TARGET_SYSLOG_ACTION_CONSOLE_ON
: /* Enable logging */
8662 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL
: /* Set messages level */
8663 case TARGET_SYSLOG_ACTION_SIZE_UNREAD
: /* Number of chars */
8664 case TARGET_SYSLOG_ACTION_SIZE_BUFFER
: /* Size of the buffer */
8665 return get_errno(sys_syslog((int)arg1
, NULL
, (int)arg3
));
8666 case TARGET_SYSLOG_ACTION_READ
: /* Read from log */
8667 case TARGET_SYSLOG_ACTION_READ_CLEAR
: /* Read/clear msgs */
8668 case TARGET_SYSLOG_ACTION_READ_ALL
: /* Read last messages */
8671 return -TARGET_EINVAL
;
8676 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8678 return -TARGET_EFAULT
;
8680 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
8681 unlock_user(p
, arg2
, arg3
);
8685 return -TARGET_EINVAL
;
8690 case TARGET_NR_setitimer
:
8692 struct itimerval value
, ovalue
, *pvalue
;
8696 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
8697 || copy_from_user_timeval(&pvalue
->it_value
,
8698 arg2
+ sizeof(struct target_timeval
)))
8699 return -TARGET_EFAULT
;
8703 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
8704 if (!is_error(ret
) && arg3
) {
8705 if (copy_to_user_timeval(arg3
,
8706 &ovalue
.it_interval
)
8707 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
8709 return -TARGET_EFAULT
;
8713 case TARGET_NR_getitimer
:
8715 struct itimerval value
;
8717 ret
= get_errno(getitimer(arg1
, &value
));
8718 if (!is_error(ret
) && arg2
) {
8719 if (copy_to_user_timeval(arg2
,
8721 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
8723 return -TARGET_EFAULT
;
8727 #ifdef TARGET_NR_stat
8728 case TARGET_NR_stat
:
8729 if (!(p
= lock_user_string(arg1
))) {
8730 return -TARGET_EFAULT
;
8732 ret
= get_errno(stat(path(p
), &st
));
8733 unlock_user(p
, arg1
, 0);
8736 #ifdef TARGET_NR_lstat
8737 case TARGET_NR_lstat
:
8738 if (!(p
= lock_user_string(arg1
))) {
8739 return -TARGET_EFAULT
;
8741 ret
= get_errno(lstat(path(p
), &st
));
8742 unlock_user(p
, arg1
, 0);
8745 #ifdef TARGET_NR_fstat
8746 case TARGET_NR_fstat
:
8748 ret
= get_errno(fstat(arg1
, &st
));
8749 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
8752 if (!is_error(ret
)) {
8753 struct target_stat
*target_st
;
8755 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
8756 return -TARGET_EFAULT
;
8757 memset(target_st
, 0, sizeof(*target_st
));
8758 __put_user(st
.st_dev
, &target_st
->st_dev
);
8759 __put_user(st
.st_ino
, &target_st
->st_ino
);
8760 __put_user(st
.st_mode
, &target_st
->st_mode
);
8761 __put_user(st
.st_uid
, &target_st
->st_uid
);
8762 __put_user(st
.st_gid
, &target_st
->st_gid
);
8763 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
8764 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
8765 __put_user(st
.st_size
, &target_st
->st_size
);
8766 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
8767 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
8768 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
8769 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
8770 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
8771 unlock_user_struct(target_st
, arg2
, 1);
8776 case TARGET_NR_vhangup
:
8777 return get_errno(vhangup());
8778 #ifdef TARGET_NR_syscall
8779 case TARGET_NR_syscall
:
8780 return do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
8781 arg6
, arg7
, arg8
, 0);
8783 case TARGET_NR_wait4
:
8786 abi_long status_ptr
= arg2
;
8787 struct rusage rusage
, *rusage_ptr
;
8788 abi_ulong target_rusage
= arg4
;
8789 abi_long rusage_err
;
8791 rusage_ptr
= &rusage
;
8794 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
8795 if (!is_error(ret
)) {
8796 if (status_ptr
&& ret
) {
8797 status
= host_to_target_waitstatus(status
);
8798 if (put_user_s32(status
, status_ptr
))
8799 return -TARGET_EFAULT
;
8801 if (target_rusage
) {
8802 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
8810 #ifdef TARGET_NR_swapoff
8811 case TARGET_NR_swapoff
:
8812 if (!(p
= lock_user_string(arg1
)))
8813 return -TARGET_EFAULT
;
8814 ret
= get_errno(swapoff(p
));
8815 unlock_user(p
, arg1
, 0);
8818 case TARGET_NR_sysinfo
:
8820 struct target_sysinfo
*target_value
;
8821 struct sysinfo value
;
8822 ret
= get_errno(sysinfo(&value
));
8823 if (!is_error(ret
) && arg1
)
8825 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
8826 return -TARGET_EFAULT
;
8827 __put_user(value
.uptime
, &target_value
->uptime
);
8828 __put_user(value
.loads
[0], &target_value
->loads
[0]);
8829 __put_user(value
.loads
[1], &target_value
->loads
[1]);
8830 __put_user(value
.loads
[2], &target_value
->loads
[2]);
8831 __put_user(value
.totalram
, &target_value
->totalram
);
8832 __put_user(value
.freeram
, &target_value
->freeram
);
8833 __put_user(value
.sharedram
, &target_value
->sharedram
);
8834 __put_user(value
.bufferram
, &target_value
->bufferram
);
8835 __put_user(value
.totalswap
, &target_value
->totalswap
);
8836 __put_user(value
.freeswap
, &target_value
->freeswap
);
8837 __put_user(value
.procs
, &target_value
->procs
);
8838 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
8839 __put_user(value
.freehigh
, &target_value
->freehigh
);
8840 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
8841 unlock_user_struct(target_value
, arg1
, 1);
8845 #ifdef TARGET_NR_ipc
8847 return do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8849 #ifdef TARGET_NR_semget
8850 case TARGET_NR_semget
:
8851 return get_errno(semget(arg1
, arg2
, arg3
));
8853 #ifdef TARGET_NR_semop
8854 case TARGET_NR_semop
:
8855 return do_semop(arg1
, arg2
, arg3
);
8857 #ifdef TARGET_NR_semctl
8858 case TARGET_NR_semctl
:
8859 return do_semctl(arg1
, arg2
, arg3
, arg4
);
8861 #ifdef TARGET_NR_msgctl
8862 case TARGET_NR_msgctl
:
8863 return do_msgctl(arg1
, arg2
, arg3
);
8865 #ifdef TARGET_NR_msgget
8866 case TARGET_NR_msgget
:
8867 return get_errno(msgget(arg1
, arg2
));
8869 #ifdef TARGET_NR_msgrcv
8870 case TARGET_NR_msgrcv
:
8871 return do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
8873 #ifdef TARGET_NR_msgsnd
8874 case TARGET_NR_msgsnd
:
8875 return do_msgsnd(arg1
, arg2
, arg3
, arg4
);
8877 #ifdef TARGET_NR_shmget
8878 case TARGET_NR_shmget
:
8879 return get_errno(shmget(arg1
, arg2
, arg3
));
8881 #ifdef TARGET_NR_shmctl
8882 case TARGET_NR_shmctl
:
8883 return do_shmctl(arg1
, arg2
, arg3
);
8885 #ifdef TARGET_NR_shmat
8886 case TARGET_NR_shmat
:
8887 return do_shmat(cpu_env
, arg1
, arg2
, arg3
);
8889 #ifdef TARGET_NR_shmdt
8890 case TARGET_NR_shmdt
:
8891 return do_shmdt(arg1
);
8893 case TARGET_NR_fsync
:
8894 return get_errno(fsync(arg1
));
8895 case TARGET_NR_clone
:
8896 /* Linux manages to have three different orderings for its
8897 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
8898 * match the kernel's CONFIG_CLONE_* settings.
8899 * Microblaze is further special in that it uses a sixth
8900 * implicit argument to clone for the TLS pointer.
8902 #if defined(TARGET_MICROBLAZE)
8903 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
8904 #elif defined(TARGET_CLONE_BACKWARDS)
8905 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
8906 #elif defined(TARGET_CLONE_BACKWARDS2)
8907 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
8909 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
8912 #ifdef __NR_exit_group
8913 /* new thread calls */
8914 case TARGET_NR_exit_group
:
8915 preexit_cleanup(cpu_env
, arg1
);
8916 return get_errno(exit_group(arg1
));
8918 case TARGET_NR_setdomainname
:
8919 if (!(p
= lock_user_string(arg1
)))
8920 return -TARGET_EFAULT
;
8921 ret
= get_errno(setdomainname(p
, arg2
));
8922 unlock_user(p
, arg1
, 0);
8924 case TARGET_NR_uname
:
8925 /* no need to transcode because we use the linux syscall */
8927 struct new_utsname
* buf
;
8929 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
8930 return -TARGET_EFAULT
;
8931 ret
= get_errno(sys_uname(buf
));
8932 if (!is_error(ret
)) {
8933 /* Overwrite the native machine name with whatever is being
8935 g_strlcpy(buf
->machine
, cpu_to_uname_machine(cpu_env
),
8936 sizeof(buf
->machine
));
8937 /* Allow the user to override the reported release. */
8938 if (qemu_uname_release
&& *qemu_uname_release
) {
8939 g_strlcpy(buf
->release
, qemu_uname_release
,
8940 sizeof(buf
->release
));
8943 unlock_user_struct(buf
, arg1
, 1);
8947 case TARGET_NR_modify_ldt
:
8948 return do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
8949 #if !defined(TARGET_X86_64)
8950 case TARGET_NR_vm86
:
8951 return do_vm86(cpu_env
, arg1
, arg2
);
8954 case TARGET_NR_adjtimex
:
8956 struct timex host_buf
;
8958 if (target_to_host_timex(&host_buf
, arg1
) != 0) {
8959 return -TARGET_EFAULT
;
8961 ret
= get_errno(adjtimex(&host_buf
));
8962 if (!is_error(ret
)) {
8963 if (host_to_target_timex(arg1
, &host_buf
) != 0) {
8964 return -TARGET_EFAULT
;
8969 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
8970 case TARGET_NR_clock_adjtime
:
8972 struct timex htx
, *phtx
= &htx
;
8974 if (target_to_host_timex(phtx
, arg2
) != 0) {
8975 return -TARGET_EFAULT
;
8977 ret
= get_errno(clock_adjtime(arg1
, phtx
));
8978 if (!is_error(ret
) && phtx
) {
8979 if (host_to_target_timex(arg2
, phtx
) != 0) {
8980 return -TARGET_EFAULT
;
8986 case TARGET_NR_getpgid
:
8987 return get_errno(getpgid(arg1
));
8988 case TARGET_NR_fchdir
:
8989 return get_errno(fchdir(arg1
));
8990 case TARGET_NR_personality
:
8991 return get_errno(personality(arg1
));
8992 #ifdef TARGET_NR__llseek /* Not on alpha */
8993 case TARGET_NR__llseek
:
8996 #if !defined(__NR_llseek)
8997 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
8999 ret
= get_errno(res
);
9004 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
9006 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
9007 return -TARGET_EFAULT
;
9012 #ifdef TARGET_NR_getdents
9013 case TARGET_NR_getdents
:
9014 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9015 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9017 struct target_dirent
*target_dirp
;
9018 struct linux_dirent
*dirp
;
9019 abi_long count
= arg3
;
9021 dirp
= g_try_malloc(count
);
9023 return -TARGET_ENOMEM
;
9026 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9027 if (!is_error(ret
)) {
9028 struct linux_dirent
*de
;
9029 struct target_dirent
*tde
;
9031 int reclen
, treclen
;
9032 int count1
, tnamelen
;
9036 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9037 return -TARGET_EFAULT
;
9040 reclen
= de
->d_reclen
;
9041 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
9042 assert(tnamelen
>= 0);
9043 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
9044 assert(count1
+ treclen
<= count
);
9045 tde
->d_reclen
= tswap16(treclen
);
9046 tde
->d_ino
= tswapal(de
->d_ino
);
9047 tde
->d_off
= tswapal(de
->d_off
);
9048 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
9049 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9051 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9055 unlock_user(target_dirp
, arg2
, ret
);
9061 struct linux_dirent
*dirp
;
9062 abi_long count
= arg3
;
9064 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9065 return -TARGET_EFAULT
;
9066 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9067 if (!is_error(ret
)) {
9068 struct linux_dirent
*de
;
9073 reclen
= de
->d_reclen
;
9076 de
->d_reclen
= tswap16(reclen
);
9077 tswapls(&de
->d_ino
);
9078 tswapls(&de
->d_off
);
9079 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9083 unlock_user(dirp
, arg2
, ret
);
9087 /* Implement getdents in terms of getdents64 */
9089 struct linux_dirent64
*dirp
;
9090 abi_long count
= arg3
;
9092 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
9094 return -TARGET_EFAULT
;
9096 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9097 if (!is_error(ret
)) {
9098 /* Convert the dirent64 structs to target dirent. We do this
9099 * in-place, since we can guarantee that a target_dirent is no
9100 * larger than a dirent64; however this means we have to be
9101 * careful to read everything before writing in the new format.
9103 struct linux_dirent64
*de
;
9104 struct target_dirent
*tde
;
9109 tde
= (struct target_dirent
*)dirp
;
9111 int namelen
, treclen
;
9112 int reclen
= de
->d_reclen
;
9113 uint64_t ino
= de
->d_ino
;
9114 int64_t off
= de
->d_off
;
9115 uint8_t type
= de
->d_type
;
9117 namelen
= strlen(de
->d_name
);
9118 treclen
= offsetof(struct target_dirent
, d_name
)
9120 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
9122 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
9123 tde
->d_ino
= tswapal(ino
);
9124 tde
->d_off
= tswapal(off
);
9125 tde
->d_reclen
= tswap16(treclen
);
9126 /* The target_dirent type is in what was formerly a padding
9127 * byte at the end of the structure:
9129 *(((char *)tde
) + treclen
- 1) = type
;
9131 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9132 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9138 unlock_user(dirp
, arg2
, ret
);
9142 #endif /* TARGET_NR_getdents */
9143 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9144 case TARGET_NR_getdents64
:
9146 struct linux_dirent64
*dirp
;
9147 abi_long count
= arg3
;
9148 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9149 return -TARGET_EFAULT
;
9150 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9151 if (!is_error(ret
)) {
9152 struct linux_dirent64
*de
;
9157 reclen
= de
->d_reclen
;
9160 de
->d_reclen
= tswap16(reclen
);
9161 tswap64s((uint64_t *)&de
->d_ino
);
9162 tswap64s((uint64_t *)&de
->d_off
);
9163 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9167 unlock_user(dirp
, arg2
, ret
);
9170 #endif /* TARGET_NR_getdents64 */
9171 #if defined(TARGET_NR__newselect)
9172 case TARGET_NR__newselect
:
9173 return do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9175 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9176 # ifdef TARGET_NR_poll
9177 case TARGET_NR_poll
:
9179 # ifdef TARGET_NR_ppoll
9180 case TARGET_NR_ppoll
:
9183 struct target_pollfd
*target_pfd
;
9184 unsigned int nfds
= arg2
;
9191 if (nfds
> (INT_MAX
/ sizeof(struct target_pollfd
))) {
9192 return -TARGET_EINVAL
;
9195 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
9196 sizeof(struct target_pollfd
) * nfds
, 1);
9198 return -TARGET_EFAULT
;
9201 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
9202 for (i
= 0; i
< nfds
; i
++) {
9203 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
9204 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
9209 # ifdef TARGET_NR_ppoll
9210 case TARGET_NR_ppoll
:
9212 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
9213 target_sigset_t
*target_set
;
9214 sigset_t _set
, *set
= &_set
;
9217 if (target_to_host_timespec(timeout_ts
, arg3
)) {
9218 unlock_user(target_pfd
, arg1
, 0);
9219 return -TARGET_EFAULT
;
9226 if (arg5
!= sizeof(target_sigset_t
)) {
9227 unlock_user(target_pfd
, arg1
, 0);
9228 return -TARGET_EINVAL
;
9231 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
9233 unlock_user(target_pfd
, arg1
, 0);
9234 return -TARGET_EFAULT
;
9236 target_to_host_sigset(set
, target_set
);
9241 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
9242 set
, SIGSET_T_SIZE
));
9244 if (!is_error(ret
) && arg3
) {
9245 host_to_target_timespec(arg3
, timeout_ts
);
9248 unlock_user(target_set
, arg4
, 0);
9253 # ifdef TARGET_NR_poll
9254 case TARGET_NR_poll
:
9256 struct timespec ts
, *pts
;
9259 /* Convert ms to secs, ns */
9260 ts
.tv_sec
= arg3
/ 1000;
9261 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
9264 /* -ve poll() timeout means "infinite" */
9267 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
9272 g_assert_not_reached();
9275 if (!is_error(ret
)) {
9276 for(i
= 0; i
< nfds
; i
++) {
9277 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
9280 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
9284 case TARGET_NR_flock
:
9285 /* NOTE: the flock constant seems to be the same for every
9287 return get_errno(safe_flock(arg1
, arg2
));
9288 case TARGET_NR_readv
:
9290 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
9292 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
9293 unlock_iovec(vec
, arg2
, arg3
, 1);
9295 ret
= -host_to_target_errno(errno
);
9299 case TARGET_NR_writev
:
9301 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
9303 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
9304 unlock_iovec(vec
, arg2
, arg3
, 0);
9306 ret
= -host_to_target_errno(errno
);
9310 #if defined(TARGET_NR_preadv)
9311 case TARGET_NR_preadv
:
9313 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
9315 unsigned long low
, high
;
9317 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
9318 ret
= get_errno(safe_preadv(arg1
, vec
, arg3
, low
, high
));
9319 unlock_iovec(vec
, arg2
, arg3
, 1);
9321 ret
= -host_to_target_errno(errno
);
9326 #if defined(TARGET_NR_pwritev)
9327 case TARGET_NR_pwritev
:
9329 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
9331 unsigned long low
, high
;
9333 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
9334 ret
= get_errno(safe_pwritev(arg1
, vec
, arg3
, low
, high
));
9335 unlock_iovec(vec
, arg2
, arg3
, 0);
9337 ret
= -host_to_target_errno(errno
);
9342 case TARGET_NR_getsid
:
9343 return get_errno(getsid(arg1
));
9344 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9345 case TARGET_NR_fdatasync
:
9346 return get_errno(fdatasync(arg1
));
9348 #ifdef TARGET_NR__sysctl
9349 case TARGET_NR__sysctl
:
9350 /* We don't implement this, but ENOTDIR is always a safe
9352 return -TARGET_ENOTDIR
;
9354 case TARGET_NR_sched_getaffinity
:
9356 unsigned int mask_size
;
9357 unsigned long *mask
;
9360 * sched_getaffinity needs multiples of ulong, so need to take
9361 * care of mismatches between target ulong and host ulong sizes.
9363 if (arg2
& (sizeof(abi_ulong
) - 1)) {
9364 return -TARGET_EINVAL
;
9366 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
9368 mask
= alloca(mask_size
);
9369 memset(mask
, 0, mask_size
);
9370 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
9372 if (!is_error(ret
)) {
9374 /* More data returned than the caller's buffer will fit.
9375 * This only happens if sizeof(abi_long) < sizeof(long)
9376 * and the caller passed us a buffer holding an odd number
9377 * of abi_longs. If the host kernel is actually using the
9378 * extra 4 bytes then fail EINVAL; otherwise we can just
9379 * ignore them and only copy the interesting part.
9381 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
9382 if (numcpus
> arg2
* 8) {
9383 return -TARGET_EINVAL
;
9388 if (host_to_target_cpu_mask(mask
, mask_size
, arg3
, ret
)) {
9389 return -TARGET_EFAULT
;
9394 case TARGET_NR_sched_setaffinity
:
9396 unsigned int mask_size
;
9397 unsigned long *mask
;
9400 * sched_setaffinity needs multiples of ulong, so need to take
9401 * care of mismatches between target ulong and host ulong sizes.
9403 if (arg2
& (sizeof(abi_ulong
) - 1)) {
9404 return -TARGET_EINVAL
;
9406 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
9407 mask
= alloca(mask_size
);
9409 ret
= target_to_host_cpu_mask(mask
, mask_size
, arg3
, arg2
);
9414 return get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
9416 case TARGET_NR_getcpu
:
9419 ret
= get_errno(sys_getcpu(arg1
? &cpu
: NULL
,
9420 arg2
? &node
: NULL
,
9422 if (is_error(ret
)) {
9425 if (arg1
&& put_user_u32(cpu
, arg1
)) {
9426 return -TARGET_EFAULT
;
9428 if (arg2
&& put_user_u32(node
, arg2
)) {
9429 return -TARGET_EFAULT
;
9433 case TARGET_NR_sched_setparam
:
9435 struct sched_param
*target_schp
;
9436 struct sched_param schp
;
9439 return -TARGET_EINVAL
;
9441 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
9442 return -TARGET_EFAULT
;
9443 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
9444 unlock_user_struct(target_schp
, arg2
, 0);
9445 return get_errno(sched_setparam(arg1
, &schp
));
9447 case TARGET_NR_sched_getparam
:
9449 struct sched_param
*target_schp
;
9450 struct sched_param schp
;
9453 return -TARGET_EINVAL
;
9455 ret
= get_errno(sched_getparam(arg1
, &schp
));
9456 if (!is_error(ret
)) {
9457 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
9458 return -TARGET_EFAULT
;
9459 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
9460 unlock_user_struct(target_schp
, arg2
, 1);
9464 case TARGET_NR_sched_setscheduler
:
9466 struct sched_param
*target_schp
;
9467 struct sched_param schp
;
9469 return -TARGET_EINVAL
;
9471 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
9472 return -TARGET_EFAULT
;
9473 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
9474 unlock_user_struct(target_schp
, arg3
, 0);
9475 return get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
9477 case TARGET_NR_sched_getscheduler
:
9478 return get_errno(sched_getscheduler(arg1
));
9479 case TARGET_NR_sched_yield
:
9480 return get_errno(sched_yield());
9481 case TARGET_NR_sched_get_priority_max
:
9482 return get_errno(sched_get_priority_max(arg1
));
9483 case TARGET_NR_sched_get_priority_min
:
9484 return get_errno(sched_get_priority_min(arg1
));
9485 case TARGET_NR_sched_rr_get_interval
:
9488 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
9489 if (!is_error(ret
)) {
9490 ret
= host_to_target_timespec(arg2
, &ts
);
9494 case TARGET_NR_nanosleep
:
9496 struct timespec req
, rem
;
9497 target_to_host_timespec(&req
, arg1
);
9498 ret
= get_errno(safe_nanosleep(&req
, &rem
));
9499 if (is_error(ret
) && arg2
) {
9500 host_to_target_timespec(arg2
, &rem
);
9504 case TARGET_NR_prctl
:
9506 case PR_GET_PDEATHSIG
:
9509 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
9510 if (!is_error(ret
) && arg2
9511 && put_user_ual(deathsig
, arg2
)) {
9512 return -TARGET_EFAULT
;
9519 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
9521 return -TARGET_EFAULT
;
9523 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
9525 unlock_user(name
, arg2
, 16);
9530 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
9532 return -TARGET_EFAULT
;
9534 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
9536 unlock_user(name
, arg2
, 0);
9541 case TARGET_PR_GET_FP_MODE
:
9543 CPUMIPSState
*env
= ((CPUMIPSState
*)cpu_env
);
9545 if (env
->CP0_Status
& (1 << CP0St_FR
)) {
9546 ret
|= TARGET_PR_FP_MODE_FR
;
9548 if (env
->CP0_Config5
& (1 << CP0C5_FRE
)) {
9549 ret
|= TARGET_PR_FP_MODE_FRE
;
9553 case TARGET_PR_SET_FP_MODE
:
9555 CPUMIPSState
*env
= ((CPUMIPSState
*)cpu_env
);
9556 bool old_fr
= env
->CP0_Status
& (1 << CP0St_FR
);
9557 bool old_fre
= env
->CP0_Config5
& (1 << CP0C5_FRE
);
9558 bool new_fr
= arg2
& TARGET_PR_FP_MODE_FR
;
9559 bool new_fre
= arg2
& TARGET_PR_FP_MODE_FRE
;
9561 const unsigned int known_bits
= TARGET_PR_FP_MODE_FR
|
9562 TARGET_PR_FP_MODE_FRE
;
9564 /* If nothing to change, return right away, successfully. */
9565 if (old_fr
== new_fr
&& old_fre
== new_fre
) {
9568 /* Check the value is valid */
9569 if (arg2
& ~known_bits
) {
9570 return -TARGET_EOPNOTSUPP
;
9572 /* Setting FRE without FR is not supported. */
9573 if (new_fre
&& !new_fr
) {
9574 return -TARGET_EOPNOTSUPP
;
9576 if (new_fr
&& !(env
->active_fpu
.fcr0
& (1 << FCR0_F64
))) {
9577 /* FR1 is not supported */
9578 return -TARGET_EOPNOTSUPP
;
9580 if (!new_fr
&& (env
->active_fpu
.fcr0
& (1 << FCR0_F64
))
9581 && !(env
->CP0_Status_rw_bitmask
& (1 << CP0St_FR
))) {
9582 /* cannot set FR=0 */
9583 return -TARGET_EOPNOTSUPP
;
9585 if (new_fre
&& !(env
->active_fpu
.fcr0
& (1 << FCR0_FREP
))) {
9586 /* Cannot set FRE=1 */
9587 return -TARGET_EOPNOTSUPP
;
9591 fpr_t
*fpr
= env
->active_fpu
.fpr
;
9592 for (i
= 0; i
< 32 ; i
+= 2) {
9593 if (!old_fr
&& new_fr
) {
9594 fpr
[i
].w
[!FP_ENDIAN_IDX
] = fpr
[i
+ 1].w
[FP_ENDIAN_IDX
];
9595 } else if (old_fr
&& !new_fr
) {
9596 fpr
[i
+ 1].w
[FP_ENDIAN_IDX
] = fpr
[i
].w
[!FP_ENDIAN_IDX
];
9601 env
->CP0_Status
|= (1 << CP0St_FR
);
9602 env
->hflags
|= MIPS_HFLAG_F64
;
9604 env
->CP0_Status
&= ~(1 << CP0St_FR
);
9605 env
->hflags
&= ~MIPS_HFLAG_F64
;
9608 env
->CP0_Config5
|= (1 << CP0C5_FRE
);
9609 if (env
->active_fpu
.fcr0
& (1 << FCR0_FREP
)) {
9610 env
->hflags
|= MIPS_HFLAG_FRE
;
9613 env
->CP0_Config5
&= ~(1 << CP0C5_FRE
);
9614 env
->hflags
&= ~MIPS_HFLAG_FRE
;
9620 #ifdef TARGET_AARCH64
9621 case TARGET_PR_SVE_SET_VL
:
9623 * We cannot support either PR_SVE_SET_VL_ONEXEC or
9624 * PR_SVE_VL_INHERIT. Note the kernel definition
9625 * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
9626 * even though the current architectural maximum is VQ=16.
9628 ret
= -TARGET_EINVAL
;
9629 if (cpu_isar_feature(aa64_sve
, arm_env_get_cpu(cpu_env
))
9630 && arg2
>= 0 && arg2
<= 512 * 16 && !(arg2
& 15)) {
9631 CPUARMState
*env
= cpu_env
;
9632 ARMCPU
*cpu
= arm_env_get_cpu(env
);
9633 uint32_t vq
, old_vq
;
9635 old_vq
= (env
->vfp
.zcr_el
[1] & 0xf) + 1;
9636 vq
= MAX(arg2
/ 16, 1);
9637 vq
= MIN(vq
, cpu
->sve_max_vq
);
9640 aarch64_sve_narrow_vq(env
, vq
);
9642 env
->vfp
.zcr_el
[1] = vq
- 1;
9646 case TARGET_PR_SVE_GET_VL
:
9647 ret
= -TARGET_EINVAL
;
9649 ARMCPU
*cpu
= arm_env_get_cpu(cpu_env
);
9650 if (cpu_isar_feature(aa64_sve
, cpu
)) {
9651 ret
= ((cpu
->env
.vfp
.zcr_el
[1] & 0xf) + 1) * 16;
9655 #endif /* AARCH64 */
9656 case PR_GET_SECCOMP
:
9657 case PR_SET_SECCOMP
:
9658 /* Disable seccomp to prevent the target disabling syscalls we
9660 return -TARGET_EINVAL
;
9662 /* Most prctl options have no pointer arguments */
9663 return get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
9666 #ifdef TARGET_NR_arch_prctl
9667 case TARGET_NR_arch_prctl
:
9668 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9669 return do_arch_prctl(cpu_env
, arg1
, arg2
);
9674 #ifdef TARGET_NR_pread64
9675 case TARGET_NR_pread64
:
9676 if (regpairs_aligned(cpu_env
, num
)) {
9680 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
9681 return -TARGET_EFAULT
;
9682 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
9683 unlock_user(p
, arg2
, ret
);
9685 case TARGET_NR_pwrite64
:
9686 if (regpairs_aligned(cpu_env
, num
)) {
9690 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
9691 return -TARGET_EFAULT
;
9692 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
9693 unlock_user(p
, arg2
, 0);
9696 case TARGET_NR_getcwd
:
9697 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
9698 return -TARGET_EFAULT
;
9699 ret
= get_errno(sys_getcwd1(p
, arg2
));
9700 unlock_user(p
, arg1
, ret
);
9702 case TARGET_NR_capget
:
9703 case TARGET_NR_capset
:
9705 struct target_user_cap_header
*target_header
;
9706 struct target_user_cap_data
*target_data
= NULL
;
9707 struct __user_cap_header_struct header
;
9708 struct __user_cap_data_struct data
[2];
9709 struct __user_cap_data_struct
*dataptr
= NULL
;
9710 int i
, target_datalen
;
9713 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
9714 return -TARGET_EFAULT
;
9716 header
.version
= tswap32(target_header
->version
);
9717 header
.pid
= tswap32(target_header
->pid
);
9719 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
9720 /* Version 2 and up takes pointer to two user_data structs */
9724 target_datalen
= sizeof(*target_data
) * data_items
;
9727 if (num
== TARGET_NR_capget
) {
9728 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
9730 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
9733 unlock_user_struct(target_header
, arg1
, 0);
9734 return -TARGET_EFAULT
;
9737 if (num
== TARGET_NR_capset
) {
9738 for (i
= 0; i
< data_items
; i
++) {
9739 data
[i
].effective
= tswap32(target_data
[i
].effective
);
9740 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
9741 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
9748 if (num
== TARGET_NR_capget
) {
9749 ret
= get_errno(capget(&header
, dataptr
));
9751 ret
= get_errno(capset(&header
, dataptr
));
9754 /* The kernel always updates version for both capget and capset */
9755 target_header
->version
= tswap32(header
.version
);
9756 unlock_user_struct(target_header
, arg1
, 1);
9759 if (num
== TARGET_NR_capget
) {
9760 for (i
= 0; i
< data_items
; i
++) {
9761 target_data
[i
].effective
= tswap32(data
[i
].effective
);
9762 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
9763 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
9765 unlock_user(target_data
, arg2
, target_datalen
);
9767 unlock_user(target_data
, arg2
, 0);
9772 case TARGET_NR_sigaltstack
:
9773 return do_sigaltstack(arg1
, arg2
,
9774 get_sp_from_cpustate((CPUArchState
*)cpu_env
));
9776 #ifdef CONFIG_SENDFILE
9777 #ifdef TARGET_NR_sendfile
9778 case TARGET_NR_sendfile
:
9783 ret
= get_user_sal(off
, arg3
);
9784 if (is_error(ret
)) {
9789 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
9790 if (!is_error(ret
) && arg3
) {
9791 abi_long ret2
= put_user_sal(off
, arg3
);
9792 if (is_error(ret2
)) {
9799 #ifdef TARGET_NR_sendfile64
9800 case TARGET_NR_sendfile64
:
9805 ret
= get_user_s64(off
, arg3
);
9806 if (is_error(ret
)) {
9811 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
9812 if (!is_error(ret
) && arg3
) {
9813 abi_long ret2
= put_user_s64(off
, arg3
);
9814 if (is_error(ret2
)) {
9822 #ifdef TARGET_NR_vfork
9823 case TARGET_NR_vfork
:
9824 return get_errno(do_fork(cpu_env
,
9825 CLONE_VFORK
| CLONE_VM
| TARGET_SIGCHLD
,
9828 #ifdef TARGET_NR_ugetrlimit
9829 case TARGET_NR_ugetrlimit
:
9832 int resource
= target_to_host_resource(arg1
);
9833 ret
= get_errno(getrlimit(resource
, &rlim
));
9834 if (!is_error(ret
)) {
9835 struct target_rlimit
*target_rlim
;
9836 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
9837 return -TARGET_EFAULT
;
9838 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
9839 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
9840 unlock_user_struct(target_rlim
, arg2
, 1);
9845 #ifdef TARGET_NR_truncate64
9846 case TARGET_NR_truncate64
:
9847 if (!(p
= lock_user_string(arg1
)))
9848 return -TARGET_EFAULT
;
9849 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
9850 unlock_user(p
, arg1
, 0);
9853 #ifdef TARGET_NR_ftruncate64
9854 case TARGET_NR_ftruncate64
:
9855 return target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
9857 #ifdef TARGET_NR_stat64
9858 case TARGET_NR_stat64
:
9859 if (!(p
= lock_user_string(arg1
))) {
9860 return -TARGET_EFAULT
;
9862 ret
= get_errno(stat(path(p
), &st
));
9863 unlock_user(p
, arg1
, 0);
9865 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
9868 #ifdef TARGET_NR_lstat64
9869 case TARGET_NR_lstat64
:
9870 if (!(p
= lock_user_string(arg1
))) {
9871 return -TARGET_EFAULT
;
9873 ret
= get_errno(lstat(path(p
), &st
));
9874 unlock_user(p
, arg1
, 0);
9876 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
9879 #ifdef TARGET_NR_fstat64
9880 case TARGET_NR_fstat64
:
9881 ret
= get_errno(fstat(arg1
, &st
));
9883 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
9886 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
9887 #ifdef TARGET_NR_fstatat64
9888 case TARGET_NR_fstatat64
:
9890 #ifdef TARGET_NR_newfstatat
9891 case TARGET_NR_newfstatat
:
9893 if (!(p
= lock_user_string(arg2
))) {
9894 return -TARGET_EFAULT
;
9896 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
9897 unlock_user(p
, arg2
, 0);
9899 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
9902 #ifdef TARGET_NR_lchown
9903 case TARGET_NR_lchown
:
9904 if (!(p
= lock_user_string(arg1
)))
9905 return -TARGET_EFAULT
;
9906 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
9907 unlock_user(p
, arg1
, 0);
9910 #ifdef TARGET_NR_getuid
9911 case TARGET_NR_getuid
:
9912 return get_errno(high2lowuid(getuid()));
9914 #ifdef TARGET_NR_getgid
9915 case TARGET_NR_getgid
:
9916 return get_errno(high2lowgid(getgid()));
9918 #ifdef TARGET_NR_geteuid
9919 case TARGET_NR_geteuid
:
9920 return get_errno(high2lowuid(geteuid()));
9922 #ifdef TARGET_NR_getegid
9923 case TARGET_NR_getegid
:
9924 return get_errno(high2lowgid(getegid()));
9926 case TARGET_NR_setreuid
:
9927 return get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
9928 case TARGET_NR_setregid
:
9929 return get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
9930 case TARGET_NR_getgroups
:
9932 int gidsetsize
= arg1
;
9933 target_id
*target_grouplist
;
9937 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
9938 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
9939 if (gidsetsize
== 0)
9941 if (!is_error(ret
)) {
9942 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
9943 if (!target_grouplist
)
9944 return -TARGET_EFAULT
;
9945 for(i
= 0;i
< ret
; i
++)
9946 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
9947 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
9951 case TARGET_NR_setgroups
:
9953 int gidsetsize
= arg1
;
9954 target_id
*target_grouplist
;
9955 gid_t
*grouplist
= NULL
;
9958 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
9959 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
9960 if (!target_grouplist
) {
9961 return -TARGET_EFAULT
;
9963 for (i
= 0; i
< gidsetsize
; i
++) {
9964 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
9966 unlock_user(target_grouplist
, arg2
, 0);
9968 return get_errno(setgroups(gidsetsize
, grouplist
));
9970 case TARGET_NR_fchown
:
9971 return get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
9972 #if defined(TARGET_NR_fchownat)
9973 case TARGET_NR_fchownat
:
9974 if (!(p
= lock_user_string(arg2
)))
9975 return -TARGET_EFAULT
;
9976 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
9977 low2highgid(arg4
), arg5
));
9978 unlock_user(p
, arg2
, 0);
9981 #ifdef TARGET_NR_setresuid
9982 case TARGET_NR_setresuid
:
9983 return get_errno(sys_setresuid(low2highuid(arg1
),
9985 low2highuid(arg3
)));
9987 #ifdef TARGET_NR_getresuid
9988 case TARGET_NR_getresuid
:
9990 uid_t ruid
, euid
, suid
;
9991 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
9992 if (!is_error(ret
)) {
9993 if (put_user_id(high2lowuid(ruid
), arg1
)
9994 || put_user_id(high2lowuid(euid
), arg2
)
9995 || put_user_id(high2lowuid(suid
), arg3
))
9996 return -TARGET_EFAULT
;
10001 #ifdef TARGET_NR_getresgid
10002 case TARGET_NR_setresgid
:
10003 return get_errno(sys_setresgid(low2highgid(arg1
),
10005 low2highgid(arg3
)));
10007 #ifdef TARGET_NR_getresgid
10008 case TARGET_NR_getresgid
:
10010 gid_t rgid
, egid
, sgid
;
10011 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
10012 if (!is_error(ret
)) {
10013 if (put_user_id(high2lowgid(rgid
), arg1
)
10014 || put_user_id(high2lowgid(egid
), arg2
)
10015 || put_user_id(high2lowgid(sgid
), arg3
))
10016 return -TARGET_EFAULT
;
10021 #ifdef TARGET_NR_chown
10022 case TARGET_NR_chown
:
10023 if (!(p
= lock_user_string(arg1
)))
10024 return -TARGET_EFAULT
;
10025 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10026 unlock_user(p
, arg1
, 0);
10029 case TARGET_NR_setuid
:
10030 return get_errno(sys_setuid(low2highuid(arg1
)));
10031 case TARGET_NR_setgid
:
10032 return get_errno(sys_setgid(low2highgid(arg1
)));
10033 case TARGET_NR_setfsuid
:
10034 return get_errno(setfsuid(arg1
));
10035 case TARGET_NR_setfsgid
:
10036 return get_errno(setfsgid(arg1
));
10038 #ifdef TARGET_NR_lchown32
10039 case TARGET_NR_lchown32
:
10040 if (!(p
= lock_user_string(arg1
)))
10041 return -TARGET_EFAULT
;
10042 ret
= get_errno(lchown(p
, arg2
, arg3
));
10043 unlock_user(p
, arg1
, 0);
10046 #ifdef TARGET_NR_getuid32
10047 case TARGET_NR_getuid32
:
10048 return get_errno(getuid());
10051 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10052 /* Alpha specific */
10053 case TARGET_NR_getxuid
:
10057 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
10059 return get_errno(getuid());
10061 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10062 /* Alpha specific */
10063 case TARGET_NR_getxgid
:
10067 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
10069 return get_errno(getgid());
10071 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10072 /* Alpha specific */
10073 case TARGET_NR_osf_getsysinfo
:
10074 ret
= -TARGET_EOPNOTSUPP
;
10076 case TARGET_GSI_IEEE_FP_CONTROL
:
10078 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
10080 /* Copied from linux ieee_fpcr_to_swcr. */
10081 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
10082 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
10083 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
10084 | SWCR_TRAP_ENABLE_DZE
10085 | SWCR_TRAP_ENABLE_OVF
);
10086 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
10087 | SWCR_TRAP_ENABLE_INE
);
10088 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
10089 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
10091 if (put_user_u64 (swcr
, arg2
))
10092 return -TARGET_EFAULT
;
10097 /* case GSI_IEEE_STATE_AT_SIGNAL:
10098 -- Not implemented in linux kernel.
10100 -- Retrieves current unaligned access state; not much used.
10101 case GSI_PROC_TYPE:
10102 -- Retrieves implver information; surely not used.
10103 case GSI_GET_HWRPB:
10104 -- Grabs a copy of the HWRPB; surely not used.
10109 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10110 /* Alpha specific */
10111 case TARGET_NR_osf_setsysinfo
:
10112 ret
= -TARGET_EOPNOTSUPP
;
10114 case TARGET_SSI_IEEE_FP_CONTROL
:
10116 uint64_t swcr
, fpcr
, orig_fpcr
;
10118 if (get_user_u64 (swcr
, arg2
)) {
10119 return -TARGET_EFAULT
;
10121 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10122 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
10124 /* Copied from linux ieee_swcr_to_fpcr. */
10125 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
10126 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
10127 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
10128 | SWCR_TRAP_ENABLE_DZE
10129 | SWCR_TRAP_ENABLE_OVF
)) << 48;
10130 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
10131 | SWCR_TRAP_ENABLE_INE
)) << 57;
10132 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
10133 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
10135 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
10140 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
10142 uint64_t exc
, fpcr
, orig_fpcr
;
10145 if (get_user_u64(exc
, arg2
)) {
10146 return -TARGET_EFAULT
;
10149 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10151 /* We only add to the exception status here. */
10152 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
10154 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
10157 /* Old exceptions are not signaled. */
10158 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
10160 /* If any exceptions set by this call,
10161 and are unmasked, send a signal. */
10163 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
10164 si_code
= TARGET_FPE_FLTRES
;
10166 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
10167 si_code
= TARGET_FPE_FLTUND
;
10169 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
10170 si_code
= TARGET_FPE_FLTOVF
;
10172 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
10173 si_code
= TARGET_FPE_FLTDIV
;
10175 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
10176 si_code
= TARGET_FPE_FLTINV
;
10178 if (si_code
!= 0) {
10179 target_siginfo_t info
;
10180 info
.si_signo
= SIGFPE
;
10182 info
.si_code
= si_code
;
10183 info
._sifields
._sigfault
._addr
10184 = ((CPUArchState
*)cpu_env
)->pc
;
10185 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
10186 QEMU_SI_FAULT
, &info
);
10191 /* case SSI_NVPAIRS:
10192 -- Used with SSIN_UACPROC to enable unaligned accesses.
10193 case SSI_IEEE_STATE_AT_SIGNAL:
10194 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10195 -- Not implemented in linux kernel
10200 #ifdef TARGET_NR_osf_sigprocmask
10201 /* Alpha specific. */
10202 case TARGET_NR_osf_sigprocmask
:
10206 sigset_t set
, oldset
;
10209 case TARGET_SIG_BLOCK
:
10212 case TARGET_SIG_UNBLOCK
:
10215 case TARGET_SIG_SETMASK
:
10219 return -TARGET_EINVAL
;
10222 target_to_host_old_sigset(&set
, &mask
);
10223 ret
= do_sigprocmask(how
, &set
, &oldset
);
10225 host_to_target_old_sigset(&mask
, &oldset
);
10232 #ifdef TARGET_NR_getgid32
10233 case TARGET_NR_getgid32
:
10234 return get_errno(getgid());
10236 #ifdef TARGET_NR_geteuid32
10237 case TARGET_NR_geteuid32
:
10238 return get_errno(geteuid());
10240 #ifdef TARGET_NR_getegid32
10241 case TARGET_NR_getegid32
:
10242 return get_errno(getegid());
10244 #ifdef TARGET_NR_setreuid32
10245 case TARGET_NR_setreuid32
:
10246 return get_errno(setreuid(arg1
, arg2
));
10248 #ifdef TARGET_NR_setregid32
10249 case TARGET_NR_setregid32
:
10250 return get_errno(setregid(arg1
, arg2
));
10252 #ifdef TARGET_NR_getgroups32
10253 case TARGET_NR_getgroups32
:
10255 int gidsetsize
= arg1
;
10256 uint32_t *target_grouplist
;
10260 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10261 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10262 if (gidsetsize
== 0)
10264 if (!is_error(ret
)) {
10265 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
10266 if (!target_grouplist
) {
10267 return -TARGET_EFAULT
;
10269 for(i
= 0;i
< ret
; i
++)
10270 target_grouplist
[i
] = tswap32(grouplist
[i
]);
10271 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
10276 #ifdef TARGET_NR_setgroups32
10277 case TARGET_NR_setgroups32
:
10279 int gidsetsize
= arg1
;
10280 uint32_t *target_grouplist
;
10284 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10285 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
10286 if (!target_grouplist
) {
10287 return -TARGET_EFAULT
;
10289 for(i
= 0;i
< gidsetsize
; i
++)
10290 grouplist
[i
] = tswap32(target_grouplist
[i
]);
10291 unlock_user(target_grouplist
, arg2
, 0);
10292 return get_errno(setgroups(gidsetsize
, grouplist
));
10295 #ifdef TARGET_NR_fchown32
10296 case TARGET_NR_fchown32
:
10297 return get_errno(fchown(arg1
, arg2
, arg3
));
10299 #ifdef TARGET_NR_setresuid32
10300 case TARGET_NR_setresuid32
:
10301 return get_errno(sys_setresuid(arg1
, arg2
, arg3
));
10303 #ifdef TARGET_NR_getresuid32
10304 case TARGET_NR_getresuid32
:
10306 uid_t ruid
, euid
, suid
;
10307 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
10308 if (!is_error(ret
)) {
10309 if (put_user_u32(ruid
, arg1
)
10310 || put_user_u32(euid
, arg2
)
10311 || put_user_u32(suid
, arg3
))
10312 return -TARGET_EFAULT
;
10317 #ifdef TARGET_NR_setresgid32
10318 case TARGET_NR_setresgid32
:
10319 return get_errno(sys_setresgid(arg1
, arg2
, arg3
));
10321 #ifdef TARGET_NR_getresgid32
10322 case TARGET_NR_getresgid32
:
10324 gid_t rgid
, egid
, sgid
;
10325 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
10326 if (!is_error(ret
)) {
10327 if (put_user_u32(rgid
, arg1
)
10328 || put_user_u32(egid
, arg2
)
10329 || put_user_u32(sgid
, arg3
))
10330 return -TARGET_EFAULT
;
10335 #ifdef TARGET_NR_chown32
10336 case TARGET_NR_chown32
:
10337 if (!(p
= lock_user_string(arg1
)))
10338 return -TARGET_EFAULT
;
10339 ret
= get_errno(chown(p
, arg2
, arg3
));
10340 unlock_user(p
, arg1
, 0);
10343 #ifdef TARGET_NR_setuid32
10344 case TARGET_NR_setuid32
:
10345 return get_errno(sys_setuid(arg1
));
10347 #ifdef TARGET_NR_setgid32
10348 case TARGET_NR_setgid32
:
10349 return get_errno(sys_setgid(arg1
));
10351 #ifdef TARGET_NR_setfsuid32
10352 case TARGET_NR_setfsuid32
:
10353 return get_errno(setfsuid(arg1
));
10355 #ifdef TARGET_NR_setfsgid32
10356 case TARGET_NR_setfsgid32
:
10357 return get_errno(setfsgid(arg1
));
10359 #ifdef TARGET_NR_mincore
10360 case TARGET_NR_mincore
:
10362 void *a
= lock_user(VERIFY_READ
, arg1
, arg2
, 0);
10364 return -TARGET_ENOMEM
;
10366 p
= lock_user_string(arg3
);
10368 ret
= -TARGET_EFAULT
;
10370 ret
= get_errno(mincore(a
, arg2
, p
));
10371 unlock_user(p
, arg3
, ret
);
10373 unlock_user(a
, arg1
, 0);
10377 #ifdef TARGET_NR_arm_fadvise64_64
10378 case TARGET_NR_arm_fadvise64_64
:
10379 /* arm_fadvise64_64 looks like fadvise64_64 but
10380 * with different argument order: fd, advice, offset, len
10381 * rather than the usual fd, offset, len, advice.
10382 * Note that offset and len are both 64-bit so appear as
10383 * pairs of 32-bit registers.
10385 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
10386 target_offset64(arg5
, arg6
), arg2
);
10387 return -host_to_target_errno(ret
);
10390 #if TARGET_ABI_BITS == 32
10392 #ifdef TARGET_NR_fadvise64_64
10393 case TARGET_NR_fadvise64_64
:
10394 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
10395 /* 6 args: fd, advice, offset (high, low), len (high, low) */
10403 /* 6 args: fd, offset (high, low), len (high, low), advice */
10404 if (regpairs_aligned(cpu_env
, num
)) {
10405 /* offset is in (3,4), len in (5,6) and advice in 7 */
10413 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
),
10414 target_offset64(arg4
, arg5
), arg6
);
10415 return -host_to_target_errno(ret
);
10418 #ifdef TARGET_NR_fadvise64
10419 case TARGET_NR_fadvise64
:
10420 /* 5 args: fd, offset (high, low), len, advice */
10421 if (regpairs_aligned(cpu_env
, num
)) {
10422 /* offset is in (3,4), len in 5 and advice in 6 */
10428 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
), arg4
, arg5
);
10429 return -host_to_target_errno(ret
);
10432 #else /* not a 32-bit ABI */
10433 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10434 #ifdef TARGET_NR_fadvise64_64
10435 case TARGET_NR_fadvise64_64
:
10437 #ifdef TARGET_NR_fadvise64
10438 case TARGET_NR_fadvise64
:
10440 #ifdef TARGET_S390X
10442 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
10443 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
10444 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
10445 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
10449 return -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
10451 #endif /* end of 64-bit ABI fadvise handling */
10453 #ifdef TARGET_NR_madvise
10454 case TARGET_NR_madvise
:
10455 /* A straight passthrough may not be safe because qemu sometimes
10456 turns private file-backed mappings into anonymous mappings.
10457 This will break MADV_DONTNEED.
10458 This is a hint, so ignoring and returning success is ok. */
10461 #if TARGET_ABI_BITS == 32
10462 case TARGET_NR_fcntl64
:
10466 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
10467 to_flock64_fn
*copyto
= copy_to_user_flock64
;
10470 if (!((CPUARMState
*)cpu_env
)->eabi
) {
10471 copyfrom
= copy_from_user_oabi_flock64
;
10472 copyto
= copy_to_user_oabi_flock64
;
10476 cmd
= target_to_host_fcntl_cmd(arg2
);
10477 if (cmd
== -TARGET_EINVAL
) {
10482 case TARGET_F_GETLK64
:
10483 ret
= copyfrom(&fl
, arg3
);
10487 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
10489 ret
= copyto(arg3
, &fl
);
10493 case TARGET_F_SETLK64
:
10494 case TARGET_F_SETLKW64
:
10495 ret
= copyfrom(&fl
, arg3
);
10499 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
10502 ret
= do_fcntl(arg1
, arg2
, arg3
);
10508 #ifdef TARGET_NR_cacheflush
10509 case TARGET_NR_cacheflush
:
10510 /* self-modifying code is handled automatically, so nothing needed */
10513 #ifdef TARGET_NR_getpagesize
10514 case TARGET_NR_getpagesize
:
10515 return TARGET_PAGE_SIZE
;
10517 case TARGET_NR_gettid
:
10518 return get_errno(gettid());
10519 #ifdef TARGET_NR_readahead
10520 case TARGET_NR_readahead
:
10521 #if TARGET_ABI_BITS == 32
10522 if (regpairs_aligned(cpu_env
, num
)) {
10527 ret
= get_errno(readahead(arg1
, target_offset64(arg2
, arg3
) , arg4
));
10529 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
10534 #ifdef TARGET_NR_setxattr
10535 case TARGET_NR_listxattr
:
10536 case TARGET_NR_llistxattr
:
10540 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10542 return -TARGET_EFAULT
;
10545 p
= lock_user_string(arg1
);
10547 if (num
== TARGET_NR_listxattr
) {
10548 ret
= get_errno(listxattr(p
, b
, arg3
));
10550 ret
= get_errno(llistxattr(p
, b
, arg3
));
10553 ret
= -TARGET_EFAULT
;
10555 unlock_user(p
, arg1
, 0);
10556 unlock_user(b
, arg2
, arg3
);
10559 case TARGET_NR_flistxattr
:
10563 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10565 return -TARGET_EFAULT
;
10568 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
10569 unlock_user(b
, arg2
, arg3
);
10572 case TARGET_NR_setxattr
:
10573 case TARGET_NR_lsetxattr
:
10575 void *p
, *n
, *v
= 0;
10577 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
10579 return -TARGET_EFAULT
;
10582 p
= lock_user_string(arg1
);
10583 n
= lock_user_string(arg2
);
10585 if (num
== TARGET_NR_setxattr
) {
10586 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
10588 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
10591 ret
= -TARGET_EFAULT
;
10593 unlock_user(p
, arg1
, 0);
10594 unlock_user(n
, arg2
, 0);
10595 unlock_user(v
, arg3
, 0);
10598 case TARGET_NR_fsetxattr
:
10602 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
10604 return -TARGET_EFAULT
;
10607 n
= lock_user_string(arg2
);
10609 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
10611 ret
= -TARGET_EFAULT
;
10613 unlock_user(n
, arg2
, 0);
10614 unlock_user(v
, arg3
, 0);
10617 case TARGET_NR_getxattr
:
10618 case TARGET_NR_lgetxattr
:
10620 void *p
, *n
, *v
= 0;
10622 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
10624 return -TARGET_EFAULT
;
10627 p
= lock_user_string(arg1
);
10628 n
= lock_user_string(arg2
);
10630 if (num
== TARGET_NR_getxattr
) {
10631 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
10633 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
10636 ret
= -TARGET_EFAULT
;
10638 unlock_user(p
, arg1
, 0);
10639 unlock_user(n
, arg2
, 0);
10640 unlock_user(v
, arg3
, arg4
);
10643 case TARGET_NR_fgetxattr
:
10647 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
10649 return -TARGET_EFAULT
;
10652 n
= lock_user_string(arg2
);
10654 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
10656 ret
= -TARGET_EFAULT
;
10658 unlock_user(n
, arg2
, 0);
10659 unlock_user(v
, arg3
, arg4
);
10662 case TARGET_NR_removexattr
:
10663 case TARGET_NR_lremovexattr
:
10666 p
= lock_user_string(arg1
);
10667 n
= lock_user_string(arg2
);
10669 if (num
== TARGET_NR_removexattr
) {
10670 ret
= get_errno(removexattr(p
, n
));
10672 ret
= get_errno(lremovexattr(p
, n
));
10675 ret
= -TARGET_EFAULT
;
10677 unlock_user(p
, arg1
, 0);
10678 unlock_user(n
, arg2
, 0);
10681 case TARGET_NR_fremovexattr
:
10684 n
= lock_user_string(arg2
);
10686 ret
= get_errno(fremovexattr(arg1
, n
));
10688 ret
= -TARGET_EFAULT
;
10690 unlock_user(n
, arg2
, 0);
10694 #endif /* CONFIG_ATTR */
10695 #ifdef TARGET_NR_set_thread_area
10696 case TARGET_NR_set_thread_area
:
10697 #if defined(TARGET_MIPS)
10698 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
10700 #elif defined(TARGET_CRIS)
10702 ret
= -TARGET_EINVAL
;
10704 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
10708 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
10709 return do_set_thread_area(cpu_env
, arg1
);
10710 #elif defined(TARGET_M68K)
10712 TaskState
*ts
= cpu
->opaque
;
10713 ts
->tp_value
= arg1
;
10717 return -TARGET_ENOSYS
;
10720 #ifdef TARGET_NR_get_thread_area
10721 case TARGET_NR_get_thread_area
:
10722 #if defined(TARGET_I386) && defined(TARGET_ABI32)
10723 return do_get_thread_area(cpu_env
, arg1
);
10724 #elif defined(TARGET_M68K)
10726 TaskState
*ts
= cpu
->opaque
;
10727 return ts
->tp_value
;
10730 return -TARGET_ENOSYS
;
10733 #ifdef TARGET_NR_getdomainname
10734 case TARGET_NR_getdomainname
:
10735 return -TARGET_ENOSYS
;
10738 #ifdef TARGET_NR_clock_settime
10739 case TARGET_NR_clock_settime
:
10741 struct timespec ts
;
10743 ret
= target_to_host_timespec(&ts
, arg2
);
10744 if (!is_error(ret
)) {
10745 ret
= get_errno(clock_settime(arg1
, &ts
));
10750 #ifdef TARGET_NR_clock_gettime
10751 case TARGET_NR_clock_gettime
:
10753 struct timespec ts
;
10754 ret
= get_errno(clock_gettime(arg1
, &ts
));
10755 if (!is_error(ret
)) {
10756 ret
= host_to_target_timespec(arg2
, &ts
);
10761 #ifdef TARGET_NR_clock_getres
10762 case TARGET_NR_clock_getres
:
10764 struct timespec ts
;
10765 ret
= get_errno(clock_getres(arg1
, &ts
));
10766 if (!is_error(ret
)) {
10767 host_to_target_timespec(arg2
, &ts
);
10772 #ifdef TARGET_NR_clock_nanosleep
10773 case TARGET_NR_clock_nanosleep
:
10775 struct timespec ts
;
10776 target_to_host_timespec(&ts
, arg3
);
10777 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
10778 &ts
, arg4
? &ts
: NULL
));
10780 host_to_target_timespec(arg4
, &ts
);
10782 #if defined(TARGET_PPC)
10783 /* clock_nanosleep is odd in that it returns positive errno values.
10784 * On PPC, CR0 bit 3 should be set in such a situation. */
10785 if (ret
&& ret
!= -TARGET_ERESTARTSYS
) {
10786 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
10793 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
10794 case TARGET_NR_set_tid_address
:
10795 return get_errno(set_tid_address((int *)g2h(arg1
)));
10798 case TARGET_NR_tkill
:
10799 return get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
10801 case TARGET_NR_tgkill
:
10802 return get_errno(safe_tgkill((int)arg1
, (int)arg2
,
10803 target_to_host_signal(arg3
)));
10805 #ifdef TARGET_NR_set_robust_list
10806 case TARGET_NR_set_robust_list
:
10807 case TARGET_NR_get_robust_list
:
10808 /* The ABI for supporting robust futexes has userspace pass
10809 * the kernel a pointer to a linked list which is updated by
10810 * userspace after the syscall; the list is walked by the kernel
10811 * when the thread exits. Since the linked list in QEMU guest
10812 * memory isn't a valid linked list for the host and we have
10813 * no way to reliably intercept the thread-death event, we can't
10814 * support these. Silently return ENOSYS so that guest userspace
10815 * falls back to a non-robust futex implementation (which should
10816 * be OK except in the corner case of the guest crashing while
10817 * holding a mutex that is shared with another process via
10820 return -TARGET_ENOSYS
;
10823 #if defined(TARGET_NR_utimensat)
10824 case TARGET_NR_utimensat
:
10826 struct timespec
*tsp
, ts
[2];
10830 target_to_host_timespec(ts
, arg3
);
10831 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
10835 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
10837 if (!(p
= lock_user_string(arg2
))) {
10838 return -TARGET_EFAULT
;
10840 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
10841 unlock_user(p
, arg2
, 0);
10846 case TARGET_NR_futex
:
10847 return do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
10848 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
10849 case TARGET_NR_inotify_init
:
10850 ret
= get_errno(sys_inotify_init());
10852 fd_trans_register(ret
, &target_inotify_trans
);
10856 #ifdef CONFIG_INOTIFY1
10857 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
10858 case TARGET_NR_inotify_init1
:
10859 ret
= get_errno(sys_inotify_init1(target_to_host_bitmask(arg1
,
10860 fcntl_flags_tbl
)));
10862 fd_trans_register(ret
, &target_inotify_trans
);
10867 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
10868 case TARGET_NR_inotify_add_watch
:
10869 p
= lock_user_string(arg2
);
10870 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
10871 unlock_user(p
, arg2
, 0);
10874 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
10875 case TARGET_NR_inotify_rm_watch
:
10876 return get_errno(sys_inotify_rm_watch(arg1
, arg2
));
10879 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
10880 case TARGET_NR_mq_open
:
10882 struct mq_attr posix_mq_attr
;
10883 struct mq_attr
*pposix_mq_attr
;
10886 host_flags
= target_to_host_bitmask(arg2
, fcntl_flags_tbl
);
10887 pposix_mq_attr
= NULL
;
10889 if (copy_from_user_mq_attr(&posix_mq_attr
, arg4
) != 0) {
10890 return -TARGET_EFAULT
;
10892 pposix_mq_attr
= &posix_mq_attr
;
10894 p
= lock_user_string(arg1
- 1);
10896 return -TARGET_EFAULT
;
10898 ret
= get_errno(mq_open(p
, host_flags
, arg3
, pposix_mq_attr
));
10899 unlock_user (p
, arg1
, 0);
10903 case TARGET_NR_mq_unlink
:
10904 p
= lock_user_string(arg1
- 1);
10906 return -TARGET_EFAULT
;
10908 ret
= get_errno(mq_unlink(p
));
10909 unlock_user (p
, arg1
, 0);
10912 case TARGET_NR_mq_timedsend
:
10914 struct timespec ts
;
10916 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
10918 target_to_host_timespec(&ts
, arg5
);
10919 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
10920 host_to_target_timespec(arg5
, &ts
);
10922 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
10924 unlock_user (p
, arg2
, arg3
);
10928 case TARGET_NR_mq_timedreceive
:
10930 struct timespec ts
;
10933 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
10935 target_to_host_timespec(&ts
, arg5
);
10936 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
10938 host_to_target_timespec(arg5
, &ts
);
10940 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
10943 unlock_user (p
, arg2
, arg3
);
10945 put_user_u32(prio
, arg4
);
10949 /* Not implemented for now... */
10950 /* case TARGET_NR_mq_notify: */
10953 case TARGET_NR_mq_getsetattr
:
10955 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
10958 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
10959 ret
= get_errno(mq_setattr(arg1
, &posix_mq_attr_in
,
10960 &posix_mq_attr_out
));
10961 } else if (arg3
!= 0) {
10962 ret
= get_errno(mq_getattr(arg1
, &posix_mq_attr_out
));
10964 if (ret
== 0 && arg3
!= 0) {
10965 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
10971 #ifdef CONFIG_SPLICE
10972 #ifdef TARGET_NR_tee
10973 case TARGET_NR_tee
:
10975 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
10979 #ifdef TARGET_NR_splice
10980 case TARGET_NR_splice
:
10982 loff_t loff_in
, loff_out
;
10983 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
10985 if (get_user_u64(loff_in
, arg2
)) {
10986 return -TARGET_EFAULT
;
10988 ploff_in
= &loff_in
;
10991 if (get_user_u64(loff_out
, arg4
)) {
10992 return -TARGET_EFAULT
;
10994 ploff_out
= &loff_out
;
10996 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
10998 if (put_user_u64(loff_in
, arg2
)) {
10999 return -TARGET_EFAULT
;
11003 if (put_user_u64(loff_out
, arg4
)) {
11004 return -TARGET_EFAULT
;
11010 #ifdef TARGET_NR_vmsplice
11011 case TARGET_NR_vmsplice
:
11013 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
11015 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
11016 unlock_iovec(vec
, arg2
, arg3
, 0);
11018 ret
= -host_to_target_errno(errno
);
11023 #endif /* CONFIG_SPLICE */
11024 #ifdef CONFIG_EVENTFD
11025 #if defined(TARGET_NR_eventfd)
11026 case TARGET_NR_eventfd
:
11027 ret
= get_errno(eventfd(arg1
, 0));
11029 fd_trans_register(ret
, &target_eventfd_trans
);
11033 #if defined(TARGET_NR_eventfd2)
11034 case TARGET_NR_eventfd2
:
11036 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
11037 if (arg2
& TARGET_O_NONBLOCK
) {
11038 host_flags
|= O_NONBLOCK
;
11040 if (arg2
& TARGET_O_CLOEXEC
) {
11041 host_flags
|= O_CLOEXEC
;
11043 ret
= get_errno(eventfd(arg1
, host_flags
));
11045 fd_trans_register(ret
, &target_eventfd_trans
);
11050 #endif /* CONFIG_EVENTFD */
11051 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11052 case TARGET_NR_fallocate
:
11053 #if TARGET_ABI_BITS == 32
11054 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
11055 target_offset64(arg5
, arg6
)));
11057 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
11061 #if defined(CONFIG_SYNC_FILE_RANGE)
11062 #if defined(TARGET_NR_sync_file_range)
11063 case TARGET_NR_sync_file_range
:
11064 #if TARGET_ABI_BITS == 32
11065 #if defined(TARGET_MIPS)
11066 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11067 target_offset64(arg5
, arg6
), arg7
));
11069 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
11070 target_offset64(arg4
, arg5
), arg6
));
11071 #endif /* !TARGET_MIPS */
11073 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
11077 #if defined(TARGET_NR_sync_file_range2)
11078 case TARGET_NR_sync_file_range2
:
11079 /* This is like sync_file_range but the arguments are reordered */
11080 #if TARGET_ABI_BITS == 32
11081 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11082 target_offset64(arg5
, arg6
), arg2
));
11084 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
11089 #if defined(TARGET_NR_signalfd4)
11090 case TARGET_NR_signalfd4
:
11091 return do_signalfd4(arg1
, arg2
, arg4
);
11093 #if defined(TARGET_NR_signalfd)
11094 case TARGET_NR_signalfd
:
11095 return do_signalfd4(arg1
, arg2
, 0);
11097 #if defined(CONFIG_EPOLL)
11098 #if defined(TARGET_NR_epoll_create)
11099 case TARGET_NR_epoll_create
:
11100 return get_errno(epoll_create(arg1
));
11102 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11103 case TARGET_NR_epoll_create1
:
11104 return get_errno(epoll_create1(arg1
));
11106 #if defined(TARGET_NR_epoll_ctl)
11107 case TARGET_NR_epoll_ctl
:
11109 struct epoll_event ep
;
11110 struct epoll_event
*epp
= 0;
11112 struct target_epoll_event
*target_ep
;
11113 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
11114 return -TARGET_EFAULT
;
11116 ep
.events
= tswap32(target_ep
->events
);
11117 /* The epoll_data_t union is just opaque data to the kernel,
11118 * so we transfer all 64 bits across and need not worry what
11119 * actual data type it is.
11121 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
11122 unlock_user_struct(target_ep
, arg4
, 0);
11125 return get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
11129 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11130 #if defined(TARGET_NR_epoll_wait)
11131 case TARGET_NR_epoll_wait
:
11133 #if defined(TARGET_NR_epoll_pwait)
11134 case TARGET_NR_epoll_pwait
:
11137 struct target_epoll_event
*target_ep
;
11138 struct epoll_event
*ep
;
11140 int maxevents
= arg3
;
11141 int timeout
= arg4
;
11143 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
11144 return -TARGET_EINVAL
;
11147 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
11148 maxevents
* sizeof(struct target_epoll_event
), 1);
11150 return -TARGET_EFAULT
;
11153 ep
= g_try_new(struct epoll_event
, maxevents
);
11155 unlock_user(target_ep
, arg2
, 0);
11156 return -TARGET_ENOMEM
;
11160 #if defined(TARGET_NR_epoll_pwait)
11161 case TARGET_NR_epoll_pwait
:
11163 target_sigset_t
*target_set
;
11164 sigset_t _set
, *set
= &_set
;
11167 if (arg6
!= sizeof(target_sigset_t
)) {
11168 ret
= -TARGET_EINVAL
;
11172 target_set
= lock_user(VERIFY_READ
, arg5
,
11173 sizeof(target_sigset_t
), 1);
11175 ret
= -TARGET_EFAULT
;
11178 target_to_host_sigset(set
, target_set
);
11179 unlock_user(target_set
, arg5
, 0);
11184 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
11185 set
, SIGSET_T_SIZE
));
11189 #if defined(TARGET_NR_epoll_wait)
11190 case TARGET_NR_epoll_wait
:
11191 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
11196 ret
= -TARGET_ENOSYS
;
11198 if (!is_error(ret
)) {
11200 for (i
= 0; i
< ret
; i
++) {
11201 target_ep
[i
].events
= tswap32(ep
[i
].events
);
11202 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
11204 unlock_user(target_ep
, arg2
,
11205 ret
* sizeof(struct target_epoll_event
));
11207 unlock_user(target_ep
, arg2
, 0);
11214 #ifdef TARGET_NR_prlimit64
11215 case TARGET_NR_prlimit64
:
11217 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11218 struct target_rlimit64
*target_rnew
, *target_rold
;
11219 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
11220 int resource
= target_to_host_resource(arg2
);
11222 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
11223 return -TARGET_EFAULT
;
11225 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
11226 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
11227 unlock_user_struct(target_rnew
, arg3
, 0);
11231 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
11232 if (!is_error(ret
) && arg4
) {
11233 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
11234 return -TARGET_EFAULT
;
11236 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
11237 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
11238 unlock_user_struct(target_rold
, arg4
, 1);
11243 #ifdef TARGET_NR_gethostname
11244 case TARGET_NR_gethostname
:
11246 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
11248 ret
= get_errno(gethostname(name
, arg2
));
11249 unlock_user(name
, arg1
, arg2
);
11251 ret
= -TARGET_EFAULT
;
11256 #ifdef TARGET_NR_atomic_cmpxchg_32
11257 case TARGET_NR_atomic_cmpxchg_32
:
11259 /* should use start_exclusive from main.c */
11260 abi_ulong mem_value
;
11261 if (get_user_u32(mem_value
, arg6
)) {
11262 target_siginfo_t info
;
11263 info
.si_signo
= SIGSEGV
;
11265 info
.si_code
= TARGET_SEGV_MAPERR
;
11266 info
._sifields
._sigfault
._addr
= arg6
;
11267 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
11268 QEMU_SI_FAULT
, &info
);
11272 if (mem_value
== arg2
)
11273 put_user_u32(arg1
, arg6
);
11277 #ifdef TARGET_NR_atomic_barrier
11278 case TARGET_NR_atomic_barrier
:
11279 /* Like the kernel implementation and the
11280 qemu arm barrier, no-op this? */
11284 #ifdef TARGET_NR_timer_create
11285 case TARGET_NR_timer_create
:
11287 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11289 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
11292 int timer_index
= next_free_host_timer();
11294 if (timer_index
< 0) {
11295 ret
= -TARGET_EAGAIN
;
11297 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
11300 phost_sevp
= &host_sevp
;
11301 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
11307 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
11311 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
11312 return -TARGET_EFAULT
;
11320 #ifdef TARGET_NR_timer_settime
11321 case TARGET_NR_timer_settime
:
11323 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11324 * struct itimerspec * old_value */
11325 target_timer_t timerid
= get_timer_id(arg1
);
11329 } else if (arg3
== 0) {
11330 ret
= -TARGET_EINVAL
;
11332 timer_t htimer
= g_posix_timers
[timerid
];
11333 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
11335 if (target_to_host_itimerspec(&hspec_new
, arg3
)) {
11336 return -TARGET_EFAULT
;
11339 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
11340 if (arg4
&& host_to_target_itimerspec(arg4
, &hspec_old
)) {
11341 return -TARGET_EFAULT
;
11348 #ifdef TARGET_NR_timer_gettime
11349 case TARGET_NR_timer_gettime
:
11351 /* args: timer_t timerid, struct itimerspec *curr_value */
11352 target_timer_t timerid
= get_timer_id(arg1
);
11356 } else if (!arg2
) {
11357 ret
= -TARGET_EFAULT
;
11359 timer_t htimer
= g_posix_timers
[timerid
];
11360 struct itimerspec hspec
;
11361 ret
= get_errno(timer_gettime(htimer
, &hspec
));
11363 if (host_to_target_itimerspec(arg2
, &hspec
)) {
11364 ret
= -TARGET_EFAULT
;
11371 #ifdef TARGET_NR_timer_getoverrun
11372 case TARGET_NR_timer_getoverrun
:
11374 /* args: timer_t timerid */
11375 target_timer_t timerid
= get_timer_id(arg1
);
11380 timer_t htimer
= g_posix_timers
[timerid
];
11381 ret
= get_errno(timer_getoverrun(htimer
));
11383 fd_trans_unregister(ret
);
11388 #ifdef TARGET_NR_timer_delete
11389 case TARGET_NR_timer_delete
:
11391 /* args: timer_t timerid */
11392 target_timer_t timerid
= get_timer_id(arg1
);
11397 timer_t htimer
= g_posix_timers
[timerid
];
11398 ret
= get_errno(timer_delete(htimer
));
11399 g_posix_timers
[timerid
] = 0;
11405 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11406 case TARGET_NR_timerfd_create
:
11407 return get_errno(timerfd_create(arg1
,
11408 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
11411 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11412 case TARGET_NR_timerfd_gettime
:
11414 struct itimerspec its_curr
;
11416 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
11418 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
11419 return -TARGET_EFAULT
;
11425 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11426 case TARGET_NR_timerfd_settime
:
11428 struct itimerspec its_new
, its_old
, *p_new
;
11431 if (target_to_host_itimerspec(&its_new
, arg3
)) {
11432 return -TARGET_EFAULT
;
11439 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
11441 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
11442 return -TARGET_EFAULT
;
11448 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11449 case TARGET_NR_ioprio_get
:
11450 return get_errno(ioprio_get(arg1
, arg2
));
11453 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11454 case TARGET_NR_ioprio_set
:
11455 return get_errno(ioprio_set(arg1
, arg2
, arg3
));
11458 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11459 case TARGET_NR_setns
:
11460 return get_errno(setns(arg1
, arg2
));
11462 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11463 case TARGET_NR_unshare
:
11464 return get_errno(unshare(arg1
));
11466 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
11467 case TARGET_NR_kcmp
:
11468 return get_errno(kcmp(arg1
, arg2
, arg3
, arg4
, arg5
));
11470 #ifdef TARGET_NR_swapcontext
11471 case TARGET_NR_swapcontext
:
11472 /* PowerPC specific. */
11473 return do_swapcontext(cpu_env
, arg1
, arg2
, arg3
);
11477 qemu_log_mask(LOG_UNIMP
, "Unsupported syscall: %d\n", num
);
11478 return -TARGET_ENOSYS
;
11483 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
11484 abi_long arg2
, abi_long arg3
, abi_long arg4
,
11485 abi_long arg5
, abi_long arg6
, abi_long arg7
,
11488 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
11491 #ifdef DEBUG_ERESTARTSYS
11492 /* Debug-only code for exercising the syscall-restart code paths
11493 * in the per-architecture cpu main loops: restart every syscall
11494 * the guest makes once before letting it through.
11500 return -TARGET_ERESTARTSYS
;
11505 trace_guest_user_syscall(cpu
, num
, arg1
, arg2
, arg3
, arg4
,
11506 arg5
, arg6
, arg7
, arg8
);
11508 if (unlikely(do_strace
)) {
11509 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
11510 ret
= do_syscall1(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
,
11511 arg5
, arg6
, arg7
, arg8
);
11512 print_syscall_ret(num
, ret
);
11514 ret
= do_syscall1(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
,
11515 arg5
, arg6
, arg7
, arg8
);
11518 trace_guest_user_syscall_ret(cpu
, num
, ret
);