4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
29 #include <sys/mount.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
36 #include <linux/capability.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
43 #include <sys/times.h>
46 #include <sys/statfs.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
58 #include "qemu-common.h"
60 #include <sys/timerfd.h>
66 #include <sys/eventfd.h>
69 #include <sys/epoll.h>
72 #include "qemu/xattr.h"
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
91 #include <linux/mtio.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
97 #if defined(CONFIG_USBFS)
98 #include <linux/usbdevice_fs.h>
99 #include <linux/usb/ch9.h>
101 #include <linux/vt.h>
102 #include <linux/dm-ioctl.h>
103 #include <linux/reboot.h>
104 #include <linux/route.h>
105 #include <linux/filter.h>
106 #include <linux/blkpg.h>
107 #include <netpacket/packet.h>
108 #include <linux/netlink.h>
109 #include "linux_loop.h"
113 #include "fd-trans.h"
116 #define CLONE_IO 0x80000000 /* Clone io context */
119 /* We can't directly call the host clone syscall, because this will
120 * badly confuse libc (breaking mutexes, for example). So we must
121 * divide clone flags into:
122 * * flag combinations that look like pthread_create()
123 * * flag combinations that look like fork()
124 * * flags we can implement within QEMU itself
125 * * flags we can't support and will return an error for
127 /* For thread creation, all these flags must be present; for
128 * fork, none must be present.
130 #define CLONE_THREAD_FLAGS \
131 (CLONE_VM | CLONE_FS | CLONE_FILES | \
132 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
134 /* These flags are ignored:
135 * CLONE_DETACHED is now ignored by the kernel;
136 * CLONE_IO is just an optimisation hint to the I/O scheduler
138 #define CLONE_IGNORED_FLAGS \
139 (CLONE_DETACHED | CLONE_IO)
141 /* Flags for fork which we can implement within QEMU itself */
142 #define CLONE_OPTIONAL_FORK_FLAGS \
143 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
144 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
146 /* Flags for thread creation which we can implement within QEMU itself */
147 #define CLONE_OPTIONAL_THREAD_FLAGS \
148 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
149 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
151 #define CLONE_INVALID_FORK_FLAGS \
152 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
154 #define CLONE_INVALID_THREAD_FLAGS \
155 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
156 CLONE_IGNORED_FLAGS))
158 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
159 * have almost all been allocated. We cannot support any of
160 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
161 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
162 * The checks against the invalid thread masks above will catch these.
163 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
166 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
167 * once. This exercises the codepaths for restart.
169 //#define DEBUG_ERESTARTSYS
171 //#include <linux/msdos_fs.h>
172 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
173 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
183 #define _syscall0(type,name) \
184 static type name (void) \
186 return syscall(__NR_##name); \
189 #define _syscall1(type,name,type1,arg1) \
190 static type name (type1 arg1) \
192 return syscall(__NR_##name, arg1); \
195 #define _syscall2(type,name,type1,arg1,type2,arg2) \
196 static type name (type1 arg1,type2 arg2) \
198 return syscall(__NR_##name, arg1, arg2); \
201 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
202 static type name (type1 arg1,type2 arg2,type3 arg3) \
204 return syscall(__NR_##name, arg1, arg2, arg3); \
207 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
208 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
210 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
213 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
215 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
217 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
221 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
222 type5,arg5,type6,arg6) \
223 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
226 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
230 #define __NR_sys_uname __NR_uname
231 #define __NR_sys_getcwd1 __NR_getcwd
232 #define __NR_sys_getdents __NR_getdents
233 #define __NR_sys_getdents64 __NR_getdents64
234 #define __NR_sys_getpriority __NR_getpriority
235 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
236 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
237 #define __NR_sys_syslog __NR_syslog
238 #define __NR_sys_futex __NR_futex
239 #define __NR_sys_inotify_init __NR_inotify_init
240 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
241 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
243 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
244 #define __NR__llseek __NR_lseek
247 /* Newer kernel ports have llseek() instead of _llseek() */
248 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
249 #define TARGET_NR__llseek TARGET_NR_llseek
252 #define __NR_sys_gettid __NR_gettid
253 _syscall0(int, sys_gettid
)
255 /* For the 64-bit guest on 32-bit host case we must emulate
256 * getdents using getdents64, because otherwise the host
257 * might hand us back more dirent records than we can fit
258 * into the guest buffer after structure format conversion.
259 * Otherwise we emulate getdents with getdents if the host has it.
261 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
262 #define EMULATE_GETDENTS_WITH_GETDENTS
265 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
266 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
268 #if (defined(TARGET_NR_getdents) && \
269 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
270 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
271 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
273 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
274 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
275 loff_t
*, res
, uint
, wh
);
277 _syscall3(int, sys_rt_sigqueueinfo
, pid_t
, pid
, int, sig
, siginfo_t
*, uinfo
)
278 _syscall4(int, sys_rt_tgsigqueueinfo
, pid_t
, pid
, pid_t
, tid
, int, sig
,
280 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
281 #ifdef __NR_exit_group
282 _syscall1(int,exit_group
,int,error_code
)
284 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
285 _syscall1(int,set_tid_address
,int *,tidptr
)
287 #if defined(TARGET_NR_futex) && defined(__NR_futex)
288 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
289 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
291 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
292 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
293 unsigned long *, user_mask_ptr
);
294 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
295 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
296 unsigned long *, user_mask_ptr
);
297 #define __NR_sys_getcpu __NR_getcpu
298 _syscall3(int, sys_getcpu
, unsigned *, cpu
, unsigned *, node
, void *, tcache
);
299 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
301 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
302 struct __user_cap_data_struct
*, data
);
303 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
304 struct __user_cap_data_struct
*, data
);
305 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
306 _syscall2(int, ioprio_get
, int, which
, int, who
)
308 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
309 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
311 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
312 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
315 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
316 _syscall5(int, kcmp
, pid_t
, pid1
, pid_t
, pid2
, int, type
,
317 unsigned long, idx1
, unsigned long, idx2
)
320 static bitmask_transtbl fcntl_flags_tbl
[] = {
321 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
322 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
323 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
324 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
325 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
326 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
327 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
328 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
329 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
330 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
331 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
332 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
333 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
334 #if defined(O_DIRECT)
335 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
337 #if defined(O_NOATIME)
338 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
340 #if defined(O_CLOEXEC)
341 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
344 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
346 #if defined(O_TMPFILE)
347 { TARGET_O_TMPFILE
, TARGET_O_TMPFILE
, O_TMPFILE
, O_TMPFILE
},
349 /* Don't terminate the list prematurely on 64-bit host+guest. */
350 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
351 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
/* Wrapper giving getcwd() the return convention of the getcwd syscall:
 * the length of the path string *including* the trailing NUL on success,
 * or -1 with errno set (e.g. ERANGE for a too-small buffer) on failure.
 */
static int sys_getcwd1(char *buf, size_t size)
{
    const char *dir = getcwd(buf, size);

    if (dir == NULL) {
        /* errno has already been set by getcwd() */
        return -1;
    }
    /* The raw syscall reports the byte count, NUL included. */
    return strlen(dir) + 1;
}
365 #ifdef TARGET_NR_utimensat
366 #if defined(__NR_utimensat)
367 #define __NR_sys_utimensat __NR_utimensat
368 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
369 const struct timespec
*,tsp
,int,flags
)
/* Fallback used when the host kernel headers lack __NR_utimensat:
 * always fail with ENOSYS so the caller sees "syscall unavailable".
 */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    /* None of the arguments can be honoured without the real syscall. */
    errno = ENOSYS;
    return -1;
}
378 #endif /* TARGET_NR_utimensat */
380 #ifdef TARGET_NR_renameat2
381 #if defined(__NR_renameat2)
382 #define __NR_sys_renameat2 __NR_renameat2
383 _syscall5(int, sys_renameat2
, int, oldfd
, const char *, old
, int, newfd
,
384 const char *, new, unsigned int, flags
)
386 static int sys_renameat2(int oldfd
, const char *old
,
387 int newfd
, const char *new, int flags
)
390 return renameat(oldfd
, old
, newfd
, new);
396 #endif /* TARGET_NR_renameat2 */
398 #ifdef CONFIG_INOTIFY
399 #include <sys/inotify.h>
401 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin pass-through so the emulation code has a sys_-named entry point
 * for the host inotify_init syscall.
 */
static int sys_inotify_init(void)
{
    return inotify_init();
}
407 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Pass-through wrapper for the host inotify_add_watch syscall; returns
 * the new watch descriptor, or -1 with errno set.
 */
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return inotify_add_watch(fd, pathname, mask);
}
413 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Pass-through wrapper for the host inotify_rm_watch syscall; returns 0
 * on success, or -1 with errno set.
 */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return inotify_rm_watch(fd, wd);
}
419 #ifdef CONFIG_INOTIFY1
420 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Pass-through wrapper for the host inotify_init1 syscall (flags may
 * include IN_NONBLOCK / IN_CLOEXEC); returns the new fd or -1.
 */
static int sys_inotify_init1(int flags)
{
    return inotify_init1(flags);
}
428 /* Userspace can usually survive runtime without inotify */
429 #undef TARGET_NR_inotify_init
430 #undef TARGET_NR_inotify_init1
431 #undef TARGET_NR_inotify_add_watch
432 #undef TARGET_NR_inotify_rm_watch
433 #endif /* CONFIG_INOTIFY */
435 #if defined(TARGET_NR_prlimit64)
436 #ifndef __NR_prlimit64
437 # define __NR_prlimit64 -1
439 #define __NR_sys_prlimit64 __NR_prlimit64
440 /* The glibc rlimit structure may not be that used by the underlying syscall */
441 struct host_rlimit64
{
445 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
446 const struct host_rlimit64
*, new_limit
,
447 struct host_rlimit64
*, old_limit
)
451 #if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time.
 * A slot holding 0 is free; a nonzero value means in use (the code below
 * stores the placeholder (timer_t)1 to claim a slot before the real
 * host timer id is created).
 */
static timer_t g_posix_timers[32] = { 0, } ;

/* Claim and return the index of the first free slot in g_posix_timers,
 * or -1 if all 32 slots are occupied.  The slot is immediately marked
 * busy with the sentinel (timer_t)1.
 */
static inline int next_free_host_timer(void)
{
    int k;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
/* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers.
 *
 * regpairs_aligned() returns nonzero when the current target ABI passes
 * 64-bit syscall arguments in an aligned (even/odd) register pair, which
 * the syscall argument-marshalling code must compensate for.  `num` is the
 * target syscall number (only consulted on SH4).
 */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env, int num)
{
    /* Only the EABI variant of the ARM ABI aligns register pairs. */
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_SH4)
/* SH4 doesn't align register pairs, except for p{read,write}64 */
static inline int regpairs_aligned(void *cpu_env, int num)
{
    switch (num) {
    case TARGET_NR_pread64:
    case TARGET_NR_pwrite64:
        return 1;

    default:
        return 0;
    }
}
#elif defined(TARGET_XTENSA)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
#endif
501 #define ERRNO_TABLE_SIZE 1200
503 /* target_to_host_errno_table[] is initialized from
504 * host_to_target_errno_table[] in syscall_init(). */
505 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
509 * This list is the union of errno values overridden in asm-<arch>/errno.h
510 * minus the errnos that are not actually generic to all archs.
512 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
513 [EAGAIN
] = TARGET_EAGAIN
,
514 [EIDRM
] = TARGET_EIDRM
,
515 [ECHRNG
] = TARGET_ECHRNG
,
516 [EL2NSYNC
] = TARGET_EL2NSYNC
,
517 [EL3HLT
] = TARGET_EL3HLT
,
518 [EL3RST
] = TARGET_EL3RST
,
519 [ELNRNG
] = TARGET_ELNRNG
,
520 [EUNATCH
] = TARGET_EUNATCH
,
521 [ENOCSI
] = TARGET_ENOCSI
,
522 [EL2HLT
] = TARGET_EL2HLT
,
523 [EDEADLK
] = TARGET_EDEADLK
,
524 [ENOLCK
] = TARGET_ENOLCK
,
525 [EBADE
] = TARGET_EBADE
,
526 [EBADR
] = TARGET_EBADR
,
527 [EXFULL
] = TARGET_EXFULL
,
528 [ENOANO
] = TARGET_ENOANO
,
529 [EBADRQC
] = TARGET_EBADRQC
,
530 [EBADSLT
] = TARGET_EBADSLT
,
531 [EBFONT
] = TARGET_EBFONT
,
532 [ENOSTR
] = TARGET_ENOSTR
,
533 [ENODATA
] = TARGET_ENODATA
,
534 [ETIME
] = TARGET_ETIME
,
535 [ENOSR
] = TARGET_ENOSR
,
536 [ENONET
] = TARGET_ENONET
,
537 [ENOPKG
] = TARGET_ENOPKG
,
538 [EREMOTE
] = TARGET_EREMOTE
,
539 [ENOLINK
] = TARGET_ENOLINK
,
540 [EADV
] = TARGET_EADV
,
541 [ESRMNT
] = TARGET_ESRMNT
,
542 [ECOMM
] = TARGET_ECOMM
,
543 [EPROTO
] = TARGET_EPROTO
,
544 [EDOTDOT
] = TARGET_EDOTDOT
,
545 [EMULTIHOP
] = TARGET_EMULTIHOP
,
546 [EBADMSG
] = TARGET_EBADMSG
,
547 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
548 [EOVERFLOW
] = TARGET_EOVERFLOW
,
549 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
550 [EBADFD
] = TARGET_EBADFD
,
551 [EREMCHG
] = TARGET_EREMCHG
,
552 [ELIBACC
] = TARGET_ELIBACC
,
553 [ELIBBAD
] = TARGET_ELIBBAD
,
554 [ELIBSCN
] = TARGET_ELIBSCN
,
555 [ELIBMAX
] = TARGET_ELIBMAX
,
556 [ELIBEXEC
] = TARGET_ELIBEXEC
,
557 [EILSEQ
] = TARGET_EILSEQ
,
558 [ENOSYS
] = TARGET_ENOSYS
,
559 [ELOOP
] = TARGET_ELOOP
,
560 [ERESTART
] = TARGET_ERESTART
,
561 [ESTRPIPE
] = TARGET_ESTRPIPE
,
562 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
563 [EUSERS
] = TARGET_EUSERS
,
564 [ENOTSOCK
] = TARGET_ENOTSOCK
,
565 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
566 [EMSGSIZE
] = TARGET_EMSGSIZE
,
567 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
568 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
569 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
570 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
571 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
572 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
573 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
574 [EADDRINUSE
] = TARGET_EADDRINUSE
,
575 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
576 [ENETDOWN
] = TARGET_ENETDOWN
,
577 [ENETUNREACH
] = TARGET_ENETUNREACH
,
578 [ENETRESET
] = TARGET_ENETRESET
,
579 [ECONNABORTED
] = TARGET_ECONNABORTED
,
580 [ECONNRESET
] = TARGET_ECONNRESET
,
581 [ENOBUFS
] = TARGET_ENOBUFS
,
582 [EISCONN
] = TARGET_EISCONN
,
583 [ENOTCONN
] = TARGET_ENOTCONN
,
584 [EUCLEAN
] = TARGET_EUCLEAN
,
585 [ENOTNAM
] = TARGET_ENOTNAM
,
586 [ENAVAIL
] = TARGET_ENAVAIL
,
587 [EISNAM
] = TARGET_EISNAM
,
588 [EREMOTEIO
] = TARGET_EREMOTEIO
,
589 [EDQUOT
] = TARGET_EDQUOT
,
590 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
591 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
592 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
593 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
594 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
595 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
596 [EALREADY
] = TARGET_EALREADY
,
597 [EINPROGRESS
] = TARGET_EINPROGRESS
,
598 [ESTALE
] = TARGET_ESTALE
,
599 [ECANCELED
] = TARGET_ECANCELED
,
600 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
601 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
603 [ENOKEY
] = TARGET_ENOKEY
,
606 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
609 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
612 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
615 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
617 #ifdef ENOTRECOVERABLE
618 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
621 [ENOMSG
] = TARGET_ENOMSG
,
624 [ERFKILL
] = TARGET_ERFKILL
,
627 [EHWPOISON
] = TARGET_EHWPOISON
,
631 static inline int host_to_target_errno(int err
)
633 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
634 host_to_target_errno_table
[err
]) {
635 return host_to_target_errno_table
[err
];
640 static inline int target_to_host_errno(int err
)
642 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
643 target_to_host_errno_table
[err
]) {
644 return target_to_host_errno_table
[err
];
/* Convert a raw host syscall result into the target convention:
 * -1 becomes the negated, target-numbered errno; any other value is
 * returned unchanged.  Must be called immediately after the syscall,
 * while errno is still valid.
 */
static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1) {
        return -host_to_target_errno(errno);
    } else {
        return ret;
    }
}
/* Return a human-readable string for a target errno value, including the
 * two QEMU-internal pseudo-errnos.  Returns NULL for values outside the
 * translation table's range.
 */
const char *target_strerror(int err)
{
    if (err == TARGET_ERESTARTSYS) {
        return "To be restarted";
    }
    if (err == TARGET_QEMU_ESIGRETURN) {
        return "Successful exit from sigreturn";
    }

    if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
        return NULL;
    }
    /* Map back to the host numbering so the host strerror() applies. */
    return strerror(target_to_host_errno(err));
}
672 #define safe_syscall0(type, name) \
673 static type safe_##name(void) \
675 return safe_syscall(__NR_##name); \
678 #define safe_syscall1(type, name, type1, arg1) \
679 static type safe_##name(type1 arg1) \
681 return safe_syscall(__NR_##name, arg1); \
684 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
685 static type safe_##name(type1 arg1, type2 arg2) \
687 return safe_syscall(__NR_##name, arg1, arg2); \
690 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
691 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
693 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
696 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
698 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
700 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
703 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
704 type4, arg4, type5, arg5) \
705 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
708 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
711 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
712 type4, arg4, type5, arg5, type6, arg6) \
713 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
714 type5 arg5, type6 arg6) \
716 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
719 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
720 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
721 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
722 int, flags
, mode_t
, mode
)
723 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
724 struct rusage
*, rusage
)
725 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
726 int, options
, struct rusage
*, rusage
)
727 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
728 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
729 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
730 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
731 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
733 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
734 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
736 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
737 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
738 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
739 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
740 safe_syscall2(int, tkill
, int, tid
, int, sig
)
741 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
742 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
743 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
744 safe_syscall5(ssize_t
, preadv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
745 unsigned long, pos_l
, unsigned long, pos_h
)
746 safe_syscall5(ssize_t
, pwritev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
747 unsigned long, pos_l
, unsigned long, pos_h
)
748 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
750 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
751 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
752 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
753 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
754 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
755 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
756 safe_syscall2(int, flock
, int, fd
, int, operation
)
757 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
758 const struct timespec
*, uts
, size_t, sigsetsize
)
759 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
761 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
762 struct timespec
*, rem
)
763 #ifdef TARGET_NR_clock_nanosleep
764 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
765 const struct timespec
*, req
, struct timespec
*, rem
)
768 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
770 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
771 long, msgtype
, int, flags
)
772 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
773 unsigned, nsops
, const struct timespec
*, timeout
)
775 /* This host kernel architecture uses a single ipc syscall; fake up
776 * wrappers for the sub-operations to hide this implementation detail.
777 * Annoyingly we can't include linux/ipc.h to get the constant definitions
778 * for the call parameter because some structs in there conflict with the
779 * sys/ipc.h ones. So we just define them here, and rely on them being
780 * the same for all host architectures.
782 #define Q_SEMTIMEDOP 4
785 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
787 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
788 void *, ptr
, long, fifth
)
/* msgsnd emulated via the host's multiplexed ipc syscall (version 0 of
 * Q_MSGSND).  Note the ipc calling convention puts sz/flags before the
 * message pointer.  The cast drops const because safe_ipc's ptr argument
 * is a plain void *.
 */
static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
{
    return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
}
/* msgrcv emulated via the host's multiplexed ipc syscall.  Version 1 of
 * Q_MSGRCV lets the message type be passed as the fifth argument rather
 * than through an indirection struct — NOTE(review): presumably this
 * matches the kernel's "new style" msgrcv ABI; confirm against
 * linux/ipc.h if touching this.
 */
static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
{
    return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
}
/* semtimedop emulated via the host's multiplexed ipc syscall (version 0
 * of Q_SEMTIMEDOP).  The timeout pointer travels in the "fifth" slot,
 * smuggled through a long per the ipc syscall's convention.
 */
static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
                           const struct timespec *timeout)
{
    return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
                    (long)timeout);
}
804 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
805 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
806 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
807 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
808 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
810 /* We do ioctl like this rather than via safe_syscall3 to preserve the
811 * "third argument might be integer or pointer or not present" behaviour of
814 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
815 /* Similarly for fcntl. Note that callers must always:
816 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
817 * use the flock64 struct rather than unsuffixed flock
818 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
821 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
823 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
/* Convert a host socket type value (SOCK_STREAM/SOCK_DGRAM plus the
 * SOCK_CLOEXEC/SOCK_NONBLOCK modifier bits) into the target's encoding.
 * Unrecognised base types in the low nibble pass through unchanged.
 */
static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}
/* Guest heap bookkeeping for the brk emulation below:
 *   target_brk          — current guest program break
 *   target_original_brk — break at process start (brk may never go below)
 *   brk_page            — first host-page-aligned address past the break,
 *                         i.e. the top of memory already reserved for the heap
 */
static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

/* Record the guest's initial program break (called at process setup).
 * Both the break and the page limit start host-page-aligned.
 */
void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}
867 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
868 #define DEBUGF_BRK(message, args...)
/* do_brk() must return target values and target errnos.
 *
 * Emulates the guest brk syscall: a query (new_brk == 0) or an attempt to
 * move the break to new_brk.  On failure the previous break is returned
 * unchanged (except on Alpha, which expects a real errno).
 */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    /* brk(0) is a query: report the current break. */
    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    /* The break may never move below its initial value. */
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done...  */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contains garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    } else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
/* Unpack a guest fd_set (an array of abi_ulong words, n bits used) from
 * guest memory at target_fds_addr into the host fd_set *fds.
 * Returns 0 on success, -TARGET_EFAULT if the guest memory is unmapped.
 */
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    /* Number of abi_ulong words needed to hold n bits. */
    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}
/* Like copy_from_user_fdset(), but also handles a NULL guest pointer:
 * when target_fds_addr is 0, *fds_ptr is set to NULL (as select() allows);
 * otherwise *fds_ptr is pointed at the filled-in host set.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}
/* Pack the host fd_set *fds (n significant bits) back into guest memory
 * at target_fds_addr as an array of abi_ulong words.
 * Returns 0 on success, -TARGET_EFAULT if the guest memory is unmapped.
 */
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    /* Number of abi_ulong words needed to hold n bits. */
    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* Re-pack one bit per fd into the current word. */
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
/* Host clock tick rate: Alpha Linux historically uses HZ=1024, everything
 * else 100 (this is the value returned by times()/sysconf, not the kernel
 * scheduling rate).
 */
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

/* Rescale a host clock_t tick count to the target's HZ.  The int64_t cast
 * avoids overflow in the intermediate multiplication.
 */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
/* Copy a host struct rusage into guest memory at target_addr, byte-swapping
 * every field to the target's endianness with tswapal().
 * Returns 0 on success, -TARGET_EFAULT if the guest struct is unmapped.
 */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
1069 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1071 abi_ulong target_rlim_swap
;
1074 target_rlim_swap
= tswapal(target_rlim
);
1075 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1076 return RLIM_INFINITY
;
1078 result
= target_rlim_swap
;
1079 if (target_rlim_swap
!= (rlim_t
)result
)
1080 return RLIM_INFINITY
;
1085 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1087 abi_ulong target_rlim_swap
;
1090 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1091 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1093 target_rlim_swap
= rlim
;
1094 result
= tswapal(target_rlim_swap
);
1099 static inline int target_to_host_resource(int code
)
1102 case TARGET_RLIMIT_AS
:
1104 case TARGET_RLIMIT_CORE
:
1106 case TARGET_RLIMIT_CPU
:
1108 case TARGET_RLIMIT_DATA
:
1110 case TARGET_RLIMIT_FSIZE
:
1111 return RLIMIT_FSIZE
;
1112 case TARGET_RLIMIT_LOCKS
:
1113 return RLIMIT_LOCKS
;
1114 case TARGET_RLIMIT_MEMLOCK
:
1115 return RLIMIT_MEMLOCK
;
1116 case TARGET_RLIMIT_MSGQUEUE
:
1117 return RLIMIT_MSGQUEUE
;
1118 case TARGET_RLIMIT_NICE
:
1120 case TARGET_RLIMIT_NOFILE
:
1121 return RLIMIT_NOFILE
;
1122 case TARGET_RLIMIT_NPROC
:
1123 return RLIMIT_NPROC
;
1124 case TARGET_RLIMIT_RSS
:
1126 case TARGET_RLIMIT_RTPRIO
:
1127 return RLIMIT_RTPRIO
;
1128 case TARGET_RLIMIT_SIGPENDING
:
1129 return RLIMIT_SIGPENDING
;
1130 case TARGET_RLIMIT_STACK
:
1131 return RLIMIT_STACK
;
1137 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1138 abi_ulong target_tv_addr
)
1140 struct target_timeval
*target_tv
;
1142 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
1143 return -TARGET_EFAULT
;
1145 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1146 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1148 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1153 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1154 const struct timeval
*tv
)
1156 struct target_timeval
*target_tv
;
1158 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
1159 return -TARGET_EFAULT
;
1161 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1162 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1164 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1169 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1170 abi_ulong target_tz_addr
)
1172 struct target_timezone
*target_tz
;
1174 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1175 return -TARGET_EFAULT
;
1178 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1179 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1181 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1186 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1189 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1190 abi_ulong target_mq_attr_addr
)
1192 struct target_mq_attr
*target_mq_attr
;
1194 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1195 target_mq_attr_addr
, 1))
1196 return -TARGET_EFAULT
;
1198 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1199 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1200 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1201 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1203 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1208 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1209 const struct mq_attr
*attr
)
1211 struct target_mq_attr
*target_mq_attr
;
1213 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1214 target_mq_attr_addr
, 0))
1215 return -TARGET_EFAULT
;
1217 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1218 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1219 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1220 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1222 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1228 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1229 /* do_select() must return target values and target errnos. */
1230 static abi_long
do_select(int n
,
1231 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1232 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1234 fd_set rfds
, wfds
, efds
;
1235 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1237 struct timespec ts
, *ts_ptr
;
1240 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1244 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1248 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1253 if (target_tv_addr
) {
1254 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1255 return -TARGET_EFAULT
;
1256 ts
.tv_sec
= tv
.tv_sec
;
1257 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1263 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1266 if (!is_error(ret
)) {
1267 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1268 return -TARGET_EFAULT
;
1269 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1270 return -TARGET_EFAULT
;
1271 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1272 return -TARGET_EFAULT
;
1274 if (target_tv_addr
) {
1275 tv
.tv_sec
= ts
.tv_sec
;
1276 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1277 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1278 return -TARGET_EFAULT
;
1286 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1287 static abi_long
do_old_select(abi_ulong arg1
)
1289 struct target_sel_arg_struct
*sel
;
1290 abi_ulong inp
, outp
, exp
, tvp
;
1293 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1)) {
1294 return -TARGET_EFAULT
;
1297 nsel
= tswapal(sel
->n
);
1298 inp
= tswapal(sel
->inp
);
1299 outp
= tswapal(sel
->outp
);
1300 exp
= tswapal(sel
->exp
);
1301 tvp
= tswapal(sel
->tvp
);
1303 unlock_user_struct(sel
, arg1
, 0);
1305 return do_select(nsel
, inp
, outp
, exp
, tvp
);
1310 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1313 return pipe2(host_pipe
, flags
);
1319 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1320 int flags
, int is_pipe2
)
1324 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1327 return get_errno(ret
);
1329 /* Several targets have special calling conventions for the original
1330 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1332 #if defined(TARGET_ALPHA)
1333 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1334 return host_pipe
[0];
1335 #elif defined(TARGET_MIPS)
1336 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1337 return host_pipe
[0];
1338 #elif defined(TARGET_SH4)
1339 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1340 return host_pipe
[0];
1341 #elif defined(TARGET_SPARC)
1342 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1343 return host_pipe
[0];
1347 if (put_user_s32(host_pipe
[0], pipedes
)
1348 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1349 return -TARGET_EFAULT
;
1350 return get_errno(ret
);
1353 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1354 abi_ulong target_addr
,
1357 struct target_ip_mreqn
*target_smreqn
;
1359 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1361 return -TARGET_EFAULT
;
1362 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1363 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1364 if (len
== sizeof(struct target_ip_mreqn
))
1365 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1366 unlock_user(target_smreqn
, target_addr
, 0);
1371 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1372 abi_ulong target_addr
,
1375 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1376 sa_family_t sa_family
;
1377 struct target_sockaddr
*target_saddr
;
1379 if (fd_trans_target_to_host_addr(fd
)) {
1380 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1383 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1385 return -TARGET_EFAULT
;
1387 sa_family
= tswap16(target_saddr
->sa_family
);
1389 /* Oops. The caller might send a incomplete sun_path; sun_path
1390 * must be terminated by \0 (see the manual page), but
1391 * unfortunately it is quite common to specify sockaddr_un
1392 * length as "strlen(x->sun_path)" while it should be
1393 * "strlen(...) + 1". We'll fix that here if needed.
1394 * Linux kernel has a similar feature.
1397 if (sa_family
== AF_UNIX
) {
1398 if (len
< unix_maxlen
&& len
> 0) {
1399 char *cp
= (char*)target_saddr
;
1401 if ( cp
[len
-1] && !cp
[len
] )
1404 if (len
> unix_maxlen
)
1408 memcpy(addr
, target_saddr
, len
);
1409 addr
->sa_family
= sa_family
;
1410 if (sa_family
== AF_NETLINK
) {
1411 struct sockaddr_nl
*nladdr
;
1413 nladdr
= (struct sockaddr_nl
*)addr
;
1414 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1415 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1416 } else if (sa_family
== AF_PACKET
) {
1417 struct target_sockaddr_ll
*lladdr
;
1419 lladdr
= (struct target_sockaddr_ll
*)addr
;
1420 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1421 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1423 unlock_user(target_saddr
, target_addr
, 0);
1428 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1429 struct sockaddr
*addr
,
1432 struct target_sockaddr
*target_saddr
;
1439 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1441 return -TARGET_EFAULT
;
1442 memcpy(target_saddr
, addr
, len
);
1443 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1444 sizeof(target_saddr
->sa_family
)) {
1445 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1447 if (addr
->sa_family
== AF_NETLINK
&& len
>= sizeof(struct sockaddr_nl
)) {
1448 struct sockaddr_nl
*target_nl
= (struct sockaddr_nl
*)target_saddr
;
1449 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1450 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1451 } else if (addr
->sa_family
== AF_PACKET
) {
1452 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1453 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1454 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1455 } else if (addr
->sa_family
== AF_INET6
&&
1456 len
>= sizeof(struct target_sockaddr_in6
)) {
1457 struct target_sockaddr_in6
*target_in6
=
1458 (struct target_sockaddr_in6
*)target_saddr
;
1459 target_in6
->sin6_scope_id
= tswap16(target_in6
->sin6_scope_id
);
1461 unlock_user(target_saddr
, target_addr
, len
);
1466 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1467 struct target_msghdr
*target_msgh
)
1469 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1470 abi_long msg_controllen
;
1471 abi_ulong target_cmsg_addr
;
1472 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1473 socklen_t space
= 0;
1475 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1476 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1478 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1479 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1480 target_cmsg_start
= target_cmsg
;
1482 return -TARGET_EFAULT
;
1484 while (cmsg
&& target_cmsg
) {
1485 void *data
= CMSG_DATA(cmsg
);
1486 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1488 int len
= tswapal(target_cmsg
->cmsg_len
)
1489 - sizeof(struct target_cmsghdr
);
1491 space
+= CMSG_SPACE(len
);
1492 if (space
> msgh
->msg_controllen
) {
1493 space
-= CMSG_SPACE(len
);
1494 /* This is a QEMU bug, since we allocated the payload
1495 * area ourselves (unlike overflow in host-to-target
1496 * conversion, which is just the guest giving us a buffer
1497 * that's too small). It can't happen for the payload types
1498 * we currently support; if it becomes an issue in future
1499 * we would need to improve our allocation strategy to
1500 * something more intelligent than "twice the size of the
1501 * target buffer we're reading from".
1503 gemu_log("Host cmsg overflow\n");
1507 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1508 cmsg
->cmsg_level
= SOL_SOCKET
;
1510 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1512 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1513 cmsg
->cmsg_len
= CMSG_LEN(len
);
1515 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1516 int *fd
= (int *)data
;
1517 int *target_fd
= (int *)target_data
;
1518 int i
, numfds
= len
/ sizeof(int);
1520 for (i
= 0; i
< numfds
; i
++) {
1521 __get_user(fd
[i
], target_fd
+ i
);
1523 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1524 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1525 struct ucred
*cred
= (struct ucred
*)data
;
1526 struct target_ucred
*target_cred
=
1527 (struct target_ucred
*)target_data
;
1529 __get_user(cred
->pid
, &target_cred
->pid
);
1530 __get_user(cred
->uid
, &target_cred
->uid
);
1531 __get_user(cred
->gid
, &target_cred
->gid
);
1533 gemu_log("Unsupported ancillary data: %d/%d\n",
1534 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1535 memcpy(data
, target_data
, len
);
1538 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1539 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1542 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1544 msgh
->msg_controllen
= space
;
1548 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1549 struct msghdr
*msgh
)
1551 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1552 abi_long msg_controllen
;
1553 abi_ulong target_cmsg_addr
;
1554 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1555 socklen_t space
= 0;
1557 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1558 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1560 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1561 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1562 target_cmsg_start
= target_cmsg
;
1564 return -TARGET_EFAULT
;
1566 while (cmsg
&& target_cmsg
) {
1567 void *data
= CMSG_DATA(cmsg
);
1568 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1570 int len
= cmsg
->cmsg_len
- sizeof(struct cmsghdr
);
1571 int tgt_len
, tgt_space
;
1573 /* We never copy a half-header but may copy half-data;
1574 * this is Linux's behaviour in put_cmsg(). Note that
1575 * truncation here is a guest problem (which we report
1576 * to the guest via the CTRUNC bit), unlike truncation
1577 * in target_to_host_cmsg, which is a QEMU bug.
1579 if (msg_controllen
< sizeof(struct target_cmsghdr
)) {
1580 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1584 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1585 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1587 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1589 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1591 /* Payload types which need a different size of payload on
1592 * the target must adjust tgt_len here.
1595 switch (cmsg
->cmsg_level
) {
1597 switch (cmsg
->cmsg_type
) {
1599 tgt_len
= sizeof(struct target_timeval
);
1609 if (msg_controllen
< TARGET_CMSG_LEN(tgt_len
)) {
1610 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1611 tgt_len
= msg_controllen
- sizeof(struct target_cmsghdr
);
1614 /* We must now copy-and-convert len bytes of payload
1615 * into tgt_len bytes of destination space. Bear in mind
1616 * that in both source and destination we may be dealing
1617 * with a truncated value!
1619 switch (cmsg
->cmsg_level
) {
1621 switch (cmsg
->cmsg_type
) {
1624 int *fd
= (int *)data
;
1625 int *target_fd
= (int *)target_data
;
1626 int i
, numfds
= tgt_len
/ sizeof(int);
1628 for (i
= 0; i
< numfds
; i
++) {
1629 __put_user(fd
[i
], target_fd
+ i
);
1635 struct timeval
*tv
= (struct timeval
*)data
;
1636 struct target_timeval
*target_tv
=
1637 (struct target_timeval
*)target_data
;
1639 if (len
!= sizeof(struct timeval
) ||
1640 tgt_len
!= sizeof(struct target_timeval
)) {
1644 /* copy struct timeval to target */
1645 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1646 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1649 case SCM_CREDENTIALS
:
1651 struct ucred
*cred
= (struct ucred
*)data
;
1652 struct target_ucred
*target_cred
=
1653 (struct target_ucred
*)target_data
;
1655 __put_user(cred
->pid
, &target_cred
->pid
);
1656 __put_user(cred
->uid
, &target_cred
->uid
);
1657 __put_user(cred
->gid
, &target_cred
->gid
);
1666 switch (cmsg
->cmsg_type
) {
1669 uint32_t *v
= (uint32_t *)data
;
1670 uint32_t *t_int
= (uint32_t *)target_data
;
1672 if (len
!= sizeof(uint32_t) ||
1673 tgt_len
!= sizeof(uint32_t)) {
1676 __put_user(*v
, t_int
);
1682 struct sock_extended_err ee
;
1683 struct sockaddr_in offender
;
1685 struct errhdr_t
*errh
= (struct errhdr_t
*)data
;
1686 struct errhdr_t
*target_errh
=
1687 (struct errhdr_t
*)target_data
;
1689 if (len
!= sizeof(struct errhdr_t
) ||
1690 tgt_len
!= sizeof(struct errhdr_t
)) {
1693 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1694 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1695 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1696 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1697 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1698 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1699 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1700 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1701 (void *) &errh
->offender
, sizeof(errh
->offender
));
1710 switch (cmsg
->cmsg_type
) {
1713 uint32_t *v
= (uint32_t *)data
;
1714 uint32_t *t_int
= (uint32_t *)target_data
;
1716 if (len
!= sizeof(uint32_t) ||
1717 tgt_len
!= sizeof(uint32_t)) {
1720 __put_user(*v
, t_int
);
1726 struct sock_extended_err ee
;
1727 struct sockaddr_in6 offender
;
1729 struct errhdr6_t
*errh
= (struct errhdr6_t
*)data
;
1730 struct errhdr6_t
*target_errh
=
1731 (struct errhdr6_t
*)target_data
;
1733 if (len
!= sizeof(struct errhdr6_t
) ||
1734 tgt_len
!= sizeof(struct errhdr6_t
)) {
1737 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1738 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1739 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1740 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1741 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1742 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1743 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1744 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1745 (void *) &errh
->offender
, sizeof(errh
->offender
));
1755 gemu_log("Unsupported ancillary data: %d/%d\n",
1756 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1757 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1758 if (tgt_len
> len
) {
1759 memset(target_data
+ len
, 0, tgt_len
- len
);
1763 target_cmsg
->cmsg_len
= tswapal(TARGET_CMSG_LEN(tgt_len
));
1764 tgt_space
= TARGET_CMSG_SPACE(tgt_len
);
1765 if (msg_controllen
< tgt_space
) {
1766 tgt_space
= msg_controllen
;
1768 msg_controllen
-= tgt_space
;
1770 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1771 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1774 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1776 target_msgh
->msg_controllen
= tswapal(space
);
1780 /* do_setsockopt() Must return target values and target errnos. */
1781 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
1782 abi_ulong optval_addr
, socklen_t optlen
)
1786 struct ip_mreqn
*ip_mreq
;
1787 struct ip_mreq_source
*ip_mreq_source
;
1791 /* TCP options all take an 'int' value. */
1792 if (optlen
< sizeof(uint32_t))
1793 return -TARGET_EINVAL
;
1795 if (get_user_u32(val
, optval_addr
))
1796 return -TARGET_EFAULT
;
1797 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1804 case IP_ROUTER_ALERT
:
1808 case IP_MTU_DISCOVER
:
1815 case IP_MULTICAST_TTL
:
1816 case IP_MULTICAST_LOOP
:
1818 if (optlen
>= sizeof(uint32_t)) {
1819 if (get_user_u32(val
, optval_addr
))
1820 return -TARGET_EFAULT
;
1821 } else if (optlen
>= 1) {
1822 if (get_user_u8(val
, optval_addr
))
1823 return -TARGET_EFAULT
;
1825 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1827 case IP_ADD_MEMBERSHIP
:
1828 case IP_DROP_MEMBERSHIP
:
1829 if (optlen
< sizeof (struct target_ip_mreq
) ||
1830 optlen
> sizeof (struct target_ip_mreqn
))
1831 return -TARGET_EINVAL
;
1833 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
1834 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
1835 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
1838 case IP_BLOCK_SOURCE
:
1839 case IP_UNBLOCK_SOURCE
:
1840 case IP_ADD_SOURCE_MEMBERSHIP
:
1841 case IP_DROP_SOURCE_MEMBERSHIP
:
1842 if (optlen
!= sizeof (struct target_ip_mreq_source
))
1843 return -TARGET_EINVAL
;
1845 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
1846 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
1847 unlock_user (ip_mreq_source
, optval_addr
, 0);
1856 case IPV6_MTU_DISCOVER
:
1859 case IPV6_RECVPKTINFO
:
1860 case IPV6_UNICAST_HOPS
:
1861 case IPV6_MULTICAST_HOPS
:
1862 case IPV6_MULTICAST_LOOP
:
1864 case IPV6_RECVHOPLIMIT
:
1865 case IPV6_2292HOPLIMIT
:
1868 if (optlen
< sizeof(uint32_t)) {
1869 return -TARGET_EINVAL
;
1871 if (get_user_u32(val
, optval_addr
)) {
1872 return -TARGET_EFAULT
;
1874 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1875 &val
, sizeof(val
)));
1879 struct in6_pktinfo pki
;
1881 if (optlen
< sizeof(pki
)) {
1882 return -TARGET_EINVAL
;
1885 if (copy_from_user(&pki
, optval_addr
, sizeof(pki
))) {
1886 return -TARGET_EFAULT
;
1889 pki
.ipi6_ifindex
= tswap32(pki
.ipi6_ifindex
);
1891 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1892 &pki
, sizeof(pki
)));
1903 struct icmp6_filter icmp6f
;
1905 if (optlen
> sizeof(icmp6f
)) {
1906 optlen
= sizeof(icmp6f
);
1909 if (copy_from_user(&icmp6f
, optval_addr
, optlen
)) {
1910 return -TARGET_EFAULT
;
1913 for (val
= 0; val
< 8; val
++) {
1914 icmp6f
.data
[val
] = tswap32(icmp6f
.data
[val
]);
1917 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1929 /* those take an u32 value */
1930 if (optlen
< sizeof(uint32_t)) {
1931 return -TARGET_EINVAL
;
1934 if (get_user_u32(val
, optval_addr
)) {
1935 return -TARGET_EFAULT
;
1937 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1938 &val
, sizeof(val
)));
1945 case TARGET_SOL_SOCKET
:
1947 case TARGET_SO_RCVTIMEO
:
1951 optname
= SO_RCVTIMEO
;
1954 if (optlen
!= sizeof(struct target_timeval
)) {
1955 return -TARGET_EINVAL
;
1958 if (copy_from_user_timeval(&tv
, optval_addr
)) {
1959 return -TARGET_EFAULT
;
1962 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
1966 case TARGET_SO_SNDTIMEO
:
1967 optname
= SO_SNDTIMEO
;
1969 case TARGET_SO_ATTACH_FILTER
:
1971 struct target_sock_fprog
*tfprog
;
1972 struct target_sock_filter
*tfilter
;
1973 struct sock_fprog fprog
;
1974 struct sock_filter
*filter
;
1977 if (optlen
!= sizeof(*tfprog
)) {
1978 return -TARGET_EINVAL
;
1980 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
1981 return -TARGET_EFAULT
;
1983 if (!lock_user_struct(VERIFY_READ
, tfilter
,
1984 tswapal(tfprog
->filter
), 0)) {
1985 unlock_user_struct(tfprog
, optval_addr
, 1);
1986 return -TARGET_EFAULT
;
1989 fprog
.len
= tswap16(tfprog
->len
);
1990 filter
= g_try_new(struct sock_filter
, fprog
.len
);
1991 if (filter
== NULL
) {
1992 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
1993 unlock_user_struct(tfprog
, optval_addr
, 1);
1994 return -TARGET_ENOMEM
;
1996 for (i
= 0; i
< fprog
.len
; i
++) {
1997 filter
[i
].code
= tswap16(tfilter
[i
].code
);
1998 filter
[i
].jt
= tfilter
[i
].jt
;
1999 filter
[i
].jf
= tfilter
[i
].jf
;
2000 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2002 fprog
.filter
= filter
;
2004 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2005 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2008 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2009 unlock_user_struct(tfprog
, optval_addr
, 1);
2012 case TARGET_SO_BINDTODEVICE
:
2014 char *dev_ifname
, *addr_ifname
;
2016 if (optlen
> IFNAMSIZ
- 1) {
2017 optlen
= IFNAMSIZ
- 1;
2019 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2021 return -TARGET_EFAULT
;
2023 optname
= SO_BINDTODEVICE
;
2024 addr_ifname
= alloca(IFNAMSIZ
);
2025 memcpy(addr_ifname
, dev_ifname
, optlen
);
2026 addr_ifname
[optlen
] = 0;
2027 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2028 addr_ifname
, optlen
));
2029 unlock_user (dev_ifname
, optval_addr
, 0);
2032 case TARGET_SO_LINGER
:
2035 struct target_linger
*tlg
;
2037 if (optlen
!= sizeof(struct target_linger
)) {
2038 return -TARGET_EINVAL
;
2040 if (!lock_user_struct(VERIFY_READ
, tlg
, optval_addr
, 1)) {
2041 return -TARGET_EFAULT
;
2043 __get_user(lg
.l_onoff
, &tlg
->l_onoff
);
2044 __get_user(lg
.l_linger
, &tlg
->l_linger
);
2045 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, SO_LINGER
,
2047 unlock_user_struct(tlg
, optval_addr
, 0);
2050 /* Options with 'int' argument. */
2051 case TARGET_SO_DEBUG
:
2054 case TARGET_SO_REUSEADDR
:
2055 optname
= SO_REUSEADDR
;
2058 case TARGET_SO_REUSEPORT
:
2059 optname
= SO_REUSEPORT
;
2062 case TARGET_SO_TYPE
:
2065 case TARGET_SO_ERROR
:
2068 case TARGET_SO_DONTROUTE
:
2069 optname
= SO_DONTROUTE
;
2071 case TARGET_SO_BROADCAST
:
2072 optname
= SO_BROADCAST
;
2074 case TARGET_SO_SNDBUF
:
2075 optname
= SO_SNDBUF
;
2077 case TARGET_SO_SNDBUFFORCE
:
2078 optname
= SO_SNDBUFFORCE
;
2080 case TARGET_SO_RCVBUF
:
2081 optname
= SO_RCVBUF
;
2083 case TARGET_SO_RCVBUFFORCE
:
2084 optname
= SO_RCVBUFFORCE
;
2086 case TARGET_SO_KEEPALIVE
:
2087 optname
= SO_KEEPALIVE
;
2089 case TARGET_SO_OOBINLINE
:
2090 optname
= SO_OOBINLINE
;
2092 case TARGET_SO_NO_CHECK
:
2093 optname
= SO_NO_CHECK
;
2095 case TARGET_SO_PRIORITY
:
2096 optname
= SO_PRIORITY
;
2099 case TARGET_SO_BSDCOMPAT
:
2100 optname
= SO_BSDCOMPAT
;
2103 case TARGET_SO_PASSCRED
:
2104 optname
= SO_PASSCRED
;
2106 case TARGET_SO_PASSSEC
:
2107 optname
= SO_PASSSEC
;
2109 case TARGET_SO_TIMESTAMP
:
2110 optname
= SO_TIMESTAMP
;
2112 case TARGET_SO_RCVLOWAT
:
2113 optname
= SO_RCVLOWAT
;
2118 if (optlen
< sizeof(uint32_t))
2119 return -TARGET_EINVAL
;
2121 if (get_user_u32(val
, optval_addr
))
2122 return -TARGET_EFAULT
;
2123 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
2127 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
2128 ret
= -TARGET_ENOPROTOOPT
;
2133 /* do_getsockopt() Must return target values and target errnos. */
2134 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
2135 abi_ulong optval_addr
, abi_ulong optlen
)
2142 case TARGET_SOL_SOCKET
:
2145 /* These don't just return a single integer */
2146 case TARGET_SO_RCVTIMEO
:
2147 case TARGET_SO_SNDTIMEO
:
2148 case TARGET_SO_PEERNAME
:
2150 case TARGET_SO_PEERCRED
: {
2153 struct target_ucred
*tcr
;
2155 if (get_user_u32(len
, optlen
)) {
2156 return -TARGET_EFAULT
;
2159 return -TARGET_EINVAL
;
2163 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
2171 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
2172 return -TARGET_EFAULT
;
2174 __put_user(cr
.pid
, &tcr
->pid
);
2175 __put_user(cr
.uid
, &tcr
->uid
);
2176 __put_user(cr
.gid
, &tcr
->gid
);
2177 unlock_user_struct(tcr
, optval_addr
, 1);
2178 if (put_user_u32(len
, optlen
)) {
2179 return -TARGET_EFAULT
;
2183 case TARGET_SO_LINGER
:
2187 struct target_linger
*tlg
;
2189 if (get_user_u32(len
, optlen
)) {
2190 return -TARGET_EFAULT
;
2193 return -TARGET_EINVAL
;
2197 ret
= get_errno(getsockopt(sockfd
, level
, SO_LINGER
,
2205 if (!lock_user_struct(VERIFY_WRITE
, tlg
, optval_addr
, 0)) {
2206 return -TARGET_EFAULT
;
2208 __put_user(lg
.l_onoff
, &tlg
->l_onoff
);
2209 __put_user(lg
.l_linger
, &tlg
->l_linger
);
2210 unlock_user_struct(tlg
, optval_addr
, 1);
2211 if (put_user_u32(len
, optlen
)) {
2212 return -TARGET_EFAULT
;
2216 /* Options with 'int' argument. */
2217 case TARGET_SO_DEBUG
:
2220 case TARGET_SO_REUSEADDR
:
2221 optname
= SO_REUSEADDR
;
2224 case TARGET_SO_REUSEPORT
:
2225 optname
= SO_REUSEPORT
;
2228 case TARGET_SO_TYPE
:
2231 case TARGET_SO_ERROR
:
2234 case TARGET_SO_DONTROUTE
:
2235 optname
= SO_DONTROUTE
;
2237 case TARGET_SO_BROADCAST
:
2238 optname
= SO_BROADCAST
;
2240 case TARGET_SO_SNDBUF
:
2241 optname
= SO_SNDBUF
;
2243 case TARGET_SO_RCVBUF
:
2244 optname
= SO_RCVBUF
;
2246 case TARGET_SO_KEEPALIVE
:
2247 optname
= SO_KEEPALIVE
;
2249 case TARGET_SO_OOBINLINE
:
2250 optname
= SO_OOBINLINE
;
2252 case TARGET_SO_NO_CHECK
:
2253 optname
= SO_NO_CHECK
;
2255 case TARGET_SO_PRIORITY
:
2256 optname
= SO_PRIORITY
;
2259 case TARGET_SO_BSDCOMPAT
:
2260 optname
= SO_BSDCOMPAT
;
2263 case TARGET_SO_PASSCRED
:
2264 optname
= SO_PASSCRED
;
2266 case TARGET_SO_TIMESTAMP
:
2267 optname
= SO_TIMESTAMP
;
2269 case TARGET_SO_RCVLOWAT
:
2270 optname
= SO_RCVLOWAT
;
2272 case TARGET_SO_ACCEPTCONN
:
2273 optname
= SO_ACCEPTCONN
;
2280 /* TCP options all take an 'int' value. */
2282 if (get_user_u32(len
, optlen
))
2283 return -TARGET_EFAULT
;
2285 return -TARGET_EINVAL
;
2287 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2290 if (optname
== SO_TYPE
) {
2291 val
= host_to_target_sock_type(val
);
2296 if (put_user_u32(val
, optval_addr
))
2297 return -TARGET_EFAULT
;
2299 if (put_user_u8(val
, optval_addr
))
2300 return -TARGET_EFAULT
;
2302 if (put_user_u32(len
, optlen
))
2303 return -TARGET_EFAULT
;
2310 case IP_ROUTER_ALERT
:
2314 case IP_MTU_DISCOVER
:
2320 case IP_MULTICAST_TTL
:
2321 case IP_MULTICAST_LOOP
:
2322 if (get_user_u32(len
, optlen
))
2323 return -TARGET_EFAULT
;
2325 return -TARGET_EINVAL
;
2327 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2330 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2332 if (put_user_u32(len
, optlen
)
2333 || put_user_u8(val
, optval_addr
))
2334 return -TARGET_EFAULT
;
2336 if (len
> sizeof(int))
2338 if (put_user_u32(len
, optlen
)
2339 || put_user_u32(val
, optval_addr
))
2340 return -TARGET_EFAULT
;
2344 ret
= -TARGET_ENOPROTOOPT
;
2350 case IPV6_MTU_DISCOVER
:
2353 case IPV6_RECVPKTINFO
:
2354 case IPV6_UNICAST_HOPS
:
2355 case IPV6_MULTICAST_HOPS
:
2356 case IPV6_MULTICAST_LOOP
:
2358 case IPV6_RECVHOPLIMIT
:
2359 case IPV6_2292HOPLIMIT
:
2361 if (get_user_u32(len
, optlen
))
2362 return -TARGET_EFAULT
;
2364 return -TARGET_EINVAL
;
2366 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2369 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2371 if (put_user_u32(len
, optlen
)
2372 || put_user_u8(val
, optval_addr
))
2373 return -TARGET_EFAULT
;
2375 if (len
> sizeof(int))
2377 if (put_user_u32(len
, optlen
)
2378 || put_user_u32(val
, optval_addr
))
2379 return -TARGET_EFAULT
;
2383 ret
= -TARGET_ENOPROTOOPT
;
2389 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2391 ret
= -TARGET_EOPNOTSUPP
;
2397 /* Convert target low/high pair representing file offset into the host
2398 * low/high pair. This function doesn't handle offsets bigger than 64 bits
2399 * as the kernel doesn't handle them either.
2401 static void target_to_host_low_high(abi_ulong tlow
,
2403 unsigned long *hlow
,
2404 unsigned long *hhigh
)
2406 uint64_t off
= tlow
|
2407 ((unsigned long long)thigh
<< TARGET_LONG_BITS
/ 2) <<
2408 TARGET_LONG_BITS
/ 2;
2411 *hhigh
= (off
>> HOST_LONG_BITS
/ 2) >> HOST_LONG_BITS
/ 2;
2414 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
2415 abi_ulong count
, int copy
)
2417 struct target_iovec
*target_vec
;
2419 abi_ulong total_len
, max_len
;
2422 bool bad_address
= false;
2428 if (count
> IOV_MAX
) {
2433 vec
= g_try_new0(struct iovec
, count
);
2439 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2440 count
* sizeof(struct target_iovec
), 1);
2441 if (target_vec
== NULL
) {
2446 /* ??? If host page size > target page size, this will result in a
2447 value larger than what we can actually support. */
2448 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
2451 for (i
= 0; i
< count
; i
++) {
2452 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2453 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2458 } else if (len
== 0) {
2459 /* Zero length pointer is ignored. */
2460 vec
[i
].iov_base
= 0;
2462 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
2463 /* If the first buffer pointer is bad, this is a fault. But
2464 * subsequent bad buffers will result in a partial write; this
2465 * is realized by filling the vector with null pointers and
2467 if (!vec
[i
].iov_base
) {
2478 if (len
> max_len
- total_len
) {
2479 len
= max_len
- total_len
;
2482 vec
[i
].iov_len
= len
;
2486 unlock_user(target_vec
, target_addr
, 0);
2491 if (tswapal(target_vec
[i
].iov_len
) > 0) {
2492 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
2495 unlock_user(target_vec
, target_addr
, 0);
2502 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
2503 abi_ulong count
, int copy
)
2505 struct target_iovec
*target_vec
;
2508 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2509 count
* sizeof(struct target_iovec
), 1);
2511 for (i
= 0; i
< count
; i
++) {
2512 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2513 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2517 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
2519 unlock_user(target_vec
, target_addr
, 0);
2525 static inline int target_to_host_sock_type(int *type
)
2528 int target_type
= *type
;
2530 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
2531 case TARGET_SOCK_DGRAM
:
2532 host_type
= SOCK_DGRAM
;
2534 case TARGET_SOCK_STREAM
:
2535 host_type
= SOCK_STREAM
;
2538 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
2541 if (target_type
& TARGET_SOCK_CLOEXEC
) {
2542 #if defined(SOCK_CLOEXEC)
2543 host_type
|= SOCK_CLOEXEC
;
2545 return -TARGET_EINVAL
;
2548 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2549 #if defined(SOCK_NONBLOCK)
2550 host_type
|= SOCK_NONBLOCK
;
2551 #elif !defined(O_NONBLOCK)
2552 return -TARGET_EINVAL
;
/* Try to emulate socket type flags after socket creation.  Used when the
 * host lacks SOCK_NONBLOCK but has O_NONBLOCK; returns fd on success or a
 * target errno (closing the fd) on failure. */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
2574 /* do_socket() Must return target values and target errnos. */
2575 static abi_long
do_socket(int domain
, int type
, int protocol
)
2577 int target_type
= type
;
2580 ret
= target_to_host_sock_type(&type
);
2585 if (domain
== PF_NETLINK
&& !(
2586 #ifdef CONFIG_RTNETLINK
2587 protocol
== NETLINK_ROUTE
||
2589 protocol
== NETLINK_KOBJECT_UEVENT
||
2590 protocol
== NETLINK_AUDIT
)) {
2591 return -EPFNOSUPPORT
;
2594 if (domain
== AF_PACKET
||
2595 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
2596 protocol
= tswap16(protocol
);
2599 ret
= get_errno(socket(domain
, type
, protocol
));
2601 ret
= sock_flags_fixup(ret
, target_type
);
2602 if (type
== SOCK_PACKET
) {
2603 /* Manage an obsolete case :
2604 * if socket type is SOCK_PACKET, bind by name
2606 fd_trans_register(ret
, &target_packet_trans
);
2607 } else if (domain
== PF_NETLINK
) {
2609 #ifdef CONFIG_RTNETLINK
2611 fd_trans_register(ret
, &target_netlink_route_trans
);
2614 case NETLINK_KOBJECT_UEVENT
:
2615 /* nothing to do: messages are strings */
2618 fd_trans_register(ret
, &target_netlink_audit_trans
);
2621 g_assert_not_reached();
2628 /* do_bind() Must return target values and target errnos. */
2629 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
2635 if ((int)addrlen
< 0) {
2636 return -TARGET_EINVAL
;
2639 addr
= alloca(addrlen
+1);
2641 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
2645 return get_errno(bind(sockfd
, addr
, addrlen
));
2648 /* do_connect() Must return target values and target errnos. */
2649 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
2655 if ((int)addrlen
< 0) {
2656 return -TARGET_EINVAL
;
2659 addr
= alloca(addrlen
+1);
2661 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
2665 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
2668 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2669 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
2670 int flags
, int send
)
2676 abi_ulong target_vec
;
2678 if (msgp
->msg_name
) {
2679 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
2680 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
2681 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
2682 tswapal(msgp
->msg_name
),
2684 if (ret
== -TARGET_EFAULT
) {
2685 /* For connected sockets msg_name and msg_namelen must
2686 * be ignored, so returning EFAULT immediately is wrong.
2687 * Instead, pass a bad msg_name to the host kernel, and
2688 * let it decide whether to return EFAULT or not.
2690 msg
.msg_name
= (void *)-1;
2695 msg
.msg_name
= NULL
;
2696 msg
.msg_namelen
= 0;
2698 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
2699 msg
.msg_control
= alloca(msg
.msg_controllen
);
2700 memset(msg
.msg_control
, 0, msg
.msg_controllen
);
2702 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
2704 count
= tswapal(msgp
->msg_iovlen
);
2705 target_vec
= tswapal(msgp
->msg_iov
);
2707 if (count
> IOV_MAX
) {
2708 /* sendrcvmsg returns a different errno for this condition than
2709 * readv/writev, so we must catch it here before lock_iovec() does.
2711 ret
= -TARGET_EMSGSIZE
;
2715 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
2716 target_vec
, count
, send
);
2718 ret
= -host_to_target_errno(errno
);
2721 msg
.msg_iovlen
= count
;
2725 if (fd_trans_target_to_host_data(fd
)) {
2728 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
2729 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
2730 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
2731 msg
.msg_iov
->iov_len
);
2733 msg
.msg_iov
->iov_base
= host_msg
;
2734 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
2738 ret
= target_to_host_cmsg(&msg
, msgp
);
2740 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
2744 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
2745 if (!is_error(ret
)) {
2747 if (fd_trans_host_to_target_data(fd
)) {
2748 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
2749 MIN(msg
.msg_iov
->iov_len
, len
));
2751 ret
= host_to_target_cmsg(msgp
, &msg
);
2753 if (!is_error(ret
)) {
2754 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
2755 msgp
->msg_flags
= tswap32(msg
.msg_flags
);
2756 if (msg
.msg_name
!= NULL
&& msg
.msg_name
!= (void *)-1) {
2757 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
2758 msg
.msg_name
, msg
.msg_namelen
);
2770 unlock_iovec(vec
, target_vec
, count
, !send
);
2775 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
2776 int flags
, int send
)
2779 struct target_msghdr
*msgp
;
2781 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
2785 return -TARGET_EFAULT
;
2787 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
2788 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
2792 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2793 * so it might not have this *mmsg-specific flag either.
2795 #ifndef MSG_WAITFORONE
2796 #define MSG_WAITFORONE 0x10000
2799 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
2800 unsigned int vlen
, unsigned int flags
,
2803 struct target_mmsghdr
*mmsgp
;
2807 if (vlen
> UIO_MAXIOV
) {
2811 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
2813 return -TARGET_EFAULT
;
2816 for (i
= 0; i
< vlen
; i
++) {
2817 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
2818 if (is_error(ret
)) {
2821 mmsgp
[i
].msg_len
= tswap32(ret
);
2822 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2823 if (flags
& MSG_WAITFORONE
) {
2824 flags
|= MSG_DONTWAIT
;
2828 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
2830 /* Return number of datagrams sent if we sent any at all;
2831 * otherwise return the error.
2839 /* do_accept4() Must return target values and target errnos. */
2840 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
2841 abi_ulong target_addrlen_addr
, int flags
)
2843 socklen_t addrlen
, ret_addrlen
;
2848 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
2850 if (target_addr
== 0) {
2851 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
2854 /* linux returns EINVAL if addrlen pointer is invalid */
2855 if (get_user_u32(addrlen
, target_addrlen_addr
))
2856 return -TARGET_EINVAL
;
2858 if ((int)addrlen
< 0) {
2859 return -TARGET_EINVAL
;
2862 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2863 return -TARGET_EINVAL
;
2865 addr
= alloca(addrlen
);
2867 ret_addrlen
= addrlen
;
2868 ret
= get_errno(safe_accept4(fd
, addr
, &ret_addrlen
, host_flags
));
2869 if (!is_error(ret
)) {
2870 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
2871 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
2872 ret
= -TARGET_EFAULT
;
2878 /* do_getpeername() Must return target values and target errnos. */
2879 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
2880 abi_ulong target_addrlen_addr
)
2882 socklen_t addrlen
, ret_addrlen
;
2886 if (get_user_u32(addrlen
, target_addrlen_addr
))
2887 return -TARGET_EFAULT
;
2889 if ((int)addrlen
< 0) {
2890 return -TARGET_EINVAL
;
2893 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2894 return -TARGET_EFAULT
;
2896 addr
= alloca(addrlen
);
2898 ret_addrlen
= addrlen
;
2899 ret
= get_errno(getpeername(fd
, addr
, &ret_addrlen
));
2900 if (!is_error(ret
)) {
2901 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
2902 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
2903 ret
= -TARGET_EFAULT
;
2909 /* do_getsockname() Must return target values and target errnos. */
2910 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
2911 abi_ulong target_addrlen_addr
)
2913 socklen_t addrlen
, ret_addrlen
;
2917 if (get_user_u32(addrlen
, target_addrlen_addr
))
2918 return -TARGET_EFAULT
;
2920 if ((int)addrlen
< 0) {
2921 return -TARGET_EINVAL
;
2924 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2925 return -TARGET_EFAULT
;
2927 addr
= alloca(addrlen
);
2929 ret_addrlen
= addrlen
;
2930 ret
= get_errno(getsockname(fd
, addr
, &ret_addrlen
));
2931 if (!is_error(ret
)) {
2932 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
2933 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
2934 ret
= -TARGET_EFAULT
;
2940 /* do_socketpair() Must return target values and target errnos. */
2941 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
2942 abi_ulong target_tab_addr
)
2947 target_to_host_sock_type(&type
);
2949 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
2950 if (!is_error(ret
)) {
2951 if (put_user_s32(tab
[0], target_tab_addr
)
2952 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
2953 ret
= -TARGET_EFAULT
;
2958 /* do_sendto() Must return target values and target errnos. */
2959 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
2960 abi_ulong target_addr
, socklen_t addrlen
)
2964 void *copy_msg
= NULL
;
2967 if ((int)addrlen
< 0) {
2968 return -TARGET_EINVAL
;
2971 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
2973 return -TARGET_EFAULT
;
2974 if (fd_trans_target_to_host_data(fd
)) {
2975 copy_msg
= host_msg
;
2976 host_msg
= g_malloc(len
);
2977 memcpy(host_msg
, copy_msg
, len
);
2978 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
2984 addr
= alloca(addrlen
+1);
2985 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
2989 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
2991 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
2996 host_msg
= copy_msg
;
2998 unlock_user(host_msg
, msg
, 0);
3002 /* do_recvfrom() Must return target values and target errnos. */
3003 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
3004 abi_ulong target_addr
,
3005 abi_ulong target_addrlen
)
3007 socklen_t addrlen
, ret_addrlen
;
3012 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
3014 return -TARGET_EFAULT
;
3016 if (get_user_u32(addrlen
, target_addrlen
)) {
3017 ret
= -TARGET_EFAULT
;
3020 if ((int)addrlen
< 0) {
3021 ret
= -TARGET_EINVAL
;
3024 addr
= alloca(addrlen
);
3025 ret_addrlen
= addrlen
;
3026 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
3027 addr
, &ret_addrlen
));
3029 addr
= NULL
; /* To keep compiler quiet. */
3030 addrlen
= 0; /* To keep compiler quiet. */
3031 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
3033 if (!is_error(ret
)) {
3034 if (fd_trans_host_to_target_data(fd
)) {
3036 trans
= fd_trans_host_to_target_data(fd
)(host_msg
, MIN(ret
, len
));
3037 if (is_error(trans
)) {
3043 host_to_target_sockaddr(target_addr
, addr
,
3044 MIN(addrlen
, ret_addrlen
));
3045 if (put_user_u32(ret_addrlen
, target_addrlen
)) {
3046 ret
= -TARGET_EFAULT
;
3050 unlock_user(host_msg
, msg
, len
);
3053 unlock_user(host_msg
, msg
, 0);
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
3151 #define N_SHM_REGIONS 32
3153 static struct shm_region
{
3157 } shm_regions
[N_SHM_REGIONS
];
3159 #ifndef TARGET_SEMID64_DS
3160 /* asm-generic version of this struct */
3161 struct target_semid64_ds
3163 struct target_ipc_perm sem_perm
;
3164 abi_ulong sem_otime
;
3165 #if TARGET_ABI_BITS == 32
3166 abi_ulong __unused1
;
3168 abi_ulong sem_ctime
;
3169 #if TARGET_ABI_BITS == 32
3170 abi_ulong __unused2
;
3172 abi_ulong sem_nsems
;
3173 abi_ulong __unused3
;
3174 abi_ulong __unused4
;
3178 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
3179 abi_ulong target_addr
)
3181 struct target_ipc_perm
*target_ip
;
3182 struct target_semid64_ds
*target_sd
;
3184 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3185 return -TARGET_EFAULT
;
3186 target_ip
= &(target_sd
->sem_perm
);
3187 host_ip
->__key
= tswap32(target_ip
->__key
);
3188 host_ip
->uid
= tswap32(target_ip
->uid
);
3189 host_ip
->gid
= tswap32(target_ip
->gid
);
3190 host_ip
->cuid
= tswap32(target_ip
->cuid
);
3191 host_ip
->cgid
= tswap32(target_ip
->cgid
);
3192 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3193 host_ip
->mode
= tswap32(target_ip
->mode
);
3195 host_ip
->mode
= tswap16(target_ip
->mode
);
3197 #if defined(TARGET_PPC)
3198 host_ip
->__seq
= tswap32(target_ip
->__seq
);
3200 host_ip
->__seq
= tswap16(target_ip
->__seq
);
3202 unlock_user_struct(target_sd
, target_addr
, 0);
3206 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
3207 struct ipc_perm
*host_ip
)
3209 struct target_ipc_perm
*target_ip
;
3210 struct target_semid64_ds
*target_sd
;
3212 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3213 return -TARGET_EFAULT
;
3214 target_ip
= &(target_sd
->sem_perm
);
3215 target_ip
->__key
= tswap32(host_ip
->__key
);
3216 target_ip
->uid
= tswap32(host_ip
->uid
);
3217 target_ip
->gid
= tswap32(host_ip
->gid
);
3218 target_ip
->cuid
= tswap32(host_ip
->cuid
);
3219 target_ip
->cgid
= tswap32(host_ip
->cgid
);
3220 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3221 target_ip
->mode
= tswap32(host_ip
->mode
);
3223 target_ip
->mode
= tswap16(host_ip
->mode
);
3225 #if defined(TARGET_PPC)
3226 target_ip
->__seq
= tswap32(host_ip
->__seq
);
3228 target_ip
->__seq
= tswap16(host_ip
->__seq
);
3230 unlock_user_struct(target_sd
, target_addr
, 1);
3234 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
3235 abi_ulong target_addr
)
3237 struct target_semid64_ds
*target_sd
;
3239 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3240 return -TARGET_EFAULT
;
3241 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
3242 return -TARGET_EFAULT
;
3243 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
3244 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
3245 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
3246 unlock_user_struct(target_sd
, target_addr
, 0);
3250 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
3251 struct semid_ds
*host_sd
)
3253 struct target_semid64_ds
*target_sd
;
3255 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3256 return -TARGET_EFAULT
;
3257 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
3258 return -TARGET_EFAULT
;
3259 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
3260 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
3261 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
3262 unlock_user_struct(target_sd
, target_addr
, 1);
3266 struct target_seminfo
{
3279 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
3280 struct seminfo
*host_seminfo
)
3282 struct target_seminfo
*target_seminfo
;
3283 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
3284 return -TARGET_EFAULT
;
3285 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
3286 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
3287 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
3288 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
3289 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
3290 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
3291 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
3292 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
3293 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
3294 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
3295 unlock_user_struct(target_seminfo
, target_addr
, 1);
3301 struct semid_ds
*buf
;
3302 unsigned short *array
;
3303 struct seminfo
*__buf
;
3306 union target_semun
{
3313 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
3314 abi_ulong target_addr
)
3317 unsigned short *array
;
3319 struct semid_ds semid_ds
;
3322 semun
.buf
= &semid_ds
;
3324 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3326 return get_errno(ret
);
3328 nsems
= semid_ds
.sem_nsems
;
3330 *host_array
= g_try_new(unsigned short, nsems
);
3332 return -TARGET_ENOMEM
;
3334 array
= lock_user(VERIFY_READ
, target_addr
,
3335 nsems
*sizeof(unsigned short), 1);
3337 g_free(*host_array
);
3338 return -TARGET_EFAULT
;
3341 for(i
=0; i
<nsems
; i
++) {
3342 __get_user((*host_array
)[i
], &array
[i
]);
3344 unlock_user(array
, target_addr
, 0);
3349 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
3350 unsigned short **host_array
)
3353 unsigned short *array
;
3355 struct semid_ds semid_ds
;
3358 semun
.buf
= &semid_ds
;
3360 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3362 return get_errno(ret
);
3364 nsems
= semid_ds
.sem_nsems
;
3366 array
= lock_user(VERIFY_WRITE
, target_addr
,
3367 nsems
*sizeof(unsigned short), 0);
3369 return -TARGET_EFAULT
;
3371 for(i
=0; i
<nsems
; i
++) {
3372 __put_user((*host_array
)[i
], &array
[i
]);
3374 g_free(*host_array
);
3375 unlock_user(array
, target_addr
, 1);
3380 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
3381 abi_ulong target_arg
)
3383 union target_semun target_su
= { .buf
= target_arg
};
3385 struct semid_ds dsarg
;
3386 unsigned short *array
= NULL
;
3387 struct seminfo seminfo
;
3388 abi_long ret
= -TARGET_EINVAL
;
3395 /* In 64 bit cross-endian situations, we will erroneously pick up
3396 * the wrong half of the union for the "val" element. To rectify
3397 * this, the entire 8-byte structure is byteswapped, followed by
3398 * a swap of the 4 byte val field. In other cases, the data is
3399 * already in proper host byte order. */
3400 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
3401 target_su
.buf
= tswapal(target_su
.buf
);
3402 arg
.val
= tswap32(target_su
.val
);
3404 arg
.val
= target_su
.val
;
3406 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3410 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
3414 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3415 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
3422 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
3426 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3427 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
3433 arg
.__buf
= &seminfo
;
3434 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3435 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
3443 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
3450 struct target_sembuf
{
3451 unsigned short sem_num
;
3456 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
3457 abi_ulong target_addr
,
3460 struct target_sembuf
*target_sembuf
;
3463 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
3464 nsops
*sizeof(struct target_sembuf
), 1);
3466 return -TARGET_EFAULT
;
3468 for(i
=0; i
<nsops
; i
++) {
3469 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
3470 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
3471 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
3474 unlock_user(target_sembuf
, target_addr
, 0);
3479 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
3481 struct sembuf sops
[nsops
];
3483 if (target_to_host_sembuf(sops
, ptr
, nsops
))
3484 return -TARGET_EFAULT
;
3486 return get_errno(safe_semtimedop(semid
, sops
, nsops
, NULL
));
3489 struct target_msqid_ds
3491 struct target_ipc_perm msg_perm
;
3492 abi_ulong msg_stime
;
3493 #if TARGET_ABI_BITS == 32
3494 abi_ulong __unused1
;
3496 abi_ulong msg_rtime
;
3497 #if TARGET_ABI_BITS == 32
3498 abi_ulong __unused2
;
3500 abi_ulong msg_ctime
;
3501 #if TARGET_ABI_BITS == 32
3502 abi_ulong __unused3
;
3504 abi_ulong __msg_cbytes
;
3506 abi_ulong msg_qbytes
;
3507 abi_ulong msg_lspid
;
3508 abi_ulong msg_lrpid
;
3509 abi_ulong __unused4
;
3510 abi_ulong __unused5
;
3513 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
3514 abi_ulong target_addr
)
3516 struct target_msqid_ds
*target_md
;
3518 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
3519 return -TARGET_EFAULT
;
3520 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
3521 return -TARGET_EFAULT
;
3522 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
3523 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
3524 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
3525 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
3526 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
3527 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
3528 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
3529 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
3530 unlock_user_struct(target_md
, target_addr
, 0);
3534 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
3535 struct msqid_ds
*host_md
)
3537 struct target_msqid_ds
*target_md
;
3539 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
3540 return -TARGET_EFAULT
;
3541 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
3542 return -TARGET_EFAULT
;
3543 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
3544 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
3545 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
3546 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
3547 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
3548 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
3549 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
3550 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
3551 unlock_user_struct(target_md
, target_addr
, 1);
3555 struct target_msginfo
{
3563 unsigned short int msgseg
;
3566 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
3567 struct msginfo
*host_msginfo
)
3569 struct target_msginfo
*target_msginfo
;
3570 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
3571 return -TARGET_EFAULT
;
3572 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
3573 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
3574 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
3575 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
3576 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
3577 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
3578 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
3579 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
3580 unlock_user_struct(target_msginfo
, target_addr
, 1);
3584 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
3586 struct msqid_ds dsarg
;
3587 struct msginfo msginfo
;
3588 abi_long ret
= -TARGET_EINVAL
;
3596 if (target_to_host_msqid_ds(&dsarg
,ptr
))
3597 return -TARGET_EFAULT
;
3598 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
3599 if (host_to_target_msqid_ds(ptr
,&dsarg
))
3600 return -TARGET_EFAULT
;
3603 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
3607 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
3608 if (host_to_target_msginfo(ptr
, &msginfo
))
3609 return -TARGET_EFAULT
;
3616 struct target_msgbuf
{
3621 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
3622 ssize_t msgsz
, int msgflg
)
3624 struct target_msgbuf
*target_mb
;
3625 struct msgbuf
*host_mb
;
3629 return -TARGET_EINVAL
;
3632 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
3633 return -TARGET_EFAULT
;
3634 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
3636 unlock_user_struct(target_mb
, msgp
, 0);
3637 return -TARGET_ENOMEM
;
3639 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
3640 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
3641 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
3643 unlock_user_struct(target_mb
, msgp
, 0);
3648 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
3649 ssize_t msgsz
, abi_long msgtyp
,
3652 struct target_msgbuf
*target_mb
;
3654 struct msgbuf
*host_mb
;
3658 return -TARGET_EINVAL
;
3661 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
3662 return -TARGET_EFAULT
;
3664 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
3666 ret
= -TARGET_ENOMEM
;
3669 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
3672 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
3673 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
3674 if (!target_mtext
) {
3675 ret
= -TARGET_EFAULT
;
3678 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
3679 unlock_user(target_mtext
, target_mtext_addr
, ret
);
3682 target_mb
->mtype
= tswapal(host_mb
->mtype
);
3686 unlock_user_struct(target_mb
, msgp
, 1);
3691 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
3692 abi_ulong target_addr
)
3694 struct target_shmid_ds
*target_sd
;
3696 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3697 return -TARGET_EFAULT
;
3698 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
3699 return -TARGET_EFAULT
;
3700 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
3701 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
3702 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
3703 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
3704 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
3705 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
3706 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
3707 unlock_user_struct(target_sd
, target_addr
, 0);
3711 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
3712 struct shmid_ds
*host_sd
)
3714 struct target_shmid_ds
*target_sd
;
3716 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3717 return -TARGET_EFAULT
;
3718 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
3719 return -TARGET_EFAULT
;
3720 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
3721 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
3722 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
3723 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
3724 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
3725 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
3726 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
3727 unlock_user_struct(target_sd
, target_addr
, 1);
3731 struct target_shminfo
{
3739 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
3740 struct shminfo
*host_shminfo
)
3742 struct target_shminfo
*target_shminfo
;
3743 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
3744 return -TARGET_EFAULT
;
3745 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
3746 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
3747 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
3748 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
3749 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
3750 unlock_user_struct(target_shminfo
, target_addr
, 1);
3754 struct target_shm_info
{
3759 abi_ulong swap_attempts
;
3760 abi_ulong swap_successes
;
3763 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
3764 struct shm_info
*host_shm_info
)
3766 struct target_shm_info
*target_shm_info
;
3767 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
3768 return -TARGET_EFAULT
;
3769 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
3770 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
3771 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
3772 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
3773 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
3774 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
3775 unlock_user_struct(target_shm_info
, target_addr
, 1);
3779 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
3781 struct shmid_ds dsarg
;
3782 struct shminfo shminfo
;
3783 struct shm_info shm_info
;
3784 abi_long ret
= -TARGET_EINVAL
;
3792 if (target_to_host_shmid_ds(&dsarg
, buf
))
3793 return -TARGET_EFAULT
;
3794 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
3795 if (host_to_target_shmid_ds(buf
, &dsarg
))
3796 return -TARGET_EFAULT
;
3799 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
3800 if (host_to_target_shminfo(buf
, &shminfo
))
3801 return -TARGET_EFAULT
;
3804 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
3805 if (host_to_target_shm_info(buf
, &shm_info
))
3806 return -TARGET_EFAULT
;
3811 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
3818 #ifndef TARGET_FORCE_SHMLBA
3819 /* For most architectures, SHMLBA is the same as the page size;
3820 * some architectures have larger values, in which case they should
3821 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
3822 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
3823 * and defining its own value for SHMLBA.
3825 * The kernel also permits SHMLBA to be set by the architecture to a
3826 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
3827 * this means that addresses are rounded to the large size if
3828 * SHM_RND is set but addresses not aligned to that size are not rejected
3829 * as long as they are at least page-aligned. Since the only architecture
3830 * which uses this is ia64 this code doesn't provide for that oddity.
3832 static inline abi_ulong
target_shmlba(CPUArchState
*cpu_env
)
3834 return TARGET_PAGE_SIZE
;
3838 static inline abi_ulong
do_shmat(CPUArchState
*cpu_env
,
3839 int shmid
, abi_ulong shmaddr
, int shmflg
)
3843 struct shmid_ds shm_info
;
3847 /* find out the length of the shared memory segment */
3848 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
3849 if (is_error(ret
)) {
3850 /* can't get length, bail out */
3854 shmlba
= target_shmlba(cpu_env
);
3856 if (shmaddr
& (shmlba
- 1)) {
3857 if (shmflg
& SHM_RND
) {
3858 shmaddr
&= ~(shmlba
- 1);
3860 return -TARGET_EINVAL
;
3863 if (!guest_range_valid(shmaddr
, shm_info
.shm_segsz
)) {
3864 return -TARGET_EINVAL
;
3870 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
3872 abi_ulong mmap_start
;
3874 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
3876 if (mmap_start
== -1) {
3878 host_raddr
= (void *)-1;
3880 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
3883 if (host_raddr
== (void *)-1) {
3885 return get_errno((long)host_raddr
);
3887 raddr
=h2g((unsigned long)host_raddr
);
3889 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
3890 PAGE_VALID
| PAGE_READ
|
3891 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
3893 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
3894 if (!shm_regions
[i
].in_use
) {
3895 shm_regions
[i
].in_use
= true;
3896 shm_regions
[i
].start
= raddr
;
3897 shm_regions
[i
].size
= shm_info
.shm_segsz
;
3907 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
3914 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
3915 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
3916 shm_regions
[i
].in_use
= false;
3917 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
3921 rv
= get_errno(shmdt(g2h(shmaddr
)));
3928 #ifdef TARGET_NR_ipc
3929 /* ??? This only works with linear mappings. */
3930 /* do_ipc() must return target values and target errnos. */
3931 static abi_long
do_ipc(CPUArchState
*cpu_env
,
3932 unsigned int call
, abi_long first
,
3933 abi_long second
, abi_long third
,
3934 abi_long ptr
, abi_long fifth
)
3939 version
= call
>> 16;
3944 ret
= do_semop(first
, ptr
, second
);
3948 ret
= get_errno(semget(first
, second
, third
));
3951 case IPCOP_semctl
: {
3952 /* The semun argument to semctl is passed by value, so dereference the
3955 get_user_ual(atptr
, ptr
);
3956 ret
= do_semctl(first
, second
, third
, atptr
);
3961 ret
= get_errno(msgget(first
, second
));
3965 ret
= do_msgsnd(first
, ptr
, second
, third
);
3969 ret
= do_msgctl(first
, second
, ptr
);
3976 struct target_ipc_kludge
{
3981 if (!lock_user_struct(VERIFY_READ
, tmp
, ptr
, 1)) {
3982 ret
= -TARGET_EFAULT
;
3986 ret
= do_msgrcv(first
, tswapal(tmp
->msgp
), second
, tswapal(tmp
->msgtyp
), third
);
3988 unlock_user_struct(tmp
, ptr
, 0);
3992 ret
= do_msgrcv(first
, ptr
, second
, fifth
, third
);
4001 raddr
= do_shmat(cpu_env
, first
, ptr
, second
);
4002 if (is_error(raddr
))
4003 return get_errno(raddr
);
4004 if (put_user_ual(raddr
, third
))
4005 return -TARGET_EFAULT
;
4009 ret
= -TARGET_EINVAL
;
4014 ret
= do_shmdt(ptr
);
4018 /* IPC_* flag values are the same on all linux platforms */
4019 ret
= get_errno(shmget(first
, second
, third
));
4022 /* IPC_* and SHM_* command values are the same on all linux platforms */
4024 ret
= do_shmctl(first
, second
, ptr
);
4027 gemu_log("Unsupported ipc call: %d (version %d)\n", call
, version
);
4028 ret
= -TARGET_ENOSYS
;
4035 /* kernel structure types definitions */
4037 #define STRUCT(name, ...) STRUCT_ ## name,
4038 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4040 #include "syscall_types.h"
4044 #undef STRUCT_SPECIAL
4046 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4047 #define STRUCT_SPECIAL(name)
4048 #include "syscall_types.h"
4050 #undef STRUCT_SPECIAL
4052 typedef struct IOCTLEntry IOCTLEntry
;
4054 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4055 int fd
, int cmd
, abi_long arg
);
4059 unsigned int host_cmd
;
4062 do_ioctl_fn
*do_ioctl
;
4063 const argtype arg_type
[5];
4066 #define IOC_R 0x0001
4067 #define IOC_W 0x0002
4068 #define IOC_RW (IOC_R | IOC_W)
4070 #define MAX_STRUCT_SIZE 4096
4072 #ifdef CONFIG_FIEMAP
4073 /* So fiemap access checks don't overflow on 32 bit systems.
4074 * This is very slightly smaller than the limit imposed by
4075 * the underlying kernel.
4077 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4078 / sizeof(struct fiemap_extent))
4080 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4081 int fd
, int cmd
, abi_long arg
)
4083 /* The parameter for this ioctl is a struct fiemap followed
4084 * by an array of struct fiemap_extent whose size is set
4085 * in fiemap->fm_extent_count. The array is filled in by the
4088 int target_size_in
, target_size_out
;
4090 const argtype
*arg_type
= ie
->arg_type
;
4091 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
4094 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
4098 assert(arg_type
[0] == TYPE_PTR
);
4099 assert(ie
->access
== IOC_RW
);
4101 target_size_in
= thunk_type_size(arg_type
, 0);
4102 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
4104 return -TARGET_EFAULT
;
4106 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4107 unlock_user(argptr
, arg
, 0);
4108 fm
= (struct fiemap
*)buf_temp
;
4109 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
4110 return -TARGET_EINVAL
;
4113 outbufsz
= sizeof (*fm
) +
4114 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
4116 if (outbufsz
> MAX_STRUCT_SIZE
) {
4117 /* We can't fit all the extents into the fixed size buffer.
4118 * Allocate one that is large enough and use it instead.
4120 fm
= g_try_malloc(outbufsz
);
4122 return -TARGET_ENOMEM
;
4124 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
4127 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, fm
));
4128 if (!is_error(ret
)) {
4129 target_size_out
= target_size_in
;
4130 /* An extent_count of 0 means we were only counting the extents
4131 * so there are no structs to copy
4133 if (fm
->fm_extent_count
!= 0) {
4134 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
4136 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
4138 ret
= -TARGET_EFAULT
;
4140 /* Convert the struct fiemap */
4141 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
4142 if (fm
->fm_extent_count
!= 0) {
4143 p
= argptr
+ target_size_in
;
4144 /* ...and then all the struct fiemap_extents */
4145 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
4146 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
4151 unlock_user(argptr
, arg
, target_size_out
);
4161 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4162 int fd
, int cmd
, abi_long arg
)
4164 const argtype
*arg_type
= ie
->arg_type
;
4168 struct ifconf
*host_ifconf
;
4170 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
4171 int target_ifreq_size
;
4176 abi_long target_ifc_buf
;
4180 assert(arg_type
[0] == TYPE_PTR
);
4181 assert(ie
->access
== IOC_RW
);
4184 target_size
= thunk_type_size(arg_type
, 0);
4186 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4188 return -TARGET_EFAULT
;
4189 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4190 unlock_user(argptr
, arg
, 0);
4192 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
4193 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
4194 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
4196 if (target_ifc_buf
!= 0) {
4197 target_ifc_len
= host_ifconf
->ifc_len
;
4198 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
4199 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
4201 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
4202 if (outbufsz
> MAX_STRUCT_SIZE
) {
4204 * We can't fit all the extents into the fixed size buffer.
4205 * Allocate one that is large enough and use it instead.
4207 host_ifconf
= malloc(outbufsz
);
4209 return -TARGET_ENOMEM
;
4211 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
4214 host_ifc_buf
= (char *)host_ifconf
+ sizeof(*host_ifconf
);
4216 host_ifconf
->ifc_len
= host_ifc_len
;
4218 host_ifc_buf
= NULL
;
4220 host_ifconf
->ifc_buf
= host_ifc_buf
;
4222 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
4223 if (!is_error(ret
)) {
4224 /* convert host ifc_len to target ifc_len */
4226 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
4227 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
4228 host_ifconf
->ifc_len
= target_ifc_len
;
4230 /* restore target ifc_buf */
4232 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
4234 /* copy struct ifconf to target user */
4236 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4238 return -TARGET_EFAULT
;
4239 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
4240 unlock_user(argptr
, arg
, target_size
);
4242 if (target_ifc_buf
!= 0) {
4243 /* copy ifreq[] to target user */
4244 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
4245 for (i
= 0; i
< nb_ifreq
; i
++) {
4246 thunk_convert(argptr
+ i
* target_ifreq_size
,
4247 host_ifc_buf
+ i
* sizeof(struct ifreq
),
4248 ifreq_arg_type
, THUNK_TARGET
);
4250 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
4261 #if defined(CONFIG_USBFS)
4262 #if HOST_LONG_BITS > 64
4263 #error USBDEVFS thunks do not support >64 bit hosts yet.
4266 uint64_t target_urb_adr
;
4267 uint64_t target_buf_adr
;
4268 char *target_buf_ptr
;
4269 struct usbdevfs_urb host_urb
;
4272 static GHashTable
*usbdevfs_urb_hashtable(void)
4274 static GHashTable
*urb_hashtable
;
4276 if (!urb_hashtable
) {
4277 urb_hashtable
= g_hash_table_new(g_int64_hash
, g_int64_equal
);
4279 return urb_hashtable
;
4282 static void urb_hashtable_insert(struct live_urb
*urb
)
4284 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4285 g_hash_table_insert(urb_hashtable
, urb
, urb
);
4288 static struct live_urb
*urb_hashtable_lookup(uint64_t target_urb_adr
)
4290 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4291 return g_hash_table_lookup(urb_hashtable
, &target_urb_adr
);
4294 static void urb_hashtable_remove(struct live_urb
*urb
)
4296 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4297 g_hash_table_remove(urb_hashtable
, urb
);
4301 do_ioctl_usbdevfs_reapurb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4302 int fd
, int cmd
, abi_long arg
)
4304 const argtype usbfsurb_arg_type
[] = { MK_STRUCT(STRUCT_usbdevfs_urb
) };
4305 const argtype ptrvoid_arg_type
[] = { TYPE_PTRVOID
, 0, 0 };
4306 struct live_urb
*lurb
;
4310 uintptr_t target_urb_adr
;
4313 target_size
= thunk_type_size(usbfsurb_arg_type
, THUNK_TARGET
);
4315 memset(buf_temp
, 0, sizeof(uint64_t));
4316 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4317 if (is_error(ret
)) {
4321 memcpy(&hurb
, buf_temp
, sizeof(uint64_t));
4322 lurb
= (void *)((uintptr_t)hurb
- offsetof(struct live_urb
, host_urb
));
4323 if (!lurb
->target_urb_adr
) {
4324 return -TARGET_EFAULT
;
4326 urb_hashtable_remove(lurb
);
4327 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
,
4328 lurb
->host_urb
.buffer_length
);
4329 lurb
->target_buf_ptr
= NULL
;
4331 /* restore the guest buffer pointer */
4332 lurb
->host_urb
.buffer
= (void *)(uintptr_t)lurb
->target_buf_adr
;
4334 /* update the guest urb struct */
4335 argptr
= lock_user(VERIFY_WRITE
, lurb
->target_urb_adr
, target_size
, 0);
4338 return -TARGET_EFAULT
;
4340 thunk_convert(argptr
, &lurb
->host_urb
, usbfsurb_arg_type
, THUNK_TARGET
);
4341 unlock_user(argptr
, lurb
->target_urb_adr
, target_size
);
4343 target_size
= thunk_type_size(ptrvoid_arg_type
, THUNK_TARGET
);
4344 /* write back the urb handle */
4345 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4348 return -TARGET_EFAULT
;
4351 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4352 target_urb_adr
= lurb
->target_urb_adr
;
4353 thunk_convert(argptr
, &target_urb_adr
, ptrvoid_arg_type
, THUNK_TARGET
);
4354 unlock_user(argptr
, arg
, target_size
);
4361 do_ioctl_usbdevfs_discardurb(const IOCTLEntry
*ie
,
4362 uint8_t *buf_temp
__attribute__((unused
)),
4363 int fd
, int cmd
, abi_long arg
)
4365 struct live_urb
*lurb
;
4367 /* map target address back to host URB with metadata. */
4368 lurb
= urb_hashtable_lookup(arg
);
4370 return -TARGET_EFAULT
;
4372 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
4376 do_ioctl_usbdevfs_submiturb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4377 int fd
, int cmd
, abi_long arg
)
4379 const argtype
*arg_type
= ie
->arg_type
;
4384 struct live_urb
*lurb
;
4387 * each submitted URB needs to map to a unique ID for the
4388 * kernel, and that unique ID needs to be a pointer to
4389 * host memory. hence, we need to malloc for each URB.
4390 * isochronous transfers have a variable length struct.
4393 target_size
= thunk_type_size(arg_type
, THUNK_TARGET
);
4395 /* construct host copy of urb and metadata */
4396 lurb
= g_try_malloc0(sizeof(struct live_urb
));
4398 return -TARGET_ENOMEM
;
4401 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4404 return -TARGET_EFAULT
;
4406 thunk_convert(&lurb
->host_urb
, argptr
, arg_type
, THUNK_HOST
);
4407 unlock_user(argptr
, arg
, 0);
4409 lurb
->target_urb_adr
= arg
;
4410 lurb
->target_buf_adr
= (uintptr_t)lurb
->host_urb
.buffer
;
4412 /* buffer space used depends on endpoint type so lock the entire buffer */
4413 /* control type urbs should check the buffer contents for true direction */
4414 rw_dir
= lurb
->host_urb
.endpoint
& USB_DIR_IN
? VERIFY_WRITE
: VERIFY_READ
;
4415 lurb
->target_buf_ptr
= lock_user(rw_dir
, lurb
->target_buf_adr
,
4416 lurb
->host_urb
.buffer_length
, 1);
4417 if (lurb
->target_buf_ptr
== NULL
) {
4419 return -TARGET_EFAULT
;
4422 /* update buffer pointer in host copy */
4423 lurb
->host_urb
.buffer
= lurb
->target_buf_ptr
;
4425 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
4426 if (is_error(ret
)) {
4427 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
, 0);
4430 urb_hashtable_insert(lurb
);
4435 #endif /* CONFIG_USBFS */
4437 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
4438 int cmd
, abi_long arg
)
4441 struct dm_ioctl
*host_dm
;
4442 abi_long guest_data
;
4443 uint32_t guest_data_size
;
4445 const argtype
*arg_type
= ie
->arg_type
;
4447 void *big_buf
= NULL
;
4451 target_size
= thunk_type_size(arg_type
, 0);
4452 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4454 ret
= -TARGET_EFAULT
;
4457 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4458 unlock_user(argptr
, arg
, 0);
4460 /* buf_temp is too small, so fetch things into a bigger buffer */
4461 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
4462 memcpy(big_buf
, buf_temp
, target_size
);
4466 guest_data
= arg
+ host_dm
->data_start
;
4467 if ((guest_data
- arg
) < 0) {
4468 ret
= -TARGET_EINVAL
;
4471 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
4472 host_data
= (char*)host_dm
+ host_dm
->data_start
;
4474 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
4476 ret
= -TARGET_EFAULT
;
4480 switch (ie
->host_cmd
) {
4482 case DM_LIST_DEVICES
:
4485 case DM_DEV_SUSPEND
:
4488 case DM_TABLE_STATUS
:
4489 case DM_TABLE_CLEAR
:
4491 case DM_LIST_VERSIONS
:
4495 case DM_DEV_SET_GEOMETRY
:
4496 /* data contains only strings */
4497 memcpy(host_data
, argptr
, guest_data_size
);
4500 memcpy(host_data
, argptr
, guest_data_size
);
4501 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
4505 void *gspec
= argptr
;
4506 void *cur_data
= host_data
;
4507 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
4508 int spec_size
= thunk_type_size(arg_type
, 0);
4511 for (i
= 0; i
< host_dm
->target_count
; i
++) {
4512 struct dm_target_spec
*spec
= cur_data
;
4516 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
4517 slen
= strlen((char*)gspec
+ spec_size
) + 1;
4519 spec
->next
= sizeof(*spec
) + slen
;
4520 strcpy((char*)&spec
[1], gspec
+ spec_size
);
4522 cur_data
+= spec
->next
;
4527 ret
= -TARGET_EINVAL
;
4528 unlock_user(argptr
, guest_data
, 0);
4531 unlock_user(argptr
, guest_data
, 0);
4533 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4534 if (!is_error(ret
)) {
4535 guest_data
= arg
+ host_dm
->data_start
;
4536 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
4537 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
4538 switch (ie
->host_cmd
) {
4543 case DM_DEV_SUSPEND
:
4546 case DM_TABLE_CLEAR
:
4548 case DM_DEV_SET_GEOMETRY
:
4549 /* no return data */
4551 case DM_LIST_DEVICES
:
4553 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
4554 uint32_t remaining_data
= guest_data_size
;
4555 void *cur_data
= argptr
;
4556 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
4557 int nl_size
= 12; /* can't use thunk_size due to alignment */
4560 uint32_t next
= nl
->next
;
4562 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
4564 if (remaining_data
< nl
->next
) {
4565 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
4568 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
4569 strcpy(cur_data
+ nl_size
, nl
->name
);
4570 cur_data
+= nl
->next
;
4571 remaining_data
-= nl
->next
;
4575 nl
= (void*)nl
+ next
;
4580 case DM_TABLE_STATUS
:
4582 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
4583 void *cur_data
= argptr
;
4584 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
4585 int spec_size
= thunk_type_size(arg_type
, 0);
4588 for (i
= 0; i
< host_dm
->target_count
; i
++) {
4589 uint32_t next
= spec
->next
;
4590 int slen
= strlen((char*)&spec
[1]) + 1;
4591 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
4592 if (guest_data_size
< spec
->next
) {
4593 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
4596 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
4597 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
4598 cur_data
= argptr
+ spec
->next
;
4599 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
4605 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
4606 int count
= *(uint32_t*)hdata
;
4607 uint64_t *hdev
= hdata
+ 8;
4608 uint64_t *gdev
= argptr
+ 8;
4611 *(uint32_t*)argptr
= tswap32(count
);
4612 for (i
= 0; i
< count
; i
++) {
4613 *gdev
= tswap64(*hdev
);
4619 case DM_LIST_VERSIONS
:
4621 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
4622 uint32_t remaining_data
= guest_data_size
;
4623 void *cur_data
= argptr
;
4624 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
4625 int vers_size
= thunk_type_size(arg_type
, 0);
4628 uint32_t next
= vers
->next
;
4630 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
4632 if (remaining_data
< vers
->next
) {
4633 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
4636 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
4637 strcpy(cur_data
+ vers_size
, vers
->name
);
4638 cur_data
+= vers
->next
;
4639 remaining_data
-= vers
->next
;
4643 vers
= (void*)vers
+ next
;
4648 unlock_user(argptr
, guest_data
, 0);
4649 ret
= -TARGET_EINVAL
;
4652 unlock_user(argptr
, guest_data
, guest_data_size
);
4654 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4656 ret
= -TARGET_EFAULT
;
4659 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
4660 unlock_user(argptr
, arg
, target_size
);
4667 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
4668 int cmd
, abi_long arg
)
4672 const argtype
*arg_type
= ie
->arg_type
;
4673 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
4676 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
4677 struct blkpg_partition host_part
;
4679 /* Read and convert blkpg */
4681 target_size
= thunk_type_size(arg_type
, 0);
4682 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4684 ret
= -TARGET_EFAULT
;
4687 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4688 unlock_user(argptr
, arg
, 0);
4690 switch (host_blkpg
->op
) {
4691 case BLKPG_ADD_PARTITION
:
4692 case BLKPG_DEL_PARTITION
:
4693 /* payload is struct blkpg_partition */
4696 /* Unknown opcode */
4697 ret
= -TARGET_EINVAL
;
4701 /* Read and convert blkpg->data */
4702 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
4703 target_size
= thunk_type_size(part_arg_type
, 0);
4704 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4706 ret
= -TARGET_EFAULT
;
4709 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
4710 unlock_user(argptr
, arg
, 0);
4712 /* Swizzle the data pointer to our local copy and call! */
4713 host_blkpg
->data
= &host_part
;
4714 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
4720 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4721 int fd
, int cmd
, abi_long arg
)
4723 const argtype
*arg_type
= ie
->arg_type
;
4724 const StructEntry
*se
;
4725 const argtype
*field_types
;
4726 const int *dst_offsets
, *src_offsets
;
4729 abi_ulong
*target_rt_dev_ptr
= NULL
;
4730 unsigned long *host_rt_dev_ptr
= NULL
;
4734 assert(ie
->access
== IOC_W
);
4735 assert(*arg_type
== TYPE_PTR
);
4737 assert(*arg_type
== TYPE_STRUCT
);
4738 target_size
= thunk_type_size(arg_type
, 0);
4739 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4741 return -TARGET_EFAULT
;
4744 assert(*arg_type
== (int)STRUCT_rtentry
);
4745 se
= struct_entries
+ *arg_type
++;
4746 assert(se
->convert
[0] == NULL
);
4747 /* convert struct here to be able to catch rt_dev string */
4748 field_types
= se
->field_types
;
4749 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
4750 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
4751 for (i
= 0; i
< se
->nb_fields
; i
++) {
4752 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
4753 assert(*field_types
== TYPE_PTRVOID
);
4754 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
4755 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
4756 if (*target_rt_dev_ptr
!= 0) {
4757 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
4758 tswapal(*target_rt_dev_ptr
));
4759 if (!*host_rt_dev_ptr
) {
4760 unlock_user(argptr
, arg
, 0);
4761 return -TARGET_EFAULT
;
4764 *host_rt_dev_ptr
= 0;
4769 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
4770 argptr
+ src_offsets
[i
],
4771 field_types
, THUNK_HOST
);
4773 unlock_user(argptr
, arg
, 0);
4775 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4777 assert(host_rt_dev_ptr
!= NULL
);
4778 assert(target_rt_dev_ptr
!= NULL
);
4779 if (*host_rt_dev_ptr
!= 0) {
4780 unlock_user((void *)*host_rt_dev_ptr
,
4781 *target_rt_dev_ptr
, 0);
4786 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4787 int fd
, int cmd
, abi_long arg
)
4789 int sig
= target_to_host_signal(arg
);
4790 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
4794 static abi_long
do_ioctl_tiocgptpeer(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4795 int fd
, int cmd
, abi_long arg
)
4797 int flags
= target_to_host_bitmask(arg
, fcntl_flags_tbl
);
4798 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, flags
));
4802 static IOCTLEntry ioctl_entries
[] = {
4803 #define IOCTL(cmd, access, ...) \
4804 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
4805 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
4806 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
4807 #define IOCTL_IGNORE(cmd) \
4808 { TARGET_ ## cmd, 0, #cmd },
4813 /* ??? Implement proper locking for ioctls. */
4814 /* do_ioctl() Must return target values and target errnos. */
4815 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
4817 const IOCTLEntry
*ie
;
4818 const argtype
*arg_type
;
4820 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
4826 if (ie
->target_cmd
== 0) {
4827 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
4828 return -TARGET_ENOSYS
;
4830 if (ie
->target_cmd
== cmd
)
4834 arg_type
= ie
->arg_type
;
4836 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
4837 } else if (!ie
->host_cmd
) {
4838 /* Some architectures define BSD ioctls in their headers
4839 that are not implemented in Linux. */
4840 return -TARGET_ENOSYS
;
4843 switch(arg_type
[0]) {
4846 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
4850 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
4854 target_size
= thunk_type_size(arg_type
, 0);
4855 switch(ie
->access
) {
4857 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4858 if (!is_error(ret
)) {
4859 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4861 return -TARGET_EFAULT
;
4862 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
4863 unlock_user(argptr
, arg
, target_size
);
4867 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4869 return -TARGET_EFAULT
;
4870 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4871 unlock_user(argptr
, arg
, 0);
4872 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4876 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4878 return -TARGET_EFAULT
;
4879 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4880 unlock_user(argptr
, arg
, 0);
4881 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4882 if (!is_error(ret
)) {
4883 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4885 return -TARGET_EFAULT
;
4886 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
4887 unlock_user(argptr
, arg
, target_size
);
4893 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
4894 (long)cmd
, arg_type
[0]);
4895 ret
= -TARGET_ENOSYS
;
4901 static const bitmask_transtbl iflag_tbl
[] = {
4902 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
4903 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
4904 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
4905 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
4906 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
4907 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
4908 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
4909 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
4910 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
4911 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
4912 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
4913 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
4914 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
4915 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
4919 static const bitmask_transtbl oflag_tbl
[] = {
4920 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
4921 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
4922 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
4923 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
4924 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
4925 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
4926 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
4927 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
4928 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
4929 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
4930 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
4931 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
4932 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
4933 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
4934 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
4935 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
4936 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
4937 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
4938 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
4939 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
4940 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
4941 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
4942 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
4943 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
4947 static const bitmask_transtbl cflag_tbl
[] = {
4948 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
4949 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
4950 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
4951 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
4952 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
4953 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
4954 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
4955 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
4956 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
4957 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
4958 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
4959 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
4960 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
4961 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
4962 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
4963 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
4964 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
4965 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
4966 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
4967 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
4968 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
4969 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
4970 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
4971 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
4972 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
4973 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
4974 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
4975 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
4976 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
4977 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
4978 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
4982 static const bitmask_transtbl lflag_tbl
[] = {
4983 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
4984 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
4985 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
4986 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
4987 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
4988 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
4989 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
4990 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
4991 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
4992 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
4993 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
4994 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
4995 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
4996 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
4997 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
5001 static void target_to_host_termios (void *dst
, const void *src
)
5003 struct host_termios
*host
= dst
;
5004 const struct target_termios
*target
= src
;
5007 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
5009 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
5011 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
5013 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
5014 host
->c_line
= target
->c_line
;
5016 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
5017 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
5018 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
5019 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
5020 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
5021 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
5022 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
5023 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
5024 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
5025 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
5026 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
5027 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
5028 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
5029 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
5030 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
5031 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
5032 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
5033 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
5036 static void host_to_target_termios (void *dst
, const void *src
)
5038 struct target_termios
*target
= dst
;
5039 const struct host_termios
*host
= src
;
5042 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
5044 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
5046 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
5048 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
5049 target
->c_line
= host
->c_line
;
5051 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
5052 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
5053 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
5054 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
5055 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
5056 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
5057 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
5058 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
5059 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
5060 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
5061 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
5062 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
5063 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
5064 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
5065 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
5066 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
5067 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
5068 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
5071 static const StructEntry struct_termios_def
= {
5072 .convert
= { host_to_target_termios
, target_to_host_termios
},
5073 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
5074 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
5077 static bitmask_transtbl mmap_flags_tbl
[] = {
5078 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
5079 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
5080 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
5081 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
,
5082 MAP_ANONYMOUS
, MAP_ANONYMOUS
},
5083 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
,
5084 MAP_GROWSDOWN
, MAP_GROWSDOWN
},
5085 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
,
5086 MAP_DENYWRITE
, MAP_DENYWRITE
},
5087 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
,
5088 MAP_EXECUTABLE
, MAP_EXECUTABLE
},
5089 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
5090 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
,
5091 MAP_NORESERVE
, MAP_NORESERVE
},
5092 { TARGET_MAP_HUGETLB
, TARGET_MAP_HUGETLB
, MAP_HUGETLB
, MAP_HUGETLB
},
5093 /* MAP_STACK had been ignored by the kernel for quite some time.
5094 Recognize it for the target insofar as we do not want to pass
5095 it through to the host. */
5096 { TARGET_MAP_STACK
, TARGET_MAP_STACK
, 0, 0 },
5100 #if defined(TARGET_I386)
5102 /* NOTE: there is really one LDT for all the threads */
5103 static uint8_t *ldt_table
;
5105 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
5112 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
5113 if (size
> bytecount
)
5115 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
5117 return -TARGET_EFAULT
;
5118 /* ??? Should this by byteswapped? */
5119 memcpy(p
, ldt_table
, size
);
5120 unlock_user(p
, ptr
, size
);
5124 /* XXX: add locking support */
5125 static abi_long
write_ldt(CPUX86State
*env
,
5126 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
5128 struct target_modify_ldt_ldt_s ldt_info
;
5129 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5130 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5131 int seg_not_present
, useable
, lm
;
5132 uint32_t *lp
, entry_1
, entry_2
;
5134 if (bytecount
!= sizeof(ldt_info
))
5135 return -TARGET_EINVAL
;
5136 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
5137 return -TARGET_EFAULT
;
5138 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5139 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5140 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5141 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5142 unlock_user_struct(target_ldt_info
, ptr
, 0);
5144 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
5145 return -TARGET_EINVAL
;
5146 seg_32bit
= ldt_info
.flags
& 1;
5147 contents
= (ldt_info
.flags
>> 1) & 3;
5148 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5149 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5150 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5151 useable
= (ldt_info
.flags
>> 6) & 1;
5155 lm
= (ldt_info
.flags
>> 7) & 1;
5157 if (contents
== 3) {
5159 return -TARGET_EINVAL
;
5160 if (seg_not_present
== 0)
5161 return -TARGET_EINVAL
;
5163 /* allocate the LDT */
5165 env
->ldt
.base
= target_mmap(0,
5166 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
5167 PROT_READ
|PROT_WRITE
,
5168 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
5169 if (env
->ldt
.base
== -1)
5170 return -TARGET_ENOMEM
;
5171 memset(g2h(env
->ldt
.base
), 0,
5172 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
5173 env
->ldt
.limit
= 0xffff;
5174 ldt_table
= g2h(env
->ldt
.base
);
5177 /* NOTE: same code as Linux kernel */
5178 /* Allow LDTs to be cleared by the user. */
5179 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5182 read_exec_only
== 1 &&
5184 limit_in_pages
== 0 &&
5185 seg_not_present
== 1 &&
5193 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5194 (ldt_info
.limit
& 0x0ffff);
5195 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5196 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5197 (ldt_info
.limit
& 0xf0000) |
5198 ((read_exec_only
^ 1) << 9) |
5200 ((seg_not_present
^ 1) << 15) |
5202 (limit_in_pages
<< 23) |
5206 entry_2
|= (useable
<< 20);
5208 /* Install the new entry ... */
5210 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
5211 lp
[0] = tswap32(entry_1
);
5212 lp
[1] = tswap32(entry_2
);
5216 /* specific and weird i386 syscalls */
5217 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
5218 unsigned long bytecount
)
5224 ret
= read_ldt(ptr
, bytecount
);
5227 ret
= write_ldt(env
, ptr
, bytecount
, 1);
5230 ret
= write_ldt(env
, ptr
, bytecount
, 0);
5233 ret
= -TARGET_ENOSYS
;
5239 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5240 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5242 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5243 struct target_modify_ldt_ldt_s ldt_info
;
5244 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5245 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5246 int seg_not_present
, useable
, lm
;
5247 uint32_t *lp
, entry_1
, entry_2
;
5250 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5251 if (!target_ldt_info
)
5252 return -TARGET_EFAULT
;
5253 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5254 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5255 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5256 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5257 if (ldt_info
.entry_number
== -1) {
5258 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
5259 if (gdt_table
[i
] == 0) {
5260 ldt_info
.entry_number
= i
;
5261 target_ldt_info
->entry_number
= tswap32(i
);
5266 unlock_user_struct(target_ldt_info
, ptr
, 1);
5268 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
5269 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
5270 return -TARGET_EINVAL
;
5271 seg_32bit
= ldt_info
.flags
& 1;
5272 contents
= (ldt_info
.flags
>> 1) & 3;
5273 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5274 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5275 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5276 useable
= (ldt_info
.flags
>> 6) & 1;
5280 lm
= (ldt_info
.flags
>> 7) & 1;
5283 if (contents
== 3) {
5284 if (seg_not_present
== 0)
5285 return -TARGET_EINVAL
;
5288 /* NOTE: same code as Linux kernel */
5289 /* Allow LDTs to be cleared by the user. */
5290 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5291 if ((contents
== 0 &&
5292 read_exec_only
== 1 &&
5294 limit_in_pages
== 0 &&
5295 seg_not_present
== 1 &&
5303 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5304 (ldt_info
.limit
& 0x0ffff);
5305 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5306 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5307 (ldt_info
.limit
& 0xf0000) |
5308 ((read_exec_only
^ 1) << 9) |
5310 ((seg_not_present
^ 1) << 15) |
5312 (limit_in_pages
<< 23) |
5317 /* Install the new entry ... */
5319 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
5320 lp
[0] = tswap32(entry_1
);
5321 lp
[1] = tswap32(entry_2
);
5325 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5327 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5328 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5329 uint32_t base_addr
, limit
, flags
;
5330 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
5331 int seg_not_present
, useable
, lm
;
5332 uint32_t *lp
, entry_1
, entry_2
;
5334 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5335 if (!target_ldt_info
)
5336 return -TARGET_EFAULT
;
5337 idx
= tswap32(target_ldt_info
->entry_number
);
5338 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
5339 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
5340 unlock_user_struct(target_ldt_info
, ptr
, 1);
5341 return -TARGET_EINVAL
;
5343 lp
= (uint32_t *)(gdt_table
+ idx
);
5344 entry_1
= tswap32(lp
[0]);
5345 entry_2
= tswap32(lp
[1]);
5347 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
5348 contents
= (entry_2
>> 10) & 3;
5349 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
5350 seg_32bit
= (entry_2
>> 22) & 1;
5351 limit_in_pages
= (entry_2
>> 23) & 1;
5352 useable
= (entry_2
>> 20) & 1;
5356 lm
= (entry_2
>> 21) & 1;
5358 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
5359 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
5360 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
5361 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
5362 base_addr
= (entry_1
>> 16) |
5363 (entry_2
& 0xff000000) |
5364 ((entry_2
& 0xff) << 16);
5365 target_ldt_info
->base_addr
= tswapal(base_addr
);
5366 target_ldt_info
->limit
= tswap32(limit
);
5367 target_ldt_info
->flags
= tswap32(flags
);
5368 unlock_user_struct(target_ldt_info
, ptr
, 1);
5371 #endif /* TARGET_I386 && TARGET_ABI32 */
5373 #ifndef TARGET_ABI32
5374 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
5381 case TARGET_ARCH_SET_GS
:
5382 case TARGET_ARCH_SET_FS
:
5383 if (code
== TARGET_ARCH_SET_GS
)
5387 cpu_x86_load_seg(env
, idx
, 0);
5388 env
->segs
[idx
].base
= addr
;
5390 case TARGET_ARCH_GET_GS
:
5391 case TARGET_ARCH_GET_FS
:
5392 if (code
== TARGET_ARCH_GET_GS
)
5396 val
= env
->segs
[idx
].base
;
5397 if (put_user(val
, addr
, abi_ulong
))
5398 ret
= -TARGET_EFAULT
;
5401 ret
= -TARGET_EINVAL
;
5408 #endif /* defined(TARGET_I386) */
5410 #define NEW_STACK_SIZE 0x40000
5413 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
5416 pthread_mutex_t mutex
;
5417 pthread_cond_t cond
;
5420 abi_ulong child_tidptr
;
5421 abi_ulong parent_tidptr
;
5425 static void *clone_func(void *arg
)
5427 new_thread_info
*info
= arg
;
5432 rcu_register_thread();
5433 tcg_register_thread();
5435 cpu
= ENV_GET_CPU(env
);
5437 ts
= (TaskState
*)cpu
->opaque
;
5438 info
->tid
= sys_gettid();
5440 if (info
->child_tidptr
)
5441 put_user_u32(info
->tid
, info
->child_tidptr
);
5442 if (info
->parent_tidptr
)
5443 put_user_u32(info
->tid
, info
->parent_tidptr
);
5444 /* Enable signals. */
5445 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
5446 /* Signal to the parent that we're ready. */
5447 pthread_mutex_lock(&info
->mutex
);
5448 pthread_cond_broadcast(&info
->cond
);
5449 pthread_mutex_unlock(&info
->mutex
);
5450 /* Wait until the parent has finished initializing the tls state. */
5451 pthread_mutex_lock(&clone_lock
);
5452 pthread_mutex_unlock(&clone_lock
);
5458 /* do_fork() Must return host values and target errnos (unlike most
5459 do_*() functions). */
5460 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
5461 abi_ulong parent_tidptr
, target_ulong newtls
,
5462 abi_ulong child_tidptr
)
5464 CPUState
*cpu
= ENV_GET_CPU(env
);
5468 CPUArchState
*new_env
;
5471 flags
&= ~CLONE_IGNORED_FLAGS
;
5473 /* Emulate vfork() with fork() */
5474 if (flags
& CLONE_VFORK
)
5475 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
5477 if (flags
& CLONE_VM
) {
5478 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
5479 new_thread_info info
;
5480 pthread_attr_t attr
;
5482 if (((flags
& CLONE_THREAD_FLAGS
) != CLONE_THREAD_FLAGS
) ||
5483 (flags
& CLONE_INVALID_THREAD_FLAGS
)) {
5484 return -TARGET_EINVAL
;
5487 ts
= g_new0(TaskState
, 1);
5488 init_task_state(ts
);
5490 /* Grab a mutex so that thread setup appears atomic. */
5491 pthread_mutex_lock(&clone_lock
);
5493 /* we create a new CPU instance. */
5494 new_env
= cpu_copy(env
);
5495 /* Init regs that differ from the parent. */
5496 cpu_clone_regs(new_env
, newsp
);
5497 new_cpu
= ENV_GET_CPU(new_env
);
5498 new_cpu
->opaque
= ts
;
5499 ts
->bprm
= parent_ts
->bprm
;
5500 ts
->info
= parent_ts
->info
;
5501 ts
->signal_mask
= parent_ts
->signal_mask
;
5503 if (flags
& CLONE_CHILD_CLEARTID
) {
5504 ts
->child_tidptr
= child_tidptr
;
5507 if (flags
& CLONE_SETTLS
) {
5508 cpu_set_tls (new_env
, newtls
);
5511 memset(&info
, 0, sizeof(info
));
5512 pthread_mutex_init(&info
.mutex
, NULL
);
5513 pthread_mutex_lock(&info
.mutex
);
5514 pthread_cond_init(&info
.cond
, NULL
);
5516 if (flags
& CLONE_CHILD_SETTID
) {
5517 info
.child_tidptr
= child_tidptr
;
5519 if (flags
& CLONE_PARENT_SETTID
) {
5520 info
.parent_tidptr
= parent_tidptr
;
5523 ret
= pthread_attr_init(&attr
);
5524 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
5525 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
5526 /* It is not safe to deliver signals until the child has finished
5527 initializing, so temporarily block all signals. */
5528 sigfillset(&sigmask
);
5529 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
5531 /* If this is our first additional thread, we need to ensure we
5532 * generate code for parallel execution and flush old translations.
5534 if (!parallel_cpus
) {
5535 parallel_cpus
= true;
5539 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
5540 /* TODO: Free new CPU state if thread creation failed. */
5542 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
5543 pthread_attr_destroy(&attr
);
5545 /* Wait for the child to initialize. */
5546 pthread_cond_wait(&info
.cond
, &info
.mutex
);
5551 pthread_mutex_unlock(&info
.mutex
);
5552 pthread_cond_destroy(&info
.cond
);
5553 pthread_mutex_destroy(&info
.mutex
);
5554 pthread_mutex_unlock(&clone_lock
);
5556 /* if no CLONE_VM, we consider it is a fork */
5557 if (flags
& CLONE_INVALID_FORK_FLAGS
) {
5558 return -TARGET_EINVAL
;
5561 /* We can't support custom termination signals */
5562 if ((flags
& CSIGNAL
) != TARGET_SIGCHLD
) {
5563 return -TARGET_EINVAL
;
5566 if (block_signals()) {
5567 return -TARGET_ERESTARTSYS
;
5573 /* Child Process. */
5574 cpu_clone_regs(env
, newsp
);
5576 /* There is a race condition here. The parent process could
5577 theoretically read the TID in the child process before the child
5578 tid is set. This would require using either ptrace
5579 (not implemented) or having *_tidptr to point at a shared memory
5580 mapping. We can't repeat the spinlock hack used above because
5581 the child process gets its own copy of the lock. */
5582 if (flags
& CLONE_CHILD_SETTID
)
5583 put_user_u32(sys_gettid(), child_tidptr
);
5584 if (flags
& CLONE_PARENT_SETTID
)
5585 put_user_u32(sys_gettid(), parent_tidptr
);
5586 ts
= (TaskState
*)cpu
->opaque
;
5587 if (flags
& CLONE_SETTLS
)
5588 cpu_set_tls (env
, newtls
);
5589 if (flags
& CLONE_CHILD_CLEARTID
)
5590 ts
->child_tidptr
= child_tidptr
;
5598 /* warning : doesn't handle linux specific flags... */
5599 static int target_to_host_fcntl_cmd(int cmd
)
5604 case TARGET_F_DUPFD
:
5605 case TARGET_F_GETFD
:
5606 case TARGET_F_SETFD
:
5607 case TARGET_F_GETFL
:
5608 case TARGET_F_SETFL
:
5611 case TARGET_F_GETLK
:
5614 case TARGET_F_SETLK
:
5617 case TARGET_F_SETLKW
:
5620 case TARGET_F_GETOWN
:
5623 case TARGET_F_SETOWN
:
5626 case TARGET_F_GETSIG
:
5629 case TARGET_F_SETSIG
:
5632 #if TARGET_ABI_BITS == 32
5633 case TARGET_F_GETLK64
:
5636 case TARGET_F_SETLK64
:
5639 case TARGET_F_SETLKW64
:
5643 case TARGET_F_SETLEASE
:
5646 case TARGET_F_GETLEASE
:
5649 #ifdef F_DUPFD_CLOEXEC
5650 case TARGET_F_DUPFD_CLOEXEC
:
5651 ret
= F_DUPFD_CLOEXEC
;
5654 case TARGET_F_NOTIFY
:
5658 case TARGET_F_GETOWN_EX
:
5663 case TARGET_F_SETOWN_EX
:
5668 case TARGET_F_SETPIPE_SZ
:
5671 case TARGET_F_GETPIPE_SZ
:
5676 ret
= -TARGET_EINVAL
;
5680 #if defined(__powerpc64__)
5681 /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
5682 * is not supported by kernel. The glibc fcntl call actually adjusts
5683 * them to 5, 6 and 7 before making the syscall(). Since we make the
5684 * syscall directly, adjust to what is supported by the kernel.
5686 if (ret
>= F_GETLK64
&& ret
<= F_SETLKW64
) {
5687 ret
-= F_GETLK64
- 5;
/* Shared switch body for translating flock l_type values in either
 * direction; TRANSTBL_CONVERT is defined by each user to pick the
 * direction of the mapping. */
#define FLOCK_TRANSTBL \
    switch (type) { \
    TRANSTBL_CONVERT(F_RDLCK); \
    TRANSTBL_CONVERT(F_WRLCK); \
    TRANSTBL_CONVERT(F_UNLCK); \
    TRANSTBL_CONVERT(F_EXLCK); \
    TRANSTBL_CONVERT(F_SHLCK); \
    }
5703 static int target_to_host_flock(int type
)
5705 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
5707 #undef TRANSTBL_CONVERT
5708 return -TARGET_EINVAL
;
5711 static int host_to_target_flock(int type
)
5713 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
5715 #undef TRANSTBL_CONVERT
5716 /* if we don't know how to convert the value coming
5717 * from the host we copy to the target field as-is
5722 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
5723 abi_ulong target_flock_addr
)
5725 struct target_flock
*target_fl
;
5728 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
5729 return -TARGET_EFAULT
;
5732 __get_user(l_type
, &target_fl
->l_type
);
5733 l_type
= target_to_host_flock(l_type
);
5737 fl
->l_type
= l_type
;
5738 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
5739 __get_user(fl
->l_start
, &target_fl
->l_start
);
5740 __get_user(fl
->l_len
, &target_fl
->l_len
);
5741 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
5742 unlock_user_struct(target_fl
, target_flock_addr
, 0);
5746 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
5747 const struct flock64
*fl
)
5749 struct target_flock
*target_fl
;
5752 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
5753 return -TARGET_EFAULT
;
5756 l_type
= host_to_target_flock(fl
->l_type
);
5757 __put_user(l_type
, &target_fl
->l_type
);
5758 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
5759 __put_user(fl
->l_start
, &target_fl
->l_start
);
5760 __put_user(fl
->l_len
, &target_fl
->l_len
);
5761 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
5762 unlock_user_struct(target_fl
, target_flock_addr
, 1);
5766 typedef abi_long
from_flock64_fn(struct flock64
*fl
, abi_ulong target_addr
);
5767 typedef abi_long
to_flock64_fn(abi_ulong target_addr
, const struct flock64
*fl
);
5769 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
5770 static inline abi_long
copy_from_user_oabi_flock64(struct flock64
*fl
,
5771 abi_ulong target_flock_addr
)
5773 struct target_oabi_flock64
*target_fl
;
5776 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
5777 return -TARGET_EFAULT
;
5780 __get_user(l_type
, &target_fl
->l_type
);
5781 l_type
= target_to_host_flock(l_type
);
5785 fl
->l_type
= l_type
;
5786 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
5787 __get_user(fl
->l_start
, &target_fl
->l_start
);
5788 __get_user(fl
->l_len
, &target_fl
->l_len
);
5789 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
5790 unlock_user_struct(target_fl
, target_flock_addr
, 0);
5794 static inline abi_long
copy_to_user_oabi_flock64(abi_ulong target_flock_addr
,
5795 const struct flock64
*fl
)
5797 struct target_oabi_flock64
*target_fl
;
5800 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
5801 return -TARGET_EFAULT
;
5804 l_type
= host_to_target_flock(fl
->l_type
);
5805 __put_user(l_type
, &target_fl
->l_type
);
5806 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
5807 __put_user(fl
->l_start
, &target_fl
->l_start
);
5808 __put_user(fl
->l_len
, &target_fl
->l_len
);
5809 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
5810 unlock_user_struct(target_fl
, target_flock_addr
, 1);
5815 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
5816 abi_ulong target_flock_addr
)
5818 struct target_flock64
*target_fl
;
5821 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
5822 return -TARGET_EFAULT
;
5825 __get_user(l_type
, &target_fl
->l_type
);
5826 l_type
= target_to_host_flock(l_type
);
5830 fl
->l_type
= l_type
;
5831 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
5832 __get_user(fl
->l_start
, &target_fl
->l_start
);
5833 __get_user(fl
->l_len
, &target_fl
->l_len
);
5834 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
5835 unlock_user_struct(target_fl
, target_flock_addr
, 0);
5839 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
5840 const struct flock64
*fl
)
5842 struct target_flock64
*target_fl
;
5845 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
5846 return -TARGET_EFAULT
;
5849 l_type
= host_to_target_flock(fl
->l_type
);
5850 __put_user(l_type
, &target_fl
->l_type
);
5851 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
5852 __put_user(fl
->l_start
, &target_fl
->l_start
);
5853 __put_user(fl
->l_len
, &target_fl
->l_len
);
5854 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
5855 unlock_user_struct(target_fl
, target_flock_addr
, 1);
5859 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
5861 struct flock64 fl64
;
5863 struct f_owner_ex fox
;
5864 struct target_f_owner_ex
*target_fox
;
5867 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
5869 if (host_cmd
== -TARGET_EINVAL
)
5873 case TARGET_F_GETLK
:
5874 ret
= copy_from_user_flock(&fl64
, arg
);
5878 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
5880 ret
= copy_to_user_flock(arg
, &fl64
);
5884 case TARGET_F_SETLK
:
5885 case TARGET_F_SETLKW
:
5886 ret
= copy_from_user_flock(&fl64
, arg
);
5890 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
5893 case TARGET_F_GETLK64
:
5894 ret
= copy_from_user_flock64(&fl64
, arg
);
5898 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
5900 ret
= copy_to_user_flock64(arg
, &fl64
);
5903 case TARGET_F_SETLK64
:
5904 case TARGET_F_SETLKW64
:
5905 ret
= copy_from_user_flock64(&fl64
, arg
);
5909 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
5912 case TARGET_F_GETFL
:
5913 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
5915 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
5919 case TARGET_F_SETFL
:
5920 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
5921 target_to_host_bitmask(arg
,
5926 case TARGET_F_GETOWN_EX
:
5927 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
5929 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
5930 return -TARGET_EFAULT
;
5931 target_fox
->type
= tswap32(fox
.type
);
5932 target_fox
->pid
= tswap32(fox
.pid
);
5933 unlock_user_struct(target_fox
, arg
, 1);
5939 case TARGET_F_SETOWN_EX
:
5940 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
5941 return -TARGET_EFAULT
;
5942 fox
.type
= tswap32(target_fox
->type
);
5943 fox
.pid
= tswap32(target_fox
->pid
);
5944 unlock_user_struct(target_fox
, arg
, 0);
5945 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
5949 case TARGET_F_SETOWN
:
5950 case TARGET_F_GETOWN
:
5951 case TARGET_F_SETSIG
:
5952 case TARGET_F_GETSIG
:
5953 case TARGET_F_SETLEASE
:
5954 case TARGET_F_GETLEASE
:
5955 case TARGET_F_SETPIPE_SZ
:
5956 case TARGET_F_GETPIPE_SZ
:
5957 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
5961 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
5969 static inline int high2lowuid(int uid
)
5977 static inline int high2lowgid(int gid
)
5985 static inline int low2highuid(int uid
)
5987 if ((int16_t)uid
== -1)
5993 static inline int low2highgid(int gid
)
5995 if ((int16_t)gid
== -1)
6000 static inline int tswapid(int id
)
6005 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6007 #else /* !USE_UID16 */
6008 static inline int high2lowuid(int uid
)
6012 static inline int high2lowgid(int gid
)
6016 static inline int low2highuid(int uid
)
6020 static inline int low2highgid(int gid
)
6024 static inline int tswapid(int id
)
6029 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6031 #endif /* USE_UID16 */
6033 /* We must do direct syscalls for setting UID/GID, because we want to
6034 * implement the Linux system call semantics of "change only for this thread",
6035 * not the libc/POSIX semantics of "change for all threads in process".
6036 * (See http://ewontfix.com/17/ for more details.)
6037 * We use the 32-bit version of the syscalls if present; if it is not
6038 * then either the host architecture supports 32-bit UIDs natively with
6039 * the standard syscall, or the 16-bit UID is the best we can do.
6041 #ifdef __NR_setuid32
6042 #define __NR_sys_setuid __NR_setuid32
6044 #define __NR_sys_setuid __NR_setuid
6046 #ifdef __NR_setgid32
6047 #define __NR_sys_setgid __NR_setgid32
6049 #define __NR_sys_setgid __NR_setgid
6051 #ifdef __NR_setresuid32
6052 #define __NR_sys_setresuid __NR_setresuid32
6054 #define __NR_sys_setresuid __NR_setresuid
6056 #ifdef __NR_setresgid32
6057 #define __NR_sys_setresgid __NR_setresgid32
6059 #define __NR_sys_setresgid __NR_setresgid
6062 _syscall1(int, sys_setuid
, uid_t
, uid
)
6063 _syscall1(int, sys_setgid
, gid_t
, gid
)
6064 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
6065 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
6067 void syscall_init(void)
6070 const argtype
*arg_type
;
6074 thunk_init(STRUCT_MAX
);
6076 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6077 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6078 #include "syscall_types.h"
6080 #undef STRUCT_SPECIAL
6082 /* Build target_to_host_errno_table[] table from
6083 * host_to_target_errno_table[]. */
6084 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
6085 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
6088 /* we patch the ioctl size if necessary. We rely on the fact that
6089 no ioctl has all the bits at '1' in the size field */
6091 while (ie
->target_cmd
!= 0) {
6092 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
6093 TARGET_IOC_SIZEMASK
) {
6094 arg_type
= ie
->arg_type
;
6095 if (arg_type
[0] != TYPE_PTR
) {
6096 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
6101 size
= thunk_type_size(arg_type
, 0);
6102 ie
->target_cmd
= (ie
->target_cmd
&
6103 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
6104 (size
<< TARGET_IOC_SIZESHIFT
);
6107 /* automatic consistency check if same arch */
6108 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6109 (defined(__x86_64__) && defined(TARGET_X86_64))
6110 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
6111 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6112 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
6119 #if TARGET_ABI_BITS == 32
6120 static inline uint64_t target_offset64(uint32_t word0
, uint32_t word1
)
6122 #ifdef TARGET_WORDS_BIGENDIAN
6123 return ((uint64_t)word0
<< 32) | word1
;
6125 return ((uint64_t)word1
<< 32) | word0
;
6128 #else /* TARGET_ABI_BITS == 32 */
6129 static inline uint64_t target_offset64(uint64_t word0
, uint64_t word1
)
6133 #endif /* TARGET_ABI_BITS != 32 */
6135 #ifdef TARGET_NR_truncate64
6136 static inline abi_long
target_truncate64(void *cpu_env
, const char *arg1
,
6141 if (regpairs_aligned(cpu_env
, TARGET_NR_truncate64
)) {
6145 return get_errno(truncate64(arg1
, target_offset64(arg2
, arg3
)));
6149 #ifdef TARGET_NR_ftruncate64
6150 static inline abi_long
target_ftruncate64(void *cpu_env
, abi_long arg1
,
6155 if (regpairs_aligned(cpu_env
, TARGET_NR_ftruncate64
)) {
6159 return get_errno(ftruncate64(arg1
, target_offset64(arg2
, arg3
)));
6163 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
6164 abi_ulong target_addr
)
6166 struct target_timespec
*target_ts
;
6168 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
6169 return -TARGET_EFAULT
;
6170 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6171 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6172 unlock_user_struct(target_ts
, target_addr
, 0);
6176 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
6177 struct timespec
*host_ts
)
6179 struct target_timespec
*target_ts
;
6181 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
6182 return -TARGET_EFAULT
;
6183 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6184 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6185 unlock_user_struct(target_ts
, target_addr
, 1);
6189 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
6190 abi_ulong target_addr
)
6192 struct target_itimerspec
*target_itspec
;
6194 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
6195 return -TARGET_EFAULT
;
6198 host_itspec
->it_interval
.tv_sec
=
6199 tswapal(target_itspec
->it_interval
.tv_sec
);
6200 host_itspec
->it_interval
.tv_nsec
=
6201 tswapal(target_itspec
->it_interval
.tv_nsec
);
6202 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
6203 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
6205 unlock_user_struct(target_itspec
, target_addr
, 1);
6209 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
6210 struct itimerspec
*host_its
)
6212 struct target_itimerspec
*target_itspec
;
6214 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
6215 return -TARGET_EFAULT
;
6218 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
6219 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
6221 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
6222 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
6224 unlock_user_struct(target_itspec
, target_addr
, 0);
6228 static inline abi_long
target_to_host_timex(struct timex
*host_tx
,
6229 abi_long target_addr
)
6231 struct target_timex
*target_tx
;
6233 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
6234 return -TARGET_EFAULT
;
6237 __get_user(host_tx
->modes
, &target_tx
->modes
);
6238 __get_user(host_tx
->offset
, &target_tx
->offset
);
6239 __get_user(host_tx
->freq
, &target_tx
->freq
);
6240 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6241 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
6242 __get_user(host_tx
->status
, &target_tx
->status
);
6243 __get_user(host_tx
->constant
, &target_tx
->constant
);
6244 __get_user(host_tx
->precision
, &target_tx
->precision
);
6245 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
6246 __get_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
6247 __get_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
6248 __get_user(host_tx
->tick
, &target_tx
->tick
);
6249 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
6250 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
6251 __get_user(host_tx
->shift
, &target_tx
->shift
);
6252 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
6253 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
6254 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
6255 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
6256 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
6257 __get_user(host_tx
->tai
, &target_tx
->tai
);
6259 unlock_user_struct(target_tx
, target_addr
, 0);
6263 static inline abi_long
host_to_target_timex(abi_long target_addr
,
6264 struct timex
*host_tx
)
6266 struct target_timex
*target_tx
;
6268 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
6269 return -TARGET_EFAULT
;
6272 __put_user(host_tx
->modes
, &target_tx
->modes
);
6273 __put_user(host_tx
->offset
, &target_tx
->offset
);
6274 __put_user(host_tx
->freq
, &target_tx
->freq
);
6275 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6276 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
6277 __put_user(host_tx
->status
, &target_tx
->status
);
6278 __put_user(host_tx
->constant
, &target_tx
->constant
);
6279 __put_user(host_tx
->precision
, &target_tx
->precision
);
6280 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
6281 __put_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
6282 __put_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
6283 __put_user(host_tx
->tick
, &target_tx
->tick
);
6284 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
6285 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
6286 __put_user(host_tx
->shift
, &target_tx
->shift
);
6287 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
6288 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
6289 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
6290 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
6291 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
6292 __put_user(host_tx
->tai
, &target_tx
->tai
);
6294 unlock_user_struct(target_tx
, target_addr
, 1);
6299 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
6300 abi_ulong target_addr
)
6302 struct target_sigevent
*target_sevp
;
6304 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
6305 return -TARGET_EFAULT
;
6308 /* This union is awkward on 64 bit systems because it has a 32 bit
6309 * integer and a pointer in it; we follow the conversion approach
6310 * used for handling sigval types in signal.c so the guest should get
6311 * the correct value back even if we did a 64 bit byteswap and it's
6312 * using the 32 bit integer.
6314 host_sevp
->sigev_value
.sival_ptr
=
6315 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
6316 host_sevp
->sigev_signo
=
6317 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
6318 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
6319 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
6321 unlock_user_struct(target_sevp
, target_addr
, 1);
6325 #if defined(TARGET_NR_mlockall)
6326 static inline int target_to_host_mlockall_arg(int arg
)
6330 if (arg
& TARGET_MLOCKALL_MCL_CURRENT
) {
6331 result
|= MCL_CURRENT
;
6333 if (arg
& TARGET_MLOCKALL_MCL_FUTURE
) {
6334 result
|= MCL_FUTURE
;
6340 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
6341 defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
6342 defined(TARGET_NR_newfstatat))
6343 static inline abi_long
host_to_target_stat64(void *cpu_env
,
6344 abi_ulong target_addr
,
6345 struct stat
*host_st
)
6347 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6348 if (((CPUARMState
*)cpu_env
)->eabi
) {
6349 struct target_eabi_stat64
*target_st
;
6351 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
6352 return -TARGET_EFAULT
;
6353 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
6354 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
6355 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
6356 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6357 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
6359 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
6360 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
6361 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
6362 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
6363 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
6364 __put_user(host_st
->st_size
, &target_st
->st_size
);
6365 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
6366 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
6367 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
6368 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
6369 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
6370 unlock_user_struct(target_st
, target_addr
, 1);
6374 #if defined(TARGET_HAS_STRUCT_STAT64)
6375 struct target_stat64
*target_st
;
6377 struct target_stat
*target_st
;
6380 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
6381 return -TARGET_EFAULT
;
6382 memset(target_st
, 0, sizeof(*target_st
));
6383 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
6384 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
6385 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6386 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
6388 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
6389 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
6390 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
6391 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
6392 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
6393 /* XXX: better use of kernel struct */
6394 __put_user(host_st
->st_size
, &target_st
->st_size
);
6395 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
6396 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
6397 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
6398 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
6399 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
6400 unlock_user_struct(target_st
, target_addr
, 1);
6407 /* ??? Using host futex calls even when target atomic operations
6408 are not really atomic probably breaks things. However implementing
6409 futexes locally would make futexes shared between multiple processes
6410 tricky. However they're probably useless because guest atomic
6411 operations won't work either. */
6412 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
6413 target_ulong uaddr2
, int val3
)
6415 struct timespec ts
, *pts
;
6418 /* ??? We assume FUTEX_* constants are the same on both host
6420 #ifdef FUTEX_CMD_MASK
6421 base_op
= op
& FUTEX_CMD_MASK
;
6427 case FUTEX_WAIT_BITSET
:
6430 target_to_host_timespec(pts
, timeout
);
6434 return get_errno(safe_futex(g2h(uaddr
), op
, tswap32(val
),
6437 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
6439 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
6441 case FUTEX_CMP_REQUEUE
:
6443 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6444 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6445 But the prototype takes a `struct timespec *'; insert casts
6446 to satisfy the compiler. We do not need to tswap TIMEOUT
6447 since it's not compared to guest memory. */
6448 pts
= (struct timespec
*)(uintptr_t) timeout
;
6449 return get_errno(safe_futex(g2h(uaddr
), op
, val
, pts
,
6451 (base_op
== FUTEX_CMP_REQUEUE
6455 return -TARGET_ENOSYS
;
6458 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6459 static abi_long
do_name_to_handle_at(abi_long dirfd
, abi_long pathname
,
6460 abi_long handle
, abi_long mount_id
,
6463 struct file_handle
*target_fh
;
6464 struct file_handle
*fh
;
6468 unsigned int size
, total_size
;
6470 if (get_user_s32(size
, handle
)) {
6471 return -TARGET_EFAULT
;
6474 name
= lock_user_string(pathname
);
6476 return -TARGET_EFAULT
;
6479 total_size
= sizeof(struct file_handle
) + size
;
6480 target_fh
= lock_user(VERIFY_WRITE
, handle
, total_size
, 0);
6482 unlock_user(name
, pathname
, 0);
6483 return -TARGET_EFAULT
;
6486 fh
= g_malloc0(total_size
);
6487 fh
->handle_bytes
= size
;
6489 ret
= get_errno(name_to_handle_at(dirfd
, path(name
), fh
, &mid
, flags
));
6490 unlock_user(name
, pathname
, 0);
6492 /* man name_to_handle_at(2):
6493 * Other than the use of the handle_bytes field, the caller should treat
6494 * the file_handle structure as an opaque data type
6497 memcpy(target_fh
, fh
, total_size
);
6498 target_fh
->handle_bytes
= tswap32(fh
->handle_bytes
);
6499 target_fh
->handle_type
= tswap32(fh
->handle_type
);
6501 unlock_user(target_fh
, handle
, total_size
);
6503 if (put_user_s32(mid
, mount_id
)) {
6504 return -TARGET_EFAULT
;
6512 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6513 static abi_long
do_open_by_handle_at(abi_long mount_fd
, abi_long handle
,
6516 struct file_handle
*target_fh
;
6517 struct file_handle
*fh
;
6518 unsigned int size
, total_size
;
6521 if (get_user_s32(size
, handle
)) {
6522 return -TARGET_EFAULT
;
6525 total_size
= sizeof(struct file_handle
) + size
;
6526 target_fh
= lock_user(VERIFY_READ
, handle
, total_size
, 1);
6528 return -TARGET_EFAULT
;
6531 fh
= g_memdup(target_fh
, total_size
);
6532 fh
->handle_bytes
= size
;
6533 fh
->handle_type
= tswap32(target_fh
->handle_type
);
6535 ret
= get_errno(open_by_handle_at(mount_fd
, fh
,
6536 target_to_host_bitmask(flags
, fcntl_flags_tbl
)));
6540 unlock_user(target_fh
, handle
, total_size
);
6546 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6548 static abi_long
do_signalfd4(int fd
, abi_long mask
, int flags
)
6551 target_sigset_t
*target_mask
;
6555 if (flags
& ~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
)) {
6556 return -TARGET_EINVAL
;
6558 if (!lock_user_struct(VERIFY_READ
, target_mask
, mask
, 1)) {
6559 return -TARGET_EFAULT
;
6562 target_to_host_sigset(&host_mask
, target_mask
);
6564 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
6566 ret
= get_errno(signalfd(fd
, &host_mask
, host_flags
));
6568 fd_trans_register(ret
, &target_signalfd_trans
);
6571 unlock_user_struct(target_mask
, mask
, 0);
6577 /* Map host to target signal numbers for the wait family of syscalls.
6578 Assume all other status bits are the same. */
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Termination signal lives in the low 7 bits. */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* Stop signal lives in bits 8..15. */
        return (host_to_target_signal(WSTOPSIG(status)) << 8) | (status & 0xff);
    }
    return status;
}
6591 static int open_self_cmdline(void *cpu_env
, int fd
)
6593 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
6594 struct linux_binprm
*bprm
= ((TaskState
*)cpu
->opaque
)->bprm
;
6597 for (i
= 0; i
< bprm
->argc
; i
++) {
6598 size_t len
= strlen(bprm
->argv
[i
]) + 1;
6600 if (write(fd
, bprm
->argv
[i
], len
) != len
) {
6608 static int open_self_maps(void *cpu_env
, int fd
)
6610 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
6611 TaskState
*ts
= cpu
->opaque
;
6617 fp
= fopen("/proc/self/maps", "r");
6622 while ((read
= getline(&line
, &len
, fp
)) != -1) {
6623 int fields
, dev_maj
, dev_min
, inode
;
6624 uint64_t min
, max
, offset
;
6625 char flag_r
, flag_w
, flag_x
, flag_p
;
6626 char path
[512] = "";
6627 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
6628 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
6629 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
6631 if ((fields
< 10) || (fields
> 11)) {
6634 if (h2g_valid(min
)) {
6635 int flags
= page_get_flags(h2g(min
));
6636 max
= h2g_valid(max
- 1) ? max
: (uintptr_t)g2h(GUEST_ADDR_MAX
) + 1;
6637 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
6640 if (h2g(min
) == ts
->info
->stack_limit
) {
6641 pstrcpy(path
, sizeof(path
), " [stack]");
6643 dprintf(fd
, TARGET_ABI_FMT_ptr
"-" TARGET_ABI_FMT_ptr
6644 " %c%c%c%c %08" PRIx64
" %02x:%02x %d %s%s\n",
6645 h2g(min
), h2g(max
- 1) + 1, flag_r
, flag_w
,
6646 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
6647 path
[0] ? " " : "", path
);
6657 static int open_self_stat(void *cpu_env
, int fd
)
6659 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
6660 TaskState
*ts
= cpu
->opaque
;
6661 abi_ulong start_stack
= ts
->info
->start_stack
;
6664 for (i
= 0; i
< 44; i
++) {
6672 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
6673 } else if (i
== 1) {
6675 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
6676 } else if (i
== 27) {
6679 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
6681 /* for the rest, there is MasterCard */
6682 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
6686 if (write(fd
, buf
, len
) != len
) {
6694 static int open_self_auxv(void *cpu_env
, int fd
)
6696 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
6697 TaskState
*ts
= cpu
->opaque
;
6698 abi_ulong auxv
= ts
->info
->saved_auxv
;
6699 abi_ulong len
= ts
->info
->auxv_len
;
6703 * Auxiliary vector is stored in target process stack.
6704 * read in whole auxv vector and copy it to file
6706 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
6710 r
= write(fd
, ptr
, len
);
6717 lseek(fd
, 0, SEEK_SET
);
6718 unlock_user(ptr
, auxv
, len
);
/* Return non-zero when filename names this process's own /proc entry
 * (either "/proc/self/<entry>" or "/proc/<our pid>/<entry>"). */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (strncmp(filename, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    filename += strlen("/proc/");

    if (strncmp(filename, "self/", strlen("self/")) == 0) {
        filename += strlen("self/");
    } else if (*filename >= '1' && *filename <= '9') {
        char myself[80];

        snprintf(myself, sizeof(myself), "%d/", getpid());
        if (strncmp(filename, myself, strlen(myself)) != 0) {
            return 0;
        }
        filename += strlen(myself);
    } else {
        return 0;
    }

    return strcmp(filename, entry) == 0;
}
6748 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-path matcher used by the fake-/proc table when host and target
 * endianness differ. */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
6754 static int open_net_route(void *cpu_env
, int fd
)
6761 fp
= fopen("/proc/net/route", "r");
6768 read
= getline(&line
, &len
, fp
);
6769 dprintf(fd
, "%s", line
);
6773 while ((read
= getline(&line
, &len
, fp
)) != -1) {
6775 uint32_t dest
, gw
, mask
;
6776 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
6779 fields
= sscanf(line
,
6780 "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6781 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
6782 &mask
, &mtu
, &window
, &irtt
);
6786 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6787 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
6788 metric
, tswap32(mask
), mtu
, window
, irtt
);
6798 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
6801 const char *filename
;
6802 int (*fill
)(void *cpu_env
, int fd
);
6803 int (*cmp
)(const char *s1
, const char *s2
);
6805 const struct fake_open
*fake_open
;
6806 static const struct fake_open fakes
[] = {
6807 { "maps", open_self_maps
, is_proc_myself
},
6808 { "stat", open_self_stat
, is_proc_myself
},
6809 { "auxv", open_self_auxv
, is_proc_myself
},
6810 { "cmdline", open_self_cmdline
, is_proc_myself
},
6811 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6812 { "/proc/net/route", open_net_route
, is_proc
},
6814 { NULL
, NULL
, NULL
}
6817 if (is_proc_myself(pathname
, "exe")) {
6818 int execfd
= qemu_getauxval(AT_EXECFD
);
6819 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
6822 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
6823 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
6828 if (fake_open
->filename
) {
6830 char filename
[PATH_MAX
];
6833 /* create temporary file to map stat to */
6834 tmpdir
= getenv("TMPDIR");
6837 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
6838 fd
= mkstemp(filename
);
6844 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
6850 lseek(fd
, 0, SEEK_SET
);
6855 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
6858 #define TIMER_MAGIC 0x0caf0000
6859 #define TIMER_MAGIC_MASK 0xffff0000
6861 /* Convert QEMU provided timer ID back to internal 16bit index format */
6862 static target_timer_t
get_timer_id(abi_long arg
)
6864 target_timer_t timerid
= arg
;
6866 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
6867 return -TARGET_EINVAL
;
6872 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
6873 return -TARGET_EINVAL
;
6879 static int target_to_host_cpu_mask(unsigned long *host_mask
,
6881 abi_ulong target_addr
,
6884 unsigned target_bits
= sizeof(abi_ulong
) * 8;
6885 unsigned host_bits
= sizeof(*host_mask
) * 8;
6886 abi_ulong
*target_mask
;
6889 assert(host_size
>= target_size
);
6891 target_mask
= lock_user(VERIFY_READ
, target_addr
, target_size
, 1);
6893 return -TARGET_EFAULT
;
6895 memset(host_mask
, 0, host_size
);
6897 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
6898 unsigned bit
= i
* target_bits
;
6901 __get_user(val
, &target_mask
[i
]);
6902 for (j
= 0; j
< target_bits
; j
++, bit
++) {
6903 if (val
& (1UL << j
)) {
6904 host_mask
[bit
/ host_bits
] |= 1UL << (bit
% host_bits
);
6909 unlock_user(target_mask
, target_addr
, 0);
6913 static int host_to_target_cpu_mask(const unsigned long *host_mask
,
6915 abi_ulong target_addr
,
6918 unsigned target_bits
= sizeof(abi_ulong
) * 8;
6919 unsigned host_bits
= sizeof(*host_mask
) * 8;
6920 abi_ulong
*target_mask
;
6923 assert(host_size
>= target_size
);
6925 target_mask
= lock_user(VERIFY_WRITE
, target_addr
, target_size
, 0);
6927 return -TARGET_EFAULT
;
6930 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
6931 unsigned bit
= i
* target_bits
;
6934 for (j
= 0; j
< target_bits
; j
++, bit
++) {
6935 if (host_mask
[bit
/ host_bits
] & (1UL << (bit
% host_bits
))) {
6939 __put_user(val
, &target_mask
[i
]);
6942 unlock_user(target_mask
, target_addr
, target_size
);
6946 /* This is an internal helper for do_syscall so that it is easier
6947 * to have a single return point, so that actions, such as logging
6948 * of syscall results, can be performed.
6949 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
6951 static abi_long
do_syscall1(void *cpu_env
, int num
, abi_long arg1
,
6952 abi_long arg2
, abi_long arg3
, abi_long arg4
,
6953 abi_long arg5
, abi_long arg6
, abi_long arg7
,
6956 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
6958 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
6959 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
6960 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64)
6963 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
6964 || defined(TARGET_NR_fstatfs)
6970 case TARGET_NR_exit
:
6971 /* In old applications this may be used to implement _exit(2).
6972 However in threaded applictions it is used for thread termination,
6973 and _exit_group is used for application termination.
6974 Do thread termination if we have more then one thread. */
6976 if (block_signals()) {
6977 return -TARGET_ERESTARTSYS
;
6982 if (CPU_NEXT(first_cpu
)) {
6985 /* Remove the CPU from the list. */
6986 QTAILQ_REMOVE_RCU(&cpus
, cpu
, node
);
6991 if (ts
->child_tidptr
) {
6992 put_user_u32(0, ts
->child_tidptr
);
6993 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
6997 object_unref(OBJECT(cpu
));
6999 rcu_unregister_thread();
7004 preexit_cleanup(cpu_env
, arg1
);
7006 return 0; /* avoid warning */
7007 case TARGET_NR_read
:
7008 if (arg2
== 0 && arg3
== 0) {
7009 return get_errno(safe_read(arg1
, 0, 0));
7011 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
7012 return -TARGET_EFAULT
;
7013 ret
= get_errno(safe_read(arg1
, p
, arg3
));
7015 fd_trans_host_to_target_data(arg1
)) {
7016 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
7018 unlock_user(p
, arg2
, ret
);
7021 case TARGET_NR_write
:
7022 if (arg2
== 0 && arg3
== 0) {
7023 return get_errno(safe_write(arg1
, 0, 0));
7025 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
7026 return -TARGET_EFAULT
;
7027 if (fd_trans_target_to_host_data(arg1
)) {
7028 void *copy
= g_malloc(arg3
);
7029 memcpy(copy
, p
, arg3
);
7030 ret
= fd_trans_target_to_host_data(arg1
)(copy
, arg3
);
7032 ret
= get_errno(safe_write(arg1
, copy
, ret
));
7036 ret
= get_errno(safe_write(arg1
, p
, arg3
));
7038 unlock_user(p
, arg2
, 0);
7041 #ifdef TARGET_NR_open
7042 case TARGET_NR_open
:
7043 if (!(p
= lock_user_string(arg1
)))
7044 return -TARGET_EFAULT
;
7045 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
7046 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
7048 fd_trans_unregister(ret
);
7049 unlock_user(p
, arg1
, 0);
7052 case TARGET_NR_openat
:
7053 if (!(p
= lock_user_string(arg2
)))
7054 return -TARGET_EFAULT
;
7055 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
7056 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
7058 fd_trans_unregister(ret
);
7059 unlock_user(p
, arg2
, 0);
7061 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7062 case TARGET_NR_name_to_handle_at
:
7063 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
7066 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7067 case TARGET_NR_open_by_handle_at
:
7068 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
7069 fd_trans_unregister(ret
);
7072 case TARGET_NR_close
:
7073 fd_trans_unregister(arg1
);
7074 return get_errno(close(arg1
));
7077 return do_brk(arg1
);
7078 #ifdef TARGET_NR_fork
7079 case TARGET_NR_fork
:
7080 return get_errno(do_fork(cpu_env
, TARGET_SIGCHLD
, 0, 0, 0, 0));
7082 #ifdef TARGET_NR_waitpid
7083 case TARGET_NR_waitpid
:
7086 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
7087 if (!is_error(ret
) && arg2
&& ret
7088 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
7089 return -TARGET_EFAULT
;
7093 #ifdef TARGET_NR_waitid
7094 case TARGET_NR_waitid
:
7098 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
7099 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
7100 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
7101 return -TARGET_EFAULT
;
7102 host_to_target_siginfo(p
, &info
);
7103 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
7108 #ifdef TARGET_NR_creat /* not on alpha */
7109 case TARGET_NR_creat
:
7110 if (!(p
= lock_user_string(arg1
)))
7111 return -TARGET_EFAULT
;
7112 ret
= get_errno(creat(p
, arg2
));
7113 fd_trans_unregister(ret
);
7114 unlock_user(p
, arg1
, 0);
7117 #ifdef TARGET_NR_link
7118 case TARGET_NR_link
:
7121 p
= lock_user_string(arg1
);
7122 p2
= lock_user_string(arg2
);
7124 ret
= -TARGET_EFAULT
;
7126 ret
= get_errno(link(p
, p2
));
7127 unlock_user(p2
, arg2
, 0);
7128 unlock_user(p
, arg1
, 0);
7132 #if defined(TARGET_NR_linkat)
7133 case TARGET_NR_linkat
:
7137 return -TARGET_EFAULT
;
7138 p
= lock_user_string(arg2
);
7139 p2
= lock_user_string(arg4
);
7141 ret
= -TARGET_EFAULT
;
7143 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
7144 unlock_user(p
, arg2
, 0);
7145 unlock_user(p2
, arg4
, 0);
7149 #ifdef TARGET_NR_unlink
7150 case TARGET_NR_unlink
:
7151 if (!(p
= lock_user_string(arg1
)))
7152 return -TARGET_EFAULT
;
7153 ret
= get_errno(unlink(p
));
7154 unlock_user(p
, arg1
, 0);
7157 #if defined(TARGET_NR_unlinkat)
7158 case TARGET_NR_unlinkat
:
7159 if (!(p
= lock_user_string(arg2
)))
7160 return -TARGET_EFAULT
;
7161 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
7162 unlock_user(p
, arg2
, 0);
7165 case TARGET_NR_execve
:
7167 char **argp
, **envp
;
7170 abi_ulong guest_argp
;
7171 abi_ulong guest_envp
;
7178 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
7179 if (get_user_ual(addr
, gp
))
7180 return -TARGET_EFAULT
;
7187 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
7188 if (get_user_ual(addr
, gp
))
7189 return -TARGET_EFAULT
;
7195 argp
= g_new0(char *, argc
+ 1);
7196 envp
= g_new0(char *, envc
+ 1);
7198 for (gp
= guest_argp
, q
= argp
; gp
;
7199 gp
+= sizeof(abi_ulong
), q
++) {
7200 if (get_user_ual(addr
, gp
))
7204 if (!(*q
= lock_user_string(addr
)))
7206 total_size
+= strlen(*q
) + 1;
7210 for (gp
= guest_envp
, q
= envp
; gp
;
7211 gp
+= sizeof(abi_ulong
), q
++) {
7212 if (get_user_ual(addr
, gp
))
7216 if (!(*q
= lock_user_string(addr
)))
7218 total_size
+= strlen(*q
) + 1;
7222 if (!(p
= lock_user_string(arg1
)))
7224 /* Although execve() is not an interruptible syscall it is
7225 * a special case where we must use the safe_syscall wrapper:
7226 * if we allow a signal to happen before we make the host
7227 * syscall then we will 'lose' it, because at the point of
7228 * execve the process leaves QEMU's control. So we use the
7229 * safe syscall wrapper to ensure that we either take the
7230 * signal as a guest signal, or else it does not happen
7231 * before the execve completes and makes it the other
7232 * program's problem.
7234 ret
= get_errno(safe_execve(p
, argp
, envp
));
7235 unlock_user(p
, arg1
, 0);
7240 ret
= -TARGET_EFAULT
;
7243 for (gp
= guest_argp
, q
= argp
; *q
;
7244 gp
+= sizeof(abi_ulong
), q
++) {
7245 if (get_user_ual(addr
, gp
)
7248 unlock_user(*q
, addr
, 0);
7250 for (gp
= guest_envp
, q
= envp
; *q
;
7251 gp
+= sizeof(abi_ulong
), q
++) {
7252 if (get_user_ual(addr
, gp
)
7255 unlock_user(*q
, addr
, 0);
7262 case TARGET_NR_chdir
:
7263 if (!(p
= lock_user_string(arg1
)))
7264 return -TARGET_EFAULT
;
7265 ret
= get_errno(chdir(p
));
7266 unlock_user(p
, arg1
, 0);
7268 #ifdef TARGET_NR_time
7269 case TARGET_NR_time
:
7272 ret
= get_errno(time(&host_time
));
7275 && put_user_sal(host_time
, arg1
))
7276 return -TARGET_EFAULT
;
7280 #ifdef TARGET_NR_mknod
7281 case TARGET_NR_mknod
:
7282 if (!(p
= lock_user_string(arg1
)))
7283 return -TARGET_EFAULT
;
7284 ret
= get_errno(mknod(p
, arg2
, arg3
));
7285 unlock_user(p
, arg1
, 0);
7288 #if defined(TARGET_NR_mknodat)
7289 case TARGET_NR_mknodat
:
7290 if (!(p
= lock_user_string(arg2
)))
7291 return -TARGET_EFAULT
;
7292 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
7293 unlock_user(p
, arg2
, 0);
7296 #ifdef TARGET_NR_chmod
7297 case TARGET_NR_chmod
:
7298 if (!(p
= lock_user_string(arg1
)))
7299 return -TARGET_EFAULT
;
7300 ret
= get_errno(chmod(p
, arg2
));
7301 unlock_user(p
, arg1
, 0);
7304 #ifdef TARGET_NR_lseek
7305 case TARGET_NR_lseek
:
7306 return get_errno(lseek(arg1
, arg2
, arg3
));
7308 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7309 /* Alpha specific */
7310 case TARGET_NR_getxpid
:
7311 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
7312 return get_errno(getpid());
7314 #ifdef TARGET_NR_getpid
7315 case TARGET_NR_getpid
:
7316 return get_errno(getpid());
7318 case TARGET_NR_mount
:
7320 /* need to look at the data field */
7324 p
= lock_user_string(arg1
);
7326 return -TARGET_EFAULT
;
7332 p2
= lock_user_string(arg2
);
7335 unlock_user(p
, arg1
, 0);
7337 return -TARGET_EFAULT
;
7341 p3
= lock_user_string(arg3
);
7344 unlock_user(p
, arg1
, 0);
7346 unlock_user(p2
, arg2
, 0);
7347 return -TARGET_EFAULT
;
7353 /* FIXME - arg5 should be locked, but it isn't clear how to
7354 * do that since it's not guaranteed to be a NULL-terminated
7358 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
7360 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
7362 ret
= get_errno(ret
);
7365 unlock_user(p
, arg1
, 0);
7367 unlock_user(p2
, arg2
, 0);
7369 unlock_user(p3
, arg3
, 0);
7373 #ifdef TARGET_NR_umount
7374 case TARGET_NR_umount
:
7375 if (!(p
= lock_user_string(arg1
)))
7376 return -TARGET_EFAULT
;
7377 ret
= get_errno(umount(p
));
7378 unlock_user(p
, arg1
, 0);
7381 #ifdef TARGET_NR_stime /* not on alpha */
7382 case TARGET_NR_stime
:
7385 if (get_user_sal(host_time
, arg1
))
7386 return -TARGET_EFAULT
;
7387 return get_errno(stime(&host_time
));
7390 #ifdef TARGET_NR_alarm /* not on alpha */
7391 case TARGET_NR_alarm
:
7394 #ifdef TARGET_NR_pause /* not on alpha */
7395 case TARGET_NR_pause
:
7396 if (!block_signals()) {
7397 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
7399 return -TARGET_EINTR
;
7401 #ifdef TARGET_NR_utime
7402 case TARGET_NR_utime
:
7404 struct utimbuf tbuf
, *host_tbuf
;
7405 struct target_utimbuf
*target_tbuf
;
7407 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
7408 return -TARGET_EFAULT
;
7409 tbuf
.actime
= tswapal(target_tbuf
->actime
);
7410 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
7411 unlock_user_struct(target_tbuf
, arg2
, 0);
7416 if (!(p
= lock_user_string(arg1
)))
7417 return -TARGET_EFAULT
;
7418 ret
= get_errno(utime(p
, host_tbuf
));
7419 unlock_user(p
, arg1
, 0);
7423 #ifdef TARGET_NR_utimes
7424 case TARGET_NR_utimes
:
7426 struct timeval
*tvp
, tv
[2];
7428 if (copy_from_user_timeval(&tv
[0], arg2
)
7429 || copy_from_user_timeval(&tv
[1],
7430 arg2
+ sizeof(struct target_timeval
)))
7431 return -TARGET_EFAULT
;
7436 if (!(p
= lock_user_string(arg1
)))
7437 return -TARGET_EFAULT
;
7438 ret
= get_errno(utimes(p
, tvp
));
7439 unlock_user(p
, arg1
, 0);
7443 #if defined(TARGET_NR_futimesat)
7444 case TARGET_NR_futimesat
:
7446 struct timeval
*tvp
, tv
[2];
7448 if (copy_from_user_timeval(&tv
[0], arg3
)
7449 || copy_from_user_timeval(&tv
[1],
7450 arg3
+ sizeof(struct target_timeval
)))
7451 return -TARGET_EFAULT
;
7456 if (!(p
= lock_user_string(arg2
))) {
7457 return -TARGET_EFAULT
;
7459 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
7460 unlock_user(p
, arg2
, 0);
7464 #ifdef TARGET_NR_access
7465 case TARGET_NR_access
:
7466 if (!(p
= lock_user_string(arg1
))) {
7467 return -TARGET_EFAULT
;
7469 ret
= get_errno(access(path(p
), arg2
));
7470 unlock_user(p
, arg1
, 0);
7473 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7474 case TARGET_NR_faccessat
:
7475 if (!(p
= lock_user_string(arg2
))) {
7476 return -TARGET_EFAULT
;
7478 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
7479 unlock_user(p
, arg2
, 0);
7482 #ifdef TARGET_NR_nice /* not on alpha */
7483 case TARGET_NR_nice
:
7484 return get_errno(nice(arg1
));
7486 case TARGET_NR_sync
:
7489 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7490 case TARGET_NR_syncfs
:
7491 return get_errno(syncfs(arg1
));
7493 case TARGET_NR_kill
:
7494 return get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
7495 #ifdef TARGET_NR_rename
7496 case TARGET_NR_rename
:
7499 p
= lock_user_string(arg1
);
7500 p2
= lock_user_string(arg2
);
7502 ret
= -TARGET_EFAULT
;
7504 ret
= get_errno(rename(p
, p2
));
7505 unlock_user(p2
, arg2
, 0);
7506 unlock_user(p
, arg1
, 0);
7510 #if defined(TARGET_NR_renameat)
7511 case TARGET_NR_renameat
:
7514 p
= lock_user_string(arg2
);
7515 p2
= lock_user_string(arg4
);
7517 ret
= -TARGET_EFAULT
;
7519 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
7520 unlock_user(p2
, arg4
, 0);
7521 unlock_user(p
, arg2
, 0);
7525 #if defined(TARGET_NR_renameat2)
7526 case TARGET_NR_renameat2
:
7529 p
= lock_user_string(arg2
);
7530 p2
= lock_user_string(arg4
);
7532 ret
= -TARGET_EFAULT
;
7534 ret
= get_errno(sys_renameat2(arg1
, p
, arg3
, p2
, arg5
));
7536 unlock_user(p2
, arg4
, 0);
7537 unlock_user(p
, arg2
, 0);
7541 #ifdef TARGET_NR_mkdir
7542 case TARGET_NR_mkdir
:
7543 if (!(p
= lock_user_string(arg1
)))
7544 return -TARGET_EFAULT
;
7545 ret
= get_errno(mkdir(p
, arg2
));
7546 unlock_user(p
, arg1
, 0);
7549 #if defined(TARGET_NR_mkdirat)
7550 case TARGET_NR_mkdirat
:
7551 if (!(p
= lock_user_string(arg2
)))
7552 return -TARGET_EFAULT
;
7553 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
7554 unlock_user(p
, arg2
, 0);
7557 #ifdef TARGET_NR_rmdir
7558 case TARGET_NR_rmdir
:
7559 if (!(p
= lock_user_string(arg1
)))
7560 return -TARGET_EFAULT
;
7561 ret
= get_errno(rmdir(p
));
7562 unlock_user(p
, arg1
, 0);
7566 ret
= get_errno(dup(arg1
));
7568 fd_trans_dup(arg1
, ret
);
7571 #ifdef TARGET_NR_pipe
7572 case TARGET_NR_pipe
:
7573 return do_pipe(cpu_env
, arg1
, 0, 0);
7575 #ifdef TARGET_NR_pipe2
7576 case TARGET_NR_pipe2
:
7577 return do_pipe(cpu_env
, arg1
,
7578 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
7580 case TARGET_NR_times
:
7582 struct target_tms
*tmsp
;
7584 ret
= get_errno(times(&tms
));
7586 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
7588 return -TARGET_EFAULT
;
7589 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
7590 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
7591 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
7592 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
7595 ret
= host_to_target_clock_t(ret
);
7598 case TARGET_NR_acct
:
7600 ret
= get_errno(acct(NULL
));
7602 if (!(p
= lock_user_string(arg1
))) {
7603 return -TARGET_EFAULT
;
7605 ret
= get_errno(acct(path(p
)));
7606 unlock_user(p
, arg1
, 0);
7609 #ifdef TARGET_NR_umount2
7610 case TARGET_NR_umount2
:
7611 if (!(p
= lock_user_string(arg1
)))
7612 return -TARGET_EFAULT
;
7613 ret
= get_errno(umount2(p
, arg2
));
7614 unlock_user(p
, arg1
, 0);
7617 case TARGET_NR_ioctl
:
7618 return do_ioctl(arg1
, arg2
, arg3
);
7619 #ifdef TARGET_NR_fcntl
7620 case TARGET_NR_fcntl
:
7621 return do_fcntl(arg1
, arg2
, arg3
);
7623 case TARGET_NR_setpgid
:
7624 return get_errno(setpgid(arg1
, arg2
));
7625 case TARGET_NR_umask
:
7626 return get_errno(umask(arg1
));
7627 case TARGET_NR_chroot
:
7628 if (!(p
= lock_user_string(arg1
)))
7629 return -TARGET_EFAULT
;
7630 ret
= get_errno(chroot(p
));
7631 unlock_user(p
, arg1
, 0);
7633 #ifdef TARGET_NR_dup2
7634 case TARGET_NR_dup2
:
7635 ret
= get_errno(dup2(arg1
, arg2
));
7637 fd_trans_dup(arg1
, arg2
);
7641 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7642 case TARGET_NR_dup3
:
7646 if ((arg3
& ~TARGET_O_CLOEXEC
) != 0) {
7649 host_flags
= target_to_host_bitmask(arg3
, fcntl_flags_tbl
);
7650 ret
= get_errno(dup3(arg1
, arg2
, host_flags
));
7652 fd_trans_dup(arg1
, arg2
);
7657 #ifdef TARGET_NR_getppid /* not on alpha */
7658 case TARGET_NR_getppid
:
7659 return get_errno(getppid());
7661 #ifdef TARGET_NR_getpgrp
7662 case TARGET_NR_getpgrp
:
7663 return get_errno(getpgrp());
7665 case TARGET_NR_setsid
:
7666 return get_errno(setsid());
7667 #ifdef TARGET_NR_sigaction
7668 case TARGET_NR_sigaction
:
7670 #if defined(TARGET_ALPHA)
7671 struct target_sigaction act
, oact
, *pact
= 0;
7672 struct target_old_sigaction
*old_act
;
7674 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
7675 return -TARGET_EFAULT
;
7676 act
._sa_handler
= old_act
->_sa_handler
;
7677 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
7678 act
.sa_flags
= old_act
->sa_flags
;
7679 act
.sa_restorer
= 0;
7680 unlock_user_struct(old_act
, arg2
, 0);
7683 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7684 if (!is_error(ret
) && arg3
) {
7685 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
7686 return -TARGET_EFAULT
;
7687 old_act
->_sa_handler
= oact
._sa_handler
;
7688 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
7689 old_act
->sa_flags
= oact
.sa_flags
;
7690 unlock_user_struct(old_act
, arg3
, 1);
7692 #elif defined(TARGET_MIPS)
7693 struct target_sigaction act
, oact
, *pact
, *old_act
;
7696 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
7697 return -TARGET_EFAULT
;
7698 act
._sa_handler
= old_act
->_sa_handler
;
7699 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
7700 act
.sa_flags
= old_act
->sa_flags
;
7701 unlock_user_struct(old_act
, arg2
, 0);
7707 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7709 if (!is_error(ret
) && arg3
) {
7710 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
7711 return -TARGET_EFAULT
;
7712 old_act
->_sa_handler
= oact
._sa_handler
;
7713 old_act
->sa_flags
= oact
.sa_flags
;
7714 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
7715 old_act
->sa_mask
.sig
[1] = 0;
7716 old_act
->sa_mask
.sig
[2] = 0;
7717 old_act
->sa_mask
.sig
[3] = 0;
7718 unlock_user_struct(old_act
, arg3
, 1);
7721 struct target_old_sigaction
*old_act
;
7722 struct target_sigaction act
, oact
, *pact
;
7724 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
7725 return -TARGET_EFAULT
;
7726 act
._sa_handler
= old_act
->_sa_handler
;
7727 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
7728 act
.sa_flags
= old_act
->sa_flags
;
7729 act
.sa_restorer
= old_act
->sa_restorer
;
7730 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7731 act
.ka_restorer
= 0;
7733 unlock_user_struct(old_act
, arg2
, 0);
7738 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7739 if (!is_error(ret
) && arg3
) {
7740 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
7741 return -TARGET_EFAULT
;
7742 old_act
->_sa_handler
= oact
._sa_handler
;
7743 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
7744 old_act
->sa_flags
= oact
.sa_flags
;
7745 old_act
->sa_restorer
= oact
.sa_restorer
;
7746 unlock_user_struct(old_act
, arg3
, 1);
7752 case TARGET_NR_rt_sigaction
:
7754 #if defined(TARGET_ALPHA)
7755 /* For Alpha and SPARC this is a 5 argument syscall, with
7756 * a 'restorer' parameter which must be copied into the
7757 * sa_restorer field of the sigaction struct.
7758 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
7759 * and arg5 is the sigsetsize.
7760 * Alpha also has a separate rt_sigaction struct that it uses
7761 * here; SPARC uses the usual sigaction struct.
7763 struct target_rt_sigaction
*rt_act
;
7764 struct target_sigaction act
, oact
, *pact
= 0;
7766 if (arg4
!= sizeof(target_sigset_t
)) {
7767 return -TARGET_EINVAL
;
7770 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
7771 return -TARGET_EFAULT
;
7772 act
._sa_handler
= rt_act
->_sa_handler
;
7773 act
.sa_mask
= rt_act
->sa_mask
;
7774 act
.sa_flags
= rt_act
->sa_flags
;
7775 act
.sa_restorer
= arg5
;
7776 unlock_user_struct(rt_act
, arg2
, 0);
7779 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7780 if (!is_error(ret
) && arg3
) {
7781 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
7782 return -TARGET_EFAULT
;
7783 rt_act
->_sa_handler
= oact
._sa_handler
;
7784 rt_act
->sa_mask
= oact
.sa_mask
;
7785 rt_act
->sa_flags
= oact
.sa_flags
;
7786 unlock_user_struct(rt_act
, arg3
, 1);
7790 target_ulong restorer
= arg4
;
7791 target_ulong sigsetsize
= arg5
;
7793 target_ulong sigsetsize
= arg4
;
7795 struct target_sigaction
*act
;
7796 struct target_sigaction
*oact
;
7798 if (sigsetsize
!= sizeof(target_sigset_t
)) {
7799 return -TARGET_EINVAL
;
7802 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1)) {
7803 return -TARGET_EFAULT
;
7805 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7806 act
->ka_restorer
= restorer
;
7812 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
7813 ret
= -TARGET_EFAULT
;
7814 goto rt_sigaction_fail
;
7818 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
7821 unlock_user_struct(act
, arg2
, 0);
7823 unlock_user_struct(oact
, arg3
, 1);
7827 #ifdef TARGET_NR_sgetmask /* not on alpha */
7828 case TARGET_NR_sgetmask
:
7831 abi_ulong target_set
;
7832 ret
= do_sigprocmask(0, NULL
, &cur_set
);
7834 host_to_target_old_sigset(&target_set
, &cur_set
);
7840 #ifdef TARGET_NR_ssetmask /* not on alpha */
7841 case TARGET_NR_ssetmask
:
7844 abi_ulong target_set
= arg1
;
7845 target_to_host_old_sigset(&set
, &target_set
);
7846 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
7848 host_to_target_old_sigset(&target_set
, &oset
);
7854 #ifdef TARGET_NR_sigprocmask
7855 case TARGET_NR_sigprocmask
:
7857 #if defined(TARGET_ALPHA)
7858 sigset_t set
, oldset
;
7863 case TARGET_SIG_BLOCK
:
7866 case TARGET_SIG_UNBLOCK
:
7869 case TARGET_SIG_SETMASK
:
7873 return -TARGET_EINVAL
;
7876 target_to_host_old_sigset(&set
, &mask
);
7878 ret
= do_sigprocmask(how
, &set
, &oldset
);
7879 if (!is_error(ret
)) {
7880 host_to_target_old_sigset(&mask
, &oldset
);
7882 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
7885 sigset_t set
, oldset
, *set_ptr
;
7890 case TARGET_SIG_BLOCK
:
7893 case TARGET_SIG_UNBLOCK
:
7896 case TARGET_SIG_SETMASK
:
7900 return -TARGET_EINVAL
;
7902 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
7903 return -TARGET_EFAULT
;
7904 target_to_host_old_sigset(&set
, p
);
7905 unlock_user(p
, arg2
, 0);
7911 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
7912 if (!is_error(ret
) && arg3
) {
7913 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
7914 return -TARGET_EFAULT
;
7915 host_to_target_old_sigset(p
, &oldset
);
7916 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
7922 case TARGET_NR_rt_sigprocmask
:
7925 sigset_t set
, oldset
, *set_ptr
;
7927 if (arg4
!= sizeof(target_sigset_t
)) {
7928 return -TARGET_EINVAL
;
7933 case TARGET_SIG_BLOCK
:
7936 case TARGET_SIG_UNBLOCK
:
7939 case TARGET_SIG_SETMASK
:
7943 return -TARGET_EINVAL
;
7945 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
7946 return -TARGET_EFAULT
;
7947 target_to_host_sigset(&set
, p
);
7948 unlock_user(p
, arg2
, 0);
7954 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
7955 if (!is_error(ret
) && arg3
) {
7956 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
7957 return -TARGET_EFAULT
;
7958 host_to_target_sigset(p
, &oldset
);
7959 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
7963 #ifdef TARGET_NR_sigpending
7964 case TARGET_NR_sigpending
:
7967 ret
= get_errno(sigpending(&set
));
7968 if (!is_error(ret
)) {
7969 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
7970 return -TARGET_EFAULT
;
7971 host_to_target_old_sigset(p
, &set
);
7972 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
7977 case TARGET_NR_rt_sigpending
:
7981 /* Yes, this check is >, not != like most. We follow the kernel's
7982 * logic and it does it like this because it implements
7983 * NR_sigpending through the same code path, and in that case
7984 * the old_sigset_t is smaller in size.
7986 if (arg2
> sizeof(target_sigset_t
)) {
7987 return -TARGET_EINVAL
;
7990 ret
= get_errno(sigpending(&set
));
7991 if (!is_error(ret
)) {
7992 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
7993 return -TARGET_EFAULT
;
7994 host_to_target_sigset(p
, &set
);
7995 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
7999 #ifdef TARGET_NR_sigsuspend
8000 case TARGET_NR_sigsuspend
:
8002 TaskState
*ts
= cpu
->opaque
;
8003 #if defined(TARGET_ALPHA)
8004 abi_ulong mask
= arg1
;
8005 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
8007 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8008 return -TARGET_EFAULT
;
8009 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
8010 unlock_user(p
, arg1
, 0);
8012 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8014 if (ret
!= -TARGET_ERESTARTSYS
) {
8015 ts
->in_sigsuspend
= 1;
8020 case TARGET_NR_rt_sigsuspend
:
8022 TaskState
*ts
= cpu
->opaque
;
8024 if (arg2
!= sizeof(target_sigset_t
)) {
8025 return -TARGET_EINVAL
;
8027 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8028 return -TARGET_EFAULT
;
8029 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
8030 unlock_user(p
, arg1
, 0);
8031 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8033 if (ret
!= -TARGET_ERESTARTSYS
) {
8034 ts
->in_sigsuspend
= 1;
8038 case TARGET_NR_rt_sigtimedwait
:
8041 struct timespec uts
, *puts
;
8044 if (arg4
!= sizeof(target_sigset_t
)) {
8045 return -TARGET_EINVAL
;
8048 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8049 return -TARGET_EFAULT
;
8050 target_to_host_sigset(&set
, p
);
8051 unlock_user(p
, arg1
, 0);
8054 target_to_host_timespec(puts
, arg3
);
8058 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
8060 if (!is_error(ret
)) {
8062 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
8065 return -TARGET_EFAULT
;
8067 host_to_target_siginfo(p
, &uinfo
);
8068 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
8070 ret
= host_to_target_signal(ret
);
8074 case TARGET_NR_rt_sigqueueinfo
:
8078 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
8080 return -TARGET_EFAULT
;
8082 target_to_host_siginfo(&uinfo
, p
);
8083 unlock_user(p
, arg3
, 0);
8084 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
8087 case TARGET_NR_rt_tgsigqueueinfo
:
8091 p
= lock_user(VERIFY_READ
, arg4
, sizeof(target_siginfo_t
), 1);
8093 return -TARGET_EFAULT
;
8095 target_to_host_siginfo(&uinfo
, p
);
8096 unlock_user(p
, arg4
, 0);
8097 ret
= get_errno(sys_rt_tgsigqueueinfo(arg1
, arg2
, arg3
, &uinfo
));
8100 #ifdef TARGET_NR_sigreturn
8101 case TARGET_NR_sigreturn
:
8102 if (block_signals()) {
8103 return -TARGET_ERESTARTSYS
;
8105 return do_sigreturn(cpu_env
);
8107 case TARGET_NR_rt_sigreturn
:
8108 if (block_signals()) {
8109 return -TARGET_ERESTARTSYS
;
8111 return do_rt_sigreturn(cpu_env
);
8112 case TARGET_NR_sethostname
:
8113 if (!(p
= lock_user_string(arg1
)))
8114 return -TARGET_EFAULT
;
8115 ret
= get_errno(sethostname(p
, arg2
));
8116 unlock_user(p
, arg1
, 0);
8118 #ifdef TARGET_NR_setrlimit
8119 case TARGET_NR_setrlimit
:
8121 int resource
= target_to_host_resource(arg1
);
8122 struct target_rlimit
*target_rlim
;
8124 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
8125 return -TARGET_EFAULT
;
8126 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
8127 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
8128 unlock_user_struct(target_rlim
, arg2
, 0);
8130 * If we just passed through resource limit settings for memory then
8131 * they would also apply to QEMU's own allocations, and QEMU will
8132 * crash or hang or die if its allocations fail. Ideally we would
8133 * track the guest allocations in QEMU and apply the limits ourselves.
8134 * For now, just tell the guest the call succeeded but don't actually
8137 if (resource
!= RLIMIT_AS
&&
8138 resource
!= RLIMIT_DATA
&&
8139 resource
!= RLIMIT_STACK
) {
8140 return get_errno(setrlimit(resource
, &rlim
));
8146 #ifdef TARGET_NR_getrlimit
8147 case TARGET_NR_getrlimit
:
8149 int resource
= target_to_host_resource(arg1
);
8150 struct target_rlimit
*target_rlim
;
8153 ret
= get_errno(getrlimit(resource
, &rlim
));
8154 if (!is_error(ret
)) {
8155 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
8156 return -TARGET_EFAULT
;
8157 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
8158 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
8159 unlock_user_struct(target_rlim
, arg2
, 1);
8164 case TARGET_NR_getrusage
:
8166 struct rusage rusage
;
8167 ret
= get_errno(getrusage(arg1
, &rusage
));
8168 if (!is_error(ret
)) {
8169 ret
= host_to_target_rusage(arg2
, &rusage
);
8173 case TARGET_NR_gettimeofday
:
8176 ret
= get_errno(gettimeofday(&tv
, NULL
));
8177 if (!is_error(ret
)) {
8178 if (copy_to_user_timeval(arg1
, &tv
))
8179 return -TARGET_EFAULT
;
8183 case TARGET_NR_settimeofday
:
8185 struct timeval tv
, *ptv
= NULL
;
8186 struct timezone tz
, *ptz
= NULL
;
8189 if (copy_from_user_timeval(&tv
, arg1
)) {
8190 return -TARGET_EFAULT
;
8196 if (copy_from_user_timezone(&tz
, arg2
)) {
8197 return -TARGET_EFAULT
;
8202 return get_errno(settimeofday(ptv
, ptz
));
8204 #if defined(TARGET_NR_select)
8205 case TARGET_NR_select
:
8206 #if defined(TARGET_WANT_NI_OLD_SELECT)
8207 /* some architectures used to have old_select here
8208 * but now ENOSYS it.
8210 ret
= -TARGET_ENOSYS
;
8211 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8212 ret
= do_old_select(arg1
);
8214 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
8218 #ifdef TARGET_NR_pselect6
8219 case TARGET_NR_pselect6
:
8221 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
8222 fd_set rfds
, wfds
, efds
;
8223 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
8224 struct timespec ts
, *ts_ptr
;
8227 * The 6th arg is actually two args smashed together,
8228 * so we cannot use the C library.
8236 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
8237 target_sigset_t
*target_sigset
;
8245 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
8249 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
8253 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
8259 * This takes a timespec, and not a timeval, so we cannot
8260 * use the do_select() helper ...
8263 if (target_to_host_timespec(&ts
, ts_addr
)) {
8264 return -TARGET_EFAULT
;
8271 /* Extract the two packed args for the sigset */
8274 sig
.size
= SIGSET_T_SIZE
;
8276 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
8278 return -TARGET_EFAULT
;
8280 arg_sigset
= tswapal(arg7
[0]);
8281 arg_sigsize
= tswapal(arg7
[1]);
8282 unlock_user(arg7
, arg6
, 0);
8286 if (arg_sigsize
!= sizeof(*target_sigset
)) {
8287 /* Like the kernel, we enforce correct size sigsets */
8288 return -TARGET_EINVAL
;
8290 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
8291 sizeof(*target_sigset
), 1);
8292 if (!target_sigset
) {
8293 return -TARGET_EFAULT
;
8295 target_to_host_sigset(&set
, target_sigset
);
8296 unlock_user(target_sigset
, arg_sigset
, 0);
8304 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
8307 if (!is_error(ret
)) {
8308 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
8309 return -TARGET_EFAULT
;
8310 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
8311 return -TARGET_EFAULT
;
8312 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
8313 return -TARGET_EFAULT
;
8315 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
8316 return -TARGET_EFAULT
;
8321 #ifdef TARGET_NR_symlink
8322 case TARGET_NR_symlink
:
8325 p
= lock_user_string(arg1
);
8326 p2
= lock_user_string(arg2
);
8328 ret
= -TARGET_EFAULT
;
8330 ret
= get_errno(symlink(p
, p2
));
8331 unlock_user(p2
, arg2
, 0);
8332 unlock_user(p
, arg1
, 0);
8336 #if defined(TARGET_NR_symlinkat)
8337 case TARGET_NR_symlinkat
:
8340 p
= lock_user_string(arg1
);
8341 p2
= lock_user_string(arg3
);
8343 ret
= -TARGET_EFAULT
;
8345 ret
= get_errno(symlinkat(p
, arg2
, p2
));
8346 unlock_user(p2
, arg3
, 0);
8347 unlock_user(p
, arg1
, 0);
8351 #ifdef TARGET_NR_readlink
8352 case TARGET_NR_readlink
:
8355 p
= lock_user_string(arg1
);
8356 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8358 ret
= -TARGET_EFAULT
;
8360 /* Short circuit this for the magic exe check. */
8361 ret
= -TARGET_EINVAL
;
8362 } else if (is_proc_myself((const char *)p
, "exe")) {
8363 char real
[PATH_MAX
], *temp
;
8364 temp
= realpath(exec_path
, real
);
8365 /* Return value is # of bytes that we wrote to the buffer. */
8367 ret
= get_errno(-1);
8369 /* Don't worry about sign mismatch as earlier mapping
8370 * logic would have thrown a bad address error. */
8371 ret
= MIN(strlen(real
), arg3
);
8372 /* We cannot NUL terminate the string. */
8373 memcpy(p2
, real
, ret
);
8376 ret
= get_errno(readlink(path(p
), p2
, arg3
));
8378 unlock_user(p2
, arg2
, ret
);
8379 unlock_user(p
, arg1
, 0);
8383 #if defined(TARGET_NR_readlinkat)
8384 case TARGET_NR_readlinkat
:
8387 p
= lock_user_string(arg2
);
8388 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
8390 ret
= -TARGET_EFAULT
;
8391 } else if (is_proc_myself((const char *)p
, "exe")) {
8392 char real
[PATH_MAX
], *temp
;
8393 temp
= realpath(exec_path
, real
);
8394 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
8395 snprintf((char *)p2
, arg4
, "%s", real
);
8397 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
8399 unlock_user(p2
, arg3
, ret
);
8400 unlock_user(p
, arg2
, 0);
8404 #ifdef TARGET_NR_swapon
8405 case TARGET_NR_swapon
:
8406 if (!(p
= lock_user_string(arg1
)))
8407 return -TARGET_EFAULT
;
8408 ret
= get_errno(swapon(p
, arg2
));
8409 unlock_user(p
, arg1
, 0);
8412 case TARGET_NR_reboot
:
8413 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
8414 /* arg4 must be ignored in all other cases */
8415 p
= lock_user_string(arg4
);
8417 return -TARGET_EFAULT
;
8419 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
8420 unlock_user(p
, arg4
, 0);
8422 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
8425 #ifdef TARGET_NR_mmap
8426 case TARGET_NR_mmap
:
8427 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8428 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8429 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8430 || defined(TARGET_S390X)
8433 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
8434 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
8435 return -TARGET_EFAULT
;
8442 unlock_user(v
, arg1
, 0);
8443 ret
= get_errno(target_mmap(v1
, v2
, v3
,
8444 target_to_host_bitmask(v4
, mmap_flags_tbl
),
8448 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
8449 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
8455 #ifdef TARGET_NR_mmap2
8456 case TARGET_NR_mmap2
:
8458 #define MMAP_SHIFT 12
8460 ret
= target_mmap(arg1
, arg2
, arg3
,
8461 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
8462 arg5
, arg6
<< MMAP_SHIFT
);
8463 return get_errno(ret
);
8465 case TARGET_NR_munmap
:
8466 return get_errno(target_munmap(arg1
, arg2
));
8467 case TARGET_NR_mprotect
:
8469 TaskState
*ts
= cpu
->opaque
;
8470 /* Special hack to detect libc making the stack executable. */
8471 if ((arg3
& PROT_GROWSDOWN
)
8472 && arg1
>= ts
->info
->stack_limit
8473 && arg1
<= ts
->info
->start_stack
) {
8474 arg3
&= ~PROT_GROWSDOWN
;
8475 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
8476 arg1
= ts
->info
->stack_limit
;
8479 return get_errno(target_mprotect(arg1
, arg2
, arg3
));
8480 #ifdef TARGET_NR_mremap
8481 case TARGET_NR_mremap
:
8482 return get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
8484 /* ??? msync/mlock/munlock are broken for softmmu. */
8485 #ifdef TARGET_NR_msync
8486 case TARGET_NR_msync
:
8487 return get_errno(msync(g2h(arg1
), arg2
, arg3
));
8489 #ifdef TARGET_NR_mlock
8490 case TARGET_NR_mlock
:
8491 return get_errno(mlock(g2h(arg1
), arg2
));
8493 #ifdef TARGET_NR_munlock
8494 case TARGET_NR_munlock
:
8495 return get_errno(munlock(g2h(arg1
), arg2
));
8497 #ifdef TARGET_NR_mlockall
8498 case TARGET_NR_mlockall
:
8499 return get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
8501 #ifdef TARGET_NR_munlockall
8502 case TARGET_NR_munlockall
:
8503 return get_errno(munlockall());
8505 #ifdef TARGET_NR_truncate
8506 case TARGET_NR_truncate
:
8507 if (!(p
= lock_user_string(arg1
)))
8508 return -TARGET_EFAULT
;
8509 ret
= get_errno(truncate(p
, arg2
));
8510 unlock_user(p
, arg1
, 0);
8513 #ifdef TARGET_NR_ftruncate
8514 case TARGET_NR_ftruncate
:
8515 return get_errno(ftruncate(arg1
, arg2
));
8517 case TARGET_NR_fchmod
:
8518 return get_errno(fchmod(arg1
, arg2
));
8519 #if defined(TARGET_NR_fchmodat)
8520 case TARGET_NR_fchmodat
:
8521 if (!(p
= lock_user_string(arg2
)))
8522 return -TARGET_EFAULT
;
8523 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
8524 unlock_user(p
, arg2
, 0);
8527 case TARGET_NR_getpriority
:
8528 /* Note that negative values are valid for getpriority, so we must
8529 differentiate based on errno settings. */
8531 ret
= getpriority(arg1
, arg2
);
8532 if (ret
== -1 && errno
!= 0) {
8533 return -host_to_target_errno(errno
);
8536 /* Return value is the unbiased priority. Signal no error. */
8537 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
8539 /* Return value is a biased priority to avoid negative numbers. */
8543 case TARGET_NR_setpriority
:
8544 return get_errno(setpriority(arg1
, arg2
, arg3
));
8545 #ifdef TARGET_NR_statfs
8546 case TARGET_NR_statfs
:
8547 if (!(p
= lock_user_string(arg1
))) {
8548 return -TARGET_EFAULT
;
8550 ret
= get_errno(statfs(path(p
), &stfs
));
8551 unlock_user(p
, arg1
, 0);
8553 if (!is_error(ret
)) {
8554 struct target_statfs
*target_stfs
;
8556 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
8557 return -TARGET_EFAULT
;
8558 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
8559 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
8560 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
8561 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
8562 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
8563 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
8564 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
8565 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
8566 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
8567 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
8568 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
8569 #ifdef _STATFS_F_FLAGS
8570 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
8572 __put_user(0, &target_stfs
->f_flags
);
8574 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
8575 unlock_user_struct(target_stfs
, arg2
, 1);
8579 #ifdef TARGET_NR_fstatfs
8580 case TARGET_NR_fstatfs
:
8581 ret
= get_errno(fstatfs(arg1
, &stfs
));
8582 goto convert_statfs
;
8584 #ifdef TARGET_NR_statfs64
8585 case TARGET_NR_statfs64
:
8586 if (!(p
= lock_user_string(arg1
))) {
8587 return -TARGET_EFAULT
;
8589 ret
= get_errno(statfs(path(p
), &stfs
));
8590 unlock_user(p
, arg1
, 0);
8592 if (!is_error(ret
)) {
8593 struct target_statfs64
*target_stfs
;
8595 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
8596 return -TARGET_EFAULT
;
8597 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
8598 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
8599 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
8600 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
8601 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
8602 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
8603 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
8604 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
8605 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
8606 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
8607 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
8608 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
8609 unlock_user_struct(target_stfs
, arg3
, 1);
8612 case TARGET_NR_fstatfs64
:
8613 ret
= get_errno(fstatfs(arg1
, &stfs
));
8614 goto convert_statfs64
;
8616 #ifdef TARGET_NR_socketcall
8617 case TARGET_NR_socketcall
:
8618 return do_socketcall(arg1
, arg2
);
8620 #ifdef TARGET_NR_accept
8621 case TARGET_NR_accept
:
8622 return do_accept4(arg1
, arg2
, arg3
, 0);
8624 #ifdef TARGET_NR_accept4
8625 case TARGET_NR_accept4
:
8626 return do_accept4(arg1
, arg2
, arg3
, arg4
);
8628 #ifdef TARGET_NR_bind
8629 case TARGET_NR_bind
:
8630 return do_bind(arg1
, arg2
, arg3
);
8632 #ifdef TARGET_NR_connect
8633 case TARGET_NR_connect
:
8634 return do_connect(arg1
, arg2
, arg3
);
8636 #ifdef TARGET_NR_getpeername
8637 case TARGET_NR_getpeername
:
8638 return do_getpeername(arg1
, arg2
, arg3
);
8640 #ifdef TARGET_NR_getsockname
8641 case TARGET_NR_getsockname
:
8642 return do_getsockname(arg1
, arg2
, arg3
);
8644 #ifdef TARGET_NR_getsockopt
8645 case TARGET_NR_getsockopt
:
8646 return do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
8648 #ifdef TARGET_NR_listen
8649 case TARGET_NR_listen
:
8650 return get_errno(listen(arg1
, arg2
));
8652 #ifdef TARGET_NR_recv
8653 case TARGET_NR_recv
:
8654 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
8656 #ifdef TARGET_NR_recvfrom
8657 case TARGET_NR_recvfrom
:
8658 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8660 #ifdef TARGET_NR_recvmsg
8661 case TARGET_NR_recvmsg
:
8662 return do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
8664 #ifdef TARGET_NR_send
8665 case TARGET_NR_send
:
8666 return do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
8668 #ifdef TARGET_NR_sendmsg
8669 case TARGET_NR_sendmsg
:
8670 return do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
8672 #ifdef TARGET_NR_sendmmsg
8673 case TARGET_NR_sendmmsg
:
8674 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
8675 case TARGET_NR_recvmmsg
:
8676 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
8678 #ifdef TARGET_NR_sendto
8679 case TARGET_NR_sendto
:
8680 return do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8682 #ifdef TARGET_NR_shutdown
8683 case TARGET_NR_shutdown
:
8684 return get_errno(shutdown(arg1
, arg2
));
8686 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8687 case TARGET_NR_getrandom
:
8688 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
8690 return -TARGET_EFAULT
;
8692 ret
= get_errno(getrandom(p
, arg2
, arg3
));
8693 unlock_user(p
, arg1
, ret
);
8696 #ifdef TARGET_NR_socket
8697 case TARGET_NR_socket
:
8698 return do_socket(arg1
, arg2
, arg3
);
8700 #ifdef TARGET_NR_socketpair
8701 case TARGET_NR_socketpair
:
8702 return do_socketpair(arg1
, arg2
, arg3
, arg4
);
8704 #ifdef TARGET_NR_setsockopt
8705 case TARGET_NR_setsockopt
:
8706 return do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
8708 #if defined(TARGET_NR_syslog)
8709 case TARGET_NR_syslog
:
8714 case TARGET_SYSLOG_ACTION_CLOSE
: /* Close log */
8715 case TARGET_SYSLOG_ACTION_OPEN
: /* Open log */
8716 case TARGET_SYSLOG_ACTION_CLEAR
: /* Clear ring buffer */
8717 case TARGET_SYSLOG_ACTION_CONSOLE_OFF
: /* Disable logging */
8718 case TARGET_SYSLOG_ACTION_CONSOLE_ON
: /* Enable logging */
8719 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL
: /* Set messages level */
8720 case TARGET_SYSLOG_ACTION_SIZE_UNREAD
: /* Number of chars */
8721 case TARGET_SYSLOG_ACTION_SIZE_BUFFER
: /* Size of the buffer */
8722 return get_errno(sys_syslog((int)arg1
, NULL
, (int)arg3
));
8723 case TARGET_SYSLOG_ACTION_READ
: /* Read from log */
8724 case TARGET_SYSLOG_ACTION_READ_CLEAR
: /* Read/clear msgs */
8725 case TARGET_SYSLOG_ACTION_READ_ALL
: /* Read last messages */
8728 return -TARGET_EINVAL
;
8733 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8735 return -TARGET_EFAULT
;
8737 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
8738 unlock_user(p
, arg2
, arg3
);
8742 return -TARGET_EINVAL
;
8747 case TARGET_NR_setitimer
:
8749 struct itimerval value
, ovalue
, *pvalue
;
8753 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
8754 || copy_from_user_timeval(&pvalue
->it_value
,
8755 arg2
+ sizeof(struct target_timeval
)))
8756 return -TARGET_EFAULT
;
8760 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
8761 if (!is_error(ret
) && arg3
) {
8762 if (copy_to_user_timeval(arg3
,
8763 &ovalue
.it_interval
)
8764 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
8766 return -TARGET_EFAULT
;
8770 case TARGET_NR_getitimer
:
8772 struct itimerval value
;
8774 ret
= get_errno(getitimer(arg1
, &value
));
8775 if (!is_error(ret
) && arg2
) {
8776 if (copy_to_user_timeval(arg2
,
8778 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
8780 return -TARGET_EFAULT
;
8784 #ifdef TARGET_NR_stat
8785 case TARGET_NR_stat
:
8786 if (!(p
= lock_user_string(arg1
))) {
8787 return -TARGET_EFAULT
;
8789 ret
= get_errno(stat(path(p
), &st
));
8790 unlock_user(p
, arg1
, 0);
8793 #ifdef TARGET_NR_lstat
8794 case TARGET_NR_lstat
:
8795 if (!(p
= lock_user_string(arg1
))) {
8796 return -TARGET_EFAULT
;
8798 ret
= get_errno(lstat(path(p
), &st
));
8799 unlock_user(p
, arg1
, 0);
8802 #ifdef TARGET_NR_fstat
8803 case TARGET_NR_fstat
:
8805 ret
= get_errno(fstat(arg1
, &st
));
8806 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
8809 if (!is_error(ret
)) {
8810 struct target_stat
*target_st
;
8812 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
8813 return -TARGET_EFAULT
;
8814 memset(target_st
, 0, sizeof(*target_st
));
8815 __put_user(st
.st_dev
, &target_st
->st_dev
);
8816 __put_user(st
.st_ino
, &target_st
->st_ino
);
8817 __put_user(st
.st_mode
, &target_st
->st_mode
);
8818 __put_user(st
.st_uid
, &target_st
->st_uid
);
8819 __put_user(st
.st_gid
, &target_st
->st_gid
);
8820 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
8821 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
8822 __put_user(st
.st_size
, &target_st
->st_size
);
8823 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
8824 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
8825 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
8826 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
8827 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
8828 unlock_user_struct(target_st
, arg2
, 1);
8833 case TARGET_NR_vhangup
:
8834 return get_errno(vhangup());
8835 #ifdef TARGET_NR_syscall
8836 case TARGET_NR_syscall
:
8837 return do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
8838 arg6
, arg7
, arg8
, 0);
8840 case TARGET_NR_wait4
:
8843 abi_long status_ptr
= arg2
;
8844 struct rusage rusage
, *rusage_ptr
;
8845 abi_ulong target_rusage
= arg4
;
8846 abi_long rusage_err
;
8848 rusage_ptr
= &rusage
;
8851 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
8852 if (!is_error(ret
)) {
8853 if (status_ptr
&& ret
) {
8854 status
= host_to_target_waitstatus(status
);
8855 if (put_user_s32(status
, status_ptr
))
8856 return -TARGET_EFAULT
;
8858 if (target_rusage
) {
8859 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
8867 #ifdef TARGET_NR_swapoff
8868 case TARGET_NR_swapoff
:
8869 if (!(p
= lock_user_string(arg1
)))
8870 return -TARGET_EFAULT
;
8871 ret
= get_errno(swapoff(p
));
8872 unlock_user(p
, arg1
, 0);
8875 case TARGET_NR_sysinfo
:
8877 struct target_sysinfo
*target_value
;
8878 struct sysinfo value
;
8879 ret
= get_errno(sysinfo(&value
));
8880 if (!is_error(ret
) && arg1
)
8882 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
8883 return -TARGET_EFAULT
;
8884 __put_user(value
.uptime
, &target_value
->uptime
);
8885 __put_user(value
.loads
[0], &target_value
->loads
[0]);
8886 __put_user(value
.loads
[1], &target_value
->loads
[1]);
8887 __put_user(value
.loads
[2], &target_value
->loads
[2]);
8888 __put_user(value
.totalram
, &target_value
->totalram
);
8889 __put_user(value
.freeram
, &target_value
->freeram
);
8890 __put_user(value
.sharedram
, &target_value
->sharedram
);
8891 __put_user(value
.bufferram
, &target_value
->bufferram
);
8892 __put_user(value
.totalswap
, &target_value
->totalswap
);
8893 __put_user(value
.freeswap
, &target_value
->freeswap
);
8894 __put_user(value
.procs
, &target_value
->procs
);
8895 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
8896 __put_user(value
.freehigh
, &target_value
->freehigh
);
8897 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
8898 unlock_user_struct(target_value
, arg1
, 1);
8902 #ifdef TARGET_NR_ipc
8904 return do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8906 #ifdef TARGET_NR_semget
8907 case TARGET_NR_semget
:
8908 return get_errno(semget(arg1
, arg2
, arg3
));
8910 #ifdef TARGET_NR_semop
8911 case TARGET_NR_semop
:
8912 return do_semop(arg1
, arg2
, arg3
);
8914 #ifdef TARGET_NR_semctl
8915 case TARGET_NR_semctl
:
8916 return do_semctl(arg1
, arg2
, arg3
, arg4
);
8918 #ifdef TARGET_NR_msgctl
8919 case TARGET_NR_msgctl
:
8920 return do_msgctl(arg1
, arg2
, arg3
);
8922 #ifdef TARGET_NR_msgget
8923 case TARGET_NR_msgget
:
8924 return get_errno(msgget(arg1
, arg2
));
8926 #ifdef TARGET_NR_msgrcv
8927 case TARGET_NR_msgrcv
:
8928 return do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
8930 #ifdef TARGET_NR_msgsnd
8931 case TARGET_NR_msgsnd
:
8932 return do_msgsnd(arg1
, arg2
, arg3
, arg4
);
8934 #ifdef TARGET_NR_shmget
8935 case TARGET_NR_shmget
:
8936 return get_errno(shmget(arg1
, arg2
, arg3
));
8938 #ifdef TARGET_NR_shmctl
8939 case TARGET_NR_shmctl
:
8940 return do_shmctl(arg1
, arg2
, arg3
);
8942 #ifdef TARGET_NR_shmat
8943 case TARGET_NR_shmat
:
8944 return do_shmat(cpu_env
, arg1
, arg2
, arg3
);
8946 #ifdef TARGET_NR_shmdt
8947 case TARGET_NR_shmdt
:
8948 return do_shmdt(arg1
);
8950 case TARGET_NR_fsync
:
8951 return get_errno(fsync(arg1
));
8952 case TARGET_NR_clone
:
8953 /* Linux manages to have three different orderings for its
8954 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
8955 * match the kernel's CONFIG_CLONE_* settings.
8956 * Microblaze is further special in that it uses a sixth
8957 * implicit argument to clone for the TLS pointer.
8959 #if defined(TARGET_MICROBLAZE)
8960 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
8961 #elif defined(TARGET_CLONE_BACKWARDS)
8962 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
8963 #elif defined(TARGET_CLONE_BACKWARDS2)
8964 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
8966 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
8969 #ifdef __NR_exit_group
8970 /* new thread calls */
8971 case TARGET_NR_exit_group
:
8972 preexit_cleanup(cpu_env
, arg1
);
8973 return get_errno(exit_group(arg1
));
8975 case TARGET_NR_setdomainname
:
8976 if (!(p
= lock_user_string(arg1
)))
8977 return -TARGET_EFAULT
;
8978 ret
= get_errno(setdomainname(p
, arg2
));
8979 unlock_user(p
, arg1
, 0);
8981 case TARGET_NR_uname
:
8982 /* no need to transcode because we use the linux syscall */
8984 struct new_utsname
* buf
;
8986 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
8987 return -TARGET_EFAULT
;
8988 ret
= get_errno(sys_uname(buf
));
8989 if (!is_error(ret
)) {
8990 /* Overwrite the native machine name with whatever is being
8992 g_strlcpy(buf
->machine
, cpu_to_uname_machine(cpu_env
),
8993 sizeof(buf
->machine
));
8994 /* Allow the user to override the reported release. */
8995 if (qemu_uname_release
&& *qemu_uname_release
) {
8996 g_strlcpy(buf
->release
, qemu_uname_release
,
8997 sizeof(buf
->release
));
9000 unlock_user_struct(buf
, arg1
, 1);
9004 case TARGET_NR_modify_ldt
:
9005 return do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
9006 #if !defined(TARGET_X86_64)
9007 case TARGET_NR_vm86
:
9008 return do_vm86(cpu_env
, arg1
, arg2
);
9011 case TARGET_NR_adjtimex
:
9013 struct timex host_buf
;
9015 if (target_to_host_timex(&host_buf
, arg1
) != 0) {
9016 return -TARGET_EFAULT
;
9018 ret
= get_errno(adjtimex(&host_buf
));
9019 if (!is_error(ret
)) {
9020 if (host_to_target_timex(arg1
, &host_buf
) != 0) {
9021 return -TARGET_EFAULT
;
9026 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9027 case TARGET_NR_clock_adjtime
:
9029 struct timex htx
, *phtx
= &htx
;
9031 if (target_to_host_timex(phtx
, arg2
) != 0) {
9032 return -TARGET_EFAULT
;
9034 ret
= get_errno(clock_adjtime(arg1
, phtx
));
9035 if (!is_error(ret
) && phtx
) {
9036 if (host_to_target_timex(arg2
, phtx
) != 0) {
9037 return -TARGET_EFAULT
;
9043 case TARGET_NR_getpgid
:
9044 return get_errno(getpgid(arg1
));
9045 case TARGET_NR_fchdir
:
9046 return get_errno(fchdir(arg1
));
9047 case TARGET_NR_personality
:
9048 return get_errno(personality(arg1
));
9049 #ifdef TARGET_NR__llseek /* Not on alpha */
9050 case TARGET_NR__llseek
:
9053 #if !defined(__NR_llseek)
9054 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
9056 ret
= get_errno(res
);
9061 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
9063 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
9064 return -TARGET_EFAULT
;
9069 #ifdef TARGET_NR_getdents
9070 case TARGET_NR_getdents
:
9071 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9072 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9074 struct target_dirent
*target_dirp
;
9075 struct linux_dirent
*dirp
;
9076 abi_long count
= arg3
;
9078 dirp
= g_try_malloc(count
);
9080 return -TARGET_ENOMEM
;
9083 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9084 if (!is_error(ret
)) {
9085 struct linux_dirent
*de
;
9086 struct target_dirent
*tde
;
9088 int reclen
, treclen
;
9089 int count1
, tnamelen
;
9093 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9094 return -TARGET_EFAULT
;
9097 reclen
= de
->d_reclen
;
9098 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
9099 assert(tnamelen
>= 0);
9100 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
9101 assert(count1
+ treclen
<= count
);
9102 tde
->d_reclen
= tswap16(treclen
);
9103 tde
->d_ino
= tswapal(de
->d_ino
);
9104 tde
->d_off
= tswapal(de
->d_off
);
9105 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
9106 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9108 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9112 unlock_user(target_dirp
, arg2
, ret
);
9118 struct linux_dirent
*dirp
;
9119 abi_long count
= arg3
;
9121 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9122 return -TARGET_EFAULT
;
9123 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9124 if (!is_error(ret
)) {
9125 struct linux_dirent
*de
;
9130 reclen
= de
->d_reclen
;
9133 de
->d_reclen
= tswap16(reclen
);
9134 tswapls(&de
->d_ino
);
9135 tswapls(&de
->d_off
);
9136 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9140 unlock_user(dirp
, arg2
, ret
);
9144 /* Implement getdents in terms of getdents64 */
9146 struct linux_dirent64
*dirp
;
9147 abi_long count
= arg3
;
9149 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
9151 return -TARGET_EFAULT
;
9153 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9154 if (!is_error(ret
)) {
9155 /* Convert the dirent64 structs to target dirent. We do this
9156 * in-place, since we can guarantee that a target_dirent is no
9157 * larger than a dirent64; however this means we have to be
9158 * careful to read everything before writing in the new format.
9160 struct linux_dirent64
*de
;
9161 struct target_dirent
*tde
;
9166 tde
= (struct target_dirent
*)dirp
;
9168 int namelen
, treclen
;
9169 int reclen
= de
->d_reclen
;
9170 uint64_t ino
= de
->d_ino
;
9171 int64_t off
= de
->d_off
;
9172 uint8_t type
= de
->d_type
;
9174 namelen
= strlen(de
->d_name
);
9175 treclen
= offsetof(struct target_dirent
, d_name
)
9177 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
9179 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
9180 tde
->d_ino
= tswapal(ino
);
9181 tde
->d_off
= tswapal(off
);
9182 tde
->d_reclen
= tswap16(treclen
);
9183 /* The target_dirent type is in what was formerly a padding
9184 * byte at the end of the structure:
9186 *(((char *)tde
) + treclen
- 1) = type
;
9188 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9189 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9195 unlock_user(dirp
, arg2
, ret
);
9199 #endif /* TARGET_NR_getdents */
9200 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9201 case TARGET_NR_getdents64
:
9203 struct linux_dirent64
*dirp
;
9204 abi_long count
= arg3
;
9205 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9206 return -TARGET_EFAULT
;
9207 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9208 if (!is_error(ret
)) {
9209 struct linux_dirent64
*de
;
9214 reclen
= de
->d_reclen
;
9217 de
->d_reclen
= tswap16(reclen
);
9218 tswap64s((uint64_t *)&de
->d_ino
);
9219 tswap64s((uint64_t *)&de
->d_off
);
9220 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9224 unlock_user(dirp
, arg2
, ret
);
9227 #endif /* TARGET_NR_getdents64 */
9228 #if defined(TARGET_NR__newselect)
9229 case TARGET_NR__newselect
:
9230 return do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9232 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9233 # ifdef TARGET_NR_poll
9234 case TARGET_NR_poll
:
9236 # ifdef TARGET_NR_ppoll
9237 case TARGET_NR_ppoll
:
9240 struct target_pollfd
*target_pfd
;
9241 unsigned int nfds
= arg2
;
9248 if (nfds
> (INT_MAX
/ sizeof(struct target_pollfd
))) {
9249 return -TARGET_EINVAL
;
9252 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
9253 sizeof(struct target_pollfd
) * nfds
, 1);
9255 return -TARGET_EFAULT
;
9258 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
9259 for (i
= 0; i
< nfds
; i
++) {
9260 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
9261 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
9266 # ifdef TARGET_NR_ppoll
9267 case TARGET_NR_ppoll
:
9269 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
9270 target_sigset_t
*target_set
;
9271 sigset_t _set
, *set
= &_set
;
9274 if (target_to_host_timespec(timeout_ts
, arg3
)) {
9275 unlock_user(target_pfd
, arg1
, 0);
9276 return -TARGET_EFAULT
;
9283 if (arg5
!= sizeof(target_sigset_t
)) {
9284 unlock_user(target_pfd
, arg1
, 0);
9285 return -TARGET_EINVAL
;
9288 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
9290 unlock_user(target_pfd
, arg1
, 0);
9291 return -TARGET_EFAULT
;
9293 target_to_host_sigset(set
, target_set
);
9298 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
9299 set
, SIGSET_T_SIZE
));
9301 if (!is_error(ret
) && arg3
) {
9302 host_to_target_timespec(arg3
, timeout_ts
);
9305 unlock_user(target_set
, arg4
, 0);
9310 # ifdef TARGET_NR_poll
9311 case TARGET_NR_poll
:
9313 struct timespec ts
, *pts
;
9316 /* Convert ms to secs, ns */
9317 ts
.tv_sec
= arg3
/ 1000;
9318 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
9321 /* -ve poll() timeout means "infinite" */
9324 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
9329 g_assert_not_reached();
9332 if (!is_error(ret
)) {
9333 for(i
= 0; i
< nfds
; i
++) {
9334 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
9337 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
9341 case TARGET_NR_flock
:
9342 /* NOTE: the flock constant seems to be the same for every
9344 return get_errno(safe_flock(arg1
, arg2
));
9345 case TARGET_NR_readv
:
9347 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
9349 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
9350 unlock_iovec(vec
, arg2
, arg3
, 1);
9352 ret
= -host_to_target_errno(errno
);
9356 case TARGET_NR_writev
:
9358 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
9360 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
9361 unlock_iovec(vec
, arg2
, arg3
, 0);
9363 ret
= -host_to_target_errno(errno
);
9367 #if defined(TARGET_NR_preadv)
9368 case TARGET_NR_preadv
:
9370 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
9372 unsigned long low
, high
;
9374 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
9375 ret
= get_errno(safe_preadv(arg1
, vec
, arg3
, low
, high
));
9376 unlock_iovec(vec
, arg2
, arg3
, 1);
9378 ret
= -host_to_target_errno(errno
);
9383 #if defined(TARGET_NR_pwritev)
9384 case TARGET_NR_pwritev
:
9386 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
9388 unsigned long low
, high
;
9390 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
9391 ret
= get_errno(safe_pwritev(arg1
, vec
, arg3
, low
, high
));
9392 unlock_iovec(vec
, arg2
, arg3
, 0);
9394 ret
= -host_to_target_errno(errno
);
9399 case TARGET_NR_getsid
:
9400 return get_errno(getsid(arg1
));
9401 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9402 case TARGET_NR_fdatasync
:
9403 return get_errno(fdatasync(arg1
));
9405 #ifdef TARGET_NR__sysctl
9406 case TARGET_NR__sysctl
:
9407 /* We don't implement this, but ENOTDIR is always a safe
9409 return -TARGET_ENOTDIR
;
9411 case TARGET_NR_sched_getaffinity
:
9413 unsigned int mask_size
;
9414 unsigned long *mask
;
9417 * sched_getaffinity needs multiples of ulong, so need to take
9418 * care of mismatches between target ulong and host ulong sizes.
9420 if (arg2
& (sizeof(abi_ulong
) - 1)) {
9421 return -TARGET_EINVAL
;
9423 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
9425 mask
= alloca(mask_size
);
9426 memset(mask
, 0, mask_size
);
9427 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
9429 if (!is_error(ret
)) {
9431 /* More data returned than the caller's buffer will fit.
9432 * This only happens if sizeof(abi_long) < sizeof(long)
9433 * and the caller passed us a buffer holding an odd number
9434 * of abi_longs. If the host kernel is actually using the
9435 * extra 4 bytes then fail EINVAL; otherwise we can just
9436 * ignore them and only copy the interesting part.
9438 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
9439 if (numcpus
> arg2
* 8) {
9440 return -TARGET_EINVAL
;
9445 if (host_to_target_cpu_mask(mask
, mask_size
, arg3
, ret
)) {
9446 return -TARGET_EFAULT
;
9451 case TARGET_NR_sched_setaffinity
:
9453 unsigned int mask_size
;
9454 unsigned long *mask
;
9457 * sched_setaffinity needs multiples of ulong, so need to take
9458 * care of mismatches between target ulong and host ulong sizes.
9460 if (arg2
& (sizeof(abi_ulong
) - 1)) {
9461 return -TARGET_EINVAL
;
9463 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
9464 mask
= alloca(mask_size
);
9466 ret
= target_to_host_cpu_mask(mask
, mask_size
, arg3
, arg2
);
9471 return get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
9473 case TARGET_NR_getcpu
:
9476 ret
= get_errno(sys_getcpu(arg1
? &cpu
: NULL
,
9477 arg2
? &node
: NULL
,
9479 if (is_error(ret
)) {
9482 if (arg1
&& put_user_u32(cpu
, arg1
)) {
9483 return -TARGET_EFAULT
;
9485 if (arg2
&& put_user_u32(node
, arg2
)) {
9486 return -TARGET_EFAULT
;
9490 case TARGET_NR_sched_setparam
:
9492 struct sched_param
*target_schp
;
9493 struct sched_param schp
;
9496 return -TARGET_EINVAL
;
9498 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
9499 return -TARGET_EFAULT
;
9500 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
9501 unlock_user_struct(target_schp
, arg2
, 0);
9502 return get_errno(sched_setparam(arg1
, &schp
));
9504 case TARGET_NR_sched_getparam
:
9506 struct sched_param
*target_schp
;
9507 struct sched_param schp
;
9510 return -TARGET_EINVAL
;
9512 ret
= get_errno(sched_getparam(arg1
, &schp
));
9513 if (!is_error(ret
)) {
9514 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
9515 return -TARGET_EFAULT
;
9516 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
9517 unlock_user_struct(target_schp
, arg2
, 1);
9521 case TARGET_NR_sched_setscheduler
:
9523 struct sched_param
*target_schp
;
9524 struct sched_param schp
;
9526 return -TARGET_EINVAL
;
9528 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
9529 return -TARGET_EFAULT
;
9530 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
9531 unlock_user_struct(target_schp
, arg3
, 0);
9532 return get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
9534 case TARGET_NR_sched_getscheduler
:
9535 return get_errno(sched_getscheduler(arg1
));
9536 case TARGET_NR_sched_yield
:
9537 return get_errno(sched_yield());
9538 case TARGET_NR_sched_get_priority_max
:
9539 return get_errno(sched_get_priority_max(arg1
));
9540 case TARGET_NR_sched_get_priority_min
:
9541 return get_errno(sched_get_priority_min(arg1
));
9542 case TARGET_NR_sched_rr_get_interval
:
9545 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
9546 if (!is_error(ret
)) {
9547 ret
= host_to_target_timespec(arg2
, &ts
);
9551 case TARGET_NR_nanosleep
:
9553 struct timespec req
, rem
;
9554 target_to_host_timespec(&req
, arg1
);
9555 ret
= get_errno(safe_nanosleep(&req
, &rem
));
9556 if (is_error(ret
) && arg2
) {
9557 host_to_target_timespec(arg2
, &rem
);
9561 case TARGET_NR_prctl
:
9563 case PR_GET_PDEATHSIG
:
9566 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
9567 if (!is_error(ret
) && arg2
9568 && put_user_ual(deathsig
, arg2
)) {
9569 return -TARGET_EFAULT
;
9576 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
9578 return -TARGET_EFAULT
;
9580 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
9582 unlock_user(name
, arg2
, 16);
9587 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
9589 return -TARGET_EFAULT
;
9591 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
9593 unlock_user(name
, arg2
, 0);
9598 case TARGET_PR_GET_FP_MODE
:
9600 CPUMIPSState
*env
= ((CPUMIPSState
*)cpu_env
);
9602 if (env
->CP0_Status
& (1 << CP0St_FR
)) {
9603 ret
|= TARGET_PR_FP_MODE_FR
;
9605 if (env
->CP0_Config5
& (1 << CP0C5_FRE
)) {
9606 ret
|= TARGET_PR_FP_MODE_FRE
;
9610 case TARGET_PR_SET_FP_MODE
:
9612 CPUMIPSState
*env
= ((CPUMIPSState
*)cpu_env
);
9613 bool old_fr
= env
->CP0_Status
& (1 << CP0St_FR
);
9614 bool old_fre
= env
->CP0_Config5
& (1 << CP0C5_FRE
);
9615 bool new_fr
= arg2
& TARGET_PR_FP_MODE_FR
;
9616 bool new_fre
= arg2
& TARGET_PR_FP_MODE_FRE
;
9618 const unsigned int known_bits
= TARGET_PR_FP_MODE_FR
|
9619 TARGET_PR_FP_MODE_FRE
;
9621 /* If nothing to change, return right away, successfully. */
9622 if (old_fr
== new_fr
&& old_fre
== new_fre
) {
9625 /* Check the value is valid */
9626 if (arg2
& ~known_bits
) {
9627 return -TARGET_EOPNOTSUPP
;
9629 /* Setting FRE without FR is not supported. */
9630 if (new_fre
&& !new_fr
) {
9631 return -TARGET_EOPNOTSUPP
;
9633 if (new_fr
&& !(env
->active_fpu
.fcr0
& (1 << FCR0_F64
))) {
9634 /* FR1 is not supported */
9635 return -TARGET_EOPNOTSUPP
;
9637 if (!new_fr
&& (env
->active_fpu
.fcr0
& (1 << FCR0_F64
))
9638 && !(env
->CP0_Status_rw_bitmask
& (1 << CP0St_FR
))) {
9639 /* cannot set FR=0 */
9640 return -TARGET_EOPNOTSUPP
;
9642 if (new_fre
&& !(env
->active_fpu
.fcr0
& (1 << FCR0_FREP
))) {
9643 /* Cannot set FRE=1 */
9644 return -TARGET_EOPNOTSUPP
;
9648 fpr_t
*fpr
= env
->active_fpu
.fpr
;
9649 for (i
= 0; i
< 32 ; i
+= 2) {
9650 if (!old_fr
&& new_fr
) {
9651 fpr
[i
].w
[!FP_ENDIAN_IDX
] = fpr
[i
+ 1].w
[FP_ENDIAN_IDX
];
9652 } else if (old_fr
&& !new_fr
) {
9653 fpr
[i
+ 1].w
[FP_ENDIAN_IDX
] = fpr
[i
].w
[!FP_ENDIAN_IDX
];
9658 env
->CP0_Status
|= (1 << CP0St_FR
);
9659 env
->hflags
|= MIPS_HFLAG_F64
;
9661 env
->CP0_Status
&= ~(1 << CP0St_FR
);
9662 env
->hflags
&= ~MIPS_HFLAG_F64
;
9665 env
->CP0_Config5
|= (1 << CP0C5_FRE
);
9666 if (env
->active_fpu
.fcr0
& (1 << FCR0_FREP
)) {
9667 env
->hflags
|= MIPS_HFLAG_FRE
;
9670 env
->CP0_Config5
&= ~(1 << CP0C5_FRE
);
9671 env
->hflags
&= ~MIPS_HFLAG_FRE
;
9677 #ifdef TARGET_AARCH64
9678 case TARGET_PR_SVE_SET_VL
:
9680 * We cannot support either PR_SVE_SET_VL_ONEXEC or
9681 * PR_SVE_VL_INHERIT. Note the kernel definition
9682 * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
9683 * even though the current architectural maximum is VQ=16.
9685 ret
= -TARGET_EINVAL
;
9686 if (cpu_isar_feature(aa64_sve
, arm_env_get_cpu(cpu_env
))
9687 && arg2
>= 0 && arg2
<= 512 * 16 && !(arg2
& 15)) {
9688 CPUARMState
*env
= cpu_env
;
9689 ARMCPU
*cpu
= arm_env_get_cpu(env
);
9690 uint32_t vq
, old_vq
;
9692 old_vq
= (env
->vfp
.zcr_el
[1] & 0xf) + 1;
9693 vq
= MAX(arg2
/ 16, 1);
9694 vq
= MIN(vq
, cpu
->sve_max_vq
);
9697 aarch64_sve_narrow_vq(env
, vq
);
9699 env
->vfp
.zcr_el
[1] = vq
- 1;
9703 case TARGET_PR_SVE_GET_VL
:
9704 ret
= -TARGET_EINVAL
;
9706 ARMCPU
*cpu
= arm_env_get_cpu(cpu_env
);
9707 if (cpu_isar_feature(aa64_sve
, cpu
)) {
9708 ret
= ((cpu
->env
.vfp
.zcr_el
[1] & 0xf) + 1) * 16;
9712 case TARGET_PR_PAC_RESET_KEYS
:
9714 CPUARMState
*env
= cpu_env
;
9715 ARMCPU
*cpu
= arm_env_get_cpu(env
);
9717 if (arg3
|| arg4
|| arg5
) {
9718 return -TARGET_EINVAL
;
9720 if (cpu_isar_feature(aa64_pauth
, cpu
)) {
9721 int all
= (TARGET_PR_PAC_APIAKEY
| TARGET_PR_PAC_APIBKEY
|
9722 TARGET_PR_PAC_APDAKEY
| TARGET_PR_PAC_APDBKEY
|
9723 TARGET_PR_PAC_APGAKEY
);
9726 } else if (arg2
& ~all
) {
9727 return -TARGET_EINVAL
;
9729 if (arg2
& TARGET_PR_PAC_APIAKEY
) {
9730 arm_init_pauth_key(&env
->apia_key
);
9732 if (arg2
& TARGET_PR_PAC_APIBKEY
) {
9733 arm_init_pauth_key(&env
->apib_key
);
9735 if (arg2
& TARGET_PR_PAC_APDAKEY
) {
9736 arm_init_pauth_key(&env
->apda_key
);
9738 if (arg2
& TARGET_PR_PAC_APDBKEY
) {
9739 arm_init_pauth_key(&env
->apdb_key
);
9741 if (arg2
& TARGET_PR_PAC_APGAKEY
) {
9742 arm_init_pauth_key(&env
->apga_key
);
9747 return -TARGET_EINVAL
;
9748 #endif /* AARCH64 */
9749 case PR_GET_SECCOMP
:
9750 case PR_SET_SECCOMP
:
9751 /* Disable seccomp to prevent the target disabling syscalls we
9753 return -TARGET_EINVAL
;
9755 /* Most prctl options have no pointer arguments */
9756 return get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
9759 #ifdef TARGET_NR_arch_prctl
9760 case TARGET_NR_arch_prctl
:
9761 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9762 return do_arch_prctl(cpu_env
, arg1
, arg2
);
9767 #ifdef TARGET_NR_pread64
9768 case TARGET_NR_pread64
:
9769 if (regpairs_aligned(cpu_env
, num
)) {
9773 if (arg2
== 0 && arg3
== 0) {
9774 /* Special-case NULL buffer and zero length, which should succeed */
9777 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9779 return -TARGET_EFAULT
;
9782 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
9783 unlock_user(p
, arg2
, ret
);
9785 case TARGET_NR_pwrite64
:
9786 if (regpairs_aligned(cpu_env
, num
)) {
9790 if (arg2
== 0 && arg3
== 0) {
9791 /* Special-case NULL buffer and zero length, which should succeed */
9794 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
9796 return -TARGET_EFAULT
;
9799 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
9800 unlock_user(p
, arg2
, 0);
9803 case TARGET_NR_getcwd
:
9804 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
9805 return -TARGET_EFAULT
;
9806 ret
= get_errno(sys_getcwd1(p
, arg2
));
9807 unlock_user(p
, arg1
, ret
);
9809 case TARGET_NR_capget
:
9810 case TARGET_NR_capset
:
9812 struct target_user_cap_header
*target_header
;
9813 struct target_user_cap_data
*target_data
= NULL
;
9814 struct __user_cap_header_struct header
;
9815 struct __user_cap_data_struct data
[2];
9816 struct __user_cap_data_struct
*dataptr
= NULL
;
9817 int i
, target_datalen
;
9820 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
9821 return -TARGET_EFAULT
;
9823 header
.version
= tswap32(target_header
->version
);
9824 header
.pid
= tswap32(target_header
->pid
);
9826 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
9827 /* Version 2 and up takes pointer to two user_data structs */
9831 target_datalen
= sizeof(*target_data
) * data_items
;
9834 if (num
== TARGET_NR_capget
) {
9835 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
9837 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
9840 unlock_user_struct(target_header
, arg1
, 0);
9841 return -TARGET_EFAULT
;
9844 if (num
== TARGET_NR_capset
) {
9845 for (i
= 0; i
< data_items
; i
++) {
9846 data
[i
].effective
= tswap32(target_data
[i
].effective
);
9847 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
9848 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
9855 if (num
== TARGET_NR_capget
) {
9856 ret
= get_errno(capget(&header
, dataptr
));
9858 ret
= get_errno(capset(&header
, dataptr
));
9861 /* The kernel always updates version for both capget and capset */
9862 target_header
->version
= tswap32(header
.version
);
9863 unlock_user_struct(target_header
, arg1
, 1);
9866 if (num
== TARGET_NR_capget
) {
9867 for (i
= 0; i
< data_items
; i
++) {
9868 target_data
[i
].effective
= tswap32(data
[i
].effective
);
9869 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
9870 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
9872 unlock_user(target_data
, arg2
, target_datalen
);
9874 unlock_user(target_data
, arg2
, 0);
9879 case TARGET_NR_sigaltstack
:
9880 return do_sigaltstack(arg1
, arg2
,
9881 get_sp_from_cpustate((CPUArchState
*)cpu_env
));
9883 #ifdef CONFIG_SENDFILE
9884 #ifdef TARGET_NR_sendfile
9885 case TARGET_NR_sendfile
:
9890 ret
= get_user_sal(off
, arg3
);
9891 if (is_error(ret
)) {
9896 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
9897 if (!is_error(ret
) && arg3
) {
9898 abi_long ret2
= put_user_sal(off
, arg3
);
9899 if (is_error(ret2
)) {
9906 #ifdef TARGET_NR_sendfile64
9907 case TARGET_NR_sendfile64
:
9912 ret
= get_user_s64(off
, arg3
);
9913 if (is_error(ret
)) {
9918 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
9919 if (!is_error(ret
) && arg3
) {
9920 abi_long ret2
= put_user_s64(off
, arg3
);
9921 if (is_error(ret2
)) {
9929 #ifdef TARGET_NR_vfork
9930 case TARGET_NR_vfork
:
9931 return get_errno(do_fork(cpu_env
,
9932 CLONE_VFORK
| CLONE_VM
| TARGET_SIGCHLD
,
9935 #ifdef TARGET_NR_ugetrlimit
9936 case TARGET_NR_ugetrlimit
:
9939 int resource
= target_to_host_resource(arg1
);
9940 ret
= get_errno(getrlimit(resource
, &rlim
));
9941 if (!is_error(ret
)) {
9942 struct target_rlimit
*target_rlim
;
9943 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
9944 return -TARGET_EFAULT
;
9945 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
9946 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
9947 unlock_user_struct(target_rlim
, arg2
, 1);
9952 #ifdef TARGET_NR_truncate64
9953 case TARGET_NR_truncate64
:
9954 if (!(p
= lock_user_string(arg1
)))
9955 return -TARGET_EFAULT
;
9956 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
9957 unlock_user(p
, arg1
, 0);
9960 #ifdef TARGET_NR_ftruncate64
9961 case TARGET_NR_ftruncate64
:
9962 return target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
9964 #ifdef TARGET_NR_stat64
9965 case TARGET_NR_stat64
:
9966 if (!(p
= lock_user_string(arg1
))) {
9967 return -TARGET_EFAULT
;
9969 ret
= get_errno(stat(path(p
), &st
));
9970 unlock_user(p
, arg1
, 0);
9972 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
9975 #ifdef TARGET_NR_lstat64
9976 case TARGET_NR_lstat64
:
9977 if (!(p
= lock_user_string(arg1
))) {
9978 return -TARGET_EFAULT
;
9980 ret
= get_errno(lstat(path(p
), &st
));
9981 unlock_user(p
, arg1
, 0);
9983 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
9986 #ifdef TARGET_NR_fstat64
9987 case TARGET_NR_fstat64
:
9988 ret
= get_errno(fstat(arg1
, &st
));
9990 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
9993 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
9994 #ifdef TARGET_NR_fstatat64
9995 case TARGET_NR_fstatat64
:
9997 #ifdef TARGET_NR_newfstatat
9998 case TARGET_NR_newfstatat
:
10000 if (!(p
= lock_user_string(arg2
))) {
10001 return -TARGET_EFAULT
;
10003 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
10004 unlock_user(p
, arg2
, 0);
10005 if (!is_error(ret
))
10006 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
10009 #ifdef TARGET_NR_lchown
10010 case TARGET_NR_lchown
:
10011 if (!(p
= lock_user_string(arg1
)))
10012 return -TARGET_EFAULT
;
10013 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10014 unlock_user(p
, arg1
, 0);
10017 #ifdef TARGET_NR_getuid
10018 case TARGET_NR_getuid
:
10019 return get_errno(high2lowuid(getuid()));
10021 #ifdef TARGET_NR_getgid
10022 case TARGET_NR_getgid
:
10023 return get_errno(high2lowgid(getgid()));
10025 #ifdef TARGET_NR_geteuid
10026 case TARGET_NR_geteuid
:
10027 return get_errno(high2lowuid(geteuid()));
10029 #ifdef TARGET_NR_getegid
10030 case TARGET_NR_getegid
:
10031 return get_errno(high2lowgid(getegid()));
10033 case TARGET_NR_setreuid
:
10034 return get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
10035 case TARGET_NR_setregid
:
10036 return get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
10037 case TARGET_NR_getgroups
:
10039 int gidsetsize
= arg1
;
10040 target_id
*target_grouplist
;
10044 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10045 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10046 if (gidsetsize
== 0)
10048 if (!is_error(ret
)) {
10049 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
10050 if (!target_grouplist
)
10051 return -TARGET_EFAULT
;
10052 for(i
= 0;i
< ret
; i
++)
10053 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
10054 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
10058 case TARGET_NR_setgroups
:
10060 int gidsetsize
= arg1
;
10061 target_id
*target_grouplist
;
10062 gid_t
*grouplist
= NULL
;
10065 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10066 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
10067 if (!target_grouplist
) {
10068 return -TARGET_EFAULT
;
10070 for (i
= 0; i
< gidsetsize
; i
++) {
10071 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
10073 unlock_user(target_grouplist
, arg2
, 0);
10075 return get_errno(setgroups(gidsetsize
, grouplist
));
10077 case TARGET_NR_fchown
:
10078 return get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
10079 #if defined(TARGET_NR_fchownat)
10080 case TARGET_NR_fchownat
:
10081 if (!(p
= lock_user_string(arg2
)))
10082 return -TARGET_EFAULT
;
10083 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
10084 low2highgid(arg4
), arg5
));
10085 unlock_user(p
, arg2
, 0);
10088 #ifdef TARGET_NR_setresuid
10089 case TARGET_NR_setresuid
:
10090 return get_errno(sys_setresuid(low2highuid(arg1
),
10092 low2highuid(arg3
)));
10094 #ifdef TARGET_NR_getresuid
10095 case TARGET_NR_getresuid
:
10097 uid_t ruid
, euid
, suid
;
10098 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
10099 if (!is_error(ret
)) {
10100 if (put_user_id(high2lowuid(ruid
), arg1
)
10101 || put_user_id(high2lowuid(euid
), arg2
)
10102 || put_user_id(high2lowuid(suid
), arg3
))
10103 return -TARGET_EFAULT
;
10108 #ifdef TARGET_NR_getresgid
10109 case TARGET_NR_setresgid
:
10110 return get_errno(sys_setresgid(low2highgid(arg1
),
10112 low2highgid(arg3
)));
10114 #ifdef TARGET_NR_getresgid
10115 case TARGET_NR_getresgid
:
10117 gid_t rgid
, egid
, sgid
;
10118 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
10119 if (!is_error(ret
)) {
10120 if (put_user_id(high2lowgid(rgid
), arg1
)
10121 || put_user_id(high2lowgid(egid
), arg2
)
10122 || put_user_id(high2lowgid(sgid
), arg3
))
10123 return -TARGET_EFAULT
;
10128 #ifdef TARGET_NR_chown
10129 case TARGET_NR_chown
:
10130 if (!(p
= lock_user_string(arg1
)))
10131 return -TARGET_EFAULT
;
10132 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10133 unlock_user(p
, arg1
, 0);
10136 case TARGET_NR_setuid
:
10137 return get_errno(sys_setuid(low2highuid(arg1
)));
10138 case TARGET_NR_setgid
:
10139 return get_errno(sys_setgid(low2highgid(arg1
)));
10140 case TARGET_NR_setfsuid
:
10141 return get_errno(setfsuid(arg1
));
10142 case TARGET_NR_setfsgid
:
10143 return get_errno(setfsgid(arg1
));
10145 #ifdef TARGET_NR_lchown32
10146 case TARGET_NR_lchown32
:
10147 if (!(p
= lock_user_string(arg1
)))
10148 return -TARGET_EFAULT
;
10149 ret
= get_errno(lchown(p
, arg2
, arg3
));
10150 unlock_user(p
, arg1
, 0);
10153 #ifdef TARGET_NR_getuid32
10154 case TARGET_NR_getuid32
:
10155 return get_errno(getuid());
10158 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10159 /* Alpha specific */
10160 case TARGET_NR_getxuid
:
10164 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
10166 return get_errno(getuid());
10168 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10169 /* Alpha specific */
10170 case TARGET_NR_getxgid
:
10174 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
10176 return get_errno(getgid());
10178 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10179 /* Alpha specific */
10180 case TARGET_NR_osf_getsysinfo
:
10181 ret
= -TARGET_EOPNOTSUPP
;
10183 case TARGET_GSI_IEEE_FP_CONTROL
:
10185 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
10187 /* Copied from linux ieee_fpcr_to_swcr. */
10188 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
10189 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
10190 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
10191 | SWCR_TRAP_ENABLE_DZE
10192 | SWCR_TRAP_ENABLE_OVF
);
10193 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
10194 | SWCR_TRAP_ENABLE_INE
);
10195 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
10196 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
10198 if (put_user_u64 (swcr
, arg2
))
10199 return -TARGET_EFAULT
;
10204 /* case GSI_IEEE_STATE_AT_SIGNAL:
10205 -- Not implemented in linux kernel.
10207 -- Retrieves current unaligned access state; not much used.
10208 case GSI_PROC_TYPE:
10209 -- Retrieves implver information; surely not used.
10210 case GSI_GET_HWRPB:
10211 -- Grabs a copy of the HWRPB; surely not used.
10216 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10217 /* Alpha specific */
10218 case TARGET_NR_osf_setsysinfo
:
10219 ret
= -TARGET_EOPNOTSUPP
;
10221 case TARGET_SSI_IEEE_FP_CONTROL
:
10223 uint64_t swcr
, fpcr
, orig_fpcr
;
10225 if (get_user_u64 (swcr
, arg2
)) {
10226 return -TARGET_EFAULT
;
10228 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10229 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
10231 /* Copied from linux ieee_swcr_to_fpcr. */
10232 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
10233 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
10234 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
10235 | SWCR_TRAP_ENABLE_DZE
10236 | SWCR_TRAP_ENABLE_OVF
)) << 48;
10237 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
10238 | SWCR_TRAP_ENABLE_INE
)) << 57;
10239 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
10240 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
10242 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
10247 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
10249 uint64_t exc
, fpcr
, orig_fpcr
;
10252 if (get_user_u64(exc
, arg2
)) {
10253 return -TARGET_EFAULT
;
10256 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10258 /* We only add to the exception status here. */
10259 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
10261 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
10264 /* Old exceptions are not signaled. */
10265 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
10267 /* If any exceptions set by this call,
10268 and are unmasked, send a signal. */
10270 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
10271 si_code
= TARGET_FPE_FLTRES
;
10273 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
10274 si_code
= TARGET_FPE_FLTUND
;
10276 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
10277 si_code
= TARGET_FPE_FLTOVF
;
10279 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
10280 si_code
= TARGET_FPE_FLTDIV
;
10282 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
10283 si_code
= TARGET_FPE_FLTINV
;
10285 if (si_code
!= 0) {
10286 target_siginfo_t info
;
10287 info
.si_signo
= SIGFPE
;
10289 info
.si_code
= si_code
;
10290 info
._sifields
._sigfault
._addr
10291 = ((CPUArchState
*)cpu_env
)->pc
;
10292 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
10293 QEMU_SI_FAULT
, &info
);
10298 /* case SSI_NVPAIRS:
10299 -- Used with SSIN_UACPROC to enable unaligned accesses.
10300 case SSI_IEEE_STATE_AT_SIGNAL:
10301 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10302 -- Not implemented in linux kernel
10307 #ifdef TARGET_NR_osf_sigprocmask
10308 /* Alpha specific. */
10309 case TARGET_NR_osf_sigprocmask
:
10313 sigset_t set
, oldset
;
10316 case TARGET_SIG_BLOCK
:
10319 case TARGET_SIG_UNBLOCK
:
10322 case TARGET_SIG_SETMASK
:
10326 return -TARGET_EINVAL
;
10329 target_to_host_old_sigset(&set
, &mask
);
10330 ret
= do_sigprocmask(how
, &set
, &oldset
);
10332 host_to_target_old_sigset(&mask
, &oldset
);
10339 #ifdef TARGET_NR_getgid32
10340 case TARGET_NR_getgid32
:
10341 return get_errno(getgid());
10343 #ifdef TARGET_NR_geteuid32
10344 case TARGET_NR_geteuid32
:
10345 return get_errno(geteuid());
10347 #ifdef TARGET_NR_getegid32
10348 case TARGET_NR_getegid32
:
10349 return get_errno(getegid());
10351 #ifdef TARGET_NR_setreuid32
10352 case TARGET_NR_setreuid32
:
10353 return get_errno(setreuid(arg1
, arg2
));
10355 #ifdef TARGET_NR_setregid32
10356 case TARGET_NR_setregid32
:
10357 return get_errno(setregid(arg1
, arg2
));
10359 #ifdef TARGET_NR_getgroups32
10360 case TARGET_NR_getgroups32
:
10362 int gidsetsize
= arg1
;
10363 uint32_t *target_grouplist
;
10367 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10368 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10369 if (gidsetsize
== 0)
10371 if (!is_error(ret
)) {
10372 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
10373 if (!target_grouplist
) {
10374 return -TARGET_EFAULT
;
10376 for(i
= 0;i
< ret
; i
++)
10377 target_grouplist
[i
] = tswap32(grouplist
[i
]);
10378 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
10383 #ifdef TARGET_NR_setgroups32
10384 case TARGET_NR_setgroups32
:
10386 int gidsetsize
= arg1
;
10387 uint32_t *target_grouplist
;
10391 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10392 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
10393 if (!target_grouplist
) {
10394 return -TARGET_EFAULT
;
10396 for(i
= 0;i
< gidsetsize
; i
++)
10397 grouplist
[i
] = tswap32(target_grouplist
[i
]);
10398 unlock_user(target_grouplist
, arg2
, 0);
10399 return get_errno(setgroups(gidsetsize
, grouplist
));
10402 #ifdef TARGET_NR_fchown32
10403 case TARGET_NR_fchown32
:
10404 return get_errno(fchown(arg1
, arg2
, arg3
));
10406 #ifdef TARGET_NR_setresuid32
10407 case TARGET_NR_setresuid32
:
10408 return get_errno(sys_setresuid(arg1
, arg2
, arg3
));
10410 #ifdef TARGET_NR_getresuid32
10411 case TARGET_NR_getresuid32
:
10413 uid_t ruid
, euid
, suid
;
10414 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
10415 if (!is_error(ret
)) {
10416 if (put_user_u32(ruid
, arg1
)
10417 || put_user_u32(euid
, arg2
)
10418 || put_user_u32(suid
, arg3
))
10419 return -TARGET_EFAULT
;
10424 #ifdef TARGET_NR_setresgid32
10425 case TARGET_NR_setresgid32
:
10426 return get_errno(sys_setresgid(arg1
, arg2
, arg3
));
10428 #ifdef TARGET_NR_getresgid32
10429 case TARGET_NR_getresgid32
:
10431 gid_t rgid
, egid
, sgid
;
10432 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
10433 if (!is_error(ret
)) {
10434 if (put_user_u32(rgid
, arg1
)
10435 || put_user_u32(egid
, arg2
)
10436 || put_user_u32(sgid
, arg3
))
10437 return -TARGET_EFAULT
;
10442 #ifdef TARGET_NR_chown32
10443 case TARGET_NR_chown32
:
10444 if (!(p
= lock_user_string(arg1
)))
10445 return -TARGET_EFAULT
;
10446 ret
= get_errno(chown(p
, arg2
, arg3
));
10447 unlock_user(p
, arg1
, 0);
10450 #ifdef TARGET_NR_setuid32
10451 case TARGET_NR_setuid32
:
10452 return get_errno(sys_setuid(arg1
));
10454 #ifdef TARGET_NR_setgid32
10455 case TARGET_NR_setgid32
:
10456 return get_errno(sys_setgid(arg1
));
10458 #ifdef TARGET_NR_setfsuid32
10459 case TARGET_NR_setfsuid32
:
10460 return get_errno(setfsuid(arg1
));
10462 #ifdef TARGET_NR_setfsgid32
10463 case TARGET_NR_setfsgid32
:
10464 return get_errno(setfsgid(arg1
));
10466 #ifdef TARGET_NR_mincore
10467 case TARGET_NR_mincore
:
10469 void *a
= lock_user(VERIFY_READ
, arg1
, arg2
, 0);
10471 return -TARGET_ENOMEM
;
10473 p
= lock_user_string(arg3
);
10475 ret
= -TARGET_EFAULT
;
10477 ret
= get_errno(mincore(a
, arg2
, p
));
10478 unlock_user(p
, arg3
, ret
);
10480 unlock_user(a
, arg1
, 0);
10484 #ifdef TARGET_NR_arm_fadvise64_64
10485 case TARGET_NR_arm_fadvise64_64
:
10486 /* arm_fadvise64_64 looks like fadvise64_64 but
10487 * with different argument order: fd, advice, offset, len
10488 * rather than the usual fd, offset, len, advice.
10489 * Note that offset and len are both 64-bit so appear as
10490 * pairs of 32-bit registers.
10492 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
10493 target_offset64(arg5
, arg6
), arg2
);
10494 return -host_to_target_errno(ret
);
10497 #if TARGET_ABI_BITS == 32
10499 #ifdef TARGET_NR_fadvise64_64
10500 case TARGET_NR_fadvise64_64
:
10501 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
10502 /* 6 args: fd, advice, offset (high, low), len (high, low) */
10510 /* 6 args: fd, offset (high, low), len (high, low), advice */
10511 if (regpairs_aligned(cpu_env
, num
)) {
10512 /* offset is in (3,4), len in (5,6) and advice in 7 */
10520 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
),
10521 target_offset64(arg4
, arg5
), arg6
);
10522 return -host_to_target_errno(ret
);
10525 #ifdef TARGET_NR_fadvise64
10526 case TARGET_NR_fadvise64
:
10527 /* 5 args: fd, offset (high, low), len, advice */
10528 if (regpairs_aligned(cpu_env
, num
)) {
10529 /* offset is in (3,4), len in 5 and advice in 6 */
10535 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
), arg4
, arg5
);
10536 return -host_to_target_errno(ret
);
10539 #else /* not a 32-bit ABI */
10540 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10541 #ifdef TARGET_NR_fadvise64_64
10542 case TARGET_NR_fadvise64_64
:
10544 #ifdef TARGET_NR_fadvise64
10545 case TARGET_NR_fadvise64
:
10547 #ifdef TARGET_S390X
10549 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
10550 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
10551 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
10552 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
10556 return -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
10558 #endif /* end of 64-bit ABI fadvise handling */
10560 #ifdef TARGET_NR_madvise
10561 case TARGET_NR_madvise
:
10562 /* A straight passthrough may not be safe because qemu sometimes
10563 turns private file-backed mappings into anonymous mappings.
10564 This will break MADV_DONTNEED.
10565 This is a hint, so ignoring and returning success is ok. */
10568 #if TARGET_ABI_BITS == 32
10569 case TARGET_NR_fcntl64
:
10573 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
10574 to_flock64_fn
*copyto
= copy_to_user_flock64
;
10577 if (!((CPUARMState
*)cpu_env
)->eabi
) {
10578 copyfrom
= copy_from_user_oabi_flock64
;
10579 copyto
= copy_to_user_oabi_flock64
;
10583 cmd
= target_to_host_fcntl_cmd(arg2
);
10584 if (cmd
== -TARGET_EINVAL
) {
10589 case TARGET_F_GETLK64
:
10590 ret
= copyfrom(&fl
, arg3
);
10594 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
10596 ret
= copyto(arg3
, &fl
);
10600 case TARGET_F_SETLK64
:
10601 case TARGET_F_SETLKW64
:
10602 ret
= copyfrom(&fl
, arg3
);
10606 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
10609 ret
= do_fcntl(arg1
, arg2
, arg3
);
10615 #ifdef TARGET_NR_cacheflush
10616 case TARGET_NR_cacheflush
:
10617 /* self-modifying code is handled automatically, so nothing needed */
10620 #ifdef TARGET_NR_getpagesize
10621 case TARGET_NR_getpagesize
:
10622 return TARGET_PAGE_SIZE
;
10624 case TARGET_NR_gettid
:
10625 return get_errno(sys_gettid());
10626 #ifdef TARGET_NR_readahead
10627 case TARGET_NR_readahead
:
10628 #if TARGET_ABI_BITS == 32
10629 if (regpairs_aligned(cpu_env
, num
)) {
10634 ret
= get_errno(readahead(arg1
, target_offset64(arg2
, arg3
) , arg4
));
10636 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
10641 #ifdef TARGET_NR_setxattr
10642 case TARGET_NR_listxattr
:
10643 case TARGET_NR_llistxattr
:
10647 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10649 return -TARGET_EFAULT
;
10652 p
= lock_user_string(arg1
);
10654 if (num
== TARGET_NR_listxattr
) {
10655 ret
= get_errno(listxattr(p
, b
, arg3
));
10657 ret
= get_errno(llistxattr(p
, b
, arg3
));
10660 ret
= -TARGET_EFAULT
;
10662 unlock_user(p
, arg1
, 0);
10663 unlock_user(b
, arg2
, arg3
);
10666 case TARGET_NR_flistxattr
:
10670 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10672 return -TARGET_EFAULT
;
10675 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
10676 unlock_user(b
, arg2
, arg3
);
10679 case TARGET_NR_setxattr
:
10680 case TARGET_NR_lsetxattr
:
10682 void *p
, *n
, *v
= 0;
10684 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
10686 return -TARGET_EFAULT
;
10689 p
= lock_user_string(arg1
);
10690 n
= lock_user_string(arg2
);
10692 if (num
== TARGET_NR_setxattr
) {
10693 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
10695 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
10698 ret
= -TARGET_EFAULT
;
10700 unlock_user(p
, arg1
, 0);
10701 unlock_user(n
, arg2
, 0);
10702 unlock_user(v
, arg3
, 0);
10705 case TARGET_NR_fsetxattr
:
10709 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
10711 return -TARGET_EFAULT
;
10714 n
= lock_user_string(arg2
);
10716 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
10718 ret
= -TARGET_EFAULT
;
10720 unlock_user(n
, arg2
, 0);
10721 unlock_user(v
, arg3
, 0);
10724 case TARGET_NR_getxattr
:
10725 case TARGET_NR_lgetxattr
:
10727 void *p
, *n
, *v
= 0;
10729 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
10731 return -TARGET_EFAULT
;
10734 p
= lock_user_string(arg1
);
10735 n
= lock_user_string(arg2
);
10737 if (num
== TARGET_NR_getxattr
) {
10738 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
10740 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
10743 ret
= -TARGET_EFAULT
;
10745 unlock_user(p
, arg1
, 0);
10746 unlock_user(n
, arg2
, 0);
10747 unlock_user(v
, arg3
, arg4
);
10750 case TARGET_NR_fgetxattr
:
10754 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
10756 return -TARGET_EFAULT
;
10759 n
= lock_user_string(arg2
);
10761 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
10763 ret
= -TARGET_EFAULT
;
10765 unlock_user(n
, arg2
, 0);
10766 unlock_user(v
, arg3
, arg4
);
10769 case TARGET_NR_removexattr
:
10770 case TARGET_NR_lremovexattr
:
10773 p
= lock_user_string(arg1
);
10774 n
= lock_user_string(arg2
);
10776 if (num
== TARGET_NR_removexattr
) {
10777 ret
= get_errno(removexattr(p
, n
));
10779 ret
= get_errno(lremovexattr(p
, n
));
10782 ret
= -TARGET_EFAULT
;
10784 unlock_user(p
, arg1
, 0);
10785 unlock_user(n
, arg2
, 0);
10788 case TARGET_NR_fremovexattr
:
10791 n
= lock_user_string(arg2
);
10793 ret
= get_errno(fremovexattr(arg1
, n
));
10795 ret
= -TARGET_EFAULT
;
10797 unlock_user(n
, arg2
, 0);
10801 #endif /* CONFIG_ATTR */
10802 #ifdef TARGET_NR_set_thread_area
10803 case TARGET_NR_set_thread_area
:
10804 #if defined(TARGET_MIPS)
10805 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
10807 #elif defined(TARGET_CRIS)
10809 ret
= -TARGET_EINVAL
;
10811 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
10815 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
10816 return do_set_thread_area(cpu_env
, arg1
);
10817 #elif defined(TARGET_M68K)
10819 TaskState
*ts
= cpu
->opaque
;
10820 ts
->tp_value
= arg1
;
10824 return -TARGET_ENOSYS
;
10827 #ifdef TARGET_NR_get_thread_area
10828 case TARGET_NR_get_thread_area
:
10829 #if defined(TARGET_I386) && defined(TARGET_ABI32)
10830 return do_get_thread_area(cpu_env
, arg1
);
10831 #elif defined(TARGET_M68K)
10833 TaskState
*ts
= cpu
->opaque
;
10834 return ts
->tp_value
;
10837 return -TARGET_ENOSYS
;
10840 #ifdef TARGET_NR_getdomainname
10841 case TARGET_NR_getdomainname
:
10842 return -TARGET_ENOSYS
;
10845 #ifdef TARGET_NR_clock_settime
10846 case TARGET_NR_clock_settime
:
10848 struct timespec ts
;
10850 ret
= target_to_host_timespec(&ts
, arg2
);
10851 if (!is_error(ret
)) {
10852 ret
= get_errno(clock_settime(arg1
, &ts
));
10857 #ifdef TARGET_NR_clock_gettime
10858 case TARGET_NR_clock_gettime
:
10860 struct timespec ts
;
10861 ret
= get_errno(clock_gettime(arg1
, &ts
));
10862 if (!is_error(ret
)) {
10863 ret
= host_to_target_timespec(arg2
, &ts
);
10868 #ifdef TARGET_NR_clock_getres
10869 case TARGET_NR_clock_getres
:
10871 struct timespec ts
;
10872 ret
= get_errno(clock_getres(arg1
, &ts
));
10873 if (!is_error(ret
)) {
10874 host_to_target_timespec(arg2
, &ts
);
10879 #ifdef TARGET_NR_clock_nanosleep
10880 case TARGET_NR_clock_nanosleep
:
10882 struct timespec ts
;
10883 target_to_host_timespec(&ts
, arg3
);
10884 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
10885 &ts
, arg4
? &ts
: NULL
));
10887 host_to_target_timespec(arg4
, &ts
);
10889 #if defined(TARGET_PPC)
10890 /* clock_nanosleep is odd in that it returns positive errno values.
10891 * On PPC, CR0 bit 3 should be set in such a situation. */
10892 if (ret
&& ret
!= -TARGET_ERESTARTSYS
) {
10893 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
10900 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
10901 case TARGET_NR_set_tid_address
:
10902 return get_errno(set_tid_address((int *)g2h(arg1
)));
10905 case TARGET_NR_tkill
:
10906 return get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
10908 case TARGET_NR_tgkill
:
10909 return get_errno(safe_tgkill((int)arg1
, (int)arg2
,
10910 target_to_host_signal(arg3
)));
10912 #ifdef TARGET_NR_set_robust_list
10913 case TARGET_NR_set_robust_list
:
10914 case TARGET_NR_get_robust_list
:
10915 /* The ABI for supporting robust futexes has userspace pass
10916 * the kernel a pointer to a linked list which is updated by
10917 * userspace after the syscall; the list is walked by the kernel
10918 * when the thread exits. Since the linked list in QEMU guest
10919 * memory isn't a valid linked list for the host and we have
10920 * no way to reliably intercept the thread-death event, we can't
10921 * support these. Silently return ENOSYS so that guest userspace
10922 * falls back to a non-robust futex implementation (which should
10923 * be OK except in the corner case of the guest crashing while
10924 * holding a mutex that is shared with another process via
10927 return -TARGET_ENOSYS
;
10930 #if defined(TARGET_NR_utimensat)
10931 case TARGET_NR_utimensat
:
10933 struct timespec
*tsp
, ts
[2];
10937 target_to_host_timespec(ts
, arg3
);
10938 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
10942 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
10944 if (!(p
= lock_user_string(arg2
))) {
10945 return -TARGET_EFAULT
;
10947 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
10948 unlock_user(p
, arg2
, 0);
10953 case TARGET_NR_futex
:
10954 return do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
10955 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
10956 case TARGET_NR_inotify_init
:
10957 ret
= get_errno(sys_inotify_init());
10959 fd_trans_register(ret
, &target_inotify_trans
);
10963 #ifdef CONFIG_INOTIFY1
10964 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
10965 case TARGET_NR_inotify_init1
:
10966 ret
= get_errno(sys_inotify_init1(target_to_host_bitmask(arg1
,
10967 fcntl_flags_tbl
)));
10969 fd_trans_register(ret
, &target_inotify_trans
);
10974 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
10975 case TARGET_NR_inotify_add_watch
:
10976 p
= lock_user_string(arg2
);
10977 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
10978 unlock_user(p
, arg2
, 0);
10981 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
10982 case TARGET_NR_inotify_rm_watch
:
10983 return get_errno(sys_inotify_rm_watch(arg1
, arg2
));
10986 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
10987 case TARGET_NR_mq_open
:
10989 struct mq_attr posix_mq_attr
;
10990 struct mq_attr
*pposix_mq_attr
;
10993 host_flags
= target_to_host_bitmask(arg2
, fcntl_flags_tbl
);
10994 pposix_mq_attr
= NULL
;
10996 if (copy_from_user_mq_attr(&posix_mq_attr
, arg4
) != 0) {
10997 return -TARGET_EFAULT
;
10999 pposix_mq_attr
= &posix_mq_attr
;
11001 p
= lock_user_string(arg1
- 1);
11003 return -TARGET_EFAULT
;
11005 ret
= get_errno(mq_open(p
, host_flags
, arg3
, pposix_mq_attr
));
11006 unlock_user (p
, arg1
, 0);
11010 case TARGET_NR_mq_unlink
:
11011 p
= lock_user_string(arg1
- 1);
11013 return -TARGET_EFAULT
;
11015 ret
= get_errno(mq_unlink(p
));
11016 unlock_user (p
, arg1
, 0);
11019 case TARGET_NR_mq_timedsend
:
11021 struct timespec ts
;
11023 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11025 target_to_host_timespec(&ts
, arg5
);
11026 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
11027 host_to_target_timespec(arg5
, &ts
);
11029 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
11031 unlock_user (p
, arg2
, arg3
);
11035 case TARGET_NR_mq_timedreceive
:
11037 struct timespec ts
;
11040 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11042 target_to_host_timespec(&ts
, arg5
);
11043 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11045 host_to_target_timespec(arg5
, &ts
);
11047 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11050 unlock_user (p
, arg2
, arg3
);
11052 put_user_u32(prio
, arg4
);
11056 /* Not implemented for now... */
11057 /* case TARGET_NR_mq_notify: */
11060 case TARGET_NR_mq_getsetattr
:
11062 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
11065 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
11066 ret
= get_errno(mq_setattr(arg1
, &posix_mq_attr_in
,
11067 &posix_mq_attr_out
));
11068 } else if (arg3
!= 0) {
11069 ret
= get_errno(mq_getattr(arg1
, &posix_mq_attr_out
));
11071 if (ret
== 0 && arg3
!= 0) {
11072 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
11078 #ifdef CONFIG_SPLICE
11079 #ifdef TARGET_NR_tee
11080 case TARGET_NR_tee
:
11082 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
11086 #ifdef TARGET_NR_splice
11087 case TARGET_NR_splice
:
11089 loff_t loff_in
, loff_out
;
11090 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
11092 if (get_user_u64(loff_in
, arg2
)) {
11093 return -TARGET_EFAULT
;
11095 ploff_in
= &loff_in
;
11098 if (get_user_u64(loff_out
, arg4
)) {
11099 return -TARGET_EFAULT
;
11101 ploff_out
= &loff_out
;
11103 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
11105 if (put_user_u64(loff_in
, arg2
)) {
11106 return -TARGET_EFAULT
;
11110 if (put_user_u64(loff_out
, arg4
)) {
11111 return -TARGET_EFAULT
;
11117 #ifdef TARGET_NR_vmsplice
11118 case TARGET_NR_vmsplice
:
11120 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
11122 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
11123 unlock_iovec(vec
, arg2
, arg3
, 0);
11125 ret
= -host_to_target_errno(errno
);
11130 #endif /* CONFIG_SPLICE */
11131 #ifdef CONFIG_EVENTFD
11132 #if defined(TARGET_NR_eventfd)
11133 case TARGET_NR_eventfd
:
11134 ret
= get_errno(eventfd(arg1
, 0));
11136 fd_trans_register(ret
, &target_eventfd_trans
);
11140 #if defined(TARGET_NR_eventfd2)
11141 case TARGET_NR_eventfd2
:
11143 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
11144 if (arg2
& TARGET_O_NONBLOCK
) {
11145 host_flags
|= O_NONBLOCK
;
11147 if (arg2
& TARGET_O_CLOEXEC
) {
11148 host_flags
|= O_CLOEXEC
;
11150 ret
= get_errno(eventfd(arg1
, host_flags
));
11152 fd_trans_register(ret
, &target_eventfd_trans
);
11157 #endif /* CONFIG_EVENTFD */
11158 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11159 case TARGET_NR_fallocate
:
11160 #if TARGET_ABI_BITS == 32
11161 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
11162 target_offset64(arg5
, arg6
)));
11164 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
11168 #if defined(CONFIG_SYNC_FILE_RANGE)
11169 #if defined(TARGET_NR_sync_file_range)
11170 case TARGET_NR_sync_file_range
:
11171 #if TARGET_ABI_BITS == 32
11172 #if defined(TARGET_MIPS)
11173 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11174 target_offset64(arg5
, arg6
), arg7
));
11176 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
11177 target_offset64(arg4
, arg5
), arg6
));
11178 #endif /* !TARGET_MIPS */
11180 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
11184 #if defined(TARGET_NR_sync_file_range2)
11185 case TARGET_NR_sync_file_range2
:
11186 /* This is like sync_file_range but the arguments are reordered */
11187 #if TARGET_ABI_BITS == 32
11188 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11189 target_offset64(arg5
, arg6
), arg2
));
11191 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
11196 #if defined(TARGET_NR_signalfd4)
11197 case TARGET_NR_signalfd4
:
11198 return do_signalfd4(arg1
, arg2
, arg4
);
11200 #if defined(TARGET_NR_signalfd)
11201 case TARGET_NR_signalfd
:
11202 return do_signalfd4(arg1
, arg2
, 0);
11204 #if defined(CONFIG_EPOLL)
11205 #if defined(TARGET_NR_epoll_create)
11206 case TARGET_NR_epoll_create
:
11207 return get_errno(epoll_create(arg1
));
11209 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11210 case TARGET_NR_epoll_create1
:
11211 return get_errno(epoll_create1(arg1
));
11213 #if defined(TARGET_NR_epoll_ctl)
11214 case TARGET_NR_epoll_ctl
:
11216 struct epoll_event ep
;
11217 struct epoll_event
*epp
= 0;
11219 struct target_epoll_event
*target_ep
;
11220 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
11221 return -TARGET_EFAULT
;
11223 ep
.events
= tswap32(target_ep
->events
);
11224 /* The epoll_data_t union is just opaque data to the kernel,
11225 * so we transfer all 64 bits across and need not worry what
11226 * actual data type it is.
11228 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
11229 unlock_user_struct(target_ep
, arg4
, 0);
11232 return get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
11236 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11237 #if defined(TARGET_NR_epoll_wait)
11238 case TARGET_NR_epoll_wait
:
11240 #if defined(TARGET_NR_epoll_pwait)
11241 case TARGET_NR_epoll_pwait
:
11244 struct target_epoll_event
*target_ep
;
11245 struct epoll_event
*ep
;
11247 int maxevents
= arg3
;
11248 int timeout
= arg4
;
11250 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
11251 return -TARGET_EINVAL
;
11254 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
11255 maxevents
* sizeof(struct target_epoll_event
), 1);
11257 return -TARGET_EFAULT
;
11260 ep
= g_try_new(struct epoll_event
, maxevents
);
11262 unlock_user(target_ep
, arg2
, 0);
11263 return -TARGET_ENOMEM
;
11267 #if defined(TARGET_NR_epoll_pwait)
11268 case TARGET_NR_epoll_pwait
:
11270 target_sigset_t
*target_set
;
11271 sigset_t _set
, *set
= &_set
;
11274 if (arg6
!= sizeof(target_sigset_t
)) {
11275 ret
= -TARGET_EINVAL
;
11279 target_set
= lock_user(VERIFY_READ
, arg5
,
11280 sizeof(target_sigset_t
), 1);
11282 ret
= -TARGET_EFAULT
;
11285 target_to_host_sigset(set
, target_set
);
11286 unlock_user(target_set
, arg5
, 0);
11291 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
11292 set
, SIGSET_T_SIZE
));
11296 #if defined(TARGET_NR_epoll_wait)
11297 case TARGET_NR_epoll_wait
:
11298 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
11303 ret
= -TARGET_ENOSYS
;
11305 if (!is_error(ret
)) {
11307 for (i
= 0; i
< ret
; i
++) {
11308 target_ep
[i
].events
= tswap32(ep
[i
].events
);
11309 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
11311 unlock_user(target_ep
, arg2
,
11312 ret
* sizeof(struct target_epoll_event
));
11314 unlock_user(target_ep
, arg2
, 0);
11321 #ifdef TARGET_NR_prlimit64
11322 case TARGET_NR_prlimit64
:
11324 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11325 struct target_rlimit64
*target_rnew
, *target_rold
;
11326 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
11327 int resource
= target_to_host_resource(arg2
);
11329 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
11330 return -TARGET_EFAULT
;
11332 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
11333 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
11334 unlock_user_struct(target_rnew
, arg3
, 0);
11338 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
11339 if (!is_error(ret
) && arg4
) {
11340 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
11341 return -TARGET_EFAULT
;
11343 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
11344 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
11345 unlock_user_struct(target_rold
, arg4
, 1);
11350 #ifdef TARGET_NR_gethostname
11351 case TARGET_NR_gethostname
:
11353 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
11355 ret
= get_errno(gethostname(name
, arg2
));
11356 unlock_user(name
, arg1
, arg2
);
11358 ret
= -TARGET_EFAULT
;
11363 #ifdef TARGET_NR_atomic_cmpxchg_32
11364 case TARGET_NR_atomic_cmpxchg_32
:
11366 /* should use start_exclusive from main.c */
11367 abi_ulong mem_value
;
11368 if (get_user_u32(mem_value
, arg6
)) {
11369 target_siginfo_t info
;
11370 info
.si_signo
= SIGSEGV
;
11372 info
.si_code
= TARGET_SEGV_MAPERR
;
11373 info
._sifields
._sigfault
._addr
= arg6
;
11374 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
11375 QEMU_SI_FAULT
, &info
);
11379 if (mem_value
== arg2
)
11380 put_user_u32(arg1
, arg6
);
11384 #ifdef TARGET_NR_atomic_barrier
11385 case TARGET_NR_atomic_barrier
:
11386 /* Like the kernel implementation and the
11387 qemu arm barrier, no-op this? */
11391 #ifdef TARGET_NR_timer_create
11392 case TARGET_NR_timer_create
:
11394 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11396 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
11399 int timer_index
= next_free_host_timer();
11401 if (timer_index
< 0) {
11402 ret
= -TARGET_EAGAIN
;
11404 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
11407 phost_sevp
= &host_sevp
;
11408 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
11414 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
11418 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
11419 return -TARGET_EFAULT
;
11427 #ifdef TARGET_NR_timer_settime
11428 case TARGET_NR_timer_settime
:
11430 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11431 * struct itimerspec * old_value */
11432 target_timer_t timerid
= get_timer_id(arg1
);
11436 } else if (arg3
== 0) {
11437 ret
= -TARGET_EINVAL
;
11439 timer_t htimer
= g_posix_timers
[timerid
];
11440 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
11442 if (target_to_host_itimerspec(&hspec_new
, arg3
)) {
11443 return -TARGET_EFAULT
;
11446 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
11447 if (arg4
&& host_to_target_itimerspec(arg4
, &hspec_old
)) {
11448 return -TARGET_EFAULT
;
11455 #ifdef TARGET_NR_timer_gettime
11456 case TARGET_NR_timer_gettime
:
11458 /* args: timer_t timerid, struct itimerspec *curr_value */
11459 target_timer_t timerid
= get_timer_id(arg1
);
11463 } else if (!arg2
) {
11464 ret
= -TARGET_EFAULT
;
11466 timer_t htimer
= g_posix_timers
[timerid
];
11467 struct itimerspec hspec
;
11468 ret
= get_errno(timer_gettime(htimer
, &hspec
));
11470 if (host_to_target_itimerspec(arg2
, &hspec
)) {
11471 ret
= -TARGET_EFAULT
;
11478 #ifdef TARGET_NR_timer_getoverrun
11479 case TARGET_NR_timer_getoverrun
:
11481 /* args: timer_t timerid */
11482 target_timer_t timerid
= get_timer_id(arg1
);
11487 timer_t htimer
= g_posix_timers
[timerid
];
11488 ret
= get_errno(timer_getoverrun(htimer
));
11490 fd_trans_unregister(ret
);
11495 #ifdef TARGET_NR_timer_delete
11496 case TARGET_NR_timer_delete
:
11498 /* args: timer_t timerid */
11499 target_timer_t timerid
= get_timer_id(arg1
);
11504 timer_t htimer
= g_posix_timers
[timerid
];
11505 ret
= get_errno(timer_delete(htimer
));
11506 g_posix_timers
[timerid
] = 0;
11512 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11513 case TARGET_NR_timerfd_create
:
11514 return get_errno(timerfd_create(arg1
,
11515 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
11518 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11519 case TARGET_NR_timerfd_gettime
:
11521 struct itimerspec its_curr
;
11523 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
11525 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
11526 return -TARGET_EFAULT
;
11532 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11533 case TARGET_NR_timerfd_settime
:
11535 struct itimerspec its_new
, its_old
, *p_new
;
11538 if (target_to_host_itimerspec(&its_new
, arg3
)) {
11539 return -TARGET_EFAULT
;
11546 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
11548 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
11549 return -TARGET_EFAULT
;
11555 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11556 case TARGET_NR_ioprio_get
:
11557 return get_errno(ioprio_get(arg1
, arg2
));
11560 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11561 case TARGET_NR_ioprio_set
:
11562 return get_errno(ioprio_set(arg1
, arg2
, arg3
));
11565 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11566 case TARGET_NR_setns
:
11567 return get_errno(setns(arg1
, arg2
));
11569 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11570 case TARGET_NR_unshare
:
11571 return get_errno(unshare(arg1
));
11573 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
11574 case TARGET_NR_kcmp
:
11575 return get_errno(kcmp(arg1
, arg2
, arg3
, arg4
, arg5
));
11577 #ifdef TARGET_NR_swapcontext
11578 case TARGET_NR_swapcontext
:
11579 /* PowerPC specific. */
11580 return do_swapcontext(cpu_env
, arg1
, arg2
, arg3
);
11584 qemu_log_mask(LOG_UNIMP
, "Unsupported syscall: %d\n", num
);
11585 return -TARGET_ENOSYS
;
11590 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
11591 abi_long arg2
, abi_long arg3
, abi_long arg4
,
11592 abi_long arg5
, abi_long arg6
, abi_long arg7
,
11595 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
11598 #ifdef DEBUG_ERESTARTSYS
11599 /* Debug-only code for exercising the syscall-restart code paths
11600 * in the per-architecture cpu main loops: restart every syscall
11601 * the guest makes once before letting it through.
11607 return -TARGET_ERESTARTSYS
;
11612 trace_guest_user_syscall(cpu
, num
, arg1
, arg2
, arg3
, arg4
,
11613 arg5
, arg6
, arg7
, arg8
);
11615 if (unlikely(do_strace
)) {
11616 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
11617 ret
= do_syscall1(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
,
11618 arg5
, arg6
, arg7
, arg8
);
11619 print_syscall_ret(num
, ret
);
11621 ret
= do_syscall1(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
,
11622 arg5
, arg6
, arg7
, arg8
);
11625 trace_guest_user_syscall_ret(cpu
, num
, ret
);