4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
31 #include <sys/mount.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
38 #include <linux/capability.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
46 #include <sys/times.h>
49 #include <sys/statfs.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include <linux/icmpv6.h>
59 #include <linux/errqueue.h>
60 #include <linux/random.h>
62 #include <sys/timerfd.h>
65 #include <sys/eventfd.h>
68 #include <sys/epoll.h>
71 #include "qemu/xattr.h"
73 #ifdef CONFIG_SENDFILE
74 #include <sys/sendfile.h>
80 #define termios host_termios
81 #define winsize host_winsize
82 #define termio host_termio
83 #define sgttyb host_sgttyb /* same as target */
84 #define tchars host_tchars /* same as target */
85 #define ltchars host_ltchars /* same as target */
87 #include <linux/termios.h>
88 #include <linux/unistd.h>
89 #include <linux/cdrom.h>
90 #include <linux/hdreg.h>
91 #include <linux/soundcard.h>
93 #include <linux/mtio.h>
96 #if defined(CONFIG_FIEMAP)
97 #include <linux/fiemap.h>
100 #if defined(CONFIG_USBFS)
101 #include <linux/usbdevice_fs.h>
102 #include <linux/usb/ch9.h>
104 #include <linux/vt.h>
105 #include <linux/dm-ioctl.h>
106 #include <linux/reboot.h>
107 #include <linux/route.h>
108 #include <linux/filter.h>
109 #include <linux/blkpg.h>
110 #include <netpacket/packet.h>
111 #include <linux/netlink.h>
112 #include <linux/if_alg.h>
113 #include <linux/rtc.h>
114 #include <sound/asound.h>
115 #include "linux_loop.h"
119 #include "qemu/guest-random.h"
120 #include "user/syscall-trace.h"
121 #include "qapi/error.h"
122 #include "fd-trans.h"
126 #define CLONE_IO 0x80000000 /* Clone io context */
129 /* We can't directly call the host clone syscall, because this will
130 * badly confuse libc (breaking mutexes, for example). So we must
131 * divide clone flags into:
132 * * flag combinations that look like pthread_create()
133 * * flag combinations that look like fork()
134 * * flags we can implement within QEMU itself
135 * * flags we can't support and will return an error for
137 /* For thread creation, all these flags must be present; for
138 * fork, none must be present.
140 #define CLONE_THREAD_FLAGS \
141 (CLONE_VM | CLONE_FS | CLONE_FILES | \
142 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
144 /* These flags are ignored:
145 * CLONE_DETACHED is now ignored by the kernel;
146 * CLONE_IO is just an optimisation hint to the I/O scheduler
148 #define CLONE_IGNORED_FLAGS \
149 (CLONE_DETACHED | CLONE_IO)
151 /* Flags for fork which we can implement within QEMU itself */
152 #define CLONE_OPTIONAL_FORK_FLAGS \
153 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
154 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
156 /* Flags for thread creation which we can implement within QEMU itself */
157 #define CLONE_OPTIONAL_THREAD_FLAGS \
158 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
159 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
161 #define CLONE_INVALID_FORK_FLAGS \
162 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
164 #define CLONE_INVALID_THREAD_FLAGS \
165 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
166 CLONE_IGNORED_FLAGS))
168 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
169 * have almost all been allocated. We cannot support any of
170 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
171 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
172 * The checks against the invalid thread masks above will catch these.
173 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
176 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
177 * once. This exercises the codepaths for restart.
179 //#define DEBUG_ERESTARTSYS
181 //#include <linux/msdos_fs.h>
182 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
183 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
193 #define _syscall0(type,name) \
194 static type name (void) \
196 return syscall(__NR_##name); \
199 #define _syscall1(type,name,type1,arg1) \
200 static type name (type1 arg1) \
202 return syscall(__NR_##name, arg1); \
205 #define _syscall2(type,name,type1,arg1,type2,arg2) \
206 static type name (type1 arg1,type2 arg2) \
208 return syscall(__NR_##name, arg1, arg2); \
211 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
212 static type name (type1 arg1,type2 arg2,type3 arg3) \
214 return syscall(__NR_##name, arg1, arg2, arg3); \
217 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
218 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
220 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
223 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
225 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
227 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
231 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
232 type5,arg5,type6,arg6) \
233 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
236 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
240 #define __NR_sys_uname __NR_uname
241 #define __NR_sys_getcwd1 __NR_getcwd
242 #define __NR_sys_getdents __NR_getdents
243 #define __NR_sys_getdents64 __NR_getdents64
244 #define __NR_sys_getpriority __NR_getpriority
245 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
246 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
247 #define __NR_sys_syslog __NR_syslog
248 #define __NR_sys_futex __NR_futex
249 #define __NR_sys_inotify_init __NR_inotify_init
250 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
251 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
252 #define __NR_sys_statx __NR_statx
254 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
255 #define __NR__llseek __NR_lseek
258 /* Newer kernel ports have llseek() instead of _llseek() */
259 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
260 #define TARGET_NR__llseek TARGET_NR_llseek
263 #define __NR_sys_gettid __NR_gettid
264 _syscall0(int, sys_gettid
)
266 /* For the 64-bit guest on 32-bit host case we must emulate
267 * getdents using getdents64, because otherwise the host
268 * might hand us back more dirent records than we can fit
269 * into the guest buffer after structure format conversion.
270 * Otherwise we emulate getdents with getdents if the host has it.
272 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
273 #define EMULATE_GETDENTS_WITH_GETDENTS
276 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
277 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
279 #if (defined(TARGET_NR_getdents) && \
280 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
281 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
282 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
284 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
285 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
286 loff_t
*, res
, uint
, wh
);
288 _syscall3(int, sys_rt_sigqueueinfo
, pid_t
, pid
, int, sig
, siginfo_t
*, uinfo
)
289 _syscall4(int, sys_rt_tgsigqueueinfo
, pid_t
, pid
, pid_t
, tid
, int, sig
,
291 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
292 #ifdef __NR_exit_group
293 _syscall1(int,exit_group
,int,error_code
)
295 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
296 _syscall1(int,set_tid_address
,int *,tidptr
)
298 #if defined(TARGET_NR_futex) && defined(__NR_futex)
299 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
300 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
302 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
303 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
304 unsigned long *, user_mask_ptr
);
305 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
306 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
307 unsigned long *, user_mask_ptr
);
308 #define __NR_sys_getcpu __NR_getcpu
309 _syscall3(int, sys_getcpu
, unsigned *, cpu
, unsigned *, node
, void *, tcache
);
310 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
312 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
313 struct __user_cap_data_struct
*, data
);
314 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
315 struct __user_cap_data_struct
*, data
);
316 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
317 _syscall2(int, ioprio_get
, int, which
, int, who
)
319 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
320 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
322 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
323 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
326 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
327 _syscall5(int, kcmp
, pid_t
, pid1
, pid_t
, pid2
, int, type
,
328 unsigned long, idx1
, unsigned long, idx2
)
332 * It is assumed that struct statx is architecture independent.
334 #if defined(TARGET_NR_statx) && defined(__NR_statx)
335 _syscall5(int, sys_statx
, int, dirfd
, const char *, pathname
, int, flags
,
336 unsigned int, mask
, struct target_statx
*, statxbuf
)
338 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
339 _syscall2(int, membarrier
, int, cmd
, int, flags
)
342 static bitmask_transtbl fcntl_flags_tbl
[] = {
343 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
344 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
345 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
346 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
347 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
348 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
349 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
350 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
351 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
352 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
353 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
354 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
355 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
356 #if defined(O_DIRECT)
357 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
359 #if defined(O_NOATIME)
360 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
362 #if defined(O_CLOEXEC)
363 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
366 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
368 #if defined(O_TMPFILE)
369 { TARGET_O_TMPFILE
, TARGET_O_TMPFILE
, O_TMPFILE
, O_TMPFILE
},
371 /* Don't terminate the list prematurely on 64-bit host+guest. */
372 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
373 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
/* Wrapper around host getcwd() that returns the length of the result
 * (including the trailing NUL) on success, matching the getcwd syscall
 * convention, or -1 with errno set on failure.
 */
static int sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }
  return strlen(buf)+1;
}
387 #ifdef TARGET_NR_utimensat
388 #if defined(__NR_utimensat)
389 #define __NR_sys_utimensat __NR_utimensat
390 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
391 const struct timespec
*,tsp
,int,flags
)
/* Fallback for hosts without the utimensat syscall: always fail
 * with ENOSYS so the caller can report the syscall as unsupported.
 */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
400 #endif /* TARGET_NR_utimensat */
402 #ifdef TARGET_NR_renameat2
403 #if defined(__NR_renameat2)
404 #define __NR_sys_renameat2 __NR_renameat2
405 _syscall5(int, sys_renameat2
, int, oldfd
, const char *, old
, int, newfd
,
406 const char *, new, unsigned int, flags
)
/* Fallback for hosts without the renameat2 syscall: degrade to plain
 * renameat() when no flags are requested, otherwise fail with ENOSYS
 * (the flag behaviours cannot be emulated).
 */
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
418 #endif /* TARGET_NR_renameat2 */
420 #ifdef CONFIG_INOTIFY
421 #include <sys/inotify.h>
423 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Host wrapper: create an inotify instance via the host libc. */
static int sys_inotify_init(void)
{
  return (inotify_init());
}
429 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Host wrapper: add a watch for 'pathname' with event mask 'mask'
 * to the inotify instance 'fd'; returns the watch descriptor or -1.
 */
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
435 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Host wrapper: remove watch descriptor 'wd' from inotify instance 'fd'. */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
441 #ifdef CONFIG_INOTIFY1
442 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Host wrapper: create an inotify instance with creation flags
 * (IN_NONBLOCK / IN_CLOEXEC) via the host libc.
 */
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
450 /* Userspace can usually survive runtime without inotify */
451 #undef TARGET_NR_inotify_init
452 #undef TARGET_NR_inotify_init1
453 #undef TARGET_NR_inotify_add_watch
454 #undef TARGET_NR_inotify_rm_watch
455 #endif /* CONFIG_INOTIFY */
457 #if defined(TARGET_NR_prlimit64)
458 #ifndef __NR_prlimit64
459 # define __NR_prlimit64 -1
461 #define __NR_sys_prlimit64 __NR_prlimit64
462 /* The glibc rlimit structure may not be that used by the underlying syscall */
463 struct host_rlimit64
{
467 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
468 const struct host_rlimit64
*, new_limit
,
469 struct host_rlimit64
*, old_limit
)
473 #if defined(TARGET_NR_timer_create)
474 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
475 static timer_t g_posix_timers
[32] = { 0, } ;
477 static inline int next_free_host_timer(void)
480 /* FIXME: Does finding the next free slot require a lock? */
481 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
482 if (g_posix_timers
[k
] == 0) {
483 g_posix_timers
[k
] = (timer_t
) 1;
491 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
493 static inline int regpairs_aligned(void *cpu_env
, int num
)
495 return ((((CPUARMState
*)cpu_env
)->eabi
) == 1) ;
497 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
498 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
499 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
500 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
501 * of registers which translates to the same as ARM/MIPS, because we start with
503 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
504 #elif defined(TARGET_SH4)
505 /* SH4 doesn't align register pairs, except for p{read,write}64 */
506 static inline int regpairs_aligned(void *cpu_env
, int num
)
509 case TARGET_NR_pread64
:
510 case TARGET_NR_pwrite64
:
517 #elif defined(TARGET_XTENSA)
518 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
520 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 0; }
523 #define ERRNO_TABLE_SIZE 1200
525 /* target_to_host_errno_table[] is initialized from
526 * host_to_target_errno_table[] in syscall_init(). */
527 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
531 * This list is the union of errno values overridden in asm-<arch>/errno.h
532 * minus the errnos that are not actually generic to all archs.
534 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
535 [EAGAIN
] = TARGET_EAGAIN
,
536 [EIDRM
] = TARGET_EIDRM
,
537 [ECHRNG
] = TARGET_ECHRNG
,
538 [EL2NSYNC
] = TARGET_EL2NSYNC
,
539 [EL3HLT
] = TARGET_EL3HLT
,
540 [EL3RST
] = TARGET_EL3RST
,
541 [ELNRNG
] = TARGET_ELNRNG
,
542 [EUNATCH
] = TARGET_EUNATCH
,
543 [ENOCSI
] = TARGET_ENOCSI
,
544 [EL2HLT
] = TARGET_EL2HLT
,
545 [EDEADLK
] = TARGET_EDEADLK
,
546 [ENOLCK
] = TARGET_ENOLCK
,
547 [EBADE
] = TARGET_EBADE
,
548 [EBADR
] = TARGET_EBADR
,
549 [EXFULL
] = TARGET_EXFULL
,
550 [ENOANO
] = TARGET_ENOANO
,
551 [EBADRQC
] = TARGET_EBADRQC
,
552 [EBADSLT
] = TARGET_EBADSLT
,
553 [EBFONT
] = TARGET_EBFONT
,
554 [ENOSTR
] = TARGET_ENOSTR
,
555 [ENODATA
] = TARGET_ENODATA
,
556 [ETIME
] = TARGET_ETIME
,
557 [ENOSR
] = TARGET_ENOSR
,
558 [ENONET
] = TARGET_ENONET
,
559 [ENOPKG
] = TARGET_ENOPKG
,
560 [EREMOTE
] = TARGET_EREMOTE
,
561 [ENOLINK
] = TARGET_ENOLINK
,
562 [EADV
] = TARGET_EADV
,
563 [ESRMNT
] = TARGET_ESRMNT
,
564 [ECOMM
] = TARGET_ECOMM
,
565 [EPROTO
] = TARGET_EPROTO
,
566 [EDOTDOT
] = TARGET_EDOTDOT
,
567 [EMULTIHOP
] = TARGET_EMULTIHOP
,
568 [EBADMSG
] = TARGET_EBADMSG
,
569 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
570 [EOVERFLOW
] = TARGET_EOVERFLOW
,
571 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
572 [EBADFD
] = TARGET_EBADFD
,
573 [EREMCHG
] = TARGET_EREMCHG
,
574 [ELIBACC
] = TARGET_ELIBACC
,
575 [ELIBBAD
] = TARGET_ELIBBAD
,
576 [ELIBSCN
] = TARGET_ELIBSCN
,
577 [ELIBMAX
] = TARGET_ELIBMAX
,
578 [ELIBEXEC
] = TARGET_ELIBEXEC
,
579 [EILSEQ
] = TARGET_EILSEQ
,
580 [ENOSYS
] = TARGET_ENOSYS
,
581 [ELOOP
] = TARGET_ELOOP
,
582 [ERESTART
] = TARGET_ERESTART
,
583 [ESTRPIPE
] = TARGET_ESTRPIPE
,
584 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
585 [EUSERS
] = TARGET_EUSERS
,
586 [ENOTSOCK
] = TARGET_ENOTSOCK
,
587 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
588 [EMSGSIZE
] = TARGET_EMSGSIZE
,
589 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
590 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
591 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
592 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
593 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
594 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
595 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
596 [EADDRINUSE
] = TARGET_EADDRINUSE
,
597 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
598 [ENETDOWN
] = TARGET_ENETDOWN
,
599 [ENETUNREACH
] = TARGET_ENETUNREACH
,
600 [ENETRESET
] = TARGET_ENETRESET
,
601 [ECONNABORTED
] = TARGET_ECONNABORTED
,
602 [ECONNRESET
] = TARGET_ECONNRESET
,
603 [ENOBUFS
] = TARGET_ENOBUFS
,
604 [EISCONN
] = TARGET_EISCONN
,
605 [ENOTCONN
] = TARGET_ENOTCONN
,
606 [EUCLEAN
] = TARGET_EUCLEAN
,
607 [ENOTNAM
] = TARGET_ENOTNAM
,
608 [ENAVAIL
] = TARGET_ENAVAIL
,
609 [EISNAM
] = TARGET_EISNAM
,
610 [EREMOTEIO
] = TARGET_EREMOTEIO
,
611 [EDQUOT
] = TARGET_EDQUOT
,
612 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
613 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
614 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
615 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
616 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
617 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
618 [EALREADY
] = TARGET_EALREADY
,
619 [EINPROGRESS
] = TARGET_EINPROGRESS
,
620 [ESTALE
] = TARGET_ESTALE
,
621 [ECANCELED
] = TARGET_ECANCELED
,
622 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
623 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
625 [ENOKEY
] = TARGET_ENOKEY
,
628 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
631 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
634 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
637 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
639 #ifdef ENOTRECOVERABLE
640 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
643 [ENOMSG
] = TARGET_ENOMSG
,
646 [ERFKILL
] = TARGET_ERFKILL
,
649 [EHWPOISON
] = TARGET_EHWPOISON
,
653 static inline int host_to_target_errno(int err
)
655 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
656 host_to_target_errno_table
[err
]) {
657 return host_to_target_errno_table
[err
];
662 static inline int target_to_host_errno(int err
)
664 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
665 target_to_host_errno_table
[err
]) {
666 return target_to_host_errno_table
[err
];
671 static inline abi_long
get_errno(abi_long ret
)
674 return -host_to_target_errno(errno
);
679 const char *target_strerror(int err
)
681 if (err
== TARGET_ERESTARTSYS
) {
682 return "To be restarted";
684 if (err
== TARGET_QEMU_ESIGRETURN
) {
685 return "Successful exit from sigreturn";
688 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
691 return strerror(target_to_host_errno(err
));
694 #define safe_syscall0(type, name) \
695 static type safe_##name(void) \
697 return safe_syscall(__NR_##name); \
700 #define safe_syscall1(type, name, type1, arg1) \
701 static type safe_##name(type1 arg1) \
703 return safe_syscall(__NR_##name, arg1); \
706 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
707 static type safe_##name(type1 arg1, type2 arg2) \
709 return safe_syscall(__NR_##name, arg1, arg2); \
712 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
713 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
715 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
718 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
720 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
722 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
725 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
726 type4, arg4, type5, arg5) \
727 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
730 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
733 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
734 type4, arg4, type5, arg5, type6, arg6) \
735 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
736 type5 arg5, type6 arg6) \
738 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
741 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
742 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
743 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
744 int, flags
, mode_t
, mode
)
745 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
746 struct rusage
*, rusage
)
747 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
748 int, options
, struct rusage
*, rusage
)
749 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
750 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
751 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
752 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
753 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
755 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
756 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
758 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
759 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
760 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
761 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
762 safe_syscall2(int, tkill
, int, tid
, int, sig
)
763 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
764 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
765 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
766 safe_syscall5(ssize_t
, preadv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
767 unsigned long, pos_l
, unsigned long, pos_h
)
768 safe_syscall5(ssize_t
, pwritev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
769 unsigned long, pos_l
, unsigned long, pos_h
)
770 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
772 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
773 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
774 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
775 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
776 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
777 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
778 safe_syscall2(int, flock
, int, fd
, int, operation
)
779 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
780 const struct timespec
*, uts
, size_t, sigsetsize
)
781 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
783 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
784 struct timespec
*, rem
)
785 #ifdef TARGET_NR_clock_nanosleep
786 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
787 const struct timespec
*, req
, struct timespec
*, rem
)
790 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
791 void *, ptr
, long, fifth
)
794 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
798 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
799 long, msgtype
, int, flags
)
801 #ifdef __NR_semtimedop
802 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
803 unsigned, nsops
, const struct timespec
*, timeout
)
805 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
806 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
807 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
808 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
809 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
811 /* We do ioctl like this rather than via safe_syscall3 to preserve the
812 * "third argument might be integer or pointer or not present" behaviour of
815 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
816 /* Similarly for fcntl. Note that callers must always:
817 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
818 * use the flock64 struct rather than unsuffixed flock
819 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
822 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
824 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
827 static inline int host_to_target_sock_type(int host_type
)
831 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
833 target_type
= TARGET_SOCK_DGRAM
;
836 target_type
= TARGET_SOCK_STREAM
;
839 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
843 #if defined(SOCK_CLOEXEC)
844 if (host_type
& SOCK_CLOEXEC
) {
845 target_type
|= TARGET_SOCK_CLOEXEC
;
849 #if defined(SOCK_NONBLOCK)
850 if (host_type
& SOCK_NONBLOCK
) {
851 target_type
|= TARGET_SOCK_NONBLOCK
;
858 static abi_ulong target_brk
;
859 static abi_ulong target_original_brk
;
860 static abi_ulong brk_page
;
862 void target_set_brk(abi_ulong new_brk
)
864 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
865 brk_page
= HOST_PAGE_ALIGN(target_brk
);
868 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
869 #define DEBUGF_BRK(message, args...)
871 /* do_brk() must return target values and target errnos. */
872 abi_long
do_brk(abi_ulong new_brk
)
874 abi_long mapped_addr
;
875 abi_ulong new_alloc_size
;
877 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
880 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
883 if (new_brk
< target_original_brk
) {
884 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
889 /* If the new brk is less than the highest page reserved to the
890 * target heap allocation, set it and we're almost done... */
891 if (new_brk
<= brk_page
) {
892 /* Heap contents are initialized to zero, as for anonymous
894 if (new_brk
> target_brk
) {
895 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
897 target_brk
= new_brk
;
898 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
902 /* We need to allocate more memory after the brk... Note that
903 * we don't use MAP_FIXED because that will map over the top of
904 * any existing mapping (like the one with the host libc or qemu
905 * itself); instead we treat "mapped but at wrong address" as
906 * a failure and unmap again.
908 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
909 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
910 PROT_READ
|PROT_WRITE
,
911 MAP_ANON
|MAP_PRIVATE
, 0, 0));
913 if (mapped_addr
== brk_page
) {
914 /* Heap contents are initialized to zero, as for anonymous
915 * mapped pages. Technically the new pages are already
916 * initialized to zero since they *are* anonymous mapped
917 * pages, however we have to take care with the contents that
918 * come from the remaining part of the previous page: it may
919 * contains garbage data due to a previous heap usage (grown
921 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
923 target_brk
= new_brk
;
924 brk_page
= HOST_PAGE_ALIGN(target_brk
);
925 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
928 } else if (mapped_addr
!= -1) {
929 /* Mapped but at wrong address, meaning there wasn't actually
930 * enough space for this brk.
932 target_munmap(mapped_addr
, new_alloc_size
);
934 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
937 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
940 #if defined(TARGET_ALPHA)
941 /* We (partially) emulate OSF/1 on Alpha, which requires we
942 return a proper errno, not an unchanged brk value. */
943 return -TARGET_ENOMEM
;
945 /* For everything else, return the previous break. */
949 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
950 abi_ulong target_fds_addr
,
954 abi_ulong b
, *target_fds
;
956 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
957 if (!(target_fds
= lock_user(VERIFY_READ
,
959 sizeof(abi_ulong
) * nw
,
961 return -TARGET_EFAULT
;
965 for (i
= 0; i
< nw
; i
++) {
966 /* grab the abi_ulong */
967 __get_user(b
, &target_fds
[i
]);
968 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
969 /* check the bit inside the abi_ulong */
976 unlock_user(target_fds
, target_fds_addr
, 0);
981 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
982 abi_ulong target_fds_addr
,
985 if (target_fds_addr
) {
986 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
987 return -TARGET_EFAULT
;
995 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
1001 abi_ulong
*target_fds
;
1003 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1004 if (!(target_fds
= lock_user(VERIFY_WRITE
,
1006 sizeof(abi_ulong
) * nw
,
1008 return -TARGET_EFAULT
;
1011 for (i
= 0; i
< nw
; i
++) {
1013 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1014 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
1017 __put_user(v
, &target_fds
[i
]);
1020 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
1025 #if defined(__alpha__)
1026 #define HOST_HZ 1024
1031 static inline abi_long
host_to_target_clock_t(long ticks
)
1033 #if HOST_HZ == TARGET_HZ
1036 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
1040 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
1041 const struct rusage
*rusage
)
1043 struct target_rusage
*target_rusage
;
1045 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
1046 return -TARGET_EFAULT
;
1047 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
1048 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
1049 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
1050 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
1051 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
1052 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
1053 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
1054 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
1055 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
1056 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
1057 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1058 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1059 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1060 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1061 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1062 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1063 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1064 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1065 unlock_user_struct(target_rusage
, target_addr
, 1);
1070 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1072 abi_ulong target_rlim_swap
;
1075 target_rlim_swap
= tswapal(target_rlim
);
1076 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1077 return RLIM_INFINITY
;
1079 result
= target_rlim_swap
;
1080 if (target_rlim_swap
!= (rlim_t
)result
)
1081 return RLIM_INFINITY
;
1086 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1088 abi_ulong target_rlim_swap
;
1091 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1092 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1094 target_rlim_swap
= rlim
;
1095 result
= tswapal(target_rlim_swap
);
1100 static inline int target_to_host_resource(int code
)
1103 case TARGET_RLIMIT_AS
:
1105 case TARGET_RLIMIT_CORE
:
1107 case TARGET_RLIMIT_CPU
:
1109 case TARGET_RLIMIT_DATA
:
1111 case TARGET_RLIMIT_FSIZE
:
1112 return RLIMIT_FSIZE
;
1113 case TARGET_RLIMIT_LOCKS
:
1114 return RLIMIT_LOCKS
;
1115 case TARGET_RLIMIT_MEMLOCK
:
1116 return RLIMIT_MEMLOCK
;
1117 case TARGET_RLIMIT_MSGQUEUE
:
1118 return RLIMIT_MSGQUEUE
;
1119 case TARGET_RLIMIT_NICE
:
1121 case TARGET_RLIMIT_NOFILE
:
1122 return RLIMIT_NOFILE
;
1123 case TARGET_RLIMIT_NPROC
:
1124 return RLIMIT_NPROC
;
1125 case TARGET_RLIMIT_RSS
:
1127 case TARGET_RLIMIT_RTPRIO
:
1128 return RLIMIT_RTPRIO
;
1129 case TARGET_RLIMIT_SIGPENDING
:
1130 return RLIMIT_SIGPENDING
;
1131 case TARGET_RLIMIT_STACK
:
1132 return RLIMIT_STACK
;
1138 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1139 abi_ulong target_tv_addr
)
1141 struct target_timeval
*target_tv
;
1143 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1)) {
1144 return -TARGET_EFAULT
;
1147 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1148 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1150 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1155 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1156 const struct timeval
*tv
)
1158 struct target_timeval
*target_tv
;
1160 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1161 return -TARGET_EFAULT
;
1164 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1165 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1167 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1172 static inline abi_long
copy_to_user_timeval64(abi_ulong target_tv_addr
,
1173 const struct timeval
*tv
)
1175 struct target__kernel_sock_timeval
*target_tv
;
1177 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1178 return -TARGET_EFAULT
;
1181 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1182 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1184 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1189 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
1190 abi_ulong target_addr
)
1192 struct target_timespec
*target_ts
;
1194 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1)) {
1195 return -TARGET_EFAULT
;
1197 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1198 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1199 unlock_user_struct(target_ts
, target_addr
, 0);
1203 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
1204 struct timespec
*host_ts
)
1206 struct target_timespec
*target_ts
;
1208 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1209 return -TARGET_EFAULT
;
1211 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1212 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1213 unlock_user_struct(target_ts
, target_addr
, 1);
1217 static inline abi_long
host_to_target_timespec64(abi_ulong target_addr
,
1218 struct timespec
*host_ts
)
1220 struct target__kernel_timespec
*target_ts
;
1222 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1223 return -TARGET_EFAULT
;
1225 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1226 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1227 unlock_user_struct(target_ts
, target_addr
, 1);
1231 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1232 abi_ulong target_tz_addr
)
1234 struct target_timezone
*target_tz
;
1236 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1237 return -TARGET_EFAULT
;
1240 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1241 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1243 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1248 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1251 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1252 abi_ulong target_mq_attr_addr
)
1254 struct target_mq_attr
*target_mq_attr
;
1256 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1257 target_mq_attr_addr
, 1))
1258 return -TARGET_EFAULT
;
1260 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1261 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1262 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1263 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1265 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1270 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1271 const struct mq_attr
*attr
)
1273 struct target_mq_attr
*target_mq_attr
;
1275 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1276 target_mq_attr_addr
, 0))
1277 return -TARGET_EFAULT
;
1279 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1280 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1281 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1282 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1284 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1290 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1291 /* do_select() must return target values and target errnos. */
1292 static abi_long
do_select(int n
,
1293 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1294 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1296 fd_set rfds
, wfds
, efds
;
1297 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1299 struct timespec ts
, *ts_ptr
;
1302 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1306 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1310 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1315 if (target_tv_addr
) {
1316 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1317 return -TARGET_EFAULT
;
1318 ts
.tv_sec
= tv
.tv_sec
;
1319 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1325 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1328 if (!is_error(ret
)) {
1329 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1330 return -TARGET_EFAULT
;
1331 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1332 return -TARGET_EFAULT
;
1333 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1334 return -TARGET_EFAULT
;
1336 if (target_tv_addr
) {
1337 tv
.tv_sec
= ts
.tv_sec
;
1338 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1339 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1340 return -TARGET_EFAULT
;
1348 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1349 static abi_long
do_old_select(abi_ulong arg1
)
1351 struct target_sel_arg_struct
*sel
;
1352 abi_ulong inp
, outp
, exp
, tvp
;
1355 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1)) {
1356 return -TARGET_EFAULT
;
1359 nsel
= tswapal(sel
->n
);
1360 inp
= tswapal(sel
->inp
);
1361 outp
= tswapal(sel
->outp
);
1362 exp
= tswapal(sel
->exp
);
1363 tvp
= tswapal(sel
->tvp
);
1365 unlock_user_struct(sel
, arg1
, 0);
1367 return do_select(nsel
, inp
, outp
, exp
, tvp
);
1372 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1375 return pipe2(host_pipe
, flags
);
1381 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1382 int flags
, int is_pipe2
)
1386 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1389 return get_errno(ret
);
1391 /* Several targets have special calling conventions for the original
1392 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1394 #if defined(TARGET_ALPHA)
1395 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1396 return host_pipe
[0];
1397 #elif defined(TARGET_MIPS)
1398 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1399 return host_pipe
[0];
1400 #elif defined(TARGET_SH4)
1401 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1402 return host_pipe
[0];
1403 #elif defined(TARGET_SPARC)
1404 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1405 return host_pipe
[0];
1409 if (put_user_s32(host_pipe
[0], pipedes
)
1410 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1411 return -TARGET_EFAULT
;
1412 return get_errno(ret
);
1415 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1416 abi_ulong target_addr
,
1419 struct target_ip_mreqn
*target_smreqn
;
1421 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1423 return -TARGET_EFAULT
;
1424 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1425 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1426 if (len
== sizeof(struct target_ip_mreqn
))
1427 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1428 unlock_user(target_smreqn
, target_addr
, 0);
1433 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1434 abi_ulong target_addr
,
1437 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1438 sa_family_t sa_family
;
1439 struct target_sockaddr
*target_saddr
;
1441 if (fd_trans_target_to_host_addr(fd
)) {
1442 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1445 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1447 return -TARGET_EFAULT
;
1449 sa_family
= tswap16(target_saddr
->sa_family
);
1451 /* Oops. The caller might send a incomplete sun_path; sun_path
1452 * must be terminated by \0 (see the manual page), but
1453 * unfortunately it is quite common to specify sockaddr_un
1454 * length as "strlen(x->sun_path)" while it should be
1455 * "strlen(...) + 1". We'll fix that here if needed.
1456 * Linux kernel has a similar feature.
1459 if (sa_family
== AF_UNIX
) {
1460 if (len
< unix_maxlen
&& len
> 0) {
1461 char *cp
= (char*)target_saddr
;
1463 if ( cp
[len
-1] && !cp
[len
] )
1466 if (len
> unix_maxlen
)
1470 memcpy(addr
, target_saddr
, len
);
1471 addr
->sa_family
= sa_family
;
1472 if (sa_family
== AF_NETLINK
) {
1473 struct sockaddr_nl
*nladdr
;
1475 nladdr
= (struct sockaddr_nl
*)addr
;
1476 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1477 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1478 } else if (sa_family
== AF_PACKET
) {
1479 struct target_sockaddr_ll
*lladdr
;
1481 lladdr
= (struct target_sockaddr_ll
*)addr
;
1482 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1483 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1485 unlock_user(target_saddr
, target_addr
, 0);
1490 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1491 struct sockaddr
*addr
,
1494 struct target_sockaddr
*target_saddr
;
1501 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1503 return -TARGET_EFAULT
;
1504 memcpy(target_saddr
, addr
, len
);
1505 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1506 sizeof(target_saddr
->sa_family
)) {
1507 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1509 if (addr
->sa_family
== AF_NETLINK
&&
1510 len
>= sizeof(struct target_sockaddr_nl
)) {
1511 struct target_sockaddr_nl
*target_nl
=
1512 (struct target_sockaddr_nl
*)target_saddr
;
1513 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1514 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1515 } else if (addr
->sa_family
== AF_PACKET
) {
1516 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1517 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1518 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1519 } else if (addr
->sa_family
== AF_INET6
&&
1520 len
>= sizeof(struct target_sockaddr_in6
)) {
1521 struct target_sockaddr_in6
*target_in6
=
1522 (struct target_sockaddr_in6
*)target_saddr
;
1523 target_in6
->sin6_scope_id
= tswap16(target_in6
->sin6_scope_id
);
1525 unlock_user(target_saddr
, target_addr
, len
);
1530 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1531 struct target_msghdr
*target_msgh
)
1533 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1534 abi_long msg_controllen
;
1535 abi_ulong target_cmsg_addr
;
1536 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1537 socklen_t space
= 0;
1539 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1540 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1542 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1543 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1544 target_cmsg_start
= target_cmsg
;
1546 return -TARGET_EFAULT
;
1548 while (cmsg
&& target_cmsg
) {
1549 void *data
= CMSG_DATA(cmsg
);
1550 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1552 int len
= tswapal(target_cmsg
->cmsg_len
)
1553 - sizeof(struct target_cmsghdr
);
1555 space
+= CMSG_SPACE(len
);
1556 if (space
> msgh
->msg_controllen
) {
1557 space
-= CMSG_SPACE(len
);
1558 /* This is a QEMU bug, since we allocated the payload
1559 * area ourselves (unlike overflow in host-to-target
1560 * conversion, which is just the guest giving us a buffer
1561 * that's too small). It can't happen for the payload types
1562 * we currently support; if it becomes an issue in future
1563 * we would need to improve our allocation strategy to
1564 * something more intelligent than "twice the size of the
1565 * target buffer we're reading from".
1567 qemu_log_mask(LOG_UNIMP
,
1568 ("Unsupported ancillary data %d/%d: "
1569 "unhandled msg size\n"),
1570 tswap32(target_cmsg
->cmsg_level
),
1571 tswap32(target_cmsg
->cmsg_type
));
1575 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1576 cmsg
->cmsg_level
= SOL_SOCKET
;
1578 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1580 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1581 cmsg
->cmsg_len
= CMSG_LEN(len
);
1583 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1584 int *fd
= (int *)data
;
1585 int *target_fd
= (int *)target_data
;
1586 int i
, numfds
= len
/ sizeof(int);
1588 for (i
= 0; i
< numfds
; i
++) {
1589 __get_user(fd
[i
], target_fd
+ i
);
1591 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1592 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1593 struct ucred
*cred
= (struct ucred
*)data
;
1594 struct target_ucred
*target_cred
=
1595 (struct target_ucred
*)target_data
;
1597 __get_user(cred
->pid
, &target_cred
->pid
);
1598 __get_user(cred
->uid
, &target_cred
->uid
);
1599 __get_user(cred
->gid
, &target_cred
->gid
);
1601 qemu_log_mask(LOG_UNIMP
, "Unsupported ancillary data: %d/%d\n",
1602 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1603 memcpy(data
, target_data
, len
);
1606 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1607 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1610 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1612 msgh
->msg_controllen
= space
;
1616 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1617 struct msghdr
*msgh
)
1619 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1620 abi_long msg_controllen
;
1621 abi_ulong target_cmsg_addr
;
1622 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1623 socklen_t space
= 0;
1625 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1626 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1628 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1629 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1630 target_cmsg_start
= target_cmsg
;
1632 return -TARGET_EFAULT
;
1634 while (cmsg
&& target_cmsg
) {
1635 void *data
= CMSG_DATA(cmsg
);
1636 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1638 int len
= cmsg
->cmsg_len
- sizeof(struct cmsghdr
);
1639 int tgt_len
, tgt_space
;
1641 /* We never copy a half-header but may copy half-data;
1642 * this is Linux's behaviour in put_cmsg(). Note that
1643 * truncation here is a guest problem (which we report
1644 * to the guest via the CTRUNC bit), unlike truncation
1645 * in target_to_host_cmsg, which is a QEMU bug.
1647 if (msg_controllen
< sizeof(struct target_cmsghdr
)) {
1648 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1652 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1653 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1655 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1657 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1659 /* Payload types which need a different size of payload on
1660 * the target must adjust tgt_len here.
1663 switch (cmsg
->cmsg_level
) {
1665 switch (cmsg
->cmsg_type
) {
1667 tgt_len
= sizeof(struct target_timeval
);
1677 if (msg_controllen
< TARGET_CMSG_LEN(tgt_len
)) {
1678 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1679 tgt_len
= msg_controllen
- sizeof(struct target_cmsghdr
);
1682 /* We must now copy-and-convert len bytes of payload
1683 * into tgt_len bytes of destination space. Bear in mind
1684 * that in both source and destination we may be dealing
1685 * with a truncated value!
1687 switch (cmsg
->cmsg_level
) {
1689 switch (cmsg
->cmsg_type
) {
1692 int *fd
= (int *)data
;
1693 int *target_fd
= (int *)target_data
;
1694 int i
, numfds
= tgt_len
/ sizeof(int);
1696 for (i
= 0; i
< numfds
; i
++) {
1697 __put_user(fd
[i
], target_fd
+ i
);
1703 struct timeval
*tv
= (struct timeval
*)data
;
1704 struct target_timeval
*target_tv
=
1705 (struct target_timeval
*)target_data
;
1707 if (len
!= sizeof(struct timeval
) ||
1708 tgt_len
!= sizeof(struct target_timeval
)) {
1712 /* copy struct timeval to target */
1713 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1714 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1717 case SCM_CREDENTIALS
:
1719 struct ucred
*cred
= (struct ucred
*)data
;
1720 struct target_ucred
*target_cred
=
1721 (struct target_ucred
*)target_data
;
1723 __put_user(cred
->pid
, &target_cred
->pid
);
1724 __put_user(cred
->uid
, &target_cred
->uid
);
1725 __put_user(cred
->gid
, &target_cred
->gid
);
1734 switch (cmsg
->cmsg_type
) {
1737 uint32_t *v
= (uint32_t *)data
;
1738 uint32_t *t_int
= (uint32_t *)target_data
;
1740 if (len
!= sizeof(uint32_t) ||
1741 tgt_len
!= sizeof(uint32_t)) {
1744 __put_user(*v
, t_int
);
1750 struct sock_extended_err ee
;
1751 struct sockaddr_in offender
;
1753 struct errhdr_t
*errh
= (struct errhdr_t
*)data
;
1754 struct errhdr_t
*target_errh
=
1755 (struct errhdr_t
*)target_data
;
1757 if (len
!= sizeof(struct errhdr_t
) ||
1758 tgt_len
!= sizeof(struct errhdr_t
)) {
1761 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1762 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1763 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1764 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1765 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1766 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1767 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1768 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1769 (void *) &errh
->offender
, sizeof(errh
->offender
));
1778 switch (cmsg
->cmsg_type
) {
1781 uint32_t *v
= (uint32_t *)data
;
1782 uint32_t *t_int
= (uint32_t *)target_data
;
1784 if (len
!= sizeof(uint32_t) ||
1785 tgt_len
!= sizeof(uint32_t)) {
1788 __put_user(*v
, t_int
);
1794 struct sock_extended_err ee
;
1795 struct sockaddr_in6 offender
;
1797 struct errhdr6_t
*errh
= (struct errhdr6_t
*)data
;
1798 struct errhdr6_t
*target_errh
=
1799 (struct errhdr6_t
*)target_data
;
1801 if (len
!= sizeof(struct errhdr6_t
) ||
1802 tgt_len
!= sizeof(struct errhdr6_t
)) {
1805 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1806 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1807 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1808 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1809 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1810 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1811 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1812 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1813 (void *) &errh
->offender
, sizeof(errh
->offender
));
1823 qemu_log_mask(LOG_UNIMP
, "Unsupported ancillary data: %d/%d\n",
1824 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1825 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1826 if (tgt_len
> len
) {
1827 memset(target_data
+ len
, 0, tgt_len
- len
);
1831 target_cmsg
->cmsg_len
= tswapal(TARGET_CMSG_LEN(tgt_len
));
1832 tgt_space
= TARGET_CMSG_SPACE(tgt_len
);
1833 if (msg_controllen
< tgt_space
) {
1834 tgt_space
= msg_controllen
;
1836 msg_controllen
-= tgt_space
;
1838 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1839 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1842 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1844 target_msgh
->msg_controllen
= tswapal(space
);
1848 /* do_setsockopt() Must return target values and target errnos. */
1849 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
1850 abi_ulong optval_addr
, socklen_t optlen
)
1854 struct ip_mreqn
*ip_mreq
;
1855 struct ip_mreq_source
*ip_mreq_source
;
1859 /* TCP options all take an 'int' value. */
1860 if (optlen
< sizeof(uint32_t))
1861 return -TARGET_EINVAL
;
1863 if (get_user_u32(val
, optval_addr
))
1864 return -TARGET_EFAULT
;
1865 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1872 case IP_ROUTER_ALERT
:
1876 case IP_MTU_DISCOVER
:
1883 case IP_MULTICAST_TTL
:
1884 case IP_MULTICAST_LOOP
:
1886 if (optlen
>= sizeof(uint32_t)) {
1887 if (get_user_u32(val
, optval_addr
))
1888 return -TARGET_EFAULT
;
1889 } else if (optlen
>= 1) {
1890 if (get_user_u8(val
, optval_addr
))
1891 return -TARGET_EFAULT
;
1893 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1895 case IP_ADD_MEMBERSHIP
:
1896 case IP_DROP_MEMBERSHIP
:
1897 if (optlen
< sizeof (struct target_ip_mreq
) ||
1898 optlen
> sizeof (struct target_ip_mreqn
))
1899 return -TARGET_EINVAL
;
1901 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
1902 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
1903 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
1906 case IP_BLOCK_SOURCE
:
1907 case IP_UNBLOCK_SOURCE
:
1908 case IP_ADD_SOURCE_MEMBERSHIP
:
1909 case IP_DROP_SOURCE_MEMBERSHIP
:
1910 if (optlen
!= sizeof (struct target_ip_mreq_source
))
1911 return -TARGET_EINVAL
;
1913 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
1914 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
1915 unlock_user (ip_mreq_source
, optval_addr
, 0);
1924 case IPV6_MTU_DISCOVER
:
1927 case IPV6_RECVPKTINFO
:
1928 case IPV6_UNICAST_HOPS
:
1929 case IPV6_MULTICAST_HOPS
:
1930 case IPV6_MULTICAST_LOOP
:
1932 case IPV6_RECVHOPLIMIT
:
1933 case IPV6_2292HOPLIMIT
:
1936 case IPV6_2292PKTINFO
:
1937 case IPV6_RECVTCLASS
:
1938 case IPV6_RECVRTHDR
:
1939 case IPV6_2292RTHDR
:
1940 case IPV6_RECVHOPOPTS
:
1941 case IPV6_2292HOPOPTS
:
1942 case IPV6_RECVDSTOPTS
:
1943 case IPV6_2292DSTOPTS
:
1945 #ifdef IPV6_RECVPATHMTU
1946 case IPV6_RECVPATHMTU
:
1948 #ifdef IPV6_TRANSPARENT
1949 case IPV6_TRANSPARENT
:
1951 #ifdef IPV6_FREEBIND
1954 #ifdef IPV6_RECVORIGDSTADDR
1955 case IPV6_RECVORIGDSTADDR
:
1958 if (optlen
< sizeof(uint32_t)) {
1959 return -TARGET_EINVAL
;
1961 if (get_user_u32(val
, optval_addr
)) {
1962 return -TARGET_EFAULT
;
1964 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1965 &val
, sizeof(val
)));
1969 struct in6_pktinfo pki
;
1971 if (optlen
< sizeof(pki
)) {
1972 return -TARGET_EINVAL
;
1975 if (copy_from_user(&pki
, optval_addr
, sizeof(pki
))) {
1976 return -TARGET_EFAULT
;
1979 pki
.ipi6_ifindex
= tswap32(pki
.ipi6_ifindex
);
1981 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1982 &pki
, sizeof(pki
)));
1985 case IPV6_ADD_MEMBERSHIP
:
1986 case IPV6_DROP_MEMBERSHIP
:
1988 struct ipv6_mreq ipv6mreq
;
1990 if (optlen
< sizeof(ipv6mreq
)) {
1991 return -TARGET_EINVAL
;
1994 if (copy_from_user(&ipv6mreq
, optval_addr
, sizeof(ipv6mreq
))) {
1995 return -TARGET_EFAULT
;
1998 ipv6mreq
.ipv6mr_interface
= tswap32(ipv6mreq
.ipv6mr_interface
);
2000 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2001 &ipv6mreq
, sizeof(ipv6mreq
)));
2012 struct icmp6_filter icmp6f
;
2014 if (optlen
> sizeof(icmp6f
)) {
2015 optlen
= sizeof(icmp6f
);
2018 if (copy_from_user(&icmp6f
, optval_addr
, optlen
)) {
2019 return -TARGET_EFAULT
;
2022 for (val
= 0; val
< 8; val
++) {
2023 icmp6f
.data
[val
] = tswap32(icmp6f
.data
[val
]);
2026 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2038 /* those take an u32 value */
2039 if (optlen
< sizeof(uint32_t)) {
2040 return -TARGET_EINVAL
;
2043 if (get_user_u32(val
, optval_addr
)) {
2044 return -TARGET_EFAULT
;
2046 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2047 &val
, sizeof(val
)));
2054 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2059 char *alg_key
= g_malloc(optlen
);
2062 return -TARGET_ENOMEM
;
2064 if (copy_from_user(alg_key
, optval_addr
, optlen
)) {
2066 return -TARGET_EFAULT
;
2068 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2073 case ALG_SET_AEAD_AUTHSIZE
:
2075 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2084 case TARGET_SOL_SOCKET
:
2086 case TARGET_SO_RCVTIMEO
:
2090 optname
= SO_RCVTIMEO
;
2093 if (optlen
!= sizeof(struct target_timeval
)) {
2094 return -TARGET_EINVAL
;
2097 if (copy_from_user_timeval(&tv
, optval_addr
)) {
2098 return -TARGET_EFAULT
;
2101 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2105 case TARGET_SO_SNDTIMEO
:
2106 optname
= SO_SNDTIMEO
;
2108 case TARGET_SO_ATTACH_FILTER
:
2110 struct target_sock_fprog
*tfprog
;
2111 struct target_sock_filter
*tfilter
;
2112 struct sock_fprog fprog
;
2113 struct sock_filter
*filter
;
2116 if (optlen
!= sizeof(*tfprog
)) {
2117 return -TARGET_EINVAL
;
2119 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
2120 return -TARGET_EFAULT
;
2122 if (!lock_user_struct(VERIFY_READ
, tfilter
,
2123 tswapal(tfprog
->filter
), 0)) {
2124 unlock_user_struct(tfprog
, optval_addr
, 1);
2125 return -TARGET_EFAULT
;
2128 fprog
.len
= tswap16(tfprog
->len
);
2129 filter
= g_try_new(struct sock_filter
, fprog
.len
);
2130 if (filter
== NULL
) {
2131 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2132 unlock_user_struct(tfprog
, optval_addr
, 1);
2133 return -TARGET_ENOMEM
;
2135 for (i
= 0; i
< fprog
.len
; i
++) {
2136 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2137 filter
[i
].jt
= tfilter
[i
].jt
;
2138 filter
[i
].jf
= tfilter
[i
].jf
;
2139 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2141 fprog
.filter
= filter
;
2143 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2144 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2147 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2148 unlock_user_struct(tfprog
, optval_addr
, 1);
2151 case TARGET_SO_BINDTODEVICE
:
2153 char *dev_ifname
, *addr_ifname
;
2155 if (optlen
> IFNAMSIZ
- 1) {
2156 optlen
= IFNAMSIZ
- 1;
2158 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2160 return -TARGET_EFAULT
;
2162 optname
= SO_BINDTODEVICE
;
2163 addr_ifname
= alloca(IFNAMSIZ
);
2164 memcpy(addr_ifname
, dev_ifname
, optlen
);
2165 addr_ifname
[optlen
] = 0;
2166 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2167 addr_ifname
, optlen
));
2168 unlock_user (dev_ifname
, optval_addr
, 0);
2171 case TARGET_SO_LINGER
:
2174 struct target_linger
*tlg
;
2176 if (optlen
!= sizeof(struct target_linger
)) {
2177 return -TARGET_EINVAL
;
2179 if (!lock_user_struct(VERIFY_READ
, tlg
, optval_addr
, 1)) {
2180 return -TARGET_EFAULT
;
2182 __get_user(lg
.l_onoff
, &tlg
->l_onoff
);
2183 __get_user(lg
.l_linger
, &tlg
->l_linger
);
2184 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, SO_LINGER
,
2186 unlock_user_struct(tlg
, optval_addr
, 0);
2189 /* Options with 'int' argument. */
2190 case TARGET_SO_DEBUG
:
2193 case TARGET_SO_REUSEADDR
:
2194 optname
= SO_REUSEADDR
;
2197 case TARGET_SO_REUSEPORT
:
2198 optname
= SO_REUSEPORT
;
2201 case TARGET_SO_TYPE
:
2204 case TARGET_SO_ERROR
:
2207 case TARGET_SO_DONTROUTE
:
2208 optname
= SO_DONTROUTE
;
2210 case TARGET_SO_BROADCAST
:
2211 optname
= SO_BROADCAST
;
2213 case TARGET_SO_SNDBUF
:
2214 optname
= SO_SNDBUF
;
2216 case TARGET_SO_SNDBUFFORCE
:
2217 optname
= SO_SNDBUFFORCE
;
2219 case TARGET_SO_RCVBUF
:
2220 optname
= SO_RCVBUF
;
2222 case TARGET_SO_RCVBUFFORCE
:
2223 optname
= SO_RCVBUFFORCE
;
2225 case TARGET_SO_KEEPALIVE
:
2226 optname
= SO_KEEPALIVE
;
2228 case TARGET_SO_OOBINLINE
:
2229 optname
= SO_OOBINLINE
;
2231 case TARGET_SO_NO_CHECK
:
2232 optname
= SO_NO_CHECK
;
2234 case TARGET_SO_PRIORITY
:
2235 optname
= SO_PRIORITY
;
2238 case TARGET_SO_BSDCOMPAT
:
2239 optname
= SO_BSDCOMPAT
;
2242 case TARGET_SO_PASSCRED
:
2243 optname
= SO_PASSCRED
;
2245 case TARGET_SO_PASSSEC
:
2246 optname
= SO_PASSSEC
;
2248 case TARGET_SO_TIMESTAMP
:
2249 optname
= SO_TIMESTAMP
;
2251 case TARGET_SO_RCVLOWAT
:
2252 optname
= SO_RCVLOWAT
;
2257 if (optlen
< sizeof(uint32_t))
2258 return -TARGET_EINVAL
;
2260 if (get_user_u32(val
, optval_addr
))
2261 return -TARGET_EFAULT
;
2262 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
2267 case NETLINK_PKTINFO
:
2268 case NETLINK_ADD_MEMBERSHIP
:
2269 case NETLINK_DROP_MEMBERSHIP
:
2270 case NETLINK_BROADCAST_ERROR
:
2271 case NETLINK_NO_ENOBUFS
:
2272 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2273 case NETLINK_LISTEN_ALL_NSID
:
2274 case NETLINK_CAP_ACK
:
2275 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2276 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2277 case NETLINK_EXT_ACK
:
2278 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2279 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2280 case NETLINK_GET_STRICT_CHK
:
2281 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2287 if (optlen
< sizeof(uint32_t)) {
2288 return -TARGET_EINVAL
;
2290 if (get_user_u32(val
, optval_addr
)) {
2291 return -TARGET_EFAULT
;
2293 ret
= get_errno(setsockopt(sockfd
, SOL_NETLINK
, optname
, &val
,
2296 #endif /* SOL_NETLINK */
2299 qemu_log_mask(LOG_UNIMP
, "Unsupported setsockopt level=%d optname=%d\n",
2301 ret
= -TARGET_ENOPROTOOPT
;
2306 /* do_getsockopt() Must return target values and target errnos. */
2307 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
2308 abi_ulong optval_addr
, abi_ulong optlen
)
2315 case TARGET_SOL_SOCKET
:
2318 /* These don't just return a single integer */
2319 case TARGET_SO_PEERNAME
:
2321 case TARGET_SO_RCVTIMEO
: {
2325 optname
= SO_RCVTIMEO
;
2328 if (get_user_u32(len
, optlen
)) {
2329 return -TARGET_EFAULT
;
2332 return -TARGET_EINVAL
;
2336 ret
= get_errno(getsockopt(sockfd
, level
, optname
,
2341 if (len
> sizeof(struct target_timeval
)) {
2342 len
= sizeof(struct target_timeval
);
2344 if (copy_to_user_timeval(optval_addr
, &tv
)) {
2345 return -TARGET_EFAULT
;
2347 if (put_user_u32(len
, optlen
)) {
2348 return -TARGET_EFAULT
;
2352 case TARGET_SO_SNDTIMEO
:
2353 optname
= SO_SNDTIMEO
;
2355 case TARGET_SO_PEERCRED
: {
2358 struct target_ucred
*tcr
;
2360 if (get_user_u32(len
, optlen
)) {
2361 return -TARGET_EFAULT
;
2364 return -TARGET_EINVAL
;
2368 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
2376 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
2377 return -TARGET_EFAULT
;
2379 __put_user(cr
.pid
, &tcr
->pid
);
2380 __put_user(cr
.uid
, &tcr
->uid
);
2381 __put_user(cr
.gid
, &tcr
->gid
);
2382 unlock_user_struct(tcr
, optval_addr
, 1);
2383 if (put_user_u32(len
, optlen
)) {
2384 return -TARGET_EFAULT
;
2388 case TARGET_SO_PEERSEC
: {
2391 if (get_user_u32(len
, optlen
)) {
2392 return -TARGET_EFAULT
;
2395 return -TARGET_EINVAL
;
2397 name
= lock_user(VERIFY_WRITE
, optval_addr
, len
, 0);
2399 return -TARGET_EFAULT
;
2402 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERSEC
,
2404 if (put_user_u32(lv
, optlen
)) {
2405 ret
= -TARGET_EFAULT
;
2407 unlock_user(name
, optval_addr
, lv
);
2410 case TARGET_SO_LINGER
:
2414 struct target_linger
*tlg
;
2416 if (get_user_u32(len
, optlen
)) {
2417 return -TARGET_EFAULT
;
2420 return -TARGET_EINVAL
;
2424 ret
= get_errno(getsockopt(sockfd
, level
, SO_LINGER
,
2432 if (!lock_user_struct(VERIFY_WRITE
, tlg
, optval_addr
, 0)) {
2433 return -TARGET_EFAULT
;
2435 __put_user(lg
.l_onoff
, &tlg
->l_onoff
);
2436 __put_user(lg
.l_linger
, &tlg
->l_linger
);
2437 unlock_user_struct(tlg
, optval_addr
, 1);
2438 if (put_user_u32(len
, optlen
)) {
2439 return -TARGET_EFAULT
;
2443 /* Options with 'int' argument. */
2444 case TARGET_SO_DEBUG
:
2447 case TARGET_SO_REUSEADDR
:
2448 optname
= SO_REUSEADDR
;
2451 case TARGET_SO_REUSEPORT
:
2452 optname
= SO_REUSEPORT
;
2455 case TARGET_SO_TYPE
:
2458 case TARGET_SO_ERROR
:
2461 case TARGET_SO_DONTROUTE
:
2462 optname
= SO_DONTROUTE
;
2464 case TARGET_SO_BROADCAST
:
2465 optname
= SO_BROADCAST
;
2467 case TARGET_SO_SNDBUF
:
2468 optname
= SO_SNDBUF
;
2470 case TARGET_SO_RCVBUF
:
2471 optname
= SO_RCVBUF
;
2473 case TARGET_SO_KEEPALIVE
:
2474 optname
= SO_KEEPALIVE
;
2476 case TARGET_SO_OOBINLINE
:
2477 optname
= SO_OOBINLINE
;
2479 case TARGET_SO_NO_CHECK
:
2480 optname
= SO_NO_CHECK
;
2482 case TARGET_SO_PRIORITY
:
2483 optname
= SO_PRIORITY
;
2486 case TARGET_SO_BSDCOMPAT
:
2487 optname
= SO_BSDCOMPAT
;
2490 case TARGET_SO_PASSCRED
:
2491 optname
= SO_PASSCRED
;
2493 case TARGET_SO_TIMESTAMP
:
2494 optname
= SO_TIMESTAMP
;
2496 case TARGET_SO_RCVLOWAT
:
2497 optname
= SO_RCVLOWAT
;
2499 case TARGET_SO_ACCEPTCONN
:
2500 optname
= SO_ACCEPTCONN
;
2507 /* TCP options all take an 'int' value. */
2509 if (get_user_u32(len
, optlen
))
2510 return -TARGET_EFAULT
;
2512 return -TARGET_EINVAL
;
2514 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2517 if (optname
== SO_TYPE
) {
2518 val
= host_to_target_sock_type(val
);
2523 if (put_user_u32(val
, optval_addr
))
2524 return -TARGET_EFAULT
;
2526 if (put_user_u8(val
, optval_addr
))
2527 return -TARGET_EFAULT
;
2529 if (put_user_u32(len
, optlen
))
2530 return -TARGET_EFAULT
;
2537 case IP_ROUTER_ALERT
:
2541 case IP_MTU_DISCOVER
:
2547 case IP_MULTICAST_TTL
:
2548 case IP_MULTICAST_LOOP
:
2549 if (get_user_u32(len
, optlen
))
2550 return -TARGET_EFAULT
;
2552 return -TARGET_EINVAL
;
2554 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2557 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2559 if (put_user_u32(len
, optlen
)
2560 || put_user_u8(val
, optval_addr
))
2561 return -TARGET_EFAULT
;
2563 if (len
> sizeof(int))
2565 if (put_user_u32(len
, optlen
)
2566 || put_user_u32(val
, optval_addr
))
2567 return -TARGET_EFAULT
;
2571 ret
= -TARGET_ENOPROTOOPT
;
2577 case IPV6_MTU_DISCOVER
:
2580 case IPV6_RECVPKTINFO
:
2581 case IPV6_UNICAST_HOPS
:
2582 case IPV6_MULTICAST_HOPS
:
2583 case IPV6_MULTICAST_LOOP
:
2585 case IPV6_RECVHOPLIMIT
:
2586 case IPV6_2292HOPLIMIT
:
2589 case IPV6_2292PKTINFO
:
2590 case IPV6_RECVTCLASS
:
2591 case IPV6_RECVRTHDR
:
2592 case IPV6_2292RTHDR
:
2593 case IPV6_RECVHOPOPTS
:
2594 case IPV6_2292HOPOPTS
:
2595 case IPV6_RECVDSTOPTS
:
2596 case IPV6_2292DSTOPTS
:
2598 #ifdef IPV6_RECVPATHMTU
2599 case IPV6_RECVPATHMTU
:
2601 #ifdef IPV6_TRANSPARENT
2602 case IPV6_TRANSPARENT
:
2604 #ifdef IPV6_FREEBIND
2607 #ifdef IPV6_RECVORIGDSTADDR
2608 case IPV6_RECVORIGDSTADDR
:
2610 if (get_user_u32(len
, optlen
))
2611 return -TARGET_EFAULT
;
2613 return -TARGET_EINVAL
;
2615 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2618 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2620 if (put_user_u32(len
, optlen
)
2621 || put_user_u8(val
, optval_addr
))
2622 return -TARGET_EFAULT
;
2624 if (len
> sizeof(int))
2626 if (put_user_u32(len
, optlen
)
2627 || put_user_u32(val
, optval_addr
))
2628 return -TARGET_EFAULT
;
2632 ret
= -TARGET_ENOPROTOOPT
;
2639 case NETLINK_PKTINFO
:
2640 case NETLINK_BROADCAST_ERROR
:
2641 case NETLINK_NO_ENOBUFS
:
2642 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2643 case NETLINK_LISTEN_ALL_NSID
:
2644 case NETLINK_CAP_ACK
:
2645 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2646 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2647 case NETLINK_EXT_ACK
:
2648 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2649 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2650 case NETLINK_GET_STRICT_CHK
:
2651 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2652 if (get_user_u32(len
, optlen
)) {
2653 return -TARGET_EFAULT
;
2655 if (len
!= sizeof(val
)) {
2656 return -TARGET_EINVAL
;
2659 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2663 if (put_user_u32(lv
, optlen
)
2664 || put_user_u32(val
, optval_addr
)) {
2665 return -TARGET_EFAULT
;
2668 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2669 case NETLINK_LIST_MEMBERSHIPS
:
2673 if (get_user_u32(len
, optlen
)) {
2674 return -TARGET_EFAULT
;
2677 return -TARGET_EINVAL
;
2679 results
= lock_user(VERIFY_WRITE
, optval_addr
, len
, 1);
2681 return -TARGET_EFAULT
;
2684 ret
= get_errno(getsockopt(sockfd
, level
, optname
, results
, &lv
));
2686 unlock_user(results
, optval_addr
, 0);
2689 /* swap host endianess to target endianess. */
2690 for (i
= 0; i
< (len
/ sizeof(uint32_t)); i
++) {
2691 results
[i
] = tswap32(results
[i
]);
2693 if (put_user_u32(lv
, optlen
)) {
2694 return -TARGET_EFAULT
;
2696 unlock_user(results
, optval_addr
, 0);
2699 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2704 #endif /* SOL_NETLINK */
2707 qemu_log_mask(LOG_UNIMP
,
2708 "getsockopt level=%d optname=%d not yet supported\n",
2710 ret
= -TARGET_EOPNOTSUPP
;
2716 /* Convert target low/high pair representing file offset into the host
2717 * low/high pair. This function doesn't handle offsets bigger than 64 bits
2718 * as the kernel doesn't handle them either.
2720 static void target_to_host_low_high(abi_ulong tlow
,
2722 unsigned long *hlow
,
2723 unsigned long *hhigh
)
2725 uint64_t off
= tlow
|
2726 ((unsigned long long)thigh
<< TARGET_LONG_BITS
/ 2) <<
2727 TARGET_LONG_BITS
/ 2;
2730 *hhigh
= (off
>> HOST_LONG_BITS
/ 2) >> HOST_LONG_BITS
/ 2;
2733 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
2734 abi_ulong count
, int copy
)
2736 struct target_iovec
*target_vec
;
2738 abi_ulong total_len
, max_len
;
2741 bool bad_address
= false;
2747 if (count
> IOV_MAX
) {
2752 vec
= g_try_new0(struct iovec
, count
);
2758 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2759 count
* sizeof(struct target_iovec
), 1);
2760 if (target_vec
== NULL
) {
2765 /* ??? If host page size > target page size, this will result in a
2766 value larger than what we can actually support. */
2767 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
2770 for (i
= 0; i
< count
; i
++) {
2771 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2772 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2777 } else if (len
== 0) {
2778 /* Zero length pointer is ignored. */
2779 vec
[i
].iov_base
= 0;
2781 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
2782 /* If the first buffer pointer is bad, this is a fault. But
2783 * subsequent bad buffers will result in a partial write; this
2784 * is realized by filling the vector with null pointers and
2786 if (!vec
[i
].iov_base
) {
2797 if (len
> max_len
- total_len
) {
2798 len
= max_len
- total_len
;
2801 vec
[i
].iov_len
= len
;
2805 unlock_user(target_vec
, target_addr
, 0);
2810 if (tswapal(target_vec
[i
].iov_len
) > 0) {
2811 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
2814 unlock_user(target_vec
, target_addr
, 0);
2821 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
2822 abi_ulong count
, int copy
)
2824 struct target_iovec
*target_vec
;
2827 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2828 count
* sizeof(struct target_iovec
), 1);
2830 for (i
= 0; i
< count
; i
++) {
2831 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2832 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2836 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
2838 unlock_user(target_vec
, target_addr
, 0);
2844 static inline int target_to_host_sock_type(int *type
)
2847 int target_type
= *type
;
2849 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
2850 case TARGET_SOCK_DGRAM
:
2851 host_type
= SOCK_DGRAM
;
2853 case TARGET_SOCK_STREAM
:
2854 host_type
= SOCK_STREAM
;
2857 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
2860 if (target_type
& TARGET_SOCK_CLOEXEC
) {
2861 #if defined(SOCK_CLOEXEC)
2862 host_type
|= SOCK_CLOEXEC
;
2864 return -TARGET_EINVAL
;
2867 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2868 #if defined(SOCK_NONBLOCK)
2869 host_type
|= SOCK_NONBLOCK
;
2870 #elif !defined(O_NONBLOCK)
2871 return -TARGET_EINVAL
;
2878 /* Try to emulate socket type flags after socket creation. */
2879 static int sock_flags_fixup(int fd
, int target_type
)
2881 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2882 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2883 int flags
= fcntl(fd
, F_GETFL
);
2884 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
2886 return -TARGET_EINVAL
;
2893 /* do_socket() Must return target values and target errnos. */
2894 static abi_long
do_socket(int domain
, int type
, int protocol
)
2896 int target_type
= type
;
2899 ret
= target_to_host_sock_type(&type
);
2904 if (domain
== PF_NETLINK
&& !(
2905 #ifdef CONFIG_RTNETLINK
2906 protocol
== NETLINK_ROUTE
||
2908 protocol
== NETLINK_KOBJECT_UEVENT
||
2909 protocol
== NETLINK_AUDIT
)) {
2910 return -EPFNOSUPPORT
;
2913 if (domain
== AF_PACKET
||
2914 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
2915 protocol
= tswap16(protocol
);
2918 ret
= get_errno(socket(domain
, type
, protocol
));
2920 ret
= sock_flags_fixup(ret
, target_type
);
2921 if (type
== SOCK_PACKET
) {
2922 /* Manage an obsolete case :
2923 * if socket type is SOCK_PACKET, bind by name
2925 fd_trans_register(ret
, &target_packet_trans
);
2926 } else if (domain
== PF_NETLINK
) {
2928 #ifdef CONFIG_RTNETLINK
2930 fd_trans_register(ret
, &target_netlink_route_trans
);
2933 case NETLINK_KOBJECT_UEVENT
:
2934 /* nothing to do: messages are strings */
2937 fd_trans_register(ret
, &target_netlink_audit_trans
);
2940 g_assert_not_reached();
2947 /* do_bind() Must return target values and target errnos. */
2948 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
2954 if ((int)addrlen
< 0) {
2955 return -TARGET_EINVAL
;
2958 addr
= alloca(addrlen
+1);
2960 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
2964 return get_errno(bind(sockfd
, addr
, addrlen
));
2967 /* do_connect() Must return target values and target errnos. */
2968 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
2974 if ((int)addrlen
< 0) {
2975 return -TARGET_EINVAL
;
2978 addr
= alloca(addrlen
+1);
2980 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
2984 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
2987 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2988 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
2989 int flags
, int send
)
2995 abi_ulong target_vec
;
2997 if (msgp
->msg_name
) {
2998 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
2999 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
3000 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
3001 tswapal(msgp
->msg_name
),
3003 if (ret
== -TARGET_EFAULT
) {
3004 /* For connected sockets msg_name and msg_namelen must
3005 * be ignored, so returning EFAULT immediately is wrong.
3006 * Instead, pass a bad msg_name to the host kernel, and
3007 * let it decide whether to return EFAULT or not.
3009 msg
.msg_name
= (void *)-1;
3014 msg
.msg_name
= NULL
;
3015 msg
.msg_namelen
= 0;
3017 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
3018 msg
.msg_control
= alloca(msg
.msg_controllen
);
3019 memset(msg
.msg_control
, 0, msg
.msg_controllen
);
3021 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
3023 count
= tswapal(msgp
->msg_iovlen
);
3024 target_vec
= tswapal(msgp
->msg_iov
);
3026 if (count
> IOV_MAX
) {
3027 /* sendrcvmsg returns a different errno for this condition than
3028 * readv/writev, so we must catch it here before lock_iovec() does.
3030 ret
= -TARGET_EMSGSIZE
;
3034 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
3035 target_vec
, count
, send
);
3037 ret
= -host_to_target_errno(errno
);
3040 msg
.msg_iovlen
= count
;
3044 if (fd_trans_target_to_host_data(fd
)) {
3047 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
3048 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
3049 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
3050 msg
.msg_iov
->iov_len
);
3052 msg
.msg_iov
->iov_base
= host_msg
;
3053 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3057 ret
= target_to_host_cmsg(&msg
, msgp
);
3059 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3063 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
3064 if (!is_error(ret
)) {
3066 if (fd_trans_host_to_target_data(fd
)) {
3067 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
3068 MIN(msg
.msg_iov
->iov_len
, len
));
3070 ret
= host_to_target_cmsg(msgp
, &msg
);
3072 if (!is_error(ret
)) {
3073 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
3074 msgp
->msg_flags
= tswap32(msg
.msg_flags
);
3075 if (msg
.msg_name
!= NULL
&& msg
.msg_name
!= (void *)-1) {
3076 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
3077 msg
.msg_name
, msg
.msg_namelen
);
3089 unlock_iovec(vec
, target_vec
, count
, !send
);
3094 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
3095 int flags
, int send
)
3098 struct target_msghdr
*msgp
;
3100 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
3104 return -TARGET_EFAULT
;
3106 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
3107 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
3111 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3112 * so it might not have this *mmsg-specific flag either.
3114 #ifndef MSG_WAITFORONE
3115 #define MSG_WAITFORONE 0x10000
3118 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
3119 unsigned int vlen
, unsigned int flags
,
3122 struct target_mmsghdr
*mmsgp
;
3126 if (vlen
> UIO_MAXIOV
) {
3130 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
3132 return -TARGET_EFAULT
;
3135 for (i
= 0; i
< vlen
; i
++) {
3136 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
3137 if (is_error(ret
)) {
3140 mmsgp
[i
].msg_len
= tswap32(ret
);
3141 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3142 if (flags
& MSG_WAITFORONE
) {
3143 flags
|= MSG_DONTWAIT
;
3147 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3149 /* Return number of datagrams sent if we sent any at all;
3150 * otherwise return the error.
3158 /* do_accept4() Must return target values and target errnos. */
3159 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3160 abi_ulong target_addrlen_addr
, int flags
)
3162 socklen_t addrlen
, ret_addrlen
;
3167 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3169 if (target_addr
== 0) {
3170 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
3173 /* linux returns EINVAL if addrlen pointer is invalid */
3174 if (get_user_u32(addrlen
, target_addrlen_addr
))
3175 return -TARGET_EINVAL
;
3177 if ((int)addrlen
< 0) {
3178 return -TARGET_EINVAL
;
3181 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3182 return -TARGET_EINVAL
;
3184 addr
= alloca(addrlen
);
3186 ret_addrlen
= addrlen
;
3187 ret
= get_errno(safe_accept4(fd
, addr
, &ret_addrlen
, host_flags
));
3188 if (!is_error(ret
)) {
3189 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3190 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3191 ret
= -TARGET_EFAULT
;
3197 /* do_getpeername() Must return target values and target errnos. */
3198 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3199 abi_ulong target_addrlen_addr
)
3201 socklen_t addrlen
, ret_addrlen
;
3205 if (get_user_u32(addrlen
, target_addrlen_addr
))
3206 return -TARGET_EFAULT
;
3208 if ((int)addrlen
< 0) {
3209 return -TARGET_EINVAL
;
3212 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3213 return -TARGET_EFAULT
;
3215 addr
= alloca(addrlen
);
3217 ret_addrlen
= addrlen
;
3218 ret
= get_errno(getpeername(fd
, addr
, &ret_addrlen
));
3219 if (!is_error(ret
)) {
3220 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3221 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3222 ret
= -TARGET_EFAULT
;
3228 /* do_getsockname() Must return target values and target errnos. */
3229 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3230 abi_ulong target_addrlen_addr
)
3232 socklen_t addrlen
, ret_addrlen
;
3236 if (get_user_u32(addrlen
, target_addrlen_addr
))
3237 return -TARGET_EFAULT
;
3239 if ((int)addrlen
< 0) {
3240 return -TARGET_EINVAL
;
3243 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3244 return -TARGET_EFAULT
;
3246 addr
= alloca(addrlen
);
3248 ret_addrlen
= addrlen
;
3249 ret
= get_errno(getsockname(fd
, addr
, &ret_addrlen
));
3250 if (!is_error(ret
)) {
3251 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3252 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3253 ret
= -TARGET_EFAULT
;
3259 /* do_socketpair() Must return target values and target errnos. */
3260 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3261 abi_ulong target_tab_addr
)
3266 target_to_host_sock_type(&type
);
3268 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3269 if (!is_error(ret
)) {
3270 if (put_user_s32(tab
[0], target_tab_addr
)
3271 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3272 ret
= -TARGET_EFAULT
;
3277 /* do_sendto() Must return target values and target errnos. */
3278 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3279 abi_ulong target_addr
, socklen_t addrlen
)
3283 void *copy_msg
= NULL
;
3286 if ((int)addrlen
< 0) {
3287 return -TARGET_EINVAL
;
3290 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3292 return -TARGET_EFAULT
;
3293 if (fd_trans_target_to_host_data(fd
)) {
3294 copy_msg
= host_msg
;
3295 host_msg
= g_malloc(len
);
3296 memcpy(host_msg
, copy_msg
, len
);
3297 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
3303 addr
= alloca(addrlen
+1);
3304 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
3308 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
3310 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
3315 host_msg
= copy_msg
;
3317 unlock_user(host_msg
, msg
, 0);
3321 /* do_recvfrom() Must return target values and target errnos. */
3322 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
3323 abi_ulong target_addr
,
3324 abi_ulong target_addrlen
)
3326 socklen_t addrlen
, ret_addrlen
;
3331 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
3333 return -TARGET_EFAULT
;
3335 if (get_user_u32(addrlen
, target_addrlen
)) {
3336 ret
= -TARGET_EFAULT
;
3339 if ((int)addrlen
< 0) {
3340 ret
= -TARGET_EINVAL
;
3343 addr
= alloca(addrlen
);
3344 ret_addrlen
= addrlen
;
3345 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
3346 addr
, &ret_addrlen
));
3348 addr
= NULL
; /* To keep compiler quiet. */
3349 addrlen
= 0; /* To keep compiler quiet. */
3350 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
3352 if (!is_error(ret
)) {
3353 if (fd_trans_host_to_target_data(fd
)) {
3355 trans
= fd_trans_host_to_target_data(fd
)(host_msg
, MIN(ret
, len
));
3356 if (is_error(trans
)) {
3362 host_to_target_sockaddr(target_addr
, addr
,
3363 MIN(addrlen
, ret_addrlen
));
3364 if (put_user_u32(ret_addrlen
, target_addrlen
)) {
3365 ret
= -TARGET_EFAULT
;
3369 unlock_user(host_msg
, msg
, len
);
3372 unlock_user(host_msg
, msg
, 0);
3377 #ifdef TARGET_NR_socketcall
3378 /* do_socketcall() must return target values and target errnos. */
3379 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
3381 static const unsigned nargs
[] = { /* number of arguments per operation */
3382 [TARGET_SYS_SOCKET
] = 3, /* domain, type, protocol */
3383 [TARGET_SYS_BIND
] = 3, /* fd, addr, addrlen */
3384 [TARGET_SYS_CONNECT
] = 3, /* fd, addr, addrlen */
3385 [TARGET_SYS_LISTEN
] = 2, /* fd, backlog */
3386 [TARGET_SYS_ACCEPT
] = 3, /* fd, addr, addrlen */
3387 [TARGET_SYS_GETSOCKNAME
] = 3, /* fd, addr, addrlen */
3388 [TARGET_SYS_GETPEERNAME
] = 3, /* fd, addr, addrlen */
3389 [TARGET_SYS_SOCKETPAIR
] = 4, /* domain, type, protocol, tab */
3390 [TARGET_SYS_SEND
] = 4, /* fd, msg, len, flags */
3391 [TARGET_SYS_RECV
] = 4, /* fd, msg, len, flags */
3392 [TARGET_SYS_SENDTO
] = 6, /* fd, msg, len, flags, addr, addrlen */
3393 [TARGET_SYS_RECVFROM
] = 6, /* fd, msg, len, flags, addr, addrlen */
3394 [TARGET_SYS_SHUTDOWN
] = 2, /* fd, how */
3395 [TARGET_SYS_SETSOCKOPT
] = 5, /* fd, level, optname, optval, optlen */
3396 [TARGET_SYS_GETSOCKOPT
] = 5, /* fd, level, optname, optval, optlen */
3397 [TARGET_SYS_SENDMSG
] = 3, /* fd, msg, flags */
3398 [TARGET_SYS_RECVMSG
] = 3, /* fd, msg, flags */
3399 [TARGET_SYS_ACCEPT4
] = 4, /* fd, addr, addrlen, flags */
3400 [TARGET_SYS_RECVMMSG
] = 4, /* fd, msgvec, vlen, flags */
3401 [TARGET_SYS_SENDMMSG
] = 4, /* fd, msgvec, vlen, flags */
3403 abi_long a
[6]; /* max 6 args */
3406 /* check the range of the first argument num */
3407 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3408 if (num
< 1 || num
> TARGET_SYS_SENDMMSG
) {
3409 return -TARGET_EINVAL
;
3411 /* ensure we have space for args */
3412 if (nargs
[num
] > ARRAY_SIZE(a
)) {
3413 return -TARGET_EINVAL
;
3415 /* collect the arguments in a[] according to nargs[] */
3416 for (i
= 0; i
< nargs
[num
]; ++i
) {
3417 if (get_user_ual(a
[i
], vptr
+ i
* sizeof(abi_long
)) != 0) {
3418 return -TARGET_EFAULT
;
3421 /* now when we have the args, invoke the appropriate underlying function */
3423 case TARGET_SYS_SOCKET
: /* domain, type, protocol */
3424 return do_socket(a
[0], a
[1], a
[2]);
3425 case TARGET_SYS_BIND
: /* sockfd, addr, addrlen */
3426 return do_bind(a
[0], a
[1], a
[2]);
3427 case TARGET_SYS_CONNECT
: /* sockfd, addr, addrlen */
3428 return do_connect(a
[0], a
[1], a
[2]);
3429 case TARGET_SYS_LISTEN
: /* sockfd, backlog */
3430 return get_errno(listen(a
[0], a
[1]));
3431 case TARGET_SYS_ACCEPT
: /* sockfd, addr, addrlen */
3432 return do_accept4(a
[0], a
[1], a
[2], 0);
3433 case TARGET_SYS_GETSOCKNAME
: /* sockfd, addr, addrlen */
3434 return do_getsockname(a
[0], a
[1], a
[2]);
3435 case TARGET_SYS_GETPEERNAME
: /* sockfd, addr, addrlen */
3436 return do_getpeername(a
[0], a
[1], a
[2]);
3437 case TARGET_SYS_SOCKETPAIR
: /* domain, type, protocol, tab */
3438 return do_socketpair(a
[0], a
[1], a
[2], a
[3]);
3439 case TARGET_SYS_SEND
: /* sockfd, msg, len, flags */
3440 return do_sendto(a
[0], a
[1], a
[2], a
[3], 0, 0);
3441 case TARGET_SYS_RECV
: /* sockfd, msg, len, flags */
3442 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], 0, 0);
3443 case TARGET_SYS_SENDTO
: /* sockfd, msg, len, flags, addr, addrlen */
3444 return do_sendto(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3445 case TARGET_SYS_RECVFROM
: /* sockfd, msg, len, flags, addr, addrlen */
3446 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3447 case TARGET_SYS_SHUTDOWN
: /* sockfd, how */
3448 return get_errno(shutdown(a
[0], a
[1]));
3449 case TARGET_SYS_SETSOCKOPT
: /* sockfd, level, optname, optval, optlen */
3450 return do_setsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3451 case TARGET_SYS_GETSOCKOPT
: /* sockfd, level, optname, optval, optlen */
3452 return do_getsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3453 case TARGET_SYS_SENDMSG
: /* sockfd, msg, flags */
3454 return do_sendrecvmsg(a
[0], a
[1], a
[2], 1);
3455 case TARGET_SYS_RECVMSG
: /* sockfd, msg, flags */
3456 return do_sendrecvmsg(a
[0], a
[1], a
[2], 0);
3457 case TARGET_SYS_ACCEPT4
: /* sockfd, addr, addrlen, flags */
3458 return do_accept4(a
[0], a
[1], a
[2], a
[3]);
3459 case TARGET_SYS_RECVMMSG
: /* sockfd, msgvec, vlen, flags */
3460 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 0);
3461 case TARGET_SYS_SENDMMSG
: /* sockfd, msgvec, vlen, flags */
3462 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 1);
3464 qemu_log_mask(LOG_UNIMP
, "Unsupported socketcall: %d\n", num
);
3465 return -TARGET_EINVAL
;
3470 #define N_SHM_REGIONS 32
3472 static struct shm_region
{
3476 } shm_regions
[N_SHM_REGIONS
];
3478 #ifndef TARGET_SEMID64_DS
3479 /* asm-generic version of this struct */
3480 struct target_semid64_ds
3482 struct target_ipc_perm sem_perm
;
3483 abi_ulong sem_otime
;
3484 #if TARGET_ABI_BITS == 32
3485 abi_ulong __unused1
;
3487 abi_ulong sem_ctime
;
3488 #if TARGET_ABI_BITS == 32
3489 abi_ulong __unused2
;
3491 abi_ulong sem_nsems
;
3492 abi_ulong __unused3
;
3493 abi_ulong __unused4
;
3497 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
3498 abi_ulong target_addr
)
3500 struct target_ipc_perm
*target_ip
;
3501 struct target_semid64_ds
*target_sd
;
3503 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3504 return -TARGET_EFAULT
;
3505 target_ip
= &(target_sd
->sem_perm
);
3506 host_ip
->__key
= tswap32(target_ip
->__key
);
3507 host_ip
->uid
= tswap32(target_ip
->uid
);
3508 host_ip
->gid
= tswap32(target_ip
->gid
);
3509 host_ip
->cuid
= tswap32(target_ip
->cuid
);
3510 host_ip
->cgid
= tswap32(target_ip
->cgid
);
3511 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3512 host_ip
->mode
= tswap32(target_ip
->mode
);
3514 host_ip
->mode
= tswap16(target_ip
->mode
);
3516 #if defined(TARGET_PPC)
3517 host_ip
->__seq
= tswap32(target_ip
->__seq
);
3519 host_ip
->__seq
= tswap16(target_ip
->__seq
);
3521 unlock_user_struct(target_sd
, target_addr
, 0);
3525 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
3526 struct ipc_perm
*host_ip
)
3528 struct target_ipc_perm
*target_ip
;
3529 struct target_semid64_ds
*target_sd
;
3531 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3532 return -TARGET_EFAULT
;
3533 target_ip
= &(target_sd
->sem_perm
);
3534 target_ip
->__key
= tswap32(host_ip
->__key
);
3535 target_ip
->uid
= tswap32(host_ip
->uid
);
3536 target_ip
->gid
= tswap32(host_ip
->gid
);
3537 target_ip
->cuid
= tswap32(host_ip
->cuid
);
3538 target_ip
->cgid
= tswap32(host_ip
->cgid
);
3539 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3540 target_ip
->mode
= tswap32(host_ip
->mode
);
3542 target_ip
->mode
= tswap16(host_ip
->mode
);
3544 #if defined(TARGET_PPC)
3545 target_ip
->__seq
= tswap32(host_ip
->__seq
);
3547 target_ip
->__seq
= tswap16(host_ip
->__seq
);
3549 unlock_user_struct(target_sd
, target_addr
, 1);
3553 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
3554 abi_ulong target_addr
)
3556 struct target_semid64_ds
*target_sd
;
3558 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3559 return -TARGET_EFAULT
;
3560 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
3561 return -TARGET_EFAULT
;
3562 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
3563 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
3564 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
3565 unlock_user_struct(target_sd
, target_addr
, 0);
3569 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
3570 struct semid_ds
*host_sd
)
3572 struct target_semid64_ds
*target_sd
;
3574 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3575 return -TARGET_EFAULT
;
3576 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
3577 return -TARGET_EFAULT
;
3578 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
3579 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
3580 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
3581 unlock_user_struct(target_sd
, target_addr
, 1);
/* Target-layout mirror of the host struct seminfo (see host_to_target_seminfo,
 * which fills every field below).
 * NOTE(review): field list reconstructed from upstream QEMU — the extraction
 * elided the struct body; confirm against the original file. */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
3598 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
3599 struct seminfo
*host_seminfo
)
3601 struct target_seminfo
*target_seminfo
;
3602 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
3603 return -TARGET_EFAULT
;
3604 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
3605 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
3606 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
3607 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
3608 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
3609 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
3610 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
3611 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
3612 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
3613 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
3614 unlock_user_struct(target_seminfo
, target_addr
, 1);
3620 struct semid_ds
*buf
;
3621 unsigned short *array
;
3622 struct seminfo
*__buf
;
3625 union target_semun
{
3632 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
3633 abi_ulong target_addr
)
3636 unsigned short *array
;
3638 struct semid_ds semid_ds
;
3641 semun
.buf
= &semid_ds
;
3643 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3645 return get_errno(ret
);
3647 nsems
= semid_ds
.sem_nsems
;
3649 *host_array
= g_try_new(unsigned short, nsems
);
3651 return -TARGET_ENOMEM
;
3653 array
= lock_user(VERIFY_READ
, target_addr
,
3654 nsems
*sizeof(unsigned short), 1);
3656 g_free(*host_array
);
3657 return -TARGET_EFAULT
;
3660 for(i
=0; i
<nsems
; i
++) {
3661 __get_user((*host_array
)[i
], &array
[i
]);
3663 unlock_user(array
, target_addr
, 0);
3668 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
3669 unsigned short **host_array
)
3672 unsigned short *array
;
3674 struct semid_ds semid_ds
;
3677 semun
.buf
= &semid_ds
;
3679 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3681 return get_errno(ret
);
3683 nsems
= semid_ds
.sem_nsems
;
3685 array
= lock_user(VERIFY_WRITE
, target_addr
,
3686 nsems
*sizeof(unsigned short), 0);
3688 return -TARGET_EFAULT
;
3690 for(i
=0; i
<nsems
; i
++) {
3691 __put_user((*host_array
)[i
], &array
[i
]);
3693 g_free(*host_array
);
3694 unlock_user(array
, target_addr
, 1);
/*
 * Emulate semctl(2) for the guest.
 *
 * target_arg is the guest-side 'union semun' passed by value; it is
 * reinterpreted per command: an immediate value (SETVAL/GETVAL), a guest
 * pointer to a value array (GETALL/SETALL), a guest pointer to a
 * semid_ds (IPC_STAT/IPC_SET/SEM_STAT), or a seminfo buffer
 * (IPC_INFO/SEM_INFO). Returns the host result or a -TARGET_* errno.
 */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;

    cmd &= 0xff;

    switch (cmd) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element.  To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
         * a swap of the 4 byte val field. In other cases, the data is
         * already in proper host byte order. */
        if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        /* Pull the guest value array into a host array... */
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        /* ...and copy it back (this also frees the host array). */
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        /* These commands take no argument. */
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
/* Guest layout of 'struct sembuf' for semop(2); same field widths as the
 * host struct, but values arrive in guest byte order. */
struct target_sembuf {
    unsigned short sem_num;  /* semaphore index in the set */
    short sem_op;            /* operation: +n / -n / 0 */
    short sem_flg;           /* IPC_NOWAIT, SEM_UNDO */
};
/*
 * Convert an array of nsops guest sembuf structures at target_addr into
 * the caller-provided host array, byte-swapping each field.
 *
 * Returns 0 on success, -TARGET_EFAULT if the guest range is not readable.
 */
static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,
                                             unsigned nsops)
{
    struct target_sembuf *target_sembuf;
    int i;

    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops*sizeof(struct target_sembuf), 1);
    if (!target_sembuf)
        return -TARGET_EFAULT;

    for(i=0; i<nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
    }

    unlock_user(target_sembuf, target_addr, 0);

    return 0;
}
/*
 * Emulate semop(2): convert the guest op array, then issue the host
 * syscall. Prefers the dedicated semtimedop syscall (with a NULL timeout,
 * which is exactly semop) and falls back to the multiplexed ipc(2)
 * syscall on hosts that only provide that entry point.
 */
static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
{
    /* NOTE(review): VLA sized by guest-controlled nsops — callers are
     * expected to have bounded it; confirm against the call sites. */
    struct sembuf sops[nsops];
    abi_long ret;

    if (target_to_host_sembuf(sops, ptr, nsops))
        return -TARGET_EFAULT;

    ret = -TARGET_ENOSYS;
#ifdef __NR_semtimedop
    ret = get_errno(safe_semtimedop(semid, sops, nsops, NULL));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, 0));
    }
#endif
    return ret;
}
/* Guest layout of 'struct msqid_ds' (msgctl IPC_STAT/IPC_SET).
 * On 32-bit targets each time_t field is followed by a padding word,
 * matching the kernel's 32-bit ABI layout. */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;        /* last msgsnd time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;        /* last msgrcv time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;        /* last change time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;     /* current number of bytes on queue */
    abi_ulong msg_qnum;         /* number of messages on queue */
    abi_ulong msg_qbytes;       /* max number of bytes on queue */
    abi_ulong msg_lspid;        /* pid of last msgsnd */
    abi_ulong msg_lrpid;        /* pid of last msgrcv */
    abi_ulong __unused4;
    abi_ulong __unused5;
};
/*
 * Read a guest msqid_ds at target_addr into *host_md, byte-swapping
 * every field. The embedded ipc_perm is converted by
 * target_to_host_ipc_perm(), which takes the address of the whole
 * structure (the perm block sits at offset 0).
 *
 * Returns 0 on success or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    /* NOTE(review): on this failure path target_md is not unlocked —
     * harmless for lock_user_struct on most configs, but worth confirming. */
    if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapal(target_md->msg_stime);
    host_md->msg_rtime = tswapal(target_md->msg_rtime);
    host_md->msg_ctime = tswapal(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapal(target_md->msg_qnum);
    host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
    host_md->msg_lspid = tswapal(target_md->msg_lspid);
    host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);

    return 0;
}
/*
 * Write *host_md back to the guest msqid_ds at target_addr,
 * byte-swapping every field (mirror of target_to_host_msqid_ds).
 *
 * Returns 0 on success or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapal(host_md->msg_stime);
    target_md->msg_rtime = tswapal(host_md->msg_rtime);
    target_md->msg_ctime = tswapal(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapal(host_md->msg_qnum);
    target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
    target_md->msg_lspid = tswapal(host_md->msg_lspid);
    target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);

    return 0;
}
/* Guest layout of 'struct msginfo' (msgctl IPC_INFO/MSG_INFO result). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
/*
 * Copy a host msginfo (IPC_INFO/MSG_INFO result) out to the guest,
 * byte-swapping each field. Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);

    return 0;
}
/*
 * Emulate msgctl(2): convert the guest buffer in/out around the host
 * call, choosing the conversion by command. Returns the host result or
 * a -TARGET_* errno; unknown commands return -TARGET_EINVAL.
 */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        /* ptr is a guest msqid_ds: convert in, call, convert out. */
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* Kernel expects the msginfo buffer cast to msqid_ds *. */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
/* Guest layout of 'struct msgbuf': an abi-sized type followed by the
 * message payload (declared [1]; the real text follows in guest memory). */
struct target_msgbuf {
    abi_long mtype;     /* message type, must be > 0 */
    char mtext[1];      /* message body */
};
/*
 * Emulate msgsnd(2): copy the guest message into a host msgbuf
 * (swapping mtype), then call the host syscall, preferring the
 * dedicated msgsnd entry point and falling back to ipc(2).
 */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* sizeof(long) covers the host mtype field preceding the text. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgsnd
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb, 0));
    }
#endif
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
/*
 * Emulate msgrcv(2): receive into a host buffer, then copy the type
 * (swapped) and the received bytes back into the guest msgbuf at msgp.
 * On success ret is the number of bytes received.
 */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgrcv
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        /* IPCOP_CALL(1, ...) selects the "new" msgrcv calling convention. */
        ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
                                 msgflg, host_mb, msgtyp));
    }
#endif

    if (ret > 0) {
        /* The text area starts right after the guest mtype word. */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);

    return ret;
}
/*
 * Read a guest shmid_ds at target_addr into *host_sd, byte-swapping
 * each field. Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    /* The ipc_perm block sits at offset 0 of the guest struct. */
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);

    return 0;
}
/*
 * Write *host_sd back to the guest shmid_ds at target_addr,
 * byte-swapping each field (mirror of target_to_host_shmid_ds).
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);

    return 0;
}
/* Guest layout of 'struct shminfo' (shmctl IPC_INFO result). */
struct target_shminfo {
    abi_ulong shmmax;   /* max segment size */
    abi_ulong shmmin;   /* min segment size */
    abi_ulong shmmni;   /* max number of segments */
    abi_ulong shmseg;   /* max segments per process */
    abi_ulong shmall;   /* max total shared memory (pages) */
};
/*
 * Copy a host shminfo (IPC_INFO result) out to the guest,
 * byte-swapping each field. Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
{
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);

    return 0;
}
/* Guest layout of 'struct shm_info' (shmctl SHM_INFO result). */
struct target_shm_info {
    int used_ids;               /* currently existing segments */
    abi_ulong shm_tot;          /* total allocated shm (pages) */
    abi_ulong shm_rss;          /* resident shm (pages) */
    abi_ulong shm_swp;          /* swapped shm (pages) */
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};
/*
 * Copy a host shm_info (SHM_INFO result) out to the guest,
 * byte-swapping each field. Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
{
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);

    return 0;
}
/*
 * Emulate shmctl(2): convert the guest buffer in/out around the host
 * call, choosing the conversion by command. Returns the host result or
 * a -TARGET_* errno; unknown commands return -TARGET_EINVAL.
 */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        /* Kernel expects the shminfo buffer cast to shmid_ds *. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return TARGET_PAGE_SIZE;
}
#endif
/*
 * Emulate shmat(2): attach a SysV shared memory segment into the guest
 * address space.
 *
 * The segment length is obtained with IPC_STAT so the guest page flags
 * and the shm_regions[] bookkeeping table can cover the whole mapping.
 * When the guest supplies no address, a suitable hole is found with
 * mmap_find_vma() honoring both the host SHMLBA and the target's
 * alignment requirement.
 *
 * Returns the guest address of the attachment or a -TARGET_* errno.
 */
static inline abi_ulong do_shmat(CPUArchState *cpu_env,
                                 int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;
    abi_ulong shmlba;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            /* Round the requested address down to the alignment boundary. */
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }
    if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        /* In order to use the host shmat, we need to honor host SHMLBA.  */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            /* SHM_REMAP: replace the placeholder found by mmap_find_vma. */
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    /* Record the attachment so do_shmdt() can clear the page flags later. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;
}
/*
 * Emulate shmdt(2): detach the segment attached at shmaddr, clearing the
 * guest page flags recorded by do_shmat() and releasing the
 * shm_regions[] slot before issuing the host shmdt.
 */
static inline abi_long do_shmdt(abi_ulong shmaddr)
{
    int i;
    abi_long rv;

    mmap_lock();

    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
            shm_regions[i].in_use = false;
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
            break;
        }
    }
    rv = get_errno(shmdt(g2h(shmaddr)));

    mmap_unlock();

    return rv;
}
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
/*
 * Demultiplexer for the legacy ipc(2) syscall: the low 16 bits of
 * 'call' select the operation, the high 16 bits carry the calling
 * convention "version" (relevant for msgrcv and shmat).
 */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
        {
            /* Old-style msgrcv passes msgp and msgtyp indirectly
             * through this kludge structure. */
            struct target_ipc_kludge {
                abi_long msgp;
                abi_long msgtyp;
            } *tmp;

            if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                ret = -TARGET_EFAULT;
                break;
            }

            ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                            tswapal(tmp->msgtyp), third);

            unlock_user_struct(tmp, ptr, 0);
            break;
        }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* The attach address is returned through *third. */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
                      call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
4384 /* kernel structure types definitions */
4386 #define STRUCT(name, ...) STRUCT_ ## name,
4387 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4389 #include "syscall_types.h"
4393 #undef STRUCT_SPECIAL
4395 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4396 #define STRUCT_SPECIAL(name)
4397 #include "syscall_types.h"
4399 #undef STRUCT_SPECIAL
4401 typedef struct IOCTLEntry IOCTLEntry
;
4403 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4404 int fd
, int cmd
, abi_long arg
);
4408 unsigned int host_cmd
;
4411 do_ioctl_fn
*do_ioctl
;
4412 const argtype arg_type
[5];
4415 #define IOC_R 0x0001
4416 #define IOC_W 0x0002
4417 #define IOC_RW (IOC_R | IOC_W)
4419 #define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

/*
 * Special-case ioctl handler for FS_IOC_FIEMAP.
 *
 * The parameter for this ioctl is a struct fiemap followed
 * by an array of struct fiemap_extent whose size is set
 * in fiemap->fm_extent_count. The array is filled in by the
 * ioctl, so the whole variable-length result must be converted back
 * to the guest (header first, then each extent).
 */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
4510 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4511 int fd
, int cmd
, abi_long arg
)
4513 const argtype
*arg_type
= ie
->arg_type
;
4517 struct ifconf
*host_ifconf
;
4519 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
4520 int target_ifreq_size
;
4525 abi_long target_ifc_buf
;
4529 assert(arg_type
[0] == TYPE_PTR
);
4530 assert(ie
->access
== IOC_RW
);
4533 target_size
= thunk_type_size(arg_type
, 0);
4535 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4537 return -TARGET_EFAULT
;
4538 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4539 unlock_user(argptr
, arg
, 0);
4541 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
4542 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
4543 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
4545 if (target_ifc_buf
!= 0) {
4546 target_ifc_len
= host_ifconf
->ifc_len
;
4547 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
4548 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
4550 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
4551 if (outbufsz
> MAX_STRUCT_SIZE
) {
4553 * We can't fit all the extents into the fixed size buffer.
4554 * Allocate one that is large enough and use it instead.
4556 host_ifconf
= malloc(outbufsz
);
4558 return -TARGET_ENOMEM
;
4560 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
4563 host_ifc_buf
= (char *)host_ifconf
+ sizeof(*host_ifconf
);
4565 host_ifconf
->ifc_len
= host_ifc_len
;
4567 host_ifc_buf
= NULL
;
4569 host_ifconf
->ifc_buf
= host_ifc_buf
;
4571 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
4572 if (!is_error(ret
)) {
4573 /* convert host ifc_len to target ifc_len */
4575 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
4576 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
4577 host_ifconf
->ifc_len
= target_ifc_len
;
4579 /* restore target ifc_buf */
4581 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
4583 /* copy struct ifconf to target user */
4585 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4587 return -TARGET_EFAULT
;
4588 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
4589 unlock_user(argptr
, arg
, target_size
);
4591 if (target_ifc_buf
!= 0) {
4592 /* copy ifreq[] to target user */
4593 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
4594 for (i
= 0; i
< nb_ifreq
; i
++) {
4595 thunk_convert(argptr
+ i
* target_ifreq_size
,
4596 host_ifc_buf
+ i
* sizeof(struct ifreq
),
4597 ifreq_arg_type
, THUNK_TARGET
);
4599 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
#if defined(CONFIG_USBFS)
#if HOST_LONG_BITS > 64
#error USBDEVFS thunks do not support >64 bit hosts yet.
#endif
/* Per-URB bookkeeping wrapper: the host_urb member is what the kernel
 * sees, and its address doubles as the unique URB handle; the leading
 * target_urb_adr field also serves as the hash-table key. */
struct live_urb {
    uint64_t target_urb_adr;    /* guest address of the guest urb struct */
    uint64_t target_buf_adr;    /* guest address of the data buffer */
    char *target_buf_ptr;       /* host pointer from lock_user(), or NULL */
    struct usbdevfs_urb host_urb;
};
4621 static GHashTable
*usbdevfs_urb_hashtable(void)
4623 static GHashTable
*urb_hashtable
;
4625 if (!urb_hashtable
) {
4626 urb_hashtable
= g_hash_table_new(g_int64_hash
, g_int64_equal
);
4628 return urb_hashtable
;
4631 static void urb_hashtable_insert(struct live_urb
*urb
)
4633 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4634 g_hash_table_insert(urb_hashtable
, urb
, urb
);
4637 static struct live_urb
*urb_hashtable_lookup(uint64_t target_urb_adr
)
4639 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4640 return g_hash_table_lookup(urb_hashtable
, &target_urb_adr
);
4643 static void urb_hashtable_remove(struct live_urb
*urb
)
4645 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4646 g_hash_table_remove(urb_hashtable
, urb
);
/*
 * Special-case handler for USBDEVFS_REAPURB / USBDEVFS_REAPURBNDELAY.
 *
 * The kernel returns a host pointer to the usbdevfs_urb we submitted;
 * that pointer is mapped back to the enclosing live_urb via offsetof,
 * the guest urb struct and its data buffer are updated, and the guest
 * URB handle is written back through 'arg'. The live_urb is freed here.
 */
static abi_long
do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
                          int fd, int cmd, abi_long arg)
{
    const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
    const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
    struct live_urb *lurb;
    void *argptr;
    uint64_t hurb;
    int target_size;
    uintptr_t target_urb_adr;
    abi_long ret;

    target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);

    memset(buf_temp, 0, sizeof(uint64_t));
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (is_error(ret)) {
        return ret;
    }

    memcpy(&hurb, buf_temp, sizeof(uint64_t));
    /* Recover the wrapper from the embedded host_urb's address. */
    lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
    if (!lurb->target_urb_adr) {
        return -TARGET_EFAULT;
    }
    urb_hashtable_remove(lurb);
    /* Unlock writes the (possibly kernel-filled) data back to the guest. */
    unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
        lurb->host_urb.buffer_length);
    lurb->target_buf_ptr = NULL;

    /* restore the guest buffer pointer */
    lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;

    /* update the guest urb struct */
    argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
    unlock_user(argptr, lurb->target_urb_adr, target_size);

    target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
    /* write back the urb handle */
    argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
    target_urb_adr = lurb->target_urb_adr;
    thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
    unlock_user(argptr, arg, target_size);

    g_free(lurb);
    return ret;
}
4710 do_ioctl_usbdevfs_discardurb(const IOCTLEntry
*ie
,
4711 uint8_t *buf_temp
__attribute__((unused
)),
4712 int fd
, int cmd
, abi_long arg
)
4714 struct live_urb
*lurb
;
4716 /* map target address back to host URB with metadata. */
4717 lurb
= urb_hashtable_lookup(arg
);
4719 return -TARGET_EFAULT
;
4721 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
/*
 * Special-case handler for USBDEVFS_SUBMITURB.
 *
 * Builds a host copy of the guest urb inside a heap-allocated live_urb
 * (freed on error here, otherwise by the reap handler), locks the guest
 * data buffer for the whole transfer, and registers the wrapper so
 * REAPURB/DISCARDURB can find it again.
 */
static abi_long
do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    abi_long ret;
    void *argptr;
    int rw_dir;
    struct live_urb *lurb;

    /*
     * each submitted URB needs to map to a unique ID for the
     * kernel, and that unique ID needs to be a pointer to
     * host memory.  hence, we need to malloc for each URB.
     * isochronous transfers have a variable length struct.
     */
    arg_type++;
    target_size = thunk_type_size(arg_type, THUNK_TARGET);

    /* construct host copy of urb and metadata */
    lurb = g_try_malloc0(sizeof(struct live_urb));
    if (!lurb) {
        return -TARGET_ENOMEM;
    }

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    lurb->target_urb_adr = arg;
    lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;

    /* buffer space used depends on endpoint type so lock the entire buffer */
    /* control type urbs should check the buffer contents for true direction */
    rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
    lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
        lurb->host_urb.buffer_length, 1);
    if (lurb->target_buf_ptr == NULL) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* update buffer pointer in host copy */
    lurb->host_urb.buffer = lurb->target_buf_ptr;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
    if (is_error(ret)) {
        /* Submission failed: nothing will reap this URB, clean up now. */
        unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
        g_free(lurb);
    } else {
        urb_hashtable_insert(lurb);
    }

    return ret;
}
#endif /* CONFIG_USBFS */
/*
 * Special-case handler for the device-mapper (DM_*) ioctls.
 *
 * Every DM ioctl carries a struct dm_ioctl header followed by a
 * variable-length, command-specific payload at data_start. Because the
 * fixed buf_temp is too small for the payload, a heap buffer sized from
 * the guest's data_size is used instead. Input payloads are converted
 * guest->host before the call; output payloads are walked structure by
 * structure (they are linked by self-relative 'next' offsets) and
 * converted host->guest afterwards, setting DM_BUFFER_FULL_FLAG when the
 * guest buffer cannot hold the result.
 */
static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                            int cmd, abi_long arg)
{
    void *argptr;
    struct dm_ioctl *host_dm;
    abi_long guest_data;
    uint32_t guest_data_size;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    abi_long ret;
    void *big_buf = NULL;
    char *host_data;

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* buf_temp is too small, so fetch things into a bigger buffer */
    big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
    memcpy(big_buf, buf_temp, target_size);
    buf_temp = big_buf;
    host_dm = big_buf;

    guest_data = arg + host_dm->data_start;
    if ((guest_data - arg) < 0) {
        ret = -TARGET_EINVAL;
        goto out;
    }
    guest_data_size = host_dm->data_size - host_dm->data_start;
    host_data = (char*)host_dm + host_dm->data_start;

    argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }

    /* Convert the command-specific INPUT payload, if any. */
    switch (ie->host_cmd) {
    case DM_REMOVE_ALL:
    case DM_LIST_DEVICES:
    case DM_DEV_CREATE:
    case DM_DEV_REMOVE:
    case DM_DEV_SUSPEND:
    case DM_DEV_STATUS:
    case DM_DEV_WAIT:
    case DM_TABLE_STATUS:
    case DM_TABLE_CLEAR:
    case DM_TABLE_DEPS:
    case DM_LIST_VERSIONS:
        /* no input data */
        break;
    case DM_DEV_RENAME:
    case DM_DEV_SET_GEOMETRY:
        /* data contains only strings */
        memcpy(host_data, argptr, guest_data_size);
        break;
    case DM_TARGET_MSG:
        memcpy(host_data, argptr, guest_data_size);
        /* Leading 64-bit sector number needs byte-swapping. */
        *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
        break;
    case DM_TABLE_LOAD:
    {
        /* Payload: target_count dm_target_spec structs, each followed by
         * a NUL-terminated parameter string, chained by 'next' offsets. */
        void *gspec = argptr;
        void *cur_data = host_data;
        const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
        int spec_size = thunk_type_size(arg_type, 0);
        int i;

        for (i = 0; i < host_dm->target_count; i++) {
            struct dm_target_spec *spec = cur_data;
            uint32_t next;
            int slen;

            thunk_convert(spec, gspec, arg_type, THUNK_HOST);
            slen = strlen((char*)gspec + spec_size) + 1;
            next = spec->next;
            spec->next = sizeof(*spec) + slen;
            strcpy((char*)&spec[1], gspec + spec_size);
            gspec += next;
            cur_data += spec->next;
        }
        break;
    }
    default:
        ret = -TARGET_EINVAL;
        unlock_user(argptr, guest_data, 0);
        goto out;
    }
    unlock_user(argptr, guest_data, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (!is_error(ret)) {
        guest_data = arg + host_dm->data_start;
        guest_data_size = host_dm->data_size - host_dm->data_start;
        argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        /* Convert the command-specific OUTPUT payload, if any. */
        switch (ie->host_cmd) {
        case DM_REMOVE_ALL:
        case DM_DEV_CREATE:
        case DM_DEV_REMOVE:
        case DM_DEV_RENAME:
        case DM_DEV_SUSPEND:
        case DM_DEV_STATUS:
        case DM_TABLE_LOAD:
        case DM_TABLE_CLEAR:
        case DM_TARGET_MSG:
        case DM_DEV_SET_GEOMETRY:
            /* no return data */
            break;
        case DM_LIST_DEVICES:
        {
            struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
            int nl_size = 12; /* can't use thunk_size due to alignment */

            while (1) {
                uint32_t next = nl->next;
                if (next) {
                    nl->next = nl_size + (strlen(nl->name) + 1);
                }
                if (remaining_data < nl->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
                strcpy(cur_data + nl_size, nl->name);
                cur_data += nl->next;
                remaining_data -= nl->next;
                if (!next) {
                    break;
                }
                nl = (void*)nl + next;
            }
            break;
        }
        case DM_DEV_WAIT:
        case DM_TABLE_STATUS:
        {
            struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
            int spec_size = thunk_type_size(arg_type, 0);
            int i;

            for (i = 0; i < host_dm->target_count; i++) {
                uint32_t next = spec->next;
                int slen = strlen((char*)&spec[1]) + 1;
                spec->next = (cur_data - argptr) + spec_size + slen;
                if (guest_data_size < spec->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
                strcpy(cur_data + spec_size, (char*)&spec[1]);
                cur_data = argptr + spec->next;
                spec = (void*)host_dm + host_dm->data_start + next;
            }
            break;
        }
        case DM_TABLE_DEPS:
        {
            /* Payload: a count followed by 64-bit device numbers. */
            void *hdata = (void*)host_dm + host_dm->data_start;
            int count = *(uint32_t*)hdata;
            uint64_t *hdev = hdata + 8;
            uint64_t *gdev = argptr + 8;
            int i;

            *(uint32_t*)argptr = tswap32(count);
            for (i = 0; i < count; i++) {
                *gdev = tswap64(*hdev);
                gdev++;
                hdev++;
            }
            break;
        }
        case DM_LIST_VERSIONS:
        {
            struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
            int vers_size = thunk_type_size(arg_type, 0);

            while (1) {
                uint32_t next = vers->next;
                if (next) {
                    vers->next = vers_size + (strlen(vers->name) + 1);
                }
                if (remaining_data < vers->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
                strcpy(cur_data + vers_size, vers->name);
                cur_data += vers->next;
                remaining_data -= vers->next;
                if (!next) {
                    break;
                }
                vers = (void*)vers + next;
            }
            break;
        }
        default:
            unlock_user(argptr, guest_data, 0);
            ret = -TARGET_EINVAL;
            goto out;
        }
        unlock_user(argptr, guest_data, guest_data_size);

        /* Finally convert the dm_ioctl header itself back to the guest. */
        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
            goto out;
        }
        thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);
    }
out:
    g_free(big_buf);
    return ret;
}
5016 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5017 int cmd
, abi_long arg
)
5021 const argtype
*arg_type
= ie
->arg_type
;
5022 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
5025 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
5026 struct blkpg_partition host_part
;
5028 /* Read and convert blkpg */
5030 target_size
= thunk_type_size(arg_type
, 0);
5031 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5033 ret
= -TARGET_EFAULT
;
5036 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5037 unlock_user(argptr
, arg
, 0);
5039 switch (host_blkpg
->op
) {
5040 case BLKPG_ADD_PARTITION
:
5041 case BLKPG_DEL_PARTITION
:
5042 /* payload is struct blkpg_partition */
5045 /* Unknown opcode */
5046 ret
= -TARGET_EINVAL
;
5050 /* Read and convert blkpg->data */
5051 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
5052 target_size
= thunk_type_size(part_arg_type
, 0);
5053 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5055 ret
= -TARGET_EFAULT
;
5058 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
5059 unlock_user(argptr
, arg
, 0);
5061 /* Swizzle the data pointer to our local copy and call! */
5062 host_blkpg
->data
= &host_part
;
5063 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
5069 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5070 int fd
, int cmd
, abi_long arg
)
5072 const argtype
*arg_type
= ie
->arg_type
;
5073 const StructEntry
*se
;
5074 const argtype
*field_types
;
5075 const int *dst_offsets
, *src_offsets
;
5078 abi_ulong
*target_rt_dev_ptr
= NULL
;
5079 unsigned long *host_rt_dev_ptr
= NULL
;
5083 assert(ie
->access
== IOC_W
);
5084 assert(*arg_type
== TYPE_PTR
);
5086 assert(*arg_type
== TYPE_STRUCT
);
5087 target_size
= thunk_type_size(arg_type
, 0);
5088 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5090 return -TARGET_EFAULT
;
5093 assert(*arg_type
== (int)STRUCT_rtentry
);
5094 se
= struct_entries
+ *arg_type
++;
5095 assert(se
->convert
[0] == NULL
);
5096 /* convert struct here to be able to catch rt_dev string */
5097 field_types
= se
->field_types
;
5098 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
5099 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
5100 for (i
= 0; i
< se
->nb_fields
; i
++) {
5101 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
5102 assert(*field_types
== TYPE_PTRVOID
);
5103 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
5104 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
5105 if (*target_rt_dev_ptr
!= 0) {
5106 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
5107 tswapal(*target_rt_dev_ptr
));
5108 if (!*host_rt_dev_ptr
) {
5109 unlock_user(argptr
, arg
, 0);
5110 return -TARGET_EFAULT
;
5113 *host_rt_dev_ptr
= 0;
5118 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
5119 argptr
+ src_offsets
[i
],
5120 field_types
, THUNK_HOST
);
5122 unlock_user(argptr
, arg
, 0);
5124 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5126 assert(host_rt_dev_ptr
!= NULL
);
5127 assert(target_rt_dev_ptr
!= NULL
);
5128 if (*host_rt_dev_ptr
!= 0) {
5129 unlock_user((void *)*host_rt_dev_ptr
,
5130 *target_rt_dev_ptr
, 0);
5135 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5136 int fd
, int cmd
, abi_long arg
)
5138 int sig
= target_to_host_signal(arg
);
5139 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
5142 static abi_long
do_ioctl_SIOCGSTAMP(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5143 int fd
, int cmd
, abi_long arg
)
5148 ret
= get_errno(safe_ioctl(fd
, SIOCGSTAMP
, &tv
));
5149 if (is_error(ret
)) {
5153 if (cmd
== (int)TARGET_SIOCGSTAMP_OLD
) {
5154 if (copy_to_user_timeval(arg
, &tv
)) {
5155 return -TARGET_EFAULT
;
5158 if (copy_to_user_timeval64(arg
, &tv
)) {
5159 return -TARGET_EFAULT
;
5166 static abi_long
do_ioctl_SIOCGSTAMPNS(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5167 int fd
, int cmd
, abi_long arg
)
5172 ret
= get_errno(safe_ioctl(fd
, SIOCGSTAMPNS
, &ts
));
5173 if (is_error(ret
)) {
5177 if (cmd
== (int)TARGET_SIOCGSTAMPNS_OLD
) {
5178 if (host_to_target_timespec(arg
, &ts
)) {
5179 return -TARGET_EFAULT
;
5182 if (host_to_target_timespec64(arg
, &ts
)) {
5183 return -TARGET_EFAULT
;
5191 static abi_long
do_ioctl_tiocgptpeer(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5192 int fd
, int cmd
, abi_long arg
)
5194 int flags
= target_to_host_bitmask(arg
, fcntl_flags_tbl
);
5195 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, flags
));
5199 static IOCTLEntry ioctl_entries
[] = {
5200 #define IOCTL(cmd, access, ...) \
5201 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5202 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5203 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5204 #define IOCTL_IGNORE(cmd) \
5205 { TARGET_ ## cmd, 0, #cmd },
5210 /* ??? Implement proper locking for ioctls. */
5211 /* do_ioctl() Must return target values and target errnos. */
5212 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
5214 const IOCTLEntry
*ie
;
5215 const argtype
*arg_type
;
5217 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
5223 if (ie
->target_cmd
== 0) {
5225 LOG_UNIMP
, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
5226 return -TARGET_ENOSYS
;
5228 if (ie
->target_cmd
== cmd
)
5232 arg_type
= ie
->arg_type
;
5234 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
5235 } else if (!ie
->host_cmd
) {
5236 /* Some architectures define BSD ioctls in their headers
5237 that are not implemented in Linux. */
5238 return -TARGET_ENOSYS
;
5241 switch(arg_type
[0]) {
5244 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
5250 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
5254 target_size
= thunk_type_size(arg_type
, 0);
5255 switch(ie
->access
) {
5257 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5258 if (!is_error(ret
)) {
5259 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5261 return -TARGET_EFAULT
;
5262 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5263 unlock_user(argptr
, arg
, target_size
);
5267 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5269 return -TARGET_EFAULT
;
5270 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5271 unlock_user(argptr
, arg
, 0);
5272 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5276 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5278 return -TARGET_EFAULT
;
5279 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5280 unlock_user(argptr
, arg
, 0);
5281 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5282 if (!is_error(ret
)) {
5283 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5285 return -TARGET_EFAULT
;
5286 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5287 unlock_user(argptr
, arg
, target_size
);
5293 qemu_log_mask(LOG_UNIMP
,
5294 "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5295 (long)cmd
, arg_type
[0]);
5296 ret
= -TARGET_ENOSYS
;
5302 static const bitmask_transtbl iflag_tbl
[] = {
5303 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
5304 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
5305 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
5306 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
5307 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
5308 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
5309 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
5310 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
5311 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
5312 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
5313 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
5314 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
5315 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
5316 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
5320 static const bitmask_transtbl oflag_tbl
[] = {
5321 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
5322 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
5323 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
5324 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
5325 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
5326 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
5327 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
5328 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
5329 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
5330 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
5331 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
5332 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
5333 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
5334 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
5335 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
5336 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
5337 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
5338 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
5339 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
5340 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
5341 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
5342 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
5343 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
5344 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
5348 static const bitmask_transtbl cflag_tbl
[] = {
5349 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
5350 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
5351 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
5352 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
5353 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
5354 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
5355 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
5356 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
5357 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
5358 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
5359 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
5360 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
5361 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
5362 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
5363 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
5364 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
5365 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
5366 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
5367 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
5368 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
5369 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
5370 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
5371 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
5372 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
5373 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
5374 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
5375 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
5376 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
5377 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
5378 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
5379 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
5383 static const bitmask_transtbl lflag_tbl
[] = {
5384 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
5385 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
5386 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
5387 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
5388 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
5389 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
5390 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
5391 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
5392 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
5393 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
5394 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
5395 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
5396 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
5397 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
5398 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
5402 static void target_to_host_termios (void *dst
, const void *src
)
5404 struct host_termios
*host
= dst
;
5405 const struct target_termios
*target
= src
;
5408 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
5410 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
5412 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
5414 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
5415 host
->c_line
= target
->c_line
;
5417 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
5418 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
5419 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
5420 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
5421 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
5422 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
5423 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
5424 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
5425 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
5426 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
5427 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
5428 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
5429 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
5430 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
5431 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
5432 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
5433 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
5434 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
5437 static void host_to_target_termios (void *dst
, const void *src
)
5439 struct target_termios
*target
= dst
;
5440 const struct host_termios
*host
= src
;
5443 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
5445 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
5447 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
5449 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
5450 target
->c_line
= host
->c_line
;
5452 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
5453 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
5454 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
5455 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
5456 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
5457 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
5458 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
5459 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
5460 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
5461 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
5462 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
5463 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
5464 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
5465 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
5466 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
5467 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
5468 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
5469 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
/* Thunk descriptor for termios conversion: convert[] supplies the
 * host<->target converters defined above, size/align record the two
 * layouts (target at index 0, host at index 1).
 * NOTE(review): the initializer's closing line lies outside this
 * extracted fragment. */
5472 static const StructEntry struct_termios_def
= {
5473 .convert
= { host_to_target_termios
, target_to_host_termios
},
5474 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
5475 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
5478 static bitmask_transtbl mmap_flags_tbl
[] = {
5479 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
5480 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
5481 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
5482 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
,
5483 MAP_ANONYMOUS
, MAP_ANONYMOUS
},
5484 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
,
5485 MAP_GROWSDOWN
, MAP_GROWSDOWN
},
5486 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
,
5487 MAP_DENYWRITE
, MAP_DENYWRITE
},
5488 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
,
5489 MAP_EXECUTABLE
, MAP_EXECUTABLE
},
5490 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
5491 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
,
5492 MAP_NORESERVE
, MAP_NORESERVE
},
5493 { TARGET_MAP_HUGETLB
, TARGET_MAP_HUGETLB
, MAP_HUGETLB
, MAP_HUGETLB
},
5494 /* MAP_STACK had been ignored by the kernel for quite some time.
5495 Recognize it for the target insofar as we do not want to pass
5496 it through to the host. */
5497 { TARGET_MAP_STACK
, TARGET_MAP_STACK
, 0, 0 },
5501 #if defined(TARGET_I386)
5503 /* NOTE: there is really one LDT for all the threads */
5504 static uint8_t *ldt_table
;
5506 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
5513 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
5514 if (size
> bytecount
)
5516 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
5518 return -TARGET_EFAULT
;
5519 /* ??? Should this by byteswapped? */
5520 memcpy(p
, ldt_table
, size
);
5521 unlock_user(p
, ptr
, size
);
5525 /* XXX: add locking support */
5526 static abi_long
write_ldt(CPUX86State
*env
,
5527 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
5529 struct target_modify_ldt_ldt_s ldt_info
;
5530 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5531 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5532 int seg_not_present
, useable
, lm
;
5533 uint32_t *lp
, entry_1
, entry_2
;
5535 if (bytecount
!= sizeof(ldt_info
))
5536 return -TARGET_EINVAL
;
5537 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
5538 return -TARGET_EFAULT
;
5539 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5540 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5541 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5542 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5543 unlock_user_struct(target_ldt_info
, ptr
, 0);
5545 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
5546 return -TARGET_EINVAL
;
5547 seg_32bit
= ldt_info
.flags
& 1;
5548 contents
= (ldt_info
.flags
>> 1) & 3;
5549 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5550 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5551 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5552 useable
= (ldt_info
.flags
>> 6) & 1;
5556 lm
= (ldt_info
.flags
>> 7) & 1;
5558 if (contents
== 3) {
5560 return -TARGET_EINVAL
;
5561 if (seg_not_present
== 0)
5562 return -TARGET_EINVAL
;
5564 /* allocate the LDT */
5566 env
->ldt
.base
= target_mmap(0,
5567 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
5568 PROT_READ
|PROT_WRITE
,
5569 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
5570 if (env
->ldt
.base
== -1)
5571 return -TARGET_ENOMEM
;
5572 memset(g2h(env
->ldt
.base
), 0,
5573 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
5574 env
->ldt
.limit
= 0xffff;
5575 ldt_table
= g2h(env
->ldt
.base
);
5578 /* NOTE: same code as Linux kernel */
5579 /* Allow LDTs to be cleared by the user. */
5580 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5583 read_exec_only
== 1 &&
5585 limit_in_pages
== 0 &&
5586 seg_not_present
== 1 &&
5594 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5595 (ldt_info
.limit
& 0x0ffff);
5596 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5597 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5598 (ldt_info
.limit
& 0xf0000) |
5599 ((read_exec_only
^ 1) << 9) |
5601 ((seg_not_present
^ 1) << 15) |
5603 (limit_in_pages
<< 23) |
5607 entry_2
|= (useable
<< 20);
5609 /* Install the new entry ... */
5611 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
5612 lp
[0] = tswap32(entry_1
);
5613 lp
[1] = tswap32(entry_2
);
5617 /* specific and weird i386 syscalls */
5618 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
5619 unsigned long bytecount
)
5625 ret
= read_ldt(ptr
, bytecount
);
5628 ret
= write_ldt(env
, ptr
, bytecount
, 1);
5631 ret
= write_ldt(env
, ptr
, bytecount
, 0);
5634 ret
= -TARGET_ENOSYS
;
5640 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5641 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5643 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5644 struct target_modify_ldt_ldt_s ldt_info
;
5645 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5646 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5647 int seg_not_present
, useable
, lm
;
5648 uint32_t *lp
, entry_1
, entry_2
;
5651 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5652 if (!target_ldt_info
)
5653 return -TARGET_EFAULT
;
5654 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5655 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5656 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5657 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5658 if (ldt_info
.entry_number
== -1) {
5659 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
5660 if (gdt_table
[i
] == 0) {
5661 ldt_info
.entry_number
= i
;
5662 target_ldt_info
->entry_number
= tswap32(i
);
5667 unlock_user_struct(target_ldt_info
, ptr
, 1);
5669 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
5670 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
5671 return -TARGET_EINVAL
;
5672 seg_32bit
= ldt_info
.flags
& 1;
5673 contents
= (ldt_info
.flags
>> 1) & 3;
5674 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5675 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5676 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5677 useable
= (ldt_info
.flags
>> 6) & 1;
5681 lm
= (ldt_info
.flags
>> 7) & 1;
5684 if (contents
== 3) {
5685 if (seg_not_present
== 0)
5686 return -TARGET_EINVAL
;
5689 /* NOTE: same code as Linux kernel */
5690 /* Allow LDTs to be cleared by the user. */
5691 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5692 if ((contents
== 0 &&
5693 read_exec_only
== 1 &&
5695 limit_in_pages
== 0 &&
5696 seg_not_present
== 1 &&
5704 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5705 (ldt_info
.limit
& 0x0ffff);
5706 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5707 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5708 (ldt_info
.limit
& 0xf0000) |
5709 ((read_exec_only
^ 1) << 9) |
5711 ((seg_not_present
^ 1) << 15) |
5713 (limit_in_pages
<< 23) |
5718 /* Install the new entry ... */
5720 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
5721 lp
[0] = tswap32(entry_1
);
5722 lp
[1] = tswap32(entry_2
);
5726 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5728 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5729 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5730 uint32_t base_addr
, limit
, flags
;
5731 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
5732 int seg_not_present
, useable
, lm
;
5733 uint32_t *lp
, entry_1
, entry_2
;
5735 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5736 if (!target_ldt_info
)
5737 return -TARGET_EFAULT
;
5738 idx
= tswap32(target_ldt_info
->entry_number
);
5739 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
5740 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
5741 unlock_user_struct(target_ldt_info
, ptr
, 1);
5742 return -TARGET_EINVAL
;
5744 lp
= (uint32_t *)(gdt_table
+ idx
);
5745 entry_1
= tswap32(lp
[0]);
5746 entry_2
= tswap32(lp
[1]);
5748 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
5749 contents
= (entry_2
>> 10) & 3;
5750 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
5751 seg_32bit
= (entry_2
>> 22) & 1;
5752 limit_in_pages
= (entry_2
>> 23) & 1;
5753 useable
= (entry_2
>> 20) & 1;
5757 lm
= (entry_2
>> 21) & 1;
5759 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
5760 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
5761 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
5762 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
5763 base_addr
= (entry_1
>> 16) |
5764 (entry_2
& 0xff000000) |
5765 ((entry_2
& 0xff) << 16);
5766 target_ldt_info
->base_addr
= tswapal(base_addr
);
5767 target_ldt_info
->limit
= tswap32(limit
);
5768 target_ldt_info
->flags
= tswap32(flags
);
5769 unlock_user_struct(target_ldt_info
, ptr
, 1);
5772 #endif /* TARGET_I386 && TARGET_ABI32 */
5774 #ifndef TARGET_ABI32
5775 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
5782 case TARGET_ARCH_SET_GS
:
5783 case TARGET_ARCH_SET_FS
:
5784 if (code
== TARGET_ARCH_SET_GS
)
5788 cpu_x86_load_seg(env
, idx
, 0);
5789 env
->segs
[idx
].base
= addr
;
5791 case TARGET_ARCH_GET_GS
:
5792 case TARGET_ARCH_GET_FS
:
5793 if (code
== TARGET_ARCH_GET_GS
)
5797 val
= env
->segs
[idx
].base
;
5798 if (put_user(val
, addr
, abi_ulong
))
5799 ret
= -TARGET_EFAULT
;
5802 ret
= -TARGET_EINVAL
;
5809 #endif /* defined(TARGET_I386) */
5811 #define NEW_STACK_SIZE 0x40000
5814 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
5817 pthread_mutex_t mutex
;
5818 pthread_cond_t cond
;
5821 abi_ulong child_tidptr
;
5822 abi_ulong parent_tidptr
;
5826 static void *clone_func(void *arg
)
5828 new_thread_info
*info
= arg
;
5833 rcu_register_thread();
5834 tcg_register_thread();
5838 ts
= (TaskState
*)cpu
->opaque
;
5839 info
->tid
= sys_gettid();
5841 if (info
->child_tidptr
)
5842 put_user_u32(info
->tid
, info
->child_tidptr
);
5843 if (info
->parent_tidptr
)
5844 put_user_u32(info
->tid
, info
->parent_tidptr
);
5845 qemu_guest_random_seed_thread_part2(cpu
->random_seed
);
5846 /* Enable signals. */
5847 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
5848 /* Signal to the parent that we're ready. */
5849 pthread_mutex_lock(&info
->mutex
);
5850 pthread_cond_broadcast(&info
->cond
);
5851 pthread_mutex_unlock(&info
->mutex
);
5852 /* Wait until the parent has finished initializing the tls state. */
5853 pthread_mutex_lock(&clone_lock
);
5854 pthread_mutex_unlock(&clone_lock
);
5860 /* do_fork() Must return host values and target errnos (unlike most
5861 do_*() functions). */
5862 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
5863 abi_ulong parent_tidptr
, target_ulong newtls
,
5864 abi_ulong child_tidptr
)
5866 CPUState
*cpu
= env_cpu(env
);
5870 CPUArchState
*new_env
;
5873 flags
&= ~CLONE_IGNORED_FLAGS
;
5875 /* Emulate vfork() with fork() */
5876 if (flags
& CLONE_VFORK
)
5877 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
5879 if (flags
& CLONE_VM
) {
5880 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
5881 new_thread_info info
;
5882 pthread_attr_t attr
;
5884 if (((flags
& CLONE_THREAD_FLAGS
) != CLONE_THREAD_FLAGS
) ||
5885 (flags
& CLONE_INVALID_THREAD_FLAGS
)) {
5886 return -TARGET_EINVAL
;
5889 ts
= g_new0(TaskState
, 1);
5890 init_task_state(ts
);
5892 /* Grab a mutex so that thread setup appears atomic. */
5893 pthread_mutex_lock(&clone_lock
);
5895 /* we create a new CPU instance. */
5896 new_env
= cpu_copy(env
);
5897 /* Init regs that differ from the parent. */
5898 cpu_clone_regs_child(new_env
, newsp
, flags
);
5899 cpu_clone_regs_parent(env
, flags
);
5900 new_cpu
= env_cpu(new_env
);
5901 new_cpu
->opaque
= ts
;
5902 ts
->bprm
= parent_ts
->bprm
;
5903 ts
->info
= parent_ts
->info
;
5904 ts
->signal_mask
= parent_ts
->signal_mask
;
5906 if (flags
& CLONE_CHILD_CLEARTID
) {
5907 ts
->child_tidptr
= child_tidptr
;
5910 if (flags
& CLONE_SETTLS
) {
5911 cpu_set_tls (new_env
, newtls
);
5914 memset(&info
, 0, sizeof(info
));
5915 pthread_mutex_init(&info
.mutex
, NULL
);
5916 pthread_mutex_lock(&info
.mutex
);
5917 pthread_cond_init(&info
.cond
, NULL
);
5919 if (flags
& CLONE_CHILD_SETTID
) {
5920 info
.child_tidptr
= child_tidptr
;
5922 if (flags
& CLONE_PARENT_SETTID
) {
5923 info
.parent_tidptr
= parent_tidptr
;
5926 ret
= pthread_attr_init(&attr
);
5927 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
5928 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
5929 /* It is not safe to deliver signals until the child has finished
5930 initializing, so temporarily block all signals. */
5931 sigfillset(&sigmask
);
5932 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
5933 cpu
->random_seed
= qemu_guest_random_seed_thread_part1();
5935 /* If this is our first additional thread, we need to ensure we
5936 * generate code for parallel execution and flush old translations.
5938 if (!parallel_cpus
) {
5939 parallel_cpus
= true;
5943 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
5944 /* TODO: Free new CPU state if thread creation failed. */
5946 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
5947 pthread_attr_destroy(&attr
);
5949 /* Wait for the child to initialize. */
5950 pthread_cond_wait(&info
.cond
, &info
.mutex
);
5955 pthread_mutex_unlock(&info
.mutex
);
5956 pthread_cond_destroy(&info
.cond
);
5957 pthread_mutex_destroy(&info
.mutex
);
5958 pthread_mutex_unlock(&clone_lock
);
5960 /* if no CLONE_VM, we consider it is a fork */
5961 if (flags
& CLONE_INVALID_FORK_FLAGS
) {
5962 return -TARGET_EINVAL
;
5965 /* We can't support custom termination signals */
5966 if ((flags
& CSIGNAL
) != TARGET_SIGCHLD
) {
5967 return -TARGET_EINVAL
;
5970 if (block_signals()) {
5971 return -TARGET_ERESTARTSYS
;
5977 /* Child Process. */
5978 cpu_clone_regs_child(env
, newsp
, flags
);
5980 /* There is a race condition here. The parent process could
5981 theoretically read the TID in the child process before the child
5982 tid is set. This would require using either ptrace
5983 (not implemented) or having *_tidptr to point at a shared memory
5984 mapping. We can't repeat the spinlock hack used above because
5985 the child process gets its own copy of the lock. */
5986 if (flags
& CLONE_CHILD_SETTID
)
5987 put_user_u32(sys_gettid(), child_tidptr
);
5988 if (flags
& CLONE_PARENT_SETTID
)
5989 put_user_u32(sys_gettid(), parent_tidptr
);
5990 ts
= (TaskState
*)cpu
->opaque
;
5991 if (flags
& CLONE_SETTLS
)
5992 cpu_set_tls (env
, newtls
);
5993 if (flags
& CLONE_CHILD_CLEARTID
)
5994 ts
->child_tidptr
= child_tidptr
;
5996 cpu_clone_regs_parent(env
, flags
);
6003 /* warning : doesn't handle linux specific flags... */
6004 static int target_to_host_fcntl_cmd(int cmd
)
6009 case TARGET_F_DUPFD
:
6010 case TARGET_F_GETFD
:
6011 case TARGET_F_SETFD
:
6012 case TARGET_F_GETFL
:
6013 case TARGET_F_SETFL
:
6016 case TARGET_F_GETLK
:
6019 case TARGET_F_SETLK
:
6022 case TARGET_F_SETLKW
:
6025 case TARGET_F_GETOWN
:
6028 case TARGET_F_SETOWN
:
6031 case TARGET_F_GETSIG
:
6034 case TARGET_F_SETSIG
:
6037 #if TARGET_ABI_BITS == 32
6038 case TARGET_F_GETLK64
:
6041 case TARGET_F_SETLK64
:
6044 case TARGET_F_SETLKW64
:
6048 case TARGET_F_SETLEASE
:
6051 case TARGET_F_GETLEASE
:
6054 #ifdef F_DUPFD_CLOEXEC
6055 case TARGET_F_DUPFD_CLOEXEC
:
6056 ret
= F_DUPFD_CLOEXEC
;
6059 case TARGET_F_NOTIFY
:
6063 case TARGET_F_GETOWN_EX
:
6068 case TARGET_F_SETOWN_EX
:
6073 case TARGET_F_SETPIPE_SZ
:
6076 case TARGET_F_GETPIPE_SZ
:
6081 ret
= -TARGET_EINVAL
;
6085 #if defined(__powerpc64__)
6086 /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
6087 * is not supported by kernel. The glibc fcntl call actually adjusts
6088 * them to 5, 6 and 7 before making the syscall(). Since we make the
6089 * syscall directly, adjust to what is supported by the kernel.
6091 if (ret
>= F_GETLK64
&& ret
<= F_SETLKW64
) {
6092 ret
-= F_GETLK64
- 5;
6099 #define FLOCK_TRANSTBL \
6101 TRANSTBL_CONVERT(F_RDLCK); \
6102 TRANSTBL_CONVERT(F_WRLCK); \
6103 TRANSTBL_CONVERT(F_UNLCK); \
6104 TRANSTBL_CONVERT(F_EXLCK); \
6105 TRANSTBL_CONVERT(F_SHLCK); \
/* Map a guest flock l_type constant (F_RDLCK etc.) to the host value.
 * TRANSTBL_CONVERT expands the shared FLOCK_TRANSTBL list into switch
 * cases; anything unrecognised yields -TARGET_EINVAL.
 * NOTE(review): the switch statement itself (the FLOCK_TRANSTBL
 * expansion) lies outside this extracted fragment. */
6108 static int target_to_host_flock(int type
)
6110 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6112 #undef TRANSTBL_CONVERT
6113 return -TARGET_EINVAL
;
/* Inverse mapping: host flock l_type constant to the guest value, via
 * the same FLOCK_TRANSTBL list with the conversion direction reversed.
 * NOTE(review): the switch body and the trailing pass-through return
 * lie outside this extracted fragment. */
6116 static int host_to_target_flock(int type
)
6118 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6120 #undef TRANSTBL_CONVERT
6121 /* if we don't know how to convert the value coming
6122 * from the host we copy to the target field as-is
6127 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
6128 abi_ulong target_flock_addr
)
6130 struct target_flock
*target_fl
;
6133 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6134 return -TARGET_EFAULT
;
6137 __get_user(l_type
, &target_fl
->l_type
);
6138 l_type
= target_to_host_flock(l_type
);
6142 fl
->l_type
= l_type
;
6143 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6144 __get_user(fl
->l_start
, &target_fl
->l_start
);
6145 __get_user(fl
->l_len
, &target_fl
->l_len
);
6146 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6147 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6151 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
6152 const struct flock64
*fl
)
6154 struct target_flock
*target_fl
;
6157 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6158 return -TARGET_EFAULT
;
6161 l_type
= host_to_target_flock(fl
->l_type
);
6162 __put_user(l_type
, &target_fl
->l_type
);
6163 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6164 __put_user(fl
->l_start
, &target_fl
->l_start
);
6165 __put_user(fl
->l_len
, &target_fl
->l_len
);
6166 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6167 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6171 typedef abi_long
from_flock64_fn(struct flock64
*fl
, abi_ulong target_addr
);
6172 typedef abi_long
to_flock64_fn(abi_ulong target_addr
, const struct flock64
*fl
);
6174 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6175 static inline abi_long
copy_from_user_oabi_flock64(struct flock64
*fl
,
6176 abi_ulong target_flock_addr
)
6178 struct target_oabi_flock64
*target_fl
;
6181 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6182 return -TARGET_EFAULT
;
6185 __get_user(l_type
, &target_fl
->l_type
);
6186 l_type
= target_to_host_flock(l_type
);
6190 fl
->l_type
= l_type
;
6191 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6192 __get_user(fl
->l_start
, &target_fl
->l_start
);
6193 __get_user(fl
->l_len
, &target_fl
->l_len
);
6194 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6195 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6199 static inline abi_long
copy_to_user_oabi_flock64(abi_ulong target_flock_addr
,
6200 const struct flock64
*fl
)
6202 struct target_oabi_flock64
*target_fl
;
6205 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6206 return -TARGET_EFAULT
;
6209 l_type
= host_to_target_flock(fl
->l_type
);
6210 __put_user(l_type
, &target_fl
->l_type
);
6211 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6212 __put_user(fl
->l_start
, &target_fl
->l_start
);
6213 __put_user(fl
->l_len
, &target_fl
->l_len
);
6214 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6215 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6220 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
6221 abi_ulong target_flock_addr
)
6223 struct target_flock64
*target_fl
;
6226 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6227 return -TARGET_EFAULT
;
6230 __get_user(l_type
, &target_fl
->l_type
);
6231 l_type
= target_to_host_flock(l_type
);
6235 fl
->l_type
= l_type
;
6236 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6237 __get_user(fl
->l_start
, &target_fl
->l_start
);
6238 __get_user(fl
->l_len
, &target_fl
->l_len
);
6239 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6240 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6244 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
6245 const struct flock64
*fl
)
6247 struct target_flock64
*target_fl
;
6250 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6251 return -TARGET_EFAULT
;
6254 l_type
= host_to_target_flock(fl
->l_type
);
6255 __put_user(l_type
, &target_fl
->l_type
);
6256 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6257 __put_user(fl
->l_start
, &target_fl
->l_start
);
6258 __put_user(fl
->l_len
, &target_fl
->l_len
);
6259 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6260 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6264 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
6266 struct flock64 fl64
;
6268 struct f_owner_ex fox
;
6269 struct target_f_owner_ex
*target_fox
;
6272 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
6274 if (host_cmd
== -TARGET_EINVAL
)
6278 case TARGET_F_GETLK
:
6279 ret
= copy_from_user_flock(&fl64
, arg
);
6283 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6285 ret
= copy_to_user_flock(arg
, &fl64
);
6289 case TARGET_F_SETLK
:
6290 case TARGET_F_SETLKW
:
6291 ret
= copy_from_user_flock(&fl64
, arg
);
6295 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6298 case TARGET_F_GETLK64
:
6299 ret
= copy_from_user_flock64(&fl64
, arg
);
6303 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6305 ret
= copy_to_user_flock64(arg
, &fl64
);
6308 case TARGET_F_SETLK64
:
6309 case TARGET_F_SETLKW64
:
6310 ret
= copy_from_user_flock64(&fl64
, arg
);
6314 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6317 case TARGET_F_GETFL
:
6318 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6320 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
6324 case TARGET_F_SETFL
:
6325 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
6326 target_to_host_bitmask(arg
,
6331 case TARGET_F_GETOWN_EX
:
6332 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6334 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
6335 return -TARGET_EFAULT
;
6336 target_fox
->type
= tswap32(fox
.type
);
6337 target_fox
->pid
= tswap32(fox
.pid
);
6338 unlock_user_struct(target_fox
, arg
, 1);
6344 case TARGET_F_SETOWN_EX
:
6345 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
6346 return -TARGET_EFAULT
;
6347 fox
.type
= tswap32(target_fox
->type
);
6348 fox
.pid
= tswap32(target_fox
->pid
);
6349 unlock_user_struct(target_fox
, arg
, 0);
6350 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6354 case TARGET_F_SETOWN
:
6355 case TARGET_F_GETOWN
:
6356 case TARGET_F_SETSIG
:
6357 case TARGET_F_GETSIG
:
6358 case TARGET_F_SETLEASE
:
6359 case TARGET_F_GETLEASE
:
6360 case TARGET_F_SETPIPE_SZ
:
6361 case TARGET_F_GETPIPE_SZ
:
6362 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6366 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
#ifdef USE_UID16
/* Clamp a 32-bit uid to the 16-bit range used by the *16 syscall ABI. */
static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}
static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}
/* Widen a 16-bit uid, preserving the -1 "no change" sentinel. */
static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}
static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}
static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
/* We must do direct syscalls for setting UID/GID, because we want to
 * implement the Linux system call semantics of "change only for this thread",
 * not the libc/POSIX semantics of "change for all threads in process".
 * (See http://ewontfix.com/17/ for more details.)
 * We use the 32-bit version of the syscalls if present; if it is not
 * then either the host architecture supports 32-bit UIDs natively with
 * the standard syscall, or the 16-bit UID is the best we can do.
 */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif
6467 _syscall1(int, sys_setuid
, uid_t
, uid
)
6468 _syscall1(int, sys_setgid
, gid_t
, gid
)
6469 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
6470 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
6472 void syscall_init(void)
6475 const argtype
*arg_type
;
6479 thunk_init(STRUCT_MAX
);
6481 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6482 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6483 #include "syscall_types.h"
6485 #undef STRUCT_SPECIAL
6487 /* Build target_to_host_errno_table[] table from
6488 * host_to_target_errno_table[]. */
6489 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
6490 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
6493 /* we patch the ioctl size if necessary. We rely on the fact that
6494 no ioctl has all the bits at '1' in the size field */
6496 while (ie
->target_cmd
!= 0) {
6497 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
6498 TARGET_IOC_SIZEMASK
) {
6499 arg_type
= ie
->arg_type
;
6500 if (arg_type
[0] != TYPE_PTR
) {
6501 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
6506 size
= thunk_type_size(arg_type
, 0);
6507 ie
->target_cmd
= (ie
->target_cmd
&
6508 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
6509 (size
<< TARGET_IOC_SIZESHIFT
);
6512 /* automatic consistency check if same arch */
6513 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6514 (defined(__x86_64__) && defined(TARGET_X86_64))
6515 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
6516 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6517 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#if TARGET_ABI_BITS == 32
/* Combine the two guest registers carrying a 64-bit offset; register
 * order depends on the guest endianness. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* On 64-bit ABIs the offset fits in a single register. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* truncate64: reassemble the 64-bit length from the guest register pair,
 * skipping a pad register if the guest ABI aligns register pairs. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* ftruncate64: same register-pair handling as target_truncate64 above. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
6568 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
6569 abi_ulong target_addr
)
6571 struct target_itimerspec
*target_itspec
;
6573 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
6574 return -TARGET_EFAULT
;
6577 host_itspec
->it_interval
.tv_sec
=
6578 tswapal(target_itspec
->it_interval
.tv_sec
);
6579 host_itspec
->it_interval
.tv_nsec
=
6580 tswapal(target_itspec
->it_interval
.tv_nsec
);
6581 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
6582 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
6584 unlock_user_struct(target_itspec
, target_addr
, 1);
6588 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
6589 struct itimerspec
*host_its
)
6591 struct target_itimerspec
*target_itspec
;
6593 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
6594 return -TARGET_EFAULT
;
6597 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
6598 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
6600 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
6601 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
6603 unlock_user_struct(target_itspec
, target_addr
, 0);
6607 static inline abi_long
target_to_host_timex(struct timex
*host_tx
,
6608 abi_long target_addr
)
6610 struct target_timex
*target_tx
;
6612 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
6613 return -TARGET_EFAULT
;
6616 __get_user(host_tx
->modes
, &target_tx
->modes
);
6617 __get_user(host_tx
->offset
, &target_tx
->offset
);
6618 __get_user(host_tx
->freq
, &target_tx
->freq
);
6619 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6620 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
6621 __get_user(host_tx
->status
, &target_tx
->status
);
6622 __get_user(host_tx
->constant
, &target_tx
->constant
);
6623 __get_user(host_tx
->precision
, &target_tx
->precision
);
6624 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
6625 __get_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
6626 __get_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
6627 __get_user(host_tx
->tick
, &target_tx
->tick
);
6628 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
6629 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
6630 __get_user(host_tx
->shift
, &target_tx
->shift
);
6631 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
6632 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
6633 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
6634 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
6635 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
6636 __get_user(host_tx
->tai
, &target_tx
->tai
);
6638 unlock_user_struct(target_tx
, target_addr
, 0);
6642 static inline abi_long
host_to_target_timex(abi_long target_addr
,
6643 struct timex
*host_tx
)
6645 struct target_timex
*target_tx
;
6647 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
6648 return -TARGET_EFAULT
;
6651 __put_user(host_tx
->modes
, &target_tx
->modes
);
6652 __put_user(host_tx
->offset
, &target_tx
->offset
);
6653 __put_user(host_tx
->freq
, &target_tx
->freq
);
6654 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6655 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
6656 __put_user(host_tx
->status
, &target_tx
->status
);
6657 __put_user(host_tx
->constant
, &target_tx
->constant
);
6658 __put_user(host_tx
->precision
, &target_tx
->precision
);
6659 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
6660 __put_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
6661 __put_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
6662 __put_user(host_tx
->tick
, &target_tx
->tick
);
6663 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
6664 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
6665 __put_user(host_tx
->shift
, &target_tx
->shift
);
6666 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
6667 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
6668 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
6669 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
6670 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
6671 __put_user(host_tx
->tai
, &target_tx
->tai
);
6673 unlock_user_struct(target_tx
, target_addr
, 1);
6678 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
6679 abi_ulong target_addr
)
6681 struct target_sigevent
*target_sevp
;
6683 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
6684 return -TARGET_EFAULT
;
6687 /* This union is awkward on 64 bit systems because it has a 32 bit
6688 * integer and a pointer in it; we follow the conversion approach
6689 * used for handling sigval types in signal.c so the guest should get
6690 * the correct value back even if we did a 64 bit byteswap and it's
6691 * using the 32 bit integer.
6693 host_sevp
->sigev_value
.sival_ptr
=
6694 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
6695 host_sevp
->sigev_signo
=
6696 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
6697 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
6698 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
6700 unlock_user_struct(target_sevp
, target_addr
, 1);
#if defined(TARGET_NR_mlockall)
/* Translate the guest MCL_* flag bits to the host's values. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }
    return result;
}
#endif
#if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
     defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
     defined(TARGET_NR_newfstatat))
/* Convert a host struct stat to the guest's 64-bit stat layout.  On
 * 32-bit ARM the EABI and OABI layouts differ, so pick at run time. */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
        __put_user(host_st->st_atim.tv_nsec,
                   &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec,
                   &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec,
                   &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
        __put_user(host_st->st_atim.tv_nsec,
                   &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec,
                   &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec,
                   &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif
#if defined(TARGET_NR_statx) && defined(__NR_statx)
/* Byte-swap a (host-filled) struct target_statx out to guest memory. */
static inline abi_long host_to_target_statx(struct target_statx *host_stx,
                                            abi_ulong target_addr)
{
    struct target_statx *target_stx;

    if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    memset(target_stx, 0, sizeof(*target_stx));

    __put_user(host_stx->stx_mask, &target_stx->stx_mask);
    __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
    __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
    __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
    __put_user(host_stx->stx_uid, &target_stx->stx_uid);
    __put_user(host_stx->stx_gid, &target_stx->stx_gid);
    __put_user(host_stx->stx_mode, &target_stx->stx_mode);
    __put_user(host_stx->stx_ino, &target_stx->stx_ino);
    __put_user(host_stx->stx_size, &target_stx->stx_size);
    __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
    __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
    __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
    __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
    __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
    __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
    __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
    __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
    __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
    __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
    __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
    __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
    __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
    __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);

    unlock_user_struct(target_stx, target_addr, 1);

    return 0;
}
#endif
6838 /* ??? Using host futex calls even when target atomic operations
6839 are not really atomic probably breaks things. However implementing
6840 futexes locally would make futexes shared between multiple processes
6841 tricky. However they're probably useless because guest atomic
6842 operations won't work either. */
6843 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
6844 target_ulong uaddr2
, int val3
)
6846 struct timespec ts
, *pts
;
6849 /* ??? We assume FUTEX_* constants are the same on both host
6851 #ifdef FUTEX_CMD_MASK
6852 base_op
= op
& FUTEX_CMD_MASK
;
6858 case FUTEX_WAIT_BITSET
:
6861 target_to_host_timespec(pts
, timeout
);
6865 return get_errno(safe_futex(g2h(uaddr
), op
, tswap32(val
),
6868 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
6870 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
6872 case FUTEX_CMP_REQUEUE
:
6874 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6875 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6876 But the prototype takes a `struct timespec *'; insert casts
6877 to satisfy the compiler. We do not need to tswap TIMEOUT
6878 since it's not compared to guest memory. */
6879 pts
= (struct timespec
*)(uintptr_t) timeout
;
6880 return get_errno(safe_futex(g2h(uaddr
), op
, val
, pts
,
6882 (base_op
== FUTEX_CMP_REQUEUE
6886 return -TARGET_ENOSYS
;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate name_to_handle_at(2): marshal the pathname and the opaque
 * file_handle buffer between guest and host memory. */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate open_by_handle_at(2): copy the guest's file_handle into host
 * memory, converting the header fields, and translate the open flags. */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
#if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
/* Emulate signalfd4(2): convert the guest sigset and flags, and register
 * the resulting fd so reads through it get their siginfo translated. */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    sigset_t host_mask;
    abi_long ret;
    target_sigset_t *target_mask;

    if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
#endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
7022 static int open_self_cmdline(void *cpu_env
, int fd
)
7024 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7025 struct linux_binprm
*bprm
= ((TaskState
*)cpu
->opaque
)->bprm
;
7028 for (i
= 0; i
< bprm
->argc
; i
++) {
7029 size_t len
= strlen(bprm
->argv
[i
]) + 1;
7031 if (write(fd
, bprm
->argv
[i
], len
) != len
) {
7039 static int open_self_maps(void *cpu_env
, int fd
)
7041 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7042 TaskState
*ts
= cpu
->opaque
;
7048 fp
= fopen("/proc/self/maps", "r");
7053 while ((read
= getline(&line
, &len
, fp
)) != -1) {
7054 int fields
, dev_maj
, dev_min
, inode
;
7055 uint64_t min
, max
, offset
;
7056 char flag_r
, flag_w
, flag_x
, flag_p
;
7057 char path
[512] = "";
7058 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
7059 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
7060 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
7062 if ((fields
< 10) || (fields
> 11)) {
7065 if (h2g_valid(min
)) {
7066 int flags
= page_get_flags(h2g(min
));
7067 max
= h2g_valid(max
- 1) ? max
: (uintptr_t)g2h(GUEST_ADDR_MAX
) + 1;
7068 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
7071 if (h2g(min
) == ts
->info
->stack_limit
) {
7072 pstrcpy(path
, sizeof(path
), " [stack]");
7074 dprintf(fd
, TARGET_ABI_FMT_ptr
"-" TARGET_ABI_FMT_ptr
7075 " %c%c%c%c %08" PRIx64
" %02x:%02x %d %s%s\n",
7076 h2g(min
), h2g(max
- 1) + 1, flag_r
, flag_w
,
7077 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
7078 path
[0] ? " " : "", path
);
7088 static int open_self_stat(void *cpu_env
, int fd
)
7090 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7091 TaskState
*ts
= cpu
->opaque
;
7092 abi_ulong start_stack
= ts
->info
->start_stack
;
7095 for (i
= 0; i
< 44; i
++) {
7103 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
7104 } else if (i
== 1) {
7106 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
7107 } else if (i
== 27) {
7110 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
7112 /* for the rest, there is MasterCard */
7113 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
7117 if (write(fd
, buf
, len
) != len
) {
7125 static int open_self_auxv(void *cpu_env
, int fd
)
7127 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7128 TaskState
*ts
= cpu
->opaque
;
7129 abi_ulong auxv
= ts
->info
->saved_auxv
;
7130 abi_ulong len
= ts
->info
->auxv_len
;
7134 * Auxiliary vector is stored in target process stack.
7135 * read in whole auxv vector and copy it to file
7137 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
7141 r
= write(fd
, ptr
, len
);
7148 lseek(fd
, 0, SEEK_SET
);
7149 unlock_user(ptr
, auxv
, len
);
/* Return nonzero if filename is "/proc/<self-or-own-pid>/<entry>",
 * i.e. a /proc path that refers to this process's own entry. */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
    defined(TARGET_SPARC) || defined(TARGET_M68K)
/* Exact-path comparator used by the fake-/proc open table below. */
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}
#endif
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Emulate /proc/net/route for cross-endian guests: byte-swap the
 * address columns (dest/gateway/mask), which the kernel emits in
 * host byte order. */
static int open_net_route(void *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -1;
    }

    /* read header */

    read = getline(&line, &len, fp);
    dprintf(fd, "%s", line);

    /* read routes */

    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        int fields;

        fields = sscanf(line,
                        "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                        iface, &dest, &gw, &flags, &refcnt, &use, &metric,
                        &mask, &mtu, &window, &irtt);
        if (fields != 11) {
            continue;
        }
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
#endif
#if defined(TARGET_SPARC)
/* Emulate a minimal /proc/cpuinfo for SPARC guests. */
static int open_cpuinfo(void *cpu_env, int fd)
{
    dprintf(fd, "type\t\t: sun4u\n");
    return 0;
}
#endif
#if defined(TARGET_M68K)
/* Emulate a minimal /proc/hardware for m68k guests. */
static int open_hardware(void *cpu_env, int fd)
{
    dprintf(fd, "Model:\t\tqemu-m68k\n");
    return 0;
}
#endif
7248 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
7251 const char *filename
;
7252 int (*fill
)(void *cpu_env
, int fd
);
7253 int (*cmp
)(const char *s1
, const char *s2
);
7255 const struct fake_open
*fake_open
;
7256 static const struct fake_open fakes
[] = {
7257 { "maps", open_self_maps
, is_proc_myself
},
7258 { "stat", open_self_stat
, is_proc_myself
},
7259 { "auxv", open_self_auxv
, is_proc_myself
},
7260 { "cmdline", open_self_cmdline
, is_proc_myself
},
7261 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7262 { "/proc/net/route", open_net_route
, is_proc
},
7264 #if defined(TARGET_SPARC)
7265 { "/proc/cpuinfo", open_cpuinfo
, is_proc
},
7267 #if defined(TARGET_M68K)
7268 { "/proc/hardware", open_hardware
, is_proc
},
7270 { NULL
, NULL
, NULL
}
7273 if (is_proc_myself(pathname
, "exe")) {
7274 int execfd
= qemu_getauxval(AT_EXECFD
);
7275 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
7278 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
7279 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
7284 if (fake_open
->filename
) {
7286 char filename
[PATH_MAX
];
7289 /* create temporary file to map stat to */
7290 tmpdir
= getenv("TMPDIR");
7293 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
7294 fd
= mkstemp(filename
);
7300 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
7306 lseek(fd
, 0, SEEK_SET
);
7311 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
7314 #define TIMER_MAGIC 0x0caf0000
7315 #define TIMER_MAGIC_MASK 0xffff0000
7317 /* Convert QEMU provided timer ID back to internal 16bit index format */
7318 static target_timer_t
get_timer_id(abi_long arg
)
7320 target_timer_t timerid
= arg
;
7322 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
7323 return -TARGET_EINVAL
;
7328 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
7329 return -TARGET_EINVAL
;
7335 static int target_to_host_cpu_mask(unsigned long *host_mask
,
7337 abi_ulong target_addr
,
7340 unsigned target_bits
= sizeof(abi_ulong
) * 8;
7341 unsigned host_bits
= sizeof(*host_mask
) * 8;
7342 abi_ulong
*target_mask
;
7345 assert(host_size
>= target_size
);
7347 target_mask
= lock_user(VERIFY_READ
, target_addr
, target_size
, 1);
7349 return -TARGET_EFAULT
;
7351 memset(host_mask
, 0, host_size
);
7353 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
7354 unsigned bit
= i
* target_bits
;
7357 __get_user(val
, &target_mask
[i
]);
7358 for (j
= 0; j
< target_bits
; j
++, bit
++) {
7359 if (val
& (1UL << j
)) {
7360 host_mask
[bit
/ host_bits
] |= 1UL << (bit
% host_bits
);
7365 unlock_user(target_mask
, target_addr
, 0);
7369 static int host_to_target_cpu_mask(const unsigned long *host_mask
,
7371 abi_ulong target_addr
,
7374 unsigned target_bits
= sizeof(abi_ulong
) * 8;
7375 unsigned host_bits
= sizeof(*host_mask
) * 8;
7376 abi_ulong
*target_mask
;
7379 assert(host_size
>= target_size
);
7381 target_mask
= lock_user(VERIFY_WRITE
, target_addr
, target_size
, 0);
7383 return -TARGET_EFAULT
;
7386 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
7387 unsigned bit
= i
* target_bits
;
7390 for (j
= 0; j
< target_bits
; j
++, bit
++) {
7391 if (host_mask
[bit
/ host_bits
] & (1UL << (bit
% host_bits
))) {
7395 __put_user(val
, &target_mask
[i
]);
7398 unlock_user(target_mask
, target_addr
, target_size
);
7402 /* This is an internal helper for do_syscall so that it is easier
7403 * to have a single return point, so that actions, such as logging
7404 * of syscall results, can be performed.
7405 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7407 static abi_long
do_syscall1(void *cpu_env
, int num
, abi_long arg1
,
7408 abi_long arg2
, abi_long arg3
, abi_long arg4
,
7409 abi_long arg5
, abi_long arg6
, abi_long arg7
,
7412 CPUState
*cpu
= env_cpu(cpu_env
);
7414 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7415 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7416 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7417 || defined(TARGET_NR_statx)
7420 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7421 || defined(TARGET_NR_fstatfs)
7427 case TARGET_NR_exit
:
7428 /* In old applications this may be used to implement _exit(2).
7429 However in threaded applictions it is used for thread termination,
7430 and _exit_group is used for application termination.
7431 Do thread termination if we have more then one thread. */
7433 if (block_signals()) {
7434 return -TARGET_ERESTARTSYS
;
7439 if (CPU_NEXT(first_cpu
)) {
7442 /* Remove the CPU from the list. */
7443 QTAILQ_REMOVE_RCU(&cpus
, cpu
, node
);
7448 if (ts
->child_tidptr
) {
7449 put_user_u32(0, ts
->child_tidptr
);
7450 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
7454 object_unref(OBJECT(cpu
));
7456 rcu_unregister_thread();
7461 preexit_cleanup(cpu_env
, arg1
);
7463 return 0; /* avoid warning */
7464 case TARGET_NR_read
:
7465 if (arg2
== 0 && arg3
== 0) {
7466 return get_errno(safe_read(arg1
, 0, 0));
7468 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
7469 return -TARGET_EFAULT
;
7470 ret
= get_errno(safe_read(arg1
, p
, arg3
));
7472 fd_trans_host_to_target_data(arg1
)) {
7473 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
7475 unlock_user(p
, arg2
, ret
);
7478 case TARGET_NR_write
:
7479 if (arg2
== 0 && arg3
== 0) {
7480 return get_errno(safe_write(arg1
, 0, 0));
7482 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
7483 return -TARGET_EFAULT
;
7484 if (fd_trans_target_to_host_data(arg1
)) {
7485 void *copy
= g_malloc(arg3
);
7486 memcpy(copy
, p
, arg3
);
7487 ret
= fd_trans_target_to_host_data(arg1
)(copy
, arg3
);
7489 ret
= get_errno(safe_write(arg1
, copy
, ret
));
7493 ret
= get_errno(safe_write(arg1
, p
, arg3
));
7495 unlock_user(p
, arg2
, 0);
7498 #ifdef TARGET_NR_open
7499 case TARGET_NR_open
:
7500 if (!(p
= lock_user_string(arg1
)))
7501 return -TARGET_EFAULT
;
7502 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
7503 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
7505 fd_trans_unregister(ret
);
7506 unlock_user(p
, arg1
, 0);
7509 case TARGET_NR_openat
:
7510 if (!(p
= lock_user_string(arg2
)))
7511 return -TARGET_EFAULT
;
7512 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
7513 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
7515 fd_trans_unregister(ret
);
7516 unlock_user(p
, arg2
, 0);
7518 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7519 case TARGET_NR_name_to_handle_at
:
7520 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
7523 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7524 case TARGET_NR_open_by_handle_at
:
7525 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
7526 fd_trans_unregister(ret
);
7529 case TARGET_NR_close
:
7530 fd_trans_unregister(arg1
);
7531 return get_errno(close(arg1
));
7534 return do_brk(arg1
);
7535 #ifdef TARGET_NR_fork
7536 case TARGET_NR_fork
:
7537 return get_errno(do_fork(cpu_env
, TARGET_SIGCHLD
, 0, 0, 0, 0));
7539 #ifdef TARGET_NR_waitpid
7540 case TARGET_NR_waitpid
:
7543 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
7544 if (!is_error(ret
) && arg2
&& ret
7545 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
7546 return -TARGET_EFAULT
;
7550 #ifdef TARGET_NR_waitid
7551 case TARGET_NR_waitid
:
7555 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
7556 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
7557 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
7558 return -TARGET_EFAULT
;
7559 host_to_target_siginfo(p
, &info
);
7560 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
7565 #ifdef TARGET_NR_creat /* not on alpha */
7566 case TARGET_NR_creat
:
7567 if (!(p
= lock_user_string(arg1
)))
7568 return -TARGET_EFAULT
;
7569 ret
= get_errno(creat(p
, arg2
));
7570 fd_trans_unregister(ret
);
7571 unlock_user(p
, arg1
, 0);
7574 #ifdef TARGET_NR_link
7575 case TARGET_NR_link
:
7578 p
= lock_user_string(arg1
);
7579 p2
= lock_user_string(arg2
);
7581 ret
= -TARGET_EFAULT
;
7583 ret
= get_errno(link(p
, p2
));
7584 unlock_user(p2
, arg2
, 0);
7585 unlock_user(p
, arg1
, 0);
7589 #if defined(TARGET_NR_linkat)
7590 case TARGET_NR_linkat
:
7594 return -TARGET_EFAULT
;
7595 p
= lock_user_string(arg2
);
7596 p2
= lock_user_string(arg4
);
7598 ret
= -TARGET_EFAULT
;
7600 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
7601 unlock_user(p
, arg2
, 0);
7602 unlock_user(p2
, arg4
, 0);
7606 #ifdef TARGET_NR_unlink
7607 case TARGET_NR_unlink
:
7608 if (!(p
= lock_user_string(arg1
)))
7609 return -TARGET_EFAULT
;
7610 ret
= get_errno(unlink(p
));
7611 unlock_user(p
, arg1
, 0);
7614 #if defined(TARGET_NR_unlinkat)
7615 case TARGET_NR_unlinkat
:
7616 if (!(p
= lock_user_string(arg2
)))
7617 return -TARGET_EFAULT
;
7618 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
7619 unlock_user(p
, arg2
, 0);
7622 case TARGET_NR_execve
:
7624 char **argp
, **envp
;
7627 abi_ulong guest_argp
;
7628 abi_ulong guest_envp
;
7635 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
7636 if (get_user_ual(addr
, gp
))
7637 return -TARGET_EFAULT
;
7644 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
7645 if (get_user_ual(addr
, gp
))
7646 return -TARGET_EFAULT
;
7652 argp
= g_new0(char *, argc
+ 1);
7653 envp
= g_new0(char *, envc
+ 1);
7655 for (gp
= guest_argp
, q
= argp
; gp
;
7656 gp
+= sizeof(abi_ulong
), q
++) {
7657 if (get_user_ual(addr
, gp
))
7661 if (!(*q
= lock_user_string(addr
)))
7663 total_size
+= strlen(*q
) + 1;
7667 for (gp
= guest_envp
, q
= envp
; gp
;
7668 gp
+= sizeof(abi_ulong
), q
++) {
7669 if (get_user_ual(addr
, gp
))
7673 if (!(*q
= lock_user_string(addr
)))
7675 total_size
+= strlen(*q
) + 1;
7679 if (!(p
= lock_user_string(arg1
)))
7681 /* Although execve() is not an interruptible syscall it is
7682 * a special case where we must use the safe_syscall wrapper:
7683 * if we allow a signal to happen before we make the host
7684 * syscall then we will 'lose' it, because at the point of
7685 * execve the process leaves QEMU's control. So we use the
7686 * safe syscall wrapper to ensure that we either take the
7687 * signal as a guest signal, or else it does not happen
7688 * before the execve completes and makes it the other
7689 * program's problem.
7691 ret
= get_errno(safe_execve(p
, argp
, envp
));
7692 unlock_user(p
, arg1
, 0);
7697 ret
= -TARGET_EFAULT
;
7700 for (gp
= guest_argp
, q
= argp
; *q
;
7701 gp
+= sizeof(abi_ulong
), q
++) {
7702 if (get_user_ual(addr
, gp
)
7705 unlock_user(*q
, addr
, 0);
7707 for (gp
= guest_envp
, q
= envp
; *q
;
7708 gp
+= sizeof(abi_ulong
), q
++) {
7709 if (get_user_ual(addr
, gp
)
7712 unlock_user(*q
, addr
, 0);
7719 case TARGET_NR_chdir
:
7720 if (!(p
= lock_user_string(arg1
)))
7721 return -TARGET_EFAULT
;
7722 ret
= get_errno(chdir(p
));
7723 unlock_user(p
, arg1
, 0);
7725 #ifdef TARGET_NR_time
7726 case TARGET_NR_time
:
7729 ret
= get_errno(time(&host_time
));
7732 && put_user_sal(host_time
, arg1
))
7733 return -TARGET_EFAULT
;
7737 #ifdef TARGET_NR_mknod
7738 case TARGET_NR_mknod
:
7739 if (!(p
= lock_user_string(arg1
)))
7740 return -TARGET_EFAULT
;
7741 ret
= get_errno(mknod(p
, arg2
, arg3
));
7742 unlock_user(p
, arg1
, 0);
7745 #if defined(TARGET_NR_mknodat)
7746 case TARGET_NR_mknodat
:
7747 if (!(p
= lock_user_string(arg2
)))
7748 return -TARGET_EFAULT
;
7749 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
7750 unlock_user(p
, arg2
, 0);
7753 #ifdef TARGET_NR_chmod
7754 case TARGET_NR_chmod
:
7755 if (!(p
= lock_user_string(arg1
)))
7756 return -TARGET_EFAULT
;
7757 ret
= get_errno(chmod(p
, arg2
));
7758 unlock_user(p
, arg1
, 0);
7761 #ifdef TARGET_NR_lseek
7762 case TARGET_NR_lseek
:
7763 return get_errno(lseek(arg1
, arg2
, arg3
));
7765 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7766 /* Alpha specific */
7767 case TARGET_NR_getxpid
:
7768 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
7769 return get_errno(getpid());
7771 #ifdef TARGET_NR_getpid
7772 case TARGET_NR_getpid
:
7773 return get_errno(getpid());
7775 case TARGET_NR_mount
:
7777 /* need to look at the data field */
7781 p
= lock_user_string(arg1
);
7783 return -TARGET_EFAULT
;
7789 p2
= lock_user_string(arg2
);
7792 unlock_user(p
, arg1
, 0);
7794 return -TARGET_EFAULT
;
7798 p3
= lock_user_string(arg3
);
7801 unlock_user(p
, arg1
, 0);
7803 unlock_user(p2
, arg2
, 0);
7804 return -TARGET_EFAULT
;
7810 /* FIXME - arg5 should be locked, but it isn't clear how to
7811 * do that since it's not guaranteed to be a NULL-terminated
7815 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
7817 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
7819 ret
= get_errno(ret
);
7822 unlock_user(p
, arg1
, 0);
7824 unlock_user(p2
, arg2
, 0);
7826 unlock_user(p3
, arg3
, 0);
7830 #ifdef TARGET_NR_umount
7831 case TARGET_NR_umount
:
7832 if (!(p
= lock_user_string(arg1
)))
7833 return -TARGET_EFAULT
;
7834 ret
= get_errno(umount(p
));
7835 unlock_user(p
, arg1
, 0);
7838 #ifdef TARGET_NR_stime /* not on alpha */
7839 case TARGET_NR_stime
:
7843 if (get_user_sal(ts
.tv_sec
, arg1
)) {
7844 return -TARGET_EFAULT
;
7846 return get_errno(clock_settime(CLOCK_REALTIME
, &ts
));
7849 #ifdef TARGET_NR_alarm /* not on alpha */
7850 case TARGET_NR_alarm
:
7853 #ifdef TARGET_NR_pause /* not on alpha */
7854 case TARGET_NR_pause
:
7855 if (!block_signals()) {
7856 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
7858 return -TARGET_EINTR
;
7860 #ifdef TARGET_NR_utime
7861 case TARGET_NR_utime
:
7863 struct utimbuf tbuf
, *host_tbuf
;
7864 struct target_utimbuf
*target_tbuf
;
7866 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
7867 return -TARGET_EFAULT
;
7868 tbuf
.actime
= tswapal(target_tbuf
->actime
);
7869 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
7870 unlock_user_struct(target_tbuf
, arg2
, 0);
7875 if (!(p
= lock_user_string(arg1
)))
7876 return -TARGET_EFAULT
;
7877 ret
= get_errno(utime(p
, host_tbuf
));
7878 unlock_user(p
, arg1
, 0);
7882 #ifdef TARGET_NR_utimes
7883 case TARGET_NR_utimes
:
7885 struct timeval
*tvp
, tv
[2];
7887 if (copy_from_user_timeval(&tv
[0], arg2
)
7888 || copy_from_user_timeval(&tv
[1],
7889 arg2
+ sizeof(struct target_timeval
)))
7890 return -TARGET_EFAULT
;
7895 if (!(p
= lock_user_string(arg1
)))
7896 return -TARGET_EFAULT
;
7897 ret
= get_errno(utimes(p
, tvp
));
7898 unlock_user(p
, arg1
, 0);
7902 #if defined(TARGET_NR_futimesat)
7903 case TARGET_NR_futimesat
:
7905 struct timeval
*tvp
, tv
[2];
7907 if (copy_from_user_timeval(&tv
[0], arg3
)
7908 || copy_from_user_timeval(&tv
[1],
7909 arg3
+ sizeof(struct target_timeval
)))
7910 return -TARGET_EFAULT
;
7915 if (!(p
= lock_user_string(arg2
))) {
7916 return -TARGET_EFAULT
;
7918 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
7919 unlock_user(p
, arg2
, 0);
7923 #ifdef TARGET_NR_access
7924 case TARGET_NR_access
:
7925 if (!(p
= lock_user_string(arg1
))) {
7926 return -TARGET_EFAULT
;
7928 ret
= get_errno(access(path(p
), arg2
));
7929 unlock_user(p
, arg1
, 0);
7932 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7933 case TARGET_NR_faccessat
:
7934 if (!(p
= lock_user_string(arg2
))) {
7935 return -TARGET_EFAULT
;
7937 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
7938 unlock_user(p
, arg2
, 0);
7941 #ifdef TARGET_NR_nice /* not on alpha */
7942 case TARGET_NR_nice
:
7943 return get_errno(nice(arg1
));
7945 case TARGET_NR_sync
:
7948 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7949 case TARGET_NR_syncfs
:
7950 return get_errno(syncfs(arg1
));
7952 case TARGET_NR_kill
:
7953 return get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
7954 #ifdef TARGET_NR_rename
7955 case TARGET_NR_rename
:
7958 p
= lock_user_string(arg1
);
7959 p2
= lock_user_string(arg2
);
7961 ret
= -TARGET_EFAULT
;
7963 ret
= get_errno(rename(p
, p2
));
7964 unlock_user(p2
, arg2
, 0);
7965 unlock_user(p
, arg1
, 0);
7969 #if defined(TARGET_NR_renameat)
7970 case TARGET_NR_renameat
:
7973 p
= lock_user_string(arg2
);
7974 p2
= lock_user_string(arg4
);
7976 ret
= -TARGET_EFAULT
;
7978 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
7979 unlock_user(p2
, arg4
, 0);
7980 unlock_user(p
, arg2
, 0);
7984 #if defined(TARGET_NR_renameat2)
7985 case TARGET_NR_renameat2
:
7988 p
= lock_user_string(arg2
);
7989 p2
= lock_user_string(arg4
);
7991 ret
= -TARGET_EFAULT
;
7993 ret
= get_errno(sys_renameat2(arg1
, p
, arg3
, p2
, arg5
));
7995 unlock_user(p2
, arg4
, 0);
7996 unlock_user(p
, arg2
, 0);
8000 #ifdef TARGET_NR_mkdir
8001 case TARGET_NR_mkdir
:
8002 if (!(p
= lock_user_string(arg1
)))
8003 return -TARGET_EFAULT
;
8004 ret
= get_errno(mkdir(p
, arg2
));
8005 unlock_user(p
, arg1
, 0);
8008 #if defined(TARGET_NR_mkdirat)
8009 case TARGET_NR_mkdirat
:
8010 if (!(p
= lock_user_string(arg2
)))
8011 return -TARGET_EFAULT
;
8012 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
8013 unlock_user(p
, arg2
, 0);
8016 #ifdef TARGET_NR_rmdir
8017 case TARGET_NR_rmdir
:
8018 if (!(p
= lock_user_string(arg1
)))
8019 return -TARGET_EFAULT
;
8020 ret
= get_errno(rmdir(p
));
8021 unlock_user(p
, arg1
, 0);
8025 ret
= get_errno(dup(arg1
));
8027 fd_trans_dup(arg1
, ret
);
8030 #ifdef TARGET_NR_pipe
8031 case TARGET_NR_pipe
:
8032 return do_pipe(cpu_env
, arg1
, 0, 0);
8034 #ifdef TARGET_NR_pipe2
8035 case TARGET_NR_pipe2
:
8036 return do_pipe(cpu_env
, arg1
,
8037 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
8039 case TARGET_NR_times
:
8041 struct target_tms
*tmsp
;
8043 ret
= get_errno(times(&tms
));
8045 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
8047 return -TARGET_EFAULT
;
8048 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
8049 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
8050 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
8051 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
8054 ret
= host_to_target_clock_t(ret
);
8057 case TARGET_NR_acct
:
8059 ret
= get_errno(acct(NULL
));
8061 if (!(p
= lock_user_string(arg1
))) {
8062 return -TARGET_EFAULT
;
8064 ret
= get_errno(acct(path(p
)));
8065 unlock_user(p
, arg1
, 0);
8068 #ifdef TARGET_NR_umount2
8069 case TARGET_NR_umount2
:
8070 if (!(p
= lock_user_string(arg1
)))
8071 return -TARGET_EFAULT
;
8072 ret
= get_errno(umount2(p
, arg2
));
8073 unlock_user(p
, arg1
, 0);
8076 case TARGET_NR_ioctl
:
8077 return do_ioctl(arg1
, arg2
, arg3
);
8078 #ifdef TARGET_NR_fcntl
8079 case TARGET_NR_fcntl
:
8080 return do_fcntl(arg1
, arg2
, arg3
);
8082 case TARGET_NR_setpgid
:
8083 return get_errno(setpgid(arg1
, arg2
));
8084 case TARGET_NR_umask
:
8085 return get_errno(umask(arg1
));
8086 case TARGET_NR_chroot
:
8087 if (!(p
= lock_user_string(arg1
)))
8088 return -TARGET_EFAULT
;
8089 ret
= get_errno(chroot(p
));
8090 unlock_user(p
, arg1
, 0);
8092 #ifdef TARGET_NR_dup2
8093 case TARGET_NR_dup2
:
8094 ret
= get_errno(dup2(arg1
, arg2
));
8096 fd_trans_dup(arg1
, arg2
);
8100 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8101 case TARGET_NR_dup3
:
8105 if ((arg3
& ~TARGET_O_CLOEXEC
) != 0) {
8108 host_flags
= target_to_host_bitmask(arg3
, fcntl_flags_tbl
);
8109 ret
= get_errno(dup3(arg1
, arg2
, host_flags
));
8111 fd_trans_dup(arg1
, arg2
);
8116 #ifdef TARGET_NR_getppid /* not on alpha */
8117 case TARGET_NR_getppid
:
8118 return get_errno(getppid());
8120 #ifdef TARGET_NR_getpgrp
8121 case TARGET_NR_getpgrp
:
8122 return get_errno(getpgrp());
8124 case TARGET_NR_setsid
:
8125 return get_errno(setsid());
8126 #ifdef TARGET_NR_sigaction
8127 case TARGET_NR_sigaction
:
8129 #if defined(TARGET_ALPHA)
8130 struct target_sigaction act
, oact
, *pact
= 0;
8131 struct target_old_sigaction
*old_act
;
8133 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8134 return -TARGET_EFAULT
;
8135 act
._sa_handler
= old_act
->_sa_handler
;
8136 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8137 act
.sa_flags
= old_act
->sa_flags
;
8138 act
.sa_restorer
= 0;
8139 unlock_user_struct(old_act
, arg2
, 0);
8142 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8143 if (!is_error(ret
) && arg3
) {
8144 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8145 return -TARGET_EFAULT
;
8146 old_act
->_sa_handler
= oact
._sa_handler
;
8147 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8148 old_act
->sa_flags
= oact
.sa_flags
;
8149 unlock_user_struct(old_act
, arg3
, 1);
8151 #elif defined(TARGET_MIPS)
8152 struct target_sigaction act
, oact
, *pact
, *old_act
;
8155 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8156 return -TARGET_EFAULT
;
8157 act
._sa_handler
= old_act
->_sa_handler
;
8158 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
8159 act
.sa_flags
= old_act
->sa_flags
;
8160 unlock_user_struct(old_act
, arg2
, 0);
8166 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8168 if (!is_error(ret
) && arg3
) {
8169 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8170 return -TARGET_EFAULT
;
8171 old_act
->_sa_handler
= oact
._sa_handler
;
8172 old_act
->sa_flags
= oact
.sa_flags
;
8173 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
8174 old_act
->sa_mask
.sig
[1] = 0;
8175 old_act
->sa_mask
.sig
[2] = 0;
8176 old_act
->sa_mask
.sig
[3] = 0;
8177 unlock_user_struct(old_act
, arg3
, 1);
8180 struct target_old_sigaction
*old_act
;
8181 struct target_sigaction act
, oact
, *pact
;
8183 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8184 return -TARGET_EFAULT
;
8185 act
._sa_handler
= old_act
->_sa_handler
;
8186 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8187 act
.sa_flags
= old_act
->sa_flags
;
8188 act
.sa_restorer
= old_act
->sa_restorer
;
8189 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8190 act
.ka_restorer
= 0;
8192 unlock_user_struct(old_act
, arg2
, 0);
8197 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8198 if (!is_error(ret
) && arg3
) {
8199 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8200 return -TARGET_EFAULT
;
8201 old_act
->_sa_handler
= oact
._sa_handler
;
8202 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8203 old_act
->sa_flags
= oact
.sa_flags
;
8204 old_act
->sa_restorer
= oact
.sa_restorer
;
8205 unlock_user_struct(old_act
, arg3
, 1);
8211 case TARGET_NR_rt_sigaction
:
8213 #if defined(TARGET_ALPHA)
8214 /* For Alpha and SPARC this is a 5 argument syscall, with
8215 * a 'restorer' parameter which must be copied into the
8216 * sa_restorer field of the sigaction struct.
8217 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8218 * and arg5 is the sigsetsize.
8219 * Alpha also has a separate rt_sigaction struct that it uses
8220 * here; SPARC uses the usual sigaction struct.
8222 struct target_rt_sigaction
*rt_act
;
8223 struct target_sigaction act
, oact
, *pact
= 0;
8225 if (arg4
!= sizeof(target_sigset_t
)) {
8226 return -TARGET_EINVAL
;
8229 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
8230 return -TARGET_EFAULT
;
8231 act
._sa_handler
= rt_act
->_sa_handler
;
8232 act
.sa_mask
= rt_act
->sa_mask
;
8233 act
.sa_flags
= rt_act
->sa_flags
;
8234 act
.sa_restorer
= arg5
;
8235 unlock_user_struct(rt_act
, arg2
, 0);
8238 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8239 if (!is_error(ret
) && arg3
) {
8240 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
8241 return -TARGET_EFAULT
;
8242 rt_act
->_sa_handler
= oact
._sa_handler
;
8243 rt_act
->sa_mask
= oact
.sa_mask
;
8244 rt_act
->sa_flags
= oact
.sa_flags
;
8245 unlock_user_struct(rt_act
, arg3
, 1);
8249 target_ulong restorer
= arg4
;
8250 target_ulong sigsetsize
= arg5
;
8252 target_ulong sigsetsize
= arg4
;
8254 struct target_sigaction
*act
;
8255 struct target_sigaction
*oact
;
8257 if (sigsetsize
!= sizeof(target_sigset_t
)) {
8258 return -TARGET_EINVAL
;
8261 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1)) {
8262 return -TARGET_EFAULT
;
8264 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8265 act
->ka_restorer
= restorer
;
8271 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
8272 ret
= -TARGET_EFAULT
;
8273 goto rt_sigaction_fail
;
8277 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
8280 unlock_user_struct(act
, arg2
, 0);
8282 unlock_user_struct(oact
, arg3
, 1);
8286 #ifdef TARGET_NR_sgetmask /* not on alpha */
8287 case TARGET_NR_sgetmask
:
8290 abi_ulong target_set
;
8291 ret
= do_sigprocmask(0, NULL
, &cur_set
);
8293 host_to_target_old_sigset(&target_set
, &cur_set
);
8299 #ifdef TARGET_NR_ssetmask /* not on alpha */
8300 case TARGET_NR_ssetmask
:
8303 abi_ulong target_set
= arg1
;
8304 target_to_host_old_sigset(&set
, &target_set
);
8305 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
8307 host_to_target_old_sigset(&target_set
, &oset
);
8313 #ifdef TARGET_NR_sigprocmask
8314 case TARGET_NR_sigprocmask
:
8316 #if defined(TARGET_ALPHA)
8317 sigset_t set
, oldset
;
8322 case TARGET_SIG_BLOCK
:
8325 case TARGET_SIG_UNBLOCK
:
8328 case TARGET_SIG_SETMASK
:
8332 return -TARGET_EINVAL
;
8335 target_to_host_old_sigset(&set
, &mask
);
8337 ret
= do_sigprocmask(how
, &set
, &oldset
);
8338 if (!is_error(ret
)) {
8339 host_to_target_old_sigset(&mask
, &oldset
);
8341 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
8344 sigset_t set
, oldset
, *set_ptr
;
8349 case TARGET_SIG_BLOCK
:
8352 case TARGET_SIG_UNBLOCK
:
8355 case TARGET_SIG_SETMASK
:
8359 return -TARGET_EINVAL
;
8361 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8362 return -TARGET_EFAULT
;
8363 target_to_host_old_sigset(&set
, p
);
8364 unlock_user(p
, arg2
, 0);
8370 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8371 if (!is_error(ret
) && arg3
) {
8372 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8373 return -TARGET_EFAULT
;
8374 host_to_target_old_sigset(p
, &oldset
);
8375 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8381 case TARGET_NR_rt_sigprocmask
:
8384 sigset_t set
, oldset
, *set_ptr
;
8386 if (arg4
!= sizeof(target_sigset_t
)) {
8387 return -TARGET_EINVAL
;
8392 case TARGET_SIG_BLOCK
:
8395 case TARGET_SIG_UNBLOCK
:
8398 case TARGET_SIG_SETMASK
:
8402 return -TARGET_EINVAL
;
8404 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8405 return -TARGET_EFAULT
;
8406 target_to_host_sigset(&set
, p
);
8407 unlock_user(p
, arg2
, 0);
8413 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8414 if (!is_error(ret
) && arg3
) {
8415 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8416 return -TARGET_EFAULT
;
8417 host_to_target_sigset(p
, &oldset
);
8418 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8422 #ifdef TARGET_NR_sigpending
8423 case TARGET_NR_sigpending
:
8426 ret
= get_errno(sigpending(&set
));
8427 if (!is_error(ret
)) {
8428 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8429 return -TARGET_EFAULT
;
8430 host_to_target_old_sigset(p
, &set
);
8431 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8436 case TARGET_NR_rt_sigpending
:
8440 /* Yes, this check is >, not != like most. We follow the kernel's
8441 * logic and it does it like this because it implements
8442 * NR_sigpending through the same code path, and in that case
8443 * the old_sigset_t is smaller in size.
8445 if (arg2
> sizeof(target_sigset_t
)) {
8446 return -TARGET_EINVAL
;
8449 ret
= get_errno(sigpending(&set
));
8450 if (!is_error(ret
)) {
8451 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8452 return -TARGET_EFAULT
;
8453 host_to_target_sigset(p
, &set
);
8454 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8458 #ifdef TARGET_NR_sigsuspend
8459 case TARGET_NR_sigsuspend
:
8461 TaskState
*ts
= cpu
->opaque
;
8462 #if defined(TARGET_ALPHA)
8463 abi_ulong mask
= arg1
;
8464 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
8466 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8467 return -TARGET_EFAULT
;
8468 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
8469 unlock_user(p
, arg1
, 0);
8471 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8473 if (ret
!= -TARGET_ERESTARTSYS
) {
8474 ts
->in_sigsuspend
= 1;
8479 case TARGET_NR_rt_sigsuspend
:
8481 TaskState
*ts
= cpu
->opaque
;
8483 if (arg2
!= sizeof(target_sigset_t
)) {
8484 return -TARGET_EINVAL
;
8486 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8487 return -TARGET_EFAULT
;
8488 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
8489 unlock_user(p
, arg1
, 0);
8490 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8492 if (ret
!= -TARGET_ERESTARTSYS
) {
8493 ts
->in_sigsuspend
= 1;
8497 case TARGET_NR_rt_sigtimedwait
:
8500 struct timespec uts
, *puts
;
8503 if (arg4
!= sizeof(target_sigset_t
)) {
8504 return -TARGET_EINVAL
;
8507 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8508 return -TARGET_EFAULT
;
8509 target_to_host_sigset(&set
, p
);
8510 unlock_user(p
, arg1
, 0);
8513 target_to_host_timespec(puts
, arg3
);
8517 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
8519 if (!is_error(ret
)) {
8521 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
8524 return -TARGET_EFAULT
;
8526 host_to_target_siginfo(p
, &uinfo
);
8527 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
8529 ret
= host_to_target_signal(ret
);
8533 case TARGET_NR_rt_sigqueueinfo
:
8537 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
8539 return -TARGET_EFAULT
;
8541 target_to_host_siginfo(&uinfo
, p
);
8542 unlock_user(p
, arg3
, 0);
8543 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
8546 case TARGET_NR_rt_tgsigqueueinfo
:
8550 p
= lock_user(VERIFY_READ
, arg4
, sizeof(target_siginfo_t
), 1);
8552 return -TARGET_EFAULT
;
8554 target_to_host_siginfo(&uinfo
, p
);
8555 unlock_user(p
, arg4
, 0);
8556 ret
= get_errno(sys_rt_tgsigqueueinfo(arg1
, arg2
, arg3
, &uinfo
));
8559 #ifdef TARGET_NR_sigreturn
8560 case TARGET_NR_sigreturn
:
8561 if (block_signals()) {
8562 return -TARGET_ERESTARTSYS
;
8564 return do_sigreturn(cpu_env
);
8566 case TARGET_NR_rt_sigreturn
:
8567 if (block_signals()) {
8568 return -TARGET_ERESTARTSYS
;
8570 return do_rt_sigreturn(cpu_env
);
8571 case TARGET_NR_sethostname
:
8572 if (!(p
= lock_user_string(arg1
)))
8573 return -TARGET_EFAULT
;
8574 ret
= get_errno(sethostname(p
, arg2
));
8575 unlock_user(p
, arg1
, 0);
8577 #ifdef TARGET_NR_setrlimit
8578 case TARGET_NR_setrlimit
:
8580 int resource
= target_to_host_resource(arg1
);
8581 struct target_rlimit
*target_rlim
;
8583 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
8584 return -TARGET_EFAULT
;
8585 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
8586 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
8587 unlock_user_struct(target_rlim
, arg2
, 0);
8589 * If we just passed through resource limit settings for memory then
8590 * they would also apply to QEMU's own allocations, and QEMU will
8591 * crash or hang or die if its allocations fail. Ideally we would
8592 * track the guest allocations in QEMU and apply the limits ourselves.
8593 * For now, just tell the guest the call succeeded but don't actually
8596 if (resource
!= RLIMIT_AS
&&
8597 resource
!= RLIMIT_DATA
&&
8598 resource
!= RLIMIT_STACK
) {
8599 return get_errno(setrlimit(resource
, &rlim
));
8605 #ifdef TARGET_NR_getrlimit
8606 case TARGET_NR_getrlimit
:
8608 int resource
= target_to_host_resource(arg1
);
8609 struct target_rlimit
*target_rlim
;
8612 ret
= get_errno(getrlimit(resource
, &rlim
));
8613 if (!is_error(ret
)) {
8614 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
8615 return -TARGET_EFAULT
;
8616 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
8617 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
8618 unlock_user_struct(target_rlim
, arg2
, 1);
8623 case TARGET_NR_getrusage
:
8625 struct rusage rusage
;
8626 ret
= get_errno(getrusage(arg1
, &rusage
));
8627 if (!is_error(ret
)) {
8628 ret
= host_to_target_rusage(arg2
, &rusage
);
8632 case TARGET_NR_gettimeofday
:
8635 ret
= get_errno(gettimeofday(&tv
, NULL
));
8636 if (!is_error(ret
)) {
8637 if (copy_to_user_timeval(arg1
, &tv
))
8638 return -TARGET_EFAULT
;
8642 case TARGET_NR_settimeofday
:
8644 struct timeval tv
, *ptv
= NULL
;
8645 struct timezone tz
, *ptz
= NULL
;
8648 if (copy_from_user_timeval(&tv
, arg1
)) {
8649 return -TARGET_EFAULT
;
8655 if (copy_from_user_timezone(&tz
, arg2
)) {
8656 return -TARGET_EFAULT
;
8661 return get_errno(settimeofday(ptv
, ptz
));
8663 #if defined(TARGET_NR_select)
8664 case TARGET_NR_select
:
8665 #if defined(TARGET_WANT_NI_OLD_SELECT)
8666 /* some architectures used to have old_select here
8667 * but now ENOSYS it.
8669 ret
= -TARGET_ENOSYS
;
8670 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8671 ret
= do_old_select(arg1
);
8673 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
8677 #ifdef TARGET_NR_pselect6
8678 case TARGET_NR_pselect6
:
8680 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
8681 fd_set rfds
, wfds
, efds
;
8682 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
8683 struct timespec ts
, *ts_ptr
;
8686 * The 6th arg is actually two args smashed together,
8687 * so we cannot use the C library.
8695 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
8696 target_sigset_t
*target_sigset
;
8704 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
8708 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
8712 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
8718 * This takes a timespec, and not a timeval, so we cannot
8719 * use the do_select() helper ...
8722 if (target_to_host_timespec(&ts
, ts_addr
)) {
8723 return -TARGET_EFAULT
;
8730 /* Extract the two packed args for the sigset */
8733 sig
.size
= SIGSET_T_SIZE
;
8735 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
8737 return -TARGET_EFAULT
;
8739 arg_sigset
= tswapal(arg7
[0]);
8740 arg_sigsize
= tswapal(arg7
[1]);
8741 unlock_user(arg7
, arg6
, 0);
8745 if (arg_sigsize
!= sizeof(*target_sigset
)) {
8746 /* Like the kernel, we enforce correct size sigsets */
8747 return -TARGET_EINVAL
;
8749 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
8750 sizeof(*target_sigset
), 1);
8751 if (!target_sigset
) {
8752 return -TARGET_EFAULT
;
8754 target_to_host_sigset(&set
, target_sigset
);
8755 unlock_user(target_sigset
, arg_sigset
, 0);
8763 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
8766 if (!is_error(ret
)) {
8767 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
8768 return -TARGET_EFAULT
;
8769 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
8770 return -TARGET_EFAULT
;
8771 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
8772 return -TARGET_EFAULT
;
8774 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
8775 return -TARGET_EFAULT
;
8780 #ifdef TARGET_NR_symlink
8781 case TARGET_NR_symlink
:
8784 p
= lock_user_string(arg1
);
8785 p2
= lock_user_string(arg2
);
8787 ret
= -TARGET_EFAULT
;
8789 ret
= get_errno(symlink(p
, p2
));
8790 unlock_user(p2
, arg2
, 0);
8791 unlock_user(p
, arg1
, 0);
8795 #if defined(TARGET_NR_symlinkat)
8796 case TARGET_NR_symlinkat
:
8799 p
= lock_user_string(arg1
);
8800 p2
= lock_user_string(arg3
);
8802 ret
= -TARGET_EFAULT
;
8804 ret
= get_errno(symlinkat(p
, arg2
, p2
));
8805 unlock_user(p2
, arg3
, 0);
8806 unlock_user(p
, arg1
, 0);
8810 #ifdef TARGET_NR_readlink
8811 case TARGET_NR_readlink
:
8814 p
= lock_user_string(arg1
);
8815 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8817 ret
= -TARGET_EFAULT
;
8819 /* Short circuit this for the magic exe check. */
8820 ret
= -TARGET_EINVAL
;
8821 } else if (is_proc_myself((const char *)p
, "exe")) {
8822 char real
[PATH_MAX
], *temp
;
8823 temp
= realpath(exec_path
, real
);
8824 /* Return value is # of bytes that we wrote to the buffer. */
8826 ret
= get_errno(-1);
8828 /* Don't worry about sign mismatch as earlier mapping
8829 * logic would have thrown a bad address error. */
8830 ret
= MIN(strlen(real
), arg3
);
8831 /* We cannot NUL terminate the string. */
8832 memcpy(p2
, real
, ret
);
8835 ret
= get_errno(readlink(path(p
), p2
, arg3
));
8837 unlock_user(p2
, arg2
, ret
);
8838 unlock_user(p
, arg1
, 0);
8842 #if defined(TARGET_NR_readlinkat)
8843 case TARGET_NR_readlinkat
:
8846 p
= lock_user_string(arg2
);
8847 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
8849 ret
= -TARGET_EFAULT
;
8850 } else if (is_proc_myself((const char *)p
, "exe")) {
8851 char real
[PATH_MAX
], *temp
;
8852 temp
= realpath(exec_path
, real
);
8853 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
8854 snprintf((char *)p2
, arg4
, "%s", real
);
8856 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
8858 unlock_user(p2
, arg3
, ret
);
8859 unlock_user(p
, arg2
, 0);
8863 #ifdef TARGET_NR_swapon
8864 case TARGET_NR_swapon
:
8865 if (!(p
= lock_user_string(arg1
)))
8866 return -TARGET_EFAULT
;
8867 ret
= get_errno(swapon(p
, arg2
));
8868 unlock_user(p
, arg1
, 0);
8871 case TARGET_NR_reboot
:
8872 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
8873 /* arg4 must be ignored in all other cases */
8874 p
= lock_user_string(arg4
);
8876 return -TARGET_EFAULT
;
8878 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
8879 unlock_user(p
, arg4
, 0);
8881 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
8884 #ifdef TARGET_NR_mmap
8885 case TARGET_NR_mmap
:
8886 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8887 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8888 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8889 || defined(TARGET_S390X)
8892 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
8893 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
8894 return -TARGET_EFAULT
;
8901 unlock_user(v
, arg1
, 0);
8902 ret
= get_errno(target_mmap(v1
, v2
, v3
,
8903 target_to_host_bitmask(v4
, mmap_flags_tbl
),
8907 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
8908 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
8914 #ifdef TARGET_NR_mmap2
8915 case TARGET_NR_mmap2
:
8917 #define MMAP_SHIFT 12
8919 ret
= target_mmap(arg1
, arg2
, arg3
,
8920 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
8921 arg5
, arg6
<< MMAP_SHIFT
);
8922 return get_errno(ret
);
8924 case TARGET_NR_munmap
:
8925 return get_errno(target_munmap(arg1
, arg2
));
8926 case TARGET_NR_mprotect
:
8928 TaskState
*ts
= cpu
->opaque
;
8929 /* Special hack to detect libc making the stack executable. */
8930 if ((arg3
& PROT_GROWSDOWN
)
8931 && arg1
>= ts
->info
->stack_limit
8932 && arg1
<= ts
->info
->start_stack
) {
8933 arg3
&= ~PROT_GROWSDOWN
;
8934 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
8935 arg1
= ts
->info
->stack_limit
;
8938 return get_errno(target_mprotect(arg1
, arg2
, arg3
));
8939 #ifdef TARGET_NR_mremap
8940 case TARGET_NR_mremap
:
8941 return get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
8943 /* ??? msync/mlock/munlock are broken for softmmu. */
8944 #ifdef TARGET_NR_msync
8945 case TARGET_NR_msync
:
8946 return get_errno(msync(g2h(arg1
), arg2
, arg3
));
8948 #ifdef TARGET_NR_mlock
8949 case TARGET_NR_mlock
:
8950 return get_errno(mlock(g2h(arg1
), arg2
));
8952 #ifdef TARGET_NR_munlock
8953 case TARGET_NR_munlock
:
8954 return get_errno(munlock(g2h(arg1
), arg2
));
8956 #ifdef TARGET_NR_mlockall
8957 case TARGET_NR_mlockall
:
8958 return get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
8960 #ifdef TARGET_NR_munlockall
8961 case TARGET_NR_munlockall
:
8962 return get_errno(munlockall());
8964 #ifdef TARGET_NR_truncate
8965 case TARGET_NR_truncate
:
8966 if (!(p
= lock_user_string(arg1
)))
8967 return -TARGET_EFAULT
;
8968 ret
= get_errno(truncate(p
, arg2
));
8969 unlock_user(p
, arg1
, 0);
8972 #ifdef TARGET_NR_ftruncate
8973 case TARGET_NR_ftruncate
:
8974 return get_errno(ftruncate(arg1
, arg2
));
8976 case TARGET_NR_fchmod
:
8977 return get_errno(fchmod(arg1
, arg2
));
8978 #if defined(TARGET_NR_fchmodat)
8979 case TARGET_NR_fchmodat
:
8980 if (!(p
= lock_user_string(arg2
)))
8981 return -TARGET_EFAULT
;
8982 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
8983 unlock_user(p
, arg2
, 0);
8986 case TARGET_NR_getpriority
:
8987 /* Note that negative values are valid for getpriority, so we must
8988 differentiate based on errno settings. */
8990 ret
= getpriority(arg1
, arg2
);
8991 if (ret
== -1 && errno
!= 0) {
8992 return -host_to_target_errno(errno
);
8995 /* Return value is the unbiased priority. Signal no error. */
8996 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
8998 /* Return value is a biased priority to avoid negative numbers. */
9002 case TARGET_NR_setpriority
:
9003 return get_errno(setpriority(arg1
, arg2
, arg3
));
9004 #ifdef TARGET_NR_statfs
9005 case TARGET_NR_statfs
:
9006 if (!(p
= lock_user_string(arg1
))) {
9007 return -TARGET_EFAULT
;
9009 ret
= get_errno(statfs(path(p
), &stfs
));
9010 unlock_user(p
, arg1
, 0);
9012 if (!is_error(ret
)) {
9013 struct target_statfs
*target_stfs
;
9015 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
9016 return -TARGET_EFAULT
;
9017 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9018 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9019 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9020 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9021 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9022 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9023 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9024 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9025 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9026 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9027 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9028 #ifdef _STATFS_F_FLAGS
9029 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
9031 __put_user(0, &target_stfs
->f_flags
);
9033 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9034 unlock_user_struct(target_stfs
, arg2
, 1);
9038 #ifdef TARGET_NR_fstatfs
9039 case TARGET_NR_fstatfs
:
9040 ret
= get_errno(fstatfs(arg1
, &stfs
));
9041 goto convert_statfs
;
9043 #ifdef TARGET_NR_statfs64
9044 case TARGET_NR_statfs64
:
9045 if (!(p
= lock_user_string(arg1
))) {
9046 return -TARGET_EFAULT
;
9048 ret
= get_errno(statfs(path(p
), &stfs
));
9049 unlock_user(p
, arg1
, 0);
9051 if (!is_error(ret
)) {
9052 struct target_statfs64
*target_stfs
;
9054 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
9055 return -TARGET_EFAULT
;
9056 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9057 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9058 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9059 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9060 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9061 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9062 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9063 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9064 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9065 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9066 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9067 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9068 unlock_user_struct(target_stfs
, arg3
, 1);
9071 case TARGET_NR_fstatfs64
:
9072 ret
= get_errno(fstatfs(arg1
, &stfs
));
9073 goto convert_statfs64
;
9075 #ifdef TARGET_NR_socketcall
9076 case TARGET_NR_socketcall
:
9077 return do_socketcall(arg1
, arg2
);
9079 #ifdef TARGET_NR_accept
9080 case TARGET_NR_accept
:
9081 return do_accept4(arg1
, arg2
, arg3
, 0);
9083 #ifdef TARGET_NR_accept4
9084 case TARGET_NR_accept4
:
9085 return do_accept4(arg1
, arg2
, arg3
, arg4
);
9087 #ifdef TARGET_NR_bind
9088 case TARGET_NR_bind
:
9089 return do_bind(arg1
, arg2
, arg3
);
9091 #ifdef TARGET_NR_connect
9092 case TARGET_NR_connect
:
9093 return do_connect(arg1
, arg2
, arg3
);
9095 #ifdef TARGET_NR_getpeername
9096 case TARGET_NR_getpeername
:
9097 return do_getpeername(arg1
, arg2
, arg3
);
9099 #ifdef TARGET_NR_getsockname
9100 case TARGET_NR_getsockname
:
9101 return do_getsockname(arg1
, arg2
, arg3
);
9103 #ifdef TARGET_NR_getsockopt
9104 case TARGET_NR_getsockopt
:
9105 return do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
9107 #ifdef TARGET_NR_listen
9108 case TARGET_NR_listen
:
9109 return get_errno(listen(arg1
, arg2
));
9111 #ifdef TARGET_NR_recv
9112 case TARGET_NR_recv
:
9113 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
9115 #ifdef TARGET_NR_recvfrom
9116 case TARGET_NR_recvfrom
:
9117 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9119 #ifdef TARGET_NR_recvmsg
9120 case TARGET_NR_recvmsg
:
9121 return do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
9123 #ifdef TARGET_NR_send
9124 case TARGET_NR_send
:
9125 return do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
9127 #ifdef TARGET_NR_sendmsg
9128 case TARGET_NR_sendmsg
:
9129 return do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
9131 #ifdef TARGET_NR_sendmmsg
9132 case TARGET_NR_sendmmsg
:
9133 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
9134 case TARGET_NR_recvmmsg
:
9135 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
9137 #ifdef TARGET_NR_sendto
9138 case TARGET_NR_sendto
:
9139 return do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9141 #ifdef TARGET_NR_shutdown
9142 case TARGET_NR_shutdown
:
9143 return get_errno(shutdown(arg1
, arg2
));
9145 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9146 case TARGET_NR_getrandom
:
9147 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
9149 return -TARGET_EFAULT
;
9151 ret
= get_errno(getrandom(p
, arg2
, arg3
));
9152 unlock_user(p
, arg1
, ret
);
9155 #ifdef TARGET_NR_socket
9156 case TARGET_NR_socket
:
9157 return do_socket(arg1
, arg2
, arg3
);
9159 #ifdef TARGET_NR_socketpair
9160 case TARGET_NR_socketpair
:
9161 return do_socketpair(arg1
, arg2
, arg3
, arg4
);
9163 #ifdef TARGET_NR_setsockopt
9164 case TARGET_NR_setsockopt
:
9165 return do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
9167 #if defined(TARGET_NR_syslog)
9168 case TARGET_NR_syslog
:
9173 case TARGET_SYSLOG_ACTION_CLOSE
: /* Close log */
9174 case TARGET_SYSLOG_ACTION_OPEN
: /* Open log */
9175 case TARGET_SYSLOG_ACTION_CLEAR
: /* Clear ring buffer */
9176 case TARGET_SYSLOG_ACTION_CONSOLE_OFF
: /* Disable logging */
9177 case TARGET_SYSLOG_ACTION_CONSOLE_ON
: /* Enable logging */
9178 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL
: /* Set messages level */
9179 case TARGET_SYSLOG_ACTION_SIZE_UNREAD
: /* Number of chars */
9180 case TARGET_SYSLOG_ACTION_SIZE_BUFFER
: /* Size of the buffer */
9181 return get_errno(sys_syslog((int)arg1
, NULL
, (int)arg3
));
9182 case TARGET_SYSLOG_ACTION_READ
: /* Read from log */
9183 case TARGET_SYSLOG_ACTION_READ_CLEAR
: /* Read/clear msgs */
9184 case TARGET_SYSLOG_ACTION_READ_ALL
: /* Read last messages */
9187 return -TARGET_EINVAL
;
9192 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9194 return -TARGET_EFAULT
;
9196 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
9197 unlock_user(p
, arg2
, arg3
);
9201 return -TARGET_EINVAL
;
9206 case TARGET_NR_setitimer
:
9208 struct itimerval value
, ovalue
, *pvalue
;
9212 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
9213 || copy_from_user_timeval(&pvalue
->it_value
,
9214 arg2
+ sizeof(struct target_timeval
)))
9215 return -TARGET_EFAULT
;
9219 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
9220 if (!is_error(ret
) && arg3
) {
9221 if (copy_to_user_timeval(arg3
,
9222 &ovalue
.it_interval
)
9223 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
9225 return -TARGET_EFAULT
;
9229 case TARGET_NR_getitimer
:
9231 struct itimerval value
;
9233 ret
= get_errno(getitimer(arg1
, &value
));
9234 if (!is_error(ret
) && arg2
) {
9235 if (copy_to_user_timeval(arg2
,
9237 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
9239 return -TARGET_EFAULT
;
9243 #ifdef TARGET_NR_stat
9244 case TARGET_NR_stat
:
9245 if (!(p
= lock_user_string(arg1
))) {
9246 return -TARGET_EFAULT
;
9248 ret
= get_errno(stat(path(p
), &st
));
9249 unlock_user(p
, arg1
, 0);
9252 #ifdef TARGET_NR_lstat
9253 case TARGET_NR_lstat
:
9254 if (!(p
= lock_user_string(arg1
))) {
9255 return -TARGET_EFAULT
;
9257 ret
= get_errno(lstat(path(p
), &st
));
9258 unlock_user(p
, arg1
, 0);
9261 #ifdef TARGET_NR_fstat
9262 case TARGET_NR_fstat
:
9264 ret
= get_errno(fstat(arg1
, &st
));
9265 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9268 if (!is_error(ret
)) {
9269 struct target_stat
*target_st
;
9271 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
9272 return -TARGET_EFAULT
;
9273 memset(target_st
, 0, sizeof(*target_st
));
9274 __put_user(st
.st_dev
, &target_st
->st_dev
);
9275 __put_user(st
.st_ino
, &target_st
->st_ino
);
9276 __put_user(st
.st_mode
, &target_st
->st_mode
);
9277 __put_user(st
.st_uid
, &target_st
->st_uid
);
9278 __put_user(st
.st_gid
, &target_st
->st_gid
);
9279 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
9280 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
9281 __put_user(st
.st_size
, &target_st
->st_size
);
9282 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
9283 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
9284 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
9285 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
9286 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
9287 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
9288 defined(TARGET_STAT_HAVE_NSEC)
9289 __put_user(st
.st_atim
.tv_nsec
,
9290 &target_st
->target_st_atime_nsec
);
9291 __put_user(st
.st_mtim
.tv_nsec
,
9292 &target_st
->target_st_mtime_nsec
);
9293 __put_user(st
.st_ctim
.tv_nsec
,
9294 &target_st
->target_st_ctime_nsec
);
9296 unlock_user_struct(target_st
, arg2
, 1);
9301 case TARGET_NR_vhangup
:
9302 return get_errno(vhangup());
9303 #ifdef TARGET_NR_syscall
9304 case TARGET_NR_syscall
:
9305 return do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
9306 arg6
, arg7
, arg8
, 0);
9308 case TARGET_NR_wait4
:
9311 abi_long status_ptr
= arg2
;
9312 struct rusage rusage
, *rusage_ptr
;
9313 abi_ulong target_rusage
= arg4
;
9314 abi_long rusage_err
;
9316 rusage_ptr
= &rusage
;
9319 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
9320 if (!is_error(ret
)) {
9321 if (status_ptr
&& ret
) {
9322 status
= host_to_target_waitstatus(status
);
9323 if (put_user_s32(status
, status_ptr
))
9324 return -TARGET_EFAULT
;
9326 if (target_rusage
) {
9327 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
9335 #ifdef TARGET_NR_swapoff
9336 case TARGET_NR_swapoff
:
9337 if (!(p
= lock_user_string(arg1
)))
9338 return -TARGET_EFAULT
;
9339 ret
= get_errno(swapoff(p
));
9340 unlock_user(p
, arg1
, 0);
9343 case TARGET_NR_sysinfo
:
9345 struct target_sysinfo
*target_value
;
9346 struct sysinfo value
;
9347 ret
= get_errno(sysinfo(&value
));
9348 if (!is_error(ret
) && arg1
)
9350 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
9351 return -TARGET_EFAULT
;
9352 __put_user(value
.uptime
, &target_value
->uptime
);
9353 __put_user(value
.loads
[0], &target_value
->loads
[0]);
9354 __put_user(value
.loads
[1], &target_value
->loads
[1]);
9355 __put_user(value
.loads
[2], &target_value
->loads
[2]);
9356 __put_user(value
.totalram
, &target_value
->totalram
);
9357 __put_user(value
.freeram
, &target_value
->freeram
);
9358 __put_user(value
.sharedram
, &target_value
->sharedram
);
9359 __put_user(value
.bufferram
, &target_value
->bufferram
);
9360 __put_user(value
.totalswap
, &target_value
->totalswap
);
9361 __put_user(value
.freeswap
, &target_value
->freeswap
);
9362 __put_user(value
.procs
, &target_value
->procs
);
9363 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
9364 __put_user(value
.freehigh
, &target_value
->freehigh
);
9365 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
9366 unlock_user_struct(target_value
, arg1
, 1);
9370 #ifdef TARGET_NR_ipc
9372 return do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9374 #ifdef TARGET_NR_semget
9375 case TARGET_NR_semget
:
9376 return get_errno(semget(arg1
, arg2
, arg3
));
9378 #ifdef TARGET_NR_semop
9379 case TARGET_NR_semop
:
9380 return do_semop(arg1
, arg2
, arg3
);
9382 #ifdef TARGET_NR_semctl
9383 case TARGET_NR_semctl
:
9384 return do_semctl(arg1
, arg2
, arg3
, arg4
);
9386 #ifdef TARGET_NR_msgctl
9387 case TARGET_NR_msgctl
:
9388 return do_msgctl(arg1
, arg2
, arg3
);
9390 #ifdef TARGET_NR_msgget
9391 case TARGET_NR_msgget
:
9392 return get_errno(msgget(arg1
, arg2
));
9394 #ifdef TARGET_NR_msgrcv
9395 case TARGET_NR_msgrcv
:
9396 return do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
9398 #ifdef TARGET_NR_msgsnd
9399 case TARGET_NR_msgsnd
:
9400 return do_msgsnd(arg1
, arg2
, arg3
, arg4
);
9402 #ifdef TARGET_NR_shmget
9403 case TARGET_NR_shmget
:
9404 return get_errno(shmget(arg1
, arg2
, arg3
));
9406 #ifdef TARGET_NR_shmctl
9407 case TARGET_NR_shmctl
:
9408 return do_shmctl(arg1
, arg2
, arg3
);
9410 #ifdef TARGET_NR_shmat
9411 case TARGET_NR_shmat
:
9412 return do_shmat(cpu_env
, arg1
, arg2
, arg3
);
9414 #ifdef TARGET_NR_shmdt
9415 case TARGET_NR_shmdt
:
9416 return do_shmdt(arg1
);
9418 case TARGET_NR_fsync
:
9419 return get_errno(fsync(arg1
));
9420 case TARGET_NR_clone
:
9421 /* Linux manages to have three different orderings for its
9422 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9423 * match the kernel's CONFIG_CLONE_* settings.
9424 * Microblaze is further special in that it uses a sixth
9425 * implicit argument to clone for the TLS pointer.
9427 #if defined(TARGET_MICROBLAZE)
9428 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
9429 #elif defined(TARGET_CLONE_BACKWARDS)
9430 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
9431 #elif defined(TARGET_CLONE_BACKWARDS2)
9432 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
9434 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
9437 #ifdef __NR_exit_group
9438 /* new thread calls */
9439 case TARGET_NR_exit_group
:
9440 preexit_cleanup(cpu_env
, arg1
);
9441 return get_errno(exit_group(arg1
));
9443 case TARGET_NR_setdomainname
:
9444 if (!(p
= lock_user_string(arg1
)))
9445 return -TARGET_EFAULT
;
9446 ret
= get_errno(setdomainname(p
, arg2
));
9447 unlock_user(p
, arg1
, 0);
9449 case TARGET_NR_uname
:
9450 /* no need to transcode because we use the linux syscall */
9452 struct new_utsname
* buf
;
9454 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
9455 return -TARGET_EFAULT
;
9456 ret
= get_errno(sys_uname(buf
));
9457 if (!is_error(ret
)) {
9458 /* Overwrite the native machine name with whatever is being
9460 g_strlcpy(buf
->machine
, cpu_to_uname_machine(cpu_env
),
9461 sizeof(buf
->machine
));
9462 /* Allow the user to override the reported release. */
9463 if (qemu_uname_release
&& *qemu_uname_release
) {
9464 g_strlcpy(buf
->release
, qemu_uname_release
,
9465 sizeof(buf
->release
));
9468 unlock_user_struct(buf
, arg1
, 1);
9472 case TARGET_NR_modify_ldt
:
9473 return do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
9474 #if !defined(TARGET_X86_64)
9475 case TARGET_NR_vm86
:
9476 return do_vm86(cpu_env
, arg1
, arg2
);
9479 case TARGET_NR_adjtimex
:
9481 struct timex host_buf
;
9483 if (target_to_host_timex(&host_buf
, arg1
) != 0) {
9484 return -TARGET_EFAULT
;
9486 ret
= get_errno(adjtimex(&host_buf
));
9487 if (!is_error(ret
)) {
9488 if (host_to_target_timex(arg1
, &host_buf
) != 0) {
9489 return -TARGET_EFAULT
;
9494 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9495 case TARGET_NR_clock_adjtime
:
9497 struct timex htx
, *phtx
= &htx
;
9499 if (target_to_host_timex(phtx
, arg2
) != 0) {
9500 return -TARGET_EFAULT
;
9502 ret
= get_errno(clock_adjtime(arg1
, phtx
));
9503 if (!is_error(ret
) && phtx
) {
9504 if (host_to_target_timex(arg2
, phtx
) != 0) {
9505 return -TARGET_EFAULT
;
9511 case TARGET_NR_getpgid
:
9512 return get_errno(getpgid(arg1
));
9513 case TARGET_NR_fchdir
:
9514 return get_errno(fchdir(arg1
));
9515 case TARGET_NR_personality
:
9516 return get_errno(personality(arg1
));
9517 #ifdef TARGET_NR__llseek /* Not on alpha */
9518 case TARGET_NR__llseek
:
9521 #if !defined(__NR_llseek)
9522 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
9524 ret
= get_errno(res
);
9529 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
9531 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
9532 return -TARGET_EFAULT
;
9537 #ifdef TARGET_NR_getdents
9538 case TARGET_NR_getdents
:
9539 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9540 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9542 struct target_dirent
*target_dirp
;
9543 struct linux_dirent
*dirp
;
9544 abi_long count
= arg3
;
9546 dirp
= g_try_malloc(count
);
9548 return -TARGET_ENOMEM
;
9551 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9552 if (!is_error(ret
)) {
9553 struct linux_dirent
*de
;
9554 struct target_dirent
*tde
;
9556 int reclen
, treclen
;
9557 int count1
, tnamelen
;
9561 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9562 return -TARGET_EFAULT
;
9565 reclen
= de
->d_reclen
;
9566 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
9567 assert(tnamelen
>= 0);
9568 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
9569 assert(count1
+ treclen
<= count
);
9570 tde
->d_reclen
= tswap16(treclen
);
9571 tde
->d_ino
= tswapal(de
->d_ino
);
9572 tde
->d_off
= tswapal(de
->d_off
);
9573 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
9574 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9576 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9580 unlock_user(target_dirp
, arg2
, ret
);
9586 struct linux_dirent
*dirp
;
9587 abi_long count
= arg3
;
9589 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9590 return -TARGET_EFAULT
;
9591 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9592 if (!is_error(ret
)) {
9593 struct linux_dirent
*de
;
9598 reclen
= de
->d_reclen
;
9601 de
->d_reclen
= tswap16(reclen
);
9602 tswapls(&de
->d_ino
);
9603 tswapls(&de
->d_off
);
9604 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9608 unlock_user(dirp
, arg2
, ret
);
9612 /* Implement getdents in terms of getdents64 */
9614 struct linux_dirent64
*dirp
;
9615 abi_long count
= arg3
;
9617 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
9619 return -TARGET_EFAULT
;
9621 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9622 if (!is_error(ret
)) {
9623 /* Convert the dirent64 structs to target dirent. We do this
9624 * in-place, since we can guarantee that a target_dirent is no
9625 * larger than a dirent64; however this means we have to be
9626 * careful to read everything before writing in the new format.
9628 struct linux_dirent64
*de
;
9629 struct target_dirent
*tde
;
9634 tde
= (struct target_dirent
*)dirp
;
9636 int namelen
, treclen
;
9637 int reclen
= de
->d_reclen
;
9638 uint64_t ino
= de
->d_ino
;
9639 int64_t off
= de
->d_off
;
9640 uint8_t type
= de
->d_type
;
9642 namelen
= strlen(de
->d_name
);
9643 treclen
= offsetof(struct target_dirent
, d_name
)
9645 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
9647 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
9648 tde
->d_ino
= tswapal(ino
);
9649 tde
->d_off
= tswapal(off
);
9650 tde
->d_reclen
= tswap16(treclen
);
9651 /* The target_dirent type is in what was formerly a padding
9652 * byte at the end of the structure:
9654 *(((char *)tde
) + treclen
- 1) = type
;
9656 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9657 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9663 unlock_user(dirp
, arg2
, ret
);
9667 #endif /* TARGET_NR_getdents */
9668 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9669 case TARGET_NR_getdents64
:
9671 struct linux_dirent64
*dirp
;
9672 abi_long count
= arg3
;
9673 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9674 return -TARGET_EFAULT
;
9675 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9676 if (!is_error(ret
)) {
9677 struct linux_dirent64
*de
;
9682 reclen
= de
->d_reclen
;
9685 de
->d_reclen
= tswap16(reclen
);
9686 tswap64s((uint64_t *)&de
->d_ino
);
9687 tswap64s((uint64_t *)&de
->d_off
);
9688 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9692 unlock_user(dirp
, arg2
, ret
);
9695 #endif /* TARGET_NR_getdents64 */
9696 #if defined(TARGET_NR__newselect)
9697 case TARGET_NR__newselect
:
9698 return do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9700 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9701 # ifdef TARGET_NR_poll
9702 case TARGET_NR_poll
:
9704 # ifdef TARGET_NR_ppoll
9705 case TARGET_NR_ppoll
:
9708 struct target_pollfd
*target_pfd
;
9709 unsigned int nfds
= arg2
;
9716 if (nfds
> (INT_MAX
/ sizeof(struct target_pollfd
))) {
9717 return -TARGET_EINVAL
;
9720 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
9721 sizeof(struct target_pollfd
) * nfds
, 1);
9723 return -TARGET_EFAULT
;
9726 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
9727 for (i
= 0; i
< nfds
; i
++) {
9728 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
9729 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
9734 # ifdef TARGET_NR_ppoll
9735 case TARGET_NR_ppoll
:
9737 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
9738 target_sigset_t
*target_set
;
9739 sigset_t _set
, *set
= &_set
;
9742 if (target_to_host_timespec(timeout_ts
, arg3
)) {
9743 unlock_user(target_pfd
, arg1
, 0);
9744 return -TARGET_EFAULT
;
9751 if (arg5
!= sizeof(target_sigset_t
)) {
9752 unlock_user(target_pfd
, arg1
, 0);
9753 return -TARGET_EINVAL
;
9756 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
9758 unlock_user(target_pfd
, arg1
, 0);
9759 return -TARGET_EFAULT
;
9761 target_to_host_sigset(set
, target_set
);
9766 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
9767 set
, SIGSET_T_SIZE
));
9769 if (!is_error(ret
) && arg3
) {
9770 host_to_target_timespec(arg3
, timeout_ts
);
9773 unlock_user(target_set
, arg4
, 0);
9778 # ifdef TARGET_NR_poll
9779 case TARGET_NR_poll
:
9781 struct timespec ts
, *pts
;
9784 /* Convert ms to secs, ns */
9785 ts
.tv_sec
= arg3
/ 1000;
9786 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
9789 /* -ve poll() timeout means "infinite" */
9792 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
9797 g_assert_not_reached();
9800 if (!is_error(ret
)) {
9801 for(i
= 0; i
< nfds
; i
++) {
9802 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
9805 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
9809 case TARGET_NR_flock
:
9810 /* NOTE: the flock constant seems to be the same for every
9812 return get_errno(safe_flock(arg1
, arg2
));
9813 case TARGET_NR_readv
:
9815 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
9817 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
9818 unlock_iovec(vec
, arg2
, arg3
, 1);
9820 ret
= -host_to_target_errno(errno
);
9824 case TARGET_NR_writev
:
9826 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
9828 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
9829 unlock_iovec(vec
, arg2
, arg3
, 0);
9831 ret
= -host_to_target_errno(errno
);
9835 #if defined(TARGET_NR_preadv)
9836 case TARGET_NR_preadv
:
9838 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
9840 unsigned long low
, high
;
9842 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
9843 ret
= get_errno(safe_preadv(arg1
, vec
, arg3
, low
, high
));
9844 unlock_iovec(vec
, arg2
, arg3
, 1);
9846 ret
= -host_to_target_errno(errno
);
9851 #if defined(TARGET_NR_pwritev)
9852 case TARGET_NR_pwritev
:
9854 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
9856 unsigned long low
, high
;
9858 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
9859 ret
= get_errno(safe_pwritev(arg1
, vec
, arg3
, low
, high
));
9860 unlock_iovec(vec
, arg2
, arg3
, 0);
9862 ret
= -host_to_target_errno(errno
);
9867 case TARGET_NR_getsid
:
9868 return get_errno(getsid(arg1
));
9869 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9870 case TARGET_NR_fdatasync
:
9871 return get_errno(fdatasync(arg1
));
9873 #ifdef TARGET_NR__sysctl
9874 case TARGET_NR__sysctl
:
9875 /* We don't implement this, but ENOTDIR is always a safe
9877 return -TARGET_ENOTDIR
;
9879 case TARGET_NR_sched_getaffinity
:
9881 unsigned int mask_size
;
9882 unsigned long *mask
;
9885 * sched_getaffinity needs multiples of ulong, so need to take
9886 * care of mismatches between target ulong and host ulong sizes.
9888 if (arg2
& (sizeof(abi_ulong
) - 1)) {
9889 return -TARGET_EINVAL
;
9891 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
9893 mask
= alloca(mask_size
);
9894 memset(mask
, 0, mask_size
);
9895 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
9897 if (!is_error(ret
)) {
9899 /* More data returned than the caller's buffer will fit.
9900 * This only happens if sizeof(abi_long) < sizeof(long)
9901 * and the caller passed us a buffer holding an odd number
9902 * of abi_longs. If the host kernel is actually using the
9903 * extra 4 bytes then fail EINVAL; otherwise we can just
9904 * ignore them and only copy the interesting part.
9906 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
9907 if (numcpus
> arg2
* 8) {
9908 return -TARGET_EINVAL
;
9913 if (host_to_target_cpu_mask(mask
, mask_size
, arg3
, ret
)) {
9914 return -TARGET_EFAULT
;
9919 case TARGET_NR_sched_setaffinity
:
9921 unsigned int mask_size
;
9922 unsigned long *mask
;
9925 * sched_setaffinity needs multiples of ulong, so need to take
9926 * care of mismatches between target ulong and host ulong sizes.
9928 if (arg2
& (sizeof(abi_ulong
) - 1)) {
9929 return -TARGET_EINVAL
;
9931 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
9932 mask
= alloca(mask_size
);
9934 ret
= target_to_host_cpu_mask(mask
, mask_size
, arg3
, arg2
);
9939 return get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
9941 case TARGET_NR_getcpu
:
9944 ret
= get_errno(sys_getcpu(arg1
? &cpu
: NULL
,
9945 arg2
? &node
: NULL
,
9947 if (is_error(ret
)) {
9950 if (arg1
&& put_user_u32(cpu
, arg1
)) {
9951 return -TARGET_EFAULT
;
9953 if (arg2
&& put_user_u32(node
, arg2
)) {
9954 return -TARGET_EFAULT
;
9958 case TARGET_NR_sched_setparam
:
9960 struct sched_param
*target_schp
;
9961 struct sched_param schp
;
9964 return -TARGET_EINVAL
;
9966 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
9967 return -TARGET_EFAULT
;
9968 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
9969 unlock_user_struct(target_schp
, arg2
, 0);
9970 return get_errno(sched_setparam(arg1
, &schp
));
9972 case TARGET_NR_sched_getparam
:
9974 struct sched_param
*target_schp
;
9975 struct sched_param schp
;
9978 return -TARGET_EINVAL
;
9980 ret
= get_errno(sched_getparam(arg1
, &schp
));
9981 if (!is_error(ret
)) {
9982 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
9983 return -TARGET_EFAULT
;
9984 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
9985 unlock_user_struct(target_schp
, arg2
, 1);
9989 case TARGET_NR_sched_setscheduler
:
9991 struct sched_param
*target_schp
;
9992 struct sched_param schp
;
9994 return -TARGET_EINVAL
;
9996 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
9997 return -TARGET_EFAULT
;
9998 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
9999 unlock_user_struct(target_schp
, arg3
, 0);
10000 return get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
10002 case TARGET_NR_sched_getscheduler
:
10003 return get_errno(sched_getscheduler(arg1
));
10004 case TARGET_NR_sched_yield
:
10005 return get_errno(sched_yield());
10006 case TARGET_NR_sched_get_priority_max
:
10007 return get_errno(sched_get_priority_max(arg1
));
10008 case TARGET_NR_sched_get_priority_min
:
10009 return get_errno(sched_get_priority_min(arg1
));
10010 case TARGET_NR_sched_rr_get_interval
:
10012 struct timespec ts
;
10013 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
10014 if (!is_error(ret
)) {
10015 ret
= host_to_target_timespec(arg2
, &ts
);
10019 case TARGET_NR_nanosleep
:
10021 struct timespec req
, rem
;
10022 target_to_host_timespec(&req
, arg1
);
10023 ret
= get_errno(safe_nanosleep(&req
, &rem
));
10024 if (is_error(ret
) && arg2
) {
10025 host_to_target_timespec(arg2
, &rem
);
10029 case TARGET_NR_prctl
:
10031 case PR_GET_PDEATHSIG
:
10034 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
10035 if (!is_error(ret
) && arg2
10036 && put_user_ual(deathsig
, arg2
)) {
10037 return -TARGET_EFAULT
;
10044 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
10046 return -TARGET_EFAULT
;
10048 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10049 arg3
, arg4
, arg5
));
10050 unlock_user(name
, arg2
, 16);
10055 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
10057 return -TARGET_EFAULT
;
10059 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10060 arg3
, arg4
, arg5
));
10061 unlock_user(name
, arg2
, 0);
10066 case TARGET_PR_GET_FP_MODE
:
10068 CPUMIPSState
*env
= ((CPUMIPSState
*)cpu_env
);
10070 if (env
->CP0_Status
& (1 << CP0St_FR
)) {
10071 ret
|= TARGET_PR_FP_MODE_FR
;
10073 if (env
->CP0_Config5
& (1 << CP0C5_FRE
)) {
10074 ret
|= TARGET_PR_FP_MODE_FRE
;
10078 case TARGET_PR_SET_FP_MODE
:
10080 CPUMIPSState
*env
= ((CPUMIPSState
*)cpu_env
);
10081 bool old_fr
= env
->CP0_Status
& (1 << CP0St_FR
);
10082 bool old_fre
= env
->CP0_Config5
& (1 << CP0C5_FRE
);
10083 bool new_fr
= arg2
& TARGET_PR_FP_MODE_FR
;
10084 bool new_fre
= arg2
& TARGET_PR_FP_MODE_FRE
;
10086 const unsigned int known_bits
= TARGET_PR_FP_MODE_FR
|
10087 TARGET_PR_FP_MODE_FRE
;
10089 /* If nothing to change, return right away, successfully. */
10090 if (old_fr
== new_fr
&& old_fre
== new_fre
) {
10093 /* Check the value is valid */
10094 if (arg2
& ~known_bits
) {
10095 return -TARGET_EOPNOTSUPP
;
10097 /* Setting FRE without FR is not supported. */
10098 if (new_fre
&& !new_fr
) {
10099 return -TARGET_EOPNOTSUPP
;
10101 if (new_fr
&& !(env
->active_fpu
.fcr0
& (1 << FCR0_F64
))) {
10102 /* FR1 is not supported */
10103 return -TARGET_EOPNOTSUPP
;
10105 if (!new_fr
&& (env
->active_fpu
.fcr0
& (1 << FCR0_F64
))
10106 && !(env
->CP0_Status_rw_bitmask
& (1 << CP0St_FR
))) {
10107 /* cannot set FR=0 */
10108 return -TARGET_EOPNOTSUPP
;
10110 if (new_fre
&& !(env
->active_fpu
.fcr0
& (1 << FCR0_FREP
))) {
10111 /* Cannot set FRE=1 */
10112 return -TARGET_EOPNOTSUPP
;
10116 fpr_t
*fpr
= env
->active_fpu
.fpr
;
10117 for (i
= 0; i
< 32 ; i
+= 2) {
10118 if (!old_fr
&& new_fr
) {
10119 fpr
[i
].w
[!FP_ENDIAN_IDX
] = fpr
[i
+ 1].w
[FP_ENDIAN_IDX
];
10120 } else if (old_fr
&& !new_fr
) {
10121 fpr
[i
+ 1].w
[FP_ENDIAN_IDX
] = fpr
[i
].w
[!FP_ENDIAN_IDX
];
10126 env
->CP0_Status
|= (1 << CP0St_FR
);
10127 env
->hflags
|= MIPS_HFLAG_F64
;
10129 env
->CP0_Status
&= ~(1 << CP0St_FR
);
10130 env
->hflags
&= ~MIPS_HFLAG_F64
;
10133 env
->CP0_Config5
|= (1 << CP0C5_FRE
);
10134 if (env
->active_fpu
.fcr0
& (1 << FCR0_FREP
)) {
10135 env
->hflags
|= MIPS_HFLAG_FRE
;
10138 env
->CP0_Config5
&= ~(1 << CP0C5_FRE
);
10139 env
->hflags
&= ~MIPS_HFLAG_FRE
;
10145 #ifdef TARGET_AARCH64
10146 case TARGET_PR_SVE_SET_VL
:
10148 * We cannot support either PR_SVE_SET_VL_ONEXEC or
10149 * PR_SVE_VL_INHERIT. Note the kernel definition
10150 * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10151 * even though the current architectural maximum is VQ=16.
10153 ret
= -TARGET_EINVAL
;
10154 if (cpu_isar_feature(aa64_sve
, env_archcpu(cpu_env
))
10155 && arg2
>= 0 && arg2
<= 512 * 16 && !(arg2
& 15)) {
10156 CPUARMState
*env
= cpu_env
;
10157 ARMCPU
*cpu
= env_archcpu(env
);
10158 uint32_t vq
, old_vq
;
10160 old_vq
= (env
->vfp
.zcr_el
[1] & 0xf) + 1;
10161 vq
= MAX(arg2
/ 16, 1);
10162 vq
= MIN(vq
, cpu
->sve_max_vq
);
10165 aarch64_sve_narrow_vq(env
, vq
);
10167 env
->vfp
.zcr_el
[1] = vq
- 1;
10168 arm_rebuild_hflags(env
);
10172 case TARGET_PR_SVE_GET_VL
:
10173 ret
= -TARGET_EINVAL
;
10175 ARMCPU
*cpu
= env_archcpu(cpu_env
);
10176 if (cpu_isar_feature(aa64_sve
, cpu
)) {
10177 ret
= ((cpu
->env
.vfp
.zcr_el
[1] & 0xf) + 1) * 16;
10181 case TARGET_PR_PAC_RESET_KEYS
:
10183 CPUARMState
*env
= cpu_env
;
10184 ARMCPU
*cpu
= env_archcpu(env
);
10186 if (arg3
|| arg4
|| arg5
) {
10187 return -TARGET_EINVAL
;
10189 if (cpu_isar_feature(aa64_pauth
, cpu
)) {
10190 int all
= (TARGET_PR_PAC_APIAKEY
| TARGET_PR_PAC_APIBKEY
|
10191 TARGET_PR_PAC_APDAKEY
| TARGET_PR_PAC_APDBKEY
|
10192 TARGET_PR_PAC_APGAKEY
);
10198 } else if (arg2
& ~all
) {
10199 return -TARGET_EINVAL
;
10201 if (arg2
& TARGET_PR_PAC_APIAKEY
) {
10202 ret
|= qemu_guest_getrandom(&env
->keys
.apia
,
10203 sizeof(ARMPACKey
), &err
);
10205 if (arg2
& TARGET_PR_PAC_APIBKEY
) {
10206 ret
|= qemu_guest_getrandom(&env
->keys
.apib
,
10207 sizeof(ARMPACKey
), &err
);
10209 if (arg2
& TARGET_PR_PAC_APDAKEY
) {
10210 ret
|= qemu_guest_getrandom(&env
->keys
.apda
,
10211 sizeof(ARMPACKey
), &err
);
10213 if (arg2
& TARGET_PR_PAC_APDBKEY
) {
10214 ret
|= qemu_guest_getrandom(&env
->keys
.apdb
,
10215 sizeof(ARMPACKey
), &err
);
10217 if (arg2
& TARGET_PR_PAC_APGAKEY
) {
10218 ret
|= qemu_guest_getrandom(&env
->keys
.apga
,
10219 sizeof(ARMPACKey
), &err
);
10223 * Some unknown failure in the crypto. The best
10224 * we can do is log it and fail the syscall.
10225 * The real syscall cannot fail this way.
10227 qemu_log_mask(LOG_UNIMP
,
10228 "PR_PAC_RESET_KEYS: Crypto failure: %s",
10229 error_get_pretty(err
));
10231 return -TARGET_EIO
;
10236 return -TARGET_EINVAL
;
10237 #endif /* AARCH64 */
10238 case PR_GET_SECCOMP
:
10239 case PR_SET_SECCOMP
:
10240 /* Disable seccomp to prevent the target disabling syscalls we
10242 return -TARGET_EINVAL
;
10244 /* Most prctl options have no pointer arguments */
10245 return get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
10248 #ifdef TARGET_NR_arch_prctl
10249 case TARGET_NR_arch_prctl
:
10250 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10251 return do_arch_prctl(cpu_env
, arg1
, arg2
);
10256 #ifdef TARGET_NR_pread64
10257 case TARGET_NR_pread64
:
10258 if (regpairs_aligned(cpu_env
, num
)) {
10262 if (arg2
== 0 && arg3
== 0) {
10263 /* Special-case NULL buffer and zero length, which should succeed */
10266 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10268 return -TARGET_EFAULT
;
10271 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10272 unlock_user(p
, arg2
, ret
);
10274 case TARGET_NR_pwrite64
:
10275 if (regpairs_aligned(cpu_env
, num
)) {
10279 if (arg2
== 0 && arg3
== 0) {
10280 /* Special-case NULL buffer and zero length, which should succeed */
10283 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
10285 return -TARGET_EFAULT
;
10288 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10289 unlock_user(p
, arg2
, 0);
10292 case TARGET_NR_getcwd
:
10293 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
10294 return -TARGET_EFAULT
;
10295 ret
= get_errno(sys_getcwd1(p
, arg2
));
10296 unlock_user(p
, arg1
, ret
);
10298 case TARGET_NR_capget
:
10299 case TARGET_NR_capset
:
10301 struct target_user_cap_header
*target_header
;
10302 struct target_user_cap_data
*target_data
= NULL
;
10303 struct __user_cap_header_struct header
;
10304 struct __user_cap_data_struct data
[2];
10305 struct __user_cap_data_struct
*dataptr
= NULL
;
10306 int i
, target_datalen
;
10307 int data_items
= 1;
10309 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
10310 return -TARGET_EFAULT
;
10312 header
.version
= tswap32(target_header
->version
);
10313 header
.pid
= tswap32(target_header
->pid
);
10315 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
10316 /* Version 2 and up takes pointer to two user_data structs */
10320 target_datalen
= sizeof(*target_data
) * data_items
;
10323 if (num
== TARGET_NR_capget
) {
10324 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
10326 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
10328 if (!target_data
) {
10329 unlock_user_struct(target_header
, arg1
, 0);
10330 return -TARGET_EFAULT
;
10333 if (num
== TARGET_NR_capset
) {
10334 for (i
= 0; i
< data_items
; i
++) {
10335 data
[i
].effective
= tswap32(target_data
[i
].effective
);
10336 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
10337 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
10344 if (num
== TARGET_NR_capget
) {
10345 ret
= get_errno(capget(&header
, dataptr
));
10347 ret
= get_errno(capset(&header
, dataptr
));
10350 /* The kernel always updates version for both capget and capset */
10351 target_header
->version
= tswap32(header
.version
);
10352 unlock_user_struct(target_header
, arg1
, 1);
10355 if (num
== TARGET_NR_capget
) {
10356 for (i
= 0; i
< data_items
; i
++) {
10357 target_data
[i
].effective
= tswap32(data
[i
].effective
);
10358 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
10359 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
10361 unlock_user(target_data
, arg2
, target_datalen
);
10363 unlock_user(target_data
, arg2
, 0);
10368 case TARGET_NR_sigaltstack
:
10369 return do_sigaltstack(arg1
, arg2
,
10370 get_sp_from_cpustate((CPUArchState
*)cpu_env
));
10372 #ifdef CONFIG_SENDFILE
10373 #ifdef TARGET_NR_sendfile
10374 case TARGET_NR_sendfile
:
10376 off_t
*offp
= NULL
;
10379 ret
= get_user_sal(off
, arg3
);
10380 if (is_error(ret
)) {
10385 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10386 if (!is_error(ret
) && arg3
) {
10387 abi_long ret2
= put_user_sal(off
, arg3
);
10388 if (is_error(ret2
)) {
10395 #ifdef TARGET_NR_sendfile64
10396 case TARGET_NR_sendfile64
:
10398 off_t
*offp
= NULL
;
10401 ret
= get_user_s64(off
, arg3
);
10402 if (is_error(ret
)) {
10407 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10408 if (!is_error(ret
) && arg3
) {
10409 abi_long ret2
= put_user_s64(off
, arg3
);
10410 if (is_error(ret2
)) {
10418 #ifdef TARGET_NR_vfork
10419 case TARGET_NR_vfork
:
10420 return get_errno(do_fork(cpu_env
,
10421 CLONE_VFORK
| CLONE_VM
| TARGET_SIGCHLD
,
10424 #ifdef TARGET_NR_ugetrlimit
10425 case TARGET_NR_ugetrlimit
:
10427 struct rlimit rlim
;
10428 int resource
= target_to_host_resource(arg1
);
10429 ret
= get_errno(getrlimit(resource
, &rlim
));
10430 if (!is_error(ret
)) {
10431 struct target_rlimit
*target_rlim
;
10432 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
10433 return -TARGET_EFAULT
;
10434 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
10435 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
10436 unlock_user_struct(target_rlim
, arg2
, 1);
10441 #ifdef TARGET_NR_truncate64
10442 case TARGET_NR_truncate64
:
10443 if (!(p
= lock_user_string(arg1
)))
10444 return -TARGET_EFAULT
;
10445 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
10446 unlock_user(p
, arg1
, 0);
10449 #ifdef TARGET_NR_ftruncate64
10450 case TARGET_NR_ftruncate64
:
10451 return target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
10453 #ifdef TARGET_NR_stat64
10454 case TARGET_NR_stat64
:
10455 if (!(p
= lock_user_string(arg1
))) {
10456 return -TARGET_EFAULT
;
10458 ret
= get_errno(stat(path(p
), &st
));
10459 unlock_user(p
, arg1
, 0);
10460 if (!is_error(ret
))
10461 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10464 #ifdef TARGET_NR_lstat64
10465 case TARGET_NR_lstat64
:
10466 if (!(p
= lock_user_string(arg1
))) {
10467 return -TARGET_EFAULT
;
10469 ret
= get_errno(lstat(path(p
), &st
));
10470 unlock_user(p
, arg1
, 0);
10471 if (!is_error(ret
))
10472 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10475 #ifdef TARGET_NR_fstat64
10476 case TARGET_NR_fstat64
:
10477 ret
= get_errno(fstat(arg1
, &st
));
10478 if (!is_error(ret
))
10479 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10482 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10483 #ifdef TARGET_NR_fstatat64
10484 case TARGET_NR_fstatat64
:
10486 #ifdef TARGET_NR_newfstatat
10487 case TARGET_NR_newfstatat
:
10489 if (!(p
= lock_user_string(arg2
))) {
10490 return -TARGET_EFAULT
;
10492 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
10493 unlock_user(p
, arg2
, 0);
10494 if (!is_error(ret
))
10495 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
10498 #if defined(TARGET_NR_statx)
10499 case TARGET_NR_statx
:
10501 struct target_statx
*target_stx
;
10505 p
= lock_user_string(arg2
);
10507 return -TARGET_EFAULT
;
10509 #if defined(__NR_statx)
10512 * It is assumed that struct statx is architecture independent.
10514 struct target_statx host_stx
;
10517 ret
= get_errno(sys_statx(dirfd
, p
, flags
, mask
, &host_stx
));
10518 if (!is_error(ret
)) {
10519 if (host_to_target_statx(&host_stx
, arg5
) != 0) {
10520 unlock_user(p
, arg2
, 0);
10521 return -TARGET_EFAULT
;
10525 if (ret
!= -TARGET_ENOSYS
) {
10526 unlock_user(p
, arg2
, 0);
10531 ret
= get_errno(fstatat(dirfd
, path(p
), &st
, flags
));
10532 unlock_user(p
, arg2
, 0);
10534 if (!is_error(ret
)) {
10535 if (!lock_user_struct(VERIFY_WRITE
, target_stx
, arg5
, 0)) {
10536 return -TARGET_EFAULT
;
10538 memset(target_stx
, 0, sizeof(*target_stx
));
10539 __put_user(major(st
.st_dev
), &target_stx
->stx_dev_major
);
10540 __put_user(minor(st
.st_dev
), &target_stx
->stx_dev_minor
);
10541 __put_user(st
.st_ino
, &target_stx
->stx_ino
);
10542 __put_user(st
.st_mode
, &target_stx
->stx_mode
);
10543 __put_user(st
.st_uid
, &target_stx
->stx_uid
);
10544 __put_user(st
.st_gid
, &target_stx
->stx_gid
);
10545 __put_user(st
.st_nlink
, &target_stx
->stx_nlink
);
10546 __put_user(major(st
.st_rdev
), &target_stx
->stx_rdev_major
);
10547 __put_user(minor(st
.st_rdev
), &target_stx
->stx_rdev_minor
);
10548 __put_user(st
.st_size
, &target_stx
->stx_size
);
10549 __put_user(st
.st_blksize
, &target_stx
->stx_blksize
);
10550 __put_user(st
.st_blocks
, &target_stx
->stx_blocks
);
10551 __put_user(st
.st_atime
, &target_stx
->stx_atime
.tv_sec
);
10552 __put_user(st
.st_mtime
, &target_stx
->stx_mtime
.tv_sec
);
10553 __put_user(st
.st_ctime
, &target_stx
->stx_ctime
.tv_sec
);
10554 unlock_user_struct(target_stx
, arg5
, 1);
10559 #ifdef TARGET_NR_lchown
10560 case TARGET_NR_lchown
:
10561 if (!(p
= lock_user_string(arg1
)))
10562 return -TARGET_EFAULT
;
10563 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10564 unlock_user(p
, arg1
, 0);
10567 #ifdef TARGET_NR_getuid
10568 case TARGET_NR_getuid
:
10569 return get_errno(high2lowuid(getuid()));
10571 #ifdef TARGET_NR_getgid
10572 case TARGET_NR_getgid
:
10573 return get_errno(high2lowgid(getgid()));
10575 #ifdef TARGET_NR_geteuid
10576 case TARGET_NR_geteuid
:
10577 return get_errno(high2lowuid(geteuid()));
10579 #ifdef TARGET_NR_getegid
10580 case TARGET_NR_getegid
:
10581 return get_errno(high2lowgid(getegid()));
10583 case TARGET_NR_setreuid
:
10584 return get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
10585 case TARGET_NR_setregid
:
10586 return get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
10587 case TARGET_NR_getgroups
:
10589 int gidsetsize
= arg1
;
10590 target_id
*target_grouplist
;
10594 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10595 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10596 if (gidsetsize
== 0)
10598 if (!is_error(ret
)) {
10599 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
10600 if (!target_grouplist
)
10601 return -TARGET_EFAULT
;
10602 for(i
= 0;i
< ret
; i
++)
10603 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
10604 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
10608 case TARGET_NR_setgroups
:
10610 int gidsetsize
= arg1
;
10611 target_id
*target_grouplist
;
10612 gid_t
*grouplist
= NULL
;
10615 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10616 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
10617 if (!target_grouplist
) {
10618 return -TARGET_EFAULT
;
10620 for (i
= 0; i
< gidsetsize
; i
++) {
10621 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
10623 unlock_user(target_grouplist
, arg2
, 0);
10625 return get_errno(setgroups(gidsetsize
, grouplist
));
10627 case TARGET_NR_fchown
:
10628 return get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
10629 #if defined(TARGET_NR_fchownat)
10630 case TARGET_NR_fchownat
:
10631 if (!(p
= lock_user_string(arg2
)))
10632 return -TARGET_EFAULT
;
10633 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
10634 low2highgid(arg4
), arg5
));
10635 unlock_user(p
, arg2
, 0);
10638 #ifdef TARGET_NR_setresuid
10639 case TARGET_NR_setresuid
:
10640 return get_errno(sys_setresuid(low2highuid(arg1
),
10642 low2highuid(arg3
)));
10644 #ifdef TARGET_NR_getresuid
10645 case TARGET_NR_getresuid
:
10647 uid_t ruid
, euid
, suid
;
10648 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
10649 if (!is_error(ret
)) {
10650 if (put_user_id(high2lowuid(ruid
), arg1
)
10651 || put_user_id(high2lowuid(euid
), arg2
)
10652 || put_user_id(high2lowuid(suid
), arg3
))
10653 return -TARGET_EFAULT
;
10658 #ifdef TARGET_NR_getresgid
10659 case TARGET_NR_setresgid
:
10660 return get_errno(sys_setresgid(low2highgid(arg1
),
10662 low2highgid(arg3
)));
10664 #ifdef TARGET_NR_getresgid
10665 case TARGET_NR_getresgid
:
10667 gid_t rgid
, egid
, sgid
;
10668 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
10669 if (!is_error(ret
)) {
10670 if (put_user_id(high2lowgid(rgid
), arg1
)
10671 || put_user_id(high2lowgid(egid
), arg2
)
10672 || put_user_id(high2lowgid(sgid
), arg3
))
10673 return -TARGET_EFAULT
;
10678 #ifdef TARGET_NR_chown
10679 case TARGET_NR_chown
:
10680 if (!(p
= lock_user_string(arg1
)))
10681 return -TARGET_EFAULT
;
10682 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10683 unlock_user(p
, arg1
, 0);
10686 case TARGET_NR_setuid
:
10687 return get_errno(sys_setuid(low2highuid(arg1
)));
10688 case TARGET_NR_setgid
:
10689 return get_errno(sys_setgid(low2highgid(arg1
)));
10690 case TARGET_NR_setfsuid
:
10691 return get_errno(setfsuid(arg1
));
10692 case TARGET_NR_setfsgid
:
10693 return get_errno(setfsgid(arg1
));
10695 #ifdef TARGET_NR_lchown32
10696 case TARGET_NR_lchown32
:
10697 if (!(p
= lock_user_string(arg1
)))
10698 return -TARGET_EFAULT
;
10699 ret
= get_errno(lchown(p
, arg2
, arg3
));
10700 unlock_user(p
, arg1
, 0);
10703 #ifdef TARGET_NR_getuid32
10704 case TARGET_NR_getuid32
:
10705 return get_errno(getuid());
10708 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10709 /* Alpha specific */
10710 case TARGET_NR_getxuid
:
10714 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
10716 return get_errno(getuid());
10718 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10719 /* Alpha specific */
10720 case TARGET_NR_getxgid
:
10724 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
10726 return get_errno(getgid());
10728 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10729 /* Alpha specific */
10730 case TARGET_NR_osf_getsysinfo
:
10731 ret
= -TARGET_EOPNOTSUPP
;
10733 case TARGET_GSI_IEEE_FP_CONTROL
:
10735 uint64_t fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10736 uint64_t swcr
= ((CPUAlphaState
*)cpu_env
)->swcr
;
10738 swcr
&= ~SWCR_STATUS_MASK
;
10739 swcr
|= (fpcr
>> 35) & SWCR_STATUS_MASK
;
10741 if (put_user_u64 (swcr
, arg2
))
10742 return -TARGET_EFAULT
;
10747 /* case GSI_IEEE_STATE_AT_SIGNAL:
10748 -- Not implemented in linux kernel.
10750 -- Retrieves current unaligned access state; not much used.
10751 case GSI_PROC_TYPE:
10752 -- Retrieves implver information; surely not used.
10753 case GSI_GET_HWRPB:
10754 -- Grabs a copy of the HWRPB; surely not used.
10759 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10760 /* Alpha specific */
10761 case TARGET_NR_osf_setsysinfo
:
10762 ret
= -TARGET_EOPNOTSUPP
;
10764 case TARGET_SSI_IEEE_FP_CONTROL
:
10766 uint64_t swcr
, fpcr
;
10768 if (get_user_u64 (swcr
, arg2
)) {
10769 return -TARGET_EFAULT
;
10773 * The kernel calls swcr_update_status to update the
10774 * status bits from the fpcr at every point that it
10775 * could be queried. Therefore, we store the status
10776 * bits only in FPCR.
10778 ((CPUAlphaState
*)cpu_env
)->swcr
10779 = swcr
& (SWCR_TRAP_ENABLE_MASK
| SWCR_MAP_MASK
);
10781 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10782 fpcr
&= ((uint64_t)FPCR_DYN_MASK
<< 32);
10783 fpcr
|= alpha_ieee_swcr_to_fpcr(swcr
);
10784 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
10789 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
10791 uint64_t exc
, fpcr
, fex
;
10793 if (get_user_u64(exc
, arg2
)) {
10794 return -TARGET_EFAULT
;
10796 exc
&= SWCR_STATUS_MASK
;
10797 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10799 /* Old exceptions are not signaled. */
10800 fex
= alpha_ieee_fpcr_to_swcr(fpcr
);
10802 fex
>>= SWCR_STATUS_TO_EXCSUM_SHIFT
;
10803 fex
&= ((CPUArchState
*)cpu_env
)->swcr
;
10805 /* Update the hardware fpcr. */
10806 fpcr
|= alpha_ieee_swcr_to_fpcr(exc
);
10807 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
10810 int si_code
= TARGET_FPE_FLTUNK
;
10811 target_siginfo_t info
;
10813 if (fex
& SWCR_TRAP_ENABLE_DNO
) {
10814 si_code
= TARGET_FPE_FLTUND
;
10816 if (fex
& SWCR_TRAP_ENABLE_INE
) {
10817 si_code
= TARGET_FPE_FLTRES
;
10819 if (fex
& SWCR_TRAP_ENABLE_UNF
) {
10820 si_code
= TARGET_FPE_FLTUND
;
10822 if (fex
& SWCR_TRAP_ENABLE_OVF
) {
10823 si_code
= TARGET_FPE_FLTOVF
;
10825 if (fex
& SWCR_TRAP_ENABLE_DZE
) {
10826 si_code
= TARGET_FPE_FLTDIV
;
10828 if (fex
& SWCR_TRAP_ENABLE_INV
) {
10829 si_code
= TARGET_FPE_FLTINV
;
10832 info
.si_signo
= SIGFPE
;
10834 info
.si_code
= si_code
;
10835 info
._sifields
._sigfault
._addr
10836 = ((CPUArchState
*)cpu_env
)->pc
;
10837 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
10838 QEMU_SI_FAULT
, &info
);
10844 /* case SSI_NVPAIRS:
10845 -- Used with SSIN_UACPROC to enable unaligned accesses.
10846 case SSI_IEEE_STATE_AT_SIGNAL:
10847 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10848 -- Not implemented in linux kernel
10853 #ifdef TARGET_NR_osf_sigprocmask
10854 /* Alpha specific. */
10855 case TARGET_NR_osf_sigprocmask
:
10859 sigset_t set
, oldset
;
10862 case TARGET_SIG_BLOCK
:
10865 case TARGET_SIG_UNBLOCK
:
10868 case TARGET_SIG_SETMASK
:
10872 return -TARGET_EINVAL
;
10875 target_to_host_old_sigset(&set
, &mask
);
10876 ret
= do_sigprocmask(how
, &set
, &oldset
);
10878 host_to_target_old_sigset(&mask
, &oldset
);
10885 #ifdef TARGET_NR_getgid32
10886 case TARGET_NR_getgid32
:
10887 return get_errno(getgid());
10889 #ifdef TARGET_NR_geteuid32
10890 case TARGET_NR_geteuid32
:
10891 return get_errno(geteuid());
10893 #ifdef TARGET_NR_getegid32
10894 case TARGET_NR_getegid32
:
10895 return get_errno(getegid());
10897 #ifdef TARGET_NR_setreuid32
10898 case TARGET_NR_setreuid32
:
10899 return get_errno(setreuid(arg1
, arg2
));
10901 #ifdef TARGET_NR_setregid32
10902 case TARGET_NR_setregid32
:
10903 return get_errno(setregid(arg1
, arg2
));
10905 #ifdef TARGET_NR_getgroups32
10906 case TARGET_NR_getgroups32
:
10908 int gidsetsize
= arg1
;
10909 uint32_t *target_grouplist
;
10913 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10914 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10915 if (gidsetsize
== 0)
10917 if (!is_error(ret
)) {
10918 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
10919 if (!target_grouplist
) {
10920 return -TARGET_EFAULT
;
10922 for(i
= 0;i
< ret
; i
++)
10923 target_grouplist
[i
] = tswap32(grouplist
[i
]);
10924 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
10929 #ifdef TARGET_NR_setgroups32
10930 case TARGET_NR_setgroups32
:
10932 int gidsetsize
= arg1
;
10933 uint32_t *target_grouplist
;
10937 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10938 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
10939 if (!target_grouplist
) {
10940 return -TARGET_EFAULT
;
10942 for(i
= 0;i
< gidsetsize
; i
++)
10943 grouplist
[i
] = tswap32(target_grouplist
[i
]);
10944 unlock_user(target_grouplist
, arg2
, 0);
10945 return get_errno(setgroups(gidsetsize
, grouplist
));
10948 #ifdef TARGET_NR_fchown32
10949 case TARGET_NR_fchown32
:
10950 return get_errno(fchown(arg1
, arg2
, arg3
));
10952 #ifdef TARGET_NR_setresuid32
10953 case TARGET_NR_setresuid32
:
10954 return get_errno(sys_setresuid(arg1
, arg2
, arg3
));
10956 #ifdef TARGET_NR_getresuid32
10957 case TARGET_NR_getresuid32
:
10959 uid_t ruid
, euid
, suid
;
10960 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
10961 if (!is_error(ret
)) {
10962 if (put_user_u32(ruid
, arg1
)
10963 || put_user_u32(euid
, arg2
)
10964 || put_user_u32(suid
, arg3
))
10965 return -TARGET_EFAULT
;
10970 #ifdef TARGET_NR_setresgid32
10971 case TARGET_NR_setresgid32
:
10972 return get_errno(sys_setresgid(arg1
, arg2
, arg3
));
10974 #ifdef TARGET_NR_getresgid32
10975 case TARGET_NR_getresgid32
:
10977 gid_t rgid
, egid
, sgid
;
10978 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
10979 if (!is_error(ret
)) {
10980 if (put_user_u32(rgid
, arg1
)
10981 || put_user_u32(egid
, arg2
)
10982 || put_user_u32(sgid
, arg3
))
10983 return -TARGET_EFAULT
;
10988 #ifdef TARGET_NR_chown32
10989 case TARGET_NR_chown32
:
10990 if (!(p
= lock_user_string(arg1
)))
10991 return -TARGET_EFAULT
;
10992 ret
= get_errno(chown(p
, arg2
, arg3
));
10993 unlock_user(p
, arg1
, 0);
10996 #ifdef TARGET_NR_setuid32
10997 case TARGET_NR_setuid32
:
10998 return get_errno(sys_setuid(arg1
));
11000 #ifdef TARGET_NR_setgid32
11001 case TARGET_NR_setgid32
:
11002 return get_errno(sys_setgid(arg1
));
11004 #ifdef TARGET_NR_setfsuid32
11005 case TARGET_NR_setfsuid32
:
11006 return get_errno(setfsuid(arg1
));
11008 #ifdef TARGET_NR_setfsgid32
11009 case TARGET_NR_setfsgid32
:
11010 return get_errno(setfsgid(arg1
));
11012 #ifdef TARGET_NR_mincore
11013 case TARGET_NR_mincore
:
11015 void *a
= lock_user(VERIFY_READ
, arg1
, arg2
, 0);
11017 return -TARGET_ENOMEM
;
11019 p
= lock_user_string(arg3
);
11021 ret
= -TARGET_EFAULT
;
11023 ret
= get_errno(mincore(a
, arg2
, p
));
11024 unlock_user(p
, arg3
, ret
);
11026 unlock_user(a
, arg1
, 0);
11030 #ifdef TARGET_NR_arm_fadvise64_64
11031 case TARGET_NR_arm_fadvise64_64
:
11032 /* arm_fadvise64_64 looks like fadvise64_64 but
11033 * with different argument order: fd, advice, offset, len
11034 * rather than the usual fd, offset, len, advice.
11035 * Note that offset and len are both 64-bit so appear as
11036 * pairs of 32-bit registers.
11038 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
11039 target_offset64(arg5
, arg6
), arg2
);
11040 return -host_to_target_errno(ret
);
11043 #if TARGET_ABI_BITS == 32
11045 #ifdef TARGET_NR_fadvise64_64
11046 case TARGET_NR_fadvise64_64
:
11047 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11048 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11056 /* 6 args: fd, offset (high, low), len (high, low), advice */
11057 if (regpairs_aligned(cpu_env
, num
)) {
11058 /* offset is in (3,4), len in (5,6) and advice in 7 */
11066 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
),
11067 target_offset64(arg4
, arg5
), arg6
);
11068 return -host_to_target_errno(ret
);
11071 #ifdef TARGET_NR_fadvise64
11072 case TARGET_NR_fadvise64
:
11073 /* 5 args: fd, offset (high, low), len, advice */
11074 if (regpairs_aligned(cpu_env
, num
)) {
11075 /* offset is in (3,4), len in 5 and advice in 6 */
11081 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
), arg4
, arg5
);
11082 return -host_to_target_errno(ret
);
11085 #else /* not a 32-bit ABI */
11086 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11087 #ifdef TARGET_NR_fadvise64_64
11088 case TARGET_NR_fadvise64_64
:
11090 #ifdef TARGET_NR_fadvise64
11091 case TARGET_NR_fadvise64
:
11093 #ifdef TARGET_S390X
11095 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
11096 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
11097 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
11098 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
11102 return -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
11104 #endif /* end of 64-bit ABI fadvise handling */
11106 #ifdef TARGET_NR_madvise
11107 case TARGET_NR_madvise
:
11108 /* A straight passthrough may not be safe because qemu sometimes
11109 turns private file-backed mappings into anonymous mappings.
11110 This will break MADV_DONTNEED.
11111 This is a hint, so ignoring and returning success is ok. */
11114 #if TARGET_ABI_BITS == 32
11115 case TARGET_NR_fcntl64
:
11119 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
11120 to_flock64_fn
*copyto
= copy_to_user_flock64
;
11123 if (!((CPUARMState
*)cpu_env
)->eabi
) {
11124 copyfrom
= copy_from_user_oabi_flock64
;
11125 copyto
= copy_to_user_oabi_flock64
;
11129 cmd
= target_to_host_fcntl_cmd(arg2
);
11130 if (cmd
== -TARGET_EINVAL
) {
11135 case TARGET_F_GETLK64
:
11136 ret
= copyfrom(&fl
, arg3
);
11140 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11142 ret
= copyto(arg3
, &fl
);
11146 case TARGET_F_SETLK64
:
11147 case TARGET_F_SETLKW64
:
11148 ret
= copyfrom(&fl
, arg3
);
11152 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11155 ret
= do_fcntl(arg1
, arg2
, arg3
);
11161 #ifdef TARGET_NR_cacheflush
11162 case TARGET_NR_cacheflush
:
11163 /* self-modifying code is handled automatically, so nothing needed */
11166 #ifdef TARGET_NR_getpagesize
11167 case TARGET_NR_getpagesize
:
11168 return TARGET_PAGE_SIZE
;
11170 case TARGET_NR_gettid
:
11171 return get_errno(sys_gettid());
11172 #ifdef TARGET_NR_readahead
11173 case TARGET_NR_readahead
:
11174 #if TARGET_ABI_BITS == 32
11175 if (regpairs_aligned(cpu_env
, num
)) {
11180 ret
= get_errno(readahead(arg1
, target_offset64(arg2
, arg3
) , arg4
));
11182 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
11187 #ifdef TARGET_NR_setxattr
11188 case TARGET_NR_listxattr
:
11189 case TARGET_NR_llistxattr
:
11193 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11195 return -TARGET_EFAULT
;
11198 p
= lock_user_string(arg1
);
11200 if (num
== TARGET_NR_listxattr
) {
11201 ret
= get_errno(listxattr(p
, b
, arg3
));
11203 ret
= get_errno(llistxattr(p
, b
, arg3
));
11206 ret
= -TARGET_EFAULT
;
11208 unlock_user(p
, arg1
, 0);
11209 unlock_user(b
, arg2
, arg3
);
11212 case TARGET_NR_flistxattr
:
11216 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11218 return -TARGET_EFAULT
;
11221 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
11222 unlock_user(b
, arg2
, arg3
);
11225 case TARGET_NR_setxattr
:
11226 case TARGET_NR_lsetxattr
:
11228 void *p
, *n
, *v
= 0;
11230 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11232 return -TARGET_EFAULT
;
11235 p
= lock_user_string(arg1
);
11236 n
= lock_user_string(arg2
);
11238 if (num
== TARGET_NR_setxattr
) {
11239 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
11241 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
11244 ret
= -TARGET_EFAULT
;
11246 unlock_user(p
, arg1
, 0);
11247 unlock_user(n
, arg2
, 0);
11248 unlock_user(v
, arg3
, 0);
11251 case TARGET_NR_fsetxattr
:
11255 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11257 return -TARGET_EFAULT
;
11260 n
= lock_user_string(arg2
);
11262 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
11264 ret
= -TARGET_EFAULT
;
11266 unlock_user(n
, arg2
, 0);
11267 unlock_user(v
, arg3
, 0);
11270 case TARGET_NR_getxattr
:
11271 case TARGET_NR_lgetxattr
:
11273 void *p
, *n
, *v
= 0;
11275 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11277 return -TARGET_EFAULT
;
11280 p
= lock_user_string(arg1
);
11281 n
= lock_user_string(arg2
);
11283 if (num
== TARGET_NR_getxattr
) {
11284 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
11286 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
11289 ret
= -TARGET_EFAULT
;
11291 unlock_user(p
, arg1
, 0);
11292 unlock_user(n
, arg2
, 0);
11293 unlock_user(v
, arg3
, arg4
);
11296 case TARGET_NR_fgetxattr
:
11300 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11302 return -TARGET_EFAULT
;
11305 n
= lock_user_string(arg2
);
11307 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
11309 ret
= -TARGET_EFAULT
;
11311 unlock_user(n
, arg2
, 0);
11312 unlock_user(v
, arg3
, arg4
);
11315 case TARGET_NR_removexattr
:
11316 case TARGET_NR_lremovexattr
:
11319 p
= lock_user_string(arg1
);
11320 n
= lock_user_string(arg2
);
11322 if (num
== TARGET_NR_removexattr
) {
11323 ret
= get_errno(removexattr(p
, n
));
11325 ret
= get_errno(lremovexattr(p
, n
));
11328 ret
= -TARGET_EFAULT
;
11330 unlock_user(p
, arg1
, 0);
11331 unlock_user(n
, arg2
, 0);
11334 case TARGET_NR_fremovexattr
:
11337 n
= lock_user_string(arg2
);
11339 ret
= get_errno(fremovexattr(arg1
, n
));
11341 ret
= -TARGET_EFAULT
;
11343 unlock_user(n
, arg2
, 0);
11347 #endif /* CONFIG_ATTR */
11348 #ifdef TARGET_NR_set_thread_area
11349 case TARGET_NR_set_thread_area
:
11350 #if defined(TARGET_MIPS)
11351 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
11353 #elif defined(TARGET_CRIS)
11355 ret
= -TARGET_EINVAL
;
11357 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
11361 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11362 return do_set_thread_area(cpu_env
, arg1
);
11363 #elif defined(TARGET_M68K)
11365 TaskState
*ts
= cpu
->opaque
;
11366 ts
->tp_value
= arg1
;
11370 return -TARGET_ENOSYS
;
11373 #ifdef TARGET_NR_get_thread_area
11374 case TARGET_NR_get_thread_area
:
11375 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11376 return do_get_thread_area(cpu_env
, arg1
);
11377 #elif defined(TARGET_M68K)
11379 TaskState
*ts
= cpu
->opaque
;
11380 return ts
->tp_value
;
11383 return -TARGET_ENOSYS
;
11386 #ifdef TARGET_NR_getdomainname
11387 case TARGET_NR_getdomainname
:
11388 return -TARGET_ENOSYS
;
11391 #ifdef TARGET_NR_clock_settime
11392 case TARGET_NR_clock_settime
:
11394 struct timespec ts
;
11396 ret
= target_to_host_timespec(&ts
, arg2
);
11397 if (!is_error(ret
)) {
11398 ret
= get_errno(clock_settime(arg1
, &ts
));
11403 #ifdef TARGET_NR_clock_gettime
11404 case TARGET_NR_clock_gettime
:
11406 struct timespec ts
;
11407 ret
= get_errno(clock_gettime(arg1
, &ts
));
11408 if (!is_error(ret
)) {
11409 ret
= host_to_target_timespec(arg2
, &ts
);
11414 #ifdef TARGET_NR_clock_getres
11415 case TARGET_NR_clock_getres
:
11417 struct timespec ts
;
11418 ret
= get_errno(clock_getres(arg1
, &ts
));
11419 if (!is_error(ret
)) {
11420 host_to_target_timespec(arg2
, &ts
);
11425 #ifdef TARGET_NR_clock_nanosleep
11426 case TARGET_NR_clock_nanosleep
:
11428 struct timespec ts
;
11429 target_to_host_timespec(&ts
, arg3
);
11430 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
11431 &ts
, arg4
? &ts
: NULL
));
11433 host_to_target_timespec(arg4
, &ts
);
11435 #if defined(TARGET_PPC)
11436 /* clock_nanosleep is odd in that it returns positive errno values.
11437 * On PPC, CR0 bit 3 should be set in such a situation. */
11438 if (ret
&& ret
!= -TARGET_ERESTARTSYS
) {
11439 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
11446 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11447 case TARGET_NR_set_tid_address
:
11448 return get_errno(set_tid_address((int *)g2h(arg1
)));
11451 case TARGET_NR_tkill
:
11452 return get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
11454 case TARGET_NR_tgkill
:
11455 return get_errno(safe_tgkill((int)arg1
, (int)arg2
,
11456 target_to_host_signal(arg3
)));
11458 #ifdef TARGET_NR_set_robust_list
11459 case TARGET_NR_set_robust_list
:
11460 case TARGET_NR_get_robust_list
:
11461 /* The ABI for supporting robust futexes has userspace pass
11462 * the kernel a pointer to a linked list which is updated by
11463 * userspace after the syscall; the list is walked by the kernel
11464 * when the thread exits. Since the linked list in QEMU guest
11465 * memory isn't a valid linked list for the host and we have
11466 * no way to reliably intercept the thread-death event, we can't
11467 * support these. Silently return ENOSYS so that guest userspace
11468 * falls back to a non-robust futex implementation (which should
11469 * be OK except in the corner case of the guest crashing while
11470 * holding a mutex that is shared with another process via
11473 return -TARGET_ENOSYS
;
11476 #if defined(TARGET_NR_utimensat)
11477 case TARGET_NR_utimensat
:
11479 struct timespec
*tsp
, ts
[2];
11483 target_to_host_timespec(ts
, arg3
);
11484 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
11488 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
11490 if (!(p
= lock_user_string(arg2
))) {
11491 return -TARGET_EFAULT
;
11493 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
11494 unlock_user(p
, arg2
, 0);
11499 case TARGET_NR_futex
:
11500 return do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
11501 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11502 case TARGET_NR_inotify_init
:
11503 ret
= get_errno(sys_inotify_init());
11505 fd_trans_register(ret
, &target_inotify_trans
);
11509 #ifdef CONFIG_INOTIFY1
11510 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11511 case TARGET_NR_inotify_init1
:
11512 ret
= get_errno(sys_inotify_init1(target_to_host_bitmask(arg1
,
11513 fcntl_flags_tbl
)));
11515 fd_trans_register(ret
, &target_inotify_trans
);
11520 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11521 case TARGET_NR_inotify_add_watch
:
11522 p
= lock_user_string(arg2
);
11523 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
11524 unlock_user(p
, arg2
, 0);
11527 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11528 case TARGET_NR_inotify_rm_watch
:
11529 return get_errno(sys_inotify_rm_watch(arg1
, arg2
));
11532 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11533 case TARGET_NR_mq_open
:
11535 struct mq_attr posix_mq_attr
;
11536 struct mq_attr
*pposix_mq_attr
;
11539 host_flags
= target_to_host_bitmask(arg2
, fcntl_flags_tbl
);
11540 pposix_mq_attr
= NULL
;
11542 if (copy_from_user_mq_attr(&posix_mq_attr
, arg4
) != 0) {
11543 return -TARGET_EFAULT
;
11545 pposix_mq_attr
= &posix_mq_attr
;
11547 p
= lock_user_string(arg1
- 1);
11549 return -TARGET_EFAULT
;
11551 ret
= get_errno(mq_open(p
, host_flags
, arg3
, pposix_mq_attr
));
11552 unlock_user (p
, arg1
, 0);
11556 case TARGET_NR_mq_unlink
:
11557 p
= lock_user_string(arg1
- 1);
11559 return -TARGET_EFAULT
;
11561 ret
= get_errno(mq_unlink(p
));
11562 unlock_user (p
, arg1
, 0);
11565 case TARGET_NR_mq_timedsend
:
11567 struct timespec ts
;
11569 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11571 target_to_host_timespec(&ts
, arg5
);
11572 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
11573 host_to_target_timespec(arg5
, &ts
);
11575 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
11577 unlock_user (p
, arg2
, arg3
);
11581 case TARGET_NR_mq_timedreceive
:
11583 struct timespec ts
;
11586 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11588 target_to_host_timespec(&ts
, arg5
);
11589 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11591 host_to_target_timespec(arg5
, &ts
);
11593 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11596 unlock_user (p
, arg2
, arg3
);
11598 put_user_u32(prio
, arg4
);
11602 /* Not implemented for now... */
11603 /* case TARGET_NR_mq_notify: */
11606 case TARGET_NR_mq_getsetattr
:
11608 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
11611 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
11612 ret
= get_errno(mq_setattr(arg1
, &posix_mq_attr_in
,
11613 &posix_mq_attr_out
));
11614 } else if (arg3
!= 0) {
11615 ret
= get_errno(mq_getattr(arg1
, &posix_mq_attr_out
));
11617 if (ret
== 0 && arg3
!= 0) {
11618 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
11624 #ifdef CONFIG_SPLICE
11625 #ifdef TARGET_NR_tee
11626 case TARGET_NR_tee
:
11628 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
11632 #ifdef TARGET_NR_splice
11633 case TARGET_NR_splice
:
11635 loff_t loff_in
, loff_out
;
11636 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
11638 if (get_user_u64(loff_in
, arg2
)) {
11639 return -TARGET_EFAULT
;
11641 ploff_in
= &loff_in
;
11644 if (get_user_u64(loff_out
, arg4
)) {
11645 return -TARGET_EFAULT
;
11647 ploff_out
= &loff_out
;
11649 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
11651 if (put_user_u64(loff_in
, arg2
)) {
11652 return -TARGET_EFAULT
;
11656 if (put_user_u64(loff_out
, arg4
)) {
11657 return -TARGET_EFAULT
;
11663 #ifdef TARGET_NR_vmsplice
11664 case TARGET_NR_vmsplice
:
11666 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
11668 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
11669 unlock_iovec(vec
, arg2
, arg3
, 0);
11671 ret
= -host_to_target_errno(errno
);
11676 #endif /* CONFIG_SPLICE */
11677 #ifdef CONFIG_EVENTFD
11678 #if defined(TARGET_NR_eventfd)
11679 case TARGET_NR_eventfd
:
11680 ret
= get_errno(eventfd(arg1
, 0));
11682 fd_trans_register(ret
, &target_eventfd_trans
);
11686 #if defined(TARGET_NR_eventfd2)
11687 case TARGET_NR_eventfd2
:
11689 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
11690 if (arg2
& TARGET_O_NONBLOCK
) {
11691 host_flags
|= O_NONBLOCK
;
11693 if (arg2
& TARGET_O_CLOEXEC
) {
11694 host_flags
|= O_CLOEXEC
;
11696 ret
= get_errno(eventfd(arg1
, host_flags
));
11698 fd_trans_register(ret
, &target_eventfd_trans
);
11703 #endif /* CONFIG_EVENTFD */
11704 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11705 case TARGET_NR_fallocate
:
11706 #if TARGET_ABI_BITS == 32
11707 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
11708 target_offset64(arg5
, arg6
)));
11710 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
11714 #if defined(CONFIG_SYNC_FILE_RANGE)
11715 #if defined(TARGET_NR_sync_file_range)
11716 case TARGET_NR_sync_file_range
:
11717 #if TARGET_ABI_BITS == 32
11718 #if defined(TARGET_MIPS)
11719 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11720 target_offset64(arg5
, arg6
), arg7
));
11722 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
11723 target_offset64(arg4
, arg5
), arg6
));
11724 #endif /* !TARGET_MIPS */
11726 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
11730 #if defined(TARGET_NR_sync_file_range2)
11731 case TARGET_NR_sync_file_range2
:
11732 /* This is like sync_file_range but the arguments are reordered */
11733 #if TARGET_ABI_BITS == 32
11734 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11735 target_offset64(arg5
, arg6
), arg2
));
11737 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
11742 #if defined(TARGET_NR_signalfd4)
11743 case TARGET_NR_signalfd4
:
11744 return do_signalfd4(arg1
, arg2
, arg4
);
11746 #if defined(TARGET_NR_signalfd)
11747 case TARGET_NR_signalfd
:
11748 return do_signalfd4(arg1
, arg2
, 0);
11750 #if defined(CONFIG_EPOLL)
11751 #if defined(TARGET_NR_epoll_create)
11752 case TARGET_NR_epoll_create
:
11753 return get_errno(epoll_create(arg1
));
11755 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11756 case TARGET_NR_epoll_create1
:
11757 return get_errno(epoll_create1(arg1
));
11759 #if defined(TARGET_NR_epoll_ctl)
11760 case TARGET_NR_epoll_ctl
:
11762 struct epoll_event ep
;
11763 struct epoll_event
*epp
= 0;
11765 struct target_epoll_event
*target_ep
;
11766 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
11767 return -TARGET_EFAULT
;
11769 ep
.events
= tswap32(target_ep
->events
);
11770 /* The epoll_data_t union is just opaque data to the kernel,
11771 * so we transfer all 64 bits across and need not worry what
11772 * actual data type it is.
11774 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
11775 unlock_user_struct(target_ep
, arg4
, 0);
11778 return get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
11782 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11783 #if defined(TARGET_NR_epoll_wait)
11784 case TARGET_NR_epoll_wait
:
11786 #if defined(TARGET_NR_epoll_pwait)
11787 case TARGET_NR_epoll_pwait
:
11790 struct target_epoll_event
*target_ep
;
11791 struct epoll_event
*ep
;
11793 int maxevents
= arg3
;
11794 int timeout
= arg4
;
11796 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
11797 return -TARGET_EINVAL
;
11800 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
11801 maxevents
* sizeof(struct target_epoll_event
), 1);
11803 return -TARGET_EFAULT
;
11806 ep
= g_try_new(struct epoll_event
, maxevents
);
11808 unlock_user(target_ep
, arg2
, 0);
11809 return -TARGET_ENOMEM
;
11813 #if defined(TARGET_NR_epoll_pwait)
11814 case TARGET_NR_epoll_pwait
:
11816 target_sigset_t
*target_set
;
11817 sigset_t _set
, *set
= &_set
;
11820 if (arg6
!= sizeof(target_sigset_t
)) {
11821 ret
= -TARGET_EINVAL
;
11825 target_set
= lock_user(VERIFY_READ
, arg5
,
11826 sizeof(target_sigset_t
), 1);
11828 ret
= -TARGET_EFAULT
;
11831 target_to_host_sigset(set
, target_set
);
11832 unlock_user(target_set
, arg5
, 0);
11837 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
11838 set
, SIGSET_T_SIZE
));
11842 #if defined(TARGET_NR_epoll_wait)
11843 case TARGET_NR_epoll_wait
:
11844 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
11849 ret
= -TARGET_ENOSYS
;
11851 if (!is_error(ret
)) {
11853 for (i
= 0; i
< ret
; i
++) {
11854 target_ep
[i
].events
= tswap32(ep
[i
].events
);
11855 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
11857 unlock_user(target_ep
, arg2
,
11858 ret
* sizeof(struct target_epoll_event
));
11860 unlock_user(target_ep
, arg2
, 0);
11867 #ifdef TARGET_NR_prlimit64
11868 case TARGET_NR_prlimit64
:
11870 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11871 struct target_rlimit64
*target_rnew
, *target_rold
;
11872 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
11873 int resource
= target_to_host_resource(arg2
);
11875 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
11876 return -TARGET_EFAULT
;
11878 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
11879 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
11880 unlock_user_struct(target_rnew
, arg3
, 0);
11884 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
11885 if (!is_error(ret
) && arg4
) {
11886 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
11887 return -TARGET_EFAULT
;
11889 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
11890 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
11891 unlock_user_struct(target_rold
, arg4
, 1);
11896 #ifdef TARGET_NR_gethostname
11897 case TARGET_NR_gethostname
:
11899 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
11901 ret
= get_errno(gethostname(name
, arg2
));
11902 unlock_user(name
, arg1
, arg2
);
11904 ret
= -TARGET_EFAULT
;
11909 #ifdef TARGET_NR_atomic_cmpxchg_32
11910 case TARGET_NR_atomic_cmpxchg_32
:
11912 /* should use start_exclusive from main.c */
11913 abi_ulong mem_value
;
11914 if (get_user_u32(mem_value
, arg6
)) {
11915 target_siginfo_t info
;
11916 info
.si_signo
= SIGSEGV
;
11918 info
.si_code
= TARGET_SEGV_MAPERR
;
11919 info
._sifields
._sigfault
._addr
= arg6
;
11920 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
11921 QEMU_SI_FAULT
, &info
);
11925 if (mem_value
== arg2
)
11926 put_user_u32(arg1
, arg6
);
11930 #ifdef TARGET_NR_atomic_barrier
11931 case TARGET_NR_atomic_barrier
:
11932 /* Like the kernel implementation and the
11933 qemu arm barrier, no-op this? */
11937 #ifdef TARGET_NR_timer_create
11938 case TARGET_NR_timer_create
:
11940 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11942 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
11945 int timer_index
= next_free_host_timer();
11947 if (timer_index
< 0) {
11948 ret
= -TARGET_EAGAIN
;
11950 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
11953 phost_sevp
= &host_sevp
;
11954 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
11960 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
11964 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
11965 return -TARGET_EFAULT
;
11973 #ifdef TARGET_NR_timer_settime
11974 case TARGET_NR_timer_settime
:
11976 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11977 * struct itimerspec * old_value */
11978 target_timer_t timerid
= get_timer_id(arg1
);
11982 } else if (arg3
== 0) {
11983 ret
= -TARGET_EINVAL
;
11985 timer_t htimer
= g_posix_timers
[timerid
];
11986 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
11988 if (target_to_host_itimerspec(&hspec_new
, arg3
)) {
11989 return -TARGET_EFAULT
;
11992 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
11993 if (arg4
&& host_to_target_itimerspec(arg4
, &hspec_old
)) {
11994 return -TARGET_EFAULT
;
12001 #ifdef TARGET_NR_timer_gettime
12002 case TARGET_NR_timer_gettime
:
12004 /* args: timer_t timerid, struct itimerspec *curr_value */
12005 target_timer_t timerid
= get_timer_id(arg1
);
12009 } else if (!arg2
) {
12010 ret
= -TARGET_EFAULT
;
12012 timer_t htimer
= g_posix_timers
[timerid
];
12013 struct itimerspec hspec
;
12014 ret
= get_errno(timer_gettime(htimer
, &hspec
));
12016 if (host_to_target_itimerspec(arg2
, &hspec
)) {
12017 ret
= -TARGET_EFAULT
;
12024 #ifdef TARGET_NR_timer_getoverrun
12025 case TARGET_NR_timer_getoverrun
:
12027 /* args: timer_t timerid */
12028 target_timer_t timerid
= get_timer_id(arg1
);
12033 timer_t htimer
= g_posix_timers
[timerid
];
12034 ret
= get_errno(timer_getoverrun(htimer
));
12040 #ifdef TARGET_NR_timer_delete
12041 case TARGET_NR_timer_delete
:
12043 /* args: timer_t timerid */
12044 target_timer_t timerid
= get_timer_id(arg1
);
12049 timer_t htimer
= g_posix_timers
[timerid
];
12050 ret
= get_errno(timer_delete(htimer
));
12051 g_posix_timers
[timerid
] = 0;
12057 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12058 case TARGET_NR_timerfd_create
:
12059 return get_errno(timerfd_create(arg1
,
12060 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
12063 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12064 case TARGET_NR_timerfd_gettime
:
12066 struct itimerspec its_curr
;
12068 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
12070 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
12071 return -TARGET_EFAULT
;
12077 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12078 case TARGET_NR_timerfd_settime
:
12080 struct itimerspec its_new
, its_old
, *p_new
;
12083 if (target_to_host_itimerspec(&its_new
, arg3
)) {
12084 return -TARGET_EFAULT
;
12091 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
12093 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
12094 return -TARGET_EFAULT
;
12100 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12101 case TARGET_NR_ioprio_get
:
12102 return get_errno(ioprio_get(arg1
, arg2
));
12105 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12106 case TARGET_NR_ioprio_set
:
12107 return get_errno(ioprio_set(arg1
, arg2
, arg3
));
12110 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12111 case TARGET_NR_setns
:
12112 return get_errno(setns(arg1
, arg2
));
12114 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12115 case TARGET_NR_unshare
:
12116 return get_errno(unshare(arg1
));
12118 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12119 case TARGET_NR_kcmp
:
12120 return get_errno(kcmp(arg1
, arg2
, arg3
, arg4
, arg5
));
12122 #ifdef TARGET_NR_swapcontext
12123 case TARGET_NR_swapcontext
:
12124 /* PowerPC specific. */
12125 return do_swapcontext(cpu_env
, arg1
, arg2
, arg3
);
12127 #ifdef TARGET_NR_memfd_create
12128 case TARGET_NR_memfd_create
:
12129 p
= lock_user_string(arg1
);
12131 return -TARGET_EFAULT
;
12133 ret
= get_errno(memfd_create(p
, arg2
));
12134 fd_trans_unregister(ret
);
12135 unlock_user(p
, arg1
, 0);
12138 #if defined TARGET_NR_membarrier && defined __NR_membarrier
12139 case TARGET_NR_membarrier
:
12140 return get_errno(membarrier(arg1
, arg2
));
12144 qemu_log_mask(LOG_UNIMP
, "Unsupported syscall: %d\n", num
);
12145 return -TARGET_ENOSYS
;
12150 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
12151 abi_long arg2
, abi_long arg3
, abi_long arg4
,
12152 abi_long arg5
, abi_long arg6
, abi_long arg7
,
12155 CPUState
*cpu
= env_cpu(cpu_env
);
12158 #ifdef DEBUG_ERESTARTSYS
12159 /* Debug-only code for exercising the syscall-restart code paths
12160 * in the per-architecture cpu main loops: restart every syscall
12161 * the guest makes once before letting it through.
12167 return -TARGET_ERESTARTSYS
;
12172 record_syscall_start(cpu
, num
, arg1
,
12173 arg2
, arg3
, arg4
, arg5
, arg6
, arg7
, arg8
);
12175 if (unlikely(qemu_loglevel_mask(LOG_STRACE
))) {
12176 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
12179 ret
= do_syscall1(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
,
12180 arg5
, arg6
, arg7
, arg8
);
12182 if (unlikely(qemu_loglevel_mask(LOG_STRACE
))) {
12183 print_syscall_ret(num
, ret
);
12186 record_syscall_return(cpu
, num
, ret
);