4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
31 #include <sys/mount.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
38 #include <linux/capability.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
46 #include <sys/times.h>
49 #include <sys/statfs.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include <linux/icmpv6.h>
59 #include <linux/errqueue.h>
60 #include <linux/random.h>
62 #include <sys/timerfd.h>
65 #include <sys/eventfd.h>
68 #include <sys/epoll.h>
71 #include "qemu/xattr.h"
73 #ifdef CONFIG_SENDFILE
74 #include <sys/sendfile.h>
77 #define termios host_termios
78 #define winsize host_winsize
79 #define termio host_termio
80 #define sgttyb host_sgttyb /* same as target */
81 #define tchars host_tchars /* same as target */
82 #define ltchars host_ltchars /* same as target */
84 #include <linux/termios.h>
85 #include <linux/unistd.h>
86 #include <linux/cdrom.h>
87 #include <linux/hdreg.h>
88 #include <linux/soundcard.h>
90 #include <linux/mtio.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
97 #if defined(CONFIG_USBFS)
98 #include <linux/usbdevice_fs.h>
99 #include <linux/usb/ch9.h>
101 #include <linux/vt.h>
102 #include <linux/dm-ioctl.h>
103 #include <linux/reboot.h>
104 #include <linux/route.h>
105 #include <linux/filter.h>
106 #include <linux/blkpg.h>
107 #include <netpacket/packet.h>
108 #include <linux/netlink.h>
109 #include <linux/if_alg.h>
110 #include "linux_loop.h"
114 #include "qemu/guest-random.h"
115 #include "user/syscall-trace.h"
116 #include "qapi/error.h"
117 #include "fd-trans.h"
/* Provided locally because not all host headers define it. */
#define CLONE_IO 0x80000000 /* Clone io context */

/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */
/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS \
    (CLONE_VM | CLONE_FS | CLONE_FILES | \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS \
    (CLONE_DETACHED | CLONE_IO)

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS \
    (CLONE_SETTLS | CLONE_PARENT_SETTID | \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS \
    (CLONE_SETTLS | CLONE_PARENT_SETTID | \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

/* Any flag bit outside the corresponding mask below is unsupported. */
#define CLONE_INVALID_FORK_FLAGS \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

#define CLONE_INVALID_THREAD_FLAGS \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
       CLONE_IGNORED_FLAGS))
162 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
163 * have almost all been allocated. We cannot support any of
164 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
165 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
166 * The checks against the invalid thread masks above will catch these.
167 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
170 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
171 * once. This exercises the codepaths for restart.
173 //#define DEBUG_ERESTARTSYS
175 //#include <linux/msdos_fs.h>
176 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
177 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
187 #define _syscall0(type,name) \
188 static type name (void) \
190 return syscall(__NR_##name); \
193 #define _syscall1(type,name,type1,arg1) \
194 static type name (type1 arg1) \
196 return syscall(__NR_##name, arg1); \
199 #define _syscall2(type,name,type1,arg1,type2,arg2) \
200 static type name (type1 arg1,type2 arg2) \
202 return syscall(__NR_##name, arg1, arg2); \
205 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
206 static type name (type1 arg1,type2 arg2,type3 arg3) \
208 return syscall(__NR_##name, arg1, arg2, arg3); \
211 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
212 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
214 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
217 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
219 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
221 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
225 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
226 type5,arg5,type6,arg6) \
227 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
230 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
/* __NR_sys_* aliases let the _syscallN() macros generate sys_*()-named
 * wrappers (via __NR_##name) without clashing with libc declarations. */
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
#define __NR_sys_statx __NR_statx
248 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
249 #define __NR__llseek __NR_lseek
252 /* Newer kernel ports have llseek() instead of _llseek() */
253 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
254 #define TARGET_NR__llseek TARGET_NR_llseek
#define __NR_sys_gettid __NR_gettid
/* Direct gettid(2) wrapper (expands via _syscall0 above). */
_syscall0(int, sys_gettid)
260 /* For the 64-bit guest on 32-bit host case we must emulate
261 * getdents using getdents64, because otherwise the host
262 * might hand us back more dirent records than we can fit
263 * into the guest buffer after structure format conversion.
264 * Otherwise we emulate getdents with getdents if the host has it.
266 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
267 #define EMULATE_GETDENTS_WITH_GETDENTS
270 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
/* Raw getdents(2): used when host dirent records can be converted for
 * the guest in place (see EMULATE_GETDENTS_WITH_GETDENTS above). */
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
273 #if (defined(TARGET_NR_getdents) && \
274 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
275 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
/* Raw getdents64(2) wrapper. */
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
278 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
/* Raw _llseek(2): the 64-bit offset is passed as hi/lo 32-bit halves;
 * the resulting file position is stored through *res. */
_syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
/* Raw rt_sigqueueinfo(2) wrapper. */
_syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
283 _syscall4(int, sys_rt_tgsigqueueinfo
, pid_t
, pid
, pid_t
, tid
, int, sig
,
/* Raw syslog(2) wrapper. */
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
286 #ifdef __NR_exit_group
/* Raw exit_group(2) wrapper. */
_syscall1(int,exit_group,int,error_code)
289 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
/* Raw set_tid_address(2) wrapper. */
_syscall1(int,set_tid_address,int *,tidptr)
292 #if defined(TARGET_NR_futex) && defined(__NR_futex)
/* Raw futex(2) wrapper (see also the signal-safe safe_futex below). */
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
/* Raw sched_getaffinity(2) wrapper. */
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
/* Raw sched_setaffinity(2) wrapper. */
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_getcpu __NR_getcpu
/* Raw getcpu(2) wrapper. */
_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
304 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
/* Raw capget(2)/capset(2) wrappers using the kernel's
 * __user_cap_header_struct / __user_cap_data_struct layouts. */
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
310 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
/* Raw ioprio_get(2) wrapper. */
_syscall2(int, ioprio_get, int, which, int, who)
313 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
/* Raw ioprio_set(2) wrapper. */
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
316 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
/* Raw getrandom(2) wrapper. */
_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
320 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
/* Raw kcmp(2) wrapper. */
_syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
          unsigned long, idx1, unsigned long, idx2)
326 * It is assumed that struct statx is architecture independent.
328 #if defined(TARGET_NR_statx) && defined(__NR_statx)
/* Raw statx(2) wrapper, filling the guest-layout struct target_statx
 * directly (assumed architecture-independent, per the comment above). */
_syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
          unsigned int, mask, struct target_statx *, statxbuf)
333 static bitmask_transtbl fcntl_flags_tbl
[] = {
334 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
335 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
336 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
337 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
338 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
339 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
340 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
341 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
342 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
343 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
344 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
345 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
346 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
347 #if defined(O_DIRECT)
348 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
350 #if defined(O_NOATIME)
351 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
353 #if defined(O_CLOEXEC)
354 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
357 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
359 #if defined(O_TMPFILE)
360 { TARGET_O_TMPFILE
, TARGET_O_TMPFILE
, O_TMPFILE
, O_TMPFILE
},
362 /* Don't terminate the list prematurely on 64-bit host+guest. */
363 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
364 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
369 static int sys_getcwd1(char *buf
, size_t size
)
371 if (getcwd(buf
, size
) == NULL
) {
372 /* getcwd() sets errno */
375 return strlen(buf
)+1;
378 #ifdef TARGET_NR_utimensat
379 #if defined(__NR_utimensat)
380 #define __NR_sys_utimensat __NR_utimensat
/* Raw utimensat(2) wrapper (used when the host has __NR_utimensat). */
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
384 static int sys_utimensat(int dirfd
, const char *pathname
,
385 const struct timespec times
[2], int flags
)
391 #endif /* TARGET_NR_utimensat */
393 #ifdef TARGET_NR_renameat2
394 #if defined(__NR_renameat2)
395 #define __NR_sys_renameat2 __NR_renameat2
/* Raw renameat2(2) wrapper (used when the host has __NR_renameat2). */
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
399 static int sys_renameat2(int oldfd
, const char *old
,
400 int newfd
, const char *new, int flags
)
403 return renameat(oldfd
, old
, newfd
, new);
409 #endif /* TARGET_NR_renameat2 */
411 #ifdef CONFIG_INOTIFY
412 #include <sys/inotify.h>
414 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
415 static int sys_inotify_init(void)
417 return (inotify_init());
420 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
421 static int sys_inotify_add_watch(int fd
,const char *pathname
, int32_t mask
)
423 return (inotify_add_watch(fd
, pathname
, mask
));
426 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
427 static int sys_inotify_rm_watch(int fd
, int32_t wd
)
429 return (inotify_rm_watch(fd
, wd
));
432 #ifdef CONFIG_INOTIFY1
433 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
434 static int sys_inotify_init1(int flags
)
436 return (inotify_init1(flags
));
441 /* Userspace can usually survive runtime without inotify */
442 #undef TARGET_NR_inotify_init
443 #undef TARGET_NR_inotify_init1
444 #undef TARGET_NR_inotify_add_watch
445 #undef TARGET_NR_inotify_rm_watch
446 #endif /* CONFIG_INOTIFY */
448 #if defined(TARGET_NR_prlimit64)
449 #ifndef __NR_prlimit64
450 # define __NR_prlimit64 -1
452 #define __NR_sys_prlimit64 __NR_prlimit64
453 /* The glibc rlimit structure may not be that used by the underlying syscall */
454 struct host_rlimit64
{
/* Raw prlimit64(2) wrapper using struct host_rlimit64, because the
 * glibc rlimit structure may not match the one the syscall expects
 * (see the comment above). */
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
464 #if defined(TARGET_NR_timer_create)
465 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
466 static timer_t g_posix_timers
[32] = { 0, } ;
468 static inline int next_free_host_timer(void)
471 /* FIXME: Does finding the next free slot require a lock? */
472 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
473 if (g_posix_timers
[k
] == 0) {
474 g_posix_timers
[k
] = (timer_t
) 1;
482 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
484 static inline int regpairs_aligned(void *cpu_env
, int num
)
486 return ((((CPUARMState
*)cpu_env
)->eabi
) == 1) ;
488 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
489 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
490 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
491 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
492 * of registers which translates to the same as ARM/MIPS, because we start with
494 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
495 #elif defined(TARGET_SH4)
496 /* SH4 doesn't align register pairs, except for p{read,write}64 */
497 static inline int regpairs_aligned(void *cpu_env
, int num
)
500 case TARGET_NR_pread64
:
501 case TARGET_NR_pwrite64
:
508 #elif defined(TARGET_XTENSA)
509 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
511 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 0; }
514 #define ERRNO_TABLE_SIZE 1200
516 /* target_to_host_errno_table[] is initialized from
517 * host_to_target_errno_table[] in syscall_init(). */
518 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
522 * This list is the union of errno values overridden in asm-<arch>/errno.h
523 * minus the errnos that are not actually generic to all archs.
525 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
526 [EAGAIN
] = TARGET_EAGAIN
,
527 [EIDRM
] = TARGET_EIDRM
,
528 [ECHRNG
] = TARGET_ECHRNG
,
529 [EL2NSYNC
] = TARGET_EL2NSYNC
,
530 [EL3HLT
] = TARGET_EL3HLT
,
531 [EL3RST
] = TARGET_EL3RST
,
532 [ELNRNG
] = TARGET_ELNRNG
,
533 [EUNATCH
] = TARGET_EUNATCH
,
534 [ENOCSI
] = TARGET_ENOCSI
,
535 [EL2HLT
] = TARGET_EL2HLT
,
536 [EDEADLK
] = TARGET_EDEADLK
,
537 [ENOLCK
] = TARGET_ENOLCK
,
538 [EBADE
] = TARGET_EBADE
,
539 [EBADR
] = TARGET_EBADR
,
540 [EXFULL
] = TARGET_EXFULL
,
541 [ENOANO
] = TARGET_ENOANO
,
542 [EBADRQC
] = TARGET_EBADRQC
,
543 [EBADSLT
] = TARGET_EBADSLT
,
544 [EBFONT
] = TARGET_EBFONT
,
545 [ENOSTR
] = TARGET_ENOSTR
,
546 [ENODATA
] = TARGET_ENODATA
,
547 [ETIME
] = TARGET_ETIME
,
548 [ENOSR
] = TARGET_ENOSR
,
549 [ENONET
] = TARGET_ENONET
,
550 [ENOPKG
] = TARGET_ENOPKG
,
551 [EREMOTE
] = TARGET_EREMOTE
,
552 [ENOLINK
] = TARGET_ENOLINK
,
553 [EADV
] = TARGET_EADV
,
554 [ESRMNT
] = TARGET_ESRMNT
,
555 [ECOMM
] = TARGET_ECOMM
,
556 [EPROTO
] = TARGET_EPROTO
,
557 [EDOTDOT
] = TARGET_EDOTDOT
,
558 [EMULTIHOP
] = TARGET_EMULTIHOP
,
559 [EBADMSG
] = TARGET_EBADMSG
,
560 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
561 [EOVERFLOW
] = TARGET_EOVERFLOW
,
562 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
563 [EBADFD
] = TARGET_EBADFD
,
564 [EREMCHG
] = TARGET_EREMCHG
,
565 [ELIBACC
] = TARGET_ELIBACC
,
566 [ELIBBAD
] = TARGET_ELIBBAD
,
567 [ELIBSCN
] = TARGET_ELIBSCN
,
568 [ELIBMAX
] = TARGET_ELIBMAX
,
569 [ELIBEXEC
] = TARGET_ELIBEXEC
,
570 [EILSEQ
] = TARGET_EILSEQ
,
571 [ENOSYS
] = TARGET_ENOSYS
,
572 [ELOOP
] = TARGET_ELOOP
,
573 [ERESTART
] = TARGET_ERESTART
,
574 [ESTRPIPE
] = TARGET_ESTRPIPE
,
575 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
576 [EUSERS
] = TARGET_EUSERS
,
577 [ENOTSOCK
] = TARGET_ENOTSOCK
,
578 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
579 [EMSGSIZE
] = TARGET_EMSGSIZE
,
580 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
581 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
582 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
583 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
584 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
585 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
586 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
587 [EADDRINUSE
] = TARGET_EADDRINUSE
,
588 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
589 [ENETDOWN
] = TARGET_ENETDOWN
,
590 [ENETUNREACH
] = TARGET_ENETUNREACH
,
591 [ENETRESET
] = TARGET_ENETRESET
,
592 [ECONNABORTED
] = TARGET_ECONNABORTED
,
593 [ECONNRESET
] = TARGET_ECONNRESET
,
594 [ENOBUFS
] = TARGET_ENOBUFS
,
595 [EISCONN
] = TARGET_EISCONN
,
596 [ENOTCONN
] = TARGET_ENOTCONN
,
597 [EUCLEAN
] = TARGET_EUCLEAN
,
598 [ENOTNAM
] = TARGET_ENOTNAM
,
599 [ENAVAIL
] = TARGET_ENAVAIL
,
600 [EISNAM
] = TARGET_EISNAM
,
601 [EREMOTEIO
] = TARGET_EREMOTEIO
,
602 [EDQUOT
] = TARGET_EDQUOT
,
603 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
604 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
605 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
606 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
607 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
608 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
609 [EALREADY
] = TARGET_EALREADY
,
610 [EINPROGRESS
] = TARGET_EINPROGRESS
,
611 [ESTALE
] = TARGET_ESTALE
,
612 [ECANCELED
] = TARGET_ECANCELED
,
613 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
614 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
616 [ENOKEY
] = TARGET_ENOKEY
,
619 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
622 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
625 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
628 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
630 #ifdef ENOTRECOVERABLE
631 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
634 [ENOMSG
] = TARGET_ENOMSG
,
637 [ERFKILL
] = TARGET_ERFKILL
,
640 [EHWPOISON
] = TARGET_EHWPOISON
,
644 static inline int host_to_target_errno(int err
)
646 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
647 host_to_target_errno_table
[err
]) {
648 return host_to_target_errno_table
[err
];
653 static inline int target_to_host_errno(int err
)
655 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
656 target_to_host_errno_table
[err
]) {
657 return target_to_host_errno_table
[err
];
662 static inline abi_long
get_errno(abi_long ret
)
665 return -host_to_target_errno(errno
);
670 const char *target_strerror(int err
)
672 if (err
== TARGET_ERESTARTSYS
) {
673 return "To be restarted";
675 if (err
== TARGET_QEMU_ESIGRETURN
) {
676 return "Successful exit from sigreturn";
679 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
682 return strerror(target_to_host_errno(err
));
685 #define safe_syscall0(type, name) \
686 static type safe_##name(void) \
688 return safe_syscall(__NR_##name); \
691 #define safe_syscall1(type, name, type1, arg1) \
692 static type safe_##name(type1 arg1) \
694 return safe_syscall(__NR_##name, arg1); \
697 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
698 static type safe_##name(type1 arg1, type2 arg2) \
700 return safe_syscall(__NR_##name, arg1, arg2); \
703 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
704 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
706 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
709 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
711 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
713 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
716 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
717 type4, arg4, type5, arg5) \
718 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
721 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
724 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
725 type4, arg4, type5, arg5, type6, arg6) \
726 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
727 type5 arg5, type6 arg6) \
729 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
/* safe_* wrappers: each expands (via the safe_syscallN macros above) to
 * a static safe_<name>() that forwards its arguments to safe_syscall(). */
safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
              int, flags, mode_t, mode)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
              struct rusage *, rusage)
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
              int, options, struct rusage *, rusage)
safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
/* Signal-safe pselect6(2) wrapper. */
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
743 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
744 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
746 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
747 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
/* Signal-safe wrappers (expand via the safe_syscallN macros above). */
safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
761 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
/* Signal-safe socket I/O and flock wrappers. */
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
/* Signal-safe rt_sigtimedwait(2) wrapper. */
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
772 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
/* Signal-safe nanosleep(2) wrapper. */
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
776 #ifdef TARGET_NR_clock_nanosleep
/* Signal-safe clock_nanosleep(2) wrapper. */
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
/* Signal-safe ipc(2) multiplexer wrapper. */
safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
785 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
/* Signal-safe msgrcv(2) wrapper. */
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
792 #ifdef __NR_semtimedop
/* Signal-safe semtimedop(2) wrapper. */
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
796 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
/* Signal-safe POSIX message queue timed send/receive wrappers. */
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
802 /* We do ioctl like this rather than via safe_syscall3 to preserve the
803 * "third argument might be integer or pointer or not present" behaviour of
806 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
807 /* Similarly for fcntl. Note that callers must always:
808 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
809 * use the flock64 struct rather than unsuffixed flock
810 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
813 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
815 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
818 static inline int host_to_target_sock_type(int host_type
)
822 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
824 target_type
= TARGET_SOCK_DGRAM
;
827 target_type
= TARGET_SOCK_STREAM
;
830 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
834 #if defined(SOCK_CLOEXEC)
835 if (host_type
& SOCK_CLOEXEC
) {
836 target_type
|= TARGET_SOCK_CLOEXEC
;
840 #if defined(SOCK_NONBLOCK)
841 if (host_type
& SOCK_NONBLOCK
) {
842 target_type
|= TARGET_SOCK_NONBLOCK
;
849 static abi_ulong target_brk
;
850 static abi_ulong target_original_brk
;
851 static abi_ulong brk_page
;
853 void target_set_brk(abi_ulong new_brk
)
855 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
856 brk_page
= HOST_PAGE_ALIGN(target_brk
);
859 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
860 #define DEBUGF_BRK(message, args...)
862 /* do_brk() must return target values and target errnos. */
863 abi_long
do_brk(abi_ulong new_brk
)
865 abi_long mapped_addr
;
866 abi_ulong new_alloc_size
;
868 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
871 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
874 if (new_brk
< target_original_brk
) {
875 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
880 /* If the new brk is less than the highest page reserved to the
881 * target heap allocation, set it and we're almost done... */
882 if (new_brk
<= brk_page
) {
883 /* Heap contents are initialized to zero, as for anonymous
885 if (new_brk
> target_brk
) {
886 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
888 target_brk
= new_brk
;
889 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
893 /* We need to allocate more memory after the brk... Note that
894 * we don't use MAP_FIXED because that will map over the top of
895 * any existing mapping (like the one with the host libc or qemu
896 * itself); instead we treat "mapped but at wrong address" as
897 * a failure and unmap again.
899 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
900 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
901 PROT_READ
|PROT_WRITE
,
902 MAP_ANON
|MAP_PRIVATE
, 0, 0));
904 if (mapped_addr
== brk_page
) {
905 /* Heap contents are initialized to zero, as for anonymous
906 * mapped pages. Technically the new pages are already
907 * initialized to zero since they *are* anonymous mapped
908 * pages, however we have to take care with the contents that
909 * come from the remaining part of the previous page: it may
910 * contains garbage data due to a previous heap usage (grown
912 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
914 target_brk
= new_brk
;
915 brk_page
= HOST_PAGE_ALIGN(target_brk
);
916 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
919 } else if (mapped_addr
!= -1) {
920 /* Mapped but at wrong address, meaning there wasn't actually
921 * enough space for this brk.
923 target_munmap(mapped_addr
, new_alloc_size
);
925 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
928 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
931 #if defined(TARGET_ALPHA)
932 /* We (partially) emulate OSF/1 on Alpha, which requires we
933 return a proper errno, not an unchanged brk value. */
934 return -TARGET_ENOMEM
;
936 /* For everything else, return the previous break. */
940 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
941 abi_ulong target_fds_addr
,
945 abi_ulong b
, *target_fds
;
947 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
948 if (!(target_fds
= lock_user(VERIFY_READ
,
950 sizeof(abi_ulong
) * nw
,
952 return -TARGET_EFAULT
;
956 for (i
= 0; i
< nw
; i
++) {
957 /* grab the abi_ulong */
958 __get_user(b
, &target_fds
[i
]);
959 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
960 /* check the bit inside the abi_ulong */
967 unlock_user(target_fds
, target_fds_addr
, 0);
972 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
973 abi_ulong target_fds_addr
,
976 if (target_fds_addr
) {
977 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
978 return -TARGET_EFAULT
;
986 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
992 abi_ulong
*target_fds
;
994 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
995 if (!(target_fds
= lock_user(VERIFY_WRITE
,
997 sizeof(abi_ulong
) * nw
,
999 return -TARGET_EFAULT
;
1002 for (i
= 0; i
< nw
; i
++) {
1004 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1005 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
1008 __put_user(v
, &target_fds
[i
]);
1011 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
1016 #if defined(__alpha__)
1017 #define HOST_HZ 1024
1022 static inline abi_long
host_to_target_clock_t(long ticks
)
1024 #if HOST_HZ == TARGET_HZ
1027 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
1031 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
1032 const struct rusage
*rusage
)
1034 struct target_rusage
*target_rusage
;
1036 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
1037 return -TARGET_EFAULT
;
1038 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
1039 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
1040 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
1041 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
1042 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
1043 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
1044 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
1045 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
1046 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
1047 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
1048 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1049 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1050 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1051 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1052 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1053 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1054 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1055 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1056 unlock_user_struct(target_rusage
, target_addr
, 1);
1061 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1063 abi_ulong target_rlim_swap
;
1066 target_rlim_swap
= tswapal(target_rlim
);
1067 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1068 return RLIM_INFINITY
;
1070 result
= target_rlim_swap
;
1071 if (target_rlim_swap
!= (rlim_t
)result
)
1072 return RLIM_INFINITY
;
1077 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1079 abi_ulong target_rlim_swap
;
1082 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1083 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1085 target_rlim_swap
= rlim
;
1086 result
= tswapal(target_rlim_swap
);
1091 static inline int target_to_host_resource(int code
)
1094 case TARGET_RLIMIT_AS
:
1096 case TARGET_RLIMIT_CORE
:
1098 case TARGET_RLIMIT_CPU
:
1100 case TARGET_RLIMIT_DATA
:
1102 case TARGET_RLIMIT_FSIZE
:
1103 return RLIMIT_FSIZE
;
1104 case TARGET_RLIMIT_LOCKS
:
1105 return RLIMIT_LOCKS
;
1106 case TARGET_RLIMIT_MEMLOCK
:
1107 return RLIMIT_MEMLOCK
;
1108 case TARGET_RLIMIT_MSGQUEUE
:
1109 return RLIMIT_MSGQUEUE
;
1110 case TARGET_RLIMIT_NICE
:
1112 case TARGET_RLIMIT_NOFILE
:
1113 return RLIMIT_NOFILE
;
1114 case TARGET_RLIMIT_NPROC
:
1115 return RLIMIT_NPROC
;
1116 case TARGET_RLIMIT_RSS
:
1118 case TARGET_RLIMIT_RTPRIO
:
1119 return RLIMIT_RTPRIO
;
1120 case TARGET_RLIMIT_SIGPENDING
:
1121 return RLIMIT_SIGPENDING
;
1122 case TARGET_RLIMIT_STACK
:
1123 return RLIMIT_STACK
;
1129 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1130 abi_ulong target_tv_addr
)
1132 struct target_timeval
*target_tv
;
1134 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1)) {
1135 return -TARGET_EFAULT
;
1138 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1139 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1141 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1146 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1147 const struct timeval
*tv
)
1149 struct target_timeval
*target_tv
;
1151 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1152 return -TARGET_EFAULT
;
1155 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1156 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1158 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1163 static inline abi_long
copy_to_user_timeval64(abi_ulong target_tv_addr
,
1164 const struct timeval
*tv
)
1166 struct target__kernel_sock_timeval
*target_tv
;
1168 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1169 return -TARGET_EFAULT
;
1172 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1173 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1175 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1180 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
1181 abi_ulong target_addr
)
1183 struct target_timespec
*target_ts
;
1185 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1)) {
1186 return -TARGET_EFAULT
;
1188 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1189 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1190 unlock_user_struct(target_ts
, target_addr
, 0);
1194 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
1195 struct timespec
*host_ts
)
1197 struct target_timespec
*target_ts
;
1199 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1200 return -TARGET_EFAULT
;
1202 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1203 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1204 unlock_user_struct(target_ts
, target_addr
, 1);
1208 static inline abi_long
host_to_target_timespec64(abi_ulong target_addr
,
1209 struct timespec
*host_ts
)
1211 struct target__kernel_timespec
*target_ts
;
1213 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1214 return -TARGET_EFAULT
;
1216 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1217 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1218 unlock_user_struct(target_ts
, target_addr
, 1);
1222 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1223 abi_ulong target_tz_addr
)
1225 struct target_timezone
*target_tz
;
1227 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1228 return -TARGET_EFAULT
;
1231 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1232 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1234 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1239 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1242 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1243 abi_ulong target_mq_attr_addr
)
1245 struct target_mq_attr
*target_mq_attr
;
1247 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1248 target_mq_attr_addr
, 1))
1249 return -TARGET_EFAULT
;
1251 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1252 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1253 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1254 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1256 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1261 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1262 const struct mq_attr
*attr
)
1264 struct target_mq_attr
*target_mq_attr
;
1266 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1267 target_mq_attr_addr
, 0))
1268 return -TARGET_EFAULT
;
1270 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1271 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1272 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1273 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1275 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1281 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1282 /* do_select() must return target values and target errnos. */
1283 static abi_long
do_select(int n
,
1284 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1285 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1287 fd_set rfds
, wfds
, efds
;
1288 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1290 struct timespec ts
, *ts_ptr
;
1293 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1297 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1301 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1306 if (target_tv_addr
) {
1307 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1308 return -TARGET_EFAULT
;
1309 ts
.tv_sec
= tv
.tv_sec
;
1310 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1316 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1319 if (!is_error(ret
)) {
1320 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1321 return -TARGET_EFAULT
;
1322 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1323 return -TARGET_EFAULT
;
1324 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1325 return -TARGET_EFAULT
;
1327 if (target_tv_addr
) {
1328 tv
.tv_sec
= ts
.tv_sec
;
1329 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1330 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1331 return -TARGET_EFAULT
;
1339 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1340 static abi_long
do_old_select(abi_ulong arg1
)
1342 struct target_sel_arg_struct
*sel
;
1343 abi_ulong inp
, outp
, exp
, tvp
;
1346 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1)) {
1347 return -TARGET_EFAULT
;
1350 nsel
= tswapal(sel
->n
);
1351 inp
= tswapal(sel
->inp
);
1352 outp
= tswapal(sel
->outp
);
1353 exp
= tswapal(sel
->exp
);
1354 tvp
= tswapal(sel
->tvp
);
1356 unlock_user_struct(sel
, arg1
, 0);
1358 return do_select(nsel
, inp
, outp
, exp
, tvp
);
1363 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1366 return pipe2(host_pipe
, flags
);
1372 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1373 int flags
, int is_pipe2
)
1377 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1380 return get_errno(ret
);
1382 /* Several targets have special calling conventions for the original
1383 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1385 #if defined(TARGET_ALPHA)
1386 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1387 return host_pipe
[0];
1388 #elif defined(TARGET_MIPS)
1389 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1390 return host_pipe
[0];
1391 #elif defined(TARGET_SH4)
1392 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1393 return host_pipe
[0];
1394 #elif defined(TARGET_SPARC)
1395 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1396 return host_pipe
[0];
1400 if (put_user_s32(host_pipe
[0], pipedes
)
1401 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1402 return -TARGET_EFAULT
;
1403 return get_errno(ret
);
1406 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1407 abi_ulong target_addr
,
1410 struct target_ip_mreqn
*target_smreqn
;
1412 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1414 return -TARGET_EFAULT
;
1415 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1416 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1417 if (len
== sizeof(struct target_ip_mreqn
))
1418 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1419 unlock_user(target_smreqn
, target_addr
, 0);
1424 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1425 abi_ulong target_addr
,
1428 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1429 sa_family_t sa_family
;
1430 struct target_sockaddr
*target_saddr
;
1432 if (fd_trans_target_to_host_addr(fd
)) {
1433 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1436 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1438 return -TARGET_EFAULT
;
1440 sa_family
= tswap16(target_saddr
->sa_family
);
1442 /* Oops. The caller might send a incomplete sun_path; sun_path
1443 * must be terminated by \0 (see the manual page), but
1444 * unfortunately it is quite common to specify sockaddr_un
1445 * length as "strlen(x->sun_path)" while it should be
1446 * "strlen(...) + 1". We'll fix that here if needed.
1447 * Linux kernel has a similar feature.
1450 if (sa_family
== AF_UNIX
) {
1451 if (len
< unix_maxlen
&& len
> 0) {
1452 char *cp
= (char*)target_saddr
;
1454 if ( cp
[len
-1] && !cp
[len
] )
1457 if (len
> unix_maxlen
)
1461 memcpy(addr
, target_saddr
, len
);
1462 addr
->sa_family
= sa_family
;
1463 if (sa_family
== AF_NETLINK
) {
1464 struct sockaddr_nl
*nladdr
;
1466 nladdr
= (struct sockaddr_nl
*)addr
;
1467 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1468 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1469 } else if (sa_family
== AF_PACKET
) {
1470 struct target_sockaddr_ll
*lladdr
;
1472 lladdr
= (struct target_sockaddr_ll
*)addr
;
1473 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1474 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1476 unlock_user(target_saddr
, target_addr
, 0);
1481 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1482 struct sockaddr
*addr
,
1485 struct target_sockaddr
*target_saddr
;
1492 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1494 return -TARGET_EFAULT
;
1495 memcpy(target_saddr
, addr
, len
);
1496 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1497 sizeof(target_saddr
->sa_family
)) {
1498 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1500 if (addr
->sa_family
== AF_NETLINK
&&
1501 len
>= sizeof(struct target_sockaddr_nl
)) {
1502 struct target_sockaddr_nl
*target_nl
=
1503 (struct target_sockaddr_nl
*)target_saddr
;
1504 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1505 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1506 } else if (addr
->sa_family
== AF_PACKET
) {
1507 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1508 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1509 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1510 } else if (addr
->sa_family
== AF_INET6
&&
1511 len
>= sizeof(struct target_sockaddr_in6
)) {
1512 struct target_sockaddr_in6
*target_in6
=
1513 (struct target_sockaddr_in6
*)target_saddr
;
1514 target_in6
->sin6_scope_id
= tswap16(target_in6
->sin6_scope_id
);
1516 unlock_user(target_saddr
, target_addr
, len
);
1521 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1522 struct target_msghdr
*target_msgh
)
1524 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1525 abi_long msg_controllen
;
1526 abi_ulong target_cmsg_addr
;
1527 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1528 socklen_t space
= 0;
1530 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1531 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1533 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1534 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1535 target_cmsg_start
= target_cmsg
;
1537 return -TARGET_EFAULT
;
1539 while (cmsg
&& target_cmsg
) {
1540 void *data
= CMSG_DATA(cmsg
);
1541 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1543 int len
= tswapal(target_cmsg
->cmsg_len
)
1544 - sizeof(struct target_cmsghdr
);
1546 space
+= CMSG_SPACE(len
);
1547 if (space
> msgh
->msg_controllen
) {
1548 space
-= CMSG_SPACE(len
);
1549 /* This is a QEMU bug, since we allocated the payload
1550 * area ourselves (unlike overflow in host-to-target
1551 * conversion, which is just the guest giving us a buffer
1552 * that's too small). It can't happen for the payload types
1553 * we currently support; if it becomes an issue in future
1554 * we would need to improve our allocation strategy to
1555 * something more intelligent than "twice the size of the
1556 * target buffer we're reading from".
1558 gemu_log("Host cmsg overflow\n");
1562 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1563 cmsg
->cmsg_level
= SOL_SOCKET
;
1565 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1567 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1568 cmsg
->cmsg_len
= CMSG_LEN(len
);
1570 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1571 int *fd
= (int *)data
;
1572 int *target_fd
= (int *)target_data
;
1573 int i
, numfds
= len
/ sizeof(int);
1575 for (i
= 0; i
< numfds
; i
++) {
1576 __get_user(fd
[i
], target_fd
+ i
);
1578 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1579 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1580 struct ucred
*cred
= (struct ucred
*)data
;
1581 struct target_ucred
*target_cred
=
1582 (struct target_ucred
*)target_data
;
1584 __get_user(cred
->pid
, &target_cred
->pid
);
1585 __get_user(cred
->uid
, &target_cred
->uid
);
1586 __get_user(cred
->gid
, &target_cred
->gid
);
1588 gemu_log("Unsupported ancillary data: %d/%d\n",
1589 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1590 memcpy(data
, target_data
, len
);
1593 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1594 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1597 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1599 msgh
->msg_controllen
= space
;
1603 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1604 struct msghdr
*msgh
)
1606 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1607 abi_long msg_controllen
;
1608 abi_ulong target_cmsg_addr
;
1609 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1610 socklen_t space
= 0;
1612 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1613 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1615 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1616 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1617 target_cmsg_start
= target_cmsg
;
1619 return -TARGET_EFAULT
;
1621 while (cmsg
&& target_cmsg
) {
1622 void *data
= CMSG_DATA(cmsg
);
1623 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1625 int len
= cmsg
->cmsg_len
- sizeof(struct cmsghdr
);
1626 int tgt_len
, tgt_space
;
1628 /* We never copy a half-header but may copy half-data;
1629 * this is Linux's behaviour in put_cmsg(). Note that
1630 * truncation here is a guest problem (which we report
1631 * to the guest via the CTRUNC bit), unlike truncation
1632 * in target_to_host_cmsg, which is a QEMU bug.
1634 if (msg_controllen
< sizeof(struct target_cmsghdr
)) {
1635 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1639 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1640 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1642 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1644 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1646 /* Payload types which need a different size of payload on
1647 * the target must adjust tgt_len here.
1650 switch (cmsg
->cmsg_level
) {
1652 switch (cmsg
->cmsg_type
) {
1654 tgt_len
= sizeof(struct target_timeval
);
1664 if (msg_controllen
< TARGET_CMSG_LEN(tgt_len
)) {
1665 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1666 tgt_len
= msg_controllen
- sizeof(struct target_cmsghdr
);
1669 /* We must now copy-and-convert len bytes of payload
1670 * into tgt_len bytes of destination space. Bear in mind
1671 * that in both source and destination we may be dealing
1672 * with a truncated value!
1674 switch (cmsg
->cmsg_level
) {
1676 switch (cmsg
->cmsg_type
) {
1679 int *fd
= (int *)data
;
1680 int *target_fd
= (int *)target_data
;
1681 int i
, numfds
= tgt_len
/ sizeof(int);
1683 for (i
= 0; i
< numfds
; i
++) {
1684 __put_user(fd
[i
], target_fd
+ i
);
1690 struct timeval
*tv
= (struct timeval
*)data
;
1691 struct target_timeval
*target_tv
=
1692 (struct target_timeval
*)target_data
;
1694 if (len
!= sizeof(struct timeval
) ||
1695 tgt_len
!= sizeof(struct target_timeval
)) {
1699 /* copy struct timeval to target */
1700 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1701 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1704 case SCM_CREDENTIALS
:
1706 struct ucred
*cred
= (struct ucred
*)data
;
1707 struct target_ucred
*target_cred
=
1708 (struct target_ucred
*)target_data
;
1710 __put_user(cred
->pid
, &target_cred
->pid
);
1711 __put_user(cred
->uid
, &target_cred
->uid
);
1712 __put_user(cred
->gid
, &target_cred
->gid
);
1721 switch (cmsg
->cmsg_type
) {
1724 uint32_t *v
= (uint32_t *)data
;
1725 uint32_t *t_int
= (uint32_t *)target_data
;
1727 if (len
!= sizeof(uint32_t) ||
1728 tgt_len
!= sizeof(uint32_t)) {
1731 __put_user(*v
, t_int
);
1737 struct sock_extended_err ee
;
1738 struct sockaddr_in offender
;
1740 struct errhdr_t
*errh
= (struct errhdr_t
*)data
;
1741 struct errhdr_t
*target_errh
=
1742 (struct errhdr_t
*)target_data
;
1744 if (len
!= sizeof(struct errhdr_t
) ||
1745 tgt_len
!= sizeof(struct errhdr_t
)) {
1748 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1749 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1750 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1751 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1752 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1753 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1754 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1755 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1756 (void *) &errh
->offender
, sizeof(errh
->offender
));
1765 switch (cmsg
->cmsg_type
) {
1768 uint32_t *v
= (uint32_t *)data
;
1769 uint32_t *t_int
= (uint32_t *)target_data
;
1771 if (len
!= sizeof(uint32_t) ||
1772 tgt_len
!= sizeof(uint32_t)) {
1775 __put_user(*v
, t_int
);
1781 struct sock_extended_err ee
;
1782 struct sockaddr_in6 offender
;
1784 struct errhdr6_t
*errh
= (struct errhdr6_t
*)data
;
1785 struct errhdr6_t
*target_errh
=
1786 (struct errhdr6_t
*)target_data
;
1788 if (len
!= sizeof(struct errhdr6_t
) ||
1789 tgt_len
!= sizeof(struct errhdr6_t
)) {
1792 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1793 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1794 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1795 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1796 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1797 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1798 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1799 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1800 (void *) &errh
->offender
, sizeof(errh
->offender
));
1810 gemu_log("Unsupported ancillary data: %d/%d\n",
1811 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1812 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1813 if (tgt_len
> len
) {
1814 memset(target_data
+ len
, 0, tgt_len
- len
);
1818 target_cmsg
->cmsg_len
= tswapal(TARGET_CMSG_LEN(tgt_len
));
1819 tgt_space
= TARGET_CMSG_SPACE(tgt_len
);
1820 if (msg_controllen
< tgt_space
) {
1821 tgt_space
= msg_controllen
;
1823 msg_controllen
-= tgt_space
;
1825 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1826 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1829 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1831 target_msgh
->msg_controllen
= tswapal(space
);
1835 /* do_setsockopt() Must return target values and target errnos. */
1836 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
1837 abi_ulong optval_addr
, socklen_t optlen
)
1841 struct ip_mreqn
*ip_mreq
;
1842 struct ip_mreq_source
*ip_mreq_source
;
1846 /* TCP options all take an 'int' value. */
1847 if (optlen
< sizeof(uint32_t))
1848 return -TARGET_EINVAL
;
1850 if (get_user_u32(val
, optval_addr
))
1851 return -TARGET_EFAULT
;
1852 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1859 case IP_ROUTER_ALERT
:
1863 case IP_MTU_DISCOVER
:
1870 case IP_MULTICAST_TTL
:
1871 case IP_MULTICAST_LOOP
:
1873 if (optlen
>= sizeof(uint32_t)) {
1874 if (get_user_u32(val
, optval_addr
))
1875 return -TARGET_EFAULT
;
1876 } else if (optlen
>= 1) {
1877 if (get_user_u8(val
, optval_addr
))
1878 return -TARGET_EFAULT
;
1880 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1882 case IP_ADD_MEMBERSHIP
:
1883 case IP_DROP_MEMBERSHIP
:
1884 if (optlen
< sizeof (struct target_ip_mreq
) ||
1885 optlen
> sizeof (struct target_ip_mreqn
))
1886 return -TARGET_EINVAL
;
1888 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
1889 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
1890 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
1893 case IP_BLOCK_SOURCE
:
1894 case IP_UNBLOCK_SOURCE
:
1895 case IP_ADD_SOURCE_MEMBERSHIP
:
1896 case IP_DROP_SOURCE_MEMBERSHIP
:
1897 if (optlen
!= sizeof (struct target_ip_mreq_source
))
1898 return -TARGET_EINVAL
;
1900 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
1901 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
1902 unlock_user (ip_mreq_source
, optval_addr
, 0);
1911 case IPV6_MTU_DISCOVER
:
1914 case IPV6_RECVPKTINFO
:
1915 case IPV6_UNICAST_HOPS
:
1916 case IPV6_MULTICAST_HOPS
:
1917 case IPV6_MULTICAST_LOOP
:
1919 case IPV6_RECVHOPLIMIT
:
1920 case IPV6_2292HOPLIMIT
:
1923 case IPV6_2292PKTINFO
:
1924 case IPV6_RECVTCLASS
:
1925 case IPV6_RECVRTHDR
:
1926 case IPV6_2292RTHDR
:
1927 case IPV6_RECVHOPOPTS
:
1928 case IPV6_2292HOPOPTS
:
1929 case IPV6_RECVDSTOPTS
:
1930 case IPV6_2292DSTOPTS
:
1932 #ifdef IPV6_RECVPATHMTU
1933 case IPV6_RECVPATHMTU
:
1935 #ifdef IPV6_TRANSPARENT
1936 case IPV6_TRANSPARENT
:
1938 #ifdef IPV6_FREEBIND
1941 #ifdef IPV6_RECVORIGDSTADDR
1942 case IPV6_RECVORIGDSTADDR
:
1945 if (optlen
< sizeof(uint32_t)) {
1946 return -TARGET_EINVAL
;
1948 if (get_user_u32(val
, optval_addr
)) {
1949 return -TARGET_EFAULT
;
1951 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1952 &val
, sizeof(val
)));
1956 struct in6_pktinfo pki
;
1958 if (optlen
< sizeof(pki
)) {
1959 return -TARGET_EINVAL
;
1962 if (copy_from_user(&pki
, optval_addr
, sizeof(pki
))) {
1963 return -TARGET_EFAULT
;
1966 pki
.ipi6_ifindex
= tswap32(pki
.ipi6_ifindex
);
1968 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1969 &pki
, sizeof(pki
)));
1972 case IPV6_ADD_MEMBERSHIP
:
1973 case IPV6_DROP_MEMBERSHIP
:
1975 struct ipv6_mreq ipv6mreq
;
1977 if (optlen
< sizeof(ipv6mreq
)) {
1978 return -TARGET_EINVAL
;
1981 if (copy_from_user(&ipv6mreq
, optval_addr
, sizeof(ipv6mreq
))) {
1982 return -TARGET_EFAULT
;
1985 ipv6mreq
.ipv6mr_interface
= tswap32(ipv6mreq
.ipv6mr_interface
);
1987 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1988 &ipv6mreq
, sizeof(ipv6mreq
)));
1999 struct icmp6_filter icmp6f
;
2001 if (optlen
> sizeof(icmp6f
)) {
2002 optlen
= sizeof(icmp6f
);
2005 if (copy_from_user(&icmp6f
, optval_addr
, optlen
)) {
2006 return -TARGET_EFAULT
;
2009 for (val
= 0; val
< 8; val
++) {
2010 icmp6f
.data
[val
] = tswap32(icmp6f
.data
[val
]);
2013 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2025 /* those take an u32 value */
2026 if (optlen
< sizeof(uint32_t)) {
2027 return -TARGET_EINVAL
;
2030 if (get_user_u32(val
, optval_addr
)) {
2031 return -TARGET_EFAULT
;
2033 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2034 &val
, sizeof(val
)));
2041 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2046 char *alg_key
= g_malloc(optlen
);
2049 return -TARGET_ENOMEM
;
2051 if (copy_from_user(alg_key
, optval_addr
, optlen
)) {
2053 return -TARGET_EFAULT
;
2055 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2060 case ALG_SET_AEAD_AUTHSIZE
:
2062 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2071 case TARGET_SOL_SOCKET
:
2073 case TARGET_SO_RCVTIMEO
:
2077 optname
= SO_RCVTIMEO
;
2080 if (optlen
!= sizeof(struct target_timeval
)) {
2081 return -TARGET_EINVAL
;
2084 if (copy_from_user_timeval(&tv
, optval_addr
)) {
2085 return -TARGET_EFAULT
;
2088 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2092 case TARGET_SO_SNDTIMEO
:
2093 optname
= SO_SNDTIMEO
;
2095 case TARGET_SO_ATTACH_FILTER
:
2097 struct target_sock_fprog
*tfprog
;
2098 struct target_sock_filter
*tfilter
;
2099 struct sock_fprog fprog
;
2100 struct sock_filter
*filter
;
2103 if (optlen
!= sizeof(*tfprog
)) {
2104 return -TARGET_EINVAL
;
2106 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
2107 return -TARGET_EFAULT
;
2109 if (!lock_user_struct(VERIFY_READ
, tfilter
,
2110 tswapal(tfprog
->filter
), 0)) {
2111 unlock_user_struct(tfprog
, optval_addr
, 1);
2112 return -TARGET_EFAULT
;
2115 fprog
.len
= tswap16(tfprog
->len
);
2116 filter
= g_try_new(struct sock_filter
, fprog
.len
);
2117 if (filter
== NULL
) {
2118 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2119 unlock_user_struct(tfprog
, optval_addr
, 1);
2120 return -TARGET_ENOMEM
;
2122 for (i
= 0; i
< fprog
.len
; i
++) {
2123 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2124 filter
[i
].jt
= tfilter
[i
].jt
;
2125 filter
[i
].jf
= tfilter
[i
].jf
;
2126 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2128 fprog
.filter
= filter
;
2130 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2131 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2134 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2135 unlock_user_struct(tfprog
, optval_addr
, 1);
2138 case TARGET_SO_BINDTODEVICE
:
2140 char *dev_ifname
, *addr_ifname
;
2142 if (optlen
> IFNAMSIZ
- 1) {
2143 optlen
= IFNAMSIZ
- 1;
2145 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2147 return -TARGET_EFAULT
;
2149 optname
= SO_BINDTODEVICE
;
2150 addr_ifname
= alloca(IFNAMSIZ
);
2151 memcpy(addr_ifname
, dev_ifname
, optlen
);
2152 addr_ifname
[optlen
] = 0;
2153 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2154 addr_ifname
, optlen
));
2155 unlock_user (dev_ifname
, optval_addr
, 0);
2158 case TARGET_SO_LINGER
:
2161 struct target_linger
*tlg
;
2163 if (optlen
!= sizeof(struct target_linger
)) {
2164 return -TARGET_EINVAL
;
2166 if (!lock_user_struct(VERIFY_READ
, tlg
, optval_addr
, 1)) {
2167 return -TARGET_EFAULT
;
2169 __get_user(lg
.l_onoff
, &tlg
->l_onoff
);
2170 __get_user(lg
.l_linger
, &tlg
->l_linger
);
2171 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, SO_LINGER
,
2173 unlock_user_struct(tlg
, optval_addr
, 0);
2176 /* Options with 'int' argument. */
2177 case TARGET_SO_DEBUG
:
2180 case TARGET_SO_REUSEADDR
:
2181 optname
= SO_REUSEADDR
;
2184 case TARGET_SO_REUSEPORT
:
2185 optname
= SO_REUSEPORT
;
2188 case TARGET_SO_TYPE
:
2191 case TARGET_SO_ERROR
:
2194 case TARGET_SO_DONTROUTE
:
2195 optname
= SO_DONTROUTE
;
2197 case TARGET_SO_BROADCAST
:
2198 optname
= SO_BROADCAST
;
2200 case TARGET_SO_SNDBUF
:
2201 optname
= SO_SNDBUF
;
2203 case TARGET_SO_SNDBUFFORCE
:
2204 optname
= SO_SNDBUFFORCE
;
2206 case TARGET_SO_RCVBUF
:
2207 optname
= SO_RCVBUF
;
2209 case TARGET_SO_RCVBUFFORCE
:
2210 optname
= SO_RCVBUFFORCE
;
2212 case TARGET_SO_KEEPALIVE
:
2213 optname
= SO_KEEPALIVE
;
2215 case TARGET_SO_OOBINLINE
:
2216 optname
= SO_OOBINLINE
;
2218 case TARGET_SO_NO_CHECK
:
2219 optname
= SO_NO_CHECK
;
2221 case TARGET_SO_PRIORITY
:
2222 optname
= SO_PRIORITY
;
2225 case TARGET_SO_BSDCOMPAT
:
2226 optname
= SO_BSDCOMPAT
;
2229 case TARGET_SO_PASSCRED
:
2230 optname
= SO_PASSCRED
;
2232 case TARGET_SO_PASSSEC
:
2233 optname
= SO_PASSSEC
;
2235 case TARGET_SO_TIMESTAMP
:
2236 optname
= SO_TIMESTAMP
;
2238 case TARGET_SO_RCVLOWAT
:
2239 optname
= SO_RCVLOWAT
;
2244 if (optlen
< sizeof(uint32_t))
2245 return -TARGET_EINVAL
;
2247 if (get_user_u32(val
, optval_addr
))
2248 return -TARGET_EFAULT
;
2249 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
2254 case NETLINK_PKTINFO
:
2255 case NETLINK_ADD_MEMBERSHIP
:
2256 case NETLINK_DROP_MEMBERSHIP
:
2257 case NETLINK_BROADCAST_ERROR
:
2258 case NETLINK_NO_ENOBUFS
:
2259 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2260 case NETLINK_LISTEN_ALL_NSID
:
2261 case NETLINK_CAP_ACK
:
2262 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2263 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2264 case NETLINK_EXT_ACK
:
2265 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2266 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2267 case NETLINK_GET_STRICT_CHK
:
2268 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2274 if (optlen
< sizeof(uint32_t)) {
2275 return -TARGET_EINVAL
;
2277 if (get_user_u32(val
, optval_addr
)) {
2278 return -TARGET_EFAULT
;
2280 ret
= get_errno(setsockopt(sockfd
, SOL_NETLINK
, optname
, &val
,
2283 #endif /* SOL_NETLINK */
2286 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
2287 ret
= -TARGET_ENOPROTOOPT
;
2292 /* do_getsockopt() Must return target values and target errnos. */
2293 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
2294 abi_ulong optval_addr
, abi_ulong optlen
)
2301 case TARGET_SOL_SOCKET
:
2304 /* These don't just return a single integer */
2305 case TARGET_SO_RCVTIMEO
:
2306 case TARGET_SO_SNDTIMEO
:
2307 case TARGET_SO_PEERNAME
:
2309 case TARGET_SO_PEERCRED
: {
2312 struct target_ucred
*tcr
;
2314 if (get_user_u32(len
, optlen
)) {
2315 return -TARGET_EFAULT
;
2318 return -TARGET_EINVAL
;
2322 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
2330 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
2331 return -TARGET_EFAULT
;
2333 __put_user(cr
.pid
, &tcr
->pid
);
2334 __put_user(cr
.uid
, &tcr
->uid
);
2335 __put_user(cr
.gid
, &tcr
->gid
);
2336 unlock_user_struct(tcr
, optval_addr
, 1);
2337 if (put_user_u32(len
, optlen
)) {
2338 return -TARGET_EFAULT
;
2342 case TARGET_SO_LINGER
:
2346 struct target_linger
*tlg
;
2348 if (get_user_u32(len
, optlen
)) {
2349 return -TARGET_EFAULT
;
2352 return -TARGET_EINVAL
;
2356 ret
= get_errno(getsockopt(sockfd
, level
, SO_LINGER
,
2364 if (!lock_user_struct(VERIFY_WRITE
, tlg
, optval_addr
, 0)) {
2365 return -TARGET_EFAULT
;
2367 __put_user(lg
.l_onoff
, &tlg
->l_onoff
);
2368 __put_user(lg
.l_linger
, &tlg
->l_linger
);
2369 unlock_user_struct(tlg
, optval_addr
, 1);
2370 if (put_user_u32(len
, optlen
)) {
2371 return -TARGET_EFAULT
;
2375 /* Options with 'int' argument. */
2376 case TARGET_SO_DEBUG
:
2379 case TARGET_SO_REUSEADDR
:
2380 optname
= SO_REUSEADDR
;
2383 case TARGET_SO_REUSEPORT
:
2384 optname
= SO_REUSEPORT
;
2387 case TARGET_SO_TYPE
:
2390 case TARGET_SO_ERROR
:
2393 case TARGET_SO_DONTROUTE
:
2394 optname
= SO_DONTROUTE
;
2396 case TARGET_SO_BROADCAST
:
2397 optname
= SO_BROADCAST
;
2399 case TARGET_SO_SNDBUF
:
2400 optname
= SO_SNDBUF
;
2402 case TARGET_SO_RCVBUF
:
2403 optname
= SO_RCVBUF
;
2405 case TARGET_SO_KEEPALIVE
:
2406 optname
= SO_KEEPALIVE
;
2408 case TARGET_SO_OOBINLINE
:
2409 optname
= SO_OOBINLINE
;
2411 case TARGET_SO_NO_CHECK
:
2412 optname
= SO_NO_CHECK
;
2414 case TARGET_SO_PRIORITY
:
2415 optname
= SO_PRIORITY
;
2418 case TARGET_SO_BSDCOMPAT
:
2419 optname
= SO_BSDCOMPAT
;
2422 case TARGET_SO_PASSCRED
:
2423 optname
= SO_PASSCRED
;
2425 case TARGET_SO_TIMESTAMP
:
2426 optname
= SO_TIMESTAMP
;
2428 case TARGET_SO_RCVLOWAT
:
2429 optname
= SO_RCVLOWAT
;
2431 case TARGET_SO_ACCEPTCONN
:
2432 optname
= SO_ACCEPTCONN
;
2439 /* TCP options all take an 'int' value. */
2441 if (get_user_u32(len
, optlen
))
2442 return -TARGET_EFAULT
;
2444 return -TARGET_EINVAL
;
2446 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2449 if (optname
== SO_TYPE
) {
2450 val
= host_to_target_sock_type(val
);
2455 if (put_user_u32(val
, optval_addr
))
2456 return -TARGET_EFAULT
;
2458 if (put_user_u8(val
, optval_addr
))
2459 return -TARGET_EFAULT
;
2461 if (put_user_u32(len
, optlen
))
2462 return -TARGET_EFAULT
;
2469 case IP_ROUTER_ALERT
:
2473 case IP_MTU_DISCOVER
:
2479 case IP_MULTICAST_TTL
:
2480 case IP_MULTICAST_LOOP
:
2481 if (get_user_u32(len
, optlen
))
2482 return -TARGET_EFAULT
;
2484 return -TARGET_EINVAL
;
2486 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2489 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2491 if (put_user_u32(len
, optlen
)
2492 || put_user_u8(val
, optval_addr
))
2493 return -TARGET_EFAULT
;
2495 if (len
> sizeof(int))
2497 if (put_user_u32(len
, optlen
)
2498 || put_user_u32(val
, optval_addr
))
2499 return -TARGET_EFAULT
;
2503 ret
= -TARGET_ENOPROTOOPT
;
2509 case IPV6_MTU_DISCOVER
:
2512 case IPV6_RECVPKTINFO
:
2513 case IPV6_UNICAST_HOPS
:
2514 case IPV6_MULTICAST_HOPS
:
2515 case IPV6_MULTICAST_LOOP
:
2517 case IPV6_RECVHOPLIMIT
:
2518 case IPV6_2292HOPLIMIT
:
2521 case IPV6_2292PKTINFO
:
2522 case IPV6_RECVTCLASS
:
2523 case IPV6_RECVRTHDR
:
2524 case IPV6_2292RTHDR
:
2525 case IPV6_RECVHOPOPTS
:
2526 case IPV6_2292HOPOPTS
:
2527 case IPV6_RECVDSTOPTS
:
2528 case IPV6_2292DSTOPTS
:
2530 #ifdef IPV6_RECVPATHMTU
2531 case IPV6_RECVPATHMTU
:
2533 #ifdef IPV6_TRANSPARENT
2534 case IPV6_TRANSPARENT
:
2536 #ifdef IPV6_FREEBIND
2539 #ifdef IPV6_RECVORIGDSTADDR
2540 case IPV6_RECVORIGDSTADDR
:
2542 if (get_user_u32(len
, optlen
))
2543 return -TARGET_EFAULT
;
2545 return -TARGET_EINVAL
;
2547 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2550 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2552 if (put_user_u32(len
, optlen
)
2553 || put_user_u8(val
, optval_addr
))
2554 return -TARGET_EFAULT
;
2556 if (len
> sizeof(int))
2558 if (put_user_u32(len
, optlen
)
2559 || put_user_u32(val
, optval_addr
))
2560 return -TARGET_EFAULT
;
2564 ret
= -TARGET_ENOPROTOOPT
;
2571 case NETLINK_PKTINFO
:
2572 case NETLINK_BROADCAST_ERROR
:
2573 case NETLINK_NO_ENOBUFS
:
2574 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2575 case NETLINK_LISTEN_ALL_NSID
:
2576 case NETLINK_CAP_ACK
:
2577 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2578 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2579 case NETLINK_EXT_ACK
:
2580 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2581 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2582 case NETLINK_GET_STRICT_CHK
:
2583 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2584 if (get_user_u32(len
, optlen
)) {
2585 return -TARGET_EFAULT
;
2587 if (len
!= sizeof(val
)) {
2588 return -TARGET_EINVAL
;
2591 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2595 if (put_user_u32(lv
, optlen
)
2596 || put_user_u32(val
, optval_addr
)) {
2597 return -TARGET_EFAULT
;
2600 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2601 case NETLINK_LIST_MEMBERSHIPS
:
2605 if (get_user_u32(len
, optlen
)) {
2606 return -TARGET_EFAULT
;
2609 return -TARGET_EINVAL
;
2611 results
= lock_user(VERIFY_WRITE
, optval_addr
, len
, 1);
2613 return -TARGET_EFAULT
;
2616 ret
= get_errno(getsockopt(sockfd
, level
, optname
, results
, &lv
));
2618 unlock_user(results
, optval_addr
, 0);
2621 /* swap host endianess to target endianess. */
2622 for (i
= 0; i
< (len
/ sizeof(uint32_t)); i
++) {
2623 results
[i
] = tswap32(results
[i
]);
2625 if (put_user_u32(lv
, optlen
)) {
2626 return -TARGET_EFAULT
;
2628 unlock_user(results
, optval_addr
, 0);
2631 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2636 #endif /* SOL_NETLINK */
2639 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2641 ret
= -TARGET_EOPNOTSUPP
;
2647 /* Convert target low/high pair representing file offset into the host
2648 * low/high pair. This function doesn't handle offsets bigger than 64 bits
2649 * as the kernel doesn't handle them either.
2651 static void target_to_host_low_high(abi_ulong tlow
,
2653 unsigned long *hlow
,
2654 unsigned long *hhigh
)
2656 uint64_t off
= tlow
|
2657 ((unsigned long long)thigh
<< TARGET_LONG_BITS
/ 2) <<
2658 TARGET_LONG_BITS
/ 2;
2661 *hhigh
= (off
>> HOST_LONG_BITS
/ 2) >> HOST_LONG_BITS
/ 2;
2664 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
2665 abi_ulong count
, int copy
)
2667 struct target_iovec
*target_vec
;
2669 abi_ulong total_len
, max_len
;
2672 bool bad_address
= false;
2678 if (count
> IOV_MAX
) {
2683 vec
= g_try_new0(struct iovec
, count
);
2689 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2690 count
* sizeof(struct target_iovec
), 1);
2691 if (target_vec
== NULL
) {
2696 /* ??? If host page size > target page size, this will result in a
2697 value larger than what we can actually support. */
2698 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
2701 for (i
= 0; i
< count
; i
++) {
2702 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2703 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2708 } else if (len
== 0) {
2709 /* Zero length pointer is ignored. */
2710 vec
[i
].iov_base
= 0;
2712 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
2713 /* If the first buffer pointer is bad, this is a fault. But
2714 * subsequent bad buffers will result in a partial write; this
2715 * is realized by filling the vector with null pointers and
2717 if (!vec
[i
].iov_base
) {
2728 if (len
> max_len
- total_len
) {
2729 len
= max_len
- total_len
;
2732 vec
[i
].iov_len
= len
;
2736 unlock_user(target_vec
, target_addr
, 0);
2741 if (tswapal(target_vec
[i
].iov_len
) > 0) {
2742 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
2745 unlock_user(target_vec
, target_addr
, 0);
2752 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
2753 abi_ulong count
, int copy
)
2755 struct target_iovec
*target_vec
;
2758 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2759 count
* sizeof(struct target_iovec
), 1);
2761 for (i
= 0; i
< count
; i
++) {
2762 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2763 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2767 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
2769 unlock_user(target_vec
, target_addr
, 0);
2775 static inline int target_to_host_sock_type(int *type
)
2778 int target_type
= *type
;
2780 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
2781 case TARGET_SOCK_DGRAM
:
2782 host_type
= SOCK_DGRAM
;
2784 case TARGET_SOCK_STREAM
:
2785 host_type
= SOCK_STREAM
;
2788 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
2791 if (target_type
& TARGET_SOCK_CLOEXEC
) {
2792 #if defined(SOCK_CLOEXEC)
2793 host_type
|= SOCK_CLOEXEC
;
2795 return -TARGET_EINVAL
;
2798 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2799 #if defined(SOCK_NONBLOCK)
2800 host_type
|= SOCK_NONBLOCK
;
2801 #elif !defined(O_NONBLOCK)
2802 return -TARGET_EINVAL
;
2809 /* Try to emulate socket type flags after socket creation. */
2810 static int sock_flags_fixup(int fd
, int target_type
)
2812 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2813 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2814 int flags
= fcntl(fd
, F_GETFL
);
2815 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
2817 return -TARGET_EINVAL
;
2824 /* do_socket() Must return target values and target errnos. */
2825 static abi_long
do_socket(int domain
, int type
, int protocol
)
2827 int target_type
= type
;
2830 ret
= target_to_host_sock_type(&type
);
2835 if (domain
== PF_NETLINK
&& !(
2836 #ifdef CONFIG_RTNETLINK
2837 protocol
== NETLINK_ROUTE
||
2839 protocol
== NETLINK_KOBJECT_UEVENT
||
2840 protocol
== NETLINK_AUDIT
)) {
2841 return -EPFNOSUPPORT
;
2844 if (domain
== AF_PACKET
||
2845 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
2846 protocol
= tswap16(protocol
);
2849 ret
= get_errno(socket(domain
, type
, protocol
));
2851 ret
= sock_flags_fixup(ret
, target_type
);
2852 if (type
== SOCK_PACKET
) {
2853 /* Manage an obsolete case :
2854 * if socket type is SOCK_PACKET, bind by name
2856 fd_trans_register(ret
, &target_packet_trans
);
2857 } else if (domain
== PF_NETLINK
) {
2859 #ifdef CONFIG_RTNETLINK
2861 fd_trans_register(ret
, &target_netlink_route_trans
);
2864 case NETLINK_KOBJECT_UEVENT
:
2865 /* nothing to do: messages are strings */
2868 fd_trans_register(ret
, &target_netlink_audit_trans
);
2871 g_assert_not_reached();
2878 /* do_bind() Must return target values and target errnos. */
2879 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
2885 if ((int)addrlen
< 0) {
2886 return -TARGET_EINVAL
;
2889 addr
= alloca(addrlen
+1);
2891 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
2895 return get_errno(bind(sockfd
, addr
, addrlen
));
2898 /* do_connect() Must return target values and target errnos. */
2899 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
2905 if ((int)addrlen
< 0) {
2906 return -TARGET_EINVAL
;
2909 addr
= alloca(addrlen
+1);
2911 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
2915 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
2918 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2919 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
2920 int flags
, int send
)
2926 abi_ulong target_vec
;
2928 if (msgp
->msg_name
) {
2929 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
2930 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
2931 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
2932 tswapal(msgp
->msg_name
),
2934 if (ret
== -TARGET_EFAULT
) {
2935 /* For connected sockets msg_name and msg_namelen must
2936 * be ignored, so returning EFAULT immediately is wrong.
2937 * Instead, pass a bad msg_name to the host kernel, and
2938 * let it decide whether to return EFAULT or not.
2940 msg
.msg_name
= (void *)-1;
2945 msg
.msg_name
= NULL
;
2946 msg
.msg_namelen
= 0;
2948 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
2949 msg
.msg_control
= alloca(msg
.msg_controllen
);
2950 memset(msg
.msg_control
, 0, msg
.msg_controllen
);
2952 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
2954 count
= tswapal(msgp
->msg_iovlen
);
2955 target_vec
= tswapal(msgp
->msg_iov
);
2957 if (count
> IOV_MAX
) {
2958 /* sendrcvmsg returns a different errno for this condition than
2959 * readv/writev, so we must catch it here before lock_iovec() does.
2961 ret
= -TARGET_EMSGSIZE
;
2965 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
2966 target_vec
, count
, send
);
2968 ret
= -host_to_target_errno(errno
);
2971 msg
.msg_iovlen
= count
;
2975 if (fd_trans_target_to_host_data(fd
)) {
2978 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
2979 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
2980 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
2981 msg
.msg_iov
->iov_len
);
2983 msg
.msg_iov
->iov_base
= host_msg
;
2984 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
2988 ret
= target_to_host_cmsg(&msg
, msgp
);
2990 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
2994 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
2995 if (!is_error(ret
)) {
2997 if (fd_trans_host_to_target_data(fd
)) {
2998 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
2999 MIN(msg
.msg_iov
->iov_len
, len
));
3001 ret
= host_to_target_cmsg(msgp
, &msg
);
3003 if (!is_error(ret
)) {
3004 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
3005 msgp
->msg_flags
= tswap32(msg
.msg_flags
);
3006 if (msg
.msg_name
!= NULL
&& msg
.msg_name
!= (void *)-1) {
3007 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
3008 msg
.msg_name
, msg
.msg_namelen
);
3020 unlock_iovec(vec
, target_vec
, count
, !send
);
3025 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
3026 int flags
, int send
)
3029 struct target_msghdr
*msgp
;
3031 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
3035 return -TARGET_EFAULT
;
3037 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
3038 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
3042 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3043 * so it might not have this *mmsg-specific flag either.
3045 #ifndef MSG_WAITFORONE
3046 #define MSG_WAITFORONE 0x10000
3049 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
3050 unsigned int vlen
, unsigned int flags
,
3053 struct target_mmsghdr
*mmsgp
;
3057 if (vlen
> UIO_MAXIOV
) {
3061 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
3063 return -TARGET_EFAULT
;
3066 for (i
= 0; i
< vlen
; i
++) {
3067 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
3068 if (is_error(ret
)) {
3071 mmsgp
[i
].msg_len
= tswap32(ret
);
3072 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3073 if (flags
& MSG_WAITFORONE
) {
3074 flags
|= MSG_DONTWAIT
;
3078 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3080 /* Return number of datagrams sent if we sent any at all;
3081 * otherwise return the error.
3089 /* do_accept4() Must return target values and target errnos. */
3090 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3091 abi_ulong target_addrlen_addr
, int flags
)
3093 socklen_t addrlen
, ret_addrlen
;
3098 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3100 if (target_addr
== 0) {
3101 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
3104 /* linux returns EINVAL if addrlen pointer is invalid */
3105 if (get_user_u32(addrlen
, target_addrlen_addr
))
3106 return -TARGET_EINVAL
;
3108 if ((int)addrlen
< 0) {
3109 return -TARGET_EINVAL
;
3112 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3113 return -TARGET_EINVAL
;
3115 addr
= alloca(addrlen
);
3117 ret_addrlen
= addrlen
;
3118 ret
= get_errno(safe_accept4(fd
, addr
, &ret_addrlen
, host_flags
));
3119 if (!is_error(ret
)) {
3120 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3121 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3122 ret
= -TARGET_EFAULT
;
3128 /* do_getpeername() Must return target values and target errnos. */
3129 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3130 abi_ulong target_addrlen_addr
)
3132 socklen_t addrlen
, ret_addrlen
;
3136 if (get_user_u32(addrlen
, target_addrlen_addr
))
3137 return -TARGET_EFAULT
;
3139 if ((int)addrlen
< 0) {
3140 return -TARGET_EINVAL
;
3143 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3144 return -TARGET_EFAULT
;
3146 addr
= alloca(addrlen
);
3148 ret_addrlen
= addrlen
;
3149 ret
= get_errno(getpeername(fd
, addr
, &ret_addrlen
));
3150 if (!is_error(ret
)) {
3151 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3152 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3153 ret
= -TARGET_EFAULT
;
3159 /* do_getsockname() Must return target values and target errnos. */
3160 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3161 abi_ulong target_addrlen_addr
)
3163 socklen_t addrlen
, ret_addrlen
;
3167 if (get_user_u32(addrlen
, target_addrlen_addr
))
3168 return -TARGET_EFAULT
;
3170 if ((int)addrlen
< 0) {
3171 return -TARGET_EINVAL
;
3174 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3175 return -TARGET_EFAULT
;
3177 addr
= alloca(addrlen
);
3179 ret_addrlen
= addrlen
;
3180 ret
= get_errno(getsockname(fd
, addr
, &ret_addrlen
));
3181 if (!is_error(ret
)) {
3182 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3183 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3184 ret
= -TARGET_EFAULT
;
3190 /* do_socketpair() Must return target values and target errnos. */
3191 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3192 abi_ulong target_tab_addr
)
3197 target_to_host_sock_type(&type
);
3199 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3200 if (!is_error(ret
)) {
3201 if (put_user_s32(tab
[0], target_tab_addr
)
3202 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3203 ret
= -TARGET_EFAULT
;
3208 /* do_sendto() Must return target values and target errnos. */
3209 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3210 abi_ulong target_addr
, socklen_t addrlen
)
3214 void *copy_msg
= NULL
;
3217 if ((int)addrlen
< 0) {
3218 return -TARGET_EINVAL
;
3221 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3223 return -TARGET_EFAULT
;
3224 if (fd_trans_target_to_host_data(fd
)) {
3225 copy_msg
= host_msg
;
3226 host_msg
= g_malloc(len
);
3227 memcpy(host_msg
, copy_msg
, len
);
3228 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
3234 addr
= alloca(addrlen
+1);
3235 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
3239 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
3241 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
3246 host_msg
= copy_msg
;
3248 unlock_user(host_msg
, msg
, 0);
3252 /* do_recvfrom() Must return target values and target errnos. */
3253 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
3254 abi_ulong target_addr
,
3255 abi_ulong target_addrlen
)
3257 socklen_t addrlen
, ret_addrlen
;
3262 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
3264 return -TARGET_EFAULT
;
3266 if (get_user_u32(addrlen
, target_addrlen
)) {
3267 ret
= -TARGET_EFAULT
;
3270 if ((int)addrlen
< 0) {
3271 ret
= -TARGET_EINVAL
;
3274 addr
= alloca(addrlen
);
3275 ret_addrlen
= addrlen
;
3276 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
3277 addr
, &ret_addrlen
));
3279 addr
= NULL
; /* To keep compiler quiet. */
3280 addrlen
= 0; /* To keep compiler quiet. */
3281 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
3283 if (!is_error(ret
)) {
3284 if (fd_trans_host_to_target_data(fd
)) {
3286 trans
= fd_trans_host_to_target_data(fd
)(host_msg
, MIN(ret
, len
));
3287 if (is_error(trans
)) {
3293 host_to_target_sockaddr(target_addr
, addr
,
3294 MIN(addrlen
, ret_addrlen
));
3295 if (put_user_u32(ret_addrlen
, target_addrlen
)) {
3296 ret
= -TARGET_EFAULT
;
3300 unlock_user(host_msg
, msg
, len
);
3303 unlock_user(host_msg
, msg
, 0);
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
3401 #define N_SHM_REGIONS 32
3403 static struct shm_region
{
3407 } shm_regions
[N_SHM_REGIONS
];
3409 #ifndef TARGET_SEMID64_DS
3410 /* asm-generic version of this struct */
3411 struct target_semid64_ds
3413 struct target_ipc_perm sem_perm
;
3414 abi_ulong sem_otime
;
3415 #if TARGET_ABI_BITS == 32
3416 abi_ulong __unused1
;
3418 abi_ulong sem_ctime
;
3419 #if TARGET_ABI_BITS == 32
3420 abi_ulong __unused2
;
3422 abi_ulong sem_nsems
;
3423 abi_ulong __unused3
;
3424 abi_ulong __unused4
;
3428 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
3429 abi_ulong target_addr
)
3431 struct target_ipc_perm
*target_ip
;
3432 struct target_semid64_ds
*target_sd
;
3434 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3435 return -TARGET_EFAULT
;
3436 target_ip
= &(target_sd
->sem_perm
);
3437 host_ip
->__key
= tswap32(target_ip
->__key
);
3438 host_ip
->uid
= tswap32(target_ip
->uid
);
3439 host_ip
->gid
= tswap32(target_ip
->gid
);
3440 host_ip
->cuid
= tswap32(target_ip
->cuid
);
3441 host_ip
->cgid
= tswap32(target_ip
->cgid
);
3442 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3443 host_ip
->mode
= tswap32(target_ip
->mode
);
3445 host_ip
->mode
= tswap16(target_ip
->mode
);
3447 #if defined(TARGET_PPC)
3448 host_ip
->__seq
= tswap32(target_ip
->__seq
);
3450 host_ip
->__seq
= tswap16(target_ip
->__seq
);
3452 unlock_user_struct(target_sd
, target_addr
, 0);
3456 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
3457 struct ipc_perm
*host_ip
)
3459 struct target_ipc_perm
*target_ip
;
3460 struct target_semid64_ds
*target_sd
;
3462 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3463 return -TARGET_EFAULT
;
3464 target_ip
= &(target_sd
->sem_perm
);
3465 target_ip
->__key
= tswap32(host_ip
->__key
);
3466 target_ip
->uid
= tswap32(host_ip
->uid
);
3467 target_ip
->gid
= tswap32(host_ip
->gid
);
3468 target_ip
->cuid
= tswap32(host_ip
->cuid
);
3469 target_ip
->cgid
= tswap32(host_ip
->cgid
);
3470 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3471 target_ip
->mode
= tswap32(host_ip
->mode
);
3473 target_ip
->mode
= tswap16(host_ip
->mode
);
3475 #if defined(TARGET_PPC)
3476 target_ip
->__seq
= tswap32(host_ip
->__seq
);
3478 target_ip
->__seq
= tswap16(host_ip
->__seq
);
3480 unlock_user_struct(target_sd
, target_addr
, 1);
3484 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
3485 abi_ulong target_addr
)
3487 struct target_semid64_ds
*target_sd
;
3489 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3490 return -TARGET_EFAULT
;
3491 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
3492 return -TARGET_EFAULT
;
3493 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
3494 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
3495 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
3496 unlock_user_struct(target_sd
, target_addr
, 0);
3500 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
3501 struct semid_ds
*host_sd
)
3503 struct target_semid64_ds
*target_sd
;
3505 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3506 return -TARGET_EFAULT
;
3507 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
3508 return -TARGET_EFAULT
;
3509 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
3510 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
3511 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
3512 unlock_user_struct(target_sd
, target_addr
, 1);
3516 struct target_seminfo
{
3529 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
3530 struct seminfo
*host_seminfo
)
3532 struct target_seminfo
*target_seminfo
;
3533 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
3534 return -TARGET_EFAULT
;
3535 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
3536 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
3537 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
3538 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
3539 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
3540 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
3541 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
3542 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
3543 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
3544 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
3545 unlock_user_struct(target_seminfo
, target_addr
, 1);
3551 struct semid_ds
*buf
;
3552 unsigned short *array
;
3553 struct seminfo
*__buf
;
3556 union target_semun
{
3563 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
3564 abi_ulong target_addr
)
3567 unsigned short *array
;
3569 struct semid_ds semid_ds
;
3572 semun
.buf
= &semid_ds
;
3574 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3576 return get_errno(ret
);
3578 nsems
= semid_ds
.sem_nsems
;
3580 *host_array
= g_try_new(unsigned short, nsems
);
3582 return -TARGET_ENOMEM
;
3584 array
= lock_user(VERIFY_READ
, target_addr
,
3585 nsems
*sizeof(unsigned short), 1);
3587 g_free(*host_array
);
3588 return -TARGET_EFAULT
;
3591 for(i
=0; i
<nsems
; i
++) {
3592 __get_user((*host_array
)[i
], &array
[i
]);
3594 unlock_user(array
, target_addr
, 0);
3599 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
3600 unsigned short **host_array
)
3603 unsigned short *array
;
3605 struct semid_ds semid_ds
;
3608 semun
.buf
= &semid_ds
;
3610 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3612 return get_errno(ret
);
3614 nsems
= semid_ds
.sem_nsems
;
3616 array
= lock_user(VERIFY_WRITE
, target_addr
,
3617 nsems
*sizeof(unsigned short), 0);
3619 return -TARGET_EFAULT
;
3621 for(i
=0; i
<nsems
; i
++) {
3622 __put_user((*host_array
)[i
], &array
[i
]);
3624 g_free(*host_array
);
3625 unlock_user(array
, target_addr
, 1);
3630 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
3631 abi_ulong target_arg
)
3633 union target_semun target_su
= { .buf
= target_arg
};
3635 struct semid_ds dsarg
;
3636 unsigned short *array
= NULL
;
3637 struct seminfo seminfo
;
3638 abi_long ret
= -TARGET_EINVAL
;
3645 /* In 64 bit cross-endian situations, we will erroneously pick up
3646 * the wrong half of the union for the "val" element. To rectify
3647 * this, the entire 8-byte structure is byteswapped, followed by
3648 * a swap of the 4 byte val field. In other cases, the data is
3649 * already in proper host byte order. */
3650 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
3651 target_su
.buf
= tswapal(target_su
.buf
);
3652 arg
.val
= tswap32(target_su
.val
);
3654 arg
.val
= target_su
.val
;
3656 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3660 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
3664 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3665 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
3672 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
3676 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3677 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
3683 arg
.__buf
= &seminfo
;
3684 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3685 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
3693 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
3700 struct target_sembuf
{
3701 unsigned short sem_num
;
3706 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
3707 abi_ulong target_addr
,
3710 struct target_sembuf
*target_sembuf
;
3713 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
3714 nsops
*sizeof(struct target_sembuf
), 1);
3716 return -TARGET_EFAULT
;
3718 for(i
=0; i
<nsops
; i
++) {
3719 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
3720 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
3721 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
3724 unlock_user(target_sembuf
, target_addr
, 0);
3729 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
3731 struct sembuf sops
[nsops
];
3734 if (target_to_host_sembuf(sops
, ptr
, nsops
))
3735 return -TARGET_EFAULT
;
3737 ret
= -TARGET_ENOSYS
;
3738 #ifdef __NR_semtimedop
3739 ret
= get_errno(safe_semtimedop(semid
, sops
, nsops
, NULL
));
3742 if (ret
== -TARGET_ENOSYS
) {
3743 ret
= get_errno(safe_ipc(IPCOP_semtimedop
, semid
, nsops
, 0, sops
, 0));
3749 struct target_msqid_ds
3751 struct target_ipc_perm msg_perm
;
3752 abi_ulong msg_stime
;
3753 #if TARGET_ABI_BITS == 32
3754 abi_ulong __unused1
;
3756 abi_ulong msg_rtime
;
3757 #if TARGET_ABI_BITS == 32
3758 abi_ulong __unused2
;
3760 abi_ulong msg_ctime
;
3761 #if TARGET_ABI_BITS == 32
3762 abi_ulong __unused3
;
3764 abi_ulong __msg_cbytes
;
3766 abi_ulong msg_qbytes
;
3767 abi_ulong msg_lspid
;
3768 abi_ulong msg_lrpid
;
3769 abi_ulong __unused4
;
3770 abi_ulong __unused5
;
3773 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
3774 abi_ulong target_addr
)
3776 struct target_msqid_ds
*target_md
;
3778 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
3779 return -TARGET_EFAULT
;
3780 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
3781 return -TARGET_EFAULT
;
3782 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
3783 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
3784 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
3785 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
3786 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
3787 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
3788 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
3789 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
3790 unlock_user_struct(target_md
, target_addr
, 0);
3794 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
3795 struct msqid_ds
*host_md
)
3797 struct target_msqid_ds
*target_md
;
3799 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
3800 return -TARGET_EFAULT
;
3801 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
3802 return -TARGET_EFAULT
;
3803 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
3804 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
3805 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
3806 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
3807 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
3808 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
3809 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
3810 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
3811 unlock_user_struct(target_md
, target_addr
, 1);
/* Guest-layout msginfo returned by msgctl(IPC_INFO/MSG_INFO). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
3826 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
3827 struct msginfo
*host_msginfo
)
3829 struct target_msginfo
*target_msginfo
;
3830 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
3831 return -TARGET_EFAULT
;
3832 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
3833 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
3834 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
3835 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
3836 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
3837 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
3838 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
3839 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
3840 unlock_user_struct(target_msginfo
, target_addr
, 1);
3844 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
3846 struct msqid_ds dsarg
;
3847 struct msginfo msginfo
;
3848 abi_long ret
= -TARGET_EINVAL
;
3856 if (target_to_host_msqid_ds(&dsarg
,ptr
))
3857 return -TARGET_EFAULT
;
3858 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
3859 if (host_to_target_msqid_ds(ptr
,&dsarg
))
3860 return -TARGET_EFAULT
;
3863 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
3867 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
3868 if (host_to_target_msginfo(ptr
, &msginfo
))
3869 return -TARGET_EFAULT
;
3876 struct target_msgbuf
{
3881 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
3882 ssize_t msgsz
, int msgflg
)
3884 struct target_msgbuf
*target_mb
;
3885 struct msgbuf
*host_mb
;
3889 return -TARGET_EINVAL
;
3892 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
3893 return -TARGET_EFAULT
;
3894 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
3896 unlock_user_struct(target_mb
, msgp
, 0);
3897 return -TARGET_ENOMEM
;
3899 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
3900 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
3901 ret
= -TARGET_ENOSYS
;
3903 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
3906 if (ret
== -TARGET_ENOSYS
) {
3907 ret
= get_errno(safe_ipc(IPCOP_msgsnd
, msqid
, msgsz
, msgflg
,
3912 unlock_user_struct(target_mb
, msgp
, 0);
3917 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
3918 ssize_t msgsz
, abi_long msgtyp
,
3921 struct target_msgbuf
*target_mb
;
3923 struct msgbuf
*host_mb
;
3927 return -TARGET_EINVAL
;
3930 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
3931 return -TARGET_EFAULT
;
3933 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
3935 ret
= -TARGET_ENOMEM
;
3938 ret
= -TARGET_ENOSYS
;
3940 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
3943 if (ret
== -TARGET_ENOSYS
) {
3944 ret
= get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv
), msqid
, msgsz
,
3945 msgflg
, host_mb
, msgtyp
));
3950 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
3951 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
3952 if (!target_mtext
) {
3953 ret
= -TARGET_EFAULT
;
3956 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
3957 unlock_user(target_mtext
, target_mtext_addr
, ret
);
3960 target_mb
->mtype
= tswapal(host_mb
->mtype
);
3964 unlock_user_struct(target_mb
, msgp
, 1);
3969 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
3970 abi_ulong target_addr
)
3972 struct target_shmid_ds
*target_sd
;
3974 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3975 return -TARGET_EFAULT
;
3976 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
3977 return -TARGET_EFAULT
;
3978 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
3979 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
3980 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
3981 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
3982 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
3983 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
3984 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
3985 unlock_user_struct(target_sd
, target_addr
, 0);
3989 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
3990 struct shmid_ds
*host_sd
)
3992 struct target_shmid_ds
*target_sd
;
3994 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3995 return -TARGET_EFAULT
;
3996 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
3997 return -TARGET_EFAULT
;
3998 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
3999 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4000 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4001 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4002 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4003 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4004 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4005 unlock_user_struct(target_sd
, target_addr
, 1);
4009 struct target_shminfo
{
4017 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
4018 struct shminfo
*host_shminfo
)
4020 struct target_shminfo
*target_shminfo
;
4021 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
4022 return -TARGET_EFAULT
;
4023 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
4024 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
4025 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
4026 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
4027 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
4028 unlock_user_struct(target_shminfo
, target_addr
, 1);
4032 struct target_shm_info
{
4037 abi_ulong swap_attempts
;
4038 abi_ulong swap_successes
;
4041 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
4042 struct shm_info
*host_shm_info
)
4044 struct target_shm_info
*target_shm_info
;
4045 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
4046 return -TARGET_EFAULT
;
4047 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
4048 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
4049 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
4050 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
4051 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
4052 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
4053 unlock_user_struct(target_shm_info
, target_addr
, 1);
4057 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
4059 struct shmid_ds dsarg
;
4060 struct shminfo shminfo
;
4061 struct shm_info shm_info
;
4062 abi_long ret
= -TARGET_EINVAL
;
4070 if (target_to_host_shmid_ds(&dsarg
, buf
))
4071 return -TARGET_EFAULT
;
4072 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
4073 if (host_to_target_shmid_ds(buf
, &dsarg
))
4074 return -TARGET_EFAULT
;
4077 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
4078 if (host_to_target_shminfo(buf
, &shminfo
))
4079 return -TARGET_EFAULT
;
4082 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
4083 if (host_to_target_shm_info(buf
, &shm_info
))
4084 return -TARGET_EFAULT
;
4089 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
4096 #ifndef TARGET_FORCE_SHMLBA
4097 /* For most architectures, SHMLBA is the same as the page size;
4098 * some architectures have larger values, in which case they should
4099 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4100 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4101 * and defining its own value for SHMLBA.
4103 * The kernel also permits SHMLBA to be set by the architecture to a
4104 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4105 * this means that addresses are rounded to the large size if
4106 * SHM_RND is set but addresses not aligned to that size are not rejected
4107 * as long as they are at least page-aligned. Since the only architecture
4108 * which uses this is ia64 this code doesn't provide for that oddity.
4110 static inline abi_ulong
target_shmlba(CPUArchState
*cpu_env
)
4112 return TARGET_PAGE_SIZE
;
4116 static inline abi_ulong
do_shmat(CPUArchState
*cpu_env
,
4117 int shmid
, abi_ulong shmaddr
, int shmflg
)
4121 struct shmid_ds shm_info
;
4125 /* find out the length of the shared memory segment */
4126 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
4127 if (is_error(ret
)) {
4128 /* can't get length, bail out */
4132 shmlba
= target_shmlba(cpu_env
);
4134 if (shmaddr
& (shmlba
- 1)) {
4135 if (shmflg
& SHM_RND
) {
4136 shmaddr
&= ~(shmlba
- 1);
4138 return -TARGET_EINVAL
;
4141 if (!guest_range_valid(shmaddr
, shm_info
.shm_segsz
)) {
4142 return -TARGET_EINVAL
;
4148 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
4150 abi_ulong mmap_start
;
4152 /* In order to use the host shmat, we need to honor host SHMLBA. */
4153 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
, MAX(SHMLBA
, shmlba
));
4155 if (mmap_start
== -1) {
4157 host_raddr
= (void *)-1;
4159 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
4162 if (host_raddr
== (void *)-1) {
4164 return get_errno((long)host_raddr
);
4166 raddr
=h2g((unsigned long)host_raddr
);
4168 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
4169 PAGE_VALID
| PAGE_READ
|
4170 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
4172 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
4173 if (!shm_regions
[i
].in_use
) {
4174 shm_regions
[i
].in_use
= true;
4175 shm_regions
[i
].start
= raddr
;
4176 shm_regions
[i
].size
= shm_info
.shm_segsz
;
4186 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
4193 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
4194 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
4195 shm_regions
[i
].in_use
= false;
4196 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
4200 rv
= get_errno(shmdt(g2h(shmaddr
)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    /* high 16 bits of 'call' carry the interface version */
    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* old-style call packs msgp/msgtyp in a kludge struct */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                                tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* attach address is returned through *third */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
4314 /* kernel structure types definitions */
4316 #define STRUCT(name, ...) STRUCT_ ## name,
4317 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4319 #include "syscall_types.h"
4323 #undef STRUCT_SPECIAL
4325 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4326 #define STRUCT_SPECIAL(name)
4327 #include "syscall_types.h"
4329 #undef STRUCT_SPECIAL
4331 typedef struct IOCTLEntry IOCTLEntry
;
4333 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4334 int fd
, int cmd
, abi_long arg
);
4338 unsigned int host_cmd
;
4341 do_ioctl_fn
*do_ioctl
;
4342 const argtype arg_type
[5];
4345 #define IOC_R 0x0001
4346 #define IOC_W 0x0002
4347 #define IOC_RW (IOC_R | IOC_W)
4349 #define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

/*
 * Handler for FS_IOC_FIEMAP: a struct fiemap followed by a guest-sized
 * array of fiemap_extent.  Converts in, calls the host ioctl, converts
 * header + mapped extents back out.
 */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
4440 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4441 int fd
, int cmd
, abi_long arg
)
4443 const argtype
*arg_type
= ie
->arg_type
;
4447 struct ifconf
*host_ifconf
;
4449 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
4450 int target_ifreq_size
;
4455 abi_long target_ifc_buf
;
4459 assert(arg_type
[0] == TYPE_PTR
);
4460 assert(ie
->access
== IOC_RW
);
4463 target_size
= thunk_type_size(arg_type
, 0);
4465 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4467 return -TARGET_EFAULT
;
4468 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4469 unlock_user(argptr
, arg
, 0);
4471 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
4472 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
4473 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
4475 if (target_ifc_buf
!= 0) {
4476 target_ifc_len
= host_ifconf
->ifc_len
;
4477 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
4478 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
4480 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
4481 if (outbufsz
> MAX_STRUCT_SIZE
) {
4483 * We can't fit all the extents into the fixed size buffer.
4484 * Allocate one that is large enough and use it instead.
4486 host_ifconf
= malloc(outbufsz
);
4488 return -TARGET_ENOMEM
;
4490 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
4493 host_ifc_buf
= (char *)host_ifconf
+ sizeof(*host_ifconf
);
4495 host_ifconf
->ifc_len
= host_ifc_len
;
4497 host_ifc_buf
= NULL
;
4499 host_ifconf
->ifc_buf
= host_ifc_buf
;
4501 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
4502 if (!is_error(ret
)) {
4503 /* convert host ifc_len to target ifc_len */
4505 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
4506 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
4507 host_ifconf
->ifc_len
= target_ifc_len
;
4509 /* restore target ifc_buf */
4511 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
4513 /* copy struct ifconf to target user */
4515 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4517 return -TARGET_EFAULT
;
4518 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
4519 unlock_user(argptr
, arg
, target_size
);
4521 if (target_ifc_buf
!= 0) {
4522 /* copy ifreq[] to target user */
4523 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
4524 for (i
= 0; i
< nb_ifreq
; i
++) {
4525 thunk_convert(argptr
+ i
* target_ifreq_size
,
4526 host_ifc_buf
+ i
* sizeof(struct ifreq
),
4527 ifreq_arg_type
, THUNK_TARGET
);
4529 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
4540 #if defined(CONFIG_USBFS)
4541 #if HOST_LONG_BITS > 64
4542 #error USBDEVFS thunks do not support >64 bit hosts yet.
4545 uint64_t target_urb_adr
;
4546 uint64_t target_buf_adr
;
4547 char *target_buf_ptr
;
4548 struct usbdevfs_urb host_urb
;
4551 static GHashTable
*usbdevfs_urb_hashtable(void)
4553 static GHashTable
*urb_hashtable
;
4555 if (!urb_hashtable
) {
4556 urb_hashtable
= g_hash_table_new(g_int64_hash
, g_int64_equal
);
4558 return urb_hashtable
;
4561 static void urb_hashtable_insert(struct live_urb
*urb
)
4563 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4564 g_hash_table_insert(urb_hashtable
, urb
, urb
);
4567 static struct live_urb
*urb_hashtable_lookup(uint64_t target_urb_adr
)
4569 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4570 return g_hash_table_lookup(urb_hashtable
, &target_urb_adr
);
4573 static void urb_hashtable_remove(struct live_urb
*urb
)
4575 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4576 g_hash_table_remove(urb_hashtable
, urb
);
4580 do_ioctl_usbdevfs_reapurb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4581 int fd
, int cmd
, abi_long arg
)
4583 const argtype usbfsurb_arg_type
[] = { MK_STRUCT(STRUCT_usbdevfs_urb
) };
4584 const argtype ptrvoid_arg_type
[] = { TYPE_PTRVOID
, 0, 0 };
4585 struct live_urb
*lurb
;
4589 uintptr_t target_urb_adr
;
4592 target_size
= thunk_type_size(usbfsurb_arg_type
, THUNK_TARGET
);
4594 memset(buf_temp
, 0, sizeof(uint64_t));
4595 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4596 if (is_error(ret
)) {
4600 memcpy(&hurb
, buf_temp
, sizeof(uint64_t));
4601 lurb
= (void *)((uintptr_t)hurb
- offsetof(struct live_urb
, host_urb
));
4602 if (!lurb
->target_urb_adr
) {
4603 return -TARGET_EFAULT
;
4605 urb_hashtable_remove(lurb
);
4606 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
,
4607 lurb
->host_urb
.buffer_length
);
4608 lurb
->target_buf_ptr
= NULL
;
4610 /* restore the guest buffer pointer */
4611 lurb
->host_urb
.buffer
= (void *)(uintptr_t)lurb
->target_buf_adr
;
4613 /* update the guest urb struct */
4614 argptr
= lock_user(VERIFY_WRITE
, lurb
->target_urb_adr
, target_size
, 0);
4617 return -TARGET_EFAULT
;
4619 thunk_convert(argptr
, &lurb
->host_urb
, usbfsurb_arg_type
, THUNK_TARGET
);
4620 unlock_user(argptr
, lurb
->target_urb_adr
, target_size
);
4622 target_size
= thunk_type_size(ptrvoid_arg_type
, THUNK_TARGET
);
4623 /* write back the urb handle */
4624 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4627 return -TARGET_EFAULT
;
4630 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4631 target_urb_adr
= lurb
->target_urb_adr
;
4632 thunk_convert(argptr
, &target_urb_adr
, ptrvoid_arg_type
, THUNK_TARGET
);
4633 unlock_user(argptr
, arg
, target_size
);
4640 do_ioctl_usbdevfs_discardurb(const IOCTLEntry
*ie
,
4641 uint8_t *buf_temp
__attribute__((unused
)),
4642 int fd
, int cmd
, abi_long arg
)
4644 struct live_urb
*lurb
;
4646 /* map target address back to host URB with metadata. */
4647 lurb
= urb_hashtable_lookup(arg
);
4649 return -TARGET_EFAULT
;
4651 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
4655 do_ioctl_usbdevfs_submiturb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4656 int fd
, int cmd
, abi_long arg
)
4658 const argtype
*arg_type
= ie
->arg_type
;
4663 struct live_urb
*lurb
;
4666 * each submitted URB needs to map to a unique ID for the
4667 * kernel, and that unique ID needs to be a pointer to
4668 * host memory. hence, we need to malloc for each URB.
4669 * isochronous transfers have a variable length struct.
4672 target_size
= thunk_type_size(arg_type
, THUNK_TARGET
);
4674 /* construct host copy of urb and metadata */
4675 lurb
= g_try_malloc0(sizeof(struct live_urb
));
4677 return -TARGET_ENOMEM
;
4680 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4683 return -TARGET_EFAULT
;
4685 thunk_convert(&lurb
->host_urb
, argptr
, arg_type
, THUNK_HOST
);
4686 unlock_user(argptr
, arg
, 0);
4688 lurb
->target_urb_adr
= arg
;
4689 lurb
->target_buf_adr
= (uintptr_t)lurb
->host_urb
.buffer
;
4691 /* buffer space used depends on endpoint type so lock the entire buffer */
4692 /* control type urbs should check the buffer contents for true direction */
4693 rw_dir
= lurb
->host_urb
.endpoint
& USB_DIR_IN
? VERIFY_WRITE
: VERIFY_READ
;
4694 lurb
->target_buf_ptr
= lock_user(rw_dir
, lurb
->target_buf_adr
,
4695 lurb
->host_urb
.buffer_length
, 1);
4696 if (lurb
->target_buf_ptr
== NULL
) {
4698 return -TARGET_EFAULT
;
4701 /* update buffer pointer in host copy */
4702 lurb
->host_urb
.buffer
= lurb
->target_buf_ptr
;
4704 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
4705 if (is_error(ret
)) {
4706 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
, 0);
4709 urb_hashtable_insert(lurb
);
4714 #endif /* CONFIG_USBFS */
4716 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
4717 int cmd
, abi_long arg
)
4720 struct dm_ioctl
*host_dm
;
4721 abi_long guest_data
;
4722 uint32_t guest_data_size
;
4724 const argtype
*arg_type
= ie
->arg_type
;
4726 void *big_buf
= NULL
;
4730 target_size
= thunk_type_size(arg_type
, 0);
4731 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4733 ret
= -TARGET_EFAULT
;
4736 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4737 unlock_user(argptr
, arg
, 0);
4739 /* buf_temp is too small, so fetch things into a bigger buffer */
4740 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
4741 memcpy(big_buf
, buf_temp
, target_size
);
4745 guest_data
= arg
+ host_dm
->data_start
;
4746 if ((guest_data
- arg
) < 0) {
4747 ret
= -TARGET_EINVAL
;
4750 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
4751 host_data
= (char*)host_dm
+ host_dm
->data_start
;
4753 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
4755 ret
= -TARGET_EFAULT
;
4759 switch (ie
->host_cmd
) {
4761 case DM_LIST_DEVICES
:
4764 case DM_DEV_SUSPEND
:
4767 case DM_TABLE_STATUS
:
4768 case DM_TABLE_CLEAR
:
4770 case DM_LIST_VERSIONS
:
4774 case DM_DEV_SET_GEOMETRY
:
4775 /* data contains only strings */
4776 memcpy(host_data
, argptr
, guest_data_size
);
4779 memcpy(host_data
, argptr
, guest_data_size
);
4780 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
4784 void *gspec
= argptr
;
4785 void *cur_data
= host_data
;
4786 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
4787 int spec_size
= thunk_type_size(arg_type
, 0);
4790 for (i
= 0; i
< host_dm
->target_count
; i
++) {
4791 struct dm_target_spec
*spec
= cur_data
;
4795 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
4796 slen
= strlen((char*)gspec
+ spec_size
) + 1;
4798 spec
->next
= sizeof(*spec
) + slen
;
4799 strcpy((char*)&spec
[1], gspec
+ spec_size
);
4801 cur_data
+= spec
->next
;
4806 ret
= -TARGET_EINVAL
;
4807 unlock_user(argptr
, guest_data
, 0);
4810 unlock_user(argptr
, guest_data
, 0);
4812 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4813 if (!is_error(ret
)) {
4814 guest_data
= arg
+ host_dm
->data_start
;
4815 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
4816 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
4817 switch (ie
->host_cmd
) {
4822 case DM_DEV_SUSPEND
:
4825 case DM_TABLE_CLEAR
:
4827 case DM_DEV_SET_GEOMETRY
:
4828 /* no return data */
4830 case DM_LIST_DEVICES
:
4832 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
4833 uint32_t remaining_data
= guest_data_size
;
4834 void *cur_data
= argptr
;
4835 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
4836 int nl_size
= 12; /* can't use thunk_size due to alignment */
4839 uint32_t next
= nl
->next
;
4841 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
4843 if (remaining_data
< nl
->next
) {
4844 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
4847 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
4848 strcpy(cur_data
+ nl_size
, nl
->name
);
4849 cur_data
+= nl
->next
;
4850 remaining_data
-= nl
->next
;
4854 nl
= (void*)nl
+ next
;
4859 case DM_TABLE_STATUS
:
4861 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
4862 void *cur_data
= argptr
;
4863 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
4864 int spec_size
= thunk_type_size(arg_type
, 0);
4867 for (i
= 0; i
< host_dm
->target_count
; i
++) {
4868 uint32_t next
= spec
->next
;
4869 int slen
= strlen((char*)&spec
[1]) + 1;
4870 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
4871 if (guest_data_size
< spec
->next
) {
4872 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
4875 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
4876 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
4877 cur_data
= argptr
+ spec
->next
;
4878 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
4884 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
4885 int count
= *(uint32_t*)hdata
;
4886 uint64_t *hdev
= hdata
+ 8;
4887 uint64_t *gdev
= argptr
+ 8;
4890 *(uint32_t*)argptr
= tswap32(count
);
4891 for (i
= 0; i
< count
; i
++) {
4892 *gdev
= tswap64(*hdev
);
4898 case DM_LIST_VERSIONS
:
4900 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
4901 uint32_t remaining_data
= guest_data_size
;
4902 void *cur_data
= argptr
;
4903 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
4904 int vers_size
= thunk_type_size(arg_type
, 0);
4907 uint32_t next
= vers
->next
;
4909 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
4911 if (remaining_data
< vers
->next
) {
4912 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
4915 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
4916 strcpy(cur_data
+ vers_size
, vers
->name
);
4917 cur_data
+= vers
->next
;
4918 remaining_data
-= vers
->next
;
4922 vers
= (void*)vers
+ next
;
4927 unlock_user(argptr
, guest_data
, 0);
4928 ret
= -TARGET_EINVAL
;
4931 unlock_user(argptr
, guest_data
, guest_data_size
);
4933 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4935 ret
= -TARGET_EFAULT
;
4938 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
4939 unlock_user(argptr
, arg
, target_size
);
/*
 * Special handler for the BLKPG block-device partitioning ioctl.
 * struct blkpg_ioctl_arg carries a pointer member ("data") to a
 * struct blkpg_partition payload, so both the outer struct and the
 * pointed-to payload must be converted from the target layout before
 * issuing the host ioctl.
 * NOTE(review): several lines (braces, local declarations, the error
 * labels) appear to have been dropped by the extraction of this file.
 */
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
    const argtype *arg_type = ie->arg_type;
    /* thunk descriptor for the nested struct blkpg_partition payload */
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert the outer struct blkpg_ioctl_arg from the guest. */
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
        /* lock_user failed: guest passed a bad address */
        ret = -TARGET_EFAULT;
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Only partition add/delete take a blkpg_partition payload. */
    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        /* Unknown opcode */
        ret = -TARGET_EINVAL;

    /* Read and convert the payload pointed to by blkpg->data. */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
        ret = -TARGET_EFAULT;
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
/*
 * Special handler for routing-table ioctls taking a struct rtentry
 * (presumably SIOCADDRT/SIOCDELRT -- confirm against ioctl_entries).
 * struct rtentry contains a char *rt_dev device-name pointer that the
 * generic thunk layer cannot translate, so the struct is converted
 * field by field and rt_dev is locked from guest memory by hand.
 */
static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
    const argtype *arg_type = ie->arg_type;
    const StructEntry *se;
    const argtype *field_types;
    const int *dst_offsets, *src_offsets;
    /* Pointers to the rt_dev slot in the target and host copies. */
    abi_ulong *target_rt_dev_ptr = NULL;
    unsigned long *host_rt_dev_ptr = NULL;

    /* Sanity-check the descriptor: write-only pointer to a struct. */
    assert(ie->access == IOC_W);
    assert(*arg_type == TYPE_PTR);
    assert(*arg_type == TYPE_STRUCT);
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
        return -TARGET_EFAULT;
    assert(*arg_type == (int)STRUCT_rtentry);
    se = struct_entries + *arg_type++;
    assert(se->convert[0] == NULL);
    /* convert struct here to be able to catch rt_dev string */
    field_types = se->field_types;
    dst_offsets = se->field_offsets[THUNK_HOST];
    src_offsets = se->field_offsets[THUNK_TARGET];
    for (i = 0; i < se->nb_fields; i++) {
        if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
            /* rt_dev: translate the guest string pointer by hand. */
            assert(*field_types == TYPE_PTRVOID);
            target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
            host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
            if (*target_rt_dev_ptr != 0) {
                /* Lock the guest device-name string into host memory. */
                *host_rt_dev_ptr = (unsigned long)lock_user_string(
                                       tswapal(*target_rt_dev_ptr));
                if (!*host_rt_dev_ptr) {
                    unlock_user(argptr, arg, 0);
                    return -TARGET_EFAULT;
                /* NULL rt_dev in the guest stays NULL on the host. */
                *host_rt_dev_ptr = 0;
        /* All other fields go through the generic per-field thunk. */
        field_types = thunk_convert(buf_temp + dst_offsets[i],
                                    argptr + src_offsets[i],
                                    field_types, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));

    /* Release the locked rt_dev string, if one was set up above. */
    assert(host_rt_dev_ptr != NULL);
    assert(target_rt_dev_ptr != NULL);
    if (*host_rt_dev_ptr != 0) {
        unlock_user((void *)*host_rt_dev_ptr,
                    *target_rt_dev_ptr, 0);
5065 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5066 int fd
, int cmd
, abi_long arg
)
5068 int sig
= target_to_host_signal(arg
);
5069 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
/*
 * Special handler for SIOCGSTAMP (get last-packet timestamp of a
 * socket): fetch the host struct timeval, then copy it back to the
 * guest in either the old 32-bit or the new 64-bit time layout,
 * depending on which target command the guest used.
 * NOTE(review): local declarations and return statements appear on
 * lines dropped by the extraction.
 */
static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
                                    int fd, int cmd, abi_long arg)
    ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
    if (is_error(ret)) {

    if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
        /* Legacy layout: 32-bit time_t timeval. */
        if (copy_to_user_timeval(arg, &tv)) {
            return -TARGET_EFAULT;
        /* _NEW variant: 64-bit time_t timeval. */
        if (copy_to_user_timeval64(arg, &tv)) {
            return -TARGET_EFAULT;
/*
 * Special handler for SIOCGSTAMPNS: same as do_ioctl_SIOCGSTAMP but
 * with nanosecond resolution (struct timespec), again choosing the
 * old or 64-bit guest layout based on the target command.
 * NOTE(review): local declarations and return statements appear on
 * lines dropped by the extraction.
 */
static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
                                      int fd, int cmd, abi_long arg)
    ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
    if (is_error(ret)) {

    if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
        /* Legacy layout: 32-bit time_t timespec. */
        if (host_to_target_timespec(arg, &ts)) {
            return -TARGET_EFAULT;
        /* _NEW variant: 64-bit time_t timespec. */
        if (host_to_target_timespec64(arg, &ts)) {
            return -TARGET_EFAULT;
5121 static abi_long
do_ioctl_tiocgptpeer(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5122 int fd
, int cmd
, abi_long arg
)
5124 int flags
= target_to_host_bitmask(arg
, fcntl_flags_tbl
);
5125 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, flags
));
/*
 * Table of ioctls known to the translator, populated via three
 * generator macros:
 *   IOCTL()         - translated generically through the thunk layer
 *   IOCTL_SPECIAL() - needs a custom do_ioctl_*() handler (dofn)
 *   IOCTL_IGNORE()  - target-defined ioctl with no host equivalent
 *                     (host_cmd left 0 so do_ioctl() returns ENOSYS)
 * The table entries themselves are on lines outside this view.
 */
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
#define IOCTL_IGNORE(cmd) \
    { TARGET_ ## cmd, 0, #cmd },
/* ??? Implement proper locking for ioctls. */
/*
 * do_ioctl(): central ioctl dispatcher. Looks the target command up in
 * ioctl_entries, delegates to a custom handler when one is registered,
 * and otherwise converts the argument through the thunk layer
 * according to the entry's argument type and access direction.
 * Must return target values and target errnos (unlike most do_*()
 * functions).
 * NOTE(review): the lookup loop, case labels and break statements
 * appear on lines dropped by the extraction.
 */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
    const IOCTLEntry *ie;
    const argtype *arg_type;
    /* scratch buffer holding the host-layout copy of the argument */
    uint8_t buf_temp[MAX_STRUCT_SIZE];

    /* Hit the table terminator without a match: unknown ioctl. */
    if (ie->target_cmd == 0) {
        gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
        return -TARGET_ENOSYS;
    if (ie->target_cmd == cmd)

    arg_type = ie->arg_type;
        /* Entry registered a custom handler: delegate to it. */
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    } else if (!ie->host_cmd) {
        /* Some architectures define BSD ioctls in their headers
           that are not implemented in Linux. */
        return -TARGET_ENOSYS;

    switch(arg_type[0]) {
        /* no argument at all */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        /* scalar argument, passed through unchanged */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        /* pointer argument: convert via the thunk layer */
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
            /* IOC_R: host fills buf_temp, copy back out to the guest */
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            /* IOC_W: convert guest data in, host only reads it */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            /* IOC_RW (presumably -- label dropped): convert in, call,
             * then convert the (possibly updated) buffer back out. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
        /* Argument type the thunk layer cannot handle. */
        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
/*
 * termios c_iflag (input modes) target<->host bit translation table.
 * Each entry is { target_mask, target_bits, host_mask, host_bits }.
 * The terminating entry and closing brace are outside this view.
 */
static const bitmask_transtbl iflag_tbl[] = {
    { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
    { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
    { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
    { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
    { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
    { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
    { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
    { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
    { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
    { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
    { TARGET_IXON, TARGET_IXON, IXON, IXON },
    { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
    { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
    { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
/*
 * termios c_oflag (output modes) target<->host bit translation table.
 * Single-bit flags first, then the multi-bit delay fields (NLDLY,
 * CRDLY, TABDLY, BSDLY, VTDLY, FFDLY), one entry per field value.
 * The terminating entry and closing brace are outside this view.
 */
static const bitmask_transtbl oflag_tbl[] = {
    { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
    { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
    { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
    { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
    { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
    { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
    { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
    { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
    { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
    { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
    { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
    { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
    { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
    { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
    { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
    { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
    { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
    { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
    { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
    { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
    { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
    { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
    { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
    { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
/*
 * termios c_cflag (control modes) target<->host translation table.
 * Covers the CBAUD speed field (one entry per baud-rate value), the
 * CSIZE character-size field, and the remaining single-bit flags.
 * The terminating entry and closing brace are outside this view.
 */
static const bitmask_transtbl cflag_tbl[] = {
    { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
    { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
    { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
    { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
    { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
    { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
    { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
    { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
    { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
    { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
    { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
    { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
    { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
    { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
    { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
    { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
    { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
    { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
    { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
    { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
    { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
    { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
    { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
    { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
    { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
    { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
    { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
    { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
    { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
    { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
    { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
/*
 * termios c_lflag (local modes) target<->host bit translation table.
 * The terminating entry and closing brace are outside this view.
 */
static const bitmask_transtbl lflag_tbl[] = {
    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
/*
 * Convert a target struct termios (src) to the host layout (dst):
 * each mode word is byte-swapped and run through its translation
 * table, and each control character is copied to the host's index.
 * NOTE(review): the "host->c_Xflag =" left-hand sides of the four
 * bitmask conversions appear on lines dropped by the extraction.
 */
static void target_to_host_termios (void *dst, const void *src)
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    /* Translate the four mode words through their bit tables. */
    target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    /* Remap control characters index by index; clear the rest. */
    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
/*
 * Convert a host struct termios (src) to the target layout (dst):
 * the inverse of target_to_host_termios().
 * NOTE(review): the "target->c_Xflag =" left-hand sides of the four
 * bitmask conversions appear on lines dropped by the extraction.
 */
static void host_to_target_termios (void *dst, const void *src)
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    /* Translate the four mode words back and byte-swap for the guest. */
    tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    /* Remap control characters index by index; clear the rest. */
    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
/*
 * Thunk-layer descriptor for struct termios: custom converter
 * functions plus the size and alignment of the target and host
 * layouts (indexed by thunk direction).
 */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
/*
 * mmap() flags target<->host translation table.
 * The terminating entry and closing brace are outside this view.
 */
static bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
      MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
      MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
      MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
      MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
      MAP_NORESERVE, MAP_NORESERVE },
    { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
    /* MAP_STACK had been ignored by the kernel for quite some time.
       Recognize it for the target insofar as we do not want to pass
       it through to the host. */
    { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
#if defined(TARGET_I386)

/* NOTE: there is really one LDT for all the threads */
static uint8_t *ldt_table;

/*
 * Implements the read side of modify_ldt(2): copy the emulated LDT
 * into the guest buffer at ptr, capped at bytecount bytes.
 * NOTE(review): local declarations, the size clamp and the return
 * appear on lines dropped by the extraction.
 */
static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
        return -TARGET_EFAULT;
    /* ??? Should this be byteswapped? */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);
/* XXX: add locking support */
/*
 * Implements the write side of modify_ldt(2): validate the guest's
 * struct user_desc-style request, lazily allocate the emulated LDT,
 * encode the descriptor into the x86 two-word format, and install it.
 * oldmode selects the legacy modify_ldt semantics.
 * NOTE(review): several condition/brace lines appear on lines dropped
 * by the extraction.
 */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    /* Copy the request in, byte-swapping each field. */
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the packed flags word (same layout as struct user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
    lm = (ldt_info.flags >> 7) & 1;
    if (contents == 3) {
        return -TARGET_EINVAL;
    if (seg_not_present == 0)
        return -TARGET_EINVAL;

    /* Lazily allocate the (single, process-wide) LDT backing store. */
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
            read_exec_only == 1 &&
            limit_in_pages == 0 &&
            seg_not_present == 1 &&

    /* Encode the request into the hardware descriptor words. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        ((seg_not_present ^ 1) << 15) |
        (limit_in_pages << 23) |
        /* "useable" bit only set in non-oldmode requests */
        entry_2 |= (useable << 20);

    /* Install the new entry ... */
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
/* specific and weird i386 syscalls */
/*
 * modify_ldt(2) dispatcher: func selects read (0), legacy write (1)
 * or new-mode write. Case labels appear on dropped lines.
 */
static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
                              unsigned long bytecount)
        ret = read_ldt(ptr, bytecount);
        /* legacy write semantics */
        ret = write_ldt(env, ptr, bytecount, 1);
        /* new-mode write semantics */
        ret = write_ldt(env, ptr, bytecount, 0);
        ret = -TARGET_ENOSYS;
#if defined(TARGET_I386) && defined(TARGET_ABI32)
/*
 * set_thread_area(2) for 32-bit x86 guests: install a TLS descriptor
 * into the guest's GDT. An entry_number of -1 asks the kernel to pick
 * a free slot in the TLS range and report it back to the guest.
 * Mirrors write_ldt() above but targets the GDT TLS slots.
 * NOTE(review): several condition/brace lines appear on lines dropped
 * by the extraction.
 */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    /* VERIFY_WRITE: we may write the chosen entry_number back. */
    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        /* Find a free TLS slot and report it to the guest. */
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    /* Unpack the packed flags word (same layout as struct user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
    lm = (ldt_info.flags >> 7) & 1;

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0 &&
             read_exec_only == 1 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&

    /* Encode the request into the hardware descriptor words. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        ((seg_not_present ^ 1) << 15) |
        (limit_in_pages << 23) |

    /* Install the new entry ... */
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
/*
 * get_thread_area(2) for 32-bit x86 guests: read the GDT TLS
 * descriptor selected by the guest's entry_number, decode the two
 * hardware descriptor words back into base/limit/flags, and write
 * them to the guest's struct user_desc.
 */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    /* Only slots in the TLS range may be queried. */
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Decode the descriptor bits (inverse of do_set_thread_area). */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
    lm = (entry_2 >> 21) & 1;

    /* Repack into the user_desc flags word. */
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
5698 #endif /* TARGET_I386 && TARGET_ABI32 */
5700 #ifndef TARGET_ABI32
5701 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
5708 case TARGET_ARCH_SET_GS
:
5709 case TARGET_ARCH_SET_FS
:
5710 if (code
== TARGET_ARCH_SET_GS
)
5714 cpu_x86_load_seg(env
, idx
, 0);
5715 env
->segs
[idx
].base
= addr
;
5717 case TARGET_ARCH_GET_GS
:
5718 case TARGET_ARCH_GET_FS
:
5719 if (code
== TARGET_ARCH_GET_GS
)
5723 val
= env
->segs
[idx
].base
;
5724 if (put_user(val
, addr
, abi_ulong
))
5725 ret
= -TARGET_EFAULT
;
5728 ret
= -TARGET_EINVAL
;
5735 #endif /* defined(TARGET_I386) */
/* Stack size for threads created via clone(CLONE_VM). */
#define NEW_STACK_SIZE 0x40000

/* Serialises thread creation so clone_func() setup appears atomic. */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Handshake state handed from do_fork() to the new thread.
     * NOTE(review): the enclosing typedef struct { ... } new_thread_info
     * lines were dropped by the extraction. */
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    /* Guest addresses at which to store the new thread's tid, if set. */
    abi_ulong child_tidptr;
    abi_ulong parent_tidptr;
/*
 * Entry point of a new guest thread created by do_fork(CLONE_VM).
 * Registers the thread with RCU/TCG, publishes its tid where
 * requested, unblocks signals, signals readiness to the parent, and
 * then waits on clone_lock until the parent finishes TLS setup.
 */
static void *clone_func(void *arg)
    new_thread_info *info = arg;

    rcu_register_thread();
    tcg_register_thread();
    ts = (TaskState *)cpu->opaque;
    info->tid = sys_gettid();
    /* Publish the tid to the guest addresses the parent requested. */
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);
    /* Enable signals. */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready. */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state. */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
/*
 * Implement clone(2)/fork(2)/vfork(2). CLONE_VM requests become a new
 * host pthread sharing this CPU's address space; anything else is
 * emulated with a host fork(). vfork() is downgraded to fork().
 * NOTE(review): the fork() call itself and several brace lines appear
 * on lines dropped by the extraction.
 */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
    CPUState *cpu = env_cpu(env);
    CPUArchState *new_env;

    /* Drop flags we deliberately ignore. */
    flags &= ~CLONE_IGNORED_FLAGS;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        /* Thread creation path. */
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        /* Reject flag combinations we cannot emulate as a thread. */
        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;

        ts = g_new0(TaskState, 1);
        init_task_state(ts);

        /* Grab a mutex so that thread setup appears atomic. */
        pthread_mutex_lock(&clone_lock);

        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent. */
        cpu_clone_regs_child(new_env, newsp, flags);
        cpu_clone_regs_parent(env, flags);
        new_cpu = env_cpu(new_env);
        new_cpu->opaque = ts;
        /* The new task inherits the parent's binary/image state. */
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;

        if (flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;

        if (flags & CLONE_SETTLS) {
            cpu_set_tls (new_env, newtls);

        /* Set up the parent<->child startup handshake. */
        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);

        if (flags & CLONE_CHILD_SETTID) {
            info.child_tidptr = child_tidptr;
        if (flags & CLONE_PARENT_SETTID) {
            info.parent_tidptr = parent_tidptr;

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals. */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
        cpu->random_seed = qemu_guest_random_seed_thread_part1();

        /* If this is our first additional thread, we need to ensure we
         * generate code for parallel execution and flush old translations.
        if (!parallel_cpus) {
            parallel_cpus = true;

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed. */

        /* Restore our signal mask now the child owns the blocked set. */
        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
            /* Wait for the child to initialize. */
            pthread_cond_wait(&info.cond, &info.mutex);
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
        /* if no CLONE_VM, we consider it is a fork */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;

        /* We can't support custom termination signals */
        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
            return -TARGET_EINVAL;

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;

            /* Child Process. */
            cpu_clone_regs_child(env, newsp, flags);
            /* There is a race condition here. The parent process could
               theoretically read the TID in the child process before the child
               tid is set. This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping. We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock. */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(sys_gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(sys_gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
            /* Parent side of the fork. */
            cpu_clone_regs_parent(env, flags);
/* warning : doesn't handle linux specific flags... */
/*
 * Translate a target fcntl(2) command to the corresponding host
 * command, or -TARGET_EINVAL for unknown commands.
 * NOTE(review): most "ret = F_*; break;" lines appear on lines
 * dropped by the extraction.
 */
static int target_to_host_fcntl_cmd(int cmd)
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
    case TARGET_F_GETLK:
    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
    case TARGET_F_GETOWN:
    case TARGET_F_SETOWN:
    case TARGET_F_GETSIG:
    case TARGET_F_SETSIG:
#if TARGET_ABI_BITS == 32
    case TARGET_F_GETLK64:
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        ret = F_DUPFD_CLOEXEC;
    case TARGET_F_NOTIFY:
    case TARGET_F_GETOWN_EX:
    case TARGET_F_SETOWN_EX:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
        ret = -TARGET_EINVAL;

#if defined(__powerpc64__)
    /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
     * is not supported by kernel. The glibc fcntl call actually adjusts
     * them to 5, 6 and 7 before making the syscall(). Since we make the
     * syscall directly, adjust to what is supported by the kernel.
    if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
        ret -= F_GETLK64 - 5;
/*
 * List of flock lock types shared by both conversion directions;
 * TRANSTBL_CONVERT is (re)defined before each expansion to emit the
 * appropriate switch cases.
 */
#define FLOCK_TRANSTBL \
        TRANSTBL_CONVERT(F_RDLCK); \
        TRANSTBL_CONVERT(F_WRLCK); \
        TRANSTBL_CONVERT(F_UNLCK); \
        TRANSTBL_CONVERT(F_EXLCK); \
        TRANSTBL_CONVERT(F_SHLCK); \
/*
 * Map a target flock l_type value to the host value via
 * FLOCK_TRANSTBL; unknown values yield -TARGET_EINVAL.
 * (The switch statement and macro expansion lines were dropped by the
 * extraction.)
 */
static int target_to_host_flock(int type)
#define TRANSTBL_CONVERT(a) case TARGET_##a: return a
#undef TRANSTBL_CONVERT
    return -TARGET_EINVAL;
/*
 * Map a host flock l_type value to the target value via
 * FLOCK_TRANSTBL.
 */
static int host_to_target_flock(int type)
#define TRANSTBL_CONVERT(a) case a: return TARGET_##a
#undef TRANSTBL_CONVERT
    /* if we don't know how to convert the value coming
     * from the host we copy to the target field as-is
/*
 * Copy a struct flock from the guest (target layout) into a host
 * struct flock64, translating l_type through target_to_host_flock().
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 * NOTE(review): the l_type declaration and its negative-value check
 * appear on lines dropped by the extraction.
 */
static inline abi_long copy_from_user_flock(struct flock64 *fl,
                                            abi_ulong target_flock_addr)
    struct target_flock *target_fl;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
/*
 * Copy a host struct flock64 out to the guest as a struct flock
 * (target layout), translating l_type through host_to_target_flock().
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
                                          const struct flock64 *fl)
    struct target_flock *target_fl;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
/* Function types for the flock64 guest<->host copy helpers, so the
 * fcntl code can pick the right variant (e.g. ARM OABI) at runtime. */
typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6100 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
/*
 * ARM-OABI variant: copy a guest struct flock64 (OABI layout, which
 * differs in padding/alignment from EABI) into a host struct flock64.
 * NOTE(review): the l_type declaration and its negative-value check
 * appear on lines dropped by the extraction.
 */
static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
                                                   abi_ulong target_flock_addr)
    struct target_oabi_flock64 *target_fl;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
6125 static inline abi_long
copy_to_user_oabi_flock64(abi_ulong target_flock_addr
,
6126 const struct flock64
*fl
)
6128 struct target_oabi_flock64
*target_fl
;
6131 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6132 return -TARGET_EFAULT
;
6135 l_type
= host_to_target_flock(fl
->l_type
);
6136 __put_user(l_type
, &target_fl
->l_type
);
6137 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6138 __put_user(fl
->l_start
, &target_fl
->l_start
);
6139 __put_user(fl
->l_len
, &target_fl
->l_len
);
6140 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6141 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6146 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
6147 abi_ulong target_flock_addr
)
6149 struct target_flock64
*target_fl
;
6152 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6153 return -TARGET_EFAULT
;
6156 __get_user(l_type
, &target_fl
->l_type
);
6157 l_type
= target_to_host_flock(l_type
);
6161 fl
->l_type
= l_type
;
6162 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6163 __get_user(fl
->l_start
, &target_fl
->l_start
);
6164 __get_user(fl
->l_len
, &target_fl
->l_len
);
6165 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6166 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6170 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
6171 const struct flock64
*fl
)
6173 struct target_flock64
*target_fl
;
6176 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6177 return -TARGET_EFAULT
;
6180 l_type
= host_to_target_flock(fl
->l_type
);
6181 __put_user(l_type
, &target_fl
->l_type
);
6182 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6183 __put_user(fl
->l_start
, &target_fl
->l_start
);
6184 __put_user(fl
->l_len
, &target_fl
->l_len
);
6185 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6186 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6190 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
6192 struct flock64 fl64
;
6194 struct f_owner_ex fox
;
6195 struct target_f_owner_ex
*target_fox
;
6198 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
6200 if (host_cmd
== -TARGET_EINVAL
)
6204 case TARGET_F_GETLK
:
6205 ret
= copy_from_user_flock(&fl64
, arg
);
6209 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6211 ret
= copy_to_user_flock(arg
, &fl64
);
6215 case TARGET_F_SETLK
:
6216 case TARGET_F_SETLKW
:
6217 ret
= copy_from_user_flock(&fl64
, arg
);
6221 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6224 case TARGET_F_GETLK64
:
6225 ret
= copy_from_user_flock64(&fl64
, arg
);
6229 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6231 ret
= copy_to_user_flock64(arg
, &fl64
);
6234 case TARGET_F_SETLK64
:
6235 case TARGET_F_SETLKW64
:
6236 ret
= copy_from_user_flock64(&fl64
, arg
);
6240 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6243 case TARGET_F_GETFL
:
6244 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6246 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
6250 case TARGET_F_SETFL
:
6251 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
6252 target_to_host_bitmask(arg
,
6257 case TARGET_F_GETOWN_EX
:
6258 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6260 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
6261 return -TARGET_EFAULT
;
6262 target_fox
->type
= tswap32(fox
.type
);
6263 target_fox
->pid
= tswap32(fox
.pid
);
6264 unlock_user_struct(target_fox
, arg
, 1);
6270 case TARGET_F_SETOWN_EX
:
6271 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
6272 return -TARGET_EFAULT
;
6273 fox
.type
= tswap32(target_fox
->type
);
6274 fox
.pid
= tswap32(target_fox
->pid
);
6275 unlock_user_struct(target_fox
, arg
, 0);
6276 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6280 case TARGET_F_SETOWN
:
6281 case TARGET_F_GETOWN
:
6282 case TARGET_F_SETSIG
:
6283 case TARGET_F_GETSIG
:
6284 case TARGET_F_SETLEASE
:
6285 case TARGET_F_GETLEASE
:
6286 case TARGET_F_SETPIPE_SZ
:
6287 case TARGET_F_GETPIPE_SZ
:
6288 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6292 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
#ifdef USE_UID16
/* 16-bit UID targets: clamp 32-bit host ids into the legacy 16-bit range
 * (overflow maps to 65534, matching the kernel's behaviour) and widen
 * guest-supplied 16-bit ids, preserving the -1 "no change" sentinel. */
static inline int high2lowuid(int uid)
{
    if (uid > 65535) {
        return 65534;
    } else {
        return uid;
    }
}

static inline int high2lowgid(int gid)
{
    if (gid > 65535) {
        return 65534;
    } else {
        return gid;
    }
}

static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1) {
        return -1;
    } else {
        return uid;
    }
}

static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1) {
        return -1;
    } else {
        return gid;
    }
}

static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */

/* 32-bit UID targets: all conversions are the identity. */
static inline int high2lowuid(int uid)
{
    return uid;
}

static inline int high2lowgid(int gid)
{
    return gid;
}

static inline int low2highuid(int uid)
{
    return uid;
}

static inline int low2highgid(int gid)
{
    return gid;
}

static inline int tswapid(int id)
{
    return id;
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
6364 /* We must do direct syscalls for setting UID/GID, because we want to
6365 * implement the Linux system call semantics of "change only for this thread",
6366 * not the libc/POSIX semantics of "change for all threads in process".
6367 * (See http://ewontfix.com/17/ for more details.)
6368 * We use the 32-bit version of the syscalls if present; if it is not
6369 * then either the host architecture supports 32-bit UIDs natively with
6370 * the standard syscall, or the 16-bit UID is the best we can do.
6372 #ifdef __NR_setuid32
6373 #define __NR_sys_setuid __NR_setuid32
6375 #define __NR_sys_setuid __NR_setuid
6377 #ifdef __NR_setgid32
6378 #define __NR_sys_setgid __NR_setgid32
6380 #define __NR_sys_setgid __NR_setgid
6382 #ifdef __NR_setresuid32
6383 #define __NR_sys_setresuid __NR_setresuid32
6385 #define __NR_sys_setresuid __NR_setresuid
6387 #ifdef __NR_setresgid32
6388 #define __NR_sys_setresgid __NR_setresgid32
6390 #define __NR_sys_setresgid __NR_setresgid
/* Raw syscall wrappers (see the comment above): invoked directly so the
 * credential change applies only to the calling thread, not process-wide
 * as the libc wrappers would make it. */
_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6398 void syscall_init(void)
6401 const argtype
*arg_type
;
6405 thunk_init(STRUCT_MAX
);
6407 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6408 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6409 #include "syscall_types.h"
6411 #undef STRUCT_SPECIAL
6413 /* Build target_to_host_errno_table[] table from
6414 * host_to_target_errno_table[]. */
6415 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
6416 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
6419 /* we patch the ioctl size if necessary. We rely on the fact that
6420 no ioctl has all the bits at '1' in the size field */
6422 while (ie
->target_cmd
!= 0) {
6423 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
6424 TARGET_IOC_SIZEMASK
) {
6425 arg_type
= ie
->arg_type
;
6426 if (arg_type
[0] != TYPE_PTR
) {
6427 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
6432 size
= thunk_type_size(arg_type
, 0);
6433 ie
->target_cmd
= (ie
->target_cmd
&
6434 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
6435 (size
<< TARGET_IOC_SIZESHIFT
);
6438 /* automatic consistency check if same arch */
6439 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6440 (defined(__x86_64__) && defined(TARGET_X86_64))
6441 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
6442 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6443 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#if TARGET_ABI_BITS == 32
/* Assemble a 64-bit file offset from the two guest registers that carry
 * its halves; which register holds the high word depends on guest
 * endianness. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABIs pass the whole offset in one register; the second argument
 * is ignored. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* truncate64: some ABIs require 64-bit register pairs to be aligned, in
 * which case the offset halves arrive one register later. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* ftruncate64: same register-pair alignment handling as truncate64. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
6494 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
6495 abi_ulong target_addr
)
6497 struct target_itimerspec
*target_itspec
;
6499 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
6500 return -TARGET_EFAULT
;
6503 host_itspec
->it_interval
.tv_sec
=
6504 tswapal(target_itspec
->it_interval
.tv_sec
);
6505 host_itspec
->it_interval
.tv_nsec
=
6506 tswapal(target_itspec
->it_interval
.tv_nsec
);
6507 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
6508 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
6510 unlock_user_struct(target_itspec
, target_addr
, 1);
6514 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
6515 struct itimerspec
*host_its
)
6517 struct target_itimerspec
*target_itspec
;
6519 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
6520 return -TARGET_EFAULT
;
6523 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
6524 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
6526 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
6527 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
6529 unlock_user_struct(target_itspec
, target_addr
, 0);
6533 static inline abi_long
target_to_host_timex(struct timex
*host_tx
,
6534 abi_long target_addr
)
6536 struct target_timex
*target_tx
;
6538 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
6539 return -TARGET_EFAULT
;
6542 __get_user(host_tx
->modes
, &target_tx
->modes
);
6543 __get_user(host_tx
->offset
, &target_tx
->offset
);
6544 __get_user(host_tx
->freq
, &target_tx
->freq
);
6545 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6546 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
6547 __get_user(host_tx
->status
, &target_tx
->status
);
6548 __get_user(host_tx
->constant
, &target_tx
->constant
);
6549 __get_user(host_tx
->precision
, &target_tx
->precision
);
6550 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
6551 __get_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
6552 __get_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
6553 __get_user(host_tx
->tick
, &target_tx
->tick
);
6554 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
6555 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
6556 __get_user(host_tx
->shift
, &target_tx
->shift
);
6557 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
6558 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
6559 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
6560 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
6561 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
6562 __get_user(host_tx
->tai
, &target_tx
->tai
);
6564 unlock_user_struct(target_tx
, target_addr
, 0);
6568 static inline abi_long
host_to_target_timex(abi_long target_addr
,
6569 struct timex
*host_tx
)
6571 struct target_timex
*target_tx
;
6573 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
6574 return -TARGET_EFAULT
;
6577 __put_user(host_tx
->modes
, &target_tx
->modes
);
6578 __put_user(host_tx
->offset
, &target_tx
->offset
);
6579 __put_user(host_tx
->freq
, &target_tx
->freq
);
6580 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6581 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
6582 __put_user(host_tx
->status
, &target_tx
->status
);
6583 __put_user(host_tx
->constant
, &target_tx
->constant
);
6584 __put_user(host_tx
->precision
, &target_tx
->precision
);
6585 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
6586 __put_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
6587 __put_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
6588 __put_user(host_tx
->tick
, &target_tx
->tick
);
6589 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
6590 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
6591 __put_user(host_tx
->shift
, &target_tx
->shift
);
6592 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
6593 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
6594 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
6595 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
6596 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
6597 __put_user(host_tx
->tai
, &target_tx
->tai
);
6599 unlock_user_struct(target_tx
, target_addr
, 1);
6604 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
6605 abi_ulong target_addr
)
6607 struct target_sigevent
*target_sevp
;
6609 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
6610 return -TARGET_EFAULT
;
6613 /* This union is awkward on 64 bit systems because it has a 32 bit
6614 * integer and a pointer in it; we follow the conversion approach
6615 * used for handling sigval types in signal.c so the guest should get
6616 * the correct value back even if we did a 64 bit byteswap and it's
6617 * using the 32 bit integer.
6619 host_sevp
->sigev_value
.sival_ptr
=
6620 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
6621 host_sevp
->sigev_signo
=
6622 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
6623 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
6624 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
6626 unlock_user_struct(target_sevp
, target_addr
, 1);
#if defined(TARGET_NR_mlockall)
/* Translate the guest's mlockall flag bits into the host's MCL_* bits. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }

    return result;
}
#endif
#if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
     defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
     defined(TARGET_NR_newfstatat))
/* Copy a host "struct stat" into the guest's stat64 representation at
 * target_addr.  ARM EABI guests use a distinct packed layout; other
 * targets use target_stat64 (or target_stat when no 64-bit variant
 * exists).  Returns 0 on success or -TARGET_EFAULT. */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) {
            return -TARGET_EFAULT;
        }
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) {
            return -TARGET_EFAULT;
        }
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif
#if defined(TARGET_NR_statx) && defined(__NR_statx)
/* Copy a statx result (already in target_statx layout on the host side)
 * into guest memory at target_addr, byte-swapping every field.
 * Returns 0 on success or -TARGET_EFAULT. */
static inline abi_long host_to_target_statx(struct target_statx *host_stx,
                                            abi_ulong target_addr)
{
    struct target_statx *target_stx;

    if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    memset(target_stx, 0, sizeof(*target_stx));

    __put_user(host_stx->stx_mask, &target_stx->stx_mask);
    __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
    __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
    __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
    __put_user(host_stx->stx_uid, &target_stx->stx_uid);
    __put_user(host_stx->stx_gid, &target_stx->stx_gid);
    __put_user(host_stx->stx_mode, &target_stx->stx_mode);
    __put_user(host_stx->stx_ino, &target_stx->stx_ino);
    __put_user(host_stx->stx_size, &target_stx->stx_size);
    __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
    __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
    __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
    __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
    __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
    __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
    __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
    __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
    __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
    __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
    __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
    __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
    __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
    __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);

    unlock_user_struct(target_stx, target_addr, 1);
    return 0;
}
#endif
6764 /* ??? Using host futex calls even when target atomic operations
6765 are not really atomic probably breaks things. However implementing
6766 futexes locally would make futexes shared between multiple processes
6767 tricky. However they're probably useless because guest atomic
6768 operations won't work either. */
6769 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
6770 target_ulong uaddr2
, int val3
)
6772 struct timespec ts
, *pts
;
6775 /* ??? We assume FUTEX_* constants are the same on both host
6777 #ifdef FUTEX_CMD_MASK
6778 base_op
= op
& FUTEX_CMD_MASK
;
6784 case FUTEX_WAIT_BITSET
:
6787 target_to_host_timespec(pts
, timeout
);
6791 return get_errno(safe_futex(g2h(uaddr
), op
, tswap32(val
),
6794 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
6796 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
6798 case FUTEX_CMP_REQUEUE
:
6800 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6801 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6802 But the prototype takes a `struct timespec *'; insert casts
6803 to satisfy the compiler. We do not need to tswap TIMEOUT
6804 since it's not compared to guest memory. */
6805 pts
= (struct timespec
*)(uintptr_t) timeout
;
6806 return get_errno(safe_futex(g2h(uaddr
), op
, val
, pts
,
6808 (base_op
== FUTEX_CMP_REQUEUE
6812 return -TARGET_ENOSYS
;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate name_to_handle_at(2): the guest supplies the handle_bytes size
 * up front; we build a host file_handle, call the host syscall, and copy
 * the opaque handle plus mount id back to guest memory. */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate open_by_handle_at(2): duplicate the guest's file_handle into
 * host memory (fixing the byte order of its header fields), translate the
 * open flags, and invoke the host syscall. */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
#if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
/* Emulate signalfd4(2): validate the flag set, convert the guest signal
 * mask and flags, create the host signalfd, and register the fd so its
 * reads get translated back to the guest layout. */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
#endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
6948 static int open_self_cmdline(void *cpu_env
, int fd
)
6950 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
6951 struct linux_binprm
*bprm
= ((TaskState
*)cpu
->opaque
)->bprm
;
6954 for (i
= 0; i
< bprm
->argc
; i
++) {
6955 size_t len
= strlen(bprm
->argv
[i
]) + 1;
6957 if (write(fd
, bprm
->argv
[i
], len
) != len
) {
6965 static int open_self_maps(void *cpu_env
, int fd
)
6967 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
6968 TaskState
*ts
= cpu
->opaque
;
6974 fp
= fopen("/proc/self/maps", "r");
6979 while ((read
= getline(&line
, &len
, fp
)) != -1) {
6980 int fields
, dev_maj
, dev_min
, inode
;
6981 uint64_t min
, max
, offset
;
6982 char flag_r
, flag_w
, flag_x
, flag_p
;
6983 char path
[512] = "";
6984 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
6985 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
6986 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
6988 if ((fields
< 10) || (fields
> 11)) {
6991 if (h2g_valid(min
)) {
6992 int flags
= page_get_flags(h2g(min
));
6993 max
= h2g_valid(max
- 1) ? max
: (uintptr_t)g2h(GUEST_ADDR_MAX
) + 1;
6994 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
6997 if (h2g(min
) == ts
->info
->stack_limit
) {
6998 pstrcpy(path
, sizeof(path
), " [stack]");
7000 dprintf(fd
, TARGET_ABI_FMT_ptr
"-" TARGET_ABI_FMT_ptr
7001 " %c%c%c%c %08" PRIx64
" %02x:%02x %d %s%s\n",
7002 h2g(min
), h2g(max
- 1) + 1, flag_r
, flag_w
,
7003 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
7004 path
[0] ? " " : "", path
);
7014 static int open_self_stat(void *cpu_env
, int fd
)
7016 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7017 TaskState
*ts
= cpu
->opaque
;
7018 abi_ulong start_stack
= ts
->info
->start_stack
;
7021 for (i
= 0; i
< 44; i
++) {
7029 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
7030 } else if (i
== 1) {
7032 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
7033 } else if (i
== 27) {
7036 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
7038 /* for the rest, there is MasterCard */
7039 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
7043 if (write(fd
, buf
, len
) != len
) {
7051 static int open_self_auxv(void *cpu_env
, int fd
)
7053 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7054 TaskState
*ts
= cpu
->opaque
;
7055 abi_ulong auxv
= ts
->info
->saved_auxv
;
7056 abi_ulong len
= ts
->info
->auxv_len
;
7060 * Auxiliary vector is stored in target process stack.
7061 * read in whole auxv vector and copy it to file
7063 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
7067 r
= write(fd
, ptr
, len
);
7074 lseek(fd
, 0, SEEK_SET
);
7075 unlock_user(ptr
, auxv
, len
);
/* Return 1 if filename names this process's own proc entry for "entry",
 * i.e. "/proc/self/<entry>" or "/proc/<our pid>/<entry>"; 0 otherwise. */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
    defined(TARGET_SPARC) || defined(TARGET_M68K)
/* Exact-path matcher used by the fake /proc entries table. */
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}
#endif
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Synthesise /proc/net/route for a cross-endian guest by byte-swapping
 * the address columns of the host's table.  Returns 0 or -1.
 *
 * Fixes: the header getline() result was unchecked, so an empty file
 * passed a NULL line to dprintf("%s", ...); and the "%s" scan of the
 * interface name was unbounded, allowing a long name to overflow
 * iface[16].  The scan is now bounded to 15 chars + NUL.
 */
static int open_net_route(void *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -1;
    }

    /* read header */

    read = getline(&line, &len, fp);
    if (read < 0) {
        free(line);
        fclose(fp);
        return -1;
    }
    dprintf(fd, "%s", line);

    /* read routes */

    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        int fields;

        fields = sscanf(line,
                        "%15s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                        iface, &dest, &gw, &flags, &refcnt, &use, &metric,
                        &mask, &mtu, &window, &irtt);
        if (fields != 11) {
            continue;
        }
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
#endif
#if defined(TARGET_SPARC)
/* Minimal fake /proc/cpuinfo for SPARC guests. */
static int open_cpuinfo(void *cpu_env, int fd)
{
    dprintf(fd, "type\t\t: sun4u\n");
    return 0;
}
#endif
#if defined(TARGET_M68K)
/* Minimal fake /proc/hardware for m68k guests. */
static int open_hardware(void *cpu_env, int fd)
{
    dprintf(fd, "Model:\t\tqemu-m68k\n");
    return 0;
}
#endif
7174 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
7177 const char *filename
;
7178 int (*fill
)(void *cpu_env
, int fd
);
7179 int (*cmp
)(const char *s1
, const char *s2
);
7181 const struct fake_open
*fake_open
;
7182 static const struct fake_open fakes
[] = {
7183 { "maps", open_self_maps
, is_proc_myself
},
7184 { "stat", open_self_stat
, is_proc_myself
},
7185 { "auxv", open_self_auxv
, is_proc_myself
},
7186 { "cmdline", open_self_cmdline
, is_proc_myself
},
7187 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7188 { "/proc/net/route", open_net_route
, is_proc
},
7190 #if defined(TARGET_SPARC)
7191 { "/proc/cpuinfo", open_cpuinfo
, is_proc
},
7193 #if defined(TARGET_M68K)
7194 { "/proc/hardware", open_hardware
, is_proc
},
7196 { NULL
, NULL
, NULL
}
7199 if (is_proc_myself(pathname
, "exe")) {
7200 int execfd
= qemu_getauxval(AT_EXECFD
);
7201 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
7204 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
7205 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
7210 if (fake_open
->filename
) {
7212 char filename
[PATH_MAX
];
7215 /* create temporary file to map stat to */
7216 tmpdir
= getenv("TMPDIR");
7219 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
7220 fd
= mkstemp(filename
);
7226 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
7232 lseek(fd
, 0, SEEK_SET
);
7237 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
7240 #define TIMER_MAGIC 0x0caf0000
7241 #define TIMER_MAGIC_MASK 0xffff0000
7243 /* Convert QEMU provided timer ID back to internal 16bit index format */
7244 static target_timer_t
get_timer_id(abi_long arg
)
7246 target_timer_t timerid
= arg
;
7248 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
7249 return -TARGET_EINVAL
;
7254 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
7255 return -TARGET_EINVAL
;
7261 static int target_to_host_cpu_mask(unsigned long *host_mask
,
7263 abi_ulong target_addr
,
7266 unsigned target_bits
= sizeof(abi_ulong
) * 8;
7267 unsigned host_bits
= sizeof(*host_mask
) * 8;
7268 abi_ulong
*target_mask
;
7271 assert(host_size
>= target_size
);
7273 target_mask
= lock_user(VERIFY_READ
, target_addr
, target_size
, 1);
7275 return -TARGET_EFAULT
;
7277 memset(host_mask
, 0, host_size
);
7279 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
7280 unsigned bit
= i
* target_bits
;
7283 __get_user(val
, &target_mask
[i
]);
7284 for (j
= 0; j
< target_bits
; j
++, bit
++) {
7285 if (val
& (1UL << j
)) {
7286 host_mask
[bit
/ host_bits
] |= 1UL << (bit
% host_bits
);
7291 unlock_user(target_mask
, target_addr
, 0);
7295 static int host_to_target_cpu_mask(const unsigned long *host_mask
,
7297 abi_ulong target_addr
,
7300 unsigned target_bits
= sizeof(abi_ulong
) * 8;
7301 unsigned host_bits
= sizeof(*host_mask
) * 8;
7302 abi_ulong
*target_mask
;
7305 assert(host_size
>= target_size
);
7307 target_mask
= lock_user(VERIFY_WRITE
, target_addr
, target_size
, 0);
7309 return -TARGET_EFAULT
;
7312 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
7313 unsigned bit
= i
* target_bits
;
7316 for (j
= 0; j
< target_bits
; j
++, bit
++) {
7317 if (host_mask
[bit
/ host_bits
] & (1UL << (bit
% host_bits
))) {
7321 __put_user(val
, &target_mask
[i
]);
7324 unlock_user(target_mask
, target_addr
, target_size
);
7328 /* This is an internal helper for do_syscall so that it is easier
7329 * to have a single return point, so that actions, such as logging
7330 * of syscall results, can be performed.
7331 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7333 static abi_long
do_syscall1(void *cpu_env
, int num
, abi_long arg1
,
7334 abi_long arg2
, abi_long arg3
, abi_long arg4
,
7335 abi_long arg5
, abi_long arg6
, abi_long arg7
,
7338 CPUState
*cpu
= env_cpu(cpu_env
);
7340 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7341 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7342 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7343 || defined(TARGET_NR_statx)
7346 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7347 || defined(TARGET_NR_fstatfs)
7353 case TARGET_NR_exit
:
7354 /* In old applications this may be used to implement _exit(2).
7355 However in threaded applictions it is used for thread termination,
7356 and _exit_group is used for application termination.
7357 Do thread termination if we have more then one thread. */
7359 if (block_signals()) {
7360 return -TARGET_ERESTARTSYS
;
7365 if (CPU_NEXT(first_cpu
)) {
7368 /* Remove the CPU from the list. */
7369 QTAILQ_REMOVE_RCU(&cpus
, cpu
, node
);
7374 if (ts
->child_tidptr
) {
7375 put_user_u32(0, ts
->child_tidptr
);
7376 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
7380 object_unref(OBJECT(cpu
));
7382 rcu_unregister_thread();
7387 preexit_cleanup(cpu_env
, arg1
);
7389 return 0; /* avoid warning */
7390 case TARGET_NR_read
:
7391 if (arg2
== 0 && arg3
== 0) {
7392 return get_errno(safe_read(arg1
, 0, 0));
7394 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
7395 return -TARGET_EFAULT
;
7396 ret
= get_errno(safe_read(arg1
, p
, arg3
));
7398 fd_trans_host_to_target_data(arg1
)) {
7399 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
7401 unlock_user(p
, arg2
, ret
);
7404 case TARGET_NR_write
:
7405 if (arg2
== 0 && arg3
== 0) {
7406 return get_errno(safe_write(arg1
, 0, 0));
7408 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
7409 return -TARGET_EFAULT
;
7410 if (fd_trans_target_to_host_data(arg1
)) {
7411 void *copy
= g_malloc(arg3
);
7412 memcpy(copy
, p
, arg3
);
7413 ret
= fd_trans_target_to_host_data(arg1
)(copy
, arg3
);
7415 ret
= get_errno(safe_write(arg1
, copy
, ret
));
7419 ret
= get_errno(safe_write(arg1
, p
, arg3
));
7421 unlock_user(p
, arg2
, 0);
7424 #ifdef TARGET_NR_open
7425 case TARGET_NR_open
:
7426 if (!(p
= lock_user_string(arg1
)))
7427 return -TARGET_EFAULT
;
7428 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
7429 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
7431 fd_trans_unregister(ret
);
7432 unlock_user(p
, arg1
, 0);
7435 case TARGET_NR_openat
:
7436 if (!(p
= lock_user_string(arg2
)))
7437 return -TARGET_EFAULT
;
7438 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
7439 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
7441 fd_trans_unregister(ret
);
7442 unlock_user(p
, arg2
, 0);
7444 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7445 case TARGET_NR_name_to_handle_at
:
7446 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
7449 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7450 case TARGET_NR_open_by_handle_at
:
7451 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
7452 fd_trans_unregister(ret
);
7455 case TARGET_NR_close
:
7456 fd_trans_unregister(arg1
);
7457 return get_errno(close(arg1
));
7460 return do_brk(arg1
);
7461 #ifdef TARGET_NR_fork
7462 case TARGET_NR_fork
:
7463 return get_errno(do_fork(cpu_env
, TARGET_SIGCHLD
, 0, 0, 0, 0));
7465 #ifdef TARGET_NR_waitpid
7466 case TARGET_NR_waitpid
:
7469 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
7470 if (!is_error(ret
) && arg2
&& ret
7471 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
7472 return -TARGET_EFAULT
;
7476 #ifdef TARGET_NR_waitid
7477 case TARGET_NR_waitid
:
7481 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
7482 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
7483 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
7484 return -TARGET_EFAULT
;
7485 host_to_target_siginfo(p
, &info
);
7486 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
7491 #ifdef TARGET_NR_creat /* not on alpha */
7492 case TARGET_NR_creat
:
7493 if (!(p
= lock_user_string(arg1
)))
7494 return -TARGET_EFAULT
;
7495 ret
= get_errno(creat(p
, arg2
));
7496 fd_trans_unregister(ret
);
7497 unlock_user(p
, arg1
, 0);
7500 #ifdef TARGET_NR_link
7501 case TARGET_NR_link
:
7504 p
= lock_user_string(arg1
);
7505 p2
= lock_user_string(arg2
);
7507 ret
= -TARGET_EFAULT
;
7509 ret
= get_errno(link(p
, p2
));
7510 unlock_user(p2
, arg2
, 0);
7511 unlock_user(p
, arg1
, 0);
7515 #if defined(TARGET_NR_linkat)
7516 case TARGET_NR_linkat
:
7520 return -TARGET_EFAULT
;
7521 p
= lock_user_string(arg2
);
7522 p2
= lock_user_string(arg4
);
7524 ret
= -TARGET_EFAULT
;
7526 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
7527 unlock_user(p
, arg2
, 0);
7528 unlock_user(p2
, arg4
, 0);
7532 #ifdef TARGET_NR_unlink
7533 case TARGET_NR_unlink
:
7534 if (!(p
= lock_user_string(arg1
)))
7535 return -TARGET_EFAULT
;
7536 ret
= get_errno(unlink(p
));
7537 unlock_user(p
, arg1
, 0);
7540 #if defined(TARGET_NR_unlinkat)
7541 case TARGET_NR_unlinkat
:
7542 if (!(p
= lock_user_string(arg2
)))
7543 return -TARGET_EFAULT
;
7544 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
7545 unlock_user(p
, arg2
, 0);
7548 case TARGET_NR_execve
:
7550 char **argp
, **envp
;
7553 abi_ulong guest_argp
;
7554 abi_ulong guest_envp
;
7561 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
7562 if (get_user_ual(addr
, gp
))
7563 return -TARGET_EFAULT
;
7570 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
7571 if (get_user_ual(addr
, gp
))
7572 return -TARGET_EFAULT
;
7578 argp
= g_new0(char *, argc
+ 1);
7579 envp
= g_new0(char *, envc
+ 1);
7581 for (gp
= guest_argp
, q
= argp
; gp
;
7582 gp
+= sizeof(abi_ulong
), q
++) {
7583 if (get_user_ual(addr
, gp
))
7587 if (!(*q
= lock_user_string(addr
)))
7589 total_size
+= strlen(*q
) + 1;
7593 for (gp
= guest_envp
, q
= envp
; gp
;
7594 gp
+= sizeof(abi_ulong
), q
++) {
7595 if (get_user_ual(addr
, gp
))
7599 if (!(*q
= lock_user_string(addr
)))
7601 total_size
+= strlen(*q
) + 1;
7605 if (!(p
= lock_user_string(arg1
)))
7607 /* Although execve() is not an interruptible syscall it is
7608 * a special case where we must use the safe_syscall wrapper:
7609 * if we allow a signal to happen before we make the host
7610 * syscall then we will 'lose' it, because at the point of
7611 * execve the process leaves QEMU's control. So we use the
7612 * safe syscall wrapper to ensure that we either take the
7613 * signal as a guest signal, or else it does not happen
7614 * before the execve completes and makes it the other
7615 * program's problem.
7617 ret
= get_errno(safe_execve(p
, argp
, envp
));
7618 unlock_user(p
, arg1
, 0);
7623 ret
= -TARGET_EFAULT
;
7626 for (gp
= guest_argp
, q
= argp
; *q
;
7627 gp
+= sizeof(abi_ulong
), q
++) {
7628 if (get_user_ual(addr
, gp
)
7631 unlock_user(*q
, addr
, 0);
7633 for (gp
= guest_envp
, q
= envp
; *q
;
7634 gp
+= sizeof(abi_ulong
), q
++) {
7635 if (get_user_ual(addr
, gp
)
7638 unlock_user(*q
, addr
, 0);
7645 case TARGET_NR_chdir
:
7646 if (!(p
= lock_user_string(arg1
)))
7647 return -TARGET_EFAULT
;
7648 ret
= get_errno(chdir(p
));
7649 unlock_user(p
, arg1
, 0);
7651 #ifdef TARGET_NR_time
7652 case TARGET_NR_time
:
7655 ret
= get_errno(time(&host_time
));
7658 && put_user_sal(host_time
, arg1
))
7659 return -TARGET_EFAULT
;
7663 #ifdef TARGET_NR_mknod
7664 case TARGET_NR_mknod
:
7665 if (!(p
= lock_user_string(arg1
)))
7666 return -TARGET_EFAULT
;
7667 ret
= get_errno(mknod(p
, arg2
, arg3
));
7668 unlock_user(p
, arg1
, 0);
7671 #if defined(TARGET_NR_mknodat)
7672 case TARGET_NR_mknodat
:
7673 if (!(p
= lock_user_string(arg2
)))
7674 return -TARGET_EFAULT
;
7675 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
7676 unlock_user(p
, arg2
, 0);
7679 #ifdef TARGET_NR_chmod
7680 case TARGET_NR_chmod
:
7681 if (!(p
= lock_user_string(arg1
)))
7682 return -TARGET_EFAULT
;
7683 ret
= get_errno(chmod(p
, arg2
));
7684 unlock_user(p
, arg1
, 0);
7687 #ifdef TARGET_NR_lseek
7688 case TARGET_NR_lseek
:
7689 return get_errno(lseek(arg1
, arg2
, arg3
));
7691 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7692 /* Alpha specific */
7693 case TARGET_NR_getxpid
:
7694 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
7695 return get_errno(getpid());
7697 #ifdef TARGET_NR_getpid
7698 case TARGET_NR_getpid
:
7699 return get_errno(getpid());
7701 case TARGET_NR_mount
:
7703 /* need to look at the data field */
7707 p
= lock_user_string(arg1
);
7709 return -TARGET_EFAULT
;
7715 p2
= lock_user_string(arg2
);
7718 unlock_user(p
, arg1
, 0);
7720 return -TARGET_EFAULT
;
7724 p3
= lock_user_string(arg3
);
7727 unlock_user(p
, arg1
, 0);
7729 unlock_user(p2
, arg2
, 0);
7730 return -TARGET_EFAULT
;
7736 /* FIXME - arg5 should be locked, but it isn't clear how to
7737 * do that since it's not guaranteed to be a NULL-terminated
7741 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
7743 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
7745 ret
= get_errno(ret
);
7748 unlock_user(p
, arg1
, 0);
7750 unlock_user(p2
, arg2
, 0);
7752 unlock_user(p3
, arg3
, 0);
7756 #ifdef TARGET_NR_umount
7757 case TARGET_NR_umount
:
7758 if (!(p
= lock_user_string(arg1
)))
7759 return -TARGET_EFAULT
;
7760 ret
= get_errno(umount(p
));
7761 unlock_user(p
, arg1
, 0);
7764 #ifdef TARGET_NR_stime /* not on alpha */
7765 case TARGET_NR_stime
:
7769 if (get_user_sal(ts
.tv_sec
, arg1
)) {
7770 return -TARGET_EFAULT
;
7772 return get_errno(clock_settime(CLOCK_REALTIME
, &ts
));
7775 #ifdef TARGET_NR_alarm /* not on alpha */
7776 case TARGET_NR_alarm
:
7779 #ifdef TARGET_NR_pause /* not on alpha */
7780 case TARGET_NR_pause
:
7781 if (!block_signals()) {
7782 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
7784 return -TARGET_EINTR
;
7786 #ifdef TARGET_NR_utime
7787 case TARGET_NR_utime
:
7789 struct utimbuf tbuf
, *host_tbuf
;
7790 struct target_utimbuf
*target_tbuf
;
7792 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
7793 return -TARGET_EFAULT
;
7794 tbuf
.actime
= tswapal(target_tbuf
->actime
);
7795 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
7796 unlock_user_struct(target_tbuf
, arg2
, 0);
7801 if (!(p
= lock_user_string(arg1
)))
7802 return -TARGET_EFAULT
;
7803 ret
= get_errno(utime(p
, host_tbuf
));
7804 unlock_user(p
, arg1
, 0);
7808 #ifdef TARGET_NR_utimes
7809 case TARGET_NR_utimes
:
7811 struct timeval
*tvp
, tv
[2];
7813 if (copy_from_user_timeval(&tv
[0], arg2
)
7814 || copy_from_user_timeval(&tv
[1],
7815 arg2
+ sizeof(struct target_timeval
)))
7816 return -TARGET_EFAULT
;
7821 if (!(p
= lock_user_string(arg1
)))
7822 return -TARGET_EFAULT
;
7823 ret
= get_errno(utimes(p
, tvp
));
7824 unlock_user(p
, arg1
, 0);
7828 #if defined(TARGET_NR_futimesat)
7829 case TARGET_NR_futimesat
:
7831 struct timeval
*tvp
, tv
[2];
7833 if (copy_from_user_timeval(&tv
[0], arg3
)
7834 || copy_from_user_timeval(&tv
[1],
7835 arg3
+ sizeof(struct target_timeval
)))
7836 return -TARGET_EFAULT
;
7841 if (!(p
= lock_user_string(arg2
))) {
7842 return -TARGET_EFAULT
;
7844 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
7845 unlock_user(p
, arg2
, 0);
7849 #ifdef TARGET_NR_access
7850 case TARGET_NR_access
:
7851 if (!(p
= lock_user_string(arg1
))) {
7852 return -TARGET_EFAULT
;
7854 ret
= get_errno(access(path(p
), arg2
));
7855 unlock_user(p
, arg1
, 0);
7858 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7859 case TARGET_NR_faccessat
:
7860 if (!(p
= lock_user_string(arg2
))) {
7861 return -TARGET_EFAULT
;
7863 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
7864 unlock_user(p
, arg2
, 0);
7867 #ifdef TARGET_NR_nice /* not on alpha */
7868 case TARGET_NR_nice
:
7869 return get_errno(nice(arg1
));
7871 case TARGET_NR_sync
:
7874 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7875 case TARGET_NR_syncfs
:
7876 return get_errno(syncfs(arg1
));
7878 case TARGET_NR_kill
:
7879 return get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
7880 #ifdef TARGET_NR_rename
7881 case TARGET_NR_rename
:
7884 p
= lock_user_string(arg1
);
7885 p2
= lock_user_string(arg2
);
7887 ret
= -TARGET_EFAULT
;
7889 ret
= get_errno(rename(p
, p2
));
7890 unlock_user(p2
, arg2
, 0);
7891 unlock_user(p
, arg1
, 0);
7895 #if defined(TARGET_NR_renameat)
7896 case TARGET_NR_renameat
:
7899 p
= lock_user_string(arg2
);
7900 p2
= lock_user_string(arg4
);
7902 ret
= -TARGET_EFAULT
;
7904 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
7905 unlock_user(p2
, arg4
, 0);
7906 unlock_user(p
, arg2
, 0);
7910 #if defined(TARGET_NR_renameat2)
7911 case TARGET_NR_renameat2
:
7914 p
= lock_user_string(arg2
);
7915 p2
= lock_user_string(arg4
);
7917 ret
= -TARGET_EFAULT
;
7919 ret
= get_errno(sys_renameat2(arg1
, p
, arg3
, p2
, arg5
));
7921 unlock_user(p2
, arg4
, 0);
7922 unlock_user(p
, arg2
, 0);
7926 #ifdef TARGET_NR_mkdir
7927 case TARGET_NR_mkdir
:
7928 if (!(p
= lock_user_string(arg1
)))
7929 return -TARGET_EFAULT
;
7930 ret
= get_errno(mkdir(p
, arg2
));
7931 unlock_user(p
, arg1
, 0);
7934 #if defined(TARGET_NR_mkdirat)
7935 case TARGET_NR_mkdirat
:
7936 if (!(p
= lock_user_string(arg2
)))
7937 return -TARGET_EFAULT
;
7938 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
7939 unlock_user(p
, arg2
, 0);
7942 #ifdef TARGET_NR_rmdir
7943 case TARGET_NR_rmdir
:
7944 if (!(p
= lock_user_string(arg1
)))
7945 return -TARGET_EFAULT
;
7946 ret
= get_errno(rmdir(p
));
7947 unlock_user(p
, arg1
, 0);
7951 ret
= get_errno(dup(arg1
));
7953 fd_trans_dup(arg1
, ret
);
7956 #ifdef TARGET_NR_pipe
7957 case TARGET_NR_pipe
:
7958 return do_pipe(cpu_env
, arg1
, 0, 0);
7960 #ifdef TARGET_NR_pipe2
7961 case TARGET_NR_pipe2
:
7962 return do_pipe(cpu_env
, arg1
,
7963 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
7965 case TARGET_NR_times
:
7967 struct target_tms
*tmsp
;
7969 ret
= get_errno(times(&tms
));
7971 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
7973 return -TARGET_EFAULT
;
7974 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
7975 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
7976 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
7977 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
7980 ret
= host_to_target_clock_t(ret
);
7983 case TARGET_NR_acct
:
7985 ret
= get_errno(acct(NULL
));
7987 if (!(p
= lock_user_string(arg1
))) {
7988 return -TARGET_EFAULT
;
7990 ret
= get_errno(acct(path(p
)));
7991 unlock_user(p
, arg1
, 0);
7994 #ifdef TARGET_NR_umount2
7995 case TARGET_NR_umount2
:
7996 if (!(p
= lock_user_string(arg1
)))
7997 return -TARGET_EFAULT
;
7998 ret
= get_errno(umount2(p
, arg2
));
7999 unlock_user(p
, arg1
, 0);
8002 case TARGET_NR_ioctl
:
8003 return do_ioctl(arg1
, arg2
, arg3
);
8004 #ifdef TARGET_NR_fcntl
8005 case TARGET_NR_fcntl
:
8006 return do_fcntl(arg1
, arg2
, arg3
);
8008 case TARGET_NR_setpgid
:
8009 return get_errno(setpgid(arg1
, arg2
));
8010 case TARGET_NR_umask
:
8011 return get_errno(umask(arg1
));
8012 case TARGET_NR_chroot
:
8013 if (!(p
= lock_user_string(arg1
)))
8014 return -TARGET_EFAULT
;
8015 ret
= get_errno(chroot(p
));
8016 unlock_user(p
, arg1
, 0);
8018 #ifdef TARGET_NR_dup2
8019 case TARGET_NR_dup2
:
8020 ret
= get_errno(dup2(arg1
, arg2
));
8022 fd_trans_dup(arg1
, arg2
);
8026 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8027 case TARGET_NR_dup3
:
8031 if ((arg3
& ~TARGET_O_CLOEXEC
) != 0) {
8034 host_flags
= target_to_host_bitmask(arg3
, fcntl_flags_tbl
);
8035 ret
= get_errno(dup3(arg1
, arg2
, host_flags
));
8037 fd_trans_dup(arg1
, arg2
);
8042 #ifdef TARGET_NR_getppid /* not on alpha */
8043 case TARGET_NR_getppid
:
8044 return get_errno(getppid());
8046 #ifdef TARGET_NR_getpgrp
8047 case TARGET_NR_getpgrp
:
8048 return get_errno(getpgrp());
8050 case TARGET_NR_setsid
:
8051 return get_errno(setsid());
8052 #ifdef TARGET_NR_sigaction
8053 case TARGET_NR_sigaction
:
8055 #if defined(TARGET_ALPHA)
8056 struct target_sigaction act
, oact
, *pact
= 0;
8057 struct target_old_sigaction
*old_act
;
8059 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8060 return -TARGET_EFAULT
;
8061 act
._sa_handler
= old_act
->_sa_handler
;
8062 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8063 act
.sa_flags
= old_act
->sa_flags
;
8064 act
.sa_restorer
= 0;
8065 unlock_user_struct(old_act
, arg2
, 0);
8068 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8069 if (!is_error(ret
) && arg3
) {
8070 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8071 return -TARGET_EFAULT
;
8072 old_act
->_sa_handler
= oact
._sa_handler
;
8073 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8074 old_act
->sa_flags
= oact
.sa_flags
;
8075 unlock_user_struct(old_act
, arg3
, 1);
8077 #elif defined(TARGET_MIPS)
8078 struct target_sigaction act
, oact
, *pact
, *old_act
;
8081 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8082 return -TARGET_EFAULT
;
8083 act
._sa_handler
= old_act
->_sa_handler
;
8084 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
8085 act
.sa_flags
= old_act
->sa_flags
;
8086 unlock_user_struct(old_act
, arg2
, 0);
8092 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8094 if (!is_error(ret
) && arg3
) {
8095 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8096 return -TARGET_EFAULT
;
8097 old_act
->_sa_handler
= oact
._sa_handler
;
8098 old_act
->sa_flags
= oact
.sa_flags
;
8099 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
8100 old_act
->sa_mask
.sig
[1] = 0;
8101 old_act
->sa_mask
.sig
[2] = 0;
8102 old_act
->sa_mask
.sig
[3] = 0;
8103 unlock_user_struct(old_act
, arg3
, 1);
8106 struct target_old_sigaction
*old_act
;
8107 struct target_sigaction act
, oact
, *pact
;
8109 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8110 return -TARGET_EFAULT
;
8111 act
._sa_handler
= old_act
->_sa_handler
;
8112 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8113 act
.sa_flags
= old_act
->sa_flags
;
8114 act
.sa_restorer
= old_act
->sa_restorer
;
8115 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8116 act
.ka_restorer
= 0;
8118 unlock_user_struct(old_act
, arg2
, 0);
8123 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8124 if (!is_error(ret
) && arg3
) {
8125 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8126 return -TARGET_EFAULT
;
8127 old_act
->_sa_handler
= oact
._sa_handler
;
8128 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8129 old_act
->sa_flags
= oact
.sa_flags
;
8130 old_act
->sa_restorer
= oact
.sa_restorer
;
8131 unlock_user_struct(old_act
, arg3
, 1);
8137 case TARGET_NR_rt_sigaction
:
8139 #if defined(TARGET_ALPHA)
8140 /* For Alpha and SPARC this is a 5 argument syscall, with
8141 * a 'restorer' parameter which must be copied into the
8142 * sa_restorer field of the sigaction struct.
8143 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8144 * and arg5 is the sigsetsize.
8145 * Alpha also has a separate rt_sigaction struct that it uses
8146 * here; SPARC uses the usual sigaction struct.
8148 struct target_rt_sigaction
*rt_act
;
8149 struct target_sigaction act
, oact
, *pact
= 0;
8151 if (arg4
!= sizeof(target_sigset_t
)) {
8152 return -TARGET_EINVAL
;
8155 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
8156 return -TARGET_EFAULT
;
8157 act
._sa_handler
= rt_act
->_sa_handler
;
8158 act
.sa_mask
= rt_act
->sa_mask
;
8159 act
.sa_flags
= rt_act
->sa_flags
;
8160 act
.sa_restorer
= arg5
;
8161 unlock_user_struct(rt_act
, arg2
, 0);
8164 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8165 if (!is_error(ret
) && arg3
) {
8166 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
8167 return -TARGET_EFAULT
;
8168 rt_act
->_sa_handler
= oact
._sa_handler
;
8169 rt_act
->sa_mask
= oact
.sa_mask
;
8170 rt_act
->sa_flags
= oact
.sa_flags
;
8171 unlock_user_struct(rt_act
, arg3
, 1);
8175 target_ulong restorer
= arg4
;
8176 target_ulong sigsetsize
= arg5
;
8178 target_ulong sigsetsize
= arg4
;
8180 struct target_sigaction
*act
;
8181 struct target_sigaction
*oact
;
8183 if (sigsetsize
!= sizeof(target_sigset_t
)) {
8184 return -TARGET_EINVAL
;
8187 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1)) {
8188 return -TARGET_EFAULT
;
8190 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8191 act
->ka_restorer
= restorer
;
8197 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
8198 ret
= -TARGET_EFAULT
;
8199 goto rt_sigaction_fail
;
8203 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
8206 unlock_user_struct(act
, arg2
, 0);
8208 unlock_user_struct(oact
, arg3
, 1);
8212 #ifdef TARGET_NR_sgetmask /* not on alpha */
8213 case TARGET_NR_sgetmask
:
8216 abi_ulong target_set
;
8217 ret
= do_sigprocmask(0, NULL
, &cur_set
);
8219 host_to_target_old_sigset(&target_set
, &cur_set
);
8225 #ifdef TARGET_NR_ssetmask /* not on alpha */
8226 case TARGET_NR_ssetmask
:
8229 abi_ulong target_set
= arg1
;
8230 target_to_host_old_sigset(&set
, &target_set
);
8231 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
8233 host_to_target_old_sigset(&target_set
, &oset
);
8239 #ifdef TARGET_NR_sigprocmask
8240 case TARGET_NR_sigprocmask
:
8242 #if defined(TARGET_ALPHA)
8243 sigset_t set
, oldset
;
8248 case TARGET_SIG_BLOCK
:
8251 case TARGET_SIG_UNBLOCK
:
8254 case TARGET_SIG_SETMASK
:
8258 return -TARGET_EINVAL
;
8261 target_to_host_old_sigset(&set
, &mask
);
8263 ret
= do_sigprocmask(how
, &set
, &oldset
);
8264 if (!is_error(ret
)) {
8265 host_to_target_old_sigset(&mask
, &oldset
);
8267 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
8270 sigset_t set
, oldset
, *set_ptr
;
8275 case TARGET_SIG_BLOCK
:
8278 case TARGET_SIG_UNBLOCK
:
8281 case TARGET_SIG_SETMASK
:
8285 return -TARGET_EINVAL
;
8287 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8288 return -TARGET_EFAULT
;
8289 target_to_host_old_sigset(&set
, p
);
8290 unlock_user(p
, arg2
, 0);
8296 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8297 if (!is_error(ret
) && arg3
) {
8298 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8299 return -TARGET_EFAULT
;
8300 host_to_target_old_sigset(p
, &oldset
);
8301 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8307 case TARGET_NR_rt_sigprocmask
:
8310 sigset_t set
, oldset
, *set_ptr
;
8312 if (arg4
!= sizeof(target_sigset_t
)) {
8313 return -TARGET_EINVAL
;
8318 case TARGET_SIG_BLOCK
:
8321 case TARGET_SIG_UNBLOCK
:
8324 case TARGET_SIG_SETMASK
:
8328 return -TARGET_EINVAL
;
8330 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8331 return -TARGET_EFAULT
;
8332 target_to_host_sigset(&set
, p
);
8333 unlock_user(p
, arg2
, 0);
8339 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8340 if (!is_error(ret
) && arg3
) {
8341 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8342 return -TARGET_EFAULT
;
8343 host_to_target_sigset(p
, &oldset
);
8344 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8348 #ifdef TARGET_NR_sigpending
8349 case TARGET_NR_sigpending
:
8352 ret
= get_errno(sigpending(&set
));
8353 if (!is_error(ret
)) {
8354 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8355 return -TARGET_EFAULT
;
8356 host_to_target_old_sigset(p
, &set
);
8357 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8362 case TARGET_NR_rt_sigpending
:
8366 /* Yes, this check is >, not != like most. We follow the kernel's
8367 * logic and it does it like this because it implements
8368 * NR_sigpending through the same code path, and in that case
8369 * the old_sigset_t is smaller in size.
8371 if (arg2
> sizeof(target_sigset_t
)) {
8372 return -TARGET_EINVAL
;
8375 ret
= get_errno(sigpending(&set
));
8376 if (!is_error(ret
)) {
8377 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8378 return -TARGET_EFAULT
;
8379 host_to_target_sigset(p
, &set
);
8380 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8384 #ifdef TARGET_NR_sigsuspend
8385 case TARGET_NR_sigsuspend
:
8387 TaskState
*ts
= cpu
->opaque
;
8388 #if defined(TARGET_ALPHA)
8389 abi_ulong mask
= arg1
;
8390 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
8392 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8393 return -TARGET_EFAULT
;
8394 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
8395 unlock_user(p
, arg1
, 0);
8397 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8399 if (ret
!= -TARGET_ERESTARTSYS
) {
8400 ts
->in_sigsuspend
= 1;
8405 case TARGET_NR_rt_sigsuspend
:
8407 TaskState
*ts
= cpu
->opaque
;
8409 if (arg2
!= sizeof(target_sigset_t
)) {
8410 return -TARGET_EINVAL
;
8412 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8413 return -TARGET_EFAULT
;
8414 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
8415 unlock_user(p
, arg1
, 0);
8416 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8418 if (ret
!= -TARGET_ERESTARTSYS
) {
8419 ts
->in_sigsuspend
= 1;
8423 case TARGET_NR_rt_sigtimedwait
:
8426 struct timespec uts
, *puts
;
8429 if (arg4
!= sizeof(target_sigset_t
)) {
8430 return -TARGET_EINVAL
;
8433 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8434 return -TARGET_EFAULT
;
8435 target_to_host_sigset(&set
, p
);
8436 unlock_user(p
, arg1
, 0);
8439 target_to_host_timespec(puts
, arg3
);
8443 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
8445 if (!is_error(ret
)) {
8447 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
8450 return -TARGET_EFAULT
;
8452 host_to_target_siginfo(p
, &uinfo
);
8453 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
8455 ret
= host_to_target_signal(ret
);
8459 case TARGET_NR_rt_sigqueueinfo
:
8463 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
8465 return -TARGET_EFAULT
;
8467 target_to_host_siginfo(&uinfo
, p
);
8468 unlock_user(p
, arg3
, 0);
8469 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
8472 case TARGET_NR_rt_tgsigqueueinfo
:
8476 p
= lock_user(VERIFY_READ
, arg4
, sizeof(target_siginfo_t
), 1);
8478 return -TARGET_EFAULT
;
8480 target_to_host_siginfo(&uinfo
, p
);
8481 unlock_user(p
, arg4
, 0);
8482 ret
= get_errno(sys_rt_tgsigqueueinfo(arg1
, arg2
, arg3
, &uinfo
));
8485 #ifdef TARGET_NR_sigreturn
8486 case TARGET_NR_sigreturn
:
8487 if (block_signals()) {
8488 return -TARGET_ERESTARTSYS
;
8490 return do_sigreturn(cpu_env
);
8492 case TARGET_NR_rt_sigreturn
:
8493 if (block_signals()) {
8494 return -TARGET_ERESTARTSYS
;
8496 return do_rt_sigreturn(cpu_env
);
8497 case TARGET_NR_sethostname
:
8498 if (!(p
= lock_user_string(arg1
)))
8499 return -TARGET_EFAULT
;
8500 ret
= get_errno(sethostname(p
, arg2
));
8501 unlock_user(p
, arg1
, 0);
8503 #ifdef TARGET_NR_setrlimit
8504 case TARGET_NR_setrlimit
:
8506 int resource
= target_to_host_resource(arg1
);
8507 struct target_rlimit
*target_rlim
;
8509 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
8510 return -TARGET_EFAULT
;
8511 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
8512 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
8513 unlock_user_struct(target_rlim
, arg2
, 0);
8515 * If we just passed through resource limit settings for memory then
8516 * they would also apply to QEMU's own allocations, and QEMU will
8517 * crash or hang or die if its allocations fail. Ideally we would
8518 * track the guest allocations in QEMU and apply the limits ourselves.
8519 * For now, just tell the guest the call succeeded but don't actually
8522 if (resource
!= RLIMIT_AS
&&
8523 resource
!= RLIMIT_DATA
&&
8524 resource
!= RLIMIT_STACK
) {
8525 return get_errno(setrlimit(resource
, &rlim
));
8531 #ifdef TARGET_NR_getrlimit
8532 case TARGET_NR_getrlimit
:
8534 int resource
= target_to_host_resource(arg1
);
8535 struct target_rlimit
*target_rlim
;
8538 ret
= get_errno(getrlimit(resource
, &rlim
));
8539 if (!is_error(ret
)) {
8540 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
8541 return -TARGET_EFAULT
;
8542 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
8543 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
8544 unlock_user_struct(target_rlim
, arg2
, 1);
8549 case TARGET_NR_getrusage
:
8551 struct rusage rusage
;
8552 ret
= get_errno(getrusage(arg1
, &rusage
));
8553 if (!is_error(ret
)) {
8554 ret
= host_to_target_rusage(arg2
, &rusage
);
8558 case TARGET_NR_gettimeofday
:
8561 ret
= get_errno(gettimeofday(&tv
, NULL
));
8562 if (!is_error(ret
)) {
8563 if (copy_to_user_timeval(arg1
, &tv
))
8564 return -TARGET_EFAULT
;
8568 case TARGET_NR_settimeofday
:
8570 struct timeval tv
, *ptv
= NULL
;
8571 struct timezone tz
, *ptz
= NULL
;
8574 if (copy_from_user_timeval(&tv
, arg1
)) {
8575 return -TARGET_EFAULT
;
8581 if (copy_from_user_timezone(&tz
, arg2
)) {
8582 return -TARGET_EFAULT
;
8587 return get_errno(settimeofday(ptv
, ptz
));
8589 #if defined(TARGET_NR_select)
8590 case TARGET_NR_select
:
8591 #if defined(TARGET_WANT_NI_OLD_SELECT)
8592 /* some architectures used to have old_select here
8593 * but now ENOSYS it.
8595 ret
= -TARGET_ENOSYS
;
8596 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8597 ret
= do_old_select(arg1
);
8599 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
8603 #ifdef TARGET_NR_pselect6
8604 case TARGET_NR_pselect6
:
8606 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
8607 fd_set rfds
, wfds
, efds
;
8608 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
8609 struct timespec ts
, *ts_ptr
;
8612 * The 6th arg is actually two args smashed together,
8613 * so we cannot use the C library.
8621 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
8622 target_sigset_t
*target_sigset
;
8630 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
8634 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
8638 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
8644 * This takes a timespec, and not a timeval, so we cannot
8645 * use the do_select() helper ...
8648 if (target_to_host_timespec(&ts
, ts_addr
)) {
8649 return -TARGET_EFAULT
;
8656 /* Extract the two packed args for the sigset */
8659 sig
.size
= SIGSET_T_SIZE
;
8661 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
8663 return -TARGET_EFAULT
;
8665 arg_sigset
= tswapal(arg7
[0]);
8666 arg_sigsize
= tswapal(arg7
[1]);
8667 unlock_user(arg7
, arg6
, 0);
8671 if (arg_sigsize
!= sizeof(*target_sigset
)) {
8672 /* Like the kernel, we enforce correct size sigsets */
8673 return -TARGET_EINVAL
;
8675 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
8676 sizeof(*target_sigset
), 1);
8677 if (!target_sigset
) {
8678 return -TARGET_EFAULT
;
8680 target_to_host_sigset(&set
, target_sigset
);
8681 unlock_user(target_sigset
, arg_sigset
, 0);
8689 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
8692 if (!is_error(ret
)) {
8693 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
8694 return -TARGET_EFAULT
;
8695 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
8696 return -TARGET_EFAULT
;
8697 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
8698 return -TARGET_EFAULT
;
8700 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
8701 return -TARGET_EFAULT
;
8706 #ifdef TARGET_NR_symlink
8707 case TARGET_NR_symlink
:
8710 p
= lock_user_string(arg1
);
8711 p2
= lock_user_string(arg2
);
8713 ret
= -TARGET_EFAULT
;
8715 ret
= get_errno(symlink(p
, p2
));
8716 unlock_user(p2
, arg2
, 0);
8717 unlock_user(p
, arg1
, 0);
8721 #if defined(TARGET_NR_symlinkat)
8722 case TARGET_NR_symlinkat
:
8725 p
= lock_user_string(arg1
);
8726 p2
= lock_user_string(arg3
);
8728 ret
= -TARGET_EFAULT
;
8730 ret
= get_errno(symlinkat(p
, arg2
, p2
));
8731 unlock_user(p2
, arg3
, 0);
8732 unlock_user(p
, arg1
, 0);
8736 #ifdef TARGET_NR_readlink
8737 case TARGET_NR_readlink
:
8740 p
= lock_user_string(arg1
);
8741 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8743 ret
= -TARGET_EFAULT
;
8745 /* Short circuit this for the magic exe check. */
8746 ret
= -TARGET_EINVAL
;
8747 } else if (is_proc_myself((const char *)p
, "exe")) {
8748 char real
[PATH_MAX
], *temp
;
8749 temp
= realpath(exec_path
, real
);
8750 /* Return value is # of bytes that we wrote to the buffer. */
8752 ret
= get_errno(-1);
8754 /* Don't worry about sign mismatch as earlier mapping
8755 * logic would have thrown a bad address error. */
8756 ret
= MIN(strlen(real
), arg3
);
8757 /* We cannot NUL terminate the string. */
8758 memcpy(p2
, real
, ret
);
8761 ret
= get_errno(readlink(path(p
), p2
, arg3
));
8763 unlock_user(p2
, arg2
, ret
);
8764 unlock_user(p
, arg1
, 0);
8768 #if defined(TARGET_NR_readlinkat)
8769 case TARGET_NR_readlinkat
:
8772 p
= lock_user_string(arg2
);
8773 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
8775 ret
= -TARGET_EFAULT
;
8776 } else if (is_proc_myself((const char *)p
, "exe")) {
8777 char real
[PATH_MAX
], *temp
;
8778 temp
= realpath(exec_path
, real
);
8779 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
8780 snprintf((char *)p2
, arg4
, "%s", real
);
8782 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
8784 unlock_user(p2
, arg3
, ret
);
8785 unlock_user(p
, arg2
, 0);
8789 #ifdef TARGET_NR_swapon
8790 case TARGET_NR_swapon
:
8791 if (!(p
= lock_user_string(arg1
)))
8792 return -TARGET_EFAULT
;
8793 ret
= get_errno(swapon(p
, arg2
));
8794 unlock_user(p
, arg1
, 0);
8797 case TARGET_NR_reboot
:
8798 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
8799 /* arg4 must be ignored in all other cases */
8800 p
= lock_user_string(arg4
);
8802 return -TARGET_EFAULT
;
8804 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
8805 unlock_user(p
, arg4
, 0);
8807 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
8810 #ifdef TARGET_NR_mmap
8811 case TARGET_NR_mmap
:
8812 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8813 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8814 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8815 || defined(TARGET_S390X)
8818 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
8819 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
8820 return -TARGET_EFAULT
;
8827 unlock_user(v
, arg1
, 0);
8828 ret
= get_errno(target_mmap(v1
, v2
, v3
,
8829 target_to_host_bitmask(v4
, mmap_flags_tbl
),
8833 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
8834 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
8840 #ifdef TARGET_NR_mmap2
8841 case TARGET_NR_mmap2
:
8843 #define MMAP_SHIFT 12
8845 ret
= target_mmap(arg1
, arg2
, arg3
,
8846 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
8847 arg5
, arg6
<< MMAP_SHIFT
);
8848 return get_errno(ret
);
8850 case TARGET_NR_munmap
:
8851 return get_errno(target_munmap(arg1
, arg2
));
8852 case TARGET_NR_mprotect
:
8854 TaskState
*ts
= cpu
->opaque
;
8855 /* Special hack to detect libc making the stack executable. */
8856 if ((arg3
& PROT_GROWSDOWN
)
8857 && arg1
>= ts
->info
->stack_limit
8858 && arg1
<= ts
->info
->start_stack
) {
8859 arg3
&= ~PROT_GROWSDOWN
;
8860 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
8861 arg1
= ts
->info
->stack_limit
;
8864 return get_errno(target_mprotect(arg1
, arg2
, arg3
));
8865 #ifdef TARGET_NR_mremap
8866 case TARGET_NR_mremap
:
8867 return get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
8869 /* ??? msync/mlock/munlock are broken for softmmu. */
8870 #ifdef TARGET_NR_msync
8871 case TARGET_NR_msync
:
8872 return get_errno(msync(g2h(arg1
), arg2
, arg3
));
8874 #ifdef TARGET_NR_mlock
8875 case TARGET_NR_mlock
:
8876 return get_errno(mlock(g2h(arg1
), arg2
));
8878 #ifdef TARGET_NR_munlock
8879 case TARGET_NR_munlock
:
8880 return get_errno(munlock(g2h(arg1
), arg2
));
8882 #ifdef TARGET_NR_mlockall
8883 case TARGET_NR_mlockall
:
8884 return get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
8886 #ifdef TARGET_NR_munlockall
8887 case TARGET_NR_munlockall
:
8888 return get_errno(munlockall());
8890 #ifdef TARGET_NR_truncate
8891 case TARGET_NR_truncate
:
8892 if (!(p
= lock_user_string(arg1
)))
8893 return -TARGET_EFAULT
;
8894 ret
= get_errno(truncate(p
, arg2
));
8895 unlock_user(p
, arg1
, 0);
8898 #ifdef TARGET_NR_ftruncate
8899 case TARGET_NR_ftruncate
:
8900 return get_errno(ftruncate(arg1
, arg2
));
8902 case TARGET_NR_fchmod
:
8903 return get_errno(fchmod(arg1
, arg2
));
8904 #if defined(TARGET_NR_fchmodat)
8905 case TARGET_NR_fchmodat
:
8906 if (!(p
= lock_user_string(arg2
)))
8907 return -TARGET_EFAULT
;
8908 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
8909 unlock_user(p
, arg2
, 0);
8912 case TARGET_NR_getpriority
:
8913 /* Note that negative values are valid for getpriority, so we must
8914 differentiate based on errno settings. */
8916 ret
= getpriority(arg1
, arg2
);
8917 if (ret
== -1 && errno
!= 0) {
8918 return -host_to_target_errno(errno
);
8921 /* Return value is the unbiased priority. Signal no error. */
8922 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
8924 /* Return value is a biased priority to avoid negative numbers. */
8928 case TARGET_NR_setpriority
:
8929 return get_errno(setpriority(arg1
, arg2
, arg3
));
8930 #ifdef TARGET_NR_statfs
8931 case TARGET_NR_statfs
:
8932 if (!(p
= lock_user_string(arg1
))) {
8933 return -TARGET_EFAULT
;
8935 ret
= get_errno(statfs(path(p
), &stfs
));
8936 unlock_user(p
, arg1
, 0);
8938 if (!is_error(ret
)) {
8939 struct target_statfs
*target_stfs
;
8941 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
8942 return -TARGET_EFAULT
;
8943 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
8944 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
8945 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
8946 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
8947 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
8948 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
8949 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
8950 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
8951 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
8952 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
8953 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
8954 #ifdef _STATFS_F_FLAGS
8955 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
8957 __put_user(0, &target_stfs
->f_flags
);
8959 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
8960 unlock_user_struct(target_stfs
, arg2
, 1);
8964 #ifdef TARGET_NR_fstatfs
8965 case TARGET_NR_fstatfs
:
8966 ret
= get_errno(fstatfs(arg1
, &stfs
));
8967 goto convert_statfs
;
8969 #ifdef TARGET_NR_statfs64
8970 case TARGET_NR_statfs64
:
8971 if (!(p
= lock_user_string(arg1
))) {
8972 return -TARGET_EFAULT
;
8974 ret
= get_errno(statfs(path(p
), &stfs
));
8975 unlock_user(p
, arg1
, 0);
8977 if (!is_error(ret
)) {
8978 struct target_statfs64
*target_stfs
;
8980 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
8981 return -TARGET_EFAULT
;
8982 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
8983 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
8984 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
8985 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
8986 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
8987 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
8988 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
8989 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
8990 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
8991 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
8992 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
8993 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
8994 unlock_user_struct(target_stfs
, arg3
, 1);
8997 case TARGET_NR_fstatfs64
:
8998 ret
= get_errno(fstatfs(arg1
, &stfs
));
8999 goto convert_statfs64
;
9001 #ifdef TARGET_NR_socketcall
9002 case TARGET_NR_socketcall
:
9003 return do_socketcall(arg1
, arg2
);
9005 #ifdef TARGET_NR_accept
9006 case TARGET_NR_accept
:
9007 return do_accept4(arg1
, arg2
, arg3
, 0);
9009 #ifdef TARGET_NR_accept4
9010 case TARGET_NR_accept4
:
9011 return do_accept4(arg1
, arg2
, arg3
, arg4
);
9013 #ifdef TARGET_NR_bind
9014 case TARGET_NR_bind
:
9015 return do_bind(arg1
, arg2
, arg3
);
9017 #ifdef TARGET_NR_connect
9018 case TARGET_NR_connect
:
9019 return do_connect(arg1
, arg2
, arg3
);
9021 #ifdef TARGET_NR_getpeername
9022 case TARGET_NR_getpeername
:
9023 return do_getpeername(arg1
, arg2
, arg3
);
9025 #ifdef TARGET_NR_getsockname
9026 case TARGET_NR_getsockname
:
9027 return do_getsockname(arg1
, arg2
, arg3
);
9029 #ifdef TARGET_NR_getsockopt
9030 case TARGET_NR_getsockopt
:
9031 return do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
9033 #ifdef TARGET_NR_listen
9034 case TARGET_NR_listen
:
9035 return get_errno(listen(arg1
, arg2
));
9037 #ifdef TARGET_NR_recv
9038 case TARGET_NR_recv
:
9039 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
9041 #ifdef TARGET_NR_recvfrom
9042 case TARGET_NR_recvfrom
:
9043 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9045 #ifdef TARGET_NR_recvmsg
9046 case TARGET_NR_recvmsg
:
9047 return do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
9049 #ifdef TARGET_NR_send
9050 case TARGET_NR_send
:
9051 return do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
9053 #ifdef TARGET_NR_sendmsg
9054 case TARGET_NR_sendmsg
:
9055 return do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
9057 #ifdef TARGET_NR_sendmmsg
9058 case TARGET_NR_sendmmsg
:
9059 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
9060 case TARGET_NR_recvmmsg
:
9061 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
9063 #ifdef TARGET_NR_sendto
9064 case TARGET_NR_sendto
:
9065 return do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9067 #ifdef TARGET_NR_shutdown
9068 case TARGET_NR_shutdown
:
9069 return get_errno(shutdown(arg1
, arg2
));
9071 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9072 case TARGET_NR_getrandom
:
9073 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
9075 return -TARGET_EFAULT
;
9077 ret
= get_errno(getrandom(p
, arg2
, arg3
));
9078 unlock_user(p
, arg1
, ret
);
9081 #ifdef TARGET_NR_socket
9082 case TARGET_NR_socket
:
9083 return do_socket(arg1
, arg2
, arg3
);
9085 #ifdef TARGET_NR_socketpair
9086 case TARGET_NR_socketpair
:
9087 return do_socketpair(arg1
, arg2
, arg3
, arg4
);
9089 #ifdef TARGET_NR_setsockopt
9090 case TARGET_NR_setsockopt
:
9091 return do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
9093 #if defined(TARGET_NR_syslog)
9094 case TARGET_NR_syslog
:
9099 case TARGET_SYSLOG_ACTION_CLOSE
: /* Close log */
9100 case TARGET_SYSLOG_ACTION_OPEN
: /* Open log */
9101 case TARGET_SYSLOG_ACTION_CLEAR
: /* Clear ring buffer */
9102 case TARGET_SYSLOG_ACTION_CONSOLE_OFF
: /* Disable logging */
9103 case TARGET_SYSLOG_ACTION_CONSOLE_ON
: /* Enable logging */
9104 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL
: /* Set messages level */
9105 case TARGET_SYSLOG_ACTION_SIZE_UNREAD
: /* Number of chars */
9106 case TARGET_SYSLOG_ACTION_SIZE_BUFFER
: /* Size of the buffer */
9107 return get_errno(sys_syslog((int)arg1
, NULL
, (int)arg3
));
9108 case TARGET_SYSLOG_ACTION_READ
: /* Read from log */
9109 case TARGET_SYSLOG_ACTION_READ_CLEAR
: /* Read/clear msgs */
9110 case TARGET_SYSLOG_ACTION_READ_ALL
: /* Read last messages */
9113 return -TARGET_EINVAL
;
9118 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9120 return -TARGET_EFAULT
;
9122 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
9123 unlock_user(p
, arg2
, arg3
);
9127 return -TARGET_EINVAL
;
9132 case TARGET_NR_setitimer
:
9134 struct itimerval value
, ovalue
, *pvalue
;
9138 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
9139 || copy_from_user_timeval(&pvalue
->it_value
,
9140 arg2
+ sizeof(struct target_timeval
)))
9141 return -TARGET_EFAULT
;
9145 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
9146 if (!is_error(ret
) && arg3
) {
9147 if (copy_to_user_timeval(arg3
,
9148 &ovalue
.it_interval
)
9149 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
9151 return -TARGET_EFAULT
;
9155 case TARGET_NR_getitimer
:
9157 struct itimerval value
;
9159 ret
= get_errno(getitimer(arg1
, &value
));
9160 if (!is_error(ret
) && arg2
) {
9161 if (copy_to_user_timeval(arg2
,
9163 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
9165 return -TARGET_EFAULT
;
9169 #ifdef TARGET_NR_stat
9170 case TARGET_NR_stat
:
9171 if (!(p
= lock_user_string(arg1
))) {
9172 return -TARGET_EFAULT
;
9174 ret
= get_errno(stat(path(p
), &st
));
9175 unlock_user(p
, arg1
, 0);
9178 #ifdef TARGET_NR_lstat
9179 case TARGET_NR_lstat
:
9180 if (!(p
= lock_user_string(arg1
))) {
9181 return -TARGET_EFAULT
;
9183 ret
= get_errno(lstat(path(p
), &st
));
9184 unlock_user(p
, arg1
, 0);
9187 #ifdef TARGET_NR_fstat
9188 case TARGET_NR_fstat
:
9190 ret
= get_errno(fstat(arg1
, &st
));
9191 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9194 if (!is_error(ret
)) {
9195 struct target_stat
*target_st
;
9197 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
9198 return -TARGET_EFAULT
;
9199 memset(target_st
, 0, sizeof(*target_st
));
9200 __put_user(st
.st_dev
, &target_st
->st_dev
);
9201 __put_user(st
.st_ino
, &target_st
->st_ino
);
9202 __put_user(st
.st_mode
, &target_st
->st_mode
);
9203 __put_user(st
.st_uid
, &target_st
->st_uid
);
9204 __put_user(st
.st_gid
, &target_st
->st_gid
);
9205 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
9206 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
9207 __put_user(st
.st_size
, &target_st
->st_size
);
9208 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
9209 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
9210 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
9211 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
9212 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
9213 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
9214 defined(TARGET_STAT_HAVE_NSEC)
9215 __put_user(st
.st_atim
.tv_nsec
,
9216 &target_st
->target_st_atime_nsec
);
9217 __put_user(st
.st_mtim
.tv_nsec
,
9218 &target_st
->target_st_mtime_nsec
);
9219 __put_user(st
.st_ctim
.tv_nsec
,
9220 &target_st
->target_st_ctime_nsec
);
9222 unlock_user_struct(target_st
, arg2
, 1);
9227 case TARGET_NR_vhangup
:
9228 return get_errno(vhangup());
9229 #ifdef TARGET_NR_syscall
9230 case TARGET_NR_syscall
:
9231 return do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
9232 arg6
, arg7
, arg8
, 0);
9234 case TARGET_NR_wait4
:
9237 abi_long status_ptr
= arg2
;
9238 struct rusage rusage
, *rusage_ptr
;
9239 abi_ulong target_rusage
= arg4
;
9240 abi_long rusage_err
;
9242 rusage_ptr
= &rusage
;
9245 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
9246 if (!is_error(ret
)) {
9247 if (status_ptr
&& ret
) {
9248 status
= host_to_target_waitstatus(status
);
9249 if (put_user_s32(status
, status_ptr
))
9250 return -TARGET_EFAULT
;
9252 if (target_rusage
) {
9253 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
9261 #ifdef TARGET_NR_swapoff
9262 case TARGET_NR_swapoff
:
9263 if (!(p
= lock_user_string(arg1
)))
9264 return -TARGET_EFAULT
;
9265 ret
= get_errno(swapoff(p
));
9266 unlock_user(p
, arg1
, 0);
9269 case TARGET_NR_sysinfo
:
9271 struct target_sysinfo
*target_value
;
9272 struct sysinfo value
;
9273 ret
= get_errno(sysinfo(&value
));
9274 if (!is_error(ret
) && arg1
)
9276 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
9277 return -TARGET_EFAULT
;
9278 __put_user(value
.uptime
, &target_value
->uptime
);
9279 __put_user(value
.loads
[0], &target_value
->loads
[0]);
9280 __put_user(value
.loads
[1], &target_value
->loads
[1]);
9281 __put_user(value
.loads
[2], &target_value
->loads
[2]);
9282 __put_user(value
.totalram
, &target_value
->totalram
);
9283 __put_user(value
.freeram
, &target_value
->freeram
);
9284 __put_user(value
.sharedram
, &target_value
->sharedram
);
9285 __put_user(value
.bufferram
, &target_value
->bufferram
);
9286 __put_user(value
.totalswap
, &target_value
->totalswap
);
9287 __put_user(value
.freeswap
, &target_value
->freeswap
);
9288 __put_user(value
.procs
, &target_value
->procs
);
9289 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
9290 __put_user(value
.freehigh
, &target_value
->freehigh
);
9291 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
9292 unlock_user_struct(target_value
, arg1
, 1);
9296 #ifdef TARGET_NR_ipc
9298 return do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9300 #ifdef TARGET_NR_semget
9301 case TARGET_NR_semget
:
9302 return get_errno(semget(arg1
, arg2
, arg3
));
9304 #ifdef TARGET_NR_semop
9305 case TARGET_NR_semop
:
9306 return do_semop(arg1
, arg2
, arg3
);
9308 #ifdef TARGET_NR_semctl
9309 case TARGET_NR_semctl
:
9310 return do_semctl(arg1
, arg2
, arg3
, arg4
);
9312 #ifdef TARGET_NR_msgctl
9313 case TARGET_NR_msgctl
:
9314 return do_msgctl(arg1
, arg2
, arg3
);
9316 #ifdef TARGET_NR_msgget
9317 case TARGET_NR_msgget
:
9318 return get_errno(msgget(arg1
, arg2
));
9320 #ifdef TARGET_NR_msgrcv
9321 case TARGET_NR_msgrcv
:
9322 return do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
9324 #ifdef TARGET_NR_msgsnd
9325 case TARGET_NR_msgsnd
:
9326 return do_msgsnd(arg1
, arg2
, arg3
, arg4
);
9328 #ifdef TARGET_NR_shmget
9329 case TARGET_NR_shmget
:
9330 return get_errno(shmget(arg1
, arg2
, arg3
));
9332 #ifdef TARGET_NR_shmctl
9333 case TARGET_NR_shmctl
:
9334 return do_shmctl(arg1
, arg2
, arg3
);
9336 #ifdef TARGET_NR_shmat
9337 case TARGET_NR_shmat
:
9338 return do_shmat(cpu_env
, arg1
, arg2
, arg3
);
9340 #ifdef TARGET_NR_shmdt
9341 case TARGET_NR_shmdt
:
9342 return do_shmdt(arg1
);
9344 case TARGET_NR_fsync
:
9345 return get_errno(fsync(arg1
));
9346 case TARGET_NR_clone
:
9347 /* Linux manages to have three different orderings for its
9348 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9349 * match the kernel's CONFIG_CLONE_* settings.
9350 * Microblaze is further special in that it uses a sixth
9351 * implicit argument to clone for the TLS pointer.
9353 #if defined(TARGET_MICROBLAZE)
9354 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
9355 #elif defined(TARGET_CLONE_BACKWARDS)
9356 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
9357 #elif defined(TARGET_CLONE_BACKWARDS2)
9358 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
9360 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
9363 #ifdef __NR_exit_group
9364 /* new thread calls */
9365 case TARGET_NR_exit_group
:
9366 preexit_cleanup(cpu_env
, arg1
);
9367 return get_errno(exit_group(arg1
));
9369 case TARGET_NR_setdomainname
:
9370 if (!(p
= lock_user_string(arg1
)))
9371 return -TARGET_EFAULT
;
9372 ret
= get_errno(setdomainname(p
, arg2
));
9373 unlock_user(p
, arg1
, 0);
9375 case TARGET_NR_uname
:
9376 /* no need to transcode because we use the linux syscall */
9378 struct new_utsname
* buf
;
9380 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
9381 return -TARGET_EFAULT
;
9382 ret
= get_errno(sys_uname(buf
));
9383 if (!is_error(ret
)) {
9384 /* Overwrite the native machine name with whatever is being
9386 g_strlcpy(buf
->machine
, cpu_to_uname_machine(cpu_env
),
9387 sizeof(buf
->machine
));
9388 /* Allow the user to override the reported release. */
9389 if (qemu_uname_release
&& *qemu_uname_release
) {
9390 g_strlcpy(buf
->release
, qemu_uname_release
,
9391 sizeof(buf
->release
));
9394 unlock_user_struct(buf
, arg1
, 1);
9398 case TARGET_NR_modify_ldt
:
9399 return do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
9400 #if !defined(TARGET_X86_64)
9401 case TARGET_NR_vm86
:
9402 return do_vm86(cpu_env
, arg1
, arg2
);
9405 case TARGET_NR_adjtimex
:
9407 struct timex host_buf
;
9409 if (target_to_host_timex(&host_buf
, arg1
) != 0) {
9410 return -TARGET_EFAULT
;
9412 ret
= get_errno(adjtimex(&host_buf
));
9413 if (!is_error(ret
)) {
9414 if (host_to_target_timex(arg1
, &host_buf
) != 0) {
9415 return -TARGET_EFAULT
;
9420 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9421 case TARGET_NR_clock_adjtime
:
9423 struct timex htx
, *phtx
= &htx
;
9425 if (target_to_host_timex(phtx
, arg2
) != 0) {
9426 return -TARGET_EFAULT
;
9428 ret
= get_errno(clock_adjtime(arg1
, phtx
));
9429 if (!is_error(ret
) && phtx
) {
9430 if (host_to_target_timex(arg2
, phtx
) != 0) {
9431 return -TARGET_EFAULT
;
9437 case TARGET_NR_getpgid
:
9438 return get_errno(getpgid(arg1
));
9439 case TARGET_NR_fchdir
:
9440 return get_errno(fchdir(arg1
));
9441 case TARGET_NR_personality
:
9442 return get_errno(personality(arg1
));
9443 #ifdef TARGET_NR__llseek /* Not on alpha */
9444 case TARGET_NR__llseek
:
9447 #if !defined(__NR_llseek)
9448 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
9450 ret
= get_errno(res
);
9455 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
9457 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
9458 return -TARGET_EFAULT
;
9463 #ifdef TARGET_NR_getdents
9464 case TARGET_NR_getdents
:
9465 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9466 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9468 struct target_dirent
*target_dirp
;
9469 struct linux_dirent
*dirp
;
9470 abi_long count
= arg3
;
9472 dirp
= g_try_malloc(count
);
9474 return -TARGET_ENOMEM
;
9477 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9478 if (!is_error(ret
)) {
9479 struct linux_dirent
*de
;
9480 struct target_dirent
*tde
;
9482 int reclen
, treclen
;
9483 int count1
, tnamelen
;
9487 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9488 return -TARGET_EFAULT
;
9491 reclen
= de
->d_reclen
;
9492 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
9493 assert(tnamelen
>= 0);
9494 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
9495 assert(count1
+ treclen
<= count
);
9496 tde
->d_reclen
= tswap16(treclen
);
9497 tde
->d_ino
= tswapal(de
->d_ino
);
9498 tde
->d_off
= tswapal(de
->d_off
);
9499 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
9500 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9502 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9506 unlock_user(target_dirp
, arg2
, ret
);
9512 struct linux_dirent
*dirp
;
9513 abi_long count
= arg3
;
9515 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9516 return -TARGET_EFAULT
;
9517 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9518 if (!is_error(ret
)) {
9519 struct linux_dirent
*de
;
9524 reclen
= de
->d_reclen
;
9527 de
->d_reclen
= tswap16(reclen
);
9528 tswapls(&de
->d_ino
);
9529 tswapls(&de
->d_off
);
9530 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9534 unlock_user(dirp
, arg2
, ret
);
9538 /* Implement getdents in terms of getdents64 */
9540 struct linux_dirent64
*dirp
;
9541 abi_long count
= arg3
;
9543 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
9545 return -TARGET_EFAULT
;
9547 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9548 if (!is_error(ret
)) {
9549 /* Convert the dirent64 structs to target dirent. We do this
9550 * in-place, since we can guarantee that a target_dirent is no
9551 * larger than a dirent64; however this means we have to be
9552 * careful to read everything before writing in the new format.
9554 struct linux_dirent64
*de
;
9555 struct target_dirent
*tde
;
9560 tde
= (struct target_dirent
*)dirp
;
9562 int namelen
, treclen
;
9563 int reclen
= de
->d_reclen
;
9564 uint64_t ino
= de
->d_ino
;
9565 int64_t off
= de
->d_off
;
9566 uint8_t type
= de
->d_type
;
9568 namelen
= strlen(de
->d_name
);
9569 treclen
= offsetof(struct target_dirent
, d_name
)
9571 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
9573 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
9574 tde
->d_ino
= tswapal(ino
);
9575 tde
->d_off
= tswapal(off
);
9576 tde
->d_reclen
= tswap16(treclen
);
9577 /* The target_dirent type is in what was formerly a padding
9578 * byte at the end of the structure:
9580 *(((char *)tde
) + treclen
- 1) = type
;
9582 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9583 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9589 unlock_user(dirp
, arg2
, ret
);
9593 #endif /* TARGET_NR_getdents */
9594 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9595 case TARGET_NR_getdents64
:
9597 struct linux_dirent64
*dirp
;
9598 abi_long count
= arg3
;
9599 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9600 return -TARGET_EFAULT
;
9601 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9602 if (!is_error(ret
)) {
9603 struct linux_dirent64
*de
;
9608 reclen
= de
->d_reclen
;
9611 de
->d_reclen
= tswap16(reclen
);
9612 tswap64s((uint64_t *)&de
->d_ino
);
9613 tswap64s((uint64_t *)&de
->d_off
);
9614 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9618 unlock_user(dirp
, arg2
, ret
);
9621 #endif /* TARGET_NR_getdents64 */
9622 #if defined(TARGET_NR__newselect)
9623 case TARGET_NR__newselect
:
9624 return do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9626 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9627 # ifdef TARGET_NR_poll
9628 case TARGET_NR_poll
:
9630 # ifdef TARGET_NR_ppoll
9631 case TARGET_NR_ppoll
:
9634 struct target_pollfd
*target_pfd
;
9635 unsigned int nfds
= arg2
;
9642 if (nfds
> (INT_MAX
/ sizeof(struct target_pollfd
))) {
9643 return -TARGET_EINVAL
;
9646 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
9647 sizeof(struct target_pollfd
) * nfds
, 1);
9649 return -TARGET_EFAULT
;
9652 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
9653 for (i
= 0; i
< nfds
; i
++) {
9654 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
9655 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
9660 # ifdef TARGET_NR_ppoll
9661 case TARGET_NR_ppoll
:
9663 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
9664 target_sigset_t
*target_set
;
9665 sigset_t _set
, *set
= &_set
;
9668 if (target_to_host_timespec(timeout_ts
, arg3
)) {
9669 unlock_user(target_pfd
, arg1
, 0);
9670 return -TARGET_EFAULT
;
9677 if (arg5
!= sizeof(target_sigset_t
)) {
9678 unlock_user(target_pfd
, arg1
, 0);
9679 return -TARGET_EINVAL
;
9682 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
9684 unlock_user(target_pfd
, arg1
, 0);
9685 return -TARGET_EFAULT
;
9687 target_to_host_sigset(set
, target_set
);
9692 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
9693 set
, SIGSET_T_SIZE
));
9695 if (!is_error(ret
) && arg3
) {
9696 host_to_target_timespec(arg3
, timeout_ts
);
9699 unlock_user(target_set
, arg4
, 0);
9704 # ifdef TARGET_NR_poll
9705 case TARGET_NR_poll
:
9707 struct timespec ts
, *pts
;
9710 /* Convert ms to secs, ns */
9711 ts
.tv_sec
= arg3
/ 1000;
9712 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
9715 /* -ve poll() timeout means "infinite" */
9718 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
9723 g_assert_not_reached();
9726 if (!is_error(ret
)) {
9727 for(i
= 0; i
< nfds
; i
++) {
9728 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
9731 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
9735 case TARGET_NR_flock
:
9736 /* NOTE: the flock constant seems to be the same for every
9738 return get_errno(safe_flock(arg1
, arg2
));
9739 case TARGET_NR_readv
:
9741 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
9743 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
9744 unlock_iovec(vec
, arg2
, arg3
, 1);
9746 ret
= -host_to_target_errno(errno
);
9750 case TARGET_NR_writev
:
9752 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
9754 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
9755 unlock_iovec(vec
, arg2
, arg3
, 0);
9757 ret
= -host_to_target_errno(errno
);
9761 #if defined(TARGET_NR_preadv)
9762 case TARGET_NR_preadv
:
9764 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
9766 unsigned long low
, high
;
9768 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
9769 ret
= get_errno(safe_preadv(arg1
, vec
, arg3
, low
, high
));
9770 unlock_iovec(vec
, arg2
, arg3
, 1);
9772 ret
= -host_to_target_errno(errno
);
9777 #if defined(TARGET_NR_pwritev)
9778 case TARGET_NR_pwritev
:
9780 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
9782 unsigned long low
, high
;
9784 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
9785 ret
= get_errno(safe_pwritev(arg1
, vec
, arg3
, low
, high
));
9786 unlock_iovec(vec
, arg2
, arg3
, 0);
9788 ret
= -host_to_target_errno(errno
);
9793 case TARGET_NR_getsid
:
9794 return get_errno(getsid(arg1
));
9795 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9796 case TARGET_NR_fdatasync
:
9797 return get_errno(fdatasync(arg1
));
9799 #ifdef TARGET_NR__sysctl
9800 case TARGET_NR__sysctl
:
9801 /* We don't implement this, but ENOTDIR is always a safe
9803 return -TARGET_ENOTDIR
;
9805 case TARGET_NR_sched_getaffinity
:
9807 unsigned int mask_size
;
9808 unsigned long *mask
;
9811 * sched_getaffinity needs multiples of ulong, so need to take
9812 * care of mismatches between target ulong and host ulong sizes.
9814 if (arg2
& (sizeof(abi_ulong
) - 1)) {
9815 return -TARGET_EINVAL
;
9817 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
9819 mask
= alloca(mask_size
);
9820 memset(mask
, 0, mask_size
);
9821 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
9823 if (!is_error(ret
)) {
9825 /* More data returned than the caller's buffer will fit.
9826 * This only happens if sizeof(abi_long) < sizeof(long)
9827 * and the caller passed us a buffer holding an odd number
9828 * of abi_longs. If the host kernel is actually using the
9829 * extra 4 bytes then fail EINVAL; otherwise we can just
9830 * ignore them and only copy the interesting part.
9832 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
9833 if (numcpus
> arg2
* 8) {
9834 return -TARGET_EINVAL
;
9839 if (host_to_target_cpu_mask(mask
, mask_size
, arg3
, ret
)) {
9840 return -TARGET_EFAULT
;
9845 case TARGET_NR_sched_setaffinity
:
9847 unsigned int mask_size
;
9848 unsigned long *mask
;
9851 * sched_setaffinity needs multiples of ulong, so need to take
9852 * care of mismatches between target ulong and host ulong sizes.
9854 if (arg2
& (sizeof(abi_ulong
) - 1)) {
9855 return -TARGET_EINVAL
;
9857 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
9858 mask
= alloca(mask_size
);
9860 ret
= target_to_host_cpu_mask(mask
, mask_size
, arg3
, arg2
);
9865 return get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
9867 case TARGET_NR_getcpu
:
9870 ret
= get_errno(sys_getcpu(arg1
? &cpu
: NULL
,
9871 arg2
? &node
: NULL
,
9873 if (is_error(ret
)) {
9876 if (arg1
&& put_user_u32(cpu
, arg1
)) {
9877 return -TARGET_EFAULT
;
9879 if (arg2
&& put_user_u32(node
, arg2
)) {
9880 return -TARGET_EFAULT
;
9884 case TARGET_NR_sched_setparam
:
9886 struct sched_param
*target_schp
;
9887 struct sched_param schp
;
9890 return -TARGET_EINVAL
;
9892 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
9893 return -TARGET_EFAULT
;
9894 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
9895 unlock_user_struct(target_schp
, arg2
, 0);
9896 return get_errno(sched_setparam(arg1
, &schp
));
9898 case TARGET_NR_sched_getparam
:
9900 struct sched_param
*target_schp
;
9901 struct sched_param schp
;
9904 return -TARGET_EINVAL
;
9906 ret
= get_errno(sched_getparam(arg1
, &schp
));
9907 if (!is_error(ret
)) {
9908 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
9909 return -TARGET_EFAULT
;
9910 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
9911 unlock_user_struct(target_schp
, arg2
, 1);
9915 case TARGET_NR_sched_setscheduler
:
9917 struct sched_param
*target_schp
;
9918 struct sched_param schp
;
9920 return -TARGET_EINVAL
;
9922 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
9923 return -TARGET_EFAULT
;
9924 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
9925 unlock_user_struct(target_schp
, arg3
, 0);
9926 return get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
9928 case TARGET_NR_sched_getscheduler
:
9929 return get_errno(sched_getscheduler(arg1
));
9930 case TARGET_NR_sched_yield
:
9931 return get_errno(sched_yield());
9932 case TARGET_NR_sched_get_priority_max
:
9933 return get_errno(sched_get_priority_max(arg1
));
9934 case TARGET_NR_sched_get_priority_min
:
9935 return get_errno(sched_get_priority_min(arg1
));
9936 case TARGET_NR_sched_rr_get_interval
:
9939 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
9940 if (!is_error(ret
)) {
9941 ret
= host_to_target_timespec(arg2
, &ts
);
9945 case TARGET_NR_nanosleep
:
9947 struct timespec req
, rem
;
9948 target_to_host_timespec(&req
, arg1
);
9949 ret
= get_errno(safe_nanosleep(&req
, &rem
));
9950 if (is_error(ret
) && arg2
) {
9951 host_to_target_timespec(arg2
, &rem
);
9955 case TARGET_NR_prctl
:
9957 case PR_GET_PDEATHSIG
:
9960 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
9961 if (!is_error(ret
) && arg2
9962 && put_user_ual(deathsig
, arg2
)) {
9963 return -TARGET_EFAULT
;
9970 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
9972 return -TARGET_EFAULT
;
9974 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
9976 unlock_user(name
, arg2
, 16);
9981 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
9983 return -TARGET_EFAULT
;
9985 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
9987 unlock_user(name
, arg2
, 0);
9992 case TARGET_PR_GET_FP_MODE
:
9994 CPUMIPSState
*env
= ((CPUMIPSState
*)cpu_env
);
9996 if (env
->CP0_Status
& (1 << CP0St_FR
)) {
9997 ret
|= TARGET_PR_FP_MODE_FR
;
9999 if (env
->CP0_Config5
& (1 << CP0C5_FRE
)) {
10000 ret
|= TARGET_PR_FP_MODE_FRE
;
10004 case TARGET_PR_SET_FP_MODE
:
10006 CPUMIPSState
*env
= ((CPUMIPSState
*)cpu_env
);
10007 bool old_fr
= env
->CP0_Status
& (1 << CP0St_FR
);
10008 bool old_fre
= env
->CP0_Config5
& (1 << CP0C5_FRE
);
10009 bool new_fr
= arg2
& TARGET_PR_FP_MODE_FR
;
10010 bool new_fre
= arg2
& TARGET_PR_FP_MODE_FRE
;
10012 const unsigned int known_bits
= TARGET_PR_FP_MODE_FR
|
10013 TARGET_PR_FP_MODE_FRE
;
10015 /* If nothing to change, return right away, successfully. */
10016 if (old_fr
== new_fr
&& old_fre
== new_fre
) {
10019 /* Check the value is valid */
10020 if (arg2
& ~known_bits
) {
10021 return -TARGET_EOPNOTSUPP
;
10023 /* Setting FRE without FR is not supported. */
10024 if (new_fre
&& !new_fr
) {
10025 return -TARGET_EOPNOTSUPP
;
10027 if (new_fr
&& !(env
->active_fpu
.fcr0
& (1 << FCR0_F64
))) {
10028 /* FR1 is not supported */
10029 return -TARGET_EOPNOTSUPP
;
10031 if (!new_fr
&& (env
->active_fpu
.fcr0
& (1 << FCR0_F64
))
10032 && !(env
->CP0_Status_rw_bitmask
& (1 << CP0St_FR
))) {
10033 /* cannot set FR=0 */
10034 return -TARGET_EOPNOTSUPP
;
10036 if (new_fre
&& !(env
->active_fpu
.fcr0
& (1 << FCR0_FREP
))) {
10037 /* Cannot set FRE=1 */
10038 return -TARGET_EOPNOTSUPP
;
10042 fpr_t
*fpr
= env
->active_fpu
.fpr
;
10043 for (i
= 0; i
< 32 ; i
+= 2) {
10044 if (!old_fr
&& new_fr
) {
10045 fpr
[i
].w
[!FP_ENDIAN_IDX
] = fpr
[i
+ 1].w
[FP_ENDIAN_IDX
];
10046 } else if (old_fr
&& !new_fr
) {
10047 fpr
[i
+ 1].w
[FP_ENDIAN_IDX
] = fpr
[i
].w
[!FP_ENDIAN_IDX
];
10052 env
->CP0_Status
|= (1 << CP0St_FR
);
10053 env
->hflags
|= MIPS_HFLAG_F64
;
10055 env
->CP0_Status
&= ~(1 << CP0St_FR
);
10056 env
->hflags
&= ~MIPS_HFLAG_F64
;
10059 env
->CP0_Config5
|= (1 << CP0C5_FRE
);
10060 if (env
->active_fpu
.fcr0
& (1 << FCR0_FREP
)) {
10061 env
->hflags
|= MIPS_HFLAG_FRE
;
10064 env
->CP0_Config5
&= ~(1 << CP0C5_FRE
);
10065 env
->hflags
&= ~MIPS_HFLAG_FRE
;
10071 #ifdef TARGET_AARCH64
10072 case TARGET_PR_SVE_SET_VL
:
10074 * We cannot support either PR_SVE_SET_VL_ONEXEC or
10075 * PR_SVE_VL_INHERIT. Note the kernel definition
10076 * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10077 * even though the current architectural maximum is VQ=16.
10079 ret
= -TARGET_EINVAL
;
10080 if (cpu_isar_feature(aa64_sve
, env_archcpu(cpu_env
))
10081 && arg2
>= 0 && arg2
<= 512 * 16 && !(arg2
& 15)) {
10082 CPUARMState
*env
= cpu_env
;
10083 ARMCPU
*cpu
= env_archcpu(env
);
10084 uint32_t vq
, old_vq
;
10086 old_vq
= (env
->vfp
.zcr_el
[1] & 0xf) + 1;
10087 vq
= MAX(arg2
/ 16, 1);
10088 vq
= MIN(vq
, cpu
->sve_max_vq
);
10091 aarch64_sve_narrow_vq(env
, vq
);
10093 env
->vfp
.zcr_el
[1] = vq
- 1;
10094 arm_rebuild_hflags(env
);
10098 case TARGET_PR_SVE_GET_VL
:
10099 ret
= -TARGET_EINVAL
;
10101 ARMCPU
*cpu
= env_archcpu(cpu_env
);
10102 if (cpu_isar_feature(aa64_sve
, cpu
)) {
10103 ret
= ((cpu
->env
.vfp
.zcr_el
[1] & 0xf) + 1) * 16;
10107 case TARGET_PR_PAC_RESET_KEYS
:
10109 CPUARMState
*env
= cpu_env
;
10110 ARMCPU
*cpu
= env_archcpu(env
);
10112 if (arg3
|| arg4
|| arg5
) {
10113 return -TARGET_EINVAL
;
10115 if (cpu_isar_feature(aa64_pauth
, cpu
)) {
10116 int all
= (TARGET_PR_PAC_APIAKEY
| TARGET_PR_PAC_APIBKEY
|
10117 TARGET_PR_PAC_APDAKEY
| TARGET_PR_PAC_APDBKEY
|
10118 TARGET_PR_PAC_APGAKEY
);
10124 } else if (arg2
& ~all
) {
10125 return -TARGET_EINVAL
;
10127 if (arg2
& TARGET_PR_PAC_APIAKEY
) {
10128 ret
|= qemu_guest_getrandom(&env
->keys
.apia
,
10129 sizeof(ARMPACKey
), &err
);
10131 if (arg2
& TARGET_PR_PAC_APIBKEY
) {
10132 ret
|= qemu_guest_getrandom(&env
->keys
.apib
,
10133 sizeof(ARMPACKey
), &err
);
10135 if (arg2
& TARGET_PR_PAC_APDAKEY
) {
10136 ret
|= qemu_guest_getrandom(&env
->keys
.apda
,
10137 sizeof(ARMPACKey
), &err
);
10139 if (arg2
& TARGET_PR_PAC_APDBKEY
) {
10140 ret
|= qemu_guest_getrandom(&env
->keys
.apdb
,
10141 sizeof(ARMPACKey
), &err
);
10143 if (arg2
& TARGET_PR_PAC_APGAKEY
) {
10144 ret
|= qemu_guest_getrandom(&env
->keys
.apga
,
10145 sizeof(ARMPACKey
), &err
);
10149 * Some unknown failure in the crypto. The best
10150 * we can do is log it and fail the syscall.
10151 * The real syscall cannot fail this way.
10153 qemu_log_mask(LOG_UNIMP
,
10154 "PR_PAC_RESET_KEYS: Crypto failure: %s",
10155 error_get_pretty(err
));
10157 return -TARGET_EIO
;
10162 return -TARGET_EINVAL
;
10163 #endif /* AARCH64 */
10164 case PR_GET_SECCOMP
:
10165 case PR_SET_SECCOMP
:
10166 /* Disable seccomp to prevent the target disabling syscalls we
10168 return -TARGET_EINVAL
;
10170 /* Most prctl options have no pointer arguments */
10171 return get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
10174 #ifdef TARGET_NR_arch_prctl
10175 case TARGET_NR_arch_prctl
:
10176 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10177 return do_arch_prctl(cpu_env
, arg1
, arg2
);
10182 #ifdef TARGET_NR_pread64
10183 case TARGET_NR_pread64
:
10184 if (regpairs_aligned(cpu_env
, num
)) {
10188 if (arg2
== 0 && arg3
== 0) {
10189 /* Special-case NULL buffer and zero length, which should succeed */
10192 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10194 return -TARGET_EFAULT
;
10197 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10198 unlock_user(p
, arg2
, ret
);
10200 case TARGET_NR_pwrite64
:
10201 if (regpairs_aligned(cpu_env
, num
)) {
10205 if (arg2
== 0 && arg3
== 0) {
10206 /* Special-case NULL buffer and zero length, which should succeed */
10209 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
10211 return -TARGET_EFAULT
;
10214 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10215 unlock_user(p
, arg2
, 0);
10218 case TARGET_NR_getcwd
:
10219 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
10220 return -TARGET_EFAULT
;
10221 ret
= get_errno(sys_getcwd1(p
, arg2
));
10222 unlock_user(p
, arg1
, ret
);
10224 case TARGET_NR_capget
:
10225 case TARGET_NR_capset
:
10227 struct target_user_cap_header
*target_header
;
10228 struct target_user_cap_data
*target_data
= NULL
;
10229 struct __user_cap_header_struct header
;
10230 struct __user_cap_data_struct data
[2];
10231 struct __user_cap_data_struct
*dataptr
= NULL
;
10232 int i
, target_datalen
;
10233 int data_items
= 1;
10235 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
10236 return -TARGET_EFAULT
;
10238 header
.version
= tswap32(target_header
->version
);
10239 header
.pid
= tswap32(target_header
->pid
);
10241 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
10242 /* Version 2 and up takes pointer to two user_data structs */
10246 target_datalen
= sizeof(*target_data
) * data_items
;
10249 if (num
== TARGET_NR_capget
) {
10250 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
10252 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
10254 if (!target_data
) {
10255 unlock_user_struct(target_header
, arg1
, 0);
10256 return -TARGET_EFAULT
;
10259 if (num
== TARGET_NR_capset
) {
10260 for (i
= 0; i
< data_items
; i
++) {
10261 data
[i
].effective
= tswap32(target_data
[i
].effective
);
10262 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
10263 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
10270 if (num
== TARGET_NR_capget
) {
10271 ret
= get_errno(capget(&header
, dataptr
));
10273 ret
= get_errno(capset(&header
, dataptr
));
10276 /* The kernel always updates version for both capget and capset */
10277 target_header
->version
= tswap32(header
.version
);
10278 unlock_user_struct(target_header
, arg1
, 1);
10281 if (num
== TARGET_NR_capget
) {
10282 for (i
= 0; i
< data_items
; i
++) {
10283 target_data
[i
].effective
= tswap32(data
[i
].effective
);
10284 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
10285 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
10287 unlock_user(target_data
, arg2
, target_datalen
);
10289 unlock_user(target_data
, arg2
, 0);
10294 case TARGET_NR_sigaltstack
:
10295 return do_sigaltstack(arg1
, arg2
,
10296 get_sp_from_cpustate((CPUArchState
*)cpu_env
));
10298 #ifdef CONFIG_SENDFILE
10299 #ifdef TARGET_NR_sendfile
10300 case TARGET_NR_sendfile
:
10302 off_t
*offp
= NULL
;
10305 ret
= get_user_sal(off
, arg3
);
10306 if (is_error(ret
)) {
10311 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10312 if (!is_error(ret
) && arg3
) {
10313 abi_long ret2
= put_user_sal(off
, arg3
);
10314 if (is_error(ret2
)) {
10321 #ifdef TARGET_NR_sendfile64
10322 case TARGET_NR_sendfile64
:
10324 off_t
*offp
= NULL
;
10327 ret
= get_user_s64(off
, arg3
);
10328 if (is_error(ret
)) {
10333 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10334 if (!is_error(ret
) && arg3
) {
10335 abi_long ret2
= put_user_s64(off
, arg3
);
10336 if (is_error(ret2
)) {
10344 #ifdef TARGET_NR_vfork
10345 case TARGET_NR_vfork
:
10346 return get_errno(do_fork(cpu_env
,
10347 CLONE_VFORK
| CLONE_VM
| TARGET_SIGCHLD
,
10350 #ifdef TARGET_NR_ugetrlimit
10351 case TARGET_NR_ugetrlimit
:
10353 struct rlimit rlim
;
10354 int resource
= target_to_host_resource(arg1
);
10355 ret
= get_errno(getrlimit(resource
, &rlim
));
10356 if (!is_error(ret
)) {
10357 struct target_rlimit
*target_rlim
;
10358 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
10359 return -TARGET_EFAULT
;
10360 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
10361 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
10362 unlock_user_struct(target_rlim
, arg2
, 1);
10367 #ifdef TARGET_NR_truncate64
10368 case TARGET_NR_truncate64
:
10369 if (!(p
= lock_user_string(arg1
)))
10370 return -TARGET_EFAULT
;
10371 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
10372 unlock_user(p
, arg1
, 0);
10375 #ifdef TARGET_NR_ftruncate64
10376 case TARGET_NR_ftruncate64
:
10377 return target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
10379 #ifdef TARGET_NR_stat64
10380 case TARGET_NR_stat64
:
10381 if (!(p
= lock_user_string(arg1
))) {
10382 return -TARGET_EFAULT
;
10384 ret
= get_errno(stat(path(p
), &st
));
10385 unlock_user(p
, arg1
, 0);
10386 if (!is_error(ret
))
10387 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10390 #ifdef TARGET_NR_lstat64
10391 case TARGET_NR_lstat64
:
10392 if (!(p
= lock_user_string(arg1
))) {
10393 return -TARGET_EFAULT
;
10395 ret
= get_errno(lstat(path(p
), &st
));
10396 unlock_user(p
, arg1
, 0);
10397 if (!is_error(ret
))
10398 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10401 #ifdef TARGET_NR_fstat64
10402 case TARGET_NR_fstat64
:
10403 ret
= get_errno(fstat(arg1
, &st
));
10404 if (!is_error(ret
))
10405 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10408 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10409 #ifdef TARGET_NR_fstatat64
10410 case TARGET_NR_fstatat64
:
10412 #ifdef TARGET_NR_newfstatat
10413 case TARGET_NR_newfstatat
:
10415 if (!(p
= lock_user_string(arg2
))) {
10416 return -TARGET_EFAULT
;
10418 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
10419 unlock_user(p
, arg2
, 0);
10420 if (!is_error(ret
))
10421 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
10424 #if defined(TARGET_NR_statx)
10425 case TARGET_NR_statx
:
10427 struct target_statx
*target_stx
;
10431 p
= lock_user_string(arg2
);
10433 return -TARGET_EFAULT
;
10435 #if defined(__NR_statx)
10438 * It is assumed that struct statx is architecture independent.
10440 struct target_statx host_stx
;
10443 ret
= get_errno(sys_statx(dirfd
, p
, flags
, mask
, &host_stx
));
10444 if (!is_error(ret
)) {
10445 if (host_to_target_statx(&host_stx
, arg5
) != 0) {
10446 unlock_user(p
, arg2
, 0);
10447 return -TARGET_EFAULT
;
10451 if (ret
!= -TARGET_ENOSYS
) {
10452 unlock_user(p
, arg2
, 0);
10457 ret
= get_errno(fstatat(dirfd
, path(p
), &st
, flags
));
10458 unlock_user(p
, arg2
, 0);
10460 if (!is_error(ret
)) {
10461 if (!lock_user_struct(VERIFY_WRITE
, target_stx
, arg5
, 0)) {
10462 return -TARGET_EFAULT
;
10464 memset(target_stx
, 0, sizeof(*target_stx
));
10465 __put_user(major(st
.st_dev
), &target_stx
->stx_dev_major
);
10466 __put_user(minor(st
.st_dev
), &target_stx
->stx_dev_minor
);
10467 __put_user(st
.st_ino
, &target_stx
->stx_ino
);
10468 __put_user(st
.st_mode
, &target_stx
->stx_mode
);
10469 __put_user(st
.st_uid
, &target_stx
->stx_uid
);
10470 __put_user(st
.st_gid
, &target_stx
->stx_gid
);
10471 __put_user(st
.st_nlink
, &target_stx
->stx_nlink
);
10472 __put_user(major(st
.st_rdev
), &target_stx
->stx_rdev_major
);
10473 __put_user(minor(st
.st_rdev
), &target_stx
->stx_rdev_minor
);
10474 __put_user(st
.st_size
, &target_stx
->stx_size
);
10475 __put_user(st
.st_blksize
, &target_stx
->stx_blksize
);
10476 __put_user(st
.st_blocks
, &target_stx
->stx_blocks
);
10477 __put_user(st
.st_atime
, &target_stx
->stx_atime
.tv_sec
);
10478 __put_user(st
.st_mtime
, &target_stx
->stx_mtime
.tv_sec
);
10479 __put_user(st
.st_ctime
, &target_stx
->stx_ctime
.tv_sec
);
10480 unlock_user_struct(target_stx
, arg5
, 1);
10485 #ifdef TARGET_NR_lchown
10486 case TARGET_NR_lchown
:
10487 if (!(p
= lock_user_string(arg1
)))
10488 return -TARGET_EFAULT
;
10489 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10490 unlock_user(p
, arg1
, 0);
10493 #ifdef TARGET_NR_getuid
10494 case TARGET_NR_getuid
:
10495 return get_errno(high2lowuid(getuid()));
10497 #ifdef TARGET_NR_getgid
10498 case TARGET_NR_getgid
:
10499 return get_errno(high2lowgid(getgid()));
10501 #ifdef TARGET_NR_geteuid
10502 case TARGET_NR_geteuid
:
10503 return get_errno(high2lowuid(geteuid()));
10505 #ifdef TARGET_NR_getegid
10506 case TARGET_NR_getegid
:
10507 return get_errno(high2lowgid(getegid()));
10509 case TARGET_NR_setreuid
:
10510 return get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
10511 case TARGET_NR_setregid
:
10512 return get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
10513 case TARGET_NR_getgroups
:
10515 int gidsetsize
= arg1
;
10516 target_id
*target_grouplist
;
10520 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10521 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10522 if (gidsetsize
== 0)
10524 if (!is_error(ret
)) {
10525 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
10526 if (!target_grouplist
)
10527 return -TARGET_EFAULT
;
10528 for(i
= 0;i
< ret
; i
++)
10529 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
10530 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
10534 case TARGET_NR_setgroups
:
10536 int gidsetsize
= arg1
;
10537 target_id
*target_grouplist
;
10538 gid_t
*grouplist
= NULL
;
10541 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10542 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
10543 if (!target_grouplist
) {
10544 return -TARGET_EFAULT
;
10546 for (i
= 0; i
< gidsetsize
; i
++) {
10547 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
10549 unlock_user(target_grouplist
, arg2
, 0);
10551 return get_errno(setgroups(gidsetsize
, grouplist
));
10553 case TARGET_NR_fchown
:
10554 return get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
10555 #if defined(TARGET_NR_fchownat)
10556 case TARGET_NR_fchownat
:
10557 if (!(p
= lock_user_string(arg2
)))
10558 return -TARGET_EFAULT
;
10559 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
10560 low2highgid(arg4
), arg5
));
10561 unlock_user(p
, arg2
, 0);
10564 #ifdef TARGET_NR_setresuid
10565 case TARGET_NR_setresuid
:
10566 return get_errno(sys_setresuid(low2highuid(arg1
),
10568 low2highuid(arg3
)));
10570 #ifdef TARGET_NR_getresuid
10571 case TARGET_NR_getresuid
:
10573 uid_t ruid
, euid
, suid
;
10574 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
10575 if (!is_error(ret
)) {
10576 if (put_user_id(high2lowuid(ruid
), arg1
)
10577 || put_user_id(high2lowuid(euid
), arg2
)
10578 || put_user_id(high2lowuid(suid
), arg3
))
10579 return -TARGET_EFAULT
;
10584 #ifdef TARGET_NR_getresgid
10585 case TARGET_NR_setresgid
:
10586 return get_errno(sys_setresgid(low2highgid(arg1
),
10588 low2highgid(arg3
)));
10590 #ifdef TARGET_NR_getresgid
10591 case TARGET_NR_getresgid
:
10593 gid_t rgid
, egid
, sgid
;
10594 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
10595 if (!is_error(ret
)) {
10596 if (put_user_id(high2lowgid(rgid
), arg1
)
10597 || put_user_id(high2lowgid(egid
), arg2
)
10598 || put_user_id(high2lowgid(sgid
), arg3
))
10599 return -TARGET_EFAULT
;
10604 #ifdef TARGET_NR_chown
10605 case TARGET_NR_chown
:
10606 if (!(p
= lock_user_string(arg1
)))
10607 return -TARGET_EFAULT
;
10608 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10609 unlock_user(p
, arg1
, 0);
10612 case TARGET_NR_setuid
:
10613 return get_errno(sys_setuid(low2highuid(arg1
)));
10614 case TARGET_NR_setgid
:
10615 return get_errno(sys_setgid(low2highgid(arg1
)));
10616 case TARGET_NR_setfsuid
:
10617 return get_errno(setfsuid(arg1
));
10618 case TARGET_NR_setfsgid
:
10619 return get_errno(setfsgid(arg1
));
10621 #ifdef TARGET_NR_lchown32
10622 case TARGET_NR_lchown32
:
10623 if (!(p
= lock_user_string(arg1
)))
10624 return -TARGET_EFAULT
;
10625 ret
= get_errno(lchown(p
, arg2
, arg3
));
10626 unlock_user(p
, arg1
, 0);
10629 #ifdef TARGET_NR_getuid32
10630 case TARGET_NR_getuid32
:
10631 return get_errno(getuid());
10634 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10635 /* Alpha specific */
10636 case TARGET_NR_getxuid
:
10640 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
10642 return get_errno(getuid());
10644 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10645 /* Alpha specific */
10646 case TARGET_NR_getxgid
:
10650 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
10652 return get_errno(getgid());
10654 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10655 /* Alpha specific */
10656 case TARGET_NR_osf_getsysinfo
:
10657 ret
= -TARGET_EOPNOTSUPP
;
10659 case TARGET_GSI_IEEE_FP_CONTROL
:
10661 uint64_t fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10662 uint64_t swcr
= ((CPUAlphaState
*)cpu_env
)->swcr
;
10664 swcr
&= ~SWCR_STATUS_MASK
;
10665 swcr
|= (fpcr
>> 35) & SWCR_STATUS_MASK
;
10667 if (put_user_u64 (swcr
, arg2
))
10668 return -TARGET_EFAULT
;
10673 /* case GSI_IEEE_STATE_AT_SIGNAL:
10674 -- Not implemented in linux kernel.
10676 -- Retrieves current unaligned access state; not much used.
10677 case GSI_PROC_TYPE:
10678 -- Retrieves implver information; surely not used.
10679 case GSI_GET_HWRPB:
10680 -- Grabs a copy of the HWRPB; surely not used.
10685 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10686 /* Alpha specific */
10687 case TARGET_NR_osf_setsysinfo
:
10688 ret
= -TARGET_EOPNOTSUPP
;
10690 case TARGET_SSI_IEEE_FP_CONTROL
:
10692 uint64_t swcr
, fpcr
;
10694 if (get_user_u64 (swcr
, arg2
)) {
10695 return -TARGET_EFAULT
;
10699 * The kernel calls swcr_update_status to update the
10700 * status bits from the fpcr at every point that it
10701 * could be queried. Therefore, we store the status
10702 * bits only in FPCR.
10704 ((CPUAlphaState
*)cpu_env
)->swcr
10705 = swcr
& (SWCR_TRAP_ENABLE_MASK
| SWCR_MAP_MASK
);
10707 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10708 fpcr
&= ((uint64_t)FPCR_DYN_MASK
<< 32);
10709 fpcr
|= alpha_ieee_swcr_to_fpcr(swcr
);
10710 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
10715 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
10717 uint64_t exc
, fpcr
, fex
;
10719 if (get_user_u64(exc
, arg2
)) {
10720 return -TARGET_EFAULT
;
10722 exc
&= SWCR_STATUS_MASK
;
10723 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10725 /* Old exceptions are not signaled. */
10726 fex
= alpha_ieee_fpcr_to_swcr(fpcr
);
10728 fex
>>= SWCR_STATUS_TO_EXCSUM_SHIFT
;
10729 fex
&= ((CPUArchState
*)cpu_env
)->swcr
;
10731 /* Update the hardware fpcr. */
10732 fpcr
|= alpha_ieee_swcr_to_fpcr(exc
);
10733 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
10736 int si_code
= TARGET_FPE_FLTUNK
;
10737 target_siginfo_t info
;
10739 if (fex
& SWCR_TRAP_ENABLE_DNO
) {
10740 si_code
= TARGET_FPE_FLTUND
;
10742 if (fex
& SWCR_TRAP_ENABLE_INE
) {
10743 si_code
= TARGET_FPE_FLTRES
;
10745 if (fex
& SWCR_TRAP_ENABLE_UNF
) {
10746 si_code
= TARGET_FPE_FLTUND
;
10748 if (fex
& SWCR_TRAP_ENABLE_OVF
) {
10749 si_code
= TARGET_FPE_FLTOVF
;
10751 if (fex
& SWCR_TRAP_ENABLE_DZE
) {
10752 si_code
= TARGET_FPE_FLTDIV
;
10754 if (fex
& SWCR_TRAP_ENABLE_INV
) {
10755 si_code
= TARGET_FPE_FLTINV
;
10758 info
.si_signo
= SIGFPE
;
10760 info
.si_code
= si_code
;
10761 info
._sifields
._sigfault
._addr
10762 = ((CPUArchState
*)cpu_env
)->pc
;
10763 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
10764 QEMU_SI_FAULT
, &info
);
10770 /* case SSI_NVPAIRS:
10771 -- Used with SSIN_UACPROC to enable unaligned accesses.
10772 case SSI_IEEE_STATE_AT_SIGNAL:
10773 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10774 -- Not implemented in linux kernel
10779 #ifdef TARGET_NR_osf_sigprocmask
10780 /* Alpha specific. */
10781 case TARGET_NR_osf_sigprocmask
:
10785 sigset_t set
, oldset
;
10788 case TARGET_SIG_BLOCK
:
10791 case TARGET_SIG_UNBLOCK
:
10794 case TARGET_SIG_SETMASK
:
10798 return -TARGET_EINVAL
;
10801 target_to_host_old_sigset(&set
, &mask
);
10802 ret
= do_sigprocmask(how
, &set
, &oldset
);
10804 host_to_target_old_sigset(&mask
, &oldset
);
10811 #ifdef TARGET_NR_getgid32
10812 case TARGET_NR_getgid32
:
10813 return get_errno(getgid());
10815 #ifdef TARGET_NR_geteuid32
10816 case TARGET_NR_geteuid32
:
10817 return get_errno(geteuid());
10819 #ifdef TARGET_NR_getegid32
10820 case TARGET_NR_getegid32
:
10821 return get_errno(getegid());
10823 #ifdef TARGET_NR_setreuid32
10824 case TARGET_NR_setreuid32
:
10825 return get_errno(setreuid(arg1
, arg2
));
10827 #ifdef TARGET_NR_setregid32
10828 case TARGET_NR_setregid32
:
10829 return get_errno(setregid(arg1
, arg2
));
10831 #ifdef TARGET_NR_getgroups32
10832 case TARGET_NR_getgroups32
:
10834 int gidsetsize
= arg1
;
10835 uint32_t *target_grouplist
;
10839 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10840 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10841 if (gidsetsize
== 0)
10843 if (!is_error(ret
)) {
10844 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
10845 if (!target_grouplist
) {
10846 return -TARGET_EFAULT
;
10848 for(i
= 0;i
< ret
; i
++)
10849 target_grouplist
[i
] = tswap32(grouplist
[i
]);
10850 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
10855 #ifdef TARGET_NR_setgroups32
10856 case TARGET_NR_setgroups32
:
10858 int gidsetsize
= arg1
;
10859 uint32_t *target_grouplist
;
10863 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10864 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
10865 if (!target_grouplist
) {
10866 return -TARGET_EFAULT
;
10868 for(i
= 0;i
< gidsetsize
; i
++)
10869 grouplist
[i
] = tswap32(target_grouplist
[i
]);
10870 unlock_user(target_grouplist
, arg2
, 0);
10871 return get_errno(setgroups(gidsetsize
, grouplist
));
10874 #ifdef TARGET_NR_fchown32
10875 case TARGET_NR_fchown32
:
10876 return get_errno(fchown(arg1
, arg2
, arg3
));
10878 #ifdef TARGET_NR_setresuid32
10879 case TARGET_NR_setresuid32
:
10880 return get_errno(sys_setresuid(arg1
, arg2
, arg3
));
10882 #ifdef TARGET_NR_getresuid32
10883 case TARGET_NR_getresuid32
:
10885 uid_t ruid
, euid
, suid
;
10886 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
10887 if (!is_error(ret
)) {
10888 if (put_user_u32(ruid
, arg1
)
10889 || put_user_u32(euid
, arg2
)
10890 || put_user_u32(suid
, arg3
))
10891 return -TARGET_EFAULT
;
10896 #ifdef TARGET_NR_setresgid32
10897 case TARGET_NR_setresgid32
:
10898 return get_errno(sys_setresgid(arg1
, arg2
, arg3
));
10900 #ifdef TARGET_NR_getresgid32
10901 case TARGET_NR_getresgid32
:
10903 gid_t rgid
, egid
, sgid
;
10904 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
10905 if (!is_error(ret
)) {
10906 if (put_user_u32(rgid
, arg1
)
10907 || put_user_u32(egid
, arg2
)
10908 || put_user_u32(sgid
, arg3
))
10909 return -TARGET_EFAULT
;
10914 #ifdef TARGET_NR_chown32
10915 case TARGET_NR_chown32
:
10916 if (!(p
= lock_user_string(arg1
)))
10917 return -TARGET_EFAULT
;
10918 ret
= get_errno(chown(p
, arg2
, arg3
));
10919 unlock_user(p
, arg1
, 0);
10922 #ifdef TARGET_NR_setuid32
10923 case TARGET_NR_setuid32
:
10924 return get_errno(sys_setuid(arg1
));
10926 #ifdef TARGET_NR_setgid32
10927 case TARGET_NR_setgid32
:
10928 return get_errno(sys_setgid(arg1
));
10930 #ifdef TARGET_NR_setfsuid32
10931 case TARGET_NR_setfsuid32
:
10932 return get_errno(setfsuid(arg1
));
10934 #ifdef TARGET_NR_setfsgid32
10935 case TARGET_NR_setfsgid32
:
10936 return get_errno(setfsgid(arg1
));
10938 #ifdef TARGET_NR_mincore
10939 case TARGET_NR_mincore
:
10941 void *a
= lock_user(VERIFY_READ
, arg1
, arg2
, 0);
10943 return -TARGET_ENOMEM
;
10945 p
= lock_user_string(arg3
);
10947 ret
= -TARGET_EFAULT
;
10949 ret
= get_errno(mincore(a
, arg2
, p
));
10950 unlock_user(p
, arg3
, ret
);
10952 unlock_user(a
, arg1
, 0);
10956 #ifdef TARGET_NR_arm_fadvise64_64
10957 case TARGET_NR_arm_fadvise64_64
:
10958 /* arm_fadvise64_64 looks like fadvise64_64 but
10959 * with different argument order: fd, advice, offset, len
10960 * rather than the usual fd, offset, len, advice.
10961 * Note that offset and len are both 64-bit so appear as
10962 * pairs of 32-bit registers.
10964 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
10965 target_offset64(arg5
, arg6
), arg2
);
10966 return -host_to_target_errno(ret
);
10969 #if TARGET_ABI_BITS == 32
10971 #ifdef TARGET_NR_fadvise64_64
10972 case TARGET_NR_fadvise64_64
:
10973 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
10974 /* 6 args: fd, advice, offset (high, low), len (high, low) */
10982 /* 6 args: fd, offset (high, low), len (high, low), advice */
10983 if (regpairs_aligned(cpu_env
, num
)) {
10984 /* offset is in (3,4), len in (5,6) and advice in 7 */
10992 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
),
10993 target_offset64(arg4
, arg5
), arg6
);
10994 return -host_to_target_errno(ret
);
10997 #ifdef TARGET_NR_fadvise64
10998 case TARGET_NR_fadvise64
:
10999 /* 5 args: fd, offset (high, low), len, advice */
11000 if (regpairs_aligned(cpu_env
, num
)) {
11001 /* offset is in (3,4), len in 5 and advice in 6 */
11007 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
), arg4
, arg5
);
11008 return -host_to_target_errno(ret
);
11011 #else /* not a 32-bit ABI */
11012 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11013 #ifdef TARGET_NR_fadvise64_64
11014 case TARGET_NR_fadvise64_64
:
11016 #ifdef TARGET_NR_fadvise64
11017 case TARGET_NR_fadvise64
:
11019 #ifdef TARGET_S390X
11021 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
11022 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
11023 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
11024 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
11028 return -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
11030 #endif /* end of 64-bit ABI fadvise handling */
11032 #ifdef TARGET_NR_madvise
11033 case TARGET_NR_madvise
:
11034 /* A straight passthrough may not be safe because qemu sometimes
11035 turns private file-backed mappings into anonymous mappings.
11036 This will break MADV_DONTNEED.
11037 This is a hint, so ignoring and returning success is ok. */
11040 #if TARGET_ABI_BITS == 32
11041 case TARGET_NR_fcntl64
:
11045 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
11046 to_flock64_fn
*copyto
= copy_to_user_flock64
;
11049 if (!((CPUARMState
*)cpu_env
)->eabi
) {
11050 copyfrom
= copy_from_user_oabi_flock64
;
11051 copyto
= copy_to_user_oabi_flock64
;
11055 cmd
= target_to_host_fcntl_cmd(arg2
);
11056 if (cmd
== -TARGET_EINVAL
) {
11061 case TARGET_F_GETLK64
:
11062 ret
= copyfrom(&fl
, arg3
);
11066 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11068 ret
= copyto(arg3
, &fl
);
11072 case TARGET_F_SETLK64
:
11073 case TARGET_F_SETLKW64
:
11074 ret
= copyfrom(&fl
, arg3
);
11078 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11081 ret
= do_fcntl(arg1
, arg2
, arg3
);
11087 #ifdef TARGET_NR_cacheflush
11088 case TARGET_NR_cacheflush
:
11089 /* self-modifying code is handled automatically, so nothing needed */
11092 #ifdef TARGET_NR_getpagesize
11093 case TARGET_NR_getpagesize
:
11094 return TARGET_PAGE_SIZE
;
11096 case TARGET_NR_gettid
:
11097 return get_errno(sys_gettid());
11098 #ifdef TARGET_NR_readahead
11099 case TARGET_NR_readahead
:
11100 #if TARGET_ABI_BITS == 32
11101 if (regpairs_aligned(cpu_env
, num
)) {
11106 ret
= get_errno(readahead(arg1
, target_offset64(arg2
, arg3
) , arg4
));
11108 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
11113 #ifdef TARGET_NR_setxattr
11114 case TARGET_NR_listxattr
:
11115 case TARGET_NR_llistxattr
:
11119 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11121 return -TARGET_EFAULT
;
11124 p
= lock_user_string(arg1
);
11126 if (num
== TARGET_NR_listxattr
) {
11127 ret
= get_errno(listxattr(p
, b
, arg3
));
11129 ret
= get_errno(llistxattr(p
, b
, arg3
));
11132 ret
= -TARGET_EFAULT
;
11134 unlock_user(p
, arg1
, 0);
11135 unlock_user(b
, arg2
, arg3
);
11138 case TARGET_NR_flistxattr
:
11142 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11144 return -TARGET_EFAULT
;
11147 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
11148 unlock_user(b
, arg2
, arg3
);
11151 case TARGET_NR_setxattr
:
11152 case TARGET_NR_lsetxattr
:
11154 void *p
, *n
, *v
= 0;
11156 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11158 return -TARGET_EFAULT
;
11161 p
= lock_user_string(arg1
);
11162 n
= lock_user_string(arg2
);
11164 if (num
== TARGET_NR_setxattr
) {
11165 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
11167 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
11170 ret
= -TARGET_EFAULT
;
11172 unlock_user(p
, arg1
, 0);
11173 unlock_user(n
, arg2
, 0);
11174 unlock_user(v
, arg3
, 0);
11177 case TARGET_NR_fsetxattr
:
11181 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11183 return -TARGET_EFAULT
;
11186 n
= lock_user_string(arg2
);
11188 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
11190 ret
= -TARGET_EFAULT
;
11192 unlock_user(n
, arg2
, 0);
11193 unlock_user(v
, arg3
, 0);
11196 case TARGET_NR_getxattr
:
11197 case TARGET_NR_lgetxattr
:
11199 void *p
, *n
, *v
= 0;
11201 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11203 return -TARGET_EFAULT
;
11206 p
= lock_user_string(arg1
);
11207 n
= lock_user_string(arg2
);
11209 if (num
== TARGET_NR_getxattr
) {
11210 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
11212 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
11215 ret
= -TARGET_EFAULT
;
11217 unlock_user(p
, arg1
, 0);
11218 unlock_user(n
, arg2
, 0);
11219 unlock_user(v
, arg3
, arg4
);
11222 case TARGET_NR_fgetxattr
:
11226 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11228 return -TARGET_EFAULT
;
11231 n
= lock_user_string(arg2
);
11233 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
11235 ret
= -TARGET_EFAULT
;
11237 unlock_user(n
, arg2
, 0);
11238 unlock_user(v
, arg3
, arg4
);
11241 case TARGET_NR_removexattr
:
11242 case TARGET_NR_lremovexattr
:
11245 p
= lock_user_string(arg1
);
11246 n
= lock_user_string(arg2
);
11248 if (num
== TARGET_NR_removexattr
) {
11249 ret
= get_errno(removexattr(p
, n
));
11251 ret
= get_errno(lremovexattr(p
, n
));
11254 ret
= -TARGET_EFAULT
;
11256 unlock_user(p
, arg1
, 0);
11257 unlock_user(n
, arg2
, 0);
11260 case TARGET_NR_fremovexattr
:
11263 n
= lock_user_string(arg2
);
11265 ret
= get_errno(fremovexattr(arg1
, n
));
11267 ret
= -TARGET_EFAULT
;
11269 unlock_user(n
, arg2
, 0);
11273 #endif /* CONFIG_ATTR */
11274 #ifdef TARGET_NR_set_thread_area
11275 case TARGET_NR_set_thread_area
:
11276 #if defined(TARGET_MIPS)
11277 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
11279 #elif defined(TARGET_CRIS)
11281 ret
= -TARGET_EINVAL
;
11283 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
11287 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11288 return do_set_thread_area(cpu_env
, arg1
);
11289 #elif defined(TARGET_M68K)
11291 TaskState
*ts
= cpu
->opaque
;
11292 ts
->tp_value
= arg1
;
11296 return -TARGET_ENOSYS
;
11299 #ifdef TARGET_NR_get_thread_area
11300 case TARGET_NR_get_thread_area
:
11301 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11302 return do_get_thread_area(cpu_env
, arg1
);
11303 #elif defined(TARGET_M68K)
11305 TaskState
*ts
= cpu
->opaque
;
11306 return ts
->tp_value
;
11309 return -TARGET_ENOSYS
;
11312 #ifdef TARGET_NR_getdomainname
11313 case TARGET_NR_getdomainname
:
11314 return -TARGET_ENOSYS
;
11317 #ifdef TARGET_NR_clock_settime
11318 case TARGET_NR_clock_settime
:
11320 struct timespec ts
;
11322 ret
= target_to_host_timespec(&ts
, arg2
);
11323 if (!is_error(ret
)) {
11324 ret
= get_errno(clock_settime(arg1
, &ts
));
11329 #ifdef TARGET_NR_clock_gettime
11330 case TARGET_NR_clock_gettime
:
11332 struct timespec ts
;
11333 ret
= get_errno(clock_gettime(arg1
, &ts
));
11334 if (!is_error(ret
)) {
11335 ret
= host_to_target_timespec(arg2
, &ts
);
11340 #ifdef TARGET_NR_clock_getres
11341 case TARGET_NR_clock_getres
:
11343 struct timespec ts
;
11344 ret
= get_errno(clock_getres(arg1
, &ts
));
11345 if (!is_error(ret
)) {
11346 host_to_target_timespec(arg2
, &ts
);
11351 #ifdef TARGET_NR_clock_nanosleep
11352 case TARGET_NR_clock_nanosleep
:
11354 struct timespec ts
;
11355 target_to_host_timespec(&ts
, arg3
);
11356 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
11357 &ts
, arg4
? &ts
: NULL
));
11359 host_to_target_timespec(arg4
, &ts
);
11361 #if defined(TARGET_PPC)
11362 /* clock_nanosleep is odd in that it returns positive errno values.
11363 * On PPC, CR0 bit 3 should be set in such a situation. */
11364 if (ret
&& ret
!= -TARGET_ERESTARTSYS
) {
11365 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
11372 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11373 case TARGET_NR_set_tid_address
:
11374 return get_errno(set_tid_address((int *)g2h(arg1
)));
11377 case TARGET_NR_tkill
:
11378 return get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
11380 case TARGET_NR_tgkill
:
11381 return get_errno(safe_tgkill((int)arg1
, (int)arg2
,
11382 target_to_host_signal(arg3
)));
11384 #ifdef TARGET_NR_set_robust_list
11385 case TARGET_NR_set_robust_list
:
11386 case TARGET_NR_get_robust_list
:
11387 /* The ABI for supporting robust futexes has userspace pass
11388 * the kernel a pointer to a linked list which is updated by
11389 * userspace after the syscall; the list is walked by the kernel
11390 * when the thread exits. Since the linked list in QEMU guest
11391 * memory isn't a valid linked list for the host and we have
11392 * no way to reliably intercept the thread-death event, we can't
11393 * support these. Silently return ENOSYS so that guest userspace
11394 * falls back to a non-robust futex implementation (which should
11395 * be OK except in the corner case of the guest crashing while
11396 * holding a mutex that is shared with another process via
11399 return -TARGET_ENOSYS
;
11402 #if defined(TARGET_NR_utimensat)
11403 case TARGET_NR_utimensat
:
11405 struct timespec
*tsp
, ts
[2];
11409 target_to_host_timespec(ts
, arg3
);
11410 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
11414 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
11416 if (!(p
= lock_user_string(arg2
))) {
11417 return -TARGET_EFAULT
;
11419 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
11420 unlock_user(p
, arg2
, 0);
11425 case TARGET_NR_futex
:
11426 return do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
11427 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11428 case TARGET_NR_inotify_init
:
11429 ret
= get_errno(sys_inotify_init());
11431 fd_trans_register(ret
, &target_inotify_trans
);
11435 #ifdef CONFIG_INOTIFY1
11436 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11437 case TARGET_NR_inotify_init1
:
11438 ret
= get_errno(sys_inotify_init1(target_to_host_bitmask(arg1
,
11439 fcntl_flags_tbl
)));
11441 fd_trans_register(ret
, &target_inotify_trans
);
11446 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11447 case TARGET_NR_inotify_add_watch
:
11448 p
= lock_user_string(arg2
);
11449 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
11450 unlock_user(p
, arg2
, 0);
11453 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11454 case TARGET_NR_inotify_rm_watch
:
11455 return get_errno(sys_inotify_rm_watch(arg1
, arg2
));
11458 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11459 case TARGET_NR_mq_open
:
11461 struct mq_attr posix_mq_attr
;
11462 struct mq_attr
*pposix_mq_attr
;
11465 host_flags
= target_to_host_bitmask(arg2
, fcntl_flags_tbl
);
11466 pposix_mq_attr
= NULL
;
11468 if (copy_from_user_mq_attr(&posix_mq_attr
, arg4
) != 0) {
11469 return -TARGET_EFAULT
;
11471 pposix_mq_attr
= &posix_mq_attr
;
11473 p
= lock_user_string(arg1
- 1);
11475 return -TARGET_EFAULT
;
11477 ret
= get_errno(mq_open(p
, host_flags
, arg3
, pposix_mq_attr
));
11478 unlock_user (p
, arg1
, 0);
11482 case TARGET_NR_mq_unlink
:
11483 p
= lock_user_string(arg1
- 1);
11485 return -TARGET_EFAULT
;
11487 ret
= get_errno(mq_unlink(p
));
11488 unlock_user (p
, arg1
, 0);
11491 case TARGET_NR_mq_timedsend
:
11493 struct timespec ts
;
11495 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11497 target_to_host_timespec(&ts
, arg5
);
11498 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
11499 host_to_target_timespec(arg5
, &ts
);
11501 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
11503 unlock_user (p
, arg2
, arg3
);
11507 case TARGET_NR_mq_timedreceive
:
11509 struct timespec ts
;
11512 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11514 target_to_host_timespec(&ts
, arg5
);
11515 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11517 host_to_target_timespec(arg5
, &ts
);
11519 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11522 unlock_user (p
, arg2
, arg3
);
11524 put_user_u32(prio
, arg4
);
11528 /* Not implemented for now... */
11529 /* case TARGET_NR_mq_notify: */
11532 case TARGET_NR_mq_getsetattr
:
11534 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
11537 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
11538 ret
= get_errno(mq_setattr(arg1
, &posix_mq_attr_in
,
11539 &posix_mq_attr_out
));
11540 } else if (arg3
!= 0) {
11541 ret
= get_errno(mq_getattr(arg1
, &posix_mq_attr_out
));
11543 if (ret
== 0 && arg3
!= 0) {
11544 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
11550 #ifdef CONFIG_SPLICE
11551 #ifdef TARGET_NR_tee
11552 case TARGET_NR_tee
:
11554 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
11558 #ifdef TARGET_NR_splice
11559 case TARGET_NR_splice
:
11561 loff_t loff_in
, loff_out
;
11562 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
11564 if (get_user_u64(loff_in
, arg2
)) {
11565 return -TARGET_EFAULT
;
11567 ploff_in
= &loff_in
;
11570 if (get_user_u64(loff_out
, arg4
)) {
11571 return -TARGET_EFAULT
;
11573 ploff_out
= &loff_out
;
11575 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
11577 if (put_user_u64(loff_in
, arg2
)) {
11578 return -TARGET_EFAULT
;
11582 if (put_user_u64(loff_out
, arg4
)) {
11583 return -TARGET_EFAULT
;
11589 #ifdef TARGET_NR_vmsplice
11590 case TARGET_NR_vmsplice
:
11592 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
11594 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
11595 unlock_iovec(vec
, arg2
, arg3
, 0);
11597 ret
= -host_to_target_errno(errno
);
11602 #endif /* CONFIG_SPLICE */
11603 #ifdef CONFIG_EVENTFD
11604 #if defined(TARGET_NR_eventfd)
11605 case TARGET_NR_eventfd
:
11606 ret
= get_errno(eventfd(arg1
, 0));
11608 fd_trans_register(ret
, &target_eventfd_trans
);
11612 #if defined(TARGET_NR_eventfd2)
11613 case TARGET_NR_eventfd2
:
11615 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
11616 if (arg2
& TARGET_O_NONBLOCK
) {
11617 host_flags
|= O_NONBLOCK
;
11619 if (arg2
& TARGET_O_CLOEXEC
) {
11620 host_flags
|= O_CLOEXEC
;
11622 ret
= get_errno(eventfd(arg1
, host_flags
));
11624 fd_trans_register(ret
, &target_eventfd_trans
);
11629 #endif /* CONFIG_EVENTFD */
11630 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11631 case TARGET_NR_fallocate
:
11632 #if TARGET_ABI_BITS == 32
11633 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
11634 target_offset64(arg5
, arg6
)));
11636 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
11640 #if defined(CONFIG_SYNC_FILE_RANGE)
11641 #if defined(TARGET_NR_sync_file_range)
11642 case TARGET_NR_sync_file_range
:
11643 #if TARGET_ABI_BITS == 32
11644 #if defined(TARGET_MIPS)
11645 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11646 target_offset64(arg5
, arg6
), arg7
));
11648 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
11649 target_offset64(arg4
, arg5
), arg6
));
11650 #endif /* !TARGET_MIPS */
11652 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
11656 #if defined(TARGET_NR_sync_file_range2)
11657 case TARGET_NR_sync_file_range2
:
11658 /* This is like sync_file_range but the arguments are reordered */
11659 #if TARGET_ABI_BITS == 32
11660 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11661 target_offset64(arg5
, arg6
), arg2
));
11663 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
11668 #if defined(TARGET_NR_signalfd4)
11669 case TARGET_NR_signalfd4
:
11670 return do_signalfd4(arg1
, arg2
, arg4
);
11672 #if defined(TARGET_NR_signalfd)
11673 case TARGET_NR_signalfd
:
11674 return do_signalfd4(arg1
, arg2
, 0);
11676 #if defined(CONFIG_EPOLL)
11677 #if defined(TARGET_NR_epoll_create)
11678 case TARGET_NR_epoll_create
:
11679 return get_errno(epoll_create(arg1
));
11681 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11682 case TARGET_NR_epoll_create1
:
11683 return get_errno(epoll_create1(arg1
));
11685 #if defined(TARGET_NR_epoll_ctl)
11686 case TARGET_NR_epoll_ctl
:
11688 struct epoll_event ep
;
11689 struct epoll_event
*epp
= 0;
11691 struct target_epoll_event
*target_ep
;
11692 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
11693 return -TARGET_EFAULT
;
11695 ep
.events
= tswap32(target_ep
->events
);
11696 /* The epoll_data_t union is just opaque data to the kernel,
11697 * so we transfer all 64 bits across and need not worry what
11698 * actual data type it is.
11700 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
11701 unlock_user_struct(target_ep
, arg4
, 0);
11704 return get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
11708 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11709 #if defined(TARGET_NR_epoll_wait)
11710 case TARGET_NR_epoll_wait
:
11712 #if defined(TARGET_NR_epoll_pwait)
11713 case TARGET_NR_epoll_pwait
:
11716 struct target_epoll_event
*target_ep
;
11717 struct epoll_event
*ep
;
11719 int maxevents
= arg3
;
11720 int timeout
= arg4
;
11722 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
11723 return -TARGET_EINVAL
;
11726 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
11727 maxevents
* sizeof(struct target_epoll_event
), 1);
11729 return -TARGET_EFAULT
;
11732 ep
= g_try_new(struct epoll_event
, maxevents
);
11734 unlock_user(target_ep
, arg2
, 0);
11735 return -TARGET_ENOMEM
;
11739 #if defined(TARGET_NR_epoll_pwait)
11740 case TARGET_NR_epoll_pwait
:
11742 target_sigset_t
*target_set
;
11743 sigset_t _set
, *set
= &_set
;
11746 if (arg6
!= sizeof(target_sigset_t
)) {
11747 ret
= -TARGET_EINVAL
;
11751 target_set
= lock_user(VERIFY_READ
, arg5
,
11752 sizeof(target_sigset_t
), 1);
11754 ret
= -TARGET_EFAULT
;
11757 target_to_host_sigset(set
, target_set
);
11758 unlock_user(target_set
, arg5
, 0);
11763 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
11764 set
, SIGSET_T_SIZE
));
11768 #if defined(TARGET_NR_epoll_wait)
11769 case TARGET_NR_epoll_wait
:
11770 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
11775 ret
= -TARGET_ENOSYS
;
11777 if (!is_error(ret
)) {
11779 for (i
= 0; i
< ret
; i
++) {
11780 target_ep
[i
].events
= tswap32(ep
[i
].events
);
11781 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
11783 unlock_user(target_ep
, arg2
,
11784 ret
* sizeof(struct target_epoll_event
));
11786 unlock_user(target_ep
, arg2
, 0);
11793 #ifdef TARGET_NR_prlimit64
11794 case TARGET_NR_prlimit64
:
11796 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11797 struct target_rlimit64
*target_rnew
, *target_rold
;
11798 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
11799 int resource
= target_to_host_resource(arg2
);
11801 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
11802 return -TARGET_EFAULT
;
11804 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
11805 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
11806 unlock_user_struct(target_rnew
, arg3
, 0);
11810 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
11811 if (!is_error(ret
) && arg4
) {
11812 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
11813 return -TARGET_EFAULT
;
11815 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
11816 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
11817 unlock_user_struct(target_rold
, arg4
, 1);
11822 #ifdef TARGET_NR_gethostname
11823 case TARGET_NR_gethostname
:
11825 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
11827 ret
= get_errno(gethostname(name
, arg2
));
11828 unlock_user(name
, arg1
, arg2
);
11830 ret
= -TARGET_EFAULT
;
11835 #ifdef TARGET_NR_atomic_cmpxchg_32
11836 case TARGET_NR_atomic_cmpxchg_32
:
11838 /* should use start_exclusive from main.c */
11839 abi_ulong mem_value
;
11840 if (get_user_u32(mem_value
, arg6
)) {
11841 target_siginfo_t info
;
11842 info
.si_signo
= SIGSEGV
;
11844 info
.si_code
= TARGET_SEGV_MAPERR
;
11845 info
._sifields
._sigfault
._addr
= arg6
;
11846 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
11847 QEMU_SI_FAULT
, &info
);
11851 if (mem_value
== arg2
)
11852 put_user_u32(arg1
, arg6
);
11856 #ifdef TARGET_NR_atomic_barrier
11857 case TARGET_NR_atomic_barrier
:
11858 /* Like the kernel implementation and the
11859 qemu arm barrier, no-op this? */
11863 #ifdef TARGET_NR_timer_create
11864 case TARGET_NR_timer_create
:
11866 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11868 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
11871 int timer_index
= next_free_host_timer();
11873 if (timer_index
< 0) {
11874 ret
= -TARGET_EAGAIN
;
11876 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
11879 phost_sevp
= &host_sevp
;
11880 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
11886 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
11890 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
11891 return -TARGET_EFAULT
;
11899 #ifdef TARGET_NR_timer_settime
11900 case TARGET_NR_timer_settime
:
11902 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11903 * struct itimerspec * old_value */
11904 target_timer_t timerid
= get_timer_id(arg1
);
11908 } else if (arg3
== 0) {
11909 ret
= -TARGET_EINVAL
;
11911 timer_t htimer
= g_posix_timers
[timerid
];
11912 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
11914 if (target_to_host_itimerspec(&hspec_new
, arg3
)) {
11915 return -TARGET_EFAULT
;
11918 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
11919 if (arg4
&& host_to_target_itimerspec(arg4
, &hspec_old
)) {
11920 return -TARGET_EFAULT
;
11927 #ifdef TARGET_NR_timer_gettime
11928 case TARGET_NR_timer_gettime
:
11930 /* args: timer_t timerid, struct itimerspec *curr_value */
11931 target_timer_t timerid
= get_timer_id(arg1
);
11935 } else if (!arg2
) {
11936 ret
= -TARGET_EFAULT
;
11938 timer_t htimer
= g_posix_timers
[timerid
];
11939 struct itimerspec hspec
;
11940 ret
= get_errno(timer_gettime(htimer
, &hspec
));
11942 if (host_to_target_itimerspec(arg2
, &hspec
)) {
11943 ret
= -TARGET_EFAULT
;
11950 #ifdef TARGET_NR_timer_getoverrun
11951 case TARGET_NR_timer_getoverrun
:
11953 /* args: timer_t timerid */
11954 target_timer_t timerid
= get_timer_id(arg1
);
11959 timer_t htimer
= g_posix_timers
[timerid
];
11960 ret
= get_errno(timer_getoverrun(htimer
));
11966 #ifdef TARGET_NR_timer_delete
11967 case TARGET_NR_timer_delete
:
11969 /* args: timer_t timerid */
11970 target_timer_t timerid
= get_timer_id(arg1
);
11975 timer_t htimer
= g_posix_timers
[timerid
];
11976 ret
= get_errno(timer_delete(htimer
));
11977 g_posix_timers
[timerid
] = 0;
11983 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11984 case TARGET_NR_timerfd_create
:
11985 return get_errno(timerfd_create(arg1
,
11986 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
11989 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11990 case TARGET_NR_timerfd_gettime
:
11992 struct itimerspec its_curr
;
11994 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
11996 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
11997 return -TARGET_EFAULT
;
12003 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12004 case TARGET_NR_timerfd_settime
:
12006 struct itimerspec its_new
, its_old
, *p_new
;
12009 if (target_to_host_itimerspec(&its_new
, arg3
)) {
12010 return -TARGET_EFAULT
;
12017 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
12019 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
12020 return -TARGET_EFAULT
;
12026 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12027 case TARGET_NR_ioprio_get
:
12028 return get_errno(ioprio_get(arg1
, arg2
));
12031 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12032 case TARGET_NR_ioprio_set
:
12033 return get_errno(ioprio_set(arg1
, arg2
, arg3
));
12036 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12037 case TARGET_NR_setns
:
12038 return get_errno(setns(arg1
, arg2
));
12040 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12041 case TARGET_NR_unshare
:
12042 return get_errno(unshare(arg1
));
12044 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12045 case TARGET_NR_kcmp
:
12046 return get_errno(kcmp(arg1
, arg2
, arg3
, arg4
, arg5
));
12048 #ifdef TARGET_NR_swapcontext
12049 case TARGET_NR_swapcontext
:
12050 /* PowerPC specific. */
12051 return do_swapcontext(cpu_env
, arg1
, arg2
, arg3
);
12053 #ifdef TARGET_NR_memfd_create
12054 case TARGET_NR_memfd_create
:
12055 p
= lock_user_string(arg1
);
12057 return -TARGET_EFAULT
;
12059 ret
= get_errno(memfd_create(p
, arg2
));
12060 fd_trans_unregister(ret
);
12061 unlock_user(p
, arg1
, 0);
12066 qemu_log_mask(LOG_UNIMP
, "Unsupported syscall: %d\n", num
);
12067 return -TARGET_ENOSYS
;
12072 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
12073 abi_long arg2
, abi_long arg3
, abi_long arg4
,
12074 abi_long arg5
, abi_long arg6
, abi_long arg7
,
12077 CPUState
*cpu
= env_cpu(cpu_env
);
12080 #ifdef DEBUG_ERESTARTSYS
12081 /* Debug-only code for exercising the syscall-restart code paths
12082 * in the per-architecture cpu main loops: restart every syscall
12083 * the guest makes once before letting it through.
12089 return -TARGET_ERESTARTSYS
;
12094 record_syscall_start(cpu
, num
, arg1
,
12095 arg2
, arg3
, arg4
, arg5
, arg6
, arg7
, arg8
);
12097 if (unlikely(do_strace
)) {
12098 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
12099 ret
= do_syscall1(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
,
12100 arg5
, arg6
, arg7
, arg8
);
12101 print_syscall_ret(num
, ret
);
12103 ret
= do_syscall1(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
,
12104 arg5
, arg6
, arg7
, arg8
);
12107 record_syscall_return(cpu
, num
, ret
);